import uuid
from nexia.home import NexiaHome
import requests_mock
from homeassistant.components.nexia.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
async def async_init_integration(
hass: HomeAssistant,
skip_setup: bool = False,
) -> MockConfigEntry:
"""Set up the nexia integration in Home Assistant."""
house_fixture = "nexia/mobile_houses_123456.json"
session_fixture = "nexia/session_123456.json"
sign_in_fixture = "nexia/sign_in.json"
with requests_mock.mock() as m, patch(
"nexia.home.load_or_create_uuid", return_value=uuid.uuid4()
):
m.post(NexiaHome.API_MOBILE_SESSION_URL, text=load_fixture(session_fixture))
m.get(
NexiaHome.API_MOBILE_HOUSES_URL.format(house_id=123456),
text=load_fixture(house_fixture),
)
m.post(
NexiaHome.API_MOBILE_ACCOUNTS_SIGN_IN_URL,
text=load_fixture(sign_in_fixture),
)
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_USERNAME: "mock", CONF_PASSWORD: "mock"}
)
entry.add_to_hass(hass)
if not skip_setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
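
# Illustrative usage (hypothetical test, not part of this helper module): a
# test can initialize the mocked integration and assert against the fixtures:
#
#     async def test_integration_is_set_up(hass):
#         entry = await async_init_integration(hass)
#         assert entry.state  # config entry was loaded against the fixtures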
|
import subprocess
import diamond.collector
from diamond.collector import str_to_bool
class NtpdCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(NtpdCollector, self).get_default_config_help()
config_help.update({
'ntpq_bin': 'Path to ntpq binary',
'ntpdc_bin': 'Path to ntpdc binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NtpdCollector, self).get_default_config()
config.update({
'path': 'ntpd',
'ntpq_bin': self.find_binary('/usr/bin/ntpq'),
'ntpdc_bin': self.find_binary('/usr/bin/ntpdc'),
'use_sudo': False,
'sudo_cmd': self.find_binary('/usr/bin/sudo'),
})
return config
def run_command(self, command):
try:
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
return subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()[0]
except OSError:
self.log.exception("Unable to run %s", command)
return ""
def get_ntpq_output(self):
return self.run_command([self.config['ntpq_bin'], '-np'])
def get_ntpq_stats(self):
output = self.get_ntpq_output()
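        # Illustrative `ntpq -np` peer line this parser expects (sample values
        # only); a leading '*' marks the selected system peer:
        #      remote       refid    st t when poll reach   delay  offset  jitter
        # *203.0.113.5  198.51.100.1  2 u   43   64   377   0.321   0.100   0.042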
data = {}
for line in output.splitlines():
# Only care about system peer
if not line.startswith('*'):
continue
parts = line[1:].split()
data['stratum'] = {'val': parts[2], 'precision': 0}
data['when'] = {'val': parts[4], 'precision': 0}
if data['when']['val'] == '-':
                # sometimes ntpq returns the value '-' for 'when'; continue
                # and try another system peer
continue
data['poll'] = {'val': parts[5], 'precision': 0}
data['reach'] = {'val': parts[6], 'precision': 0}
data['delay'] = {'val': parts[7], 'precision': 6}
data['jitter'] = {'val': parts[9], 'precision': 6}
        def convert_to_second(when_ntpd_output):
            value = float(when_ntpd_output[:-1])
            if when_ntpd_output.endswith('m'):
                return value * 60
            elif when_ntpd_output.endswith('h'):
                return value * 3600
            elif when_ntpd_output.endswith('d'):
                return value * 86400
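            # e.g. convert_to_second('25m') == 1500.0; values without an
            # 'm'/'h'/'d' suffix fall through and return None, but the caller
            # below only invokes this for suffixed values.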
if 'when' in data:
if data['when']['val'] == '-':
self.log.warning('ntpq returned bad value for "when"')
return []
if data['when']['val'].endswith(('m', 'h', 'd')):
data['when']['val'] = convert_to_second(data['when']['val'])
return data.items()
def get_ntpdc_kerninfo_output(self):
return self.run_command([self.config['ntpdc_bin'], '-c', 'kerninfo'])
def get_ntpdc_kerninfo_stats(self):
output = self.get_ntpdc_kerninfo_output()
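        # Illustrative `ntpdc -c kerninfo` output (sample values only):
        #     pll offset:           0.003554 s
        #     pll frequency:        42.274 ppm
        #     maximum error:        0.43235 s
        #     estimated error:      0.000196 s
        #     status:               2001  pll nano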
data = {}
for line in output.splitlines():
key, val = line.split(':')
val = float(val.split()[0])
if key == 'pll offset':
data['offset'] = {'val': val, 'precision': 10}
elif key == 'pll frequency':
data['frequency'] = {'val': val, 'precision': 6}
elif key == 'maximum error':
data['max_error'] = {'val': val, 'precision': 6}
elif key == 'estimated error':
data['est_error'] = {'val': val, 'precision': 6}
elif key == 'status':
data['status'] = {'val': val, 'precision': 0}
return data.items()
def get_ntpdc_sysinfo_output(self):
return self.run_command([self.config['ntpdc_bin'], '-c', 'sysinfo'])
def get_ntpdc_sysinfo_stats(self):
output = self.get_ntpdc_sysinfo_output()
data = {}
for line in output.splitlines():
key, val = line.split(':')[0:2]
try:
val = float(val.split()[0])
if key == 'root distance':
data['root_distance'] = {'val': val, 'precision': 6}
elif key == 'root dispersion':
data['root_dispersion'] = {'val': val, 'precision': 6}
except Exception:
pass
return data.items()
def collect(self):
for stat, v in self.get_ntpq_stats():
self.publish(stat, v['val'], precision=v['precision'])
for stat, v in self.get_ntpdc_kerninfo_stats():
self.publish(stat, v['val'], precision=v['precision'])
for stat, v in self.get_ntpdc_sysinfo_stats():
self.publish(stat, v['val'], precision=v['precision'])
|
import dedupe
import dedupe.training as training
import unittest
class TrainingTest(unittest.TestCase):
def setUp(self):
field_definition = [{'field': 'name', 'type': 'String'}]
self.data_model = dedupe.Dedupe(field_definition).data_model
self.training_pairs = {
'match': [({"name": "Bob", "age": "50"},
{"name": "Bob", "age": "75"}),
({"name": "Meredith", "age": "40"},
{"name": "Sue", "age": "10"})],
'distinct': [({"name": "Jimmy", "age": "20"},
{"name": "Jimbo", "age": "21"}),
({"name": "Willy", "age": "35"},
{"name": "William", "age": "35"}),
({"name": "William", "age": "36"},
{"name": "William", "age": "35"})]
}
self.training = self.training_pairs['match'] + \
self.training_pairs['distinct']
self.training_records = []
for pair in self.training:
for record in pair:
if record not in self.training_records:
self.training_records.append(record)
self.simple = lambda x: set([str(k) for k in x
if "CompoundPredicate" not in str(k)])
self.block_learner = training.BlockLearner
self.block_learner.blocker = dedupe.blocking.Fingerprinter(self.data_model.predicates())
self.block_learner.blocker.index_all({i: x for i, x in enumerate(self.training_records)})
def test_dedupe_coverage(self):
coverage = self.block_learner.cover(self.block_learner, self.training)
assert self.simple(coverage.keys()).issuperset(
set(["SimplePredicate: (tokenFieldPredicate, name)",
"SimplePredicate: (commonSixGram, name)",
"TfidfTextCanopyPredicate: (0.4, name)",
"SimplePredicate: (sortedAcronym, name)",
"SimplePredicate: (sameThreeCharStartPredicate, name)",
"TfidfTextCanopyPredicate: (0.2, name)",
"SimplePredicate: (sameFiveCharStartPredicate, name)",
"TfidfTextCanopyPredicate: (0.6, name)",
"SimplePredicate: (wholeFieldPredicate, name)",
"TfidfTextCanopyPredicate: (0.8, name)",
"SimplePredicate: (commonFourGram, name)",
"SimplePredicate: (firstTokenPredicate, name)",
"SimplePredicate: (sameSevenCharStartPredicate, name)"]))
def test_uncovered_by(self):
before = {1: {1, 2, 3}, 2: {1, 2}, 3: {3}}
after = {1: {1, 2}, 2: {1, 2}}
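        # uncovered_by removes the given pair ids from every predicate's cover
        # and drops predicates whose cover becomes empty (here predicate 3,
        # which only covered pair 3).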
before_copy = before.copy()
assert training.BranchBound.uncovered_by(before, set()) == before
assert training.BranchBound.uncovered_by(before, {3}) == after
assert before == before_copy
def test_covered_pairs(self):
        p1 = lambda x, target=None: (1,)  # noqa: E731
self.block_learner.blocker.predicates = (p1,)
cover = self.block_learner.cover(self.block_learner,
[('a', 'b')] * 2)
assert cover[p1] == {0, 1}
if __name__ == "__main__":
unittest.main()
|
import logging
import blebox_uniapi
from homeassistant.components.blebox.const import DOMAIN
from homeassistant.config_entries import ENTRY_STATE_NOT_LOADED, ENTRY_STATE_SETUP_RETRY
from .conftest import mock_config, patch_product_identify
async def test_setup_failure(hass, caplog):
"""Test that setup failure is handled and logged."""
patch_product_identify(None, side_effect=blebox_uniapi.error.ClientError)
entry = mock_config()
entry.add_to_hass(hass)
caplog.set_level(logging.ERROR)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert "Identify failed at 172.100.123.4:80 ()" in caplog.text
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_setup_failure_on_connection(hass, caplog):
"""Test that setup failure is handled and logged."""
patch_product_identify(None, side_effect=blebox_uniapi.error.ConnectionError)
entry = mock_config()
entry.add_to_hass(hass)
caplog.set_level(logging.ERROR)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert "Identify failed at 172.100.123.4:80 ()" in caplog.text
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_config_entry(hass):
"""Test that unloading works properly."""
patch_product_identify(None)
entry = mock_config()
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert hass.data[DOMAIN]
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert not hass.data.get(DOMAIN)
assert entry.state == ENTRY_STATE_NOT_LOADED
|
import unittest
from unittest.mock import patch, Mock
from flask import Flask
from cryptography.x509 import DNSName
from lemur.plugins.lemur_acme import acme_handlers
class TestAcmeHandler(unittest.TestCase):
def setUp(self):
self.acme = acme_handlers.AcmeHandler()
        # Create a new Flask application for the duration of the test. As of Python 3.8,
        # the application context must be pushed manually when running tests in a dev
        # environment to avoid a 'Working outside of application context' error.
_app = Flask('lemur_test_acme')
self.ctx = _app.app_context()
assert self.ctx
self.ctx.push()
def tearDown(self):
self.ctx.pop()
def test_strip_wildcard(self):
expected = ("example.com", False)
result = self.acme.strip_wildcard("example.com")
self.assertEqual(expected, result)
expected = ("example.com", True)
result = self.acme.strip_wildcard("*.example.com")
self.assertEqual(expected, result)
def test_authz_record(self):
a = acme_handlers.AuthorizationRecord("domain", "host", "authz", "challenge", "id", "cname_delegation")
self.assertEqual(type(a), acme_handlers.AuthorizationRecord)
def test_setup_acme_client_fail(self):
mock_authority = Mock()
mock_authority.options = []
with self.assertRaises(Exception):
self.acme.setup_acme_client(mock_authority)
def test_reuse_account_not_defined(self):
mock_authority = Mock()
mock_authority.options = []
with self.assertRaises(Exception):
self.acme.reuse_account(mock_authority)
def test_reuse_account_from_authority(self):
mock_authority = Mock()
mock_authority.options = '[{"name": "acme_private_key", "value": "PRIVATE_KEY"}, {"name": "acme_regr", "value": "ACME_REGR"}]'
self.assertTrue(self.acme.reuse_account(mock_authority))
@patch("lemur.plugins.lemur_acme.acme_handlers.current_app")
def test_reuse_account_from_config(self, mock_current_app):
mock_authority = Mock()
mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}]'
mock_current_app.config = {"ACME_PRIVATE_KEY": "PRIVATE_KEY", "ACME_REGR": "ACME_REGR"}
self.assertTrue(self.acme.reuse_account(mock_authority))
def test_reuse_account_no_configuration(self):
mock_authority = Mock()
mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}]'
self.assertFalse(self.acme.reuse_account(mock_authority))
@patch("lemur.plugins.lemur_acme.acme_handlers.authorities_service")
@patch("lemur.plugins.lemur_acme.acme_handlers.BackwardsCompatibleClientV2")
def test_setup_acme_client_success(self, mock_acme, mock_authorities_service):
mock_authority = Mock()
mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}, ' \
'{"name": "store_account", "value": false}]'
mock_client = Mock()
mock_registration = Mock()
mock_registration.uri = "http://test.com"
mock_client.register = mock_registration
mock_client.agree_to_tos = Mock(return_value=True)
mock_acme.return_value = mock_client
result_client, result_registration = self.acme.setup_acme_client(mock_authority)
mock_authorities_service.update_options.assert_not_called()
assert result_client
assert result_registration
def test_get_domains_single(self):
options = {"common_name": "test.netflix.net"}
result = self.acme.get_domains(options)
self.assertEqual(result, [options["common_name"]])
def test_get_domains_multiple(self):
options = {
"common_name": "test.netflix.net",
"extensions": {
"sub_alt_names": {"names": [DNSName("test2.netflix.net"), DNSName("test3.netflix.net")]}
},
}
result = self.acme.get_domains(options)
self.assertEqual(
result, [options["common_name"], "test2.netflix.net", "test3.netflix.net"]
)
def test_get_domains_san(self):
options = {
"common_name": "test.netflix.net",
"extensions": {
"sub_alt_names": {"names": [DNSName("test.netflix.net"), DNSName("test2.netflix.net")]}
},
}
result = self.acme.get_domains(options)
self.assertEqual(
result, [options["common_name"], "test2.netflix.net"]
)
|
import os.path as op
from numpy.testing import assert_array_equal
from scipy import io as sio
from mne.io import read_raw_eximia
from mne.io.tests.test_raw import _test_raw_reader
from mne.utils import run_tests_if_main
from mne.datasets.testing import data_path, requires_testing_data
@requires_testing_data
def test_eximia_nxe():
"""Test reading Eximia NXE files."""
fname = op.join(data_path(), 'eximia', 'test_eximia.nxe')
raw = read_raw_eximia(fname, preload=True)
assert 'RawEximia' in repr(raw)
_test_raw_reader(read_raw_eximia, fname=fname,
test_scaling=False, # XXX probably a scaling problem
)
fname_mat = op.join(data_path(), 'eximia', 'test_eximia.mat')
mc = sio.loadmat(fname_mat)
m_data = mc['data']
m_header = mc['header']
assert raw._data.shape == m_data.shape
assert m_header['Fs'][0, 0][0, 0] == raw.info['sfreq']
m_names = [x[0][0] for x in m_header['label'][0, 0]]
m_names = list(
map(lambda x: x.replace('GATE', 'GateIn').replace('TRIG', 'Trig'),
m_names))
assert raw.ch_names == m_names
m_ch_types = [x[0][0] for x in m_header['chantype'][0, 0]]
m_ch_types = list(
map(lambda x: x.replace('unknown', 'stim').replace('trigger', 'stim'),
m_ch_types))
types_dict = {2: 'eeg', 3: 'stim', 202: 'eog'}
ch_types = [types_dict[raw.info['chs'][x]['kind']]
for x in range(len(raw.ch_names))]
assert ch_types == m_ch_types
assert_array_equal(m_data, raw._data)
run_tests_if_main()
|
import logging
from homeassistant.components.alarm_control_panel import DOMAIN, AlarmControlPanelEntity
from homeassistant.components.alarm_control_panel.const import SUPPORT_ALARM_ARM_AWAY
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DOMAIN as POINT_DOMAIN, POINT_DISCOVERY_NEW, SIGNAL_WEBHOOK
_LOGGER = logging.getLogger(__name__)
EVENT_MAP = {
"off": STATE_ALARM_DISARMED,
"alarm_silenced": STATE_ALARM_DISARMED,
"alarm_grace_period_expired": STATE_ALARM_TRIGGERED,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a Point's alarm_control_panel based on a config entry."""
async def async_discover_home(home_id):
"""Discover and add a discovered home."""
client = hass.data[POINT_DOMAIN][config_entry.entry_id]
async_add_entities([MinutPointAlarmControl(client, home_id)], True)
async_dispatcher_connect(
hass, POINT_DISCOVERY_NEW.format(DOMAIN, POINT_DOMAIN), async_discover_home
)
class MinutPointAlarmControl(AlarmControlPanelEntity):
"""The platform class required by Home Assistant."""
def __init__(self, point_client, home_id):
"""Initialize the entity."""
self._client = point_client
self._home_id = home_id
self._async_unsub_hook_dispatcher_connect = None
self._changed_by = None
async def async_added_to_hass(self):
"""Call when entity is added to HOme Assistant."""
await super().async_added_to_hass()
self._async_unsub_hook_dispatcher_connect = async_dispatcher_connect(
self.hass, SIGNAL_WEBHOOK, self._webhook_event
)
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listener when removed."""
await super().async_will_remove_from_hass()
if self._async_unsub_hook_dispatcher_connect:
self._async_unsub_hook_dispatcher_connect()
@callback
def _webhook_event(self, data, webhook):
"""Process new event from the webhook."""
_type = data.get("event", {}).get("type")
_device_id = data.get("event", {}).get("device_id")
_changed_by = data.get("event", {}).get("user_id")
if (
_device_id not in self._home["devices"] and _type not in EVENT_MAP
) and _type != "alarm_silenced": # alarm_silenced does not have device_id
return
_LOGGER.debug("Received webhook: %s", _type)
self._home["alarm_status"] = _type
self._changed_by = _changed_by
self.async_write_ha_state()
@property
def _home(self):
"""Return the home object."""
return self._client.homes[self._home_id]
@property
def name(self):
"""Return name of the device."""
return self._home["name"]
@property
def state(self):
"""Return state of the device."""
return EVENT_MAP.get(self._home["alarm_status"], STATE_ALARM_ARMED_AWAY)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_AWAY
@property
def changed_by(self):
"""Return the user the last change was triggered by."""
return self._changed_by
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
status = await self._client.async_alarm_disarm(self._home_id)
if status:
self._home["alarm_status"] = "off"
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
status = await self._client.async_alarm_arm(self._home_id)
if status:
self._home["alarm_status"] = "on"
@property
def unique_id(self):
"""Return the unique id of the sensor."""
return f"point.{self._home_id}"
@property
def device_info(self):
"""Return a device description for device registry."""
return {
"identifiers": {(POINT_DOMAIN, self._home_id)},
"name": self.name,
"manufacturer": "Minut",
}
|
import homeassistant.components.automation as automation
from homeassistant.core import CoreState
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, patch
from tests.common import async_mock_service
async def test_if_fires_on_hass_start(hass):
"""Test the firing when Home Assistant starts."""
calls = async_mock_service(hass, "test", "automation")
hass.state = CoreState.not_running
config = {
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "homeassistant", "event": "start"},
"action": {"service": "test.automation"},
}
}
assert await async_setup_component(hass, automation.DOMAIN, config)
assert automation.is_on(hass, "automation.hello")
assert len(calls) == 0
await hass.async_start()
await hass.async_block_till_done()
assert automation.is_on(hass, "automation.hello")
assert len(calls) == 1
with patch(
"homeassistant.config.async_hass_config_yaml",
AsyncMock(return_value=config),
):
await hass.services.async_call(
automation.DOMAIN, automation.SERVICE_RELOAD, blocking=True
)
assert automation.is_on(hass, "automation.hello")
assert len(calls) == 1
async def test_if_fires_on_hass_shutdown(hass):
"""Test the firing when Home Assistant shuts down."""
calls = async_mock_service(hass, "test", "automation")
hass.state = CoreState.not_running
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"alias": "hello",
"trigger": {"platform": "homeassistant", "event": "shutdown"},
"action": {"service": "test.automation"},
}
},
)
assert automation.is_on(hass, "automation.hello")
assert len(calls) == 0
await hass.async_start()
assert automation.is_on(hass, "automation.hello")
await hass.async_block_till_done()
assert len(calls) == 0
with patch.object(hass.loop, "stop"):
await hass.async_stop()
assert len(calls) == 1
|
import os
import shutil
import tempfile
from radicale import Application, config
from radicale.tests import BaseTest
from radicale.tests.helpers import get_file_content
class TestBaseRightsRequests(BaseTest):
"""Tests basic requests with rights."""
def setup(self):
self.configuration = config.load()
self.colpath = tempfile.mkdtemp()
self.configuration.update({
"storage": {"filesystem_folder": self.colpath,
# Disable syncing to disk for better performance
"_filesystem_fsync": "False"}},
"test", privileged=True)
def teardown(self):
shutil.rmtree(self.colpath)
def _test_rights(self, rights_type, user, path, mode, expected_status,
with_auth=True):
assert mode in ("r", "w")
assert user in ("", "tmp")
htpasswd_file_path = os.path.join(self.colpath, ".htpasswd")
with open(htpasswd_file_path, "w") as f:
f.write("tmp:bepo\nother:bepo")
self.configuration.update({
"rights": {"type": rights_type},
"auth": {"type": "htpasswd" if with_auth else "none",
"htpasswd_filename": htpasswd_file_path,
"htpasswd_encryption": "plain"}}, "test")
self.application = Application(self.configuration)
for u in ("tmp", "other"):
# Indirect creation of principal collection
self.propfind("/%s/" % u, login="%s:bepo" % u)
(self.propfind if mode == "r" else self.proppatch)(
path, check=expected_status, login="tmp:bepo" if user else None)
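    # Status code legend for the expectations below: 207 Multi-Status (request
    # allowed), 401 Unauthorized (authentication required), 403 Forbidden
    # (authenticated but not permitted), 404 Not Found.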
def test_owner_only(self):
self._test_rights("owner_only", "", "/", "r", 401)
self._test_rights("owner_only", "", "/", "w", 401)
self._test_rights("owner_only", "", "/tmp/", "r", 401)
self._test_rights("owner_only", "", "/tmp/", "w", 401)
self._test_rights("owner_only", "tmp", "/", "r", 207)
self._test_rights("owner_only", "tmp", "/", "w", 403)
self._test_rights("owner_only", "tmp", "/tmp/", "r", 207)
self._test_rights("owner_only", "tmp", "/tmp/", "w", 207)
self._test_rights("owner_only", "tmp", "/other/", "r", 403)
self._test_rights("owner_only", "tmp", "/other/", "w", 403)
def test_owner_only_without_auth(self):
self._test_rights("owner_only", "", "/", "r", 207, False)
self._test_rights("owner_only", "", "/", "w", 401, False)
self._test_rights("owner_only", "", "/tmp/", "r", 207, False)
self._test_rights("owner_only", "", "/tmp/", "w", 207, False)
def test_owner_write(self):
self._test_rights("owner_write", "", "/", "r", 401)
self._test_rights("owner_write", "", "/", "w", 401)
self._test_rights("owner_write", "", "/tmp/", "r", 401)
self._test_rights("owner_write", "", "/tmp/", "w", 401)
self._test_rights("owner_write", "tmp", "/", "r", 207)
self._test_rights("owner_write", "tmp", "/", "w", 403)
self._test_rights("owner_write", "tmp", "/tmp/", "r", 207)
self._test_rights("owner_write", "tmp", "/tmp/", "w", 207)
self._test_rights("owner_write", "tmp", "/other/", "r", 207)
self._test_rights("owner_write", "tmp", "/other/", "w", 403)
def test_owner_write_without_auth(self):
self._test_rights("owner_write", "", "/", "r", 207, False)
self._test_rights("owner_write", "", "/", "w", 401, False)
self._test_rights("owner_write", "", "/tmp/", "r", 207, False)
self._test_rights("owner_write", "", "/tmp/", "w", 207, False)
def test_authenticated(self):
self._test_rights("authenticated", "", "/", "r", 401)
self._test_rights("authenticated", "", "/", "w", 401)
self._test_rights("authenticated", "", "/tmp/", "r", 401)
self._test_rights("authenticated", "", "/tmp/", "w", 401)
self._test_rights("authenticated", "tmp", "/", "r", 207)
self._test_rights("authenticated", "tmp", "/", "w", 207)
self._test_rights("authenticated", "tmp", "/tmp/", "r", 207)
self._test_rights("authenticated", "tmp", "/tmp/", "w", 207)
self._test_rights("authenticated", "tmp", "/other/", "r", 207)
self._test_rights("authenticated", "tmp", "/other/", "w", 207)
def test_authenticated_without_auth(self):
self._test_rights("authenticated", "", "/", "r", 207, False)
self._test_rights("authenticated", "", "/", "w", 207, False)
self._test_rights("authenticated", "", "/tmp/", "r", 207, False)
self._test_rights("authenticated", "", "/tmp/", "w", 207, False)
def test_from_file(self):
rights_file_path = os.path.join(self.colpath, "rights")
with open(rights_file_path, "w") as f:
f.write("""\
[owner]
user: .+
collection: {user}(/.*)?
permissions: RrWw
[custom]
user: .*
collection: custom(/.*)?
permissions: Rr""")
self.configuration.update(
{"rights": {"file": rights_file_path}}, "test")
self._test_rights("from_file", "", "/other/", "r", 401)
self._test_rights("from_file", "tmp", "/other/", "r", 403)
self._test_rights("from_file", "", "/custom/sub", "r", 404)
self._test_rights("from_file", "tmp", "/custom/sub", "r", 404)
self._test_rights("from_file", "", "/custom/sub", "w", 401)
self._test_rights("from_file", "tmp", "/custom/sub", "w", 403)
def test_from_file_limited_get(self):
rights_file_path = os.path.join(self.colpath, "rights")
with open(rights_file_path, "w") as f:
f.write("""\
[write-all]
user: tmp
collection: .*
permissions: RrWw
[limited-public]
user: .*
collection: public/[^/]*
permissions: i""")
self.configuration.update(
{"rights": {"type": "from_file",
"file": rights_file_path}}, "test")
self.application = Application(self.configuration)
self.mkcalendar("/tmp/calendar", login="tmp:bepo")
self.mkcol("/public", login="tmp:bepo")
self.mkcalendar("/public/calendar", login="tmp:bepo")
self.get("/tmp/calendar", check=401)
self.get("/public/", check=401)
self.get("/public/calendar")
self.get("/public/calendar/1.ics", check=401)
def test_custom(self):
"""Custom rights management."""
self._test_rights("radicale.tests.custom.rights", "", "/", "r", 401)
self._test_rights(
"radicale.tests.custom.rights", "", "/tmp/", "r", 207)
def test_collections_and_items(self):
"""Test rights for creation of collections, calendars and items.
Collections are allowed at "/" and "/.../".
Calendars/Address books are allowed at "/.../.../".
Items are allowed at "/.../.../...".
"""
self.application = Application(self.configuration)
self.mkcalendar("/", check=401)
self.mkcalendar("/user/", check=401)
self.mkcol("/user/")
self.mkcol("/user/calendar/", check=401)
self.mkcalendar("/user/calendar/")
self.mkcol("/user/calendar/item", check=401)
self.mkcalendar("/user/calendar/item", check=401)
def test_put_collections_and_items(self):
"""Test rights for creation of calendars and items with PUT."""
self.application = Application(self.configuration)
self.put("/user/", "BEGIN:VCALENDAR\r\nEND:VCALENDAR", check=401)
self.mkcol("/user/")
self.put("/user/calendar/", "BEGIN:VCALENDAR\r\nEND:VCALENDAR")
event1 = get_file_content("event1.ics")
self.put("/user/calendar/event1.ics", event1)
|
from homeassistant.components.zwave import switch
from tests.async_mock import patch
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
def test_get_device_detects_switch(mock_openzwave):
"""Test get_device returns a Z-Wave switch."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = switch.get_device(node=node, values=values, node_config={})
assert isinstance(device, switch.ZwaveSwitch)
def test_switch_turn_on_and_off(mock_openzwave):
"""Test turning on a Z-Wave switch."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = switch.get_device(node=node, values=values, node_config={})
device.turn_on()
assert node.set_switch.called
value_id, state = node.set_switch.mock_calls[0][1]
assert value_id == value.value_id
assert state is True
node.reset_mock()
device.turn_off()
assert node.set_switch.called
value_id, state = node.set_switch.mock_calls[0][1]
assert value_id == value.value_id
assert state is False
def test_switch_value_changed(mock_openzwave):
"""Test value changed for Z-Wave switch."""
node = MockNode()
value = MockValue(data=False, node=node)
values = MockEntityValues(primary=value)
device = switch.get_device(node=node, values=values, node_config={})
assert not device.is_on
value.data = True
value_changed(value)
assert device.is_on
@patch("time.perf_counter")
def test_switch_refresh_on_update(mock_counter, mock_openzwave):
"""Test value changed for refresh on update Z-Wave switch."""
mock_counter.return_value = 10
node = MockNode(manufacturer_id="013c", product_type="0001", product_id="0005")
value = MockValue(data=False, node=node, instance=1)
values = MockEntityValues(primary=value)
device = switch.get_device(node=node, values=values, node_config={})
assert not device.is_on
mock_counter.return_value = 15
value.data = True
value_changed(value)
assert device.is_on
assert not node.request_state.called
mock_counter.return_value = 45
value.data = False
value_changed(value)
assert not device.is_on
assert node.request_state.called
|
import asyncio
import aiodns
from mcstatus.pinger import PingResponse
from homeassistant.components.minecraft_server.const import (
DEFAULT_NAME,
DEFAULT_PORT,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import patch
from tests.common import MockConfigEntry
class QueryMock:
"""Mock for result of aiodns.DNSResolver.query."""
def __init__(self):
"""Set up query result mock."""
self.host = "mc.dummyserver.com"
self.port = 23456
self.priority = 1
self.weight = 1
self.ttl = None
STATUS_RESPONSE_RAW = {
"description": {"text": "Dummy Description"},
"version": {"name": "Dummy Version", "protocol": 123},
"players": {
"online": 3,
"max": 10,
"sample": [
{"name": "Player 1", "id": "1"},
{"name": "Player 2", "id": "2"},
{"name": "Player 3", "id": "3"},
],
},
}
USER_INPUT = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: f"mc.dummyserver.com:{DEFAULT_PORT}",
}
USER_INPUT_SRV = {CONF_NAME: DEFAULT_NAME, CONF_HOST: "dummyserver.com"}
USER_INPUT_IPV4 = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: f"1.1.1.1:{DEFAULT_PORT}",
}
USER_INPUT_IPV6 = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: f"[::ffff:0101:0101]:{DEFAULT_PORT}",
}
USER_INPUT_PORT_TOO_SMALL = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: "mc.dummyserver.com:1023",
}
USER_INPUT_PORT_TOO_LARGE = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: "mc.dummyserver.com:65536",
}
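# A pre-resolved future so the patched aiodns.DNSResolver.query can be awaited
# directly by the config flow's SRV lookup.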
SRV_RECORDS = asyncio.Future()
SRV_RECORDS.set_result([QueryMock()])
async def test_show_config_form(hass: HomeAssistantType) -> None:
"""Test if initial configuration form is shown."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_invalid_ip(hass: HomeAssistantType) -> None:
"""Test error in case of an invalid IP address."""
with patch("getmac.get_mac_address", return_value=None):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_IPV4
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_ip"}
async def test_same_host(hass: HomeAssistantType) -> None:
"""Test abort in case of same host name."""
with patch(
"aiodns.DNSResolver.query",
side_effect=aiodns.error.DNSError,
):
with patch(
"mcstatus.server.MinecraftServer.status",
return_value=PingResponse(STATUS_RESPONSE_RAW),
):
unique_id = "mc.dummyserver.com-25565"
config_data = {
CONF_NAME: DEFAULT_NAME,
CONF_HOST: "mc.dummyserver.com",
CONF_PORT: DEFAULT_PORT,
}
mock_config_entry = MockConfigEntry(
domain=DOMAIN, unique_id=unique_id, data=config_data
)
mock_config_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_port_too_small(hass: HomeAssistantType) -> None:
"""Test error in case of a too small port."""
with patch(
"aiodns.DNSResolver.query",
side_effect=aiodns.error.DNSError,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_PORT_TOO_SMALL
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_port"}
async def test_port_too_large(hass: HomeAssistantType) -> None:
"""Test error in case of a too large port."""
with patch(
"aiodns.DNSResolver.query",
side_effect=aiodns.error.DNSError,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_PORT_TOO_LARGE
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_port"}
async def test_connection_failed(hass: HomeAssistantType) -> None:
"""Test error in case of a failed connection."""
with patch(
"aiodns.DNSResolver.query",
side_effect=aiodns.error.DNSError,
):
with patch("mcstatus.server.MinecraftServer.status", side_effect=OSError):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
async def test_connection_succeeded_with_srv_record(hass: HomeAssistantType) -> None:
"""Test config entry in case of a successful connection with a SRV record."""
with patch(
"aiodns.DNSResolver.query",
return_value=SRV_RECORDS,
):
with patch(
"mcstatus.server.MinecraftServer.status",
return_value=PingResponse(STATUS_RESPONSE_RAW),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_SRV
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == USER_INPUT_SRV[CONF_HOST]
assert result["data"][CONF_NAME] == USER_INPUT_SRV[CONF_NAME]
assert result["data"][CONF_HOST] == USER_INPUT_SRV[CONF_HOST]
async def test_connection_succeeded_with_host(hass: HomeAssistantType) -> None:
"""Test config entry in case of a successful connection with a host name."""
with patch(
"aiodns.DNSResolver.query",
side_effect=aiodns.error.DNSError,
):
with patch(
"mcstatus.server.MinecraftServer.status",
return_value=PingResponse(STATUS_RESPONSE_RAW),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == USER_INPUT[CONF_HOST]
assert result["data"][CONF_NAME] == USER_INPUT[CONF_NAME]
assert result["data"][CONF_HOST] == "mc.dummyserver.com"
async def test_connection_succeeded_with_ip4(hass: HomeAssistantType) -> None:
"""Test config entry in case of a successful connection with an IPv4 address."""
with patch("getmac.get_mac_address", return_value="01:23:45:67:89:ab"):
with patch(
"aiodns.DNSResolver.query",
side_effect=aiodns.error.DNSError,
):
with patch(
"mcstatus.server.MinecraftServer.status",
return_value=PingResponse(STATUS_RESPONSE_RAW),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_IPV4
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == USER_INPUT_IPV4[CONF_HOST]
assert result["data"][CONF_NAME] == USER_INPUT_IPV4[CONF_NAME]
assert result["data"][CONF_HOST] == "1.1.1.1"
async def test_connection_succeeded_with_ip6(hass: HomeAssistantType) -> None:
"""Test config entry in case of a successful connection with an IPv6 address."""
with patch("getmac.get_mac_address", return_value="01:23:45:67:89:ab"):
with patch(
"aiodns.DNSResolver.query",
side_effect=aiodns.error.DNSError,
):
with patch(
"mcstatus.server.MinecraftServer.status",
return_value=PingResponse(STATUS_RESPONSE_RAW),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=USER_INPUT_IPV6
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == USER_INPUT_IPV6[CONF_HOST]
assert result["data"][CONF_NAME] == USER_INPUT_IPV6[CONF_NAME]
assert result["data"][CONF_HOST] == "::ffff:0101:0101"
|
import pytest
import homeassistant.components.google as google
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
@pytest.fixture(name="google_setup")
def mock_google_setup(hass):
"""Mock the google set up functions."""
p_auth = patch(
"homeassistant.components.google.do_authentication", side_effect=google.do_setup
)
p_service = patch("homeassistant.components.google.GoogleCalendarService.get")
p_discovery = patch("homeassistant.components.google.discovery.load_platform")
p_load = patch("homeassistant.components.google.load_config", return_value={})
p_save = patch("homeassistant.components.google.update_config")
with p_auth, p_load, p_service, p_discovery, p_save:
yield
async def test_setup_component(hass, google_setup):
"""Test setup component."""
config = {"google": {CONF_CLIENT_ID: "id", CONF_CLIENT_SECRET: "secret"}}
assert await async_setup_component(hass, "google", config)
async def test_get_calendar_info(hass, test_calendar):
"""Test getting the calendar info."""
calendar_info = await hass.async_add_executor_job(
google.get_calendar_info, hass, test_calendar
)
assert calendar_info == {
"cal_id": "[email protected]",
"entities": [
{
"device_id": "we_are_we_are_a_test_calendar",
"name": "We are, we are, a... Test Calendar",
"track": True,
"ignore_availability": True,
}
],
}
async def test_found_calendar(hass, google_setup, mock_next_event, test_calendar):
"""Test when a calendar is found."""
config = {
"google": {
CONF_CLIENT_ID: "id",
CONF_CLIENT_SECRET: "secret",
"track_new_calendar": True,
}
}
assert await async_setup_component(hass, "google", config)
assert hass.data[google.DATA_INDEX] == {}
await hass.services.async_call(
"google", google.SERVICE_FOUND_CALENDARS, test_calendar, blocking=True
)
assert hass.data[google.DATA_INDEX].get(test_calendar["id"]) is not None
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hostname(host):
assert 'delegated-instance-openstack' == host.check_output('hostname -s')
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
f = host.file('/etc/molecule/delegated-instance-openstack')
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
from nexia.const import (
OPERATION_MODE_AUTO,
OPERATION_MODE_COOL,
OPERATION_MODE_HEAT,
OPERATION_MODE_OFF,
SYSTEM_STATUS_COOL,
SYSTEM_STATUS_HEAT,
SYSTEM_STATUS_IDLE,
UNIT_FAHRENHEIT,
)
import voluptuous as vol
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HUMIDITY,
ATTR_MAX_HUMIDITY,
ATTR_MIN_HUMIDITY,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_TEMPERATURE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from .const import (
ATTR_AIRCLEANER_MODE,
ATTR_DEHUMIDIFY_SETPOINT,
ATTR_DEHUMIDIFY_SUPPORTED,
ATTR_HUMIDIFY_SETPOINT,
ATTR_HUMIDIFY_SUPPORTED,
ATTR_ZONE_STATUS,
DOMAIN,
NEXIA_DEVICE,
SIGNAL_THERMOSTAT_UPDATE,
SIGNAL_ZONE_UPDATE,
UPDATE_COORDINATOR,
)
from .entity import NexiaThermostatZoneEntity
from .util import percent_conv
SERVICE_SET_AIRCLEANER_MODE = "set_aircleaner_mode"
SERVICE_SET_HUMIDIFY_SETPOINT = "set_humidify_setpoint"
SET_AIRCLEANER_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_AIRCLEANER_MODE): cv.string,
}
)
SET_HUMIDITY_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_HUMIDITY): vol.All(
vol.Coerce(int), vol.Range(min=35, max=65)
),
}
)
#
# Nexia uses two bits to determine the hvac mode.
# There are actually eight states, so we map to
# the most significant state:
#
# 1. Zone Mode : Auto / Cooling / Heating / Off
# 2. Run Mode : Hold / Run Schedule
#
#
HA_TO_NEXIA_HVAC_MODE_MAP = {
HVAC_MODE_HEAT: OPERATION_MODE_HEAT,
HVAC_MODE_COOL: OPERATION_MODE_COOL,
HVAC_MODE_HEAT_COOL: OPERATION_MODE_AUTO,
HVAC_MODE_AUTO: OPERATION_MODE_AUTO,
HVAC_MODE_OFF: OPERATION_MODE_OFF,
}
NEXIA_TO_HA_HVAC_MODE_MAP = {
value: key for key, value in HA_TO_NEXIA_HVAC_MODE_MAP.items()
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up climate for a Nexia device."""
nexia_data = hass.data[DOMAIN][config_entry.entry_id]
nexia_home = nexia_data[NEXIA_DEVICE]
coordinator = nexia_data[UPDATE_COORDINATOR]
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SET_HUMIDIFY_SETPOINT,
SET_HUMIDITY_SCHEMA,
SERVICE_SET_HUMIDIFY_SETPOINT,
)
platform.async_register_entity_service(
SERVICE_SET_AIRCLEANER_MODE, SET_AIRCLEANER_SCHEMA, SERVICE_SET_AIRCLEANER_MODE
)
entities = []
for thermostat_id in nexia_home.get_thermostat_ids():
thermostat = nexia_home.get_thermostat_by_id(thermostat_id)
for zone_id in thermostat.get_zone_ids():
zone = thermostat.get_zone_by_id(zone_id)
entities.append(NexiaZone(coordinator, zone))
async_add_entities(entities, True)
class NexiaZone(NexiaThermostatZoneEntity, ClimateEntity):
"""Provides Nexia Climate support."""
def __init__(self, coordinator, zone):
"""Initialize the thermostat."""
super().__init__(
coordinator, zone, name=zone.get_name(), unique_id=zone.zone_id
)
        self._undo_humidify_dispatcher = None
self._undo_aircleaner_dispatcher = None
# The has_* calls are stable for the life of the device
# and do not do I/O
self._has_relative_humidity = self._thermostat.has_relative_humidity()
self._has_emergency_heat = self._thermostat.has_emergency_heat()
self._has_humidify_support = self._thermostat.has_humidify_support()
self._has_dehumidify_support = self._thermostat.has_dehumidify_support()
@property
def supported_features(self):
"""Return the list of supported features."""
supported = (
SUPPORT_TARGET_TEMPERATURE_RANGE
| SUPPORT_TARGET_TEMPERATURE
| SUPPORT_FAN_MODE
| SUPPORT_PRESET_MODE
)
if self._has_humidify_support or self._has_dehumidify_support:
supported |= SUPPORT_TARGET_HUMIDITY
if self._has_emergency_heat:
supported |= SUPPORT_AUX_HEAT
return supported
@property
def is_fan_on(self):
"""Blower is on."""
return self._thermostat.is_blower_active()
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS if self._thermostat.get_unit() == "C" else TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
return self._zone.get_temperature()
@property
def fan_mode(self):
"""Return the fan setting."""
return self._thermostat.get_fan_mode()
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return self._thermostat.get_fan_modes()
@property
def min_temp(self):
"""Minimum temp for the current setting."""
return (self._thermostat.get_setpoint_limits())[0]
@property
def max_temp(self):
"""Maximum temp for the current setting."""
return (self._thermostat.get_setpoint_limits())[1]
def set_fan_mode(self, fan_mode):
"""Set new target fan mode."""
self._thermostat.set_fan_mode(fan_mode)
self._signal_thermostat_update()
@property
def preset_mode(self):
"""Preset that is active."""
return self._zone.get_preset()
@property
def preset_modes(self):
"""All presets."""
return self._zone.get_presets()
def set_humidity(self, humidity):
"""Dehumidify target."""
self._thermostat.set_dehumidify_setpoint(humidity / 100.0)
self._signal_thermostat_update()
@property
def target_humidity(self):
"""Humidity indoors setpoint."""
if self._has_dehumidify_support:
return percent_conv(self._thermostat.get_dehumidify_setpoint())
if self._has_humidify_support:
return percent_conv(self._thermostat.get_humidify_setpoint())
return None
@property
def current_humidity(self):
"""Humidity indoors."""
if self._has_relative_humidity:
return percent_conv(self._thermostat.get_relative_humidity())
return None
@property
def target_temperature(self):
"""Temperature we try to reach."""
current_mode = self._zone.get_current_mode()
if current_mode == OPERATION_MODE_COOL:
return self._zone.get_cooling_setpoint()
if current_mode == OPERATION_MODE_HEAT:
return self._zone.get_heating_setpoint()
return None
@property
def target_temperature_step(self):
"""Step size of temperature units."""
if self._thermostat.get_unit() == UNIT_FAHRENHEIT:
return 1.0
return 0.5
@property
def target_temperature_high(self):
"""Highest temperature we are trying to reach."""
current_mode = self._zone.get_current_mode()
if current_mode in (OPERATION_MODE_COOL, OPERATION_MODE_HEAT):
return None
return self._zone.get_cooling_setpoint()
@property
def target_temperature_low(self):
"""Lowest temperature we are trying to reach."""
current_mode = self._zone.get_current_mode()
if current_mode in (OPERATION_MODE_COOL, OPERATION_MODE_HEAT):
return None
return self._zone.get_heating_setpoint()
@property
def hvac_action(self) -> str:
"""Operation ie. heat, cool, idle."""
system_status = self._thermostat.get_system_status()
zone_called = self._zone.is_calling()
if self._zone.get_requested_mode() == OPERATION_MODE_OFF:
return CURRENT_HVAC_OFF
if not zone_called:
return CURRENT_HVAC_IDLE
if system_status == SYSTEM_STATUS_COOL:
return CURRENT_HVAC_COOL
if system_status == SYSTEM_STATUS_HEAT:
return CURRENT_HVAC_HEAT
if system_status == SYSTEM_STATUS_IDLE:
return CURRENT_HVAC_IDLE
return CURRENT_HVAC_IDLE
@property
def hvac_mode(self):
"""Return current mode, as the user-visible name."""
mode = self._zone.get_requested_mode()
hold = self._zone.is_in_permanent_hold()
        # If the device is in a permanent hold with OPERATION_MODE_AUTO it is
        # overriding the schedule but still heating and cooling to the
        # temperature range.
if hold and mode == OPERATION_MODE_AUTO:
return HVAC_MODE_HEAT_COOL
return NEXIA_TO_HA_HVAC_MODE_MAP[mode]
@property
def hvac_modes(self):
"""List of HVAC available modes."""
return [
HVAC_MODE_OFF,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_COOL,
]
def set_temperature(self, **kwargs):
"""Set target temperature."""
new_heat_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
new_cool_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
set_temp = kwargs.get(ATTR_TEMPERATURE)
deadband = self._thermostat.get_deadband()
cur_cool_temp = self._zone.get_cooling_setpoint()
cur_heat_temp = self._zone.get_heating_setpoint()
(min_temp, max_temp) = self._thermostat.get_setpoint_limits()
# Check that we're not going to hit any minimum or maximum values
if new_heat_temp and new_heat_temp + deadband > max_temp:
new_heat_temp = max_temp - deadband
if new_cool_temp and new_cool_temp - deadband < min_temp:
new_cool_temp = min_temp + deadband
# Check that we're within the deadband range, fix it if we're not
if new_heat_temp and new_heat_temp != cur_heat_temp:
if new_cool_temp - new_heat_temp < deadband:
new_cool_temp = new_heat_temp + deadband
if new_cool_temp and new_cool_temp != cur_cool_temp:
if new_cool_temp - new_heat_temp < deadband:
new_heat_temp = new_cool_temp - deadband
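        # Worked example (illustrative numbers): with deadband=3 and a request
        # of heat=74 / cool=75, the cool setpoint is pushed up to 77 so the
        # pair stays at least `deadband` degrees apart.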
self._zone.set_heat_cool_temp(
heat_temperature=new_heat_temp,
cool_temperature=new_cool_temp,
set_temperature=set_temp,
)
self._signal_zone_update()
@property
def is_aux_heat(self):
"""Emergency heat state."""
return self._thermostat.is_emergency_heat_active()
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
data = super().device_state_attributes
data[ATTR_ZONE_STATUS] = self._zone.get_status()
if not self._has_relative_humidity:
return data
min_humidity = percent_conv(self._thermostat.get_humidity_setpoint_limits()[0])
max_humidity = percent_conv(self._thermostat.get_humidity_setpoint_limits()[1])
data.update(
{
ATTR_MIN_HUMIDITY: min_humidity,
ATTR_MAX_HUMIDITY: max_humidity,
ATTR_DEHUMIDIFY_SUPPORTED: self._has_dehumidify_support,
ATTR_HUMIDIFY_SUPPORTED: self._has_humidify_support,
}
)
        if self._has_dehumidify_support:
            dehumidify_setpoint = percent_conv(
                self._thermostat.get_dehumidify_setpoint()
            )
            data[ATTR_DEHUMIDIFY_SETPOINT] = dehumidify_setpoint
        if self._has_humidify_support:
            humidify_setpoint = percent_conv(self._thermostat.get_humidify_setpoint())
            data[ATTR_HUMIDIFY_SETPOINT] = humidify_setpoint
return data
def set_preset_mode(self, preset_mode: str):
"""Set the preset mode."""
self._zone.set_preset(preset_mode)
self._signal_zone_update()
def turn_aux_heat_off(self):
"""Turn. Aux Heat off."""
self._thermostat.set_emergency_heat(False)
self._signal_thermostat_update()
def turn_aux_heat_on(self):
"""Turn. Aux Heat on."""
self._thermostat.set_emergency_heat(True)
self._signal_thermostat_update()
def turn_off(self):
"""Turn. off the zone."""
self.set_hvac_mode(OPERATION_MODE_OFF)
self._signal_zone_update()
def turn_on(self):
"""Turn. on the zone."""
self.set_hvac_mode(OPERATION_MODE_AUTO)
self._signal_zone_update()
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set the system mode (Auto, Heat_Cool, Cool, Heat, etc)."""
if hvac_mode == HVAC_MODE_AUTO:
self._zone.call_return_to_schedule()
self._zone.set_mode(mode=OPERATION_MODE_AUTO)
else:
self._zone.call_permanent_hold()
self._zone.set_mode(mode=HA_TO_NEXIA_HVAC_MODE_MAP[hvac_mode])
self.schedule_update_ha_state()
def set_aircleaner_mode(self, aircleaner_mode):
"""Set the aircleaner mode."""
self._thermostat.set_air_cleaner(aircleaner_mode)
self._signal_thermostat_update()
def set_humidify_setpoint(self, humidity):
"""Set the humidify setpoint."""
self._thermostat.set_humidify_setpoint(humidity / 100.0)
self._signal_thermostat_update()
def _signal_thermostat_update(self):
"""Signal a thermostat update.
Whenever the underlying library does an action against
a thermostat, the data for the thermostat and all
        connected zones are updated.
Update all the zones on the thermostat.
"""
dispatcher_send(
self.hass, f"{SIGNAL_THERMOSTAT_UPDATE}-{self._thermostat.thermostat_id}"
)
def _signal_zone_update(self):
"""Signal a zone update.
Whenever the underlying library does an action against
a zone, the data for the zone is updated.
Update a single zone.
"""
dispatcher_send(self.hass, f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}")
|
import pytest
import os
boto3 = pytest.importorskip("boto3")
import boto3 # NOQA
import botocore # NOQA
import vcr # NOQA
try:
from botocore import awsrequest # NOQA
botocore_awsrequest = True
except ImportError:
botocore_awsrequest = False
# skip tests if boto does not use vendored requests anymore
# https://github.com/boto/botocore/pull/1495
boto3_skip_vendored_requests = pytest.mark.skipif(
botocore_awsrequest,
reason="botocore version {ver} does not use vendored requests anymore.".format(ver=botocore.__version__),
)
boto3_skip_awsrequest = pytest.mark.skipif(
not botocore_awsrequest,
reason="botocore version {ver} still uses vendored requests.".format(ver=botocore.__version__),
)
IAM_USER_NAME = "vcrpy"
@pytest.fixture
def iam_client():
def _iam_client(boto3_session=None):
if boto3_session is None:
boto3_session = boto3.Session(
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID", "default"),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY", "default"),
aws_session_token=None,
region_name=os.environ.get("AWS_DEFAULT_REGION", "default"),
)
return boto3_session.client("iam")
return _iam_client
@pytest.fixture
def get_user(iam_client):
def _get_user(client=None, user_name=IAM_USER_NAME):
if client is None:
# Default client set with fixture `iam_client`
client = iam_client()
return client.get_user(UserName=user_name)
return _get_user
@boto3_skip_vendored_requests
def test_boto_vendored_stubs(tmpdir):
with vcr.use_cassette(str(tmpdir.join("boto3-stubs.yml"))):
# Perform the imports within the patched context so that
# HTTPConnection, VerifiedHTTPSConnection refers to the patched version.
from botocore.vendored.requests.packages.urllib3.connectionpool import (
HTTPConnection,
VerifiedHTTPSConnection,
)
from vcr.stubs.boto3_stubs import VCRRequestsHTTPConnection, VCRRequestsHTTPSConnection
# Prove that the class was patched by the stub and that we can instantiate it.
assert issubclass(HTTPConnection, VCRRequestsHTTPConnection)
assert issubclass(VerifiedHTTPSConnection, VCRRequestsHTTPSConnection)
HTTPConnection("hostname.does.not.matter")
VerifiedHTTPSConnection("hostname.does.not.matter")
@pytest.mark.skipif(
os.environ.get("TRAVIS_PULL_REQUEST") != "false",
reason="Encrypted Environment Variables from Travis Repository Settings"
" are disabled on PRs from forks. "
"https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions",
)
def test_boto_medium_difficulty(tmpdir, get_user):
with vcr.use_cassette(str(tmpdir.join("boto3-medium.yml"))):
response = get_user()
assert response["User"]["UserName"] == IAM_USER_NAME
with vcr.use_cassette(str(tmpdir.join("boto3-medium.yml"))) as cass:
response = get_user()
assert response["User"]["UserName"] == IAM_USER_NAME
assert cass.all_played
@pytest.mark.skipif(
os.environ.get("TRAVIS_PULL_REQUEST") != "false",
reason="Encrypted Environment Variables from Travis Repository Settings"
" are disabled on PRs from forks. "
"https://docs.travis-ci.com/user/pull-requests/#pull-requests-and-security-restrictions",
)
def test_boto_hardcore_mode(tmpdir, iam_client, get_user):
with vcr.use_cassette(str(tmpdir.join("boto3-hardcore.yml"))):
ses = boto3.Session(
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"),
region_name=os.environ.get("AWS_DEFAULT_REGION"),
)
client = iam_client(ses)
response = get_user(client=client)
assert response["User"]["UserName"] == IAM_USER_NAME
with vcr.use_cassette(str(tmpdir.join("boto3-hardcore.yml"))) as cass:
ses = boto3.Session(
aws_access_key_id=os.environ.get("AWS_ACCESS_KEY_ID"),
aws_secret_access_key=os.environ.get("AWS_SECRET_ACCESS_KEY"),
aws_session_token=None,
region_name=os.environ.get("AWS_DEFAULT_REGION"),
)
client = iam_client(ses)
response = get_user(client=client)
assert response["User"]["UserName"] == IAM_USER_NAME
assert cass.all_played
|
import logging
from homeassistant.helpers.entity import Entity
from . import DOMAIN
from .const import (
KEY_CONSUMER,
KEY_IDENTIFIER,
KEY_MEASUREMENT,
KEY_PARENT_MAC,
KEY_PARENT_NAME,
KEY_UNIT,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the sensor platform."""
if discovery_info is None:
return None
consumer = hass.data[DOMAIN][KEY_CONSUMER]
sensor_list = []
for entity_info in discovery_info:
peripheral = hass.data[DOMAIN][entity_info[KEY_PARENT_MAC]][
entity_info[KEY_IDENTIFIER]
]
parent_name = entity_info[KEY_PARENT_NAME]
unit = entity_info[KEY_UNIT]
measurement = entity_info[KEY_MEASUREMENT]
sensor_list.append(
VSensor(peripheral, parent_name, unit, measurement, consumer)
)
async_add_entities(sensor_list)
class VSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, peripheral, parent_name, unit, measurement, consumer):
"""Initialize the sensor."""
self._state = None
self._available = True
self._name = f"{parent_name} {measurement}"
self._parent_mac = peripheral.parentMac
self._identifier = peripheral.identifier
self._unit = unit
self._measurement = measurement
self.consumer = consumer
@property
def unique_id(self):
"""Return the unique id of the sensor."""
return f"{self._parent_mac}/{self._identifier}/{self._measurement}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
@property
def available(self):
"""Return if the sensor is available."""
return self._available
async def async_update(self):
"""Fetch new state data for the sensor."""
samples = await self.consumer.fetchPeripheralSample(
None, self._identifier, self._parent_mac
)
if samples is not None:
for sample in samples:
if sample.measurement == self._measurement:
self._available = True
self._state = sample.value
break
else:
_LOGGER.error("Sample unavailable")
self._available = False
self._state = None
|
from __future__ import division
def normalize_float(f):
"""Round float errors"""
if abs(f - round(f)) < .0000000000001:
return round(f)
return f
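# e.g. normalize_float(2.0000000000000004) == 2, absorbing float noise from
# repeated hsl/rgb round-trips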
def rgb_to_hsl(r, g, b):
"""Convert a color in r, g, b to a color in h, s, l"""
r = r or 0
g = g or 0
b = b or 0
r /= 255
g /= 255
b /= 255
max_ = max((r, g, b))
min_ = min((r, g, b))
d = max_ - min_
if not d:
h = 0
elif r is max_:
h = 60 * (g - b) / d
elif g is max_:
h = 60 * (b - r) / d + 120
else:
h = 60 * (r - g) / d + 240
l = .5 * (max_ + min_)
if not d:
s = 0
elif l < 0.5:
s = .5 * d / l
else:
s = .5 * d / (1 - l)
return tuple(map(normalize_float, (h % 360, s * 100, l * 100)))
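# e.g. rgb_to_hsl(255, 0, 0) == (0, 100, 50); rgb_to_hsl(0, 0, 0) == (0, 0, 0)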
def hsl_to_rgb(h, s, l):
"""Convert a color in h, s, l to a color in r, g, b"""
h /= 360
s /= 100
l /= 100
m2 = l * (s + 1) if l <= .5 else l + s - l * s
m1 = 2 * l - m2
def h_to_rgb(h):
h = h % 1
if 6 * h < 1:
return m1 + 6 * h * (m2 - m1)
if 2 * h < 1:
return m2
if 3 * h < 2:
return m1 + 6 * (2 / 3 - h) * (m2 - m1)
return m1
r, g, b = map(
lambda x: round(x * 255), map(h_to_rgb, (h + 1 / 3, h, h - 1 / 3))
)
return r, g, b
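# Worked example (values follow from the formulas above): pure red round-trips
# through both conversions:
#     rgb_to_hsl(255, 0, 0)  -> (0, 100, 50)
#     hsl_to_rgb(0, 100, 50) -> (255, 0, 0)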
def parse_color(color):
"""Take any css color definition and give back a tuple containing the
r, g, b, a values along with a type which can be: #rgb, #rgba, #rrggbb,
#rrggbbaa, rgb, rgba
"""
r = g = b = a = type = None
if color.startswith('#'):
color = color[1:]
if len(color) == 3:
type = '#rgb'
color = color + 'f'
if len(color) == 4:
type = type or '#rgba'
color = ''.join([c * 2 for c in color])
if len(color) == 6:
type = type or '#rrggbb'
color = color + 'ff'
assert len(color) == 8
type = type or '#rrggbbaa'
r, g, b, a = [
int(''.join(c), 16) for c in zip(color[::2], color[1::2])
]
a /= 255
elif color.startswith('rgb('):
type = 'rgb'
color = color[4:-1]
r, g, b, a = [int(c) for c in color.split(',')] + [1]
elif color.startswith('rgba('):
type = 'rgba'
color = color[5:-1]
r, g, b, a = [int(c) for c in color.split(',')[:-1]
] + [float(color.split(',')[-1])]
return r, g, b, a, type
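# Example: parse_color('#f80') expands the shorthand to 'ff8800ff' and returns
# (255, 136, 0, 1.0, '#rgb'); parse_color('rgba(0, 0, 0, .5)') returns
# (0, 0, 0, 0.5, 'rgba').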
def unparse_color(r, g, b, a, type):
"""
Take the r, g, b, a color values and give back
a type css color string. This is the inverse function of parse_color
"""
if type == '#rgb':
# Don't lose precision on rgb shortcut
if r % 17 == 0 and g % 17 == 0 and b % 17 == 0:
return '#%x%x%x' % (int(r / 17), int(g / 17), int(b / 17))
type = '#rrggbb'
if type == '#rgba':
if r % 17 == 0 and g % 17 == 0 and b % 17 == 0:
return '#%x%x%x%x' % (
int(r / 17), int(g / 17), int(b / 17), int(a * 15)
)
type = '#rrggbbaa'
if type == '#rrggbb':
return '#%02x%02x%02x' % (r, g, b)
if type == '#rrggbbaa':
return '#%02x%02x%02x%02x' % (r, g, b, int(a * 255))
if type == 'rgb':
return 'rgb(%d, %d, %d)' % (r, g, b)
if type == 'rgba':
return 'rgba(%d, %d, %d, %g)' % (r, g, b, a)
def is_foreground_light(color):
"""
    Determine whether the background color needs a light or a dark foreground color
"""
return rgb_to_hsl(*parse_color(color)[:3])[2] < 17.9
def _clamp(x):
    return max(0, min(100, x))
def _adjust(hsl, attribute, percent):
"""Internal adjust function"""
hsl = list(hsl)
if attribute > 0:
hsl[attribute] = _clamp(hsl[attribute] + percent)
else:
hsl[attribute] += percent
return hsl
def adjust(color, attribute, percent):
"""Adjust an attribute of color by a percent"""
r, g, b, a, type = parse_color(color)
r, g, b = hsl_to_rgb(*_adjust(rgb_to_hsl(r, g, b), attribute, percent))
return unparse_color(r, g, b, a, type)
def rotate(color, percent):
"""Rotate a color by changing its hue value by percent"""
return adjust(color, 0, percent)
def saturate(color, percent):
"""Saturate a color by increasing its saturation by percent"""
return adjust(color, 1, percent)
def desaturate(color, percent):
"""Desaturate a color by decreasing its saturation by percent"""
return adjust(color, 1, -percent)
def lighten(color, percent):
"""Lighten a color by increasing its lightness by percent"""
return adjust(color, 2, percent)
def darken(color, percent):
"""Darken a color by decreasing its lightness by percent"""
return adjust(color, 2, -percent)
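# Example: darken('#ff0000', 10) lowers lightness from 50 to 40 and returns
# '#cc0000'; lighten('#ff0000', 10) raises it to 60 and returns '#ff3333'.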
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service, mock_component
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture
def context_with_user():
"""Track calls to a mock service."""
return Context(user_id="test_user_id")
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
async def test_if_fires_on_event(hass, calls):
"""Test the firing of events."""
context = Context()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire("test_event", context=context)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_event_extra_data(hass, calls, context_with_user):
"""Test the firing of events still matches with event data and context."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire(
"test_event", {"extra_key": "extra_data"}, context=context_with_user
)
await hass.async_block_till_done()
assert len(calls) == 1
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_event_with_data_and_context(hass, calls, context_with_user):
"""Test the firing of events with data and context."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "event",
"event_type": "test_event",
"event_data": {
"some_attr": "some_value",
"second_attr": "second_value",
},
"context": {"user_id": context_with_user.user_id},
},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire(
"test_event",
{"some_attr": "some_value", "another": "value", "second_attr": "second_value"},
context=context_with_user,
)
await hass.async_block_till_done()
assert len(calls) == 1
hass.bus.async_fire(
"test_event",
{"some_attr": "some_value", "another": "value"},
context=context_with_user,
)
await hass.async_block_till_done()
assert len(calls) == 1 # No new call
hass.bus.async_fire(
"test_event",
{"some_attr": "some_value", "another": "value", "second_attr": "second_value"},
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_event_with_empty_data_and_context_config(
hass, calls, context_with_user
):
"""Test the firing of events with empty data and context config.
The frontend automation editor can produce configurations with an
empty dict for event_data instead of no key.
"""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "event",
"event_type": "test_event",
"event_data": {},
"context": {},
},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire(
"test_event",
{"some_attr": "some_value", "another": "value"},
context=context_with_user,
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_event_with_nested_data(hass, calls):
"""Test the firing of events with nested data."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "event",
"event_type": "test_event",
"event_data": {"parent_attr": {"some_attr": "some_value"}},
},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire(
"test_event", {"parent_attr": {"some_attr": "some_value", "another": "value"}}
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_if_event_data_not_matches(hass, calls):
"""Test firing of event if no data match."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "event",
"event_type": "test_event",
"event_data": {"some_attr": "some_value"},
},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire("test_event", {"some_attr": "some_other_value"})
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_not_fires_if_event_context_not_matches(
hass, calls, context_with_user
):
"""Test firing of event if no context match."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "event",
"event_type": "test_event",
"context": {"user_id": "some_user"},
},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire("test_event", {}, context=context_with_user)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_multiple_user_ids(hass, calls, context_with_user):
"""Test the firing of event when the trigger has multiple user ids."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "event",
"event_type": "test_event",
"event_data": {},
"context": {"user_id": [context_with_user.user_id, "another id"]},
},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire("test_event", {}, context=context_with_user)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_event_data_with_list(hass, calls):
"""Test the (non)firing of event when the data schema has lists."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "event",
"event_type": "test_event",
"event_data": {"some_attr": [1, 2]},
"context": {},
},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire("test_event", {"some_attr": [1, 2]})
await hass.async_block_till_done()
assert len(calls) == 1
# don't match a single value
hass.bus.async_fire("test_event", {"some_attr": 1})
await hass.async_block_till_done()
assert len(calls) == 1
# don't match a containing list
hass.bus.async_fire("test_event", {"some_attr": [1, 2, 3]})
await hass.async_block_till_done()
assert len(calls) == 1
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl.third_party import unittest3_backport
class TextTestResult(unittest3_backport.TextTestResult):
"""TestResult class that provides the default text result formatting."""
def __init__(self, stream, descriptions, verbosity):
# Disable the verbose per-test output from the superclass, since it would
# conflict with our customized output.
super(TextTestResult, self).__init__(stream, descriptions, 0)
self._per_test_output = verbosity > 0
def _print_status(self, tag, test):
if self._per_test_output:
test_id = test.id()
if test_id.startswith('__main__.'):
test_id = test_id[len('__main__.'):]
print('[%s] %s' % (tag, test_id), file=self.stream)
self.stream.flush()
def startTest(self, test):
super(TextTestResult, self).startTest(test)
self._print_status(' RUN ', test)
def addSuccess(self, test):
super(TextTestResult, self).addSuccess(test)
self._print_status(' OK ', test)
def addError(self, test, err):
super(TextTestResult, self).addError(test, err)
self._print_status(' FAILED ', test)
def addFailure(self, test, err):
super(TextTestResult, self).addFailure(test, err)
self._print_status(' FAILED ', test)
def addSkip(self, test, reason):
super(TextTestResult, self).addSkip(test, reason)
self._print_status(' SKIPPED ', test)
def addExpectedFailure(self, test, err):
super(TextTestResult, self).addExpectedFailure(test, err)
self._print_status(' OK ', test)
def addUnexpectedSuccess(self, test):
super(TextTestResult, self).addUnexpectedSuccess(test)
self._print_status(' FAILED ', test)
class TextTestRunner(unittest.TextTestRunner):
"""A test runner that produces formatted text results."""
_TEST_RESULT_CLASS = TextTestResult
# Set this to true at the class or instance level to run tests using a
  # debug-friendly method (e.g., one that doesn't catch exceptions and interacts
# better with debuggers).
# Usually this is set using --pdb_post_mortem.
run_for_debugging = False
def run(self, test):
# type: (TestCase) -> TestResult
if self.run_for_debugging:
return self._run_debug(test)
else:
return super(TextTestRunner, self).run(test)
def _run_debug(self, test):
# type: (TestCase) -> TestResult
test.debug()
# Return an empty result to indicate success.
return self._makeResult()
def _makeResult(self):
return TextTestResult(self.stream, self.descriptions, self.verbosity)
|
from app.wraps.login_wrap import login_required
from app import app
from app.utils import ResponseUtil, RequestUtil, AuthUtil
from app.database.model import History
# get history list
@app.route('/api/history/list', methods=['GET'])
@login_required()
def api_history_list():
# login user
user_id = RequestUtil.get_login_user().get('id', '')
webhook_id = RequestUtil.get_parameter('webhook_id', '')
if not AuthUtil.has_readonly_auth(user_id, webhook_id):
        return ResponseUtil.standard_response(0, 'Permission denied!')
page = RequestUtil.get_parameter('page', '1')
try:
page = int(page)
if page < 1:
page = 1
    except (TypeError, ValueError):
page = 1
page_size = 25
paginations = History.query\
.filter_by(webhook_id=webhook_id)\
.order_by(History.id.desc())\
.paginate(page, page_size, error_out=False)
histories = [history.dict() for history in paginations.items]
data = {
'histories': histories,
'has_prev': paginations.has_prev,
'has_next': paginations.has_next,
'page': paginations.page
}
return ResponseUtil.standard_response(1, data)
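# Example exchange (hypothetical; the exact envelope depends on
# ResponseUtil.standard_response):
#     GET /api/history/list?webhook_id=42&page=1
#     -> {"success": 1, "data": {"histories": [...], "has_prev": false,
#         "has_next": true, "page": 1}}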
|
from ._action import BaseDescriptor
class Attribute(BaseDescriptor):
""" Attributes are (readonly, and usually static) values associated with
Component classes. They expose and document a value without
providing means of observing changes like ``Property`` does. (The
actual value is taken from ``component._xx``, with "xx" the name
of the attribute.)
"""
def __init__(self, doc=''):
# Set doc
if not isinstance(doc, str):
raise TypeError('event.Attribute() doc must be a string.')
self._doc = doc
self._set_name('anonymous_attribute')
def _set_name(self, name):
self._name = name # or func.__name__
self.__doc__ = self._format_doc('attribute', name, self._doc)
def __set__(self, instance, value):
t = 'Cannot set attribute %r.'
raise AttributeError(t % self._name)
def __get__(self, instance, owner):
if instance is None:
return self
return getattr(instance, '_' + self._name)
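# Usage sketch (assumes a flexx-style Component that stores the backing value
# on ``self._foo``; the attribute name is normally filled in by the metaclass):
#
#     class MyComponent(Component):
#         foo = Attribute(doc='A read-only value.')
#         def init(self):
#             self._foo = 42
#
#     MyComponent().foo        # -> 42, read from component._foo
#     MyComponent().foo = 1    # -> AttributeError: Cannot set attribute 'foo'.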
|
import pytest
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.components.sonos import DOMAIN
from homeassistant.const import CONF_HOSTS
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
@pytest.fixture(name="config_entry")
def config_entry_fixture():
"""Create a mock Sonos config entry."""
return MockConfigEntry(domain=DOMAIN, title="Sonos")
@pytest.fixture(name="soco")
def soco_fixture(music_library, speaker_info, dummy_soco_service):
"""Create a mock pysonos SoCo fixture."""
with patch("pysonos.SoCo", autospec=True) as mock, patch(
"socket.gethostbyname", return_value="192.168.42.2"
):
mock_soco = mock.return_value
mock_soco.uid = "RINCON_test"
mock_soco.play_mode = "NORMAL"
mock_soco.music_library = music_library
mock_soco.get_speaker_info.return_value = speaker_info
mock_soco.avTransport = dummy_soco_service
mock_soco.renderingControl = dummy_soco_service
mock_soco.zoneGroupTopology = dummy_soco_service
mock_soco.contentDirectory = dummy_soco_service
yield mock_soco
@pytest.fixture(name="discover", autouse=True)
def discover_fixture(soco):
"""Create a mock pysonos discover fixture."""
def do_callback(callback, **kwargs):
callback(soco)
with patch("pysonos.discover_thread", side_effect=do_callback) as mock:
yield mock
@pytest.fixture(name="config")
def config_fixture():
"""Create hass config fixture."""
return {DOMAIN: {MP_DOMAIN: {CONF_HOSTS: ["192.168.42.1"]}}}
@pytest.fixture(name="dummy_soco_service")
def dummy_soco_service_fixture():
"""Create dummy_soco_service fixture."""
service = Mock()
service.subscribe = Mock()
return service
@pytest.fixture(name="music_library")
def music_library_fixture():
"""Create music_library fixture."""
music_library = Mock()
music_library.get_sonos_favorites.return_value = []
return music_library
@pytest.fixture(name="speaker_info")
def speaker_info_fixture():
"""Create speaker_info fixture."""
return {
"zone_name": "Zone A",
"model_name": "Model Name",
"software_version": "49.2-64250",
"mac_address": "00-11-22-33-44-55",
}
|
from .base_classes import ContainerCommand, Command
from .package import Package
from .utils import NoEscape
class PageStyle(ContainerCommand):
r"""Allows the creation of new page styles."""
_latex_name = "fancypagestyle"
packages = [Package('fancyhdr')]
def __init__(self, name, *, header_thickness=0, footer_thickness=0,
data=None):
r"""
Args
----
name: str
The name of the page style
header_thickness: float
Value to set for the line under the header
footer_thickness: float
Value to set for the line over the footer
data: str or `~.LatexObject`
The data to place inside the PageStyle
"""
self.name = name
super().__init__(data=data, arguments=self.name)
self.change_thickness(element="header", thickness=header_thickness)
self.change_thickness(element="footer", thickness=footer_thickness)
# Clear the current header and footer
self.append(Head())
self.append(Foot())
def change_thickness(self, element, thickness):
r"""Change line thickness.
Changes the thickness of the line under/over the header/footer
to the specified thickness.
Args
----
element: str
the name of the element to change thickness for: header, footer
thickness: float
the thickness to set the line to
"""
if element == "header":
self.data.append(Command("renewcommand",
arguments=[NoEscape(r"\headrulewidth"),
str(thickness) + 'pt']))
elif element == "footer":
self.data.append(Command("renewcommand", arguments=[
NoEscape(r"\footrulewidth"), str(thickness) + 'pt']))
def simple_page_number():
"""Get a string containing commands to display the page number.
Returns
-------
str
The latex string that displays the page number
"""
return NoEscape(r'Page \thepage\ of \pageref{LastPage}')
class Head(ContainerCommand):
r"""Allows the creation of headers."""
_latex_name = "fancyhead"
def __init__(self, position=None, *, data=None):
r"""
Args
----
position: str
the headers position: L, C, R
data: str or `~.LatexObject`
The data to place inside the Head element
"""
self.position = position
super().__init__(data=data, options=position)
class Foot(Head):
r"""Allows the creation of footers."""
_latex_name = "fancyfoot"
|
from sqlalchemy import BigInteger, Boolean, Column, ForeignKey, Integer, String
from lemur.database import db
class ApiKey(db.Model):
__tablename__ = "api_keys"
id = Column(Integer, primary_key=True)
name = Column(String)
user_id = Column(Integer, ForeignKey("users.id"))
ttl = Column(BigInteger)
issued_at = Column(BigInteger)
revoked = Column(Boolean)
def __repr__(self):
return "ApiKey(name={name}, user_id={user_id}, ttl={ttl}, issued_at={iat}, revoked={revoked})".format(
user_id=self.user_id,
name=self.name,
ttl=self.ttl,
iat=self.issued_at,
revoked=self.revoked,
)
|
from datetime import timedelta
import logging
import praw
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_MAXIMUM,
CONF_PASSWORD,
CONF_USERNAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_SORT_BY = "sort_by"
CONF_SUBREDDITS = "subreddits"
ATTR_ID = "id"
ATTR_BODY = "body"
ATTR_COMMENTS_NUMBER = "comms_num"
ATTR_CREATED = "created"
ATTR_POSTS = "posts"
ATTR_SUBREDDIT = "subreddit"
ATTR_SCORE = "score"
ATTR_TITLE = "title"
ATTR_URL = "url"
DEFAULT_NAME = "Reddit"
DOMAIN = "reddit"
LIST_TYPES = ["top", "controversial", "hot", "new"]
SCAN_INTERVAL = timedelta(seconds=300)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_SUBREDDITS): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_SORT_BY, default="hot"): vol.All(
cv.string, vol.In(LIST_TYPES)
),
vol.Optional(CONF_MAXIMUM, default=10): cv.positive_int,
}
)
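# Example configuration.yaml entry (a sketch matching the schema above):
#
#   sensor:
#     - platform: reddit
#       client_id: YOUR_CLIENT_ID
#       client_secret: YOUR_CLIENT_SECRET
#       username: YOUR_USERNAME
#       password: YOUR_PASSWORD
#       subreddits:
#         - homeassistant
#       sort_by: hot
#       maximum: 10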
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Reddit sensor platform."""
subreddits = config[CONF_SUBREDDITS]
user_agent = "{}_home_assistant_sensor".format(config[CONF_USERNAME])
limit = config[CONF_MAXIMUM]
sort_by = config[CONF_SORT_BY]
try:
reddit = praw.Reddit(
client_id=config[CONF_CLIENT_ID],
client_secret=config[CONF_CLIENT_SECRET],
username=config[CONF_USERNAME],
password=config[CONF_PASSWORD],
user_agent=user_agent,
)
_LOGGER.debug("Connected to praw")
except praw.exceptions.PRAWException as err:
_LOGGER.error("Reddit error %s", err)
return
sensors = [
RedditSensor(reddit, subreddit, limit, sort_by) for subreddit in subreddits
]
add_entities(sensors, True)
class RedditSensor(Entity):
"""Representation of a Reddit sensor."""
def __init__(self, reddit, subreddit: str, limit: int, sort_by: str):
"""Initialize the Reddit sensor."""
self._reddit = reddit
self._subreddit = subreddit
self._limit = limit
self._sort_by = sort_by
self._subreddit_data = []
@property
def name(self):
"""Return the name of the sensor."""
return f"reddit_{self._subreddit}"
@property
def state(self):
"""Return the state of the sensor."""
return len(self._subreddit_data)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_SUBREDDIT: self._subreddit,
ATTR_POSTS: self._subreddit_data,
CONF_SORT_BY: self._sort_by,
}
@property
def icon(self):
"""Return the icon to use in the frontend."""
return "mdi:reddit"
def update(self):
"""Update data from Reddit API."""
self._subreddit_data = []
try:
subreddit = self._reddit.subreddit(self._subreddit)
if hasattr(subreddit, self._sort_by):
method_to_call = getattr(subreddit, self._sort_by)
for submission in method_to_call(limit=self._limit):
self._subreddit_data.append(
{
ATTR_ID: submission.id,
ATTR_URL: submission.url,
ATTR_TITLE: submission.title,
ATTR_SCORE: submission.score,
ATTR_COMMENTS_NUMBER: submission.num_comments,
ATTR_CREATED: submission.created,
ATTR_BODY: submission.selftext,
}
)
except praw.exceptions.PRAWException as err:
_LOGGER.error("Reddit error %s", err)
|
__all__ = ["BetterHtmlFormatter"]
__version__ = "0.1.4"
import enum
import re
import warnings
from pygments.formatters.html import HtmlFormatter
MANY_SPACES = re.compile("( +)")
def _sp_to_nbsp(m):
    # Replace each space in a run with a non-breaking space so wrapped HTML
    # preserves indentation (a plain space here would make this a no-op).
    return "\N{NO-BREAK SPACE}" * (m.end() - m.start())
class BetterLinenos(enum.Enum):
TABLE = "table"
OL = "ol"
class BetterHtmlFormatter(HtmlFormatter):
r"""
Format tokens as HTML 4 ``<span>`` tags, with alternate formatting styles.
* ``linenos = 'table'`` renders each line of code in a separate table row
* ``linenos = 'ol'`` renders each line in a <li> element (inside <ol>)
Both options allow word wrap and don't include line numbers when copying.
"""
name = "HTML"
aliases = ["html"]
filenames = ["*.html", "*.htm"]
def __init__(self, **options):
"""Initialize the formatter."""
super().__init__(**options)
self.linenos_name = self.options.get("linenos", "table")
if self.linenos_name is False:
self.linenos_val = False
self.linenos = 0
elif self.linenos_name is True:
self.linenos_name = "table"
if self.linenos_name is not False:
self.linenos_val = BetterLinenos(self.linenos_name)
self.linenos = 2 if self.linenos_val == BetterLinenos.OL else 1
def get_style_defs(self, arg=None, wrapper_classes=None):
"""Generate CSS style definitions.
Return CSS style definitions for the classes produced by the current
highlighting style. ``arg`` can be a string or list of selectors to
        insert before the token type classes. ``wrapper_classes`` is a list of
        classes for the wrappers; it defaults to the ``cssclass`` option.
"""
base = super().get_style_defs(arg)
new_styles = (
("{0} table, {0} tr, {0} td", "border-spacing: 0; border-collapse: separate; padding: 0"),
("{0} pre", "white-space: pre-wrap; line-height: normal"),
(
"{0}table td.linenos",
"vertical-align: top; padding-left: 10px; padding-right: 10px; user-select: none; -webkit-user-select: none",
),
# Hack for Safari (user-select does not affect copy-paste)
("{0}table td.linenos code:before", "content: attr(data-line-number)"),
("{0}table td.code", "overflow-wrap: normal; border-collapse: collapse"),
(
"{0}table td.code code",
"overflow: unset; border: none; padding: 0; margin: 0; white-space: pre-wrap; line-height: unset; background: none",
),
("{0} .lineno.nonumber", "list-style: none"),
)
new_styles_code = []
if wrapper_classes is None:
wrapper_classes = ["." + self.cssclass]
for cls, rule in new_styles:
new_styles_code.append(", ".join(cls.format(c) for c in wrapper_classes) + " { " + rule + " }")
return base + "\n" + "\n".join(new_styles_code)
def _wrap_tablelinenos(self, inner):
lncount = 0
codelines = []
for t, line in inner:
if t:
lncount += 1
codelines.append(line)
fl = self.linenostart
mw = len(str(lncount + fl - 1))
sp = self.linenospecial
st = self.linenostep
la = self.lineanchors
aln = self.anchorlinenos
nocls = self.noclasses
if sp:
lines = []
for i in range(fl, fl + lncount):
line_before = ""
line_after = ""
if i % st == 0:
if i % sp == 0:
if aln:
line_before = '<a href="#%s-%d" class="special">' % (la, i)
line_after = "</a>"
else:
line_before = '<span class="special">'
line_after = "</span>"
elif aln:
line_before = '<a href="#%s-%d">' % (la, i)
line_after = "</a>"
lines.append((line_before, "%*d" % (mw, i), line_after))
else:
lines.append(("", "", ""))
else:
lines = []
for i in range(fl, fl + lncount):
line_before = ""
line_after = ""
if i % st == 0:
if aln:
line_before = '<a href="#%s-%d">' % (la, i)
line_after = "</a>"
lines.append((line_before, "%*d" % (mw, i), line_after))
else:
lines.append(("", "", ""))
yield 0, '<div class="%s"><table class="%stable">' % (
self.cssclass,
self.cssclass,
)
for lndata, cl in zip(lines, codelines):
ln_b, ln, ln_a = lndata
cl = MANY_SPACES.sub(_sp_to_nbsp, cl)
if nocls:
yield 0, (
'<tr><td class="linenos linenodiv" style="background-color: #f0f0f0; padding-right: 10px">' + ln_b +
'<code data-line-number="' + ln + '"></code>' + ln_a + '</td><td class="code"><code>' + cl + "</code></td></tr>"
)
else:
yield 0, (
'<tr><td class="linenos linenodiv">' + ln_b + '<code data-line-number="' + ln +
'"></code>' + ln_a + '</td><td class="code"><code>' + cl + "</code></td></tr>"
)
yield 0, "</table></div>"
    def _wrap_inlinelinenos(self, inner):
        # Override: delegate inline line numbers to the <ol>-based renderer.
        return self._wrap_ollinenos(inner)
def _wrap_ollinenos(self, inner):
lines = inner
sp = self.linenospecial
st = self.linenostep or 1
num = self.linenostart
if self.anchorlinenos:
warnings.warn("anchorlinenos is not supported for linenos='ol'.")
yield 0, "<ol>"
if self.noclasses:
if sp:
for t, line in lines:
if num % sp == 0:
style = "background-color: #ffffc0; padding: 0 5px 0 5px"
else:
style = "background-color: #f0f0f0; padding: 0 5px 0 5px"
if num % st != 0:
style += "; list-style: none"
yield 1, '<li style="%s" value="%s">' % (style, num,) + line + "</li>"
num += 1
else:
for t, line in lines:
yield 1, (
'<li style="background-color: #f0f0f0; padding: 0 5px 0 5px%s" value="%s">'
% (("; list-style: none" if num % st != 0 else ""), num) + line + "</li>"
)
num += 1
elif sp:
for t, line in lines:
yield 1, '<li class="lineno%s%s" value="%s">' % (
" special" if num % sp == 0 else "",
" nonumber" if num % st != 0 else "",
num,
) + line + "</li>"
num += 1
else:
for t, line in lines:
yield 1, '<li class="lineno%s" value="%s">' % (
"" if num % st != 0 else " nonumber",
num,
) + line + "</li>"
num += 1
yield 0, "</ol>"
def format_unencoded(self, tokensource, outfile):
"""Format code and write to outfile.
The formatting process uses several nested generators; which of
them are used is determined by the user's options.
Each generator should take at least one argument, ``inner``,
and wrap the pieces of text generated by this.
Always yield 2-tuples: (code, text). If "code" is 1, the text
is part of the original tokensource being highlighted, if it's
0, the text is some piece of wrapping. This makes it possible to
use several different wrappers that process the original source
linewise, e.g. line number generators.
"""
if self.linenos_val is False:
return super().format_unencoded(tokensource, outfile)
source = self._format_lines(tokensource)
if self.hl_lines:
source = self._highlight_lines(source)
if not self.nowrap:
if self.linenos_val == BetterLinenos.OL:
source = self._wrap_ollinenos(source)
if self.lineanchors:
source = self._wrap_lineanchors(source)
if self.linespans:
source = self._wrap_linespans(source)
if self.linenos_val == BetterLinenos.TABLE:
source = self._wrap_tablelinenos(source)
if self.linenos_val == BetterLinenos.OL:
source = self.wrap(source, outfile)
if self.full:
source = self._wrap_full(source, outfile)
for t, piece in source:
outfile.write(piece)
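# Usage sketch via pygments' standard entry point:
#
#     from pygments import highlight
#     from pygments.lexers import PythonLexer
#     html = highlight("print('hi')", PythonLexer(),
#                      BetterHtmlFormatter(linenos="ol"))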
|
import attr
from PyQt5.QtCore import QUrl, QPoint
import pytest
tabhistory = pytest.importorskip('qutebrowser.browser.webkit.tabhistory')
from qutebrowser.misc.sessions import TabHistoryItem as Item
from qutebrowser.utils import qtutils
pytestmark = pytest.mark.qt_log_ignore('QIODevice::read.*: device not open')
ITEMS = [
Item(QUrl('https://www.heise.de/'), 'heise'),
Item(QUrl('about:blank'), 'blank', active=True),
Item(QUrl('http://example.com/%E2%80%A6'), 'percent'),
Item(QUrl('http://example.com/?foo=bar'), 'arg',
original_url=QUrl('http://original.url.example.com/'),
user_data={'foo': 23, 'bar': 42}),
# From https://github.com/OtterBrowser/otter-browser/issues/709#issuecomment-74749471
Item(QUrl('http://github.com/OtterBrowser/24/134/2344/otter-browser/'
'issues/709/'),
'Page not found | github',
user_data={'zoom': 149, 'scroll-pos': QPoint(0, 0)}),
Item(QUrl('https://mail.google.com/mail/u/0/#label/some+label/'
'234lkjsd0932lkjf884jqwerdf4'),
'"some label" - [email protected] - Gmail"',
user_data={'zoom': 120, 'scroll-pos': QPoint(0, 0)}),
]
@attr.s
class Objects:
history = attr.ib()
user_data = attr.ib()
@pytest.fixture
def empty_history(webpage):
"""Fixture providing an empty QWebHistory."""
hist = webpage.history()
assert hist.count() == 0
return hist
@pytest.fixture
def objects(empty_history):
"""Fixture providing a history (and userdata) filled with example data."""
stream, _data, user_data = tabhistory.serialize(ITEMS)
qtutils.deserialize_stream(stream, empty_history)
return Objects(history=empty_history, user_data=user_data)
def test_count(objects):
"""Check if the history's count was loaded correctly."""
assert objects.history.count() == len(ITEMS)
@pytest.mark.parametrize('i', range(len(ITEMS)))
def test_valid(objects, i):
"""Check if all items are valid."""
assert objects.history.itemAt(i).isValid()
@pytest.mark.parametrize('i', range(len(ITEMS)))
def test_no_userdata(objects, i):
"""Check if all items have no user data."""
assert objects.history.itemAt(i).userData() is None
def test_userdata(objects):
"""Check if all user data has been restored to user_data."""
userdata_items = [item.user_data for item in ITEMS]
assert userdata_items == objects.user_data
def test_currentitem(objects):
"""Check if the current item index was loaded correctly."""
assert objects.history.currentItemIndex() == 1
@pytest.mark.parametrize('i, item', enumerate(ITEMS))
def test_urls(objects, i, item):
"""Check if the URLs were loaded correctly."""
assert objects.history.itemAt(i).url() == item.url
@pytest.mark.parametrize('i, item', enumerate(ITEMS))
def test_original_urls(objects, i, item):
"""Check if the original URLs were loaded correctly."""
assert objects.history.itemAt(i).originalUrl() == item.original_url
@pytest.mark.parametrize('i, item', enumerate(ITEMS))
def test_titles(objects, i, item):
"""Check if the titles were loaded correctly."""
assert objects.history.itemAt(i).title() == item.title
def test_no_active_item():
"""Check tabhistory.serialize with no active item."""
items = [Item(QUrl(), '')]
with pytest.raises(ValueError):
tabhistory.serialize(items)
def test_two_active_items():
"""Check tabhistory.serialize with two active items."""
items = [Item(QUrl(), '', active=True),
Item(QUrl(), ''),
Item(QUrl(), '', active=True)]
with pytest.raises(ValueError):
tabhistory.serialize(items)
def test_empty(empty_history):
"""Check tabhistory.serialize with no items."""
items = []
stream, _data, user_data = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, empty_history)
assert empty_history.count() == 0
assert empty_history.currentItemIndex() == 0
assert not user_data
|
from django.db.models import Q
from pyparsing import CaselessLiteral
from pyparsing import Combine
from pyparsing import OneOrMore
from pyparsing import Optional
from pyparsing import ParseResults
from pyparsing import StringEnd
from pyparsing import Word
from pyparsing import WordEnd
from pyparsing import alphas
from pyparsing import opAssoc
from pyparsing import operatorPrecedence
from pyparsing import printables
from pyparsing import quotedString
from pyparsing import removeQuotes
from zinnia.models.author import Author
from zinnia.models.entry import Entry
from zinnia.settings import SEARCH_FIELDS
from zinnia.settings import STOP_WORDS
def create_q(token):
"""
Creates the Q() object.
"""
meta = getattr(token, 'meta', None)
query = getattr(token, 'query', '')
wildcards = None
if isinstance(query, str): # Unicode -> Quoted string
search = query
else: # List -> No quoted string (possible wildcards)
if len(query) == 1:
search = query[0]
elif len(query) == 3:
wildcards = 'BOTH'
search = query[1]
elif len(query) == 2:
if query[0] == '*':
wildcards = 'START'
search = query[1]
else:
wildcards = 'END'
search = query[0]
# Ignore short term and stop words
if (len(search) < 3 and not search.isdigit()) or search in STOP_WORDS:
return Q()
if not meta:
q = Q()
for field in SEARCH_FIELDS:
q |= Q(**{'%s__icontains' % field: search})
return q
if meta == 'category':
if wildcards == 'BOTH':
return (Q(categories__title__icontains=search) |
Q(categories__slug__icontains=search))
elif wildcards == 'START':
return (Q(categories__title__iendswith=search) |
Q(categories__slug__iendswith=search))
elif wildcards == 'END':
return (Q(categories__title__istartswith=search) |
Q(categories__slug__istartswith=search))
else:
return (Q(categories__title__iexact=search) |
Q(categories__slug__iexact=search))
elif meta == 'author':
if wildcards == 'BOTH':
return Q(**{'authors__%s__icontains' % Author.USERNAME_FIELD:
search})
elif wildcards == 'START':
return Q(**{'authors__%s__iendswith' % Author.USERNAME_FIELD:
search})
elif wildcards == 'END':
return Q(**{'authors__%s__istartswith' % Author.USERNAME_FIELD:
search})
else:
return Q(**{'authors__%s__iexact' % Author.USERNAME_FIELD:
search})
elif meta == 'tag': # TODO: tags ignore wildcards
return Q(tags__icontains=search)
def union_q(token):
"""
Appends all the Q() objects.
"""
query = Q()
operation = 'and'
negation = False
for t in token:
if type(t) is ParseResults: # See tokens recursively
query &= union_q(t)
else:
if t in ('or', 'and'): # Set the new op and go to next token
operation = t
elif t == '-': # Next tokens needs to be negated
negation = True
else: # Append to query the token
if negation:
t = ~t
if operation == 'or':
query |= t
else:
query &= t
return query
NO_BRTS = printables.replace('(', '').replace(')', '')
SINGLE = Word(NO_BRTS.replace('*', ''))
WILDCARDS = Optional('*') + SINGLE + Optional('*') + WordEnd(wordChars=NO_BRTS)
QUOTED = quotedString.setParseAction(removeQuotes)
OPER_AND = CaselessLiteral('and')
OPER_OR = CaselessLiteral('or')
OPER_NOT = '-'
TERM = Combine(Optional(Word(alphas).setResultsName('meta') + ':') +
(QUOTED.setResultsName('query') |
WILDCARDS.setResultsName('query')))
TERM.setParseAction(create_q)
EXPRESSION = operatorPrecedence(TERM, [
(OPER_NOT, 1, opAssoc.RIGHT),
(OPER_OR, 2, opAssoc.LEFT),
(Optional(OPER_AND, default='and'), 2, opAssoc.LEFT)])
EXPRESSION.setParseAction(union_q)
QUERY = OneOrMore(EXPRESSION) + StringEnd()
QUERY.setParseAction(union_q)
def advanced_search(pattern):
"""
Parse the grammar of a pattern and build a queryset with it.
"""
query_parsed = QUERY.parseString(pattern)
return Entry.published.filter(query_parsed[0]).distinct()
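# Example patterns the grammar accepts (results depend on the configured
# SEARCH_FIELDS and STOP_WORDS):
#
#     advanced_search('django')                          # plain term
#     advanced_search('"exact phrase" or websearch')     # quoted string + OR
#     advanced_search('category:python -author:admin')   # metadata + negation
#     advanced_search('wild*')                           # trailing wildcard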
|
from copy import copy
from django.contrib.messages import ERROR
from django.test import SimpleTestCase
from django.urls import reverse
from weblate.trans.forms import SimpleUploadForm
from weblate.trans.models import ComponentList
from weblate.trans.tests.test_views import ViewTestCase
from weblate.trans.tests.utils import get_test_file
TEST_PO = get_test_file("cs.po")
TEST_CSV = get_test_file("cs.csv")
TEST_CSV_QUOTES = get_test_file("cs-quotes.csv")
TEST_CSV_QUOTES_ESCAPED = get_test_file("cs-quotes-escaped.csv")
TEST_PO_BOM = get_test_file("cs-bom.po")
TEST_FUZZY_PO = get_test_file("cs-fuzzy.po")
TEST_BADPLURALS = get_test_file("cs-badplurals.po")
TEST_POT = get_test_file("hello.pot")
TEST_POT_CHARSET = get_test_file("hello-charset.pot")
TEST_MO = get_test_file("cs.mo")
TEST_XLIFF = get_test_file("cs.poxliff")
TEST_ANDROID = get_test_file("strings-cs.xml")
TEST_XLSX = get_test_file("cs.xlsx")
TRANSLATION_OURS = "Nazdar světe!\n"
TRANSLATION_PO = "Ahoj světe!\n"
class ImportBaseTest(ViewTestCase):
"""Base test of file imports."""
test_file = TEST_PO
def setUp(self):
super().setUp()
# We need extra privileges for overwriting
self.user.is_superuser = True
self.user.save()
def do_import(self, test_file=None, follow=False, **kwargs):
"""Helper to perform file import."""
if test_file is None:
test_file = self.test_file
with open(test_file, "rb") as handle:
params = {
"file": handle,
"method": "translate",
"author_name": self.user.full_name,
"author_email": self.user.email,
}
params.update(kwargs)
return self.client.post(
reverse("upload_translation", kwargs=self.kw_translation),
params,
follow=follow,
)
class ImportTest(ImportBaseTest):
"""Testing of file imports."""
test_file = TEST_PO
def test_import_normal(self):
"""Test importing normally."""
response = self.do_import()
self.assertRedirects(response, self.translation_url)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 1)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, 4)
# Verify unit
unit = self.get_unit()
self.assertEqual(unit.target, TRANSLATION_PO)
def test_import_author(self):
"""Test importing normally."""
response = self.do_import(
author_name="Testing User", author_email="[email protected]"
)
self.assertRedirects(response, self.translation_url)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 1)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, 4)
# Verify unit
unit = self.get_unit()
self.assertEqual(unit.target, TRANSLATION_PO)
def test_import_overwrite(self):
"""Test importing with overwriting."""
# Translate one unit
self.change_unit(TRANSLATION_OURS)
response = self.do_import(conflicts="replace-translated")
self.assertRedirects(response, self.translation_url)
# Verify unit
unit = self.get_unit()
self.assertEqual(unit.target, TRANSLATION_PO)
def test_import_no_overwrite(self):
"""Test importing without overwriting."""
# Translate one unit
self.change_unit(TRANSLATION_OURS)
response = self.do_import()
self.assertRedirects(response, self.translation_url)
# Verify unit
unit = self.get_unit()
self.assertEqual(unit.target, TRANSLATION_OURS)
def test_import_fuzzy(self):
"""Test importing as fuzzy."""
response = self.do_import(method="fuzzy")
self.assertRedirects(response, self.translation_url)
# Verify unit
unit = self.get_unit()
self.assertEqual(unit.target, TRANSLATION_PO)
self.assertEqual(unit.fuzzy, True)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 0)
self.assertEqual(translation.stats.fuzzy, 1)
self.assertEqual(translation.stats.all, 4)
def test_import_suggest(self):
"""Test importing as suggestion."""
response = self.do_import(method="suggest")
self.assertRedirects(response, self.translation_url)
# Verify unit
unit = self.get_unit()
self.assertEqual(unit.translated, False)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 0)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, 4)
self.assertEqual(translation.stats.suggestions, 1)
def test_import_xliff(self):
response = self.do_import(test_file=TEST_XLIFF, follow=True)
self.assertContains(response, "updated: 1")
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 1)
class ImportErrorTest(ImportBaseTest):
"""Testing import of broken files."""
def test_mismatched_plurals(self):
"""Test importing a file with different number of plural forms.
In response to issue #900
"""
response = self.do_import(test_file=TEST_BADPLURALS, follow=True)
self.assertRedirects(response, self.translation_url)
messages = list(response.context["messages"])
self.assertEqual(len(messages), 1)
self.assertEqual(messages[0].level, ERROR)
self.assertIn(
"Plural forms in the uploaded file do not match", messages[0].message
)
class BOMImportTest(ImportTest):
test_file = TEST_PO_BOM
class XliffImportTest(ImportTest):
test_file = TEST_XLIFF
class ImportFuzzyTest(ImportBaseTest):
"""Testing of fuzzy file imports."""
test_file = TEST_FUZZY_PO
def test_import_normal(self):
"""Test importing normally."""
response = self.do_import(fuzzy="")
self.assertRedirects(response, self.translation_url)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 0)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, 4)
def test_import_process(self):
"""Test importing including fuzzy strings."""
response = self.do_import(fuzzy="process")
self.assertRedirects(response, self.translation_url)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 0)
self.assertEqual(translation.stats.fuzzy, 1)
self.assertEqual(translation.stats.all, 4)
def test_import_approve(self):
"""Test importing ignoring fuzzy flag."""
response = self.do_import(fuzzy="approve")
self.assertRedirects(response, self.translation_url)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 1)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, 4)
def test_import_review(self):
"""Test importing as approved."""
self.project.translation_review = True
self.project.save()
response = self.do_import(method="approve", fuzzy="approve")
self.assertRedirects(response, self.translation_url)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.approved, 1)
self.assertEqual(translation.stats.translated, 1)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, 4)
class ImportMoTest(ImportTest):
"""Testing of mo file imports."""
test_file = TEST_MO
class ImportMoPoTest(ImportTest):
"""Testing of mo file imports."""
test_file = TEST_MO
def create_component(self):
return self.create_po()
class ImportJoomlaTest(ImportTest):
def create_component(self):
return self.create_joomla()
class ImportJSONTest(ImportTest):
def create_component(self):
return self.create_json()
class ImportJSONMonoTest(ImportTest):
def create_component(self):
return self.create_json_mono()
class ImportPHPMonoTest(ImportTest):
def create_component(self):
return self.create_php_mono()
class StringsImportTest(ImportTest):
def create_component(self):
return self.create_iphone()
class AndroidImportTest(ViewTestCase):
def create_component(self):
return self.create_android()
def test_import(self):
with open(TEST_ANDROID, "rb") as handle:
self.client.post(
reverse("upload_translation", kwargs=self.kw_translation),
{
"file": handle,
"method": "translate",
"author_name": self.user.full_name,
"author_email": self.user.email,
},
)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 2)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, 4)
def test_replace(self):
self.user.is_superuser = True
self.user.save()
kwargs = copy(self.kw_translation)
kwargs["lang"] = "en"
with open(TEST_ANDROID, "rb") as handle:
self.client.post(
reverse("upload_translation", kwargs=kwargs),
{
"file": handle,
"method": "replace",
"author_name": self.user.full_name,
"author_email": self.user.email,
},
)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 0)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, 2)
class CSVImportTest(ViewTestCase):
test_file = TEST_CSV
def test_import(self):
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 0)
self.assertEqual(translation.stats.fuzzy, 0)
with open(self.test_file, "rb") as handle:
self.client.post(
reverse("upload_translation", kwargs=self.kw_translation),
{
"file": handle,
"method": "translate",
"author_name": self.user.full_name,
"author_email": self.user.email,
},
)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 1)
self.assertEqual(translation.stats.fuzzy, 0)
class CSVQuotesImportTest(CSVImportTest):
test_file = TEST_CSV_QUOTES
class CSVQuotesEscapedImportTest(CSVImportTest):
test_file = TEST_CSV_QUOTES_ESCAPED
class XlsxImportTest(CSVImportTest):
test_file = TEST_XLSX
class ExportTest(ViewTestCase):
"""Testing of file export."""
source = "Hello, world!\n"
target = "Nazdar svete!\n"
test_match_1 = "Weblate Hello World 2016"
test_match_2 = "Nazdar svete!"
test_header = "attachment; filename=test-test-cs.po"
test_source = "Orangutan has %d banana"
test_source_plural = "Orangutan has %d bananas"
def create_component(self):
# Needs to create PO file to have language pack option
return self.create_po()
def setUp(self):
super().setUp()
        # Add some content so that the generated .mo file is non-empty
self.edit_unit(self.source, self.target)
def assert_response_contains(self, response, *matches):
"""Replacement of assertContains to work on streamed responses."""
self.assertEqual(
response.status_code,
200,
"Couldn't retrieve content: Response code was %d" % response.status_code,
)
if response.streaming:
content = b"".join(response.streaming_content)
else:
content = response.content
for match in matches:
self.assertIn(
match.encode() if isinstance(match, str) else match,
content,
f"Couldn't find {match!r} in response",
)
def test_export(self):
response = self.client.get(
reverse("download_translation", kwargs=self.kw_translation)
)
self.assert_response_contains(response, self.test_match_1, self.test_match_2)
self.assertEqual(response["Content-Disposition"], self.test_header)
def export_format(self, fmt, **extra):
extra["format"] = fmt
return self.client.get(
reverse("download_translation", kwargs=self.kw_translation), extra
)
def test_export_po(self):
response = self.export_format("po")
self.assert_response_contains(
response,
self.test_source,
self.test_source_plural,
"/projects/test/test/cs/",
)
def test_export_po_todo(self):
response = self.export_format("po", q="state:<translated")
self.assert_response_contains(
response,
self.test_source,
self.test_source_plural,
"/projects/test/test/cs/",
)
def test_export_tmx(self):
response = self.export_format("tmx")
self.assert_response_contains(response, self.test_source)
def test_export_xliff(self):
response = self.export_format("xliff")
self.assert_response_contains(
response, self.test_source, self.test_source_plural
)
def test_export_xliff11(self):
response = self.export_format("xliff11")
self.assert_response_contains(
response, "urn:oasis:names:tc:xliff:document:1.1", self.test_source
)
def test_export_xlsx(self):
response = self.export_format("xlsx")
self.assertEqual(
response["Content-Disposition"], "attachment; filename=test-test-cs.xlsx"
)
self.assertEqual(
response["Content-Type"],
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
"; charset=utf-8",
)
def test_export_xlsx_empty(self):
response = self.export_format("xlsx", q="check:inconsistent")
self.assertEqual(
response["Content-Disposition"], "attachment; filename=test-test-cs.xlsx"
)
self.assertEqual(
response["Content-Type"],
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet"
"; charset=utf-8",
)
def test_export_invalid(self):
response = self.export_format("invalid")
self.assertEqual(response.status_code, 302)
class ExportMultifileTest(ExportTest):
source = "Weblate - continuous localization"
target = "Weblate - průběžná lokalizace"
test_match_1 = b"PK\001\002"
test_match_2 = b"PK\005\006"
test_header = "attachment; filename=test-test-cs.zip"
test_source = "https://www.youtube.com/watch?v=IVlXt6QdgdA"
test_source_plural = "https://www.youtube.com/watch?v=IVlXt6QdgdA"
def create_component(self):
return self.create_appstore()
class FormTest(SimpleTestCase):
def test_remove(self):
form = SimpleUploadForm()
form.remove_translation_choice("suggest")
self.assertEqual(
[x[0] for x in form.fields["method"].choices],
["translate", "approve", "fuzzy", "replace", "source"],
)
class ImportReplaceTest(ImportBaseTest):
"""Testing of file imports."""
test_file = TEST_BADPLURALS
def test_import(self):
"""Test importing normally."""
response = self.do_import(method="replace")
self.assertRedirects(response, self.translation_url)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 2)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, 2)
# Verify unit
unit = self.get_unit()
self.assertEqual(unit.target, TRANSLATION_PO)
class ImportSourceTest(ImportBaseTest):
"""Testing of source strings update imports."""
test_file = TEST_POT_CHARSET
expected = "Processed 3 strings from the uploaded files"
expected_count = 3
def setUp(self):
super().setUp()
self.kw_translation["lang"] = "en"
self.translation_url = reverse("translation", kwargs=self.kw_translation)
def test_import(self):
"""Test importing normally."""
response = self.do_import(method="source", follow=True)
self.assertRedirects(response, self.translation_url)
messages = list(response.context["messages"])
self.assertIn(self.expected, messages[0].message)
# Verify stats
translation = self.get_translation()
self.assertEqual(translation.stats.translated, 0)
self.assertEqual(translation.stats.fuzzy, 0)
self.assertEqual(translation.stats.all, self.expected_count)
# Verify unit
unit = self.get_unit()
self.assertEqual(unit.target, "")
class ImportSourceBrokenTest(ImportSourceTest):
test_file = TEST_POT
expected = 'Charset "CHARSET" is not a portable encoding name.'
expected_count = 4
class DownloadMultiTest(ViewTestCase):
def test_component(self):
response = self.client.get(
reverse("download_component", kwargs=self.kw_component)
)
self.assert_zip(response)
def test_project(self):
response = self.client.get(reverse("download_project", kwargs=self.kw_project))
self.assert_zip(response)
def test_project_lang(self):
response = self.client.get(
reverse(
"download_lang_project",
kwargs={"lang": "cs", "project": self.project.slug},
)
)
self.assert_zip(response)
def test_component_list(self):
clist = ComponentList.objects.create(name="TestCL", slug="testcl")
clist.components.add(self.component)
response = self.client.get(
reverse("download_component_list", kwargs={"name": "testcl"})
)
self.assert_zip(response)
|
from homeassistant import config_entries, setup
from homeassistant.components.NEW_DOMAIN.const import (
DOMAIN,
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
from homeassistant.helpers import config_entry_oauth2_flow
from tests.async_mock import patch
CLIENT_ID = "1234"
CLIENT_SECRET = "5678"
async def test_full_flow(hass, aiohttp_client, aioclient_mock, current_request):
"""Check full flow."""
assert await setup.async_setup_component(
hass,
"NEW_DOMAIN",
{
"NEW_DOMAIN": {"client_id": CLIENT_ID, "client_secret": CLIENT_SECRET},
"http": {"base_url": "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
"NEW_DOMAIN", context={"source": config_entries.SOURCE_USER}
)
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
assert result["url"] == (
f"{OAUTH2_AUTHORIZE}?response_type=code&client_id={CLIENT_ID}"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}"
)
client = await aiohttp_client(hass.http.app)
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.NEW_DOMAIN.async_setup_entry", return_value=True
) as mock_setup:
await hass.config_entries.flow.async_configure(result["flow_id"])
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert len(mock_setup.mock_calls) == 1
|
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TITLE,
DOMAIN,
SERVICE_NOTIFY,
)
from homeassistant.loader import bind_hass
@bind_hass
def send_message(hass, message, title=None, data=None):
"""Send a notification message."""
info = {ATTR_MESSAGE: message}
if title is not None:
info[ATTR_TITLE] = title
if data is not None:
info[ATTR_DATA] = data
hass.services.call(DOMAIN, SERVICE_NOTIFY, info)
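# Example (sketch): send_message(hass, "Door open", title="Alert") invokes the
# notify.notify service with {'message': 'Door open', 'title': 'Alert'}.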
|
from distutils.version import LooseVersion
import numpy as np
from numpy.testing import assert_allclose, assert_array_equal
from scipy import linalg
import pytest
from mne.utils import _sym_mat_pow, _reg_pinv, requires_version
@requires_version('numpy', '1.17') # pinv bugs
@pytest.mark.parametrize('dtype', (np.float64, np.complex128)) # real, complex
@pytest.mark.parametrize('ndim', (2, 3, 4))
@pytest.mark.parametrize('n', (3, 4))
@pytest.mark.parametrize('psdef', (True, False))
@pytest.mark.parametrize('deficient, reduce_rank', [
(False, False),
(True, False), # should auto-remove the reduced component
(True, True), # force removal of one component (though redundant here)
])
@pytest.mark.parametrize('func', [
_sym_mat_pow,
_reg_pinv,
])
def test_pos_semidef_inv(ndim, dtype, n, deficient, reduce_rank, psdef, func):
"""Test positive semidefinite matrix inverses."""
if LooseVersion(np.__version__) >= LooseVersion('1.19'):
svd = np.linalg.svd
else:
from mne.fixes import svd
# make n-dimensional matrix
n_extra = 2 # how many we add along the other dims
rng = np.random.RandomState(73)
shape = (n_extra,) * (ndim - 2) + (n, n)
mat = rng.randn(*shape) + 1j * rng.randn(*shape)
proj = np.eye(n)
if deficient:
vec = np.ones(n) / np.sqrt(n)
proj -= np.outer(vec, vec)
with pytest.warns(None): # intentionally discard imag
mat = mat.astype(dtype)
# now make it conjugate symmetric or positive semi-definite
if psdef:
mat = np.matmul(mat, mat.swapaxes(-2, -1).conj())
else:
mat += mat.swapaxes(-2, -1).conj()
assert_allclose(mat, mat.swapaxes(-2, -1).conj(), atol=1e-6)
s = svd(mat, hermitian=True)[1]
assert (s >= 0).all()
# make it rank deficient (maybe)
if deficient:
mat = np.matmul(np.matmul(proj, mat), proj)
# if the dtype is complex, the conjugate transpose != transpose
kwargs = dict(atol=1e-10, rtol=1e-10)
orig_eq_t = np.allclose(
mat, mat.swapaxes(-2, -1), **kwargs)
t_eq_ct = np.allclose(
mat.swapaxes(-2, -1), mat.conj().swapaxes(-2, -1), **kwargs)
if np.iscomplexobj(mat):
assert not orig_eq_t
assert not t_eq_ct
else:
assert t_eq_ct
assert orig_eq_t
assert mat.shape == shape
# ensure pos-semidef
s = np.linalg.svd(mat, compute_uv=False)
assert s.shape == shape[:-1]
rank = (s > s[..., :1] * 1e-12).sum(-1)
want_rank = n - deficient
assert_array_equal(rank, want_rank)
# assert equiv with NumPy
mat_pinv = np.linalg.pinv(mat)
if func is _sym_mat_pow:
if not psdef:
with pytest.raises(ValueError, match='not positive semi-'):
func(mat, -1)
return
mat_symv = func(mat, -1, reduce_rank=reduce_rank)
mat_sqrt = func(mat, 0.5)
if ndim == 2:
mat_sqrt_scipy = linalg.sqrtm(mat)
assert_allclose(mat_sqrt, mat_sqrt_scipy, atol=1e-6)
mat_2 = np.matmul(mat_sqrt, mat_sqrt)
assert_allclose(mat, mat_2, atol=1e-6)
mat_symv_2 = func(mat, -0.5, reduce_rank=reduce_rank)
mat_symv_2 = np.matmul(mat_symv_2, mat_symv_2)
assert_allclose(mat_symv_2, mat_symv, atol=1e-6)
else:
assert func is _reg_pinv
mat_symv, _, _ = func(mat, rank=None)
assert_allclose(mat_pinv, mat_symv, **kwargs)
want = np.dot(proj, np.eye(n))
if deficient:
want -= want.mean(axis=0)
for _ in range(ndim - 2):
want = np.repeat(want[np.newaxis], n_extra, axis=0)
assert_allclose(np.matmul(mat_symv, mat), want, **kwargs)
assert_allclose(np.matmul(mat, mat_symv), want, **kwargs)
|
from tesla_powerwall import MissingAttributeError, PowerwallUnreachableError
from homeassistant import config_entries, setup
from homeassistant.components.powerwall.const import DOMAIN
from homeassistant.const import CONF_IP_ADDRESS
from .mocks import _mock_powerwall_side_effect, _mock_powerwall_site_name
from tests.async_mock import patch
async def test_form_source_user(hass):
"""Test we get config flow setup form as a user."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
mock_powerwall = await _mock_powerwall_site_name(hass, "My site")
with patch(
"homeassistant.components.powerwall.config_flow.Powerwall",
return_value=mock_powerwall,
), patch(
"homeassistant.components.powerwall.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.powerwall.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_IP_ADDRESS: "1.2.3.4"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "My site"
assert result2["data"] == {CONF_IP_ADDRESS: "1.2.3.4"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_source_import(hass):
"""Test we setup the config entry via import."""
await setup.async_setup_component(hass, "persistent_notification", {})
mock_powerwall = await _mock_powerwall_site_name(hass, "Imported site")
with patch(
"homeassistant.components.powerwall.config_flow.Powerwall",
return_value=mock_powerwall,
), patch(
"homeassistant.components.powerwall.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.powerwall.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_IP_ADDRESS: "1.2.3.4"},
)
await hass.async_block_till_done()
assert result["type"] == "create_entry"
assert result["title"] == "Imported site"
assert result["data"] == {CONF_IP_ADDRESS: "1.2.3.4"}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_powerwall = _mock_powerwall_side_effect(site_info=PowerwallUnreachableError)
with patch(
"homeassistant.components.powerwall.config_flow.Powerwall",
return_value=mock_powerwall,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_IP_ADDRESS: "1.2.3.4"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_wrong_version(hass):
"""Test we can handle wrong version error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
mock_powerwall = _mock_powerwall_side_effect(
site_info=MissingAttributeError({}, "")
)
with patch(
"homeassistant.components.powerwall.config_flow.Powerwall",
return_value=mock_powerwall,
):
        result2 = await hass.config_entries.flow.async_configure(
            result["flow_id"],
            {CONF_IP_ADDRESS: "1.2.3.4"},
        )
    assert result2["type"] == "form"
    assert result2["errors"] == {"base": "wrong_version"}
|
import asyncio
import aiohttp
import homeassistant.components.rest.switch as rest
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.const import (
CONF_HEADERS,
CONF_NAME,
CONF_PLATFORM,
CONF_RESOURCE,
CONTENT_TYPE_JSON,
HTTP_INTERNAL_SERVER_ERROR,
HTTP_NOT_FOUND,
HTTP_OK,
)
from homeassistant.helpers.template import Template
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component
"""Tests for setting up the REST switch platform."""
NAME = "foo"
METHOD = "post"
RESOURCE = "http://localhost/"
STATE_RESOURCE = RESOURCE
HEADERS = {"Content-type": CONTENT_TYPE_JSON}
AUTH = None
async def test_setup_missing_config(hass):
"""Test setup with configuration missing required entries."""
assert not await rest.async_setup_platform(hass, {CONF_PLATFORM: rest.DOMAIN}, None)
async def test_setup_missing_schema(hass):
"""Test setup with resource missing schema."""
assert not await rest.async_setup_platform(
hass,
{CONF_PLATFORM: rest.DOMAIN, CONF_RESOURCE: "localhost"},
None,
)
async def test_setup_failed_connect(hass, aioclient_mock):
"""Test setup when connection error occurs."""
aioclient_mock.get("http://localhost", exc=aiohttp.ClientError)
assert not await rest.async_setup_platform(
hass,
{CONF_PLATFORM: rest.DOMAIN, CONF_RESOURCE: "http://localhost"},
None,
)
async def test_setup_timeout(hass, aioclient_mock):
"""Test setup when connection timeout occurs."""
aioclient_mock.get("http://localhost", exc=asyncio.TimeoutError())
assert not await rest.async_setup_platform(
hass,
{CONF_PLATFORM: rest.DOMAIN, CONF_RESOURCE: "http://localhost"},
None,
)
async def test_setup_minimum(hass, aioclient_mock):
"""Test setup with minimum configuration."""
aioclient_mock.get("http://localhost", status=HTTP_OK)
with assert_setup_component(1, SWITCH_DOMAIN):
assert await async_setup_component(
hass,
SWITCH_DOMAIN,
{
SWITCH_DOMAIN: {
CONF_PLATFORM: rest.DOMAIN,
CONF_RESOURCE: "http://localhost",
}
},
)
assert aioclient_mock.call_count == 1
async def test_setup(hass, aioclient_mock):
    """Test setup with valid configuration."""
    aioclient_mock.get("http://localhost", status=HTTP_OK)
    with assert_setup_component(1, SWITCH_DOMAIN):
        assert await async_setup_component(
            hass,
            SWITCH_DOMAIN,
            {
                SWITCH_DOMAIN: {
                    CONF_PLATFORM: rest.DOMAIN,
                    CONF_NAME: "foo",
                    CONF_RESOURCE: "http://localhost",
                    CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON},
                    rest.CONF_BODY_ON: "custom on text",
                    rest.CONF_BODY_OFF: "custom off text",
                }
            },
        )
    assert aioclient_mock.call_count == 1
async def test_setup_with_state_resource(hass, aioclient_mock):
    """Test setup with a separate state resource."""
    aioclient_mock.get("http://localhost", status=HTTP_NOT_FOUND)
    aioclient_mock.get("http://localhost/state", status=HTTP_OK)
    with assert_setup_component(1, SWITCH_DOMAIN):
        assert await async_setup_component(
            hass,
            SWITCH_DOMAIN,
            {
                SWITCH_DOMAIN: {
                    CONF_PLATFORM: rest.DOMAIN,
                    CONF_NAME: "foo",
                    CONF_RESOURCE: "http://localhost",
                    rest.CONF_STATE_RESOURCE: "http://localhost/state",
                    CONF_HEADERS: {"Content-type": CONTENT_TYPE_JSON},
                    rest.CONF_BODY_ON: "custom on text",
                    rest.CONF_BODY_OFF: "custom off text",
                }
            },
        )
    assert aioclient_mock.call_count == 1
"""Tests for REST switch platform."""
def _setup_test_switch(hass):
body_on = Template("on", hass)
body_off = Template("off", hass)
    switch = rest.RestSwitch(
        NAME,
        RESOURCE,
        STATE_RESOURCE,
        METHOD,
        HEADERS,
        AUTH,
        body_on,
        body_off,
        None,  # is_on_template
        10,  # timeout
        True,  # verify_ssl
    )
switch.hass = hass
return switch, body_on, body_off
def test_name(hass):
"""Test the name."""
switch, body_on, body_off = _setup_test_switch(hass)
assert NAME == switch.name
def test_is_on_before_update(hass):
"""Test is_on in initial state."""
switch, body_on, body_off = _setup_test_switch(hass)
assert switch.is_on is None
async def test_turn_on_success(hass, aioclient_mock):
"""Test turn_on."""
aioclient_mock.post(RESOURCE, status=HTTP_OK)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert body_on.template == aioclient_mock.mock_calls[-1][2].decode()
assert switch.is_on
async def test_turn_on_status_not_ok(hass, aioclient_mock):
"""Test turn_on when error status returned."""
aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert body_on.template == aioclient_mock.mock_calls[-1][2].decode()
assert switch.is_on is None
async def test_turn_on_timeout(hass, aioclient_mock):
"""Test turn_on when timeout occurs."""
    aioclient_mock.post(RESOURCE, exc=asyncio.TimeoutError())
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_on()
assert switch.is_on is None
async def test_turn_off_success(hass, aioclient_mock):
"""Test turn_off."""
aioclient_mock.post(RESOURCE, status=HTTP_OK)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_off()
assert body_off.template == aioclient_mock.mock_calls[-1][2].decode()
assert not switch.is_on
async def test_turn_off_status_not_ok(hass, aioclient_mock):
"""Test turn_off when error status returned."""
aioclient_mock.post(RESOURCE, status=HTTP_INTERNAL_SERVER_ERROR)
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_turn_off()
assert body_off.template == aioclient_mock.mock_calls[-1][2].decode()
assert switch.is_on is None
async def test_turn_off_timeout(hass, aioclient_mock):
"""Test turn_off when timeout occurs."""
aioclient_mock.post(RESOURCE, exc=asyncio.TimeoutError())
switch, body_on, body_off = _setup_test_switch(hass)
    await switch.async_turn_off()
assert switch.is_on is None
async def test_update_when_on(hass, aioclient_mock):
"""Test update when switch is on."""
switch, body_on, body_off = _setup_test_switch(hass)
aioclient_mock.get(RESOURCE, text=body_on.template)
await switch.async_update()
assert switch.is_on
async def test_update_when_off(hass, aioclient_mock):
"""Test update when switch is off."""
switch, body_on, body_off = _setup_test_switch(hass)
aioclient_mock.get(RESOURCE, text=body_off.template)
await switch.async_update()
assert not switch.is_on
async def test_update_when_unknown(hass, aioclient_mock):
"""Test update when unknown status returned."""
aioclient_mock.get(RESOURCE, text="unknown status")
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_update()
assert switch.is_on is None
async def test_update_timeout(hass, aioclient_mock):
"""Test update when timeout occurs."""
aioclient_mock.get(RESOURCE, exc=asyncio.TimeoutError())
switch, body_on, body_off = _setup_test_switch(hass)
await switch.async_update()
assert switch.is_on is None
|
import os
from celery import Celery
from celery.signals import task_failure
from django.conf import settings
# set the default Django settings module for the 'celery' program.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "weblate.settings")
app = Celery("weblate")
# Using a string here means the worker doesn't have to serialize
# the configuration object to child processes.
# - namespace='CELERY' means all celery-related configuration keys
# should have a `CELERY_` prefix.
app.config_from_object("django.conf:settings", namespace="CELERY")
# Load task modules from all registered Django app configs.
app.autodiscover_tasks()
@task_failure.connect
def handle_task_failure(exception=None, **kwargs):
from weblate.utils.errors import report_error
report_error(
extra_data=kwargs,
cause="Failure while executing task",
skip_sentry=True,
print_tb=True,
level="error",
)
@app.on_after_configure.connect
def configure_error_handling(sender, **kwargs):
"""Rollbar and Sentry integration.
Based on
https://www.mattlayman.com/blog/2017/django-celery-rollbar/
"""
if not bool(os.environ.get("CELERY_WORKER_RUNNING", False)):
return
from weblate.utils.errors import init_error_collection
init_error_collection(celery=True)
def get_queue_length(queue="celery"):
with app.connection_or_acquire() as conn:
return conn.default_channel.queue_declare(
queue=queue, durable=True, auto_delete=False
).message_count
def get_queue_list():
"""List queues in Celery."""
result = {"celery"}
for route in settings.CELERY_TASK_ROUTES.values():
if "queue" in route:
result.add(route["queue"])
return result
def get_queue_stats():
"""Calculate queue stats."""
return {queue: get_queue_length(queue) for queue in get_queue_list()}
def is_task_ready(task):
"""Workaround broken ready() for failed Celery results.
In case the task ends with an exception, the result tries to reconstruct
that. It can fail in case the exception can not be reconstructed using
data in args attribute.
See https://github.com/celery/celery/issues/5057
"""
try:
return task.ready()
except TypeError:
return True
def get_task_progress(task):
"""Return progress of a Celery task."""
# Completed task
if is_task_ready(task):
return 100
# In progress
result = task.result
if task.state == "PROGRESS" and result is not None:
return result["progress"]
# Not yet started
return 0
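# Illustrative sketch (not part of Weblate): for get_task_progress() above to
# report anything between 0 and 100, a task has to publish its state in this
# shape. The task below is an assumed example for demonstration only.
@app.task(bind=True)
def _example_progress_task(self, total=100):
    """Emit PROGRESS states in the format get_task_progress() expects."""
    for pos in range(total):
        self.update_state(state="PROGRESS", meta={"progress": 100 * pos // total})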
|
from time import time
from flexx import flx
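# Manual event sandbox: the left label logs raw DOM events hooked up with
# addEventListener on the green widget, while the right label logs flexx
# pointer events from a reaction on the red widget, so both streams can be
# compared side by side.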
class Test(flx.Widget):
def init(self):
self.t = time()
with flx.HFix():
self.label1 = flx.Label(flex=2, style='overflow-y:scroll; font-size:60%;')
flx.Widget(flex=1)
with flx.VFix(flex=2):
flx.Widget(flex=1)
test_widget1 = flx.Widget(flex=2, style='background: #afa;')
flx.Widget(flex=1)
test_widget2 = flx.Widget(flex=2, style='background: #faa;')
flx.Widget(flex=1)
flx.Widget(flex=1)
self.label2 = flx.Label(flex=1, style='overflow-y:scroll; font-size:60%;')
for name in ['pointerdown', 'pointermove', 'pointerup', 'pointercancel',
'mousedown', 'mousemove', 'mouseup', 'click', 'dblclick',
'touchstart', 'touchmove', 'touchend', 'touchcancel'
]:
test_widget1.node.addEventListener(name,
lambda e: self.show_event1(e.type))
def reaction(*events):
for ev in events:
self.show_event2(ev.type)
test_widget2.reaction(reaction,
'pointer_down', 'pointer_move', 'pointer_up',
'pointer_cancel',
'pointer_click', 'pointer_double_click',
)
@flx.action
def show_event1(self, name):
dt = time() - self.t
lines = self.label1.html.split('<br>')
lines = lines[:200]
lines.insert(0, f'{dt:.1f} {name}')
self.label1.set_html('<br>'.join(lines))
@flx.action
def show_event2(self, name):
dt = time() - self.t
lines = self.label2.html.split('<br>')
lines = lines[:200]
lines.insert(0, f'{dt:.1f} {name}')
self.label2.set_html('<br>'.join(lines))
a = flx.App(Test)
m = a.launch()
flx.run()
|
from scattertext.Common import PAIR_PLOT_HTML_VIZ_FILE_NAME, PAIR_PLOT_WITHOUT_HALO_HTML_VIZ_FILE_NAME
from scattertext.categoryprojector.CategoryProjection import CategoryProjection, CategoryProjectionBase
from scattertext.viz.BasicHTMLFromScatterplotStructure import D3URLs, ExternalJSUtilts, PackedDataUtils
from scattertext.viz.HTMLSemioticSquareViz import ClickableTerms
class PairPlotFromScatterplotStructure(object):
def __init__(self,
category_scatterplot_structure,
term_scatterplot_structure,
category_projection,
category_width,
category_height,
include_category_labels=True,
show_halo=True,
num_terms=5,
d3_url_struct=None,
x_dim=0,
y_dim=1,
protocol='http',
term_plot_interface='termPlotInterface',
category_plot_interface='categoryPlotInterface'):
        '''
        Parameters
        ----------
        category_scatterplot_structure: ScatterplotStructure
        term_scatterplot_structure: ScatterplotStructure
        category_projection: CategoryProjection
        category_height: int
        category_width: int
        show_halo: bool
        num_terms: int, default 5
        include_category_labels: bool, default True
        d3_url_struct: D3URLs
        x_dim: int, default 0
        y_dim: int, default 1
        protocol: str
            http or https
        term_plot_interface: str
        category_plot_interface: str
        '''
self.category_scatterplot_structure = category_scatterplot_structure
self.term_scatterplot_structure = term_scatterplot_structure
self.category_projection = category_projection
self.d3_url_struct = d3_url_struct if d3_url_struct else D3URLs()
ExternalJSUtilts.ensure_valid_protocol(protocol)
self.protocol = protocol
self.category_width = category_width
self.category_height = category_height
self.num_terms = num_terms
self.show_halo = show_halo
self.x_dim = x_dim
self.y_dim = y_dim
self.include_category_labels = include_category_labels
self.term_plot_interface = term_plot_interface
self.category_plot_interface = category_plot_interface
def to_html(self):
'''
Returns
-------
str, the html file representation
'''
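        # Stitch every piece of packed JavaScript -- visualization data for
        # both plots, the two build-scatterplot calls, the reset hook, and the
        # post-build search handlers -- into one script body that is spliced
        # into the HTML template below.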
javascript_to_insert = '\n'.join([
PackedDataUtils.full_content_of_javascript_files(),
self.category_scatterplot_structure._visualization_data.to_javascript('getCategoryDataAndInfo'),
self.category_scatterplot_structure.get_js_to_call_build_scatterplot_with_a_function(
self.category_plot_interface
),
self.term_scatterplot_structure._visualization_data.to_javascript('getTermDataAndInfo'),
self.term_scatterplot_structure.get_js_to_call_build_scatterplot_with_a_function(
self.term_plot_interface
),
self.term_scatterplot_structure.get_js_reset_function(
                values_to_set=[self.category_plot_interface, self.term_plot_interface],
                functions_to_reset=['build' + self.category_plot_interface,
                                    'build' + self.term_plot_interface]
),
PackedDataUtils.javascript_post_build_viz('categorySearch', self.category_plot_interface),
PackedDataUtils.javascript_post_build_viz('termSearch', self.term_plot_interface),
])
autocomplete_css = PackedDataUtils.full_content_of_default_autocomplete_css()
html_template = self._get_html_template()
html_content = (
html_template
.replace('/***AUTOCOMPLETE CSS***/', autocomplete_css, 1)
.replace('<!-- INSERT SCRIPT -->', javascript_to_insert, 1)
.replace('<!--D3URL-->', self.d3_url_struct.get_d3_url(), 1)
.replace('<!--D3SCALECHROMATIC-->', self.d3_url_struct.get_d3_scale_chromatic_url())
# .replace('<!-- INSERT D3 -->', self._get_packaged_file_content('d3.min.js'), 1)
)
html_content = (html_content.replace('http://', self.protocol + '://'))
if self.show_halo:
axes_labels = self.category_projection.get_nearest_terms(
num_terms=self.num_terms
)
for position, terms in axes_labels.items():
html_content = html_content.replace('{%s}' % position, self._get_lexicon_html(terms))
return html_content.replace('{width}', str(self.category_width)).replace('{height}', str(self.category_height))
def _get_html_template(self):
if self.show_halo:
return PackedDataUtils.get_packaged_html_template_content(PAIR_PLOT_HTML_VIZ_FILE_NAME)
return PackedDataUtils.get_packaged_html_template_content(PAIR_PLOT_WITHOUT_HALO_HTML_VIZ_FILE_NAME)
def _get_lexicon_html(self, terms):
lexicon_html = ''
for i, term in enumerate(terms):
lexicon_html += '<b>' + ClickableTerms.get_clickable_term(term, self.term_plot_interface) + '</b>'
if self.include_category_labels:
category = self.category_projection.category_counts.loc[term].idxmax()
lexicon_html += (
' (<i>%s</i>)' %
ClickableTerms.get_clickable_term(category, self.category_plot_interface,
self.term_plot_interface))
if i != len(terms) - 1:
lexicon_html += ',\n'
return lexicon_html
|
from pyownet.protocol import Error as ProtocolError
import pytest
from homeassistant.components.onewire.const import (
DEFAULT_OWSERVER_PORT,
DOMAIN,
PRESSURE_CBAR,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
ELECTRICAL_CURRENT_AMPERE,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_MBAR,
TEMP_CELSIUS,
VOLT,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import mock_device_registry, mock_registry
MOCK_CONFIG = {
SENSOR_DOMAIN: {
"platform": DOMAIN,
"host": "localhost",
"port": DEFAULT_OWSERVER_PORT,
"names": {
"10.111111111111": "My DS18B20",
},
}
}
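# Each entry below models one mocked 1-Wire device: the raw bytes the
# owserver proxy returns (device type first, then one value per sensor),
# the expected device-registry info, and the sensor entities the platform
# should create from them.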
MOCK_DEVICE_SENSORS = {
"00.111111111111": {
"inject_reads": [
b"", # read device type
],
"sensors": [],
},
"10.111111111111": {
"inject_reads": [
b"DS18S20", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "10.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS18S20",
"name": "10.111111111111",
},
"sensors": [
{
"entity_id": "sensor.my_ds18b20_temperature",
"unique_id": "/10.111111111111/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"12.111111111111": {
"inject_reads": [
b"DS2406", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "12.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS2406",
"name": "12.111111111111",
},
"sensors": [
{
"entity_id": "sensor.12_111111111111_temperature",
"unique_id": "/12.111111111111/TAI8570/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
{
"entity_id": "sensor.12_111111111111_pressure",
"unique_id": "/12.111111111111/TAI8570/pressure",
"injected_value": b" 1025.123",
"result": "1025.1",
"unit": PRESSURE_MBAR,
"class": DEVICE_CLASS_PRESSURE,
},
],
},
"1D.111111111111": {
"inject_reads": [
b"DS2423", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "1D.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS2423",
"name": "1D.111111111111",
},
"sensors": [
{
"entity_id": "sensor.1d_111111111111_counter_a",
"unique_id": "/1D.111111111111/counter.A",
"injected_value": b" 251123",
"result": "251123",
"unit": "count",
"class": None,
},
{
"entity_id": "sensor.1d_111111111111_counter_b",
"unique_id": "/1D.111111111111/counter.B",
"injected_value": b" 248125",
"result": "248125",
"unit": "count",
"class": None,
},
],
},
"22.111111111111": {
"inject_reads": [
b"DS1822", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "22.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS1822",
"name": "22.111111111111",
},
"sensors": [
{
"entity_id": "sensor.22_111111111111_temperature",
"unique_id": "/22.111111111111/temperature",
"injected_value": ProtocolError,
"result": "unknown",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"26.111111111111": {
"inject_reads": [
b"DS2438", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "26.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS2438",
"name": "26.111111111111",
},
"sensors": [
{
"entity_id": "sensor.26_111111111111_temperature",
"unique_id": "/26.111111111111/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
{
"entity_id": "sensor.26_111111111111_humidity",
"unique_id": "/26.111111111111/humidity",
"injected_value": b" 72.7563",
"result": "72.8",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.26_111111111111_humidity_hih3600",
"unique_id": "/26.111111111111/HIH3600/humidity",
"injected_value": b" 73.7563",
"result": "73.8",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.26_111111111111_humidity_hih4000",
"unique_id": "/26.111111111111/HIH4000/humidity",
"injected_value": b" 74.7563",
"result": "74.8",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.26_111111111111_humidity_hih5030",
"unique_id": "/26.111111111111/HIH5030/humidity",
"injected_value": b" 75.7563",
"result": "75.8",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.26_111111111111_humidity_htm1735",
"unique_id": "/26.111111111111/HTM1735/humidity",
"injected_value": ProtocolError,
"result": "unknown",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.26_111111111111_pressure",
"unique_id": "/26.111111111111/B1-R1-A/pressure",
"injected_value": b" 969.265",
"result": "969.3",
"unit": PRESSURE_MBAR,
"class": DEVICE_CLASS_PRESSURE,
},
{
"entity_id": "sensor.26_111111111111_illuminance",
"unique_id": "/26.111111111111/S3-R1-A/illuminance",
"injected_value": b" 65.8839",
"result": "65.9",
"unit": LIGHT_LUX,
"class": DEVICE_CLASS_ILLUMINANCE,
},
{
"entity_id": "sensor.26_111111111111_voltage_vad",
"unique_id": "/26.111111111111/VAD",
"injected_value": b" 2.97",
"result": "3.0",
"unit": VOLT,
"class": DEVICE_CLASS_VOLTAGE,
},
{
"entity_id": "sensor.26_111111111111_voltage_vdd",
"unique_id": "/26.111111111111/VDD",
"injected_value": b" 4.74",
"result": "4.7",
"unit": VOLT,
"class": DEVICE_CLASS_VOLTAGE,
},
{
"entity_id": "sensor.26_111111111111_current",
"unique_id": "/26.111111111111/IAD",
"injected_value": b" 1",
"result": "1.0",
"unit": ELECTRICAL_CURRENT_AMPERE,
"class": DEVICE_CLASS_CURRENT,
},
],
},
"28.111111111111": {
"inject_reads": [
b"DS18B20", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "28.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS18B20",
"name": "28.111111111111",
},
"sensors": [
{
"entity_id": "sensor.28_111111111111_temperature",
"unique_id": "/28.111111111111/temperature",
"injected_value": b" 26.984",
"result": "27.0",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"3B.111111111111": {
"inject_reads": [
b"DS1825", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "3B.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS1825",
"name": "3B.111111111111",
},
"sensors": [
{
"entity_id": "sensor.3b_111111111111_temperature",
"unique_id": "/3B.111111111111/temperature",
"injected_value": b" 28.243",
"result": "28.2",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"42.111111111111": {
"inject_reads": [
b"DS28EA00", # read device type
],
"device_info": {
"identifiers": {(DOMAIN, "42.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "DS28EA00",
"name": "42.111111111111",
},
"sensors": [
{
"entity_id": "sensor.42_111111111111_temperature",
"unique_id": "/42.111111111111/temperature",
"injected_value": b" 29.123",
"result": "29.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"EF.111111111111": {
"inject_reads": [
b"HobbyBoards_EF", # read type
],
"device_info": {
"identifiers": {(DOMAIN, "EF.111111111111")},
"manufacturer": "Maxim Integrated",
"model": "HobbyBoards_EF",
"name": "EF.111111111111",
},
"sensors": [
{
"entity_id": "sensor.ef_111111111111_humidity",
"unique_id": "/EF.111111111111/humidity/humidity_corrected",
"injected_value": b" 67.745",
"result": "67.7",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.ef_111111111111_humidity_raw",
"unique_id": "/EF.111111111111/humidity/humidity_raw",
"injected_value": b" 65.541",
"result": "65.5",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.ef_111111111111_temperature",
"unique_id": "/EF.111111111111/humidity/temperature",
"injected_value": b" 25.123",
"result": "25.1",
"unit": TEMP_CELSIUS,
"class": DEVICE_CLASS_TEMPERATURE,
},
],
},
"EF.111111111112": {
"inject_reads": [
b"HB_MOISTURE_METER", # read type
b" 1", # read is_leaf_0
b" 1", # read is_leaf_1
b" 0", # read is_leaf_2
b" 0", # read is_leaf_3
],
"device_info": {
"identifiers": {(DOMAIN, "EF.111111111112")},
"manufacturer": "Maxim Integrated",
"model": "HB_MOISTURE_METER",
"name": "EF.111111111112",
},
"sensors": [
{
"entity_id": "sensor.ef_111111111112_wetness_0",
"unique_id": "/EF.111111111112/moisture/sensor.0",
"injected_value": b" 41.745",
"result": "41.7",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.ef_111111111112_wetness_1",
"unique_id": "/EF.111111111112/moisture/sensor.1",
"injected_value": b" 42.541",
"result": "42.5",
"unit": PERCENTAGE,
"class": DEVICE_CLASS_HUMIDITY,
},
{
"entity_id": "sensor.ef_111111111112_moisture_2",
"unique_id": "/EF.111111111112/moisture/sensor.2",
"injected_value": b" 43.123",
"result": "43.1",
"unit": PRESSURE_CBAR,
"class": DEVICE_CLASS_PRESSURE,
},
{
"entity_id": "sensor.ef_111111111112_moisture_3",
"unique_id": "/EF.111111111112/moisture/sensor.3",
"injected_value": b" 44.123",
"result": "44.1",
"unit": PRESSURE_CBAR,
"class": DEVICE_CLASS_PRESSURE,
},
],
},
}
@pytest.mark.parametrize("device_id", MOCK_DEVICE_SENSORS.keys())
async def test_owserver_setup_valid_device(hass, device_id):
"""Test for 1-Wire device."""
entity_registry = mock_registry(hass)
device_registry = mock_device_registry(hass)
mock_device_sensor = MOCK_DEVICE_SENSORS[device_id]
dir_return_value = [f"/{device_id}/"]
read_side_effect = [device_id[0:2].encode()]
if "inject_reads" in mock_device_sensor:
read_side_effect += mock_device_sensor["inject_reads"]
expected_sensors = mock_device_sensor["sensors"]
for expected_sensor in expected_sensors:
read_side_effect.append(expected_sensor["injected_value"])
    # Queue extra failing reads so any unexpected extra read errors loudly
read_side_effect.extend([ProtocolError("Missing injected value")] * 10)
with patch("homeassistant.components.onewire.onewirehub.protocol.proxy") as owproxy:
owproxy.return_value.dir.return_value = dir_return_value
owproxy.return_value.read.side_effect = read_side_effect
assert await async_setup_component(hass, SENSOR_DOMAIN, MOCK_CONFIG)
await hass.async_block_till_done()
assert len(entity_registry.entities) == len(expected_sensors)
if len(expected_sensors) > 0:
device_info = mock_device_sensor["device_info"]
assert len(device_registry.devices) == 1
registry_entry = device_registry.async_get_device({(DOMAIN, device_id)}, set())
assert registry_entry is not None
assert registry_entry.identifiers == {(DOMAIN, device_id)}
assert registry_entry.manufacturer == device_info["manufacturer"]
assert registry_entry.name == device_info["name"]
assert registry_entry.model == device_info["model"]
for expected_sensor in expected_sensors:
entity_id = expected_sensor["entity_id"]
registry_entry = entity_registry.entities.get(entity_id)
assert registry_entry is not None
assert registry_entry.unique_id == expected_sensor["unique_id"]
assert registry_entry.unit_of_measurement == expected_sensor["unit"]
assert registry_entry.device_class == expected_sensor["class"]
state = hass.states.get(entity_id)
assert state.state == expected_sensor["result"]
|
import argparse
import chainer
import mxnet as mx
from chainercv.experimental.links import FCISResNet101
def main():
parser = argparse.ArgumentParser(
description='Script to convert mxnet params to chainer npz')
parser.add_argument(
'mxnet_param_file', metavar='mxnet-param-file',
help='Mxnet param file i.e. fcis_coco-0000.params')
parser.add_argument('--process', action='store_true')
parser.add_argument(
'--dataset', choices=('sbd', 'coco'), type=str, default='sbd')
parser.add_argument(
'--out', '-o', type=str, default=None)
args = parser.parse_args()
if args.dataset == 'sbd':
model = FCISResNet101(
n_fg_class=20,
pretrained_model=None)
elif args.dataset == 'coco':
model = FCISResNet101(
n_fg_class=80,
pretrained_model=None,
anchor_scales=[4, 8, 16, 32],
proposal_creator_params={
'nms_thresh': 0.7,
'n_train_pre_nms': 6000,
'n_train_post_nms': 300,
'n_test_pre_nms': 6000,
'n_test_post_nms': 300,
'force_cpu_nms': False,
'min_size': 2})
params = mx.nd.load(args.mxnet_param_file)
print('mxnet param is loaded: {}'.format(args.mxnet_param_file))
print('start conversion')
if args.process:
tests = [k for k in params.keys() if k.endswith('_test')]
for test in tests:
params[test.replace('_test', '')] = params.pop(test)
model = convert(model, params)
print('finish conversion')
    if args.out is None:
        out = 'fcis_resnet101_{}_converted.npz'.format(args.dataset)
    else:
        out = args.out
    print('saving to {}'.format(out))
    chainer.serializers.save_npz(out, model)
def convert(model, params):
finished_keys = []
for key, value in params.items():
value = value.asnumpy()
param_type, param_name = key.split(':')
if param_type == 'arg':
if param_name.endswith('_test'):
continue
elif param_name.startswith('rpn'):
if param_name == 'rpn_bbox_pred_bias':
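                    # MXNet orders bbox offsets as (x, y, w, h) while
                    # ChainerCV's RPN expects (y, x, h, w); the [1, 0, 3, 2]
                    # permutation below swaps each pair (an inference from
                    # the permutation itself, not from upstream docs)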
value = value.reshape((-1, 4))
value = value[:, [1, 0, 3, 2]]
value = value.reshape(-1)
assert model.rpn.loc.b.shape == value.shape
model.rpn.loc.b.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_bbox_pred_weight':
value = value.reshape((-1, 4, 512, 1, 1))
value = value[:, [1, 0, 3, 2]]
value = value.reshape((-1, 512, 1, 1))
assert model.rpn.loc.W.shape == value.shape
model.rpn.loc.W.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_cls_score_bias':
value = value.reshape((2, -1))
value = value.transpose((1, 0))
value = value.reshape(-1)
assert model.rpn.score.b.shape == value.shape
model.rpn.score.b.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_cls_score_weight':
value = value.reshape((2, -1, 512, 1, 1))
value = value.transpose((1, 0, 2, 3, 4))
value = value.reshape((-1, 512, 1, 1))
assert model.rpn.score.W.shape == value.shape
model.rpn.score.W.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_conv_3x3_bias':
assert model.rpn.conv1.b.shape == value.shape
model.rpn.conv1.b.array[:] = value
finished_keys.append(key)
elif param_name == 'rpn_conv_3x3_weight':
assert model.rpn.conv1.W.shape == value.shape
model.rpn.conv1.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('conv1'):
if param_name == 'conv1_weight':
assert model.extractor.conv1.conv.W.shape \
== value.shape
model.extractor.conv1.conv.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('bn_conv1'):
if param_name == 'bn_conv1_beta':
assert model.extractor.conv1.bn.beta.shape \
== value.shape
model.extractor.conv1.bn.beta.array[:] = value
finished_keys.append(key)
elif param_name == 'bn_conv1_gamma':
assert model.extractor.conv1.bn.gamma.shape \
== value.shape
model.extractor.conv1.bn.gamma.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('fcis'):
if param_name == 'fcis_bbox_bias':
value = value.reshape((2, 4, 7 * 7))
value = value[:, [1, 0, 3, 2]]
value = value.reshape(392)
assert model.head.ag_loc.b.shape == value.shape
model.head.ag_loc.b.array[:] = value
finished_keys.append(key)
elif param_name == 'fcis_bbox_weight':
value = value.reshape((2, 4, 7 * 7, 1024, 1, 1))
value = value[:, [1, 0, 3, 2]]
value = value.reshape((392, 1024, 1, 1))
assert model.head.ag_loc.W.shape == value.shape
model.head.ag_loc.W.array[:] = value
finished_keys.append(key)
elif param_name == 'fcis_cls_seg_bias':
assert model.head.cls_seg.b.shape == value.shape
model.head.cls_seg.b.array[:] = value
finished_keys.append(key)
elif param_name == 'fcis_cls_seg_weight':
assert model.head.cls_seg.W.shape == value.shape
model.head.cls_seg.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('conv_new_1'):
if param_name == 'conv_new_1_bias':
assert model.head.conv1.b.shape == value.shape
model.head.conv1.b.array[:] = value
finished_keys.append(key)
elif param_name == 'conv_new_1_weight':
assert model.head.conv1.W.shape == value.shape
model.head.conv1.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('res'):
block_name, branch_name, prm_name = param_name.split('_')
resblock_name = block_name[:4]
resblock = getattr(model.extractor, resblock_name)
if block_name[4:] == 'a':
blck_name = block_name[4:]
elif block_name[4:] == 'b':
blck_name = 'b1'
elif block_name[4:].startswith('b'):
blck_name = block_name[4:]
elif block_name[4:] == 'c':
blck_name = 'b2'
block = getattr(resblock, blck_name)
if branch_name == 'branch1':
conv_bn_name = 'residual_conv'
elif branch_name == 'branch2a':
conv_bn_name = 'conv1'
elif branch_name == 'branch2b':
conv_bn_name = 'conv2'
elif branch_name == 'branch2c':
conv_bn_name = 'conv3'
conv_bn = getattr(block, conv_bn_name)
if prm_name == 'weight':
assert conv_bn.conv.W.shape == value.shape
conv_bn.conv.W.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('bn'):
block_name, branch_name, prm_name = param_name.split('_')
resblock_name = 'res{}'.format(block_name[2])
resblock = getattr(model.extractor, resblock_name)
if block_name[3:] == 'a':
blck_name = block_name[3:]
elif block_name[3:] == 'b':
blck_name = 'b1'
elif block_name[3:].startswith('b'):
blck_name = block_name[3:]
elif block_name[3:] == 'c':
blck_name = 'b2'
block = getattr(resblock, blck_name)
if branch_name == 'branch1':
conv_bn_name = 'residual_conv'
elif branch_name == 'branch2a':
conv_bn_name = 'conv1'
elif branch_name == 'branch2b':
conv_bn_name = 'conv2'
elif branch_name == 'branch2c':
conv_bn_name = 'conv3'
conv_bn = getattr(block, conv_bn_name)
if prm_name == 'beta':
assert conv_bn.bn.beta.shape == value.shape
conv_bn.bn.beta.array[:] = value
finished_keys.append(key)
elif prm_name == 'gamma':
assert conv_bn.bn.gamma.shape == value.shape
conv_bn.bn.gamma.array[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
else:
print('param: {} is not converted'.format(key))
elif param_type == 'aux':
if param_name.endswith('_test'):
continue
elif param_name.startswith('bn_conv1'):
if param_name == 'bn_conv1_moving_mean':
assert model.extractor.conv1.bn.avg_mean.shape \
== value.shape
model.extractor.conv1.bn.avg_mean[:] = value
finished_keys.append(key)
elif param_name == 'bn_conv1_moving_var':
assert model.extractor.conv1.bn.avg_var.shape \
== value.shape
model.extractor.conv1.bn.avg_var[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
elif param_name.startswith('bn'):
block_name, branch_name, _, prm_name = \
param_name.split('_')
resblock_name = 'res{}'.format(block_name[2])
resblock = getattr(model.extractor, resblock_name)
if block_name[3:] == 'a':
blck_name = block_name[3:]
elif block_name[3:] == 'b':
blck_name = 'b1'
elif block_name[3:].startswith('b'):
blck_name = block_name[3:]
elif block_name[3:] == 'c':
blck_name = 'b2'
block = getattr(resblock, blck_name)
if branch_name == 'branch1':
conv_bn_name = 'residual_conv'
elif branch_name == 'branch2a':
conv_bn_name = 'conv1'
elif branch_name == 'branch2b':
conv_bn_name = 'conv2'
elif branch_name == 'branch2c':
conv_bn_name = 'conv3'
conv_bn = getattr(block, conv_bn_name)
if prm_name == 'mean':
assert conv_bn.bn.avg_mean.shape == value.shape
conv_bn.bn.avg_mean[:] = value
finished_keys.append(key)
elif prm_name == 'var':
assert conv_bn.bn.avg_var.shape == value.shape
conv_bn.bn.avg_var[:] = value
finished_keys.append(key)
else:
print('param: {} is not converted'.format(key))
else:
print('param: {} is not converted'.format(key))
else:
print('param: {} is not converted'.format(key))
return model
if __name__ == '__main__':
main()
|
from unittest import mock
import pytest
from xarray.backends.lru_cache import LRUCache
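# These tests pin down the LRUCache contract: dict-like access in insertion
# order, eviction of the least-recently-used key once maxsize is exceeded,
# priority refresh on reads and writes, an optional on_evict callback, and
# live resizing through the maxsize property.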
def test_simple():
cache = LRUCache(maxsize=2)
cache["x"] = 1
cache["y"] = 2
assert cache["x"] == 1
assert cache["y"] == 2
assert len(cache) == 2
assert dict(cache) == {"x": 1, "y": 2}
assert list(cache.keys()) == ["x", "y"]
assert list(cache.items()) == [("x", 1), ("y", 2)]
cache["z"] = 3
assert len(cache) == 2
assert list(cache.items()) == [("y", 2), ("z", 3)]
def test_trivial():
cache = LRUCache(maxsize=0)
cache["x"] = 1
assert len(cache) == 0
def test_invalid():
with pytest.raises(TypeError):
LRUCache(maxsize=None)
with pytest.raises(ValueError):
LRUCache(maxsize=-1)
def test_update_priority():
cache = LRUCache(maxsize=2)
cache["x"] = 1
cache["y"] = 2
assert list(cache) == ["x", "y"]
assert "x" in cache # contains
assert list(cache) == ["y", "x"]
assert cache["y"] == 2 # getitem
assert list(cache) == ["x", "y"]
cache["x"] = 3 # setitem
assert list(cache.items()) == [("y", 2), ("x", 3)]
def test_del():
cache = LRUCache(maxsize=2)
cache["x"] = 1
cache["y"] = 2
del cache["x"]
assert dict(cache) == {"y": 2}
def test_on_evict():
on_evict = mock.Mock()
cache = LRUCache(maxsize=1, on_evict=on_evict)
cache["x"] = 1
cache["y"] = 2
on_evict.assert_called_once_with("x", 1)
def test_on_evict_trivial():
on_evict = mock.Mock()
cache = LRUCache(maxsize=0, on_evict=on_evict)
cache["x"] = 1
on_evict.assert_called_once_with("x", 1)
def test_resize():
cache = LRUCache(maxsize=2)
assert cache.maxsize == 2
cache["w"] = 0
cache["x"] = 1
cache["y"] = 2
assert list(cache.items()) == [("x", 1), ("y", 2)]
cache.maxsize = 10
cache["z"] = 3
assert list(cache.items()) == [("x", 1), ("y", 2), ("z", 3)]
cache.maxsize = 1
assert list(cache.items()) == [("z", 3)]
with pytest.raises(ValueError):
cache.maxsize = -1
|
import unittest
from dedupe import predicates
from future.builtins import str
class TestPuncStrip(unittest.TestCase):
def test_sevenchar(self):
s1 = predicates.StringPredicate(predicates.sameSevenCharStartPredicate,
'foo')
assert s1({'foo': u'fo,18v*1vaad80'}) == s1({'foo': u'fo18v1vaad80'})
def test_set(self):
s1 = predicates.SimplePredicate(predicates.wholeSetPredicate,
'foo')
colors = set(['red', 'blue', 'green'])
assert s1({'foo': colors}) == (str(colors),)
class TestMetaphone(unittest.TestCase):
def test_metaphone_token(self):
block_val = predicates.metaphoneToken('9301 S. State St. ')
assert block_val == set([u'STT', u'S', u'ST'])
class TestWholeSet(unittest.TestCase):
def setUp(self):
self.s1 = set(['red', 'blue', 'green'])
def test_full_set(self):
block_val = predicates.wholeSetPredicate(self.s1)
self.assertEqual(block_val, (str(self.s1),))
class TestSetElement(unittest.TestCase):
def setUp(self):
self.s1 = set(['red', 'blue', 'green'])
def test_long_set(self):
block_val = predicates.commonSetElementPredicate(self.s1)
self.assertEqual(set(block_val), set(('blue', 'green', 'red')))
def test_empty_set(self):
block_val = predicates.commonSetElementPredicate(set())
self.assertEqual(block_val, tuple())
def test_first_last(self):
block_val = predicates.lastSetElementPredicate(self.s1)
assert block_val == ('red',)
block_val = predicates.firstSetElementPredicate(self.s1)
assert block_val == ('blue',)
def test_magnitude(self):
block_val = predicates.magnitudeOfCardinality(self.s1)
assert block_val == (u'0', )
block_val = predicates.magnitudeOfCardinality(())
assert block_val == ()
class TestLatLongGrid(unittest.TestCase):
def setUp(self):
self.latlong1 = (42.535, -5.012)
def test_precise_latlong(self):
block_val = predicates.latLongGridPredicate(self.latlong1)
assert block_val == (u'[42.5, -5.0]',)
block_val = predicates.latLongGridPredicate((0, 0))
assert block_val == ()
class TestNumericPredicates(unittest.TestCase):
def test_order_of_magnitude(self):
assert predicates.orderOfMagnitude(10) == (u'1',)
assert predicates.orderOfMagnitude(9) == (u'1',)
assert predicates.orderOfMagnitude(2) == (u'0',)
assert predicates.orderOfMagnitude(-2) == ()
def test_round_to_1(self):
assert predicates.roundTo1(22315) == (u'20000',)
assert predicates.roundTo1(-22315) == (u'-20000',)
class TestCompoundPredicate(unittest.TestCase):
def test_escapes_colon(self):
'''
Regression test for issue #836
'''
predicate_1 = predicates.SimplePredicate(
predicates.commonSetElementPredicate, 'col_1')
predicate_2 = predicates.SimplePredicate(
predicates.commonSetElementPredicate, 'col_2')
record = {
'col_1': ['foo:', 'foo'],
'col_2': [':bar', 'bar']
}
block_val = predicates.CompoundPredicate([
predicate_1,
predicate_2
])(record)
assert len(set(block_val)) == 4
assert block_val == ['foo\\::\\:bar', 'foo\\::bar', 'foo:\\:bar', 'foo:bar']
def test_escapes_escaped_colon(self):
'''
Regression test for issue #836
'''
predicate_1 = predicates.SimplePredicate(
predicates.commonSetElementPredicate, 'col_1')
predicate_2 = predicates.SimplePredicate(
predicates.commonSetElementPredicate, 'col_2')
record = {
'col_1': ['foo\\:', 'foo'],
'col_2': ['\\:bar', 'bar']
}
block_val = predicates.CompoundPredicate([
predicate_1,
predicate_2
])(record)
assert len(set(block_val)) == 4
assert block_val == ['foo\\\\::\\\\:bar', 'foo\\\\::bar', 'foo:\\\\:bar', 'foo:bar']
if __name__ == '__main__':
unittest.main()
|
DEBUG_PROXY_ISSUES = False # True
import gc
import os
import os.path
import re
import sys
import tempfile
import unittest
from contextlib import contextmanager
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
from urllib import pathname2url
except ImportError:
from urllib.request import pathname2url
from lxml import etree, html
def make_version_tuple(version_string):
return tuple(
int(part) if part.isdigit() else part
for part in re.findall('([0-9]+|[^0-9.]+)', version_string)
)
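# e.g. make_version_tuple("2.9.5") == (2, 9, 5); non-numeric suffixes survive
# as strings: "1.3b1" -> (1, 3, 'b', 1)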
IS_PYPY = (getattr(getattr(sys, 'implementation', None), 'name', None) == 'pypy' or
           getattr(sys, 'pypy_version_info', None) is not None)
IS_PYTHON3 = sys.version_info[0] >= 3
IS_PYTHON2 = sys.version_info[0] < 3
from xml.etree import ElementTree
if hasattr(ElementTree, 'VERSION'):
ET_VERSION = make_version_tuple(ElementTree.VERSION)
else:
ET_VERSION = (0,0,0)
if IS_PYTHON2:
from xml.etree import cElementTree
if hasattr(cElementTree, 'VERSION'):
CET_VERSION = make_version_tuple(cElementTree.VERSION)
else:
CET_VERSION = (0,0,0)
else:
CET_VERSION = (0, 0, 0)
cElementTree = None
def filter_by_version(test_class, version_dict, current_version):
"""Remove test methods that do not work with the current lib version.
"""
find_required_version = version_dict.get
def dummy_test_method(self):
pass
for name in dir(test_class):
expected_version = find_required_version(name, (0,0,0))
if expected_version > current_version:
setattr(test_class, name, dummy_test_method)
import doctest
try:
import pytest
except ImportError:
class skipif(object):
"Using a class because a function would bind into a method when used in classes"
def __init__(self, *args): pass
def __call__(self, func, *args): return func
else:
skipif = pytest.mark.skipif
def _get_caller_relative_path(filename, frame_depth=2):
module = sys.modules[sys._getframe(frame_depth).f_globals['__name__']]
return os.path.normpath(os.path.join(
os.path.dirname(getattr(module, '__file__', '')), filename))
from io import StringIO
unichr_escape = re.compile(r'\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}')
if sys.version_info[0] >= 3:
# Python 3
from builtins import str as unicode
from codecs import unicode_escape_decode
_chr = chr
def _str(s, encoding="UTF-8"):
return unichr_escape.sub(lambda x: unicode_escape_decode(x.group(0))[0], s)
def _bytes(s, encoding="UTF-8"):
return s.encode(encoding)
from io import BytesIO as _BytesIO
def BytesIO(*args):
if args and isinstance(args[0], str):
args = (args[0].encode("UTF-8"),)
return _BytesIO(*args)
doctest_parser = doctest.DocTestParser()
_fix_unicode = re.compile(r'(\s+)u(["\'])').sub
_fix_exceptions = re.compile(r'(.*except [^(]*),\s*(.*:)').sub
def make_doctest(filename):
filename = _get_caller_relative_path(filename)
doctests = read_file(filename)
doctests = _fix_unicode(r'\1\2', doctests)
doctests = _fix_exceptions(r'\1 as \2', doctests)
return doctest.DocTestCase(
doctest_parser.get_doctest(
doctests, {}, os.path.basename(filename), filename, 0))
else:
# Python 2
from __builtin__ import unicode
_chr = unichr
def _str(s, encoding="UTF-8"):
s = unicode(s, encoding=encoding)
return unichr_escape.sub(lambda x:
x.group(0).decode('unicode-escape'),
s)
def _bytes(s, encoding="UTF-8"):
return s
from io import BytesIO
doctest_parser = doctest.DocTestParser()
_fix_traceback = re.compile(r'^(\s*)(?:\w+\.)+(\w*(?:Error|Exception|Invalid):)', re.M).sub
_fix_exceptions = re.compile(r'(.*except [^(]*)\s+as\s+(.*:)').sub
_fix_bytes = re.compile(r'(\s+)b(["\'])').sub
def make_doctest(filename):
filename = _get_caller_relative_path(filename)
doctests = read_file(filename)
doctests = _fix_traceback(r'\1\2', doctests)
doctests = _fix_exceptions(r'\1, \2', doctests)
doctests = _fix_bytes(r'\1\2', doctests)
return doctest.DocTestCase(
doctest_parser.get_doctest(
doctests, {}, os.path.basename(filename), filename, 0))
try:
skipIf = unittest.skipIf
except AttributeError:
def skipIf(condition, why):
def _skip(thing):
import types
if isinstance(thing, (type, types.ClassType)):
return type(thing.__name__, (object,), {})
else:
return None
if condition:
return _skip
return lambda thing: thing
class HelperTestCase(unittest.TestCase):
def tearDown(self):
if DEBUG_PROXY_ISSUES:
gc.collect()
def parse(self, text, parser=None):
f = BytesIO(text) if isinstance(text, bytes) else StringIO(text)
return etree.parse(f, parser=parser)
def _rootstring(self, tree):
return etree.tostring(tree.getroot()).replace(
_bytes(' '), _bytes('')).replace(_bytes('\n'), _bytes(''))
class SillyFileLike:
def __init__(self, xml_data=_bytes('<foo><bar/></foo>')):
self.xml_data = xml_data
def read(self, amount=None):
if self.xml_data:
if amount:
data = self.xml_data[:amount]
self.xml_data = self.xml_data[amount:]
else:
data = self.xml_data
self.xml_data = _bytes('')
return data
return _bytes('')
class LargeFileLike:
def __init__(self, charlen=100, depth=4, children=5):
self.data = BytesIO()
self.chars = _bytes('a') * charlen
self.children = range(children)
self.more = self.iterelements(depth)
def iterelements(self, depth):
yield _bytes('<root>')
depth -= 1
if depth > 0:
for child in self.children:
for element in self.iterelements(depth):
yield element
yield self.chars
else:
yield self.chars
yield _bytes('</root>')
def read(self, amount=None):
data = self.data
append = data.write
if amount:
for element in self.more:
append(element)
if data.tell() >= amount:
break
else:
for element in self.more:
append(element)
result = data.getvalue()
data.seek(0)
data.truncate()
if amount:
append(result[amount:])
result = result[:amount]
return result
class LargeFileLikeUnicode(LargeFileLike):
def __init__(self, charlen=100, depth=4, children=5):
LargeFileLike.__init__(self, charlen, depth, children)
self.data = StringIO()
self.chars = _str('a') * charlen
self.more = self.iterelements(depth)
def iterelements(self, depth):
yield _str('<root>')
depth -= 1
if depth > 0:
for child in self.children:
for element in self.iterelements(depth):
yield element
yield self.chars
else:
yield self.chars
yield _str('</root>')
def fileInTestDir(name):
_testdir = os.path.dirname(__file__)
return os.path.join(_testdir, name)
def path2url(path):
return urlparse.urljoin(
'file:', pathname2url(path))
def fileUrlInTestDir(name):
return path2url(fileInTestDir(name))
def read_file(name, mode='r'):
with open(name, mode) as f:
data = f.read()
return data
def write_to_file(name, data, mode='w'):
with open(name, mode) as f:
f.write(data)
def readFileInTestDir(name, mode='r'):
return read_file(fileInTestDir(name), mode)
def canonicalize(xml):
tree = etree.parse(BytesIO(xml) if isinstance(xml, bytes) else StringIO(xml))
f = BytesIO()
tree.write_c14n(f)
return f.getvalue()
@contextmanager
def tmpfile(**kwargs):
handle, filename = tempfile.mkstemp(**kwargs)
try:
yield filename
finally:
os.close(handle)
os.remove(filename)
|
import tests
from pyVim import connect
class SoapAdapterTests(tests.VCRTestBase):
def test_invoke_method_login_session_exception(self):
def login_fail(*args, **kwargs):
raise vim_session.SESSION_EXCEPTIONS[0]()
stub = connect.SoapStubAdapter()
vim_session = connect.VimSessionOrientedStub(stub, login_fail)
self.assertRaises(SystemError, vim_session.InvokeAccessor, "mo", "info")
|
import aionotion
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.notion import DOMAIN, config_flow
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
@pytest.fixture
def mock_client():
"""Define a fixture for a client creation coroutine."""
return AsyncMock(return_value=None)
@pytest.fixture
def mock_aionotion(mock_client):
"""Mock the aionotion library."""
with patch("homeassistant.components.notion.config_flow.async_get_client") as mock_:
mock_.side_effect = mock_client
yield mock_
async def test_duplicate_error(hass):
"""Test that errors are shown when duplicates are added."""
conf = {CONF_USERNAME: "[email protected]", CONF_PASSWORD: "password123"}
MockConfigEntry(domain=DOMAIN, unique_id="[email protected]", data=conf).add_to_hass(
hass
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
@pytest.mark.parametrize(
"mock_client", [AsyncMock(side_effect=aionotion.errors.NotionError)]
)
async def test_invalid_credentials(hass, mock_aionotion):
"""Test that an invalid API/App Key throws an error."""
conf = {CONF_USERNAME: "[email protected]", CONF_PASSWORD: "password123"}
flow = config_flow.NotionFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=conf)
assert result["errors"] == {"base": "invalid_auth"}
async def test_show_form(hass):
"""Test that the form is served with no input."""
flow = config_flow.NotionFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=None)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_step_import(hass, mock_aionotion):
"""Test that the import step works."""
conf = {CONF_USERNAME: "[email protected]", CONF_PASSWORD: "password123"}
flow = config_flow.NotionFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_import(import_config=conf)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "[email protected]"
assert result["data"] == {
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "password123",
}
async def test_step_user(hass, mock_aionotion):
"""Test that the user step works."""
conf = {CONF_USERNAME: "[email protected]", CONF_PASSWORD: "password123"}
flow = config_flow.NotionFlowHandler()
flow.hass = hass
flow.context = {"source": SOURCE_USER}
result = await flow.async_step_user(user_input=conf)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "[email protected]"
assert result["data"] == {
CONF_USERNAME: "[email protected]",
CONF_PASSWORD: "password123",
}
|
import unittest
from mock import Mock
from trashcli.restore import TrashDirectories
class TestTrashDirectories(unittest.TestCase):
def setUp(self):
volume_of = lambda x: "volume_of(%s)" % x
getuid = Mock(return_value=123)
environ = {'HOME': '~'}
self.trash_directories = TrashDirectories(volume_of, getuid, environ)
def test_list_all_directories(self):
result = list(self.trash_directories.all_trash_directories(
['/', '/mnt']
))
assert ([
('~/.local/share/Trash', 'volume_of(~/.local/share/Trash)'),
('/.Trash/123', '/'),
('/.Trash-123', '/'),
('/mnt/.Trash/123', '/mnt'),
('/mnt/.Trash-123', '/mnt')] ==
result)
|
import asyncio
from functools import partial
import logging
from tellduslive import DIM, TURNON, UP, Session
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_SCAN_INTERVAL
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_call_later
from . import config_flow # noqa: F401
from .const import (
CONF_HOST,
DOMAIN,
KEY_SCAN_INTERVAL,
KEY_SESSION,
MIN_UPDATE_INTERVAL,
NOT_SO_PRIVATE_KEY,
PUBLIC_KEY,
SCAN_INTERVAL,
SIGNAL_UPDATE_ENTITY,
TELLDUS_DISCOVERY_NEW,
)
APPLICATION_NAME = "Home Assistant"
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_HOST, default=DOMAIN): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): vol.All(
cv.time_period, vol.Clamp(min=MIN_UPDATE_INTERVAL)
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
DATA_CONFIG_ENTRY_LOCK = "tellduslive_config_entry_lock"
CONFIG_ENTRY_IS_SETUP = "telldus_config_entry_is_setup"
NEW_CLIENT_TASK = "telldus_new_client_task"
INTERVAL_TRACKER = f"{DOMAIN}_INTERVAL"
async def async_setup_entry(hass, entry):
"""Create a tellduslive session."""
conf = entry.data[KEY_SESSION]
if CONF_HOST in conf:
# Session(**conf) does blocking IO when
# communicating with local devices.
session = await hass.async_add_executor_job(partial(Session, **conf))
else:
session = Session(
PUBLIC_KEY, NOT_SO_PRIVATE_KEY, application=APPLICATION_NAME, **conf
)
if not session.is_authorized:
_LOGGER.error("Authentication Error")
return False
hass.data[DATA_CONFIG_ENTRY_LOCK] = asyncio.Lock()
hass.data[CONFIG_ENTRY_IS_SETUP] = set()
hass.data[NEW_CLIENT_TASK] = hass.loop.create_task(
async_new_client(hass, session, entry)
)
return True
async def async_new_client(hass, session, entry):
"""Add the hubs associated with the current client to device_registry."""
interval = entry.data[KEY_SCAN_INTERVAL]
_LOGGER.debug("Update interval %s seconds", interval)
client = TelldusLiveClient(hass, entry, session, interval)
hass.data[DOMAIN] = client
dev_reg = await hass.helpers.device_registry.async_get_registry()
for hub in await client.async_get_hubs():
_LOGGER.debug("Connected hub %s", hub["name"])
dev_reg.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, hub["id"])},
manufacturer="Telldus",
name=hub["name"],
model=hub["type"],
sw_version=hub["version"],
)
await client.update()
async def async_setup(hass, config):
"""Set up the Telldus Live component."""
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={
CONF_HOST: config[DOMAIN].get(CONF_HOST),
KEY_SCAN_INTERVAL: config[DOMAIN][CONF_SCAN_INTERVAL],
},
)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
if not hass.data[NEW_CLIENT_TASK].done():
hass.data[NEW_CLIENT_TASK].cancel()
interval_tracker = hass.data.pop(INTERVAL_TRACKER)
interval_tracker()
await asyncio.wait(
[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in hass.data.pop(CONFIG_ENTRY_IS_SETUP)
]
)
del hass.data[DOMAIN]
del hass.data[DATA_CONFIG_ENTRY_LOCK]
return True
class TelldusLiveClient:
"""Get the latest data and update the states."""
def __init__(self, hass, config_entry, session, interval):
"""Initialize the Tellus data object."""
self._known_devices = set()
self._device_infos = {}
self._hass = hass
self._config_entry = config_entry
self._client = session
self._interval = interval
async def async_get_hubs(self):
"""Return hubs registered for the user."""
clients = await self._hass.async_add_executor_job(self._client.get_clients)
return clients or []
def device_info(self, device_id):
"""Return device info."""
return self._device_infos.get(device_id)
@staticmethod
def identify_device(device):
"""Find out what type of HA component to create."""
if device.is_sensor:
return "sensor"
if device.methods & DIM:
return "light"
if device.methods & UP:
return "cover"
if device.methods & TURNON:
return "switch"
if device.methods == 0:
return "binary_sensor"
_LOGGER.warning("Unidentified device type (methods: %d)", device.methods)
return "switch"
async def _discover(self, device_id):
"""Discover the component."""
device = self._client.device(device_id)
component = self.identify_device(device)
self._device_infos.update(
{device_id: await self._hass.async_add_executor_job(device.info)}
)
async with self._hass.data[DATA_CONFIG_ENTRY_LOCK]:
if component not in self._hass.data[CONFIG_ENTRY_IS_SETUP]:
await self._hass.config_entries.async_forward_entry_setup(
self._config_entry, component
)
self._hass.data[CONFIG_ENTRY_IS_SETUP].add(component)
device_ids = []
if device.is_sensor:
for item in device.items:
device_ids.append((device.device_id, item.name, item.scale))
else:
device_ids.append(device_id)
for _id in device_ids:
async_dispatcher_send(
self._hass, TELLDUS_DISCOVERY_NEW.format(component, DOMAIN), _id
)
async def update(self, *args):
"""Periodically poll the servers for current state."""
try:
if not await self._hass.async_add_executor_job(self._client.update):
_LOGGER.warning("Failed request")
return
dev_ids = {dev.device_id for dev in self._client.devices}
new_devices = dev_ids - self._known_devices
            # Await each discovery individually, as `gather` would use up all HTTPAdapter pools
for d_id in new_devices:
await self._discover(d_id)
self._known_devices |= new_devices
async_dispatcher_send(self._hass, SIGNAL_UPDATE_ENTITY)
finally:
self._hass.data[INTERVAL_TRACKER] = async_call_later(
self._hass, self._interval, self.update
)
def device(self, device_id):
"""Return device representation."""
return self._client.device(device_id)
def is_available(self, device_id):
"""Return device availability."""
return device_id in self._client.device_ids
|
import numpy as np
import six
def assert_is_semantic_segmentation_link(link, n_class):
"""Checks if a link satisfies semantic segmentation link APIs.
This function checks if a given link satisfies semantic segmentation link
APIs or not.
    If the link does not satisfy the APIs, this function raises an
:class:`AssertionError`.
Args:
link: A link to be checked.
n_class (int): The number of classes including background.
"""
imgs = [
np.random.randint(0, 256, size=(3, 480, 640)).astype(np.float32),
np.random.randint(0, 256, size=(3, 480, 320)).astype(np.float32)]
labels = link.predict(imgs)
assert len(labels) == len(imgs), \
        'The length of labels must be the same as that of imgs.'
for img, label in six.moves.zip(imgs, labels):
assert isinstance(label, np.ndarray), \
'label must be a numpy.ndarray.'
assert label.dtype == np.int32, \
'The type of label must be numpy.int32.'
assert label.shape == img.shape[1:], \
'The shape of label must be (H, W).'
assert label.min() >= 0 and label.max() < n_class, \
'The value of label must be in [0, n_class - 1].'
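# A minimal usage sketch (the model name is hypothetical): any link exposing
# the `predict` API described above can be checked like this:
#
#     model = MySegmentationModel(n_class=21)
#     assert_is_semantic_segmentation_link(model, n_class=21)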
|
from pygal import Line
from pygal.style import (
DarkenStyle, DesaturateStyle, LightenStyle, LightStyle, RotateStyle,
SaturateStyle
)
STYLES = LightenStyle, DarkenStyle, SaturateStyle, DesaturateStyle, RotateStyle
def test_parametric_styles():
"""Test that no parametric produce the same result"""
chart = None
for style in STYLES:
line = Line(style=style('#f4e83a'))
line.add('_', [1, 2, 3])
line.x_labels = 'abc'
new_chart = line.render()
assert chart != new_chart
chart = new_chart
def test_parametric_styles_with_parameters():
"""Test a parametric style with parameters"""
line = Line(
style=RotateStyle('#de3804', step=12, max_=180, base_style=LightStyle)
)
line.add('_', [1, 2, 3])
line.x_labels = 'abc'
assert line.render()
|
from .stateful_unit import StatefulUnit
class Vocabulary(StatefulUnit):
"""
Vocabulary class.
:param pad_value: The string value for the padding position.
:param oov_value: The string value for the out-of-vocabulary terms.
Examples:
>>> vocab = Vocabulary(pad_value='[PAD]', oov_value='[OOV]')
>>> vocab.fit(['A', 'B', 'C', 'D', 'E'])
>>> term_index = vocab.state['term_index']
>>> term_index # doctest: +SKIP
{'[PAD]': 0, '[OOV]': 1, 'D': 2, 'A': 3, 'B': 4, 'C': 5, 'E': 6}
>>> index_term = vocab.state['index_term']
>>> index_term # doctest: +SKIP
{0: '[PAD]', 1: '[OOV]', 2: 'D', 3: 'A', 4: 'B', 5: 'C', 6: 'E'}
>>> term_index['out-of-vocabulary-term']
1
>>> index_term[0]
'[PAD]'
>>> index_term[42]
Traceback (most recent call last):
...
KeyError: 42
>>> a_index = term_index['A']
>>> c_index = term_index['C']
>>> vocab.transform(['C', 'A', 'C']) == [c_index, a_index, c_index]
True
>>> vocab.transform(['C', 'A', '[OOV]']) == [c_index, a_index, 1]
True
>>> indices = vocab.transform(list('ABCDDZZZ'))
>>> ' '.join(vocab.state['index_term'][i] for i in indices)
'A B C D D [OOV] [OOV] [OOV]'
"""
def __init__(self, pad_value: str = '<PAD>', oov_value: str = '<OOV>'):
"""Vocabulary unit initializer."""
super().__init__()
self._pad = pad_value
self._oov = oov_value
self._context['term_index'] = self.TermIndex()
self._context['index_term'] = dict()
class TermIndex(dict):
"""Map term to index."""
def __missing__(self, key):
"""Map out-of-vocabulary terms to index 1."""
return 1
def fit(self, tokens: list):
"""Build a :class:`TermIndex` and a :class:`IndexTerm`."""
self._context['term_index'][self._pad] = 0
self._context['term_index'][self._oov] = 1
self._context['index_term'][0] = self._pad
self._context['index_term'][1] = self._oov
terms = set(tokens)
for index, term in enumerate(terms):
self._context['term_index'][term] = index + 2
self._context['index_term'][index + 2] = term
def transform(self, input_: list) -> list:
"""Transform a list of tokens to corresponding indices."""
return [self._context['term_index'][token] for token in input_]
class BertVocabulary(StatefulUnit):
"""
Vocabulary class.
:param pad_value: The string value for the padding position.
:param oov_value: The string value for the out-of-vocabulary terms.
Examples:
>>> vocab = BertVocabulary(pad_value='[PAD]', oov_value='[UNK]')
>>> indices = vocab.transform(list('ABCDDZZZ'))
"""
def __init__(self, pad_value: str = '[PAD]', oov_value: str = '[UNK]'):
"""Vocabulary unit initializer."""
super().__init__()
self._pad = pad_value
self._oov = oov_value
self._context['term_index'] = self.TermIndex()
self._context['index_term'] = {}
class TermIndex(dict):
"""Map term to index."""
def __missing__(self, key):
"""Map out-of-vocabulary terms to index 100 ."""
return 100
def fit(self, vocab_path: str):
"""Build a :class:`TermIndex` and a :class:`IndexTerm`."""
with open(vocab_path, 'r', encoding='utf-8') as vocab_file:
for idx, line in enumerate(vocab_file):
term = line.strip()
self._context['term_index'][term] = idx
self._context['index_term'][idx] = term
def transform(self, input_: list) -> list:
"""Transform a list of tokens to corresponding indices."""
return [self._context['term_index'][token] for token in input_]
|
import logging
from homeassistant.components import litejet
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
_LOGGER = logging.getLogger(__name__)
ATTR_NUMBER = "number"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up lights for the LiteJet platform."""
litejet_ = hass.data["litejet_system"]
devices = []
for i in litejet_.loads():
name = litejet_.get_load_name(i)
if not litejet.is_ignored(hass, name):
devices.append(LiteJetLight(hass, litejet_, i, name))
add_entities(devices, True)
class LiteJetLight(LightEntity):
"""Representation of a single LiteJet light."""
def __init__(self, hass, lj, i, name):
"""Initialize a LiteJet light."""
self._hass = hass
self._lj = lj
self._index = i
self._brightness = 0
self._name = name
lj.on_load_activated(i, self._on_load_changed)
lj.on_load_deactivated(i, self._on_load_changed)
def _on_load_changed(self):
"""Handle state changes."""
_LOGGER.debug("Updating due to notification for %s", self._name)
self.schedule_update_ha_state(True)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def name(self):
"""Return the light's name."""
return self._name
@property
def brightness(self):
"""Return the light's brightness."""
return self._brightness
@property
def is_on(self):
"""Return if the light is on."""
return self._brightness != 0
@property
def should_poll(self):
"""Return that lights do not require polling."""
return False
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return {ATTR_NUMBER: self._index}
def turn_on(self, **kwargs):
"""Turn on the light."""
if ATTR_BRIGHTNESS in kwargs:
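            # Scale HA's 0-255 brightness down to LiteJet's 0-99 load level.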
brightness = int(kwargs[ATTR_BRIGHTNESS] / 255 * 99)
self._lj.activate_load_at(self._index, brightness, 0)
else:
self._lj.activate_load(self._index)
def turn_off(self, **kwargs):
"""Turn off the light."""
self._lj.deactivate_load(self._index)
def update(self):
"""Retrieve the light's brightness from the LiteJet system."""
self._brightness = self._lj.get_load_level(self._index) / 99 * 255
|
from unittest import mock
from PyQt5.QtCore import Qt, PYQT_VERSION
import pytest
from qutebrowser.keyinput import basekeyparser, keyutils
from qutebrowser.utils import usertypes
# Alias because we need this a lot in here.
def keyseq(s):
return keyutils.KeySequence.parse(s)
def _create_keyparser(mode):
kp = basekeyparser.BaseKeyParser(mode=mode, win_id=0)
kp.execute = mock.Mock()
return kp
@pytest.fixture
def keyparser(key_config_stub, keyinput_bindings):
return _create_keyparser(usertypes.KeyMode.normal)
@pytest.fixture
def prompt_keyparser(key_config_stub, keyinput_bindings):
return _create_keyparser(usertypes.KeyMode.prompt)
@pytest.fixture
def handle_text():
"""Helper function to handle multiple fake keypresses."""
def func(kp, *args):
for key in args:
info = keyutils.KeyInfo(key, Qt.NoModifier)
kp.handle(info.to_event())
return func
class TestDebugLog:
"""Make sure _debug_log only logs when do_log is set."""
def test_log(self, keyparser, caplog):
keyparser._debug_log('foo')
assert caplog.messages == ['BaseKeyParser for mode normal: foo']
def test_no_log(self, keyparser, caplog):
keyparser._do_log = False
keyparser._debug_log('foo')
assert not caplog.records
@pytest.mark.parametrize('input_key, supports_count, count, command', [
    # (input_key, supports_count, count, command)
('10', True, '10', ''),
('10g', True, '10', 'g'),
('10e4g', True, '4', 'g'),
('g', True, '', 'g'),
('0', True, '', ''),
('10g', False, '', 'g'),
])
def test_split_count(config_stub, key_config_stub,
input_key, supports_count, count, command):
kp = basekeyparser.BaseKeyParser(mode=usertypes.KeyMode.normal, win_id=0,
supports_count=supports_count)
for info in keyseq(input_key):
kp.handle(info.to_event())
assert kp._count == count
assert kp._sequence == keyseq(command)
def test_empty_binding(keyparser, config_stub):
"""Make sure setting an empty binding doesn't crash."""
config_stub.val.bindings.commands = {'normal': {'co': ''}}
# The config is re-read automatically
@pytest.mark.parametrize('changed_mode, expected', [
('normal', True), ('command', False),
])
def test_read_config(keyparser, key_config_stub, changed_mode, expected):
keyparser._read_config()
# Sanity checks
assert keyseq('a') in keyparser.bindings
assert keyseq('new') not in keyparser.bindings
key_config_stub.bind(keyseq('new'), 'message-info new',
mode=changed_mode)
assert keyseq('a') in keyparser.bindings
assert (keyseq('new') in keyparser.bindings) == expected
class TestHandle:
def test_valid_key(self, prompt_keyparser, handle_text):
infos = [
keyutils.KeyInfo(Qt.Key_A, Qt.ControlModifier),
keyutils.KeyInfo(Qt.Key_X, Qt.ControlModifier),
]
for info in infos:
prompt_keyparser.handle(info.to_event())
prompt_keyparser.execute.assert_called_once_with(
'message-info ctrla', None)
assert not prompt_keyparser._sequence
def test_valid_key_count(self, prompt_keyparser):
infos = [
keyutils.KeyInfo(Qt.Key_5, Qt.NoModifier),
keyutils.KeyInfo(Qt.Key_A, Qt.ControlModifier),
]
for info in infos:
prompt_keyparser.handle(info.to_event())
prompt_keyparser.execute.assert_called_once_with(
'message-info ctrla', 5)
@pytest.mark.parametrize('keys', [
[(Qt.Key_B, Qt.NoModifier), (Qt.Key_C, Qt.NoModifier)],
[(Qt.Key_A, Qt.ControlModifier | Qt.AltModifier)],
# Only modifier
[(Qt.Key_Shift, Qt.ShiftModifier)],
])
def test_invalid_keys(self, prompt_keyparser, keys):
for key, modifiers in keys:
info = keyutils.KeyInfo(key, modifiers)
prompt_keyparser.handle(info.to_event())
assert not prompt_keyparser.execute.called
assert not prompt_keyparser._sequence
def test_dry_run(self, prompt_keyparser):
b_info = keyutils.KeyInfo(Qt.Key_B, Qt.NoModifier)
prompt_keyparser.handle(b_info.to_event())
a_info = keyutils.KeyInfo(Qt.Key_A, Qt.NoModifier)
prompt_keyparser.handle(a_info.to_event(), dry_run=True)
assert not prompt_keyparser.execute.called
assert prompt_keyparser._sequence
def test_dry_run_count(self, prompt_keyparser):
info = keyutils.KeyInfo(Qt.Key_9, Qt.NoModifier)
prompt_keyparser.handle(info.to_event(), dry_run=True)
assert not prompt_keyparser._count
def test_invalid_key(self, prompt_keyparser):
keys = [Qt.Key_B, 0x0]
for key in keys:
info = keyutils.KeyInfo(key, Qt.NoModifier)
prompt_keyparser.handle(info.to_event())
assert not prompt_keyparser._sequence
def test_valid_keychain(self, handle_text, prompt_keyparser):
handle_text(prompt_keyparser,
# Press 'x' which is ignored because of no match
Qt.Key_X,
# Then start the real chain
Qt.Key_B, Qt.Key_A)
prompt_keyparser.execute.assert_called_with('message-info ba', None)
assert not prompt_keyparser._sequence
@pytest.mark.parametrize('key, modifiers, number', [
(Qt.Key_0, Qt.NoModifier, 0),
(Qt.Key_1, Qt.NoModifier, 1),
(Qt.Key_1, Qt.KeypadModifier, 1),
])
def test_number_press(self, prompt_keyparser,
key, modifiers, number):
prompt_keyparser.handle(keyutils.KeyInfo(key, modifiers).to_event())
command = 'message-info {}'.format(number)
prompt_keyparser.execute.assert_called_once_with(command, None)
assert not prompt_keyparser._sequence
@pytest.mark.parametrize('modifiers, text', [
(Qt.NoModifier, '2'),
(Qt.KeypadModifier, 'num-2'),
])
def test_number_press_keypad(self, keyparser, config_stub,
modifiers, text):
"""Make sure a <Num+2> binding overrides the 2 binding."""
config_stub.val.bindings.commands = {'normal': {
'2': 'message-info 2',
'<Num+2>': 'message-info num-2'}}
keyparser.handle(keyutils.KeyInfo(Qt.Key_2, modifiers).to_event())
command = 'message-info {}'.format(text)
keyparser.execute.assert_called_once_with(command, None)
assert not keyparser._sequence
def test_umlauts(self, handle_text, keyparser, config_stub):
config_stub.val.bindings.commands = {'normal': {'ü': 'message-info ü'}}
handle_text(keyparser, Qt.Key_Udiaeresis)
keyparser.execute.assert_called_once_with('message-info ü', None)
def test_mapping(self, config_stub, handle_text, prompt_keyparser):
handle_text(prompt_keyparser, Qt.Key_X)
prompt_keyparser.execute.assert_called_once_with(
'message-info a', None)
def test_mapping_keypad(self, config_stub, keyparser):
"""Make sure falling back to non-numpad keys works with mappings."""
config_stub.val.bindings.commands = {'normal': {'a': 'nop'}}
config_stub.val.bindings.key_mappings = {'1': 'a'}
info = keyutils.KeyInfo(Qt.Key_1, Qt.KeypadModifier)
keyparser.handle(info.to_event())
keyparser.execute.assert_called_once_with('nop', None)
def test_binding_and_mapping(self, config_stub, handle_text, prompt_keyparser):
"""with a conflicting binding/mapping, the binding should win."""
handle_text(prompt_keyparser, Qt.Key_B)
assert not prompt_keyparser.execute.called
def test_mapping_in_key_chain(self, config_stub, handle_text, keyparser):
"""A mapping should work even as part of a keychain."""
config_stub.val.bindings.commands = {'normal':
{'aa': 'message-info aa'}}
handle_text(keyparser, Qt.Key_A, Qt.Key_X)
keyparser.execute.assert_called_once_with('message-info aa', None)
def test_binding_with_shift(self, prompt_keyparser):
"""Simulate a binding which involves shift."""
for key, modifiers in [(Qt.Key_Y, Qt.NoModifier),
(Qt.Key_Shift, Qt.ShiftModifier),
(Qt.Key_Y, Qt.ShiftModifier)]:
info = keyutils.KeyInfo(key, modifiers)
prompt_keyparser.handle(info.to_event())
prompt_keyparser.execute.assert_called_once_with('yank -s', None)
def test_partial_before_full_match(self, keyparser, config_stub):
"""Make sure full matches always take precedence over partial ones."""
config_stub.val.bindings.commands = {
'normal': {
'ab': 'message-info bar',
'a': 'message-info foo'
}
}
info = keyutils.KeyInfo(Qt.Key_A, Qt.NoModifier)
keyparser.handle(info.to_event())
keyparser.execute.assert_called_once_with('message-info foo', None)
class TestCount:
"""Test execute() with counts."""
def test_no_count(self, handle_text, prompt_keyparser):
"""Test with no count added."""
handle_text(prompt_keyparser, Qt.Key_B, Qt.Key_A)
prompt_keyparser.execute.assert_called_once_with(
'message-info ba', None)
assert not prompt_keyparser._sequence
def test_count_0(self, handle_text, prompt_keyparser):
handle_text(prompt_keyparser, Qt.Key_0, Qt.Key_B, Qt.Key_A)
calls = [mock.call('message-info 0', None),
mock.call('message-info ba', None)]
prompt_keyparser.execute.assert_has_calls(calls)
assert not prompt_keyparser._sequence
def test_count_42(self, handle_text, prompt_keyparser):
handle_text(prompt_keyparser, Qt.Key_4, Qt.Key_2, Qt.Key_B, Qt.Key_A)
prompt_keyparser.execute.assert_called_once_with('message-info ba', 42)
assert not prompt_keyparser._sequence
def test_count_42_invalid(self, handle_text, prompt_keyparser):
# Invalid call with ccx gets ignored
handle_text(prompt_keyparser,
Qt.Key_4, Qt.Key_2, Qt.Key_C, Qt.Key_C, Qt.Key_X)
assert not prompt_keyparser.execute.called
assert not prompt_keyparser._sequence
# Valid call with ccc gets the correct count
handle_text(prompt_keyparser,
Qt.Key_2, Qt.Key_3, Qt.Key_C, Qt.Key_C, Qt.Key_C)
prompt_keyparser.execute.assert_called_once_with(
'message-info ccc', 23)
assert not prompt_keyparser._sequence
def test_superscript(self, handle_text, prompt_keyparser):
# https://github.com/qutebrowser/qutebrowser/issues/3743
handle_text(prompt_keyparser, Qt.Key_twosuperior, Qt.Key_B, Qt.Key_A)
@pytest.mark.skipif(PYQT_VERSION == 0x050F01,
reason='waitSignals is broken in PyQt 5.15.1')
def test_count_keystring_update(self, qtbot,
handle_text, prompt_keyparser):
"""Make sure the keystring is updated correctly when entering count."""
with qtbot.waitSignals([
prompt_keyparser.keystring_updated,
prompt_keyparser.keystring_updated]) as blocker:
handle_text(prompt_keyparser, Qt.Key_4, Qt.Key_2)
sig1, sig2 = blocker.all_signals_and_args
assert sig1.args == ('4',)
assert sig2.args == ('42',)
def test_numpad(self, prompt_keyparser):
"""Make sure we can enter a count via numpad."""
for key, modifiers in [(Qt.Key_4, Qt.KeypadModifier),
(Qt.Key_2, Qt.KeypadModifier),
(Qt.Key_B, Qt.NoModifier),
(Qt.Key_A, Qt.NoModifier)]:
info = keyutils.KeyInfo(key, modifiers)
prompt_keyparser.handle(info.to_event())
prompt_keyparser.execute.assert_called_once_with('message-info ba', 42)
def test_clear_keystring(qtbot, keyparser):
"""Test that the keystring is cleared and the signal is emitted."""
keyparser._sequence = keyseq('test')
keyparser._count = '23'
with qtbot.waitSignal(keyparser.keystring_updated):
keyparser.clear_keystring()
assert not keyparser._sequence
assert not keyparser._count
def test_clear_keystring_empty(qtbot, keyparser):
"""Test that no signal is emitted when clearing an empty keystring.."""
keyparser._sequence = keyseq('')
with qtbot.assert_not_emitted(keyparser.keystring_updated):
keyparser.clear_keystring()
|
from datetime import timedelta
from prayer_times_calculator.exceptions import InvalidResponseError
from homeassistant import config_entries
from homeassistant.components import islamic_prayer_times
from homeassistant.setup import async_setup_component
from . import (
NEW_PRAYER_TIMES,
NEW_PRAYER_TIMES_TIMESTAMPS,
NOW,
PRAYER_TIMES,
PRAYER_TIMES_TIMESTAMPS,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_setup_with_config(hass, legacy_patchable_time):
"""Test that we import the config and setup the client."""
config = {
islamic_prayer_times.DOMAIN: {islamic_prayer_times.CONF_CALC_METHOD: "isna"}
}
with patch(
"prayer_times_calculator.PrayerTimesCalculator.fetch_prayer_times",
return_value=PRAYER_TIMES,
):
assert (
await async_setup_component(hass, islamic_prayer_times.DOMAIN, config)
is True
)
await hass.async_block_till_done()
async def test_successful_config_entry(hass, legacy_patchable_time):
"""Test that Islamic Prayer Times is configured successfully."""
entry = MockConfigEntry(
domain=islamic_prayer_times.DOMAIN,
data={},
)
entry.add_to_hass(hass)
with patch(
"prayer_times_calculator.PrayerTimesCalculator.fetch_prayer_times",
return_value=PRAYER_TIMES,
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == config_entries.ENTRY_STATE_LOADED
assert entry.options == {
islamic_prayer_times.CONF_CALC_METHOD: islamic_prayer_times.DEFAULT_CALC_METHOD
}
async def test_setup_failed(hass, legacy_patchable_time):
"""Test Islamic Prayer Times failed due to an error."""
entry = MockConfigEntry(
domain=islamic_prayer_times.DOMAIN,
data={},
)
entry.add_to_hass(hass)
# test request error raising ConfigEntryNotReady
with patch(
"prayer_times_calculator.PrayerTimesCalculator.fetch_prayer_times",
side_effect=InvalidResponseError(),
):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == config_entries.ENTRY_STATE_SETUP_RETRY
async def test_unload_entry(hass, legacy_patchable_time):
"""Test removing Islamic Prayer Times."""
entry = MockConfigEntry(
domain=islamic_prayer_times.DOMAIN,
data={},
)
entry.add_to_hass(hass)
with patch(
"prayer_times_calculator.PrayerTimesCalculator.fetch_prayer_times",
return_value=PRAYER_TIMES,
):
await hass.config_entries.async_setup(entry.entry_id)
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == config_entries.ENTRY_STATE_NOT_LOADED
assert islamic_prayer_times.DOMAIN not in hass.data
async def test_islamic_prayer_times_timestamp_format(hass, legacy_patchable_time):
"""Test Islamic prayer times timestamp format."""
entry = MockConfigEntry(domain=islamic_prayer_times.DOMAIN, data={})
entry.add_to_hass(hass)
with patch(
"prayer_times_calculator.PrayerTimesCalculator.fetch_prayer_times",
return_value=PRAYER_TIMES,
), patch("homeassistant.util.dt.now", return_value=NOW):
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert (
hass.data[islamic_prayer_times.DOMAIN].prayer_times_info
== PRAYER_TIMES_TIMESTAMPS
)
async def test_update(hass, legacy_patchable_time):
"""Test sensors are updated with new prayer times."""
entry = MockConfigEntry(domain=islamic_prayer_times.DOMAIN, data={})
entry.add_to_hass(hass)
with patch(
"prayer_times_calculator.PrayerTimesCalculator.fetch_prayer_times"
) as FetchPrayerTimes, patch("homeassistant.util.dt.now", return_value=NOW):
FetchPrayerTimes.side_effect = [
PRAYER_TIMES,
PRAYER_TIMES,
NEW_PRAYER_TIMES,
]
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
pt_data = hass.data[islamic_prayer_times.DOMAIN]
assert pt_data.prayer_times_info == PRAYER_TIMES_TIMESTAMPS
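        # Jump past the scheduled midnight refresh so the integration
        # fetches the new prayer times.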
future = pt_data.prayer_times_info["Midnight"] + timedelta(days=1, minutes=1)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert (
hass.data[islamic_prayer_times.DOMAIN].prayer_times_info
== NEW_PRAYER_TIMES_TIMESTAMPS
)
|
import vcr
from urllib.request import urlopen
def true_matcher(r1, r2):
return True
def false_matcher(r1, r2):
return False
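# With a matcher that always returns True every request "matches" the first
# recording, so a single cassette entry serves all requests; with a matcher
# that always returns False nothing matches, so each request is recorded anew.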
def test_registered_true_matcher(tmpdir, httpbin):
my_vcr = vcr.VCR()
my_vcr.register_matcher("true", true_matcher)
testfile = str(tmpdir.join("test.yml"))
with my_vcr.use_cassette(testfile, match_on=["true"]):
# These 2 different urls are stored as the same request
urlopen(httpbin.url)
urlopen(httpbin.url + "/get")
with my_vcr.use_cassette(testfile, match_on=["true"]):
# I can get the response twice even though I only asked for it once
urlopen(httpbin.url + "/get")
urlopen(httpbin.url + "/get")
def test_registered_false_matcher(tmpdir, httpbin):
my_vcr = vcr.VCR()
my_vcr.register_matcher("false", false_matcher)
testfile = str(tmpdir.join("test.yml"))
with my_vcr.use_cassette(testfile, match_on=["false"]) as cass:
# These 2 different urls are stored as different requests
urlopen(httpbin.url)
urlopen(httpbin.url + "/get")
assert len(cass) == 2
|
import mock
from pyramid import testing
from paasta_tools.api.views.service import list_instances
from paasta_tools.api.views.service import list_services_for_cluster
@mock.patch(
"paasta_tools.api.views.service.list_all_instances_for_service", autospec=True
)
def test_list_instances(mock_list_all_instances_for_service):
fake_instances = ["fake_instance_a", "fake_instance_b", "fake_instance_c"]
mock_list_all_instances_for_service.return_value = fake_instances
request = testing.DummyRequest()
request.swagger_data = {"service": "fake_service"}
response = list_instances(request)
assert response["instances"] == fake_instances
@mock.patch("paasta_tools.api.views.service.get_services_for_cluster", autospec=True)
def test_list_services_for_cluster(mock_get_services_for_cluster):
fake_services_and_instances = [
("fake_service", "fake_instance_a"),
("fake_service", "fake_instance_b"),
("fake_service", "fake_instance_c"),
]
mock_get_services_for_cluster.return_value = fake_services_and_instances
request = testing.DummyRequest()
response = list_services_for_cluster(request)
assert response["services"] == [
("fake_service", "fake_instance_a"),
("fake_service", "fake_instance_b"),
("fake_service", "fake_instance_c"),
]
|
import numpy as np
from ...transforms import (combine_transforms, invert_transform, Transform,
_quat_to_affine, _fit_matched_points, apply_trans,
get_ras_to_neuromag_trans)
from ...utils import logger
from ..constants import FIFF
from .constants import CTF
def _make_transform_card(fro, to, r_lpa, r_nasion, r_rpa):
"""Make a transform from cardinal landmarks."""
return invert_transform(Transform(
to, fro, get_ras_to_neuromag_trans(r_nasion, r_lpa, r_rpa)))
def _quaternion_align(from_frame, to_frame, from_pts, to_pts, diff_tol=1e-4):
"""Perform an alignment using the unit quaternions (modifies points)."""
assert from_pts.shape[1] == to_pts.shape[1] == 3
trans = _quat_to_affine(_fit_matched_points(from_pts, to_pts)[0])
    # Test the transformation and log the results
logger.info(' Quaternion matching (desired vs. transformed):')
for fro, to in zip(from_pts, to_pts):
rr = apply_trans(trans, fro)
diff = np.linalg.norm(to - rr)
logger.info(' %7.2f %7.2f %7.2f mm <-> %7.2f %7.2f %7.2f mm '
'(orig : %7.2f %7.2f %7.2f mm) diff = %8.3f mm'
% (tuple(1000 * to) + tuple(1000 * rr) +
tuple(1000 * fro) + (1000 * diff,)))
if diff > diff_tol:
raise RuntimeError('Something is wrong: quaternion matching did '
'not work (see above)')
return Transform(from_frame, to_frame, trans)
def _make_ctf_coord_trans_set(res4, coils):
"""Figure out the necessary coordinate transforms."""
# CTF head > Neuromag head
lpa = rpa = nas = T1 = T2 = T3 = T5 = None
if coils is not None:
for p in coils:
if p['valid'] and (p['coord_frame'] ==
FIFF.FIFFV_MNE_COORD_CTF_HEAD):
if lpa is None and p['kind'] == CTF.CTFV_COIL_LPA:
lpa = p
elif rpa is None and p['kind'] == CTF.CTFV_COIL_RPA:
rpa = p
elif nas is None and p['kind'] == CTF.CTFV_COIL_NAS:
nas = p
if lpa is None or rpa is None or nas is None:
            raise RuntimeError('Some of the mandatory HPI head-coordinate '
                               'info was not there.')
t = _make_transform_card('head', 'ctf_head',
lpa['r'], nas['r'], rpa['r'])
T3 = invert_transform(t)
# CTF device -> Neuromag device
#
# Rotate the CTF coordinate frame by 45 degrees and shift by 190 mm
# in z direction to get a coordinate system comparable to the Neuromag one
#
R = np.eye(4)
R[:3, 3] = [0., 0., 0.19]
val = 0.5 * np.sqrt(2.)
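    # val == cos(45 deg) == sin(45 deg), so the 2x2 block below is a
    # 45 degree rotation about the z axis.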
R[0, 0] = val
R[0, 1] = -val
R[1, 0] = val
R[1, 1] = val
T4 = Transform('ctf_meg', 'meg', R)
# CTF device -> CTF head
# We need to make the implicit transform explicit!
h_pts = dict()
d_pts = dict()
kinds = (CTF.CTFV_COIL_LPA, CTF.CTFV_COIL_RPA, CTF.CTFV_COIL_NAS,
CTF.CTFV_COIL_SPARE)
if coils is not None:
for p in coils:
if p['valid']:
if p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_HEAD:
for kind in kinds:
if kind not in h_pts and p['kind'] == kind:
h_pts[kind] = p['r']
elif p['coord_frame'] == FIFF.FIFFV_MNE_COORD_CTF_DEVICE:
for kind in kinds:
if kind not in d_pts and p['kind'] == kind:
d_pts[kind] = p['r']
if any(kind not in h_pts for kind in kinds[:-1]):
        raise RuntimeError('Some of the mandatory HPI head-coordinate '
                           'info was not there.')
if any(kind not in d_pts for kind in kinds[:-1]):
        raise RuntimeError('Some of the mandatory HPI device-coordinate '
                           'info was not there.')
use_kinds = [kind for kind in kinds
if (kind in h_pts and kind in d_pts)]
r_head = np.array([h_pts[kind] for kind in use_kinds])
r_dev = np.array([d_pts[kind] for kind in use_kinds])
T2 = _quaternion_align('ctf_meg', 'ctf_head', r_dev, r_head)
# The final missing transform
if T3 is not None and T2 is not None:
T5 = combine_transforms(T2, T3, 'ctf_meg', 'head')
T1 = combine_transforms(invert_transform(T4), T5, 'meg', 'head')
s = dict(t_dev_head=T1, t_ctf_dev_ctf_head=T2, t_ctf_head_head=T3,
t_ctf_dev_dev=T4, t_ctf_dev_head=T5)
logger.info(' Coordinate transformations established.')
return s
|
from absl import flags
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
JAVA_HOME = '/usr'
flags.DEFINE_string('openjdk_version', None, 'Version of openjdk to use. '
'By default, the version of openjdk is automatically '
'detected.')
def _OpenJdkPackage(vm, format_string):
version = FLAGS.openjdk_version
if version is None:
# Only install Java 7 if Java 8 is not available.
if (vm.HasPackage(format_string.format('7'))
and not vm.HasPackage(format_string.format('8'))):
version = '7'
else:
version = '8'
return format_string.format(version)
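# For example, with the 'openjdk-{0}-jdk' format string this returns
# 'openjdk-8-jdk' when Java 8 is packaged, and 'openjdk-7-jdk' only when
# Java 7 is packaged but Java 8 is not.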
def YumInstall(vm):
"""Installs the OpenJDK package on the VM."""
vm.InstallPackages(_OpenJdkPackage(vm, 'java-1.{0}.0-openjdk-devel'))
@vm_util.Retry()
def _AddRepository(vm):
"""Install could fail when Ubuntu keyservers are overloaded."""
vm.RemoteCommand(
'sudo add-apt-repository -y ppa:openjdk-r/ppa && sudo apt-get update')
def AptInstall(vm):
"""Installs the OpenJDK package on the VM."""
package_name = _OpenJdkPackage(vm, 'openjdk-{0}-jdk')
if not vm.HasPackage(package_name):
_AddRepository(vm)
vm.InstallPackages(package_name)
# Populate the ca-certificates-java's trustAnchors parameter.
vm.RemoteCommand(
'sudo /var/lib/dpkg/info/ca-certificates-java.postinst configure')
|
import diamond.collector
import diamond.convertor
try:
import boto
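    # Reference the module so linters do not flag the import as unused.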
boto
from boto.s3.connection import S3Connection
except ImportError:
boto = None
class S3BucketCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(S3BucketCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(S3BucketCollector, self).get_default_config()
config.update({
'path': 'aws.s3',
'byte_unit': 'byte'
})
return config
def getBucket(self, aws_access, aws_secret, bucket_name):
self.log.info("S3: Open Bucket, %s, %s, %s" % (bucket_name, aws_access,
aws_secret))
s3 = S3Connection(aws_access, aws_secret)
return s3.lookup(bucket_name)
def getBucketSize(self, bucket):
total_bytes = 0
for key in bucket:
total_bytes += key.size
return total_bytes
def collect(self):
"""
Collect s3 bucket stats
"""
if boto is None:
self.log.error("Unable to import boto python module")
return {}
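        # Expects configuration of roughly this shape, where 'some_instance'
        # is an arbitrary name:
        #
        #   s3:
        #     some_instance:
        #       aws_access_key: ...
        #       aws_secret_key: ...
        #       buckets: bucket_a, bucket_b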
for s3instance in self.config['s3']:
self.log.info("S3: byte_unit: %s" % self.config['byte_unit'])
aws_access = self.config['s3'][s3instance]['aws_access_key']
aws_secret = self.config['s3'][s3instance]['aws_secret_key']
for bucket_name in self.config['s3'][s3instance]['buckets']:
bucket = self.getBucket(aws_access, aws_secret, bucket_name)
# collect bucket size
total_size = self.getBucketSize(bucket)
for byte_unit in self.config['byte_unit']:
new_size = diamond.convertor.binary.convert(
value=total_size,
oldUnit='byte',
newUnit=byte_unit
)
self.publish("%s.size.%s" % (bucket_name, byte_unit),
new_size)
|
from homeassistant.components.mqtt.models import Message
from tests.common import MockConfigEntry
async def test_mqtt_abort_if_existing_entry(hass, mqtt_mock):
"""Check MQTT flow aborts when an entry already exist."""
MockConfigEntry(domain="tasmota").add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
"tasmota", context={"source": "mqtt"}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
async def test_mqtt_abort_invalid_topic(hass, mqtt_mock):
"""Check MQTT flow aborts if discovery topic is invalid."""
discovery_info = Message("", "", 0, False, subscribed_topic="custom_prefix/##")
result = await hass.config_entries.flow.async_init(
"tasmota", context={"source": "mqtt"}, data=discovery_info
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_discovery_info"
async def test_mqtt_setup(hass, mqtt_mock) -> None:
"""Test we can finish a config flow through MQTT with custom prefix."""
discovery_info = Message("", "", 0, False, subscribed_topic="custom_prefix/123/#")
result = await hass.config_entries.flow.async_init(
"tasmota", context={"source": "mqtt"}, data=discovery_info
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "create_entry"
assert result["result"].data == {
"discovery_prefix": "custom_prefix/123",
}
async def test_user_setup(hass, mqtt_mock):
"""Test we can finish a config flow."""
result = await hass.config_entries.flow.async_init(
"tasmota", context={"source": "user"}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == "create_entry"
assert result["result"].data == {
"discovery_prefix": "tasmota/discovery",
}
async def test_user_setup_advanced(hass, mqtt_mock):
"""Test we can finish a config flow."""
result = await hass.config_entries.flow.async_init(
"tasmota", context={"source": "user", "show_advanced_options": True}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"discovery_prefix": "test_tasmota/discovery"}
)
assert result["type"] == "create_entry"
assert result["result"].data == {
"discovery_prefix": "test_tasmota/discovery",
}
async def test_user_setup_advanced_strip_wildcard(hass, mqtt_mock):
"""Test we can finish a config flow."""
result = await hass.config_entries.flow.async_init(
"tasmota", context={"source": "user", "show_advanced_options": True}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"discovery_prefix": "test_tasmota/discovery/#"}
)
assert result["type"] == "create_entry"
assert result["result"].data == {
"discovery_prefix": "test_tasmota/discovery",
}
async def test_user_setup_invalid_topic_prefix(hass, mqtt_mock):
"""Test abort on invalid discovery topic."""
result = await hass.config_entries.flow.async_init(
"tasmota", context={"source": "user", "show_advanced_options": True}
)
assert result["type"] == "form"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {"discovery_prefix": "tasmota/config/##"}
)
assert result["type"] == "form"
assert result["errors"]["base"] == "invalid_discovery_topic"
async def test_user_single_instance(hass, mqtt_mock):
"""Test we only allow a single config flow."""
MockConfigEntry(domain="tasmota").add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
"tasmota", context={"source": "user"}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
|
import itertools
from datetime import datetime
from pytest import mark
from cerberus import errors, Validator
from cerberus.tests import (
assert_document_error,
assert_fail,
assert_has_error,
assert_not_has_error,
assert_success,
)
from cerberus.tests.conftest import sample_schema
def test_empty_document():
assert_document_error(None, sample_schema, None, errors.DOCUMENT_MISSING)
def test_bad_document_type():
document = "not a dict"
assert_document_error(
document, sample_schema, None, errors.DOCUMENT_FORMAT.format(document)
)
def test_unknown_field(validator):
field = 'surname'
assert_fail(
{field: 'doe'},
validator=validator,
error=(field, (), errors.UNKNOWN_FIELD, None),
)
assert validator.errors == {field: ['unknown field']}
def test_empty_field_definition(document):
field = 'name'
schema = {field: {}}
assert_success(document, schema)
def test_bad_valuesrules():
field = 'a_dict_with_valuesrules'
schema_field = 'a_string'
value = {schema_field: 'not an integer'}
exp_child_errors = [
(
(field, schema_field),
(field, 'valuesrules', 'type'),
errors.TYPE,
('integer',),
)
]
assert_fail(
{field: value},
error=(
field,
(field, 'valuesrules'),
errors.VALUESRULES,
{'type': ('integer',)},
),
child_errors=exp_child_errors,
)
def test_validate_update():
assert_success(
{
'an_integer': 100,
'a_dict': {'address': 'adr'},
'a_list_of_dicts': [{'sku': 'let'}],
},
update=True,
)
@mark.parametrize(
"document",
[
{'a_boolean': True},
{'a_datetime': datetime.now()},
{'a_float': 3.5},
{
'a_list_of_dicts': [
{'sku': 'AK345', 'price': 100},
{'sku': 'YZ069', 'price': 25},
]
},
{'a_list_of_integers': [99, 100]},
{'a_list_of_values': ['hello', 100]},
{'a_number': 3},
{'a_number': 3.5},
{'a_restricted_string': 'client'},
{'a_string': 'john doe'},
{'an_array': ['agent', 'client']},
{'an_integer': 50},
],
)
def test_success_with_multiple_rules(document):
assert_success(document)
def test_one_of_two_types(validator):
field = 'one_or_more_strings'
assert_success({field: 'foo'})
assert_success({field: ['foo', 'bar']})
exp_child_errors = [
((field, 1), (field, 'itemsrules', 'type'), errors.TYPE, ('string',))
]
assert_fail(
{field: ['foo', 23]},
validator=validator,
error=(field, (field, 'itemsrules'), errors.ITEMSRULES, {'type': ('string',)}),
child_errors=exp_child_errors,
)
assert_fail(
{field: 23}, error=((field,), (field, 'type'), errors.TYPE, ('string', 'list'))
)
assert validator.errors == {
field: [{1: ["must be one of these types: ('string',)"]}]
}
def test_custom_validator():
class MyValidator(Validator):
def _validate_isodd(self, isodd, field, value):
""" {'type': 'boolean'} """
if isodd and not bool(value & 1):
self._error(field, 'Not an odd number')
schema = {'test_field': {'isodd': True}}
validator = MyValidator(schema)
assert_success({'test_field': 7}, validator=validator)
assert_fail(
{'test_field': 6},
validator=validator,
error=('test_field', (), errors.CUSTOM, None, ('Not an odd number',)),
)
assert validator.errors == {'test_field': ['Not an odd number']}
def test_ignore_none_values():
# original commits:
# 96532fc8efbc0b057dd6cd23d0324c8c5a929456
# d6422991c41587467673716cb6e4e929fa9d7b77
field = 'test'
schema = {field: {'type': ('string',), 'empty': False, 'required': False}}
document = {field: None}
# Test normal behaviour
validator = Validator(schema, ignore_none_values=False)
assert_fail(document, validator=validator)
validator.schema[field]['required'] = True
validator.schema.validate()
_errors = assert_fail(document, validator=validator)
assert_not_has_error(
_errors, field, (field, 'required'), errors.REQUIRED_FIELD, True
)
# Test ignore None behaviour
validator = Validator(schema, ignore_none_values=True)
validator.schema[field]['required'] = False
validator.schema.validate()
assert_success(document, validator=validator)
validator.schema[field]['required'] = True
assert validator.schema[field].get('required') is True
_errors = assert_fail(document=document, validator=validator)
assert_has_error(_errors, field, (field, 'required'), errors.REQUIRED_FIELD, True)
assert_not_has_error(_errors, field, (field, 'type'), errors.TYPE, 'string')
def test_unknown_keys():
schema = {}
# test that unknown fields are allowed when allow_unknown is True.
v = Validator(allow_unknown=True, schema=schema)
assert_success({"unknown1": True, "unknown2": "yes"}, validator=v)
# test that unknown fields are allowed only if they meet the
# allow_unknown schema when provided.
v.allow_unknown = {'type': 'string'}
assert_success(document={'name': 'mark'}, validator=v)
assert_fail({"name": 1}, validator=v)
# test that unknown fields are not allowed if allow_unknown is False
v.allow_unknown = False
assert_fail({'name': 'mark'}, validator=v)
def test_unknown_key_dict(validator):
# https://github.com/pyeve/cerberus/issues/177
validator.allow_unknown = True
document = {'a_dict': {'foo': 'foo_value', 'bar': 25}}
assert_success(document, {}, validator=validator)
def test_unknown_key_list(validator):
# https://github.com/pyeve/cerberus/issues/177
validator.allow_unknown = True
document = {'a_dict': ['foo', 'bar']}
assert_success(document, {}, validator=validator)
def test_unknown_keys_list_of_dicts(validator):
# test that allow_unknown is honored even for subdicts in lists.
# https://github.com/pyeve/cerberus/issues/67.
validator.allow_unknown = True
document = {'a_list_of_dicts': [{'sku': 'YZ069', 'price': 25, 'extra': True}]}
assert_success(document, validator=validator)
def test_unknown_keys_retain_custom_rules():
# test that allow_unknown schema respect custom validation rules.
    # https://github.com/pyeve/cerberus/issues/66
class CustomValidator(Validator):
def _check_with_foo(self, field, value):
return value == "foo"
validator = CustomValidator({})
validator.allow_unknown = {"check_with": "foo"}
assert_success(document={"fred": "foo", "barney": "foo"}, validator=validator)
def test_callable_validator():
"""
    A Validator instance is callable and acts as a shorthand
    passthrough to validate().
"""
schema = {'test_field': {'type': 'string'}}
validator = Validator(schema)
assert validator.validate({'test_field': 'foo'})
assert validator({'test_field': 'foo'})
assert not validator.validate({'test_field': 1})
assert not validator({'test_field': 1})
def test_self_root_document():
"""
Make sure self.root_document is always the root document. See:
* https://github.com/pyeve/cerberus/pull/42
* https://github.com/pyeve/eve/issues/295
"""
class MyValidator(Validator):
def _validate_root_doc(self, root_doc, field, value):
""" {'type': 'boolean'} """
if 'sub' not in self.root_document or len(self.root_document['sub']) != 2:
self._error(field, 'self.context is not the root doc!')
schema = {
'sub': {
'type': 'list',
'root_doc': True,
'itemsrules': {
'type': 'dict',
'schema': {'foo': {'type': 'string', 'root_doc': True}},
},
}
}
assert_success(
{'sub': [{'foo': 'bar'}, {'foo': 'baz'}]}, validator=MyValidator(schema)
)
def test_validated(validator):
validator.schema = {'property': {'type': 'string'}}
document = {'property': 'string'}
assert validator.validated(document) == document
document = {'property': 0}
assert validator.validated(document) is None
def test_issue_107(validator):
# https://github.com/pyeve/cerberus/issues/107
schema = {
'info': {
'type': 'dict',
'schema': {'name': {'type': 'string', 'required': True}},
}
}
document = {'info': {'name': 'my name'}}
assert_success(document, schema, validator=validator)
v = Validator(schema)
assert_success(document, schema, v)
# it once was observed that this behaves other than the previous line
assert v.validate(document)
def test_document_path():
class DocumentPathTester(Validator):
def _validate_trail(self, constraint, field, value):
""" {'type': 'boolean'} """
test_doc = self.root_document
for crumb in self.document_path:
test_doc = test_doc[crumb]
assert test_doc == self.document
v = DocumentPathTester()
schema = {'foo': {'schema': {'bar': {'trail': True}}}}
document = {'foo': {'bar': {}}}
assert_success(document, schema, validator=v)
def test_require_all_simple():
schema = {'foo': {'type': 'string'}}
validator = Validator(require_all=True)
assert_fail(
{},
schema,
validator,
error=('foo', '__require_all__', errors.REQUIRED_FIELD, True),
)
assert_success({'foo': 'bar'}, schema, validator)
validator.require_all = False
assert_success({}, schema, validator)
assert_success({'foo': 'bar'}, schema, validator)
def test_require_all_override_by_required():
schema = {'foo': {'type': 'string', 'required': False}}
validator = Validator(require_all=True)
assert_success({}, schema, validator)
assert_success({'foo': 'bar'}, schema, validator)
validator.require_all = False
assert_success({}, schema, validator)
assert_success({'foo': 'bar'}, schema, validator)
schema = {'foo': {'type': 'string', 'required': True}}
validator.require_all = True
assert_fail(
{},
schema,
validator,
error=('foo', ('foo', 'required'), errors.REQUIRED_FIELD, True),
)
assert_success({'foo': 'bar'}, schema, validator)
validator.require_all = False
assert_fail(
{},
schema,
validator,
error=('foo', ('foo', 'required'), errors.REQUIRED_FIELD, True),
)
assert_success({'foo': 'bar'}, schema, validator)
@mark.parametrize(
"validator_require_all, sub_doc_require_all",
list(itertools.product([True, False], repeat=2)),
)
def test_require_all_override_by_subdoc_require_all(
validator_require_all, sub_doc_require_all
):
sub_schema = {"bar": {"type": "string"}}
schema = {
"foo": {
"type": "dict",
"require_all": sub_doc_require_all,
"schema": sub_schema,
}
}
validator = Validator(require_all=validator_require_all)
assert_success({"foo": {"bar": "baz"}}, schema, validator)
if validator_require_all:
assert_fail({}, schema, validator)
else:
assert_success({}, schema, validator)
if sub_doc_require_all:
assert_fail({"foo": {}}, schema, validator)
else:
assert_success({"foo": {}}, schema, validator)
def test_require_all_and_exclude():
schema = {
'foo': {'type': 'string', 'excludes': 'bar'},
'bar': {'type': 'string', 'excludes': 'foo'},
}
validator = Validator(require_all=True)
assert_fail(
{},
schema,
validator,
errors=[
('foo', '__require_all__', errors.REQUIRED_FIELD, True),
('bar', '__require_all__', errors.REQUIRED_FIELD, True),
],
)
assert_success({'foo': 'value'}, schema, validator)
assert_success({'bar': 'value'}, schema, validator)
assert_fail({'foo': 'value', 'bar': 'value'}, schema, validator)
validator.require_all = False
assert_success({}, schema, validator)
assert_success({'foo': 'value'}, schema, validator)
assert_success({'bar': 'value'}, schema, validator)
assert_fail({'foo': 'value', 'bar': 'value'}, schema, validator)
def test_novalidate_noerrors(validator):
"""
In v0.1.0 and below `self.errors` raised an exception if no
validation had been performed yet.
"""
assert validator.errors == {}
|
from copy import deepcopy
import datetime
import os
from pathlib import Path
from unittest import mock
from PIL import UnidentifiedImageError
import pytest
import simplehound.core as hound
import homeassistant.components.image_processing as ip
import homeassistant.components.sighthound.image_processing as sh
from homeassistant.const import ATTR_ENTITY_ID, CONF_API_KEY
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
TEST_DIR = os.path.dirname(__file__)
VALID_CONFIG = {
ip.DOMAIN: {
"platform": "sighthound",
CONF_API_KEY: "abc123",
ip.CONF_SOURCE: {ip.CONF_ENTITY_ID: "camera.demo_camera"},
},
"camera": {"platform": "demo"},
}
VALID_ENTITY_ID = "image_processing.sighthound_demo_camera"
MOCK_DETECTIONS = {
"image": {"width": 960, "height": 480, "orientation": 1},
"objects": [
{
"type": "person",
"boundingBox": {"x": 227, "y": 133, "height": 245, "width": 125},
},
{
"type": "person",
"boundingBox": {"x": 833, "y": 137, "height": 268, "width": 93},
},
],
"requestId": "545cec700eac4d389743e2266264e84b",
}
MOCK_NOW = datetime.datetime(2020, 2, 20, 10, 5, 3)
@pytest.fixture
def mock_detections():
"""Return a mock detection."""
with mock.patch(
"simplehound.core.cloud.detect", return_value=MOCK_DETECTIONS
) as detection:
yield detection
@pytest.fixture
def mock_image():
"""Return a mock camera image."""
with mock.patch(
"homeassistant.components.demo.camera.DemoCamera.camera_image",
return_value=b"Test",
) as image:
yield image
@pytest.fixture
def mock_bad_image_data():
"""Mock bad image data."""
with mock.patch(
"homeassistant.components.sighthound.image_processing.Image.open",
side_effect=UnidentifiedImageError,
) as bad_data:
yield bad_data
@pytest.fixture
def mock_now():
"""Return a mock now datetime."""
with mock.patch("homeassistant.util.dt.now", return_value=MOCK_NOW) as now_dt:
yield now_dt
async def test_bad_api_key(hass, caplog):
"""Catch bad api key."""
with mock.patch(
"simplehound.core.cloud.detect", side_effect=hound.SimplehoundException
):
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
await hass.async_block_till_done()
assert "Sighthound error" in caplog.text
assert not hass.states.get(VALID_ENTITY_ID)
async def test_setup_platform(hass, mock_detections):
"""Set up platform with one entity."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
async def test_process_image(hass, mock_image, mock_detections):
"""Process an image."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
person_events = []
@callback
def capture_person_event(event):
"""Mock event."""
person_events.append(event)
hass.bus.async_listen(sh.EVENT_PERSON_DETECTED, capture_person_event)
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
state = hass.states.get(VALID_ENTITY_ID)
assert state.state == "2"
assert len(person_events) == 2
async def test_catch_bad_image(
hass, caplog, mock_image, mock_detections, mock_bad_image_data
):
"""Process an image."""
valid_config_save_file = deepcopy(VALID_CONFIG)
valid_config_save_file[ip.DOMAIN].update({sh.CONF_SAVE_FILE_FOLDER: TEST_DIR})
await async_setup_component(hass, ip.DOMAIN, valid_config_save_file)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
assert "Sighthound unable to process image" in caplog.text
async def test_save_image(hass, mock_image, mock_detections):
"""Save a processed image."""
valid_config_save_file = deepcopy(VALID_CONFIG)
valid_config_save_file[ip.DOMAIN].update({sh.CONF_SAVE_FILE_FOLDER: TEST_DIR})
await async_setup_component(hass, ip.DOMAIN, valid_config_save_file)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
with mock.patch(
"homeassistant.components.sighthound.image_processing.Image.open"
) as pil_img_open:
pil_img = pil_img_open.return_value
pil_img = pil_img.convert.return_value
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
state = hass.states.get(VALID_ENTITY_ID)
assert state.state == "2"
assert pil_img.save.call_count == 1
directory = Path(TEST_DIR)
latest_save_path = directory / "sighthound_demo_camera_latest.jpg"
assert pil_img.save.call_args_list[0] == mock.call(latest_save_path)
async def test_save_timestamped_image(hass, mock_image, mock_detections, mock_now):
"""Save a processed image."""
valid_config_save_ts_file = deepcopy(VALID_CONFIG)
valid_config_save_ts_file[ip.DOMAIN].update({sh.CONF_SAVE_FILE_FOLDER: TEST_DIR})
valid_config_save_ts_file[ip.DOMAIN].update({sh.CONF_SAVE_TIMESTAMPTED_FILE: True})
await async_setup_component(hass, ip.DOMAIN, valid_config_save_ts_file)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
with mock.patch(
"homeassistant.components.sighthound.image_processing.Image.open"
) as pil_img_open:
pil_img = pil_img_open.return_value
pil_img = pil_img.convert.return_value
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
state = hass.states.get(VALID_ENTITY_ID)
assert state.state == "2"
assert pil_img.save.call_count == 2
directory = Path(TEST_DIR)
timestamp_save_path = (
directory / "sighthound_demo_camera_2020-02-20_10:05:03.jpg"
)
assert pil_img.save.call_args_list[1] == mock.call(timestamp_save_path)
|
import sys
PY2 = sys.version_info[0] == 2
_identity = lambda x: x
if PY2:
unichr = unichr
text_type = unicode
string_types = (str, unicode)
integer_types = (int, long)
from urllib import urlretrieve
text_to_native = lambda s, enc: s.encode(enc)
iterkeys = lambda d: d.iterkeys()
itervalues = lambda d: d.itervalues()
iteritems = lambda d: d.iteritems()
from cStringIO import StringIO as BytesIO
from StringIO import StringIO
import cPickle as pickle
import ConfigParser as configparser
from itertools import izip, imap
range_type = xrange
cmp = cmp
input = raw_input
    from string import ascii_lowercase
import urlparse
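    # The three-argument raise statement is a syntax error on Python 3, so it
    # is wrapped in exec() to keep this module importable there.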
exec('def reraise(tp, value, tb=None):\n raise tp, value, tb')
def implements_to_string(cls):
cls.__unicode__ = cls.__str__
cls.__str__ = lambda x: x.__unicode__().encode('utf-8')
return cls
def console_to_str(s):
return s.decode('utf_8')
else:
unichr = chr
text_type = str
string_types = (str,)
integer_types = (int,)
text_to_native = lambda s, enc: s
iterkeys = lambda d: iter(d.keys())
itervalues = lambda d: iter(d.values())
iteritems = lambda d: iter(d.items())
from io import StringIO, BytesIO
import pickle
import configparser
izip = zip
imap = map
range_type = range
cmp = lambda a, b: (a > b) - (a < b)
input = input
from string import ascii_lowercase
import urllib.parse as urllib
import urllib.parse as urlparse
from urllib.request import urlretrieve
console_encoding = sys.__stdout__.encoding
implements_to_string = _identity
def console_to_str(s):
""" From pypa/pip project, pip.backwardwardcompat. License MIT. """
try:
return s.decode(console_encoding)
except UnicodeDecodeError:
return s.decode('utf_8')
def reraise(tp, value, tb=None):
if value.__traceback__ is not tb:
raise (value.with_traceback(tb))
raise value
number_types = integer_types + (float,)
|
from homeassistant.components.weather import WeatherEntity
from homeassistant.const import TEMP_CELSIUS
from .const import (
ATTR_API_CONDITION,
ATTR_API_FORECAST,
ATTR_API_HUMIDITY,
ATTR_API_PRESSURE,
ATTR_API_TEMPERATURE,
ATTR_API_WIND_BEARING,
ATTR_API_WIND_SPEED,
ATTRIBUTION,
DOMAIN,
ENTRY_FORECAST_COORDINATOR,
ENTRY_NAME,
ENTRY_WEATHER_COORDINATOR,
)
from .forecast_update_coordinator import ForecastUpdateCoordinator
from .weather_update_coordinator import WeatherUpdateCoordinator
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up OpenWeatherMap weather entity based on a config entry."""
domain_data = hass.data[DOMAIN][config_entry.entry_id]
name = domain_data[ENTRY_NAME]
weather_coordinator = domain_data[ENTRY_WEATHER_COORDINATOR]
forecast_coordinator = domain_data[ENTRY_FORECAST_COORDINATOR]
unique_id = f"{config_entry.unique_id}"
owm_weather = OpenWeatherMapWeather(
name, unique_id, weather_coordinator, forecast_coordinator
)
async_add_entities([owm_weather], False)
class OpenWeatherMapWeather(WeatherEntity):
"""Implementation of an OpenWeatherMap sensor."""
def __init__(
self,
name,
unique_id,
weather_coordinator: WeatherUpdateCoordinator,
forecast_coordinator: ForecastUpdateCoordinator,
):
"""Initialize the sensor."""
self._name = name
self._unique_id = unique_id
self._weather_coordinator = weather_coordinator
self._forecast_coordinator = forecast_coordinator
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._unique_id
@property
def should_poll(self):
"""Return the polling requirement of the entity."""
return False
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def condition(self):
"""Return the current condition."""
return self._weather_coordinator.data[ATTR_API_CONDITION]
@property
def temperature(self):
"""Return the temperature."""
return self._weather_coordinator.data[ATTR_API_TEMPERATURE]
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
return self._weather_coordinator.data[ATTR_API_PRESSURE]
@property
def humidity(self):
"""Return the humidity."""
return self._weather_coordinator.data[ATTR_API_HUMIDITY]
@property
def wind_speed(self):
"""Return the wind speed."""
wind_speed = self._weather_coordinator.data[ATTR_API_WIND_SPEED]
if self.hass.config.units.name == "imperial":
return round(wind_speed * 2.24, 2)
return round(wind_speed * 3.6, 2)
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self._weather_coordinator.data[ATTR_API_WIND_BEARING]
@property
def forecast(self):
"""Return the forecast array."""
return self._forecast_coordinator.data[ATTR_API_FORECAST]
@property
def available(self):
"""Return True if entity is available."""
return (
self._weather_coordinator.last_update_success
and self._forecast_coordinator.last_update_success
)
async def async_added_to_hass(self):
"""Connect to dispatcher listening for entity data notifications."""
self.async_on_remove(
self._weather_coordinator.async_add_listener(self.async_write_ha_state)
)
self.async_on_remove(
self._forecast_coordinator.async_add_listener(self.async_write_ha_state)
)
async def async_update(self):
"""Get the latest data from OWM and updates the states."""
await self._weather_coordinator.async_request_refresh()
await self._forecast_coordinator.async_request_refresh()
|
import asyncio
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, ServiceDataType
from homeassistant.util import slugify
from .account import IcloudAccount
from .const import (
CONF_GPS_ACCURACY_THRESHOLD,
CONF_MAX_INTERVAL,
CONF_WITH_FAMILY,
DEFAULT_GPS_ACCURACY_THRESHOLD,
DEFAULT_MAX_INTERVAL,
DEFAULT_WITH_FAMILY,
DOMAIN,
PLATFORMS,
STORAGE_KEY,
STORAGE_VERSION,
)
ATTRIBUTION = "Data provided by Apple iCloud"
# entity attributes
ATTR_ACCOUNT_FETCH_INTERVAL = "account_fetch_interval"
ATTR_BATTERY = "battery"
ATTR_BATTERY_STATUS = "battery_status"
ATTR_DEVICE_NAME = "device_name"
ATTR_DEVICE_STATUS = "device_status"
ATTR_LOW_POWER_MODE = "low_power_mode"
ATTR_OWNER_NAME = "owner_fullname"
# services
SERVICE_ICLOUD_PLAY_SOUND = "play_sound"
SERVICE_ICLOUD_DISPLAY_MESSAGE = "display_message"
SERVICE_ICLOUD_LOST_DEVICE = "lost_device"
SERVICE_ICLOUD_UPDATE = "update"
ATTR_ACCOUNT = "account"
ATTR_LOST_DEVICE_MESSAGE = "message"
ATTR_LOST_DEVICE_NUMBER = "number"
ATTR_LOST_DEVICE_SOUND = "sound"
SERVICE_SCHEMA = vol.Schema({vol.Optional(ATTR_ACCOUNT): cv.string})
SERVICE_SCHEMA_PLAY_SOUND = vol.Schema(
{vol.Required(ATTR_ACCOUNT): cv.string, vol.Required(ATTR_DEVICE_NAME): cv.string}
)
SERVICE_SCHEMA_DISPLAY_MESSAGE = vol.Schema(
{
vol.Required(ATTR_ACCOUNT): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_LOST_DEVICE_MESSAGE): cv.string,
vol.Optional(ATTR_LOST_DEVICE_SOUND): cv.boolean,
}
)
SERVICE_SCHEMA_LOST_DEVICE = vol.Schema(
{
vol.Required(ATTR_ACCOUNT): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_LOST_DEVICE_NUMBER): cv.string,
vol.Required(ATTR_LOST_DEVICE_MESSAGE): cv.string,
}
)
ACCOUNT_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_WITH_FAMILY, default=DEFAULT_WITH_FAMILY): cv.boolean,
vol.Optional(CONF_MAX_INTERVAL, default=DEFAULT_MAX_INTERVAL): cv.positive_int,
vol.Optional(
CONF_GPS_ACCURACY_THRESHOLD, default=DEFAULT_GPS_ACCURACY_THRESHOLD
): cv.positive_int,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [ACCOUNT_SCHEMA]))},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up iCloud from legacy config file."""
conf = config.get(DOMAIN)
if conf is None:
return True
for account_conf in conf:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=account_conf
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry) -> bool:
"""Set up an iCloud account from a config entry."""
hass.data.setdefault(DOMAIN, {})
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
with_family = entry.data[CONF_WITH_FAMILY]
max_interval = entry.data[CONF_MAX_INTERVAL]
gps_accuracy_threshold = entry.data[CONF_GPS_ACCURACY_THRESHOLD]
# For backwards compat
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=username)
icloud_dir = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
account = IcloudAccount(
hass,
username,
password,
icloud_dir,
with_family,
max_interval,
gps_accuracy_threshold,
entry,
)
await hass.async_add_executor_job(account.setup)
hass.data[DOMAIN][entry.unique_id] = account
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
def play_sound(service: ServiceDataType) -> None:
"""Play sound on the device."""
account = service.data[ATTR_ACCOUNT]
device_name = service.data.get(ATTR_DEVICE_NAME)
device_name = slugify(device_name.replace(" ", "", 99))
for device in _get_account(account).get_devices_with_name(device_name):
device.play_sound()
def display_message(service: ServiceDataType) -> None:
"""Display a message on the device."""
account = service.data[ATTR_ACCOUNT]
device_name = service.data.get(ATTR_DEVICE_NAME)
device_name = slugify(device_name.replace(" ", "", 99))
message = service.data.get(ATTR_LOST_DEVICE_MESSAGE)
sound = service.data.get(ATTR_LOST_DEVICE_SOUND, False)
for device in _get_account(account).get_devices_with_name(device_name):
device.display_message(message, sound)
def lost_device(service: ServiceDataType) -> None:
"""Make the device in lost state."""
account = service.data[ATTR_ACCOUNT]
device_name = service.data.get(ATTR_DEVICE_NAME)
device_name = slugify(device_name.replace(" ", "", 99))
number = service.data.get(ATTR_LOST_DEVICE_NUMBER)
message = service.data.get(ATTR_LOST_DEVICE_MESSAGE)
for device in _get_account(account).get_devices_with_name(device_name):
device.lost_device(number, message)
def update_account(service: ServiceDataType) -> None:
"""Call the update function of an iCloud account."""
account = service.data.get(ATTR_ACCOUNT)
if account is None:
for account in hass.data[DOMAIN].values():
account.keep_alive()
else:
_get_account(account).keep_alive()
    def _get_account(account_identifier: str):
if account_identifier is None:
return None
icloud_account = hass.data[DOMAIN].get(account_identifier)
if icloud_account is None:
for account in hass.data[DOMAIN].values():
if account.username == account_identifier:
icloud_account = account
if icloud_account is None:
raise Exception(
f"No iCloud account with username or name {account_identifier}"
)
return icloud_account
hass.services.async_register(
DOMAIN, SERVICE_ICLOUD_PLAY_SOUND, play_sound, schema=SERVICE_SCHEMA_PLAY_SOUND
)
hass.services.async_register(
DOMAIN,
SERVICE_ICLOUD_DISPLAY_MESSAGE,
display_message,
schema=SERVICE_SCHEMA_DISPLAY_MESSAGE,
)
hass.services.async_register(
DOMAIN,
SERVICE_ICLOUD_LOST_DEVICE,
lost_device,
schema=SERVICE_SCHEMA_LOST_DEVICE,
)
hass.services.async_register(
DOMAIN, SERVICE_ICLOUD_UPDATE, update_account, schema=SERVICE_SCHEMA
)
return True
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.data[CONF_USERNAME])
return unload_ok
|
import pytest
from homeassistant.components import stt
from homeassistant.setup import async_setup_component
@pytest.fixture(autouse=True)
async def setup_comp(hass):
"""Set up demo component."""
assert await async_setup_component(hass, stt.DOMAIN, {"stt": {"platform": "demo"}})
await hass.async_block_till_done()
async def test_demo_settings(hass_client):
"""Test retrieve settings from demo provider."""
client = await hass_client()
response = await client.get("/api/stt/demo")
response_data = await response.json()
assert response.status == 200
assert response_data == {
"languages": ["en", "de"],
"bit_rates": [16],
"sample_rates": [16000, 44100],
"formats": ["wav"],
"codecs": ["pcm"],
"channels": [2],
}
async def test_demo_speech_no_metadata(hass_client):
"""Test retrieve settings from demo provider."""
client = await hass_client()
response = await client.post("/api/stt/demo", data=b"Test")
assert response.status == 400
async def test_demo_speech_wrong_metadata(hass_client):
"""Test retrieve settings from demo provider."""
client = await hass_client()
response = await client.post(
"/api/stt/demo",
headers={
"X-Speech-Content": "format=wav; codec=pcm; sample_rate=8000; bit_rate=16; channel=1; language=de"
},
data=b"Test",
)
assert response.status == 415
async def test_demo_speech(hass_client):
"""Test retrieve settings from demo provider."""
client = await hass_client()
response = await client.post(
"/api/stt/demo",
headers={
"X-Speech-Content": "format=wav; codec=pcm; sample_rate=16000; bit_rate=16; channel=2; language=de"
},
data=b"Test",
)
response_data = await response.json()
assert response.status == 200
assert response_data == {"text": "Turn the Kitchen Lights on", "result": "success"}
|
import argparse
import glob
import os
import time
import random
COLOURS = (b'\xFF\x00\x00', b'\x00\xFF\x00', b'\x00\x00\xFF', b'\xFF\xFF\x00', b'\xFF\x00\xFF', b'\x00\xFF\xFF')
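# Red, green, blue, yellow, magenta and cyan as raw 3-byte RGB payloads.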
def write_binary(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'wb') as open_file:
open_file.write(payload)
def read_string(driver_path, device_file):
with open(os.path.join(driver_path, device_file), 'r') as open_file:
return open_file.read().rstrip('\n')
def write_string(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'w') as open_file:
open_file.write(payload)
def find_devices(vid, pid):
driver_paths = glob.glob(os.path.join('/sys/bus/hid/drivers/razerkbd', '*:{0:04X}:{1:04X}.*'.format(vid, pid)))
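    # sysfs entries for hid devices are named "BUS:VID:PID.INSTANCE", e.g. "0003:1532:0203.0001".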
for driver_path in driver_paths:
device_type_path = os.path.join(driver_path, 'device_type')
if os.path.exists(device_type_path):
yield driver_path
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--skip-standard', action='store_true')
parser.add_argument('--skip-custom', action='store_true')
parser.add_argument('--skip-game-led', action='store_true')
parser.add_argument('--skip-macro-led', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
found_chroma = False
for index, driver_path in enumerate(find_devices(0x1532, 0x0203), start=1):
found_chroma = True
print("Blackwidow Chroma {0}\n".format(index))
print("Driver version: {0}".format(read_string(driver_path, 'version')))
print("Driver firmware version: {0}".format(read_string(driver_path, 'firmware_version')))
print("Device serial: {0}".format(read_string(driver_path, 'device_serial')))
print("Device type: {0}".format(read_string(driver_path, 'device_type')))
print("Device mode: {0}".format(read_string(driver_path, 'device_mode')))
# Set to static red so that we have something standard
write_binary(driver_path, 'matrix_effect_static', b'\xFF\x00\x00')
if not args.skip_standard:
print("Starting brightness test. Press enter to begin.")
input()
print("Max brightness...", end='')
write_string(driver_path, 'matrix_brightness', '255')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Half brightness...", end='')
write_string(driver_path, 'matrix_brightness', '128')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Zero brightness...", end='')
write_string(driver_path, 'matrix_brightness', '0')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
write_string(driver_path, 'matrix_brightness', '255')
print("Starting other colour effect tests. Press enter to begin.")
input()
print("Green Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\x00')
time.sleep(5)
print("Cyan Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\xFF')
time.sleep(5)
print("Spectrum")
write_binary(driver_path, 'matrix_effect_spectrum', b'\x00')
time.sleep(10)
print("None")
write_binary(driver_path, 'matrix_effect_none', b'\x00')
time.sleep(5)
print("Wave Left")
write_string(driver_path, 'matrix_effect_wave', '1')
time.sleep(5)
print("Wave Right")
write_string(driver_path, 'matrix_effect_wave', '2')
time.sleep(5)
print("Breathing random")
write_binary(driver_path, 'matrix_effect_breath', b'\x00')
time.sleep(10)
print("Breathing red")
write_binary(driver_path, 'matrix_effect_breath', b'\xFF\x00\x00')
time.sleep(10)
print("Breathing blue-green")
write_binary(driver_path, 'matrix_effect_breath', b'\x00\xFF\x00\x00\x00\xFF')
time.sleep(10)
if not args.skip_custom:
# Custom LEDs all rows
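            # Each row record in a custom frame is: row index byte, start
            # column byte, end column byte, then one RGB triplet per LED
            # in that column range.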
payload_all = b''
            for row in range(0, 6):  # 6 rows; the payload below spans columns 0x00-0x15 (0->21 inclusive)
payload_all += row.to_bytes(1, byteorder='big') + b'\x00\x15'
for i in range(0, 22):
payload_all += random.choice(COLOURS)
# Custom LEDs M1-5
payload_m1_5 = b''
            for row in range(0, 6):  # one white LED per row: start column 0, end column 0
payload_m1_5 += row.to_bytes(1, byteorder='big') + b'\x00\x00' + b'\xFF\xFF\xFF'
print("Custom LED matrix colours test. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_all)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
print("Custom LED matrix partial colours test. Setting M1-5 to white. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_m1_5)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
time.sleep(0.5)
if not args.skip_game_led:
# Game mode test
print("Starting game mode LED tests. Press enter to begin.")
input()
print("Enabling game mode LED")
write_string(driver_path, 'game_led_state', '1')
time.sleep(5)
print("Disabling game mode LED")
write_string(driver_path, 'game_led_state', '0')
time.sleep(5)
if not args.skip_macro_led:
print("Starting marco LED tests. Press enter to begin.")
input()
print("Enabling macro mode LED")
write_string(driver_path, 'macro_led_state', '1')
time.sleep(5)
print("Enabling macro mode LED static effect")
write_string(driver_path, 'macro_led_effect', '0')
time.sleep(5)
print("Enabling macro mode LED blinking effect")
write_string(driver_path, 'macro_led_effect', '1')
time.sleep(5)
write_string(driver_path, 'macro_led_effect', '0')
print("Disabling macro mode LED")
write_string(driver_path, 'macro_led_state', '0')
time.sleep(5)
print("Finished")
if not found_chroma:
print("No Blackwidow Chromas found")
|
from absl import flags
FLAGS = flags.FLAGS
TF_MODELS_GIT = 'https://github.com/tensorflow/models.git'
flags.DEFINE_string('tensorflow_models_commit_hash',
'4fa82ae1cb08c374a44e2713e731f57d44bf7e61',
'git commit hash of desired TensorFlow models commit.')
def Install(vm):
"""Installs TensorFlow models on the VM."""
vm.InstallPackages('git')
vm.RemoteCommand('git clone {}'.format(TF_MODELS_GIT), should_log=True)
vm.RemoteCommand('cd models && git checkout {}'.format(
FLAGS.tensorflow_models_commit_hash))
def Uninstall(vm):
"""Uninstalls TensorFlow models on the VM."""
vm.RemoteCommand('rm -rf models', should_log=True)
|
import logging
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_SHADE,
DOMAIN,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from . import DOMAIN as CASETA_DOMAIN, LutronCasetaDevice
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Lutron Caseta cover platform.
Adds shades from the Caseta bridge associated with the config_entry as
cover entities.
"""
entities = []
bridge = hass.data[CASETA_DOMAIN][config_entry.entry_id]
cover_devices = bridge.get_devices_by_domain(DOMAIN)
for cover_device in cover_devices:
entity = LutronCasetaCover(cover_device, bridge)
entities.append(entity)
async_add_entities(entities, True)
class LutronCasetaCover(LutronCasetaDevice, CoverEntity):
"""Representation of a Lutron shade."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP | SUPPORT_SET_POSITION
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._device["current_state"] < 1
@property
def current_cover_position(self):
"""Return the current position of cover."""
return self._device["current_state"]
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_SHADE
async def async_stop_cover(self, **kwargs):
"""Top the cover."""
await self._smartbridge.stop_cover(self.device_id)
async def async_close_cover(self, **kwargs):
"""Close the cover."""
await self._smartbridge.lower_cover(self.device_id)
        await self.async_update()
self.async_write_ha_state()
async def async_open_cover(self, **kwargs):
"""Open the cover."""
await self._smartbridge.raise_cover(self.device_id)
        await self.async_update()
self.async_write_ha_state()
async def async_set_cover_position(self, **kwargs):
"""Move the shade to a specific position."""
if ATTR_POSITION in kwargs:
position = kwargs[ATTR_POSITION]
await self._smartbridge.set_value(self.device_id, position)
async def async_update(self):
"""Call when forcing a refresh of the device."""
self._device = self._smartbridge.get_device_by_id(self.device_id)
_LOGGER.debug(self._device)
|
import json
import requests
from requests.exceptions import ConnectionError
from datetime import datetime
from flask import current_app
from lemur.plugins import lemur_atlas as atlas
from lemur.plugins.bases.metric import MetricPlugin
def millis_since_epoch():
"""
    Current time since epoch in milliseconds.
"""
epoch = datetime.utcfromtimestamp(0)
    delta = datetime.utcnow() - epoch  # compare in UTC, matching the UTC epoch above
return int(delta.total_seconds() * 1000.0)
class AtlasMetricPlugin(MetricPlugin):
title = "Atlas"
slug = "atlas-metric"
description = "Adds support for sending key metrics to Atlas"
version = atlas.VERSION
author = "Kevin Glisson"
author_url = "https://github.com/netflix/lemur"
options = [
{
"name": "sidecar_host",
"type": "str",
"required": False,
"help_message": "If no host is provided localhost is assumed",
"default": "localhost",
},
{"name": "sidecar_port", "type": "int", "required": False, "default": 8078},
]
metric_data = {}
sidecar_host = None
sidecar_port = None
def submit(
self, metric_name, metric_type, metric_value, metric_tags=None, options=None
):
if not options:
options = self.options
# TODO marshmallow schema?
valid_types = ["COUNTER", "GAUGE", "TIMER"]
if metric_type.upper() not in valid_types:
raise Exception(
"Invalid Metric Type for Atlas: '{metric}' choose from: {options}".format(
metric=metric_type, options=",".join(valid_types)
)
)
if metric_tags:
if not isinstance(metric_tags, dict):
raise Exception(
"Invalid Metric Tags for Atlas: Tags must be in dict format"
)
if (
metric_value == "NaN"
or isinstance(metric_value, int)
or isinstance(metric_value, float)
):
self.metric_data["value"] = metric_value
else:
raise Exception("Invalid Metric Value for Atlas: Metric must be a number")
self.metric_data["type"] = metric_type.upper()
self.metric_data["name"] = str(metric_name)
self.metric_data["tags"] = metric_tags
self.metric_data["timestamp"] = millis_since_epoch()
self.sidecar_host = self.get_option("sidecar_host", options)
self.sidecar_port = self.get_option("sidecar_port", options)
try:
res = requests.post(
"http://{host}:{port}/metrics".format(
host=self.sidecar_host, port=self.sidecar_port
),
data=json.dumps([self.metric_data]),
)
if res.status_code != 200:
current_app.logger.warning(
"Failed to publish altas metric. {0}".format(res.content)
)
except ConnectionError:
current_app.logger.warning(
"AtlasMetrics: could not connect to sidecar at {host}:{port}".format(
host=self.sidecar_host, port=self.sidecar_port
)
)
|
import logging
from libpurecool.dyson import DysonAccount
import voluptuous as vol
from homeassistant.const import CONF_DEVICES, CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_LANGUAGE = "language"
CONF_RETRY = "retry"
DEFAULT_TIMEOUT = 5
DEFAULT_RETRY = 10
DYSON_DEVICES = "dyson_devices"
DYSON_PLATFORMS = ["sensor", "fan", "vacuum", "climate", "air_quality"]
DOMAIN = "dyson"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_LANGUAGE): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_RETRY, default=DEFAULT_RETRY): cv.positive_int,
vol.Optional(CONF_DEVICES, default=[]): vol.All(cv.ensure_list, [dict]),
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the Dyson parent component."""
_LOGGER.info("Creating new Dyson component")
if DYSON_DEVICES not in hass.data:
hass.data[DYSON_DEVICES] = []
dyson_account = DysonAccount(
config[DOMAIN].get(CONF_USERNAME),
config[DOMAIN].get(CONF_PASSWORD),
config[DOMAIN].get(CONF_LANGUAGE),
)
logged = dyson_account.login()
timeout = config[DOMAIN].get(CONF_TIMEOUT)
retry = config[DOMAIN].get(CONF_RETRY)
if not logged:
_LOGGER.error("Not connected to Dyson account. Unable to add devices")
return False
_LOGGER.info("Connected to Dyson account")
dyson_devices = dyson_account.devices()
if CONF_DEVICES in config[DOMAIN] and config[DOMAIN].get(CONF_DEVICES):
configured_devices = config[DOMAIN].get(CONF_DEVICES)
for device in configured_devices:
dyson_device = next(
(d for d in dyson_devices if d.serial == device["device_id"]), None
)
if dyson_device:
try:
connected = dyson_device.connect(device["device_ip"])
if connected:
_LOGGER.info("Connected to device %s", dyson_device)
hass.data[DYSON_DEVICES].append(dyson_device)
else:
_LOGGER.warning("Unable to connect to device %s", dyson_device)
except OSError as ose:
_LOGGER.error(
"Unable to connect to device %s: %s",
str(dyson_device.network_device),
str(ose),
)
else:
_LOGGER.warning(
"Unable to find device %s in Dyson account", device["device_id"]
)
else:
# Not yet reliable
for device in dyson_devices:
_LOGGER.info(
"Trying to connect to device %s with timeout=%i and retry=%i",
device,
timeout,
retry,
)
connected = device.auto_connect(timeout, retry)
if connected:
_LOGGER.info("Connected to device %s", device)
hass.data[DYSON_DEVICES].append(device)
else:
_LOGGER.warning("Unable to connect to device %s", device)
# Start fan/sensors components
if hass.data[DYSON_DEVICES]:
_LOGGER.debug("Starting sensor/fan components")
for platform in DYSON_PLATFORMS:
discovery.load_platform(hass, platform, DOMAIN, {}, config)
return True
|
import argparse
import getpass
from socket import gaierror
import ephemeral_port_reserve
import mock
from mock import patch
from pytest import mark
from pytest import raises
from paasta_tools.cli import utils
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.utils import SystemPaastaConfig
@patch("socket.gethostbyname_ex", autospec=True)
def test_bad_calculate_remote_master(mock_get_by_hostname, system_paasta_config):
mock_get_by_hostname.side_effect = gaierror(42, "bar")
ips, output = utils.calculate_remote_masters("myhost", system_paasta_config)
assert ips == []
assert "ERROR while doing DNS lookup of paasta-myhost.yelp:\nbar\n" in output
@patch("socket.gethostbyname_ex", autospec=True)
def test_ok_remote_masters(mock_get_by_hostname, system_paasta_config):
mock_get_by_hostname.return_value = ("myhost", [], ["1.2.3.4", "1.2.3.5"])
ips, output = utils.calculate_remote_masters("myhost", system_paasta_config)
assert output is None
assert ips == ["1.2.3.4", "1.2.3.5"]
@patch("paasta_tools.cli.utils.check_ssh_on_master", autospec=True)
def test_find_connectable_master_happy_path(mock_check_ssh_on_master):
masters = ["192.0.2.1", "192.0.2.2", "192.0.2.3"]
timeout = 6.0
mock_check_ssh_on_master.return_value = (True, None)
actual = utils.find_connectable_master(masters)
expected = (masters[0], None)
assert mock_check_ssh_on_master.call_count == 1
mock_check_ssh_on_master.assert_called_once_with(masters[0], timeout=timeout)
assert actual == expected
@patch("random.shuffle", autospec=True)
@patch("paasta_tools.cli.utils.find_connectable_master", autospec=True)
@patch("paasta_tools.cli.utils.calculate_remote_masters", autospec=True)
def test_connectable_master_random(
mock_calculate_remote_masters,
mock_find_connectable_master,
mock_shuffle,
system_paasta_config,
):
masters = ["192.0.2.1", "192.0.2.2", "192.0.2.3"]
mock_calculate_remote_masters.return_value = (masters, None)
mock_find_connectable_master.return_value = (masters[0], None)
mock_shuffle.return_value = None
utils.connectable_master("fake_cluster", system_paasta_config)
mock_shuffle.assert_called_once_with(masters)
@patch("paasta_tools.cli.utils.check_ssh_on_master", autospec=True)
def test_find_connectable_master_one_failure(mock_check_ssh_on_master):
masters = ["192.0.2.1", "192.0.2.2", "192.0.2.3"]
timeout = 6.0
# iter() is a workaround
# (http://lists.idyll.org/pipermail/testing-in-python/2013-April/005527.html)
# for a bug in mock (http://bugs.python.org/issue17826)
create_connection_side_effects = iter(
[(False, "something bad"), (True, "unused"), (True, "unused")]
)
mock_check_ssh_on_master.side_effect = create_connection_side_effects
mock_check_ssh_on_master.return_value = True
actual = utils.find_connectable_master(masters)
assert mock_check_ssh_on_master.call_count == 2
mock_check_ssh_on_master.assert_any_call(masters[0], timeout=timeout)
mock_check_ssh_on_master.assert_any_call(masters[1], timeout=timeout)
assert actual == ("192.0.2.2", None)
@patch("paasta_tools.cli.utils.check_ssh_on_master", autospec=True)
def test_find_connectable_master_all_failures(mock_check_ssh_on_master):
masters = ["192.0.2.1", "192.0.2.2", "192.0.2.3"]
timeout = 6.0
mock_check_ssh_on_master.return_value = (255, "timeout")
actual = utils.find_connectable_master(masters)
assert mock_check_ssh_on_master.call_count == 3
mock_check_ssh_on_master.assert_any_call((masters[0]), timeout=timeout)
mock_check_ssh_on_master.assert_any_call((masters[1]), timeout=timeout)
mock_check_ssh_on_master.assert_any_call((masters[2]), timeout=timeout)
assert actual[0] is None
assert "timeout" in actual[1]
@patch("paasta_tools.cli.utils._run", autospec=True)
def test_check_ssh_on_master_check_successful(mock_run):
master = "fake_master"
mock_run.return_value = (0, "fake_output")
expected_command = "ssh -A -n -o StrictHostKeyChecking=no %s /bin/true" % master
actual = utils.check_ssh_on_master(master)
mock_run.assert_called_once_with(expected_command, timeout=mock.ANY)
assert actual == (True, None)
@patch("paasta_tools.cli.utils._run", autospec=True)
def test_check_ssh_on_master_check_ssh_failure(mock_run):
master = "fake_master"
mock_run.return_value = (255, "fake_output")
actual = utils.check_ssh_on_master(master)
assert actual[0] is False
assert "fake_output" in actual[1]
assert "255" in actual[1]
@patch("paasta_tools.cli.utils._run", autospec=True)
def test_check_ssh_on_master_check_sudo_failure(mock_run):
master = "fake_master"
mock_run.return_value = (1, "fake_output")
actual = utils.check_ssh_on_master(master)
assert actual[0] is False
assert "1" in actual[1]
assert "fake_output" in actual[1]
@patch("paasta_tools.cli.utils._run", autospec=True)
def test_run_paasta_metastatus(mock_run):
mock_run.return_value = (0, "fake_output")
expected_command = (
"ssh -A -n -o StrictHostKeyChecking=no fake_master sudo paasta_metastatus"
)
return_code, actual = utils.run_paasta_metastatus("fake_master", [], 0)
mock_run.assert_called_once_with(expected_command, timeout=mock.ANY)
assert return_code == 0
assert actual == mock_run.return_value[1]
@patch("paasta_tools.cli.utils._run", autospec=True)
def test_run_paasta_metastatus_verbose(mock_run):
mock_run.return_value = (0, "fake_output")
expected_command = (
"ssh -A -n -o StrictHostKeyChecking=no fake_master sudo paasta_metastatus -v"
)
return_code, actual = utils.run_paasta_metastatus("fake_master", [], 1)
mock_run.assert_called_once_with(expected_command, timeout=mock.ANY)
assert return_code == 0
assert actual == mock_run.return_value[1]
@patch("paasta_tools.cli.utils._run", autospec=True)
def test_run_paasta_metastatus_very_verbose(mock_run):
mock_run.return_value = (0, "fake_output")
return_code, actual = utils.run_paasta_metastatus("fake_master", [], 2, False)
expected_command = (
"ssh -A -n -o StrictHostKeyChecking=no fake_master sudo paasta_metastatus -vv"
)
mock_run.assert_called_once_with(expected_command, timeout=mock.ANY)
assert return_code == 0
assert actual == mock_run.return_value[1]
@patch("paasta_tools.cli.utils.list_all_instances_for_service", autospec=True)
@patch("paasta_tools.cli.utils.list_services", autospec=True)
def test_list_service_instances(mock_list_services, mock_list_instances):
mock_list_services.return_value = ["fake_service"]
mock_list_instances.return_value = ["canary", "main"]
expected = ["fake_service.canary", "fake_service.main"]
actual = utils.list_service_instances()
assert actual == expected
@patch("paasta_tools.cli.utils.list_all_instances_for_service", autospec=True)
@patch("paasta_tools.cli.utils.list_services", autospec=True)
def test_list_paasta_services(mock_list_services, mock_list_instances):
mock_list_services.return_value = ["fake_service"]
mock_list_instances.return_value = ["canary", "main"]
expected = ["fake_service"]
actual = utils.list_paasta_services()
assert actual == expected
@patch("paasta_tools.cli.utils.guess_service_name", autospec=True)
@patch("paasta_tools.cli.utils.validate_service_name", autospec=True)
@patch("paasta_tools.cli.utils.list_all_instances_for_service", autospec=True)
def test_list_instances_with_autodetect(
mock_list_instance_for_service, mock_validate_service_name, mock_guess_service_name
):
expected = ["instance1", "instance2", "instance3"]
mock_guess_service_name.return_value = "fake_service"
mock_validate_service_name.return_value = None
mock_list_instance_for_service.return_value = expected
actual = utils.list_instances()
assert actual == expected
mock_validate_service_name.assert_called_once_with("fake_service")
mock_list_instance_for_service.assert_called_once_with("fake_service")
@patch("paasta_tools.cli.utils.guess_service_name", autospec=True)
@patch("paasta_tools.cli.utils.validate_service_name", autospec=True)
@patch("paasta_tools.cli.utils.list_all_instances_for_service", autospec=True)
@patch("paasta_tools.cli.utils.list_services", autospec=True)
def test_list_instances_no_service(
mock_list_services,
mock_list_instance_for_service,
mock_validate_service_name,
mock_guess_service_name,
):
expected = ["instance1", "instance2", "instance3"]
mock_guess_service_name.return_value = "unused"
mock_list_services.return_value = ["fake_service1"]
mock_validate_service_name.side_effect = utils.NoSuchService(None)
mock_list_instance_for_service.return_value = expected
actual = utils.list_instances()
mock_validate_service_name.assert_called_once_with("unused")
mock_list_instance_for_service.assert_called_once_with("fake_service1")
assert actual == expected
def test_lazy_choices_completer():
completer = utils.lazy_choices_completer(lambda: ["1", "2", "3"])
assert completer(prefix="") == ["1", "2", "3"]
@mock.patch("paasta_tools.cli.utils.INSTANCE_TYPE_HANDLERS", dict(), autospec=None)
@mock.patch("paasta_tools.cli.utils.validate_service_instance", autospec=True)
def test_get_instance_config_by_instance_type(mock_validate_service_instance,):
instance_type = "fake_type"
mock_validate_service_instance.return_value = instance_type
mock_load_config = mock.MagicMock()
mock_load_config.return_value = "fake_service_config"
utils.INSTANCE_TYPE_HANDLERS[instance_type] = utils.InstanceTypeHandler(
None, mock_load_config
)
actual = utils.get_instance_config(
service="fake_service",
instance="fake_instance",
cluster="fake_cluster",
soa_dir="fake_soa_dir",
)
assert mock_validate_service_instance.call_count == 1
assert mock_load_config.call_count == 1
assert actual == "fake_service_config"
@mock.patch("paasta_tools.cli.utils.validate_service_instance", autospec=True)
def test_get_instance_config_unknown(mock_validate_service_instance,):
with raises(NotImplementedError):
mock_validate_service_instance.return_value = "some bogus unsupported framework"
utils.get_instance_config(
service="fake_service",
instance="fake_instance",
cluster="fake_cluster",
soa_dir="fake_soa_dir",
)
assert mock_validate_service_instance.call_count == 1
def test_get_subparser():
mock_subparser = mock.Mock()
mock_function = mock.Mock()
mock_command = "test"
mock_help_text = "HALP"
mock_description = "what_i_do"
utils.get_subparser(
subparsers=mock_subparser,
function=mock_function,
help_text=mock_help_text,
description=mock_description,
command=mock_command,
)
mock_subparser.add_parser.assert_called_with(
"test",
help="HALP",
description=("what_i_do"),
epilog=(
"Note: This command requires SSH and "
"sudo privileges on the remote PaaSTA nodes."
),
)
mock_subparser.add_parser.return_value.set_defaults.assert_called_with(
command=mock_function
)
def test_pick_slave_from_status():
mock_slaves = [1, 2]
mock_status = mock.Mock(marathon=mock.Mock(slaves=mock_slaves))
assert utils.pick_slave_from_status(mock_status, host=None) == 1
assert utils.pick_slave_from_status(mock_status, host="lolhost") == "lolhost"
def test_git_sha_validation():
assert (
utils.validate_full_git_sha("060ce8bc10efe0030c048a4711ad5dd85de5adac")
== "060ce8bc10efe0030c048a4711ad5dd85de5adac"
)
with raises(argparse.ArgumentTypeError):
utils.validate_full_git_sha("BAD")
assert utils.validate_short_git_sha("060c") == "060c"
with raises(argparse.ArgumentTypeError):
utils.validate_short_git_sha("BAD")
@patch("paasta_tools.cli.utils.get_instance_configs_for_service", autospec=True)
def test_list_deploy_groups_parses_configs(mock_get_instance_configs_for_service,):
mock_get_instance_configs_for_service.return_value = [
MarathonServiceConfig(
service="foo",
cluster="",
instance="",
config_dict={"deploy_group": "fake_deploy_group"},
branch_dict=None,
),
MarathonServiceConfig(
service="foo",
cluster="fake_cluster",
instance="fake_instance",
config_dict={},
branch_dict=None,
),
]
actual = utils.list_deploy_groups(service="foo")
assert actual == {"fake_deploy_group", "fake_cluster.fake_instance"}
def test_get_container_name():
mock_task = mock.Mock(executor={"container": "container1"})
ret = utils.get_container_name(mock_task)
assert ret == "mesos-container1"
def test_pick_random_port():
def fake_epr(ip, port):
return port
with mock.patch.object(
ephemeral_port_reserve, "reserve", side_effect=fake_epr
), mock.patch.object(getpass, "getuser", return_value="nobody", autospec=True):
# Two calls with the same service should try to reserve the same port.
port1 = utils.pick_random_port("fake_service")
port2 = utils.pick_random_port("fake_service")
assert port1 == port2
assert 33000 <= port1 < 58000
# A third call with a different service should try to reserve a different port.
port3 = utils.pick_random_port("different_fake_service")
assert port1 != port3
assert 33000 <= port3 < 58000
@patch("paasta_tools.cli.utils._log_audit", autospec=True)
@patch("paasta_tools.cli.utils.run_paasta_cluster_boost", autospec=True)
@patch("paasta_tools.cli.utils.connectable_master", autospec=True)
@mark.parametrize(
"master_result,boost_result,expected_result",
[(utils.NoMasterError("error"), None, 1), (mock.Mock(), 1, 1), (mock.Mock(), 0, 0)],
)
def test_execute_paasta_cluster_boost_on_remote_master(
mock_connectable_master,
mock_boost,
mock_log,
master_result,
boost_result,
expected_result,
):
mock_c1 = mock.Mock()
mock_connectable_master.side_effect = [mock_c1, master_result]
clusters = ["c1", "c2"]
mock_config = mock.Mock()
mock_boost.side_effect = [(0, ""), (boost_result, "")]
code, output = utils.execute_paasta_cluster_boost_on_remote_master(
clusters,
mock_config,
"do_action",
"a_pool",
duration=30,
override=False,
boost=2,
verbose=1,
)
shared_kwargs = dict(
action="do_action",
pool="a_pool",
duration=30,
override=False,
boost=2,
verbose=1,
)
expected_calls = [mock.call(master=mock_c1, **shared_kwargs)]
if not isinstance(master_result, utils.NoMasterError):
expected_calls.append(mock.call(master=master_result, **shared_kwargs))
assert mock_boost.call_args_list == expected_calls
assert code == expected_result
@mock.patch("paasta_tools.cli.utils._log", mock.Mock(), autospec=None)
@mock.patch("paasta_tools.cli.utils.load_system_paasta_config", autospec=True)
@mock.patch("socket.socket", autospec=True)
def test_trigger_deploys(mock_socket, mock_load_config):
mock_load_config.return_value = SystemPaastaConfig({}, "/some/fake/dir")
mock_client = mock_socket.return_value
utils.trigger_deploys("a_service")
assert mock_load_config.call_count == 1
assert mock_client.connect.call_args_list == [
mock.call(("sysgit.yelpcorp.com", 5049))
]
assert mock_client.send.call_args_list == [mock.call("a_service\n".encode("utf-8"))]
assert mock_client.close.call_count == 1
|
from wled import WLEDConnectionError
from homeassistant.components.wled.const import DOMAIN
from homeassistant.config_entries import ENTRY_STATE_SETUP_RETRY
from homeassistant.core import HomeAssistant
from tests.async_mock import MagicMock, patch
from tests.components.wled import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
@patch("homeassistant.components.wled.WLED.update", side_effect=WLEDConnectionError)
async def test_config_entry_not_ready(
mock_update: MagicMock, hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the WLED configuration entry not ready."""
entry = await init_integration(hass, aioclient_mock)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_config_entry(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the WLED configuration entry unloading."""
entry = await init_integration(hass, aioclient_mock)
assert hass.data[DOMAIN]
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert not hass.data.get(DOMAIN)
async def test_setting_unique_id(hass, aioclient_mock):
"""Test we set unique ID if not set yet."""
entry = await init_integration(hass, aioclient_mock)
assert hass.data[DOMAIN]
assert entry.unique_id == "aabbccddeeff"
|
revision = "318b66568358"
down_revision = "9f79024fe67b"
from alembic import op
def upgrade():
connection = op.get_bind()
    # Ensure the deleted flag is never NULL
connection.execute("UPDATE certificates SET deleted = false WHERE deleted IS NULL")
def downgrade():
pass
|
import base
from docker_registry.core import compat
json = compat.json
class TestIndex(base.TestCase):
"""The Index module is fake at the moment
hence the unit tests only test the return codes
"""
def test_users(self):
# GET
resp = self.http_client.get('/v1/users/')
self.assertEqual(resp.status_code, 200, resp.data)
# POST
resp = self.http_client.post('/v1/users/',
data=json.dumps('JSON DATA PLACEHOLDER'))
self.assertEqual(resp.status_code, 201, resp.data)
# PUT
resp = self.http_client.put('/v1/users/{0}/'.format(
self.gen_random_string()))
self.assertEqual(resp.status_code, 204, resp.data)
def test_repository_images(self):
repo = 'test/{0}'.format(self.gen_random_string())
images = [{'id': self.gen_random_string()},
{'id': self.gen_random_string()}]
# PUT
resp = self.http_client.put('/v1/repositories/{0}/'.format(repo),
data=json.dumps(images))
self.assertEqual(resp.status_code, 200, resp.data)
resp = self.http_client.put('/v1/repositories/{0}/images'.format(repo),
data=json.dumps(images))
self.assertEqual(resp.status_code, 204, resp.data)
# GET
resp = self.http_client.get('/v1/repositories/{0}/images'.format(repo))
self.assertEqual(resp.status_code, 200, resp.data)
# Note(dmp): unicode patch XXX not applied assume requests does the job
data = json.loads(resp.data)
self.assertEqual(len(data), 2)
self.assertTrue('id' in data[0])
# DELETE
resp = self.http_client.delete('/v1/repositories/{0}/images'.format(
repo))
self.assertEqual(resp.status_code, 204, resp.data)
def test_auth(self):
repo = 'test/{0}'.format(self.gen_random_string())
resp = self.http_client.put('/v1/repositories/{0}/auth'.format(repo))
self.assertEqual(resp.status_code, 200, resp.data)
def test_search(self):
search_term = self.gen_random_string()
resp = self.http_client.get('/v1/search?q={0}'.format(search_term))
self.assertEqual(resp.status_code, 200, resp.data)
|
import os
import shlex
import shutil
import subprocess
import sys
import unittest
def run_cmd(cmd):
"""Run a command and return a tuple with (stdout, stderr, exit_code)"""
print('\n$ ' + cmd)
process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
print(stdout.decode('utf-8'))
print(stderr.decode('utf-8'))
return stdout, stderr, process.wait()
class TestMigrate(unittest.TestCase):
def setUp(self):
os.chdir(os.path.split(os.path.abspath(__file__))[0])
try:
os.remove('app.db')
except OSError:
pass
try:
shutil.rmtree('migrations')
except OSError:
pass
try:
shutil.rmtree('temp_folder')
except OSError:
pass
def tearDown(self):
try:
os.remove('app.db')
except OSError:
pass
try:
shutil.rmtree('migrations')
except OSError:
pass
try:
shutil.rmtree('temp_folder')
except OSError:
pass
def test_alembic_version(self):
from flask_migrate import alembic_version
self.assertEqual(len(alembic_version), 3)
for v in alembic_version:
self.assertTrue(isinstance(v, int))
def test_migrate_upgrade(self):
(o, e, s) = run_cmd(sys.executable + ' app.py db init')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(sys.executable + ' app.py db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(sys.executable + ' app.py db upgrade')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(sys.executable + ' app.py add')
self.assertTrue(s == 0)
def test_custom_directory(self):
(o, e, s) = run_cmd(
sys.executable + ' app_custom_directory.py db init')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(
sys.executable + ' app_custom_directory.py db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(
sys.executable + ' app_custom_directory.py db upgrade')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(sys.executable + ' app_custom_directory.py add')
self.assertTrue(s == 0)
def test_custom_directory_path(self):
(o, e, s) = run_cmd(
sys.executable + ' app_custom_directory_path.py db init')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(
sys.executable + ' app_custom_directory_path.py db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(
sys.executable + ' app_custom_directory_path.py db upgrade')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(
sys.executable + ' app_custom_directory_path.py add')
self.assertTrue(s == 0)
def test_compare_type(self):
(o, e, s) = run_cmd(sys.executable + ' app_compare_type1.py db init')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(
sys.executable + ' app_compare_type1.py db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(
sys.executable + ' app_compare_type1.py db upgrade')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(
sys.executable + ' app_compare_type2.py db migrate')
self.assertTrue(s == 0)
self.assertTrue(b'Detected type change from VARCHAR(length=128) '
b'to String(length=10)' in e)
|
import unittest
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import sbd_instance_segmentation_label_names
from chainercv.datasets import SBDInstanceSegmentationDataset
from chainercv.utils import assert_is_instance_segmentation_dataset
try:
import scipy # NOQA
_available = True
except ImportError:
_available = False
@testing.parameterize(
{'split': 'train'},
{'split': 'val'},
{'split': 'trainval'}
)
@unittest.skipUnless(_available, 'SciPy is not installed')
class TestSBDInstanceSegmentationDataset(unittest.TestCase):
def setUp(self):
self.dataset = SBDInstanceSegmentationDataset(split=self.split)
@attr.slow
def test_sbd_instance_segmentation_dataset(self):
assert_is_instance_segmentation_dataset(
self.dataset,
len(sbd_instance_segmentation_label_names),
n_example=10)
testing.run_module(__name__, __file__)
|
import pytest
from plumbum import local, SshMachine
from plumbum.path.utils import copy, delete, move
from plumbum._testtools import skip_on_windows
@skip_on_windows
class TestUtils:
def test_copy_move_delete(self):
from plumbum.cmd import touch
with local.tempdir() as dir:
(dir / "orog").mkdir()
(dir / "orog" / "rec").mkdir()
for i in range(20):
touch(dir / "orog" / ("f%d.txt" % (i,)))
            for i in range(20, 40):
touch(dir / "orog" / "rec" / ("f%d.txt" % (i,)))
move(dir / "orog", dir / "orig")
s1 = sorted(f.name for f in (dir / "orig").walk())
copy(dir / "orig", dir / "dup")
s2 = sorted(f.name for f in (dir / "dup").walk())
assert s1 == s2
with SshMachine("localhost") as rem:
with rem.tempdir() as dir2:
copy(dir / "orig", dir2)
s3 = sorted(f.name for f in (dir2 / "orig").walk())
assert s1 == s3
copy(dir2 / "orig", dir2 / "dup")
s4 = sorted(f.name for f in (dir2 / "dup").walk())
assert s1 == s4
copy(dir2 / "dup", dir / "dup2")
s5 = sorted(f.name for f in (dir / "dup2").walk())
assert s1 == s5
with SshMachine("localhost") as rem2:
with rem2.tempdir() as dir3:
copy(dir2 / "dup", dir3)
s6 = sorted(f.name for f in (dir3 / "dup").walk())
assert s1 == s6
move(dir3 / "dup", dir / "superdup")
assert not (dir3 / "dup").exists()
s7 = sorted(f.name for f in (dir / "superdup").walk())
assert s1 == s7
# test rm
delete(dir)
|
import logging
import numpy as np
import networkx as nx
from pgmpy.models import BayesianModel
from pgmpy.factors.continuous import LinearGaussianCPD
from pgmpy.factors.distributions import GaussianDistribution
class LinearGaussianBayesianNetwork(BayesianModel):
"""
A Linear Gaussian Bayesian Network is a Bayesian Network, all
of whose variables are continuous, and where all of the CPDs
are linear Gaussians.
An important result is that the linear Gaussian Bayesian Networks
are an alternative representation for the class of multivariate
Gaussian distributions.
"""
def add_cpds(self, *cpds):
"""
Add linear Gaussian CPD (Conditional Probability Distribution)
to the Bayesian Model.
Parameters
----------
cpds : instances of LinearGaussianCPD
List of LinearGaussianCPDs which will be associated with the model
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', [1], 4)
>>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
>>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> for cpd in model.cpds:
... print(cpd)
P(x1) = N(1; 4)
        P(x2 | x1) = N(0.5*x1 + -5; 4)
        P(x3 | x2) = N(-1*x2 + 4; 3)
"""
for cpd in cpds:
if not isinstance(cpd, LinearGaussianCPD):
raise ValueError("Only LinearGaussianCPD can be added.")
if set(cpd.variables) - set(cpd.variables).intersection(set(self.nodes())):
raise ValueError("CPD defined on variable not in the model", cpd)
for prev_cpd_index in range(len(self.cpds)):
if self.cpds[prev_cpd_index].variable == cpd.variable:
logging.warning(f"Replacing existing CPD for {cpd.variable}")
self.cpds[prev_cpd_index] = cpd
break
else:
self.cpds.append(cpd)
def get_cpds(self, node=None):
"""
Returns the cpd of the node. If node is not specified returns all the CPDs
that have been added till now to the graph
        Parameters
        ----------
node: any hashable python object (optional)
The node whose CPD we want. If node not specified returns all the
CPDs added to the model.
Returns
-------
A list of linear Gaussian CPDs.
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', [1], 4)
>>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
>>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> model.get_cpds()
"""
return super(LinearGaussianBayesianNetwork, self).get_cpds(node)
def remove_cpds(self, *cpds):
"""
Removes the cpds that are provided in the argument.
Parameters
----------
*cpds: LinearGaussianCPD object
A LinearGaussianCPD object on any subset of the variables
of the model which is to be associated with the model.
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', [1], 4)
>>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
>>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> for cpd in model.get_cpds():
... print(cpd)
P(x1) = N(1; 4)
        P(x2 | x1) = N(0.5*x1 + -5; 4)
        P(x3 | x2) = N(-1*x2 + 4; 3)
>>> model.remove_cpds(cpd2, cpd3)
>>> for cpd in model.get_cpds():
... print(cpd)
P(x1) = N(1; 4)
"""
return super(LinearGaussianBayesianNetwork, self).remove_cpds(*cpds)
def to_joint_gaussian(self):
"""
The linear Gaussian Bayesian Networks are an alternative
representation for the class of multivariate Gaussian distributions.
This method returns an equivalent joint Gaussian distribution.
Returns
-------
GaussianDistribution: An equivalent joint Gaussian
distribution for the network.
        References
        ----------
Section 7.2, Example 7.3,
Probabilistic Graphical Models, Principles and Techniques
Examples
--------
>>> from pgmpy.models import LinearGaussianBayesianNetwork
>>> from pgmpy.factors.continuous import LinearGaussianCPD
>>> model = LinearGaussianBayesianNetwork([('x1', 'x2'), ('x2', 'x3')])
>>> cpd1 = LinearGaussianCPD('x1', [1], 4)
>>> cpd2 = LinearGaussianCPD('x2', [-5, 0.5], 4, ['x1'])
>>> cpd3 = LinearGaussianCPD('x3', [4, -1], 3, ['x2'])
>>> model.add_cpds(cpd1, cpd2, cpd3)
>>> jgd = model.to_joint_gaussian()
>>> jgd.variables
['x1', 'x2', 'x3']
>>> jgd.mean
array([[ 1. ],
[-4.5],
[ 8.5]])
>>> jgd.covariance
array([[ 4., 2., -2.],
[ 2., 5., -5.],
[-2., -5., 8.]])
"""
variables = list(nx.topological_sort(self))
mean = np.zeros(len(variables))
covariance = np.zeros((len(variables), len(variables)))
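        # Walk the nodes in topological order: each node's mean is its
        # intercept plus the coefficient-weighted means of its parents,
        # and its variance is the CPD noise plus the propagated parent
        # variances (Koller & Friedman, Section 7.2).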
for node_idx in range(len(variables)):
cpd = self.get_cpds(variables[node_idx])
mean[node_idx] = (
sum(
[
coeff * mean[variables.index(parent)]
                        for coeff, parent in zip(cpd.mean[1:], cpd.evidence)  # skip the intercept at index 0
]
)
+ cpd.mean[0]
)
covariance[node_idx, node_idx] = (
sum(
[
coeff
* coeff
* covariance[variables.index(parent), variables.index(parent)]
                        for coeff, parent in zip(cpd.mean[1:], cpd.evidence)  # skip the intercept at index 0
]
)
+ cpd.variance
)
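        # Second pass for the off-diagonal entries:
        # Cov(i, j) = sum_k coeff_jk * Cov(i, parent_k(j)),
        # mirroring the symmetric entry whenever it is already filled in.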
for node_i_idx in range(len(variables)):
for node_j_idx in range(len(variables)):
if covariance[node_j_idx, node_i_idx] != 0:
covariance[node_i_idx, node_j_idx] = covariance[
node_j_idx, node_i_idx
]
else:
cpd_j = self.get_cpds(variables[node_j_idx])
covariance[node_i_idx, node_j_idx] = sum(
[
coeff * covariance[node_i_idx, variables.index(parent)]
                            for coeff, parent in zip(cpd_j.mean[1:], cpd_j.evidence)  # skip the intercept
]
)
return GaussianDistribution(variables, mean, covariance)
def check_model(self):
"""
Checks the model for various errors. This method checks for the following
error -
* Checks if the CPDs associated with nodes are consistent with their parents.
Returns
-------
check: boolean
True if all the checks pass.
"""
for node in self.nodes():
cpd = self.get_cpds(node=node)
if isinstance(cpd, LinearGaussianCPD):
if set(cpd.evidence) != set(self.get_parents(node)):
raise ValueError(
"CPD associated with %s doesn't have "
"proper parents associated with it." % node
)
return True
def get_cardinality(self, node):
"""
Cardinality is not defined for continuous variables.
"""
raise ValueError("Cardinality is not defined for continuous variables.")
def fit(
self, data, estimator=None, state_names=[], complete_samples_only=True, **kwargs
):
"""
For now, fit method has not been implemented for LinearGaussianBayesianNetwork.
"""
raise NotImplementedError(
"fit method has not been implemented for LinearGaussianBayesianNetwork."
)
def predict(self, data):
"""
For now, predict method has not been implemented for LinearGaussianBayesianNetwork.
"""
raise NotImplementedError(
"predict method has not been implemented for LinearGaussianBayesianNetwork."
)
def to_markov_model(self):
"""
For now, to_markov_model method has not been implemented for LinearGaussianBayesianNetwork.
"""
raise NotImplementedError(
"to_markov_model method has not been implemented for LinearGaussianBayesianNetwork."
)
def is_imap(self, JPD):
"""
For now, is_imap method has not been implemented for LinearGaussianBayesianNetwork.
"""
raise NotImplementedError(
"is_imap method has not been implemented for LinearGaussianBayesianNetwork."
)
|
import typing
from pathlib import Path
import pandas as pd
import keras
import matchzoo
_url = "https://nlp.stanford.edu/projects/snli/snli_1.0.zip"
def load_data(
stage: str = 'train',
task: str = 'classification',
target_label: str = 'entailment',
return_classes: bool = False
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load SNLI data.
:param stage: One of `train`, `dev`, and `test`. (default: `train`)
:param task: Could be one of `ranking`, `classification` or a
        :class:`matchzoo.engine.BaseTask` instance. (default: `classification`)
    :param target_label: If `ranking`, choose one of `entailment`,
`contradiction`, `neutral`, and `-` as the positive label.
(default: `entailment`)
:param return_classes: `True` to return classes for classification task,
`False` otherwise.
    :return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f'snli_1.0_{stage}.txt')
data_pack = _read_data(file_path)
if task == 'ranking':
task = matchzoo.tasks.Ranking()
if task == 'classification':
task = matchzoo.tasks.Classification()
if isinstance(task, matchzoo.tasks.Ranking):
if target_label not in ['entailment', 'contradiction', 'neutral', '-']:
raise ValueError(f"{target_label} is not a valid target label."
f"Must be one of `entailment`, `contradiction`, "
f"`neutral` and `-`.")
binary = (data_pack.relation['label'] == target_label).astype(float)
data_pack.relation['label'] = binary
return data_pack
elif isinstance(task, matchzoo.tasks.Classification):
classes = ['entailment', 'contradiction', 'neutral', '-']
label = data_pack.relation['label'].apply(classes.index)
data_pack.relation['label'] = label
data_pack.one_hot_encode_label(num_classes=4, inplace=True)
if return_classes:
return data_pack, classes
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def _download_data():
ref_path = keras.utils.data_utils.get_file(
'snli', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='snli'
)
return Path(ref_path).parent.joinpath('snli_1.0')
def _read_data(path):
table = pd.read_csv(path, sep='\t')
df = pd.DataFrame({
'text_left': table['sentence1'],
'text_right': table['sentence2'],
'label': table['gold_label']
})
df = df.dropna(axis=0, how='any').reset_index(drop=True)
return matchzoo.pack(df)
|
from collections import deque
import os
from queue import Empty, LifoQueue as _LifoQueue
from . import exceptions
from .utils.compat import register_after_fork
from .utils.functional import lazy
def _after_fork_cleanup_resource(resource):
try:
resource.force_close_all()
except Exception:
pass
class LifoQueue(_LifoQueue):
"""Last in first out version of Queue."""
def _init(self, maxsize):
self.queue = deque()
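        # A deque allows O(1) pops from the left, which _shrink_down uses to
        # drop the least recently used resources first.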
class Resource:
"""Pool of resources."""
LimitExceeded = exceptions.LimitExceeded
close_after_fork = False
def __init__(self, limit=None, preload=None, close_after_fork=None):
self._limit = limit
self.preload = preload or 0
self._closed = False
self.close_after_fork = (
close_after_fork
if close_after_fork is not None else self.close_after_fork
)
self._resource = LifoQueue()
self._dirty = set()
if self.close_after_fork and register_after_fork is not None:
register_after_fork(self, _after_fork_cleanup_resource)
self.setup()
def setup(self):
raise NotImplementedError('subclass responsibility')
def _add_when_empty(self):
if self.limit and len(self._dirty) >= self.limit:
raise self.LimitExceeded(self.limit)
        # All resources are taken: put a new one on the queue and
        # retry the get, so that the first caller in line
        # receives the new resource.
self._resource.put_nowait(self.new())
def acquire(self, block=False, timeout=None):
"""Acquire resource.
Arguments:
block (bool): If the limit is exceeded,
then block until there is an available item.
timeout (float): Timeout to wait
if ``block`` is true. Default is :const:`None` (forever).
Raises:
LimitExceeded: if block is false and the limit has been exceeded.
"""
if self._closed:
raise RuntimeError('Acquire on closed pool')
if self.limit:
while 1:
try:
R = self._resource.get(block=block, timeout=timeout)
except Empty:
self._add_when_empty()
else:
try:
R = self.prepare(R)
except BaseException:
if isinstance(R, lazy):
# not evaluated yet, just put it back
self._resource.put_nowait(R)
else:
                            # evaluated, so we must try to release/close it first.
self.release(R)
raise
self._dirty.add(R)
break
else:
R = self.prepare(self.new())
def release():
"""Release resource so it can be used by another thread.
Warnings:
The caller is responsible for discarding the object,
and to never use the resource again. A new resource must
be acquired if so needed.
"""
self.release(R)
R.release = release
return R
def prepare(self, resource):
return resource
def close_resource(self, resource):
resource.close()
def release_resource(self, resource):
pass
def replace(self, resource):
"""Replace existing resource with a new instance.
This can be used in case of defective resources.
"""
if self.limit:
self._dirty.discard(resource)
self.close_resource(resource)
def release(self, resource):
if self.limit:
self._dirty.discard(resource)
self._resource.put_nowait(resource)
self.release_resource(resource)
else:
self.close_resource(resource)
def collect_resource(self, resource):
pass
def force_close_all(self):
"""Close and remove all resources in the pool (also those in use).
Used to close resources from parent processes after fork
(e.g. sockets/connections).
"""
if self._closed:
return
self._closed = True
dirty = self._dirty
resource = self._resource
while 1: # - acquired
try:
dres = dirty.pop()
except KeyError:
break
try:
self.collect_resource(dres)
except AttributeError: # Issue #78
pass
while 1: # - available
# deque supports '.clear', but lists do not, so for that
# reason we use pop here, so that the underlying object can
# be any object supporting '.pop' and '.append'.
try:
res = resource.queue.pop()
except IndexError:
break
try:
self.collect_resource(res)
except AttributeError:
pass # Issue #78
def resize(self, limit, force=False, ignore_errors=False, reset=False):
prev_limit = self._limit
if (self._dirty and 0 < limit < self._limit) and not ignore_errors:
if not force:
raise RuntimeError(
"Can't shrink pool when in use: was={} now={}".format(
self._limit, limit))
reset = True
self._limit = limit
if reset:
try:
self.force_close_all()
except Exception:
pass
self.setup()
if limit < prev_limit:
self._shrink_down(collect=limit > 0)
def _shrink_down(self, collect=True):
class Noop:
def __enter__(self):
pass
def __exit__(self, type, value, traceback):
pass
resource = self._resource
        # Items to the left are least recently used, so we remove those first.
with getattr(resource, 'mutex', Noop()):
while len(resource.queue) > self.limit:
R = resource.queue.popleft()
if collect:
self.collect_resource(R)
@property
def limit(self):
return self._limit
@limit.setter
def limit(self, limit):
self.resize(limit)
if os.environ.get('KOMBU_DEBUG_POOL'): # pragma: no cover
_orig_acquire = acquire
_orig_release = release
_next_resource_id = 0
def acquire(self, *args, **kwargs): # noqa
import traceback
id = self._next_resource_id = self._next_resource_id + 1
print(f'+{id} ACQUIRE {self.__class__.__name__}')
r = self._orig_acquire(*args, **kwargs)
r._resource_id = id
print(f'-{id} ACQUIRE {self.__class__.__name__}')
if not hasattr(r, 'acquired_by'):
r.acquired_by = []
r.acquired_by.append(traceback.format_stack())
return r
def release(self, resource): # noqa
id = resource._resource_id
print(f'+{id} RELEASE {self.__class__.__name__}')
r = self._orig_release(resource)
print(f'-{id} RELEASE {self.__class__.__name__}')
self._next_resource_id -= 1
return r
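# A minimal usage sketch: `Resource` is abstract, so a subclass must supply
# `setup()` and a `new()` factory. The demo classes below are hypothetical;
# they mirror the pre-fill-with-`lazy` pattern used by kombu's real pools so
# that `acquire(block=True)` does not block while the pool is under its limit.
if __name__ == '__main__':  # pragma: no cover
    class _DemoConnection:
        # Hypothetical resource; stands in for e.g. a socket.
        def close(self):
            pass

    class DemoPool(Resource):
        def setup(self):
            # Pre-fill the queue up to the limit with lazily created
            # placeholders.
            if self.limit:
                for _ in range(self.limit):
                    self._resource.put_nowait(lazy(self.new))

        def new(self):
            return _DemoConnection()

        def prepare(self, resource):
            # Evaluate lazy placeholders on first acquisition.
            return resource() if callable(resource) else resource

    pool = DemoPool(limit=2)
    conn = pool.acquire(block=True)
    try:
        pass  # ... use the connection ...
    finally:
        conn.release()  # hands the resource back to the pool for reuse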
|
import os
import unittest
import mock
from perfkitbenchmarker.linux_benchmarks import stress_ng_benchmark
class StressngTestCase(unittest.TestCase):
def setUp(self):
super(StressngTestCase, self).setUp()
p = mock.patch(stress_ng_benchmark.__name__)
p.start()
self.addCleanup(p.stop)
path = os.path.join(
os.path.dirname(__file__), '../data', 'stress_ng_output.txt')
with open(path) as fp:
self.contents = fp.read()
def testParseStressngResult(self):
metadata = {
'duration_sec': 10,
'threads': 16
}
samples = []
samples.append(stress_ng_benchmark._ParseStressngResult(
metadata, self.contents))
# Test metadata
metadata = samples[0].metadata
# Test metric and value
expected = {
'context': 4485.820000
}
for sample in samples:
self.assertEqual(expected[sample.metric], sample.value)
self.assertEqual(10, sample.metadata['duration_sec'])
self.assertEqual(16, sample.metadata['threads'])
self.assertEqual(len(samples), 1)
def testGeoMean(self):
floats = [1.0, 3.0, 5.0]
self.assertAlmostEqual(stress_ng_benchmark._GeoMeanOverflow(floats),
2.466212074)
if __name__ == '__main__':
unittest.main()
|
import json
import unittest
from aiohttp.hdrs import CONTENT_TYPE
import defusedxml.ElementTree as ET
import requests
from homeassistant import const, setup
from homeassistant.components import emulated_hue
from homeassistant.components.emulated_hue import upnp
from homeassistant.const import CONTENT_TYPE_JSON, HTTP_OK
from tests.common import get_test_home_assistant, get_test_instance_port
HTTP_SERVER_PORT = get_test_instance_port()
BRIDGE_SERVER_PORT = get_test_instance_port()
BRIDGE_URL_BASE = f"http://127.0.0.1:{BRIDGE_SERVER_PORT}" + "{}"
JSON_HEADERS = {CONTENT_TYPE: const.CONTENT_TYPE_JSON}
class MockTransport:
"""Mock asyncio transport."""
def __init__(self):
"""Create a place to store the sends."""
self.sends = []
def sendto(self, response, addr):
"""Mock sendto."""
self.sends.append((response, addr))
class TestEmulatedHue(unittest.TestCase):
"""Test the emulated Hue component."""
hass = None
@classmethod
def setUpClass(cls):
"""Set up the class."""
cls.hass = hass = get_test_home_assistant()
setup.setup_component(
hass,
emulated_hue.DOMAIN,
{emulated_hue.DOMAIN: {emulated_hue.CONF_LISTEN_PORT: BRIDGE_SERVER_PORT}},
)
cls.hass.start()
@classmethod
def tearDownClass(cls):
"""Stop the class."""
cls.hass.stop()
def test_upnp_discovery_basic(self):
"""Tests the UPnP basic discovery response."""
upnp_responder_protocol = upnp.UPNPResponderProtocol(
None, None, "192.0.2.42", 8080
)
mock_transport = MockTransport()
upnp_responder_protocol.transport = mock_transport
"""Original request emitted by the Hue Bridge v1 app."""
request = """M-SEARCH * HTTP/1.1
HOST:239.255.255.250:1900
ST:ssdp:all
Man:"ssdp:discover"
MX:3
"""
encoded_request = request.replace("\n", "\r\n").encode("utf-8")
upnp_responder_protocol.datagram_received(encoded_request, 1234)
expected_response = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://192.0.2.42:8080/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/1.16.0
hue-bridgeid: 001788FFFE23BFC2
ST: urn:schemas-upnp-org:device:basic:1
USN: uuid:2f402f80-da50-11e1-9b23-001788255acc
"""
expected_send = expected_response.replace("\n", "\r\n").encode("utf-8")
assert mock_transport.sends == [(expected_send, 1234)]
def test_upnp_discovery_rootdevice(self):
"""Tests the UPnP rootdevice discovery response."""
upnp_responder_protocol = upnp.UPNPResponderProtocol(
None, None, "192.0.2.42", 8080
)
mock_transport = MockTransport()
upnp_responder_protocol.transport = mock_transport
"""Original request emitted by Busch-Jaeger free@home SysAP."""
request = """M-SEARCH * HTTP/1.1
HOST: 239.255.255.250:1900
MAN: "ssdp:discover"
MX: 40
ST: upnp:rootdevice
"""
encoded_request = request.replace("\n", "\r\n").encode("utf-8")
upnp_responder_protocol.datagram_received(encoded_request, 1234)
expected_response = """HTTP/1.1 200 OK
CACHE-CONTROL: max-age=60
EXT:
LOCATION: http://192.0.2.42:8080/description.xml
SERVER: FreeRTOS/6.0.5, UPnP/1.0, IpBridge/1.16.0
hue-bridgeid: 001788FFFE23BFC2
ST: upnp:rootdevice
USN: uuid:2f402f80-da50-11e1-9b23-001788255acc::upnp:rootdevice
"""
expected_send = expected_response.replace("\n", "\r\n").encode("utf-8")
assert mock_transport.sends == [(expected_send, 1234)]
def test_upnp_no_response(self):
"""Tests the UPnP does not response on an invalid request."""
upnp_responder_protocol = upnp.UPNPResponderProtocol(
None, None, "192.0.2.42", 8080
)
mock_transport = MockTransport()
upnp_responder_protocol.transport = mock_transport
"""Original request emitted by the Hue Bridge v1 app."""
request = """INVALID * HTTP/1.1
HOST:239.255.255.250:1900
ST:ssdp:all
Man:"ssdp:discover"
MX:3
"""
encoded_request = request.replace("\n", "\r\n").encode("utf-8")
upnp_responder_protocol.datagram_received(encoded_request, 1234)
assert mock_transport.sends == []
def test_description_xml(self):
"""Test the description."""
result = requests.get(BRIDGE_URL_BASE.format("/description.xml"), timeout=5)
assert result.status_code == HTTP_OK
assert "text/xml" in result.headers["content-type"]
# Make sure the XML is parsable
try:
root = ET.fromstring(result.text)
ns = {"s": "urn:schemas-upnp-org:device-1-0"}
assert root.find("./s:device/s:serialNumber", ns).text == "001788FFFE23BFC2"
except: # noqa: E722 pylint: disable=bare-except
self.fail("description.xml is not valid XML!")
def test_create_username(self):
"""Test the creation of an username."""
request_json = {"devicetype": "my_device"}
result = requests.post(
BRIDGE_URL_BASE.format("/api"), data=json.dumps(request_json), timeout=5
)
assert result.status_code == HTTP_OK
assert CONTENT_TYPE_JSON in result.headers["content-type"]
resp_json = result.json()
success_json = resp_json[0]
assert "success" in success_json
assert "username" in success_json["success"]
def test_unauthorized_view(self):
"""Test unauthorized view."""
request_json = {"devicetype": "my_device"}
result = requests.get(
BRIDGE_URL_BASE.format("/api/unauthorized"),
data=json.dumps(request_json),
timeout=5,
)
assert result.status_code == HTTP_OK
assert CONTENT_TYPE_JSON in result.headers["content-type"]
resp_json = result.json()
assert len(resp_json) == 1
success_json = resp_json[0]
assert len(success_json) == 1
assert "error" in success_json
error_json = success_json["error"]
assert len(error_json) == 3
assert "/" in error_json["address"]
assert "unauthorized user" in error_json["description"]
assert "1" in error_json["type"]
    def test_invalid_username_request(self):
        """Test that a request without a devicetype is rejected."""
request_json = {"invalid_key": "my_device"}
result = requests.post(
BRIDGE_URL_BASE.format("/api"), data=json.dumps(request_json), timeout=5
)
assert result.status_code == 400
|
from unittest.mock import Mock
import pandas as pd
import pytest
import pytz
from qstrader.portcon.order_sizer.long_short import (
LongShortLeveragedOrderSizer
)
@pytest.mark.parametrize(
"gross_leverage,expected",
[
(-1.0, None),
(0.0, None),
(0.01, 0.01),
(0.99, 0.99),
(1.0, 1.0),
(2.0, 2.0),
(5.0, 5.0),
]
)
def test_check_set_gross_leverage(gross_leverage, expected):
"""
Checks that the gross leverage falls into the appropriate
range and raises otherwise.
"""
broker = Mock()
broker_portfolio_id = "1234"
data_handler = Mock()
if expected is None:
with pytest.raises(ValueError):
order_sizer = LongShortLeveragedOrderSizer(
broker, broker_portfolio_id, data_handler, gross_leverage
)
else:
order_sizer = LongShortLeveragedOrderSizer(
broker, broker_portfolio_id, data_handler, gross_leverage
)
assert order_sizer.gross_leverage == expected
@pytest.mark.parametrize(
"weights,gross_leverage,expected",
[
(
{'EQ:ABC': 0.2, 'EQ:DEF': 0.6},
1.0,
{'EQ:ABC': 0.25, 'EQ:DEF': 0.75}
),
(
{'EQ:ABC': 0.5, 'EQ:DEF': 0.5},
1.0,
{'EQ:ABC': 0.5, 'EQ:DEF': 0.5}
),
(
{'EQ:ABC': 0.01, 'EQ:DEF': 0.01},
1.0,
{'EQ:ABC': 0.5, 'EQ:DEF': 0.5}
),
(
{'EQ:ABC': 0.2, 'EQ:DEF': 0.6},
2.0,
{'EQ:ABC': 0.5, 'EQ:DEF': 1.5}
),
(
{'EQ:ABC': 0.2, 'EQ:DEF': 0.6},
0.5,
{'EQ:ABC': 0.125, 'EQ:DEF': 0.375}
),
(
{'EQ:ABC': 0.1, 'EQ:DEF': 0.3, 'EQ:GHI': 0.02, 'EQ:JKL': 0.8},
1.0,
{'EQ:ABC': 0.1 / 1.22, 'EQ:DEF': 0.3 / 1.22, 'EQ:GHI': 0.02 / 1.22, 'EQ:JKL': 0.8 / 1.22}
),
(
{'EQ:ABC': 0.1, 'EQ:DEF': 0.3, 'EQ:GHI': 0.02, 'EQ:JKL': 0.8},
3.0,
{'EQ:ABC': 0.3 / 1.22, 'EQ:DEF': 0.9 / 1.22, 'EQ:GHI': 0.06 / 1.22, 'EQ:JKL': 2.4 / 1.22}
),
(
{'EQ:ABC': 0.0, 'EQ:DEF': 0.0},
1.0,
{'EQ:ABC': 0.0, 'EQ:DEF': 0.0}
),
(
{'EQ:ABC': -0.2, 'EQ:DEF': 0.6},
1.0,
{'EQ:ABC': -0.25, 'EQ:DEF': 0.75}
),
(
{'EQ:ABC': -0.2, 'EQ:DEF': 0.6},
2.0,
{'EQ:ABC': -0.5, 'EQ:DEF': 1.5}
),
(
{'EQ:ABC': -0.1, 'EQ:DEF': 0.3, 'EQ:GHI': 0.02, 'EQ:JKL': -0.8},
3.0,
{'EQ:ABC': -0.3 / 1.22, 'EQ:DEF': 0.9 / 1.22, 'EQ:GHI': 0.06 / 1.22, 'EQ:JKL': -2.4 / 1.22}
)
]
)
def test_normalise_weights(weights, gross_leverage, expected):
"""
Checks that the _normalise_weights method rescales the weights
for the correct gross exposure and leverage.
"""
broker = Mock()
broker_portfolio_id = "1234"
data_handler = Mock()
order_sizer = LongShortLeveragedOrderSizer(
broker, broker_portfolio_id, data_handler, gross_leverage
)
if expected is None:
with pytest.raises(ValueError):
result = order_sizer._normalise_weights(weights)
else:
result = order_sizer._normalise_weights(weights)
assert result == pytest.approx(expected)
@pytest.mark.parametrize(
"total_equity,gross_leverage,weights,asset_prices,expected",
[
(
1e6,
1.0,
{'EQ:SPY': 0.5, 'EQ:AGG': 0.5},
{'EQ:SPY': 250.0, 'EQ:AGG': 150.0},
{'EQ:SPY': {'quantity': 2000}, 'EQ:AGG': {'quantity': 3333}}
),
(
325000.0,
1.5,
{'EQ:SPY': 0.6, 'EQ:AGG': 0.4},
{'EQ:SPY': 352.0, 'EQ:AGG': 178.0},
{'EQ:SPY': {'quantity': 830}, 'EQ:AGG': {'quantity': 1095}}
),
(
687523.0,
2.0,
{'EQ:SPY': 0.05, 'EQ:AGG': 0.328, 'EQ:TLT': 0.842, 'EQ:GLD': 0.9113},
{'EQ:SPY': 1036.23, 'EQ:AGG': 456.55, 'EQ:TLT': 987.63, 'EQ:GLD': 14.76},
{
'EQ:SPY': {'quantity': 31},
'EQ:AGG': {'quantity': 463},
'EQ:TLT': {'quantity': 550},
'EQ:GLD': {'quantity': 39833},
}
),
(
687523.0,
2.0,
{'EQ:SPY': 0.05, 'EQ:AGG': -0.328, 'EQ:TLT': -0.842, 'EQ:GLD': 0.9113},
{'EQ:SPY': 1036.23, 'EQ:AGG': 456.55, 'EQ:TLT': 987.63, 'EQ:GLD': 14.76},
{
'EQ:SPY': {'quantity': 31},
'EQ:AGG': {'quantity': -463},
'EQ:TLT': {'quantity': -550},
'EQ:GLD': {'quantity': 39833},
}
)
]
)
def test_call(total_equity, gross_leverage, weights, asset_prices, expected):
"""
Checks that the __call__ method correctly outputs the target
portfolio from a given set of weights and a timestamp.
"""
dt = pd.Timestamp('2019-01-01 15:00:00', tz=pytz.utc)
broker_portfolio_id = "1234"
broker = Mock()
broker.get_portfolio_total_equity.return_value = total_equity
broker.fee_model.calc_total_cost.return_value = 0.0
data_handler = Mock()
    data_handler.get_asset_latest_ask_price.side_effect = lambda dt, asset: asset_prices[asset]
order_sizer = LongShortLeveragedOrderSizer(
broker, broker_portfolio_id, data_handler, gross_leverage
)
result = order_sizer(dt, weights)
assert result == expected
|
import asyncio
from unittest import mock
import pytest
import zigpy.profiles.zha
import zigpy.types as t
import zigpy.zcl.clusters
import homeassistant.components.zha.core.channels as zha_channels
import homeassistant.components.zha.core.channels.base as base_channels
import homeassistant.components.zha.core.const as zha_const
import homeassistant.components.zha.core.registries as registries
from .common import get_zha_gateway, make_zcl_header
import tests.async_mock
@pytest.fixture
def ieee():
"""IEEE fixture."""
return t.EUI64.deserialize(b"ieeeaddr")[0]
@pytest.fixture
def nwk():
"""NWK fixture."""
return t.NWK(0xBEEF)
@pytest.fixture
async def zha_gateway(hass, setup_zha):
"""Return ZhaGateway fixture."""
await setup_zha()
return get_zha_gateway(hass)
@pytest.fixture
def channel_pool():
"""Endpoint Channels fixture."""
ch_pool_mock = mock.MagicMock(spec_set=zha_channels.ChannelPool)
type(ch_pool_mock).skip_configuration = mock.PropertyMock(return_value=False)
ch_pool_mock.id = 1
return ch_pool_mock
@pytest.fixture
def poll_control_ch(channel_pool, zigpy_device_mock):
"""Poll control channel fixture."""
cluster_id = zigpy.zcl.clusters.general.PollControl.cluster_id
zigpy_dev = zigpy_device_mock(
{1: {"in_clusters": [cluster_id], "out_clusters": [], "device_type": 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
cluster = zigpy_dev.endpoints[1].in_clusters[cluster_id]
channel_class = registries.ZIGBEE_CHANNEL_REGISTRY.get(cluster_id)
return channel_class(cluster, channel_pool)
@pytest.fixture
async def poll_control_device(zha_device_restored, zigpy_device_mock):
"""Poll control device fixture."""
cluster_id = zigpy.zcl.clusters.general.PollControl.cluster_id
zigpy_dev = zigpy_device_mock(
{1: {"in_clusters": [cluster_id], "out_clusters": [], "device_type": 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
zha_device = await zha_device_restored(zigpy_dev)
return zha_device
@pytest.mark.parametrize(
"cluster_id, bind_count, attrs",
[
(0x0000, 1, {}),
(0x0001, 1, {"battery_voltage", "battery_percentage_remaining"}),
(0x0003, 1, {}),
(0x0004, 1, {}),
(0x0005, 1, {}),
(0x0006, 1, {"on_off"}),
(0x0007, 1, {}),
(0x0008, 1, {"current_level"}),
(0x0009, 1, {}),
(0x000C, 1, {"present_value"}),
(0x000D, 1, {"present_value"}),
(0x000E, 1, {"present_value"}),
(0x000D, 1, {"present_value"}),
(0x0010, 1, {"present_value"}),
(0x0011, 1, {"present_value"}),
(0x0012, 1, {"present_value"}),
(0x0013, 1, {"present_value"}),
(0x0014, 1, {"present_value"}),
(0x0015, 1, {}),
(0x0016, 1, {}),
(0x0019, 1, {}),
(0x001A, 1, {}),
(0x001B, 1, {}),
(0x0020, 1, {}),
(0x0021, 1, {}),
(0x0101, 1, {"lock_state"}),
(0x0202, 1, {"fan_mode"}),
(0x0300, 1, {"current_x", "current_y", "color_temperature"}),
(0x0400, 1, {"measured_value"}),
(0x0401, 1, {"level_status"}),
(0x0402, 1, {"measured_value"}),
(0x0403, 1, {"measured_value"}),
(0x0404, 1, {"measured_value"}),
(0x0405, 1, {"measured_value"}),
(0x0406, 1, {"occupancy"}),
(0x0702, 1, {"instantaneous_demand"}),
(0x0B04, 1, {"active_power"}),
(0x1000, 1, {}),
],
)
async def test_in_channel_config(
cluster_id, bind_count, attrs, channel_pool, zigpy_device_mock, zha_gateway
):
"""Test ZHA core channel configuration for input clusters."""
zigpy_dev = zigpy_device_mock(
{1: {"in_clusters": [cluster_id], "out_clusters": [], "device_type": 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
cluster = zigpy_dev.endpoints[1].in_clusters[cluster_id]
channel_class = registries.ZIGBEE_CHANNEL_REGISTRY.get(
cluster_id, base_channels.ZigbeeChannel
)
channel = channel_class(cluster, channel_pool)
await channel.async_configure()
assert cluster.bind.call_count == bind_count
assert cluster.configure_reporting.call_count == len(attrs)
reported_attrs = {attr[0][0] for attr in cluster.configure_reporting.call_args_list}
assert set(attrs) == reported_attrs
@pytest.mark.parametrize(
"cluster_id, bind_count",
[
(0x0000, 1),
(0x0001, 1),
(0x0003, 1),
(0x0004, 1),
(0x0005, 1),
(0x0006, 1),
(0x0007, 1),
(0x0008, 1),
(0x0009, 1),
(0x0015, 1),
(0x0016, 1),
(0x0019, 1),
(0x001A, 1),
(0x001B, 1),
(0x0020, 1),
(0x0021, 1),
(0x0101, 1),
(0x0202, 1),
(0x0300, 1),
(0x0400, 1),
(0x0402, 1),
(0x0403, 1),
(0x0405, 1),
(0x0406, 1),
(0x0702, 1),
(0x0B04, 1),
(0x1000, 1),
],
)
async def test_out_channel_config(
cluster_id, bind_count, channel_pool, zigpy_device_mock, zha_gateway
):
"""Test ZHA core channel configuration for output clusters."""
zigpy_dev = zigpy_device_mock(
{1: {"out_clusters": [cluster_id], "in_clusters": [], "device_type": 0x1234}},
"00:11:22:33:44:55:66:77",
"test manufacturer",
"test model",
)
cluster = zigpy_dev.endpoints[1].out_clusters[cluster_id]
cluster.bind_only = True
channel_class = registries.ZIGBEE_CHANNEL_REGISTRY.get(
cluster_id, base_channels.ZigbeeChannel
)
channel = channel_class(cluster, channel_pool)
await channel.async_configure()
assert cluster.bind.call_count == bind_count
assert cluster.configure_reporting.call_count == 0
def test_channel_registry():
"""Test ZIGBEE Channel Registry."""
for (cluster_id, channel) in registries.ZIGBEE_CHANNEL_REGISTRY.items():
assert isinstance(cluster_id, int)
assert 0 <= cluster_id <= 0xFFFF
assert issubclass(channel, base_channels.ZigbeeChannel)
def test_epch_unclaimed_channels(channel):
"""Test unclaimed channels."""
ch_1 = channel(zha_const.CHANNEL_ON_OFF, 6)
ch_2 = channel(zha_const.CHANNEL_LEVEL, 8)
ch_3 = channel(zha_const.CHANNEL_COLOR, 768)
ep_channels = zha_channels.ChannelPool(
mock.MagicMock(spec_set=zha_channels.Channels), mock.sentinel.ep
)
all_channels = {ch_1.id: ch_1, ch_2.id: ch_2, ch_3.id: ch_3}
with mock.patch.dict(ep_channels.all_channels, all_channels, clear=True):
available = ep_channels.unclaimed_channels()
assert ch_1 in available
assert ch_2 in available
assert ch_3 in available
ep_channels.claimed_channels[ch_2.id] = ch_2
available = ep_channels.unclaimed_channels()
assert ch_1 in available
assert ch_2 not in available
assert ch_3 in available
ep_channels.claimed_channels[ch_1.id] = ch_1
available = ep_channels.unclaimed_channels()
assert ch_1 not in available
assert ch_2 not in available
assert ch_3 in available
ep_channels.claimed_channels[ch_3.id] = ch_3
available = ep_channels.unclaimed_channels()
assert ch_1 not in available
assert ch_2 not in available
assert ch_3 not in available
def test_epch_claim_channels(channel):
"""Test channel claiming."""
ch_1 = channel(zha_const.CHANNEL_ON_OFF, 6)
ch_2 = channel(zha_const.CHANNEL_LEVEL, 8)
ch_3 = channel(zha_const.CHANNEL_COLOR, 768)
ep_channels = zha_channels.ChannelPool(
mock.MagicMock(spec_set=zha_channels.Channels), mock.sentinel.ep
)
all_channels = {ch_1.id: ch_1, ch_2.id: ch_2, ch_3.id: ch_3}
with mock.patch.dict(ep_channels.all_channels, all_channels, clear=True):
assert ch_1.id not in ep_channels.claimed_channels
assert ch_2.id not in ep_channels.claimed_channels
assert ch_3.id not in ep_channels.claimed_channels
ep_channels.claim_channels([ch_2])
assert ch_1.id not in ep_channels.claimed_channels
assert ch_2.id in ep_channels.claimed_channels
assert ep_channels.claimed_channels[ch_2.id] is ch_2
assert ch_3.id not in ep_channels.claimed_channels
ep_channels.claim_channels([ch_3, ch_1])
assert ch_1.id in ep_channels.claimed_channels
assert ep_channels.claimed_channels[ch_1.id] is ch_1
assert ch_2.id in ep_channels.claimed_channels
assert ep_channels.claimed_channels[ch_2.id] is ch_2
assert ch_3.id in ep_channels.claimed_channels
assert ep_channels.claimed_channels[ch_3.id] is ch_3
assert "1:0x0300" in ep_channels.claimed_channels
@mock.patch(
"homeassistant.components.zha.core.channels.ChannelPool.add_client_channels"
)
@mock.patch(
"homeassistant.components.zha.core.discovery.PROBE.discover_entities",
mock.MagicMock(),
)
def test_ep_channels_all_channels(m1, zha_device_mock):
"""Test EndpointChannels adding all channels."""
zha_device = zha_device_mock(
{
1: {
"in_clusters": [0, 1, 6, 8],
"out_clusters": [],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
},
2: {
"in_clusters": [0, 1, 6, 8, 768],
"out_clusters": [],
"device_type": 0x0000,
},
}
)
channels = zha_channels.Channels(zha_device)
ep_channels = zha_channels.ChannelPool.new(channels, 1)
assert "1:0x0000" in ep_channels.all_channels
assert "1:0x0001" in ep_channels.all_channels
assert "1:0x0006" in ep_channels.all_channels
assert "1:0x0008" in ep_channels.all_channels
assert "1:0x0300" not in ep_channels.all_channels
assert "2:0x0000" not in ep_channels.all_channels
assert "2:0x0001" not in ep_channels.all_channels
assert "2:0x0006" not in ep_channels.all_channels
assert "2:0x0008" not in ep_channels.all_channels
assert "2:0x0300" not in ep_channels.all_channels
channels = zha_channels.Channels(zha_device)
ep_channels = zha_channels.ChannelPool.new(channels, 2)
assert "1:0x0000" not in ep_channels.all_channels
assert "1:0x0001" not in ep_channels.all_channels
assert "1:0x0006" not in ep_channels.all_channels
assert "1:0x0008" not in ep_channels.all_channels
assert "1:0x0300" not in ep_channels.all_channels
assert "2:0x0000" in ep_channels.all_channels
assert "2:0x0001" in ep_channels.all_channels
assert "2:0x0006" in ep_channels.all_channels
assert "2:0x0008" in ep_channels.all_channels
assert "2:0x0300" in ep_channels.all_channels
@mock.patch(
"homeassistant.components.zha.core.channels.ChannelPool.add_client_channels"
)
@mock.patch(
"homeassistant.components.zha.core.discovery.PROBE.discover_entities",
mock.MagicMock(),
)
def test_channel_power_config(m1, zha_device_mock):
"""Test that channels only get a single power channel."""
in_clusters = [0, 1, 6, 8]
zha_device = zha_device_mock(
{
1: {"in_clusters": in_clusters, "out_clusters": [], "device_type": 0x0000},
2: {
"in_clusters": [*in_clusters, 768],
"out_clusters": [],
"device_type": 0x0000,
},
}
)
channels = zha_channels.Channels.new(zha_device)
pools = {pool.id: pool for pool in channels.pools}
assert "1:0x0000" in pools[1].all_channels
assert "1:0x0001" in pools[1].all_channels
assert "1:0x0006" in pools[1].all_channels
assert "1:0x0008" in pools[1].all_channels
assert "1:0x0300" not in pools[1].all_channels
assert "2:0x0000" in pools[2].all_channels
assert "2:0x0001" not in pools[2].all_channels
assert "2:0x0006" in pools[2].all_channels
assert "2:0x0008" in pools[2].all_channels
assert "2:0x0300" in pools[2].all_channels
zha_device = zha_device_mock(
{
1: {"in_clusters": [], "out_clusters": [], "device_type": 0x0000},
2: {"in_clusters": in_clusters, "out_clusters": [], "device_type": 0x0000},
}
)
channels = zha_channels.Channels.new(zha_device)
pools = {pool.id: pool for pool in channels.pools}
assert "1:0x0001" not in pools[1].all_channels
assert "2:0x0001" in pools[2].all_channels
zha_device = zha_device_mock(
{2: {"in_clusters": in_clusters, "out_clusters": [], "device_type": 0x0000}}
)
channels = zha_channels.Channels.new(zha_device)
pools = {pool.id: pool for pool in channels.pools}
assert "2:0x0001" in pools[2].all_channels
async def test_ep_channels_configure(channel):
"""Test unclaimed channels."""
ch_1 = channel(zha_const.CHANNEL_ON_OFF, 6)
ch_2 = channel(zha_const.CHANNEL_LEVEL, 8)
ch_3 = channel(zha_const.CHANNEL_COLOR, 768)
ch_3.async_configure = tests.async_mock.AsyncMock(side_effect=asyncio.TimeoutError)
ch_3.async_initialize = tests.async_mock.AsyncMock(side_effect=asyncio.TimeoutError)
ch_4 = channel(zha_const.CHANNEL_ON_OFF, 6)
ch_5 = channel(zha_const.CHANNEL_LEVEL, 8)
ch_5.async_configure = tests.async_mock.AsyncMock(side_effect=asyncio.TimeoutError)
ch_5.async_initialize = tests.async_mock.AsyncMock(side_effect=asyncio.TimeoutError)
channels = mock.MagicMock(spec_set=zha_channels.Channels)
type(channels).semaphore = mock.PropertyMock(return_value=asyncio.Semaphore(3))
ep_channels = zha_channels.ChannelPool(channels, mock.sentinel.ep)
claimed = {ch_1.id: ch_1, ch_2.id: ch_2, ch_3.id: ch_3}
client_chans = {ch_4.id: ch_4, ch_5.id: ch_5}
with mock.patch.dict(ep_channels.claimed_channels, claimed, clear=True):
with mock.patch.dict(ep_channels.client_channels, client_chans, clear=True):
await ep_channels.async_configure()
await ep_channels.async_initialize(mock.sentinel.from_cache)
for ch in [*claimed.values(), *client_chans.values()]:
assert ch.async_initialize.call_count == 1
assert ch.async_initialize.await_count == 1
assert ch.async_initialize.call_args[0][0] is mock.sentinel.from_cache
assert ch.async_configure.call_count == 1
assert ch.async_configure.await_count == 1
assert ch_3.warning.call_count == 2
assert ch_5.warning.call_count == 2
async def test_poll_control_configure(poll_control_ch):
"""Test poll control channel configuration."""
await poll_control_ch.async_configure()
assert poll_control_ch.cluster.write_attributes.call_count == 1
assert poll_control_ch.cluster.write_attributes.call_args[0][0] == {
"checkin_interval": poll_control_ch.CHECKIN_INTERVAL
}
async def test_poll_control_checkin_response(poll_control_ch):
"""Test poll control channel checkin response."""
rsp_mock = tests.async_mock.AsyncMock()
set_interval_mock = tests.async_mock.AsyncMock()
cluster = poll_control_ch.cluster
patch_1 = mock.patch.object(cluster, "checkin_response", rsp_mock)
patch_2 = mock.patch.object(cluster, "set_long_poll_interval", set_interval_mock)
with patch_1, patch_2:
await poll_control_ch.check_in_response(33)
assert rsp_mock.call_count == 1
assert set_interval_mock.call_count == 1
await poll_control_ch.check_in_response(33)
assert cluster.endpoint.request.call_count == 2
assert cluster.endpoint.request.await_count == 2
assert cluster.endpoint.request.call_args_list[0][0][1] == 33
assert cluster.endpoint.request.call_args_list[0][0][0] == 0x0020
assert cluster.endpoint.request.call_args_list[1][0][0] == 0x0020
async def test_poll_control_cluster_command(hass, poll_control_device):
"""Test poll control channel response to cluster command."""
checkin_mock = tests.async_mock.AsyncMock()
poll_control_ch = poll_control_device.channels.pools[0].all_channels["1:0x0020"]
cluster = poll_control_ch.cluster
events = []
hass.bus.async_listen("zha_event", lambda x: events.append(x))
await hass.async_block_till_done()
with mock.patch.object(poll_control_ch, "check_in_response", checkin_mock):
tsn = 22
hdr = make_zcl_header(0, global_command=False, tsn=tsn)
assert not events
cluster.handle_message(
hdr, [mock.sentinel.args, mock.sentinel.args2, mock.sentinel.args3]
)
await hass.async_block_till_done()
assert checkin_mock.call_count == 1
assert checkin_mock.await_count == 1
assert checkin_mock.await_args[0][0] == tsn
assert len(events) == 1
data = events[0].data
assert data["command"] == "checkin"
assert data["args"][0] is mock.sentinel.args
assert data["args"][1] is mock.sentinel.args2
assert data["args"][2] is mock.sentinel.args3
assert data["unique_id"] == "00:11:22:33:44:55:66:77:1:0x0020"
|
import glob
import os
import numpy as np
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.datasets.ade20k.ade20k_utils import get_ade20k
from chainercv.utils import read_image
from chainercv.utils import read_label
root = 'pfnet/chainercv/ade20k'
url = 'http://data.csail.mit.edu/places/ADEchallenge/ADEChallengeData2016.zip'
class ADE20KSemanticSegmentationDataset(GetterDataset):
"""Semantic segmentation dataset for `ADE20K`_.
This is ADE20K dataset distributed in MIT Scene Parsing Benchmark website.
It has 20,210 training images and 2,000 validation images.
.. _`MIT Scene Parsing Benchmark`: http://sceneparsing.csail.mit.edu/
Args:
data_dir (string): Path to the dataset directory. The directory should
            contain the :obj:`ADEChallengeData2016` directory, and that
            directory should contain at least the :obj:`images` and
            :obj:`annotations` directories. If :obj:`auto` is given, the dataset
is automatically downloaded into
:obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/ade20k`.
split ({'train', 'val'}): Select from dataset splits used in
MIT Scene Parsing Benchmark dataset (ADE20K).
This dataset returns the following data.
.. csv-table::
:header: name, shape, dtype, format
:obj:`img`, ":math:`(3, H, W)`", :obj:`float32`, \
"RGB, :math:`[0, 255]`"
:obj:`label`, ":math:`(H, W)`", :obj:`int32`, \
":math:`[-1, \#class - 1]`"
"""
def __init__(self, data_dir='auto', split='train'):
super(ADE20KSemanticSegmentationDataset, self).__init__()
        if data_dir == 'auto':
data_dir = get_ade20k(root, url)
if split == 'train' or split == 'val':
img_dir = os.path.join(
data_dir, 'ADEChallengeData2016', 'images',
'training' if split == 'train' else 'validation')
label_dir = os.path.join(
data_dir, 'ADEChallengeData2016', 'annotations',
'training' if split == 'train' else 'validation')
else:
raise ValueError(
'Please give \'split\' argument with either \'train\' or '
'\'val\'.')
self.img_paths = sorted(glob.glob(os.path.join(img_dir, '*.jpg')))
self.label_paths = sorted(glob.glob(os.path.join(label_dir, '*.png')))
self.add_getter('img', self._get_image)
self.add_getter('label', self._get_label)
def __len__(self):
return len(self.img_paths)
def _get_image(self, i):
return read_image(self.img_paths[i])
def _get_label(self, i):
label = read_label(self.label_paths[i], dtype=np.int32)
# [-1, n_class - 1]
return label - 1
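# A minimal usage sketch: fetching one example from the validation split.
# The first call downloads and extracts the dataset archive under
# $CHAINER_DATASET_ROOT, so this is guarded.
if __name__ == '__main__':  # pragma: no cover
    dataset = ADE20KSemanticSegmentationDataset(split='val')
    img, label = dataset[0]
    # img: (3, H, W) float32 in [0, 255]; label: (H, W) int32 in [-1, 149]
    print(img.shape, img.dtype, label.shape, label.dtype)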
|
import logging
import voluptuous as vol
from homeassistant.components import pilight
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_PAYLOAD, CONF_UNIT_OF_MEASUREMENT
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_VARIABLE = "variable"
DEFAULT_NAME = "Pilight Sensor"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_VARIABLE): cv.string,
vol.Required(CONF_PAYLOAD): vol.Schema(dict),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
)
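# A hypothetical configuration.yaml entry matching the schema above; the
# payload keys and values depend entirely on what your pilight daemon emits.
#
#   sensor:
#     - platform: pilight
#       variable: temperature
#       payload:
#         uuid: "0000-b8-27-eb-f447d3"
#       name: Outside Temperature
#       unit_of_measurement: "°C"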
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Pilight Sensor."""
add_entities(
[
PilightSensor(
hass=hass,
name=config.get(CONF_NAME),
variable=config.get(CONF_VARIABLE),
payload=config.get(CONF_PAYLOAD),
unit_of_measurement=config.get(CONF_UNIT_OF_MEASUREMENT),
)
]
)
class PilightSensor(Entity):
"""Representation of a sensor that can be updated using Pilight."""
def __init__(self, hass, name, variable, payload, unit_of_measurement):
"""Initialize the sensor."""
self._state = None
self._hass = hass
self._name = name
self._variable = variable
self._payload = payload
self._unit_of_measurement = unit_of_measurement
hass.bus.listen(pilight.EVENT, self._handle_code)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the entity."""
return self._state
def _handle_code(self, call):
"""Handle received code by the pilight-daemon.
If the code matches the defined payload
of this sensor the sensor state is changed accordingly.
"""
        # Check if the received code matches the defined payload: the payload
        # only needs to be a subset of the received code dict, so not all
        # received items have to match.
if self._payload.items() <= call.data.items():
try:
value = call.data[self._variable]
self._state = value
self.schedule_update_ha_state()
except KeyError:
_LOGGER.error(
"No variable %s in received code data %s",
str(self._variable),
str(call.data),
)
|
from typing import Any, List, Mapping
from PyQt5.QtCore import QByteArray, QDataStream, QIODevice, QUrl
from qutebrowser.utils import qtutils
def _serialize_items(items, current_idx, stream):
# {'currentItemIndex': 0,
# 'history': [{'children': [],
# 'documentSequenceNumber': 1485030525573123,
# 'documentState': [],
# 'formContentType': '',
# 'itemSequenceNumber': 1485030525573122,
# 'originalURLString': 'about:blank',
# 'pageScaleFactor': 0.0,
# 'referrer': '',
# 'scrollPosition': {'x': 0, 'y': 0},
# 'target': '',
# 'title': '',
# 'urlString': 'about:blank'}]}
data = {'currentItemIndex': current_idx, 'history': []}
for item in items:
data['history'].append(_serialize_item(item))
stream.writeInt(3) # history stream version
stream.writeQVariantMap(data)
def _serialize_item(item):
data = {
'originalURLString': item.original_url.toString(QUrl.FullyEncoded),
'scrollPosition': {'x': 0, 'y': 0},
'title': item.title,
'urlString': item.url.toString(QUrl.FullyEncoded),
}
try:
data['scrollPosition']['x'] = item.user_data['scroll-pos'].x()
data['scrollPosition']['y'] = item.user_data['scroll-pos'].y()
except (KeyError, TypeError):
pass
return data
def serialize(items):
"""Serialize a list of WebHistoryItems to a data stream.
Args:
items: An iterable of WebHistoryItems.
Return:
A (stream, data, user_data) tuple.
stream: The reset QDataStream.
data: The QByteArray with the raw data.
user_data: A list with each item's user data.
Warning:
If 'data' goes out of scope, reading from 'stream' will result in a
segfault!
"""
data = QByteArray()
stream = QDataStream(data, QIODevice.ReadWrite)
user_data: List[Mapping[str, Any]] = []
current_idx = None
for i, item in enumerate(items):
if item.active:
if current_idx is not None:
raise ValueError("Multiple active items ({} and {}) "
"found!".format(current_idx, i))
current_idx = i
if items:
if current_idx is None:
raise ValueError("No active item found!")
else:
current_idx = 0
_serialize_items(items, current_idx, stream)
user_data += [item.user_data for item in items]
stream.device().reset()
qtutils.check_qdatastream(stream)
return stream, data, user_data
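# A minimal usage sketch: `serialize` only requires objects exposing
# `url`, `original_url`, `title`, `user_data` and `active` attributes, so a
# hypothetical stand-in item (not the real qutebrowser history item class)
# is enough to exercise it.
if __name__ == '__main__':  # pragma: no cover
    class _FakeItem:
        def __init__(self, url, title, active=False):
            self.url = QUrl(url)
            self.original_url = QUrl(url)
            self.title = title
            self.user_data = {}  # no 'scroll-pos', so (0, 0) is serialized
            self.active = active

    stream, data, user_data = serialize(
        [_FakeItem('http://example.com/', 'Example', active=True)])
    print(data.size(), user_data)  # keep `data` alive while using `stream`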
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
import tensorflow.contrib.slim as slim
def mobilenet(inputs,
num_classes=1000,
is_training=True,
width_multiplier=1,
scope='MobileNet'):
""" MobileNet
More detail, please refer to Google's paper(https://arxiv.org/abs/1704.04861).
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
scope: Optional scope for the variables.
Returns:
logits: the pre-softmax activations, a tensor of size
[batch_size, `num_classes`]
end_points: a dictionary from components of the network to the corresponding
activation.
"""
def _depthwise_separable_conv(inputs,
num_pwc_filters,
width_multiplier,
sc,
downsample=False):
""" Helper function to build the depth-wise separable convolution layer.
"""
num_pwc_filters = round(num_pwc_filters * width_multiplier)
_stride = 2 if downsample else 1
# skip pointwise by setting num_outputs=None
depthwise_conv = slim.separable_convolution2d(inputs,
num_outputs=None,
stride=_stride,
depth_multiplier=1,
kernel_size=[3, 3],
scope=sc+'/depthwise_conv')
bn = slim.batch_norm(depthwise_conv, scope=sc+'/dw_batch_norm')
pointwise_conv = slim.convolution2d(bn,
num_pwc_filters,
kernel_size=[1, 1],
scope=sc+'/pointwise_conv')
bn = slim.batch_norm(pointwise_conv, scope=sc+'/pw_batch_norm')
return bn
with tf.variable_scope(scope) as sc:
end_points_collection = sc.name + '_end_points'
with slim.arg_scope([slim.convolution2d, slim.separable_convolution2d],
activation_fn=None,
outputs_collections=[end_points_collection]):
with slim.arg_scope([slim.batch_norm],
is_training=is_training,
activation_fn=tf.nn.relu,
fused=True):
net = slim.convolution2d(inputs, round(32 * width_multiplier), [3, 3], stride=2, padding='SAME', scope='conv_1')
net = slim.batch_norm(net, scope='conv_1/batch_norm')
net = _depthwise_separable_conv(net, 64, width_multiplier, sc='conv_ds_2')
net = _depthwise_separable_conv(net, 128, width_multiplier, downsample=True, sc='conv_ds_3')
net = _depthwise_separable_conv(net, 128, width_multiplier, sc='conv_ds_4')
net = _depthwise_separable_conv(net, 256, width_multiplier, downsample=True, sc='conv_ds_5')
net = _depthwise_separable_conv(net, 256, width_multiplier, sc='conv_ds_6')
net = _depthwise_separable_conv(net, 512, width_multiplier, downsample=True, sc='conv_ds_7')
net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_8')
net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_9')
net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_10')
net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_11')
net = _depthwise_separable_conv(net, 512, width_multiplier, sc='conv_ds_12')
net = _depthwise_separable_conv(net, 1024, width_multiplier, downsample=True, sc='conv_ds_13')
net = _depthwise_separable_conv(net, 1024, width_multiplier, sc='conv_ds_14')
net = slim.avg_pool2d(net, [7, 7], scope='avg_pool_15')
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
net = tf.squeeze(net, [1, 2], name='SpatialSqueeze')
end_points['squeeze'] = net
logits = slim.fully_connected(net, num_classes, activation_fn=None, scope='fc_16')
predictions = slim.softmax(logits, scope='Predictions')
end_points['Logits'] = logits
end_points['Predictions'] = predictions
return logits, end_points
mobilenet.default_image_size = 224
def mobilenet_arg_scope(weight_decay=0.0):
"""Defines the default mobilenet argument scope.
Args:
weight_decay: The weight decay to use for regularizing the model.
Returns:
An `arg_scope` to use for the MobileNet model.
"""
with slim.arg_scope(
[slim.convolution2d, slim.separable_convolution2d],
weights_initializer=slim.initializers.xavier_initializer(),
biases_initializer=slim.init_ops.zeros_initializer(),
weights_regularizer=slim.l2_regularizer(weight_decay)) as sc:
return sc
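# A minimal usage sketch (TF 1.x / contrib.slim era APIs): build the network
# for inference under the regularization scope defined above.
if __name__ == '__main__':  # pragma: no cover
    size = mobilenet.default_image_size
    images = tf.placeholder(tf.float32, [None, size, size, 3])
    with slim.arg_scope(mobilenet_arg_scope(weight_decay=0.0001)):
        logits, end_points = mobilenet(images, num_classes=1000,
                                       is_training=False)
    print(logits.shape)                 # (?, 1000)
    print('Predictions' in end_points)  # True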
|
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
PLATFORM_SCHEMA,
BaseNotificationService,
)
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.template as template_helper
from .const import (
ATTR_ADDRESS,
ATTR_CHANNEL,
ATTR_INTERFACE,
ATTR_PARAM,
ATTR_VALUE,
DOMAIN,
SERVICE_SET_DEVICE_VALUE,
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_INTERFACE): cv.string,
}
)
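# A hypothetical configuration.yaml entry matching the schema above; the
# address, channel and param values are device-specific examples.
#
#   notify:
#     - platform: homematic
#       name: hm_display
#       address: NEQ1234567
#       channel: 1
#       param: DISPLAY_DATA_STRING
#       value: "My message"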
def get_service(hass, config, discovery_info=None):
"""Get the Homematic notification service."""
data = {
ATTR_ADDRESS: config[ATTR_ADDRESS],
ATTR_CHANNEL: config[ATTR_CHANNEL],
ATTR_PARAM: config[ATTR_PARAM],
ATTR_VALUE: config[ATTR_VALUE],
}
if ATTR_INTERFACE in config:
data[ATTR_INTERFACE] = config[ATTR_INTERFACE]
return HomematicNotificationService(hass, data)
class HomematicNotificationService(BaseNotificationService):
"""Implement the notification service for Homematic."""
def __init__(self, hass, data):
"""Initialize the service."""
self.hass = hass
self.data = data
def send_message(self, message="", **kwargs):
"""Send a notification to the device."""
data = {**self.data, **kwargs.get(ATTR_DATA, {})}
if data.get(ATTR_VALUE) is not None:
templ = template_helper.Template(self.data[ATTR_VALUE], self.hass)
data[ATTR_VALUE] = template_helper.render_complex(templ, None)
self.hass.services.call(DOMAIN, SERVICE_SET_DEVICE_VALUE, data)
|
import re
from collections import Counter
from unittest import TestCase
from scattertext import whitespace_nlp
from scattertext.WhitespaceNLP import Tok, Doc, _regex_parse_sentence, whitespace_nlp_with_sentences
from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc
from scattertext.features.FeatsFromSpacyDocOnlyNounChunks import FeatsFromSpacyDocOnlyNounChunks
def bad_whitespace_nlp(doc):
toks = []
for tok in doc.split():
pos = 'WORD'
if tok.strip() == '':
pos = 'SPACE'
        elif re.match(r'^\W+$', tok):
pos = 'PUNCT'
toks.append(Tok(pos,
tok[:2].lower(),
tok.lower(),
ent_type='',
tag=''))
return Doc([toks])
class Span:
def __init__(self, toks):
self.text = ' '.join([t.lower_ for t in toks])
def __str__(self):
return self.text
def whitespace_nlp_with_fake_chunks(doc, entity_type=None, tag_type=None):
toks = _regex_parse_sentence(doc, entity_type, tag_type)
words = [t for t in toks if t.pos_ == 'WORD']
if len(words) < 5:
return Doc([toks])
else:
return Doc([toks], noun_chunks=[Span(words[:2]), Span(words[1:3])])
class TestFeatsFromSpacyDoc(TestCase):
def test_main(self):
doc = whitespace_nlp("A a bb cc.")
term_freq = FeatsFromSpacyDoc().get_feats(doc)
self.assertEqual(Counter({'a': 2, 'bb': 1, 'a bb': 1, 'cc': 1, 'a a': 1, 'bb cc': 1}),
term_freq)
def test_singleton_with_sentences(self):
doc = whitespace_nlp_with_sentences("Blah")
term_freq = FeatsFromSpacyDoc().get_feats(doc)
self.assertEqual(Counter({'blah': 1}),
term_freq)
def test_lemmas(self):
doc = whitespace_nlp("A a bb ddddd.")
term_freq = FeatsFromSpacyDoc(use_lemmas=True).get_feats(doc)
self.assertEqual(Counter({'a': 2, 'bb': 1, 'a bb': 1, 'dd': 1, 'a a': 1, 'bb dd': 1}),
term_freq)
def test_feats_from_spacy_doc_only_chunks(self):
doc = whitespace_nlp_with_fake_chunks('This is a fake noun chunk generating sentence.')
term_freq = FeatsFromSpacyDocOnlyNounChunks().get_feats(doc)
self.assertEqual(term_freq, Counter({'this is': 1, 'is a': 1}))
def test_empty(self):
doc = whitespace_nlp("")
term_freq = FeatsFromSpacyDoc().get_feats(doc)
self.assertEqual(Counter(), term_freq)
def test_entity_types_to_censor_not_a_set(self):
doc = whitespace_nlp("A a bb cc.", {'bb': 'A'})
with self.assertRaises(AssertionError):
FeatsFromSpacyDoc(entity_types_to_censor='A').get_feats(doc)
def test_entity_censor(self):
doc = whitespace_nlp("A a bb cc.", {'bb': 'BAD'})
term_freq = FeatsFromSpacyDoc(entity_types_to_censor=set(['BAD'])).get_feats(doc)
self.assertEqual(Counter({'a': 2, 'a _BAD': 1, '_BAD cc': 1, 'cc': 1, 'a a': 1, '_BAD': 1}),
term_freq)
def test_entity_tags(self):
doc = whitespace_nlp("A a bb cc Bob.", {'bb': 'BAD'}, {'Bob': 'NNP'})
term_freq = FeatsFromSpacyDoc(entity_types_to_censor=set(['BAD'])).get_feats(doc)
self.assertEqual(Counter({'a': 2, 'a _BAD': 1,
'_BAD cc': 1, 'cc': 1,
'a a': 1, '_BAD': 1, 'bob': 1, 'cc bob': 1}),
term_freq)
term_freq = FeatsFromSpacyDoc(entity_types_to_censor=set(['BAD']),
tag_types_to_censor=set(['NNP'])).get_feats(doc)
self.assertEqual(Counter({'a': 2, 'a _BAD': 1,
'_BAD cc': 1, 'cc': 1,
'a a': 1, '_BAD': 1, 'NNP': 1, 'cc NNP': 1}),
term_freq)
def test_strip_final_period(self):
doc = bad_whitespace_nlp('''I CAN'T ANSWER THAT
QUESTION.
I HAVE NOT ASKED THEM
SPECIFICALLY IF THEY HAVE
ENOUGH.''')
feats = FeatsFromSpacyDoc().get_feats(doc)
self.assertEqual(feats, Counter(
{'i': 2, 'have': 2, 'that question.': 1, 'answer': 1, 'question.': 1, 'enough.': 1, 'i have': 1,
'them specifically': 1, 'have enough.': 1, 'not asked': 1, 'they have': 1, 'have not': 1, 'specifically': 1,
'answer that': 1, 'question. i': 1, "can't": 1, 'if': 1, 'they': 1, "can't answer": 1, 'asked': 1, 'them': 1,
'if they': 1, 'asked them': 1, 'that': 1, 'not': 1, "i can't": 1, 'specifically if': 1}))
feats = FeatsFromSpacyDoc(strip_final_period=True).get_feats(doc)
self.assertEqual(feats, Counter(
{'i': 2, 'have': 2, 'that question': 1, 'answer': 1, 'question': 1, 'enough': 1, 'i have': 1,
'them specifically': 1, 'have enough': 1, 'not asked': 1, 'they have': 1,
'have not': 1, 'specifically': 1,
'answer that': 1, 'question i': 1, "can't": 1, 'if': 1, 'they': 1,
"can't answer": 1, 'asked': 1, 'them': 1,
'if they': 1, 'asked them': 1, 'that': 1, 'not': 1, "i can't": 1, 'specifically if': 1}))
|
from pyspark import SparkContext, SparkConf
from hyperopt import STATUS_OK
from hyperas.distributions import choice, uniform
import six.moves.cPickle as pickle
from elephas.hyperparam import HyperParamModel
def data():
"""Data providing function:
Make sure to have every relevant import statement included here and return data as
used in model function below. This function is separated from model() so that hyperopt
won't reload data for each evaluation run.
"""
from keras.datasets import mnist
from keras.utils import np_utils
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
nb_classes = 10
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
return x_train, y_train, x_test, y_test
def model(x_train, y_train, x_test, y_test):
"""Model providing function:
Create Keras model with double curly brackets dropped-in as needed.
Return value has to be a valid python dictionary with two customary keys:
- loss: Specify a numeric evaluation metric to be minimized
- status: Just use STATUS_OK and see hyperopt documentation if not feasible
The last one is optional, though recommended, namely:
- model: specify the model just created so that we can later use it again.
"""
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import RMSprop
keras_model = Sequential()
keras_model.add(Dense(512, input_shape=(784,)))
keras_model.add(Activation('relu'))
keras_model.add(Dropout({{uniform(0, 1)}}))
keras_model.add(Dense({{choice([256, 512, 1024])}}))
keras_model.add(Activation('relu'))
keras_model.add(Dropout({{uniform(0, 1)}}))
keras_model.add(Dense(10))
keras_model.add(Activation('softmax'))
rms = RMSprop()
keras_model.compile(loss='categorical_crossentropy', optimizer=rms, metrics=['acc'])
keras_model.fit(x_train, y_train,
batch_size={{choice([64, 128])}},
epochs=1,
verbose=2,
validation_data=(x_test, y_test))
score, acc = keras_model.evaluate(x_test, y_test, verbose=0)
print('Test accuracy:', acc)
return {'loss': -acc, 'status': STATUS_OK, 'model': keras_model.to_yaml(),
'weights': pickle.dumps(keras_model.get_weights())}
# Create Spark context
conf = SparkConf().setAppName('Elephas_Hyperparameter_Optimization').setMaster('local[8]')
sc = SparkContext(conf=conf)
# Define hyper-parameter model and run optimization.
hyperparam_model = HyperParamModel(sc)
hyperparam_model.minimize(model=model, data=data, max_evals=5)
|
import unittest
from urwid.compat import B
import urwid
class TextTest(unittest.TestCase):
def setUp(self):
self.t = urwid.Text("I walk the\ncity in the night")
def test1_wrap(self):
expected = [B(t) for t in ("I walk the","city in ","the night ")]
got = self.t.render((10,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
def test2_left(self):
self.t.set_align_mode('left')
expected = [B(t) for t in ("I walk the ","city in the night ")]
got = self.t.render((18,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
def test3_right(self):
self.t.set_align_mode('right')
expected = [B(t) for t in (" I walk the"," city in the night")]
got = self.t.render((18,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
def test4_center(self):
self.t.set_align_mode('center')
expected = [B(t) for t in (" I walk the "," city in the night")]
got = self.t.render((18,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
def test5_encode_error(self):
urwid.set_encoding("ascii")
expected = [B("? ")]
got = urwid.Text(u'û').render((3,))._text
assert got == expected, "got: %r expected: %r" % (got, expected)
class EditTest(unittest.TestCase):
def setUp(self):
self.t1 = urwid.Edit(B(""),"blah blah")
self.t2 = urwid.Edit(B("stuff:"), "blah blah")
self.t3 = urwid.Edit(B("junk:\n"),"blah blah\n\nbloo",1)
self.t4 = urwid.Edit(u"better:")
def ktest(self, e, key, expected, pos, desc):
got= e.keypress((12,),key)
assert got == expected, "%s. got: %r expected:%r" % (desc, got,
expected)
assert e.edit_pos == pos, "%s. pos: %r expected pos: %r" % (
desc, e.edit_pos, pos)
def test1_left(self):
self.t1.set_edit_pos(0)
self.ktest(self.t1,'left','left',0,"left at left edge")
self.ktest(self.t2,'left',None,8,"left within text")
self.t3.set_edit_pos(10)
self.ktest(self.t3,'left',None,9,"left after newline")
def test2_right(self):
self.ktest(self.t1,'right','right',9,"right at right edge")
self.t2.set_edit_pos(8)
self.ktest(self.t2,'right',None,9,"right at right edge-1")
self.t3.set_edit_pos(0)
self.t3.keypress((12,),'right')
assert self.t3.get_pref_col((12,)) == 1
def test3_up(self):
self.ktest(self.t1,'up','up',9,"up at top")
self.t2.set_edit_pos(2)
self.t2.keypress((12,),"left")
assert self.t2.get_pref_col((12,)) == 7
self.ktest(self.t2,'up','up',1,"up at top again")
assert self.t2.get_pref_col((12,)) == 7
self.t3.set_edit_pos(10)
self.ktest(self.t3,'up',None,0,"up at top+1")
def test4_down(self):
self.ktest(self.t1,'down','down',9,"down single line")
self.t3.set_edit_pos(5)
self.ktest(self.t3,'down',None,10,"down line 1 to 2")
self.ktest(self.t3,'down',None,15,"down line 2 to 3")
self.ktest(self.t3,'down','down',15,"down at bottom")
def test_utf8_input(self):
urwid.set_encoding("utf-8")
self.t1.set_edit_text('')
self.t1.keypress((12,), u'û')
self.assertEqual(self.t1.edit_text, u'û'.encode('utf-8'))
self.t4.keypress((12,), u'û')
self.assertEqual(self.t4.edit_text, u'û')
class EditRenderTest(unittest.TestCase):
def rtest(self, w, expected_text, expected_cursor):
expected_text = [B(t) for t in expected_text]
get_cursor = w.get_cursor_coords((4,))
assert get_cursor == expected_cursor, "got: %r expected: %r" % (
get_cursor, expected_cursor)
r = w.render((4,), focus = 1)
text = [t for a, cs, t in [ln[0] for ln in r.content()]]
assert text == expected_text, "got: %r expected: %r" % (text,
expected_text)
assert r.cursor == expected_cursor, "got: %r expected: %r" % (
r.cursor, expected_cursor)
def test1_SpaceWrap(self):
w = urwid.Edit("","blah blah")
w.set_edit_pos(0)
self.rtest(w,["blah","blah"],(0,0))
w.set_edit_pos(4)
self.rtest(w,["lah ","blah"],(3,0))
w.set_edit_pos(5)
self.rtest(w,["blah","blah"],(0,1))
w.set_edit_pos(9)
self.rtest(w,["blah","lah "],(3,1))
def test2_ClipWrap(self):
w = urwid.Edit("","blah\nblargh",1)
w.set_wrap_mode('clip')
w.set_edit_pos(0)
self.rtest(w,["blah","blar"],(0,0))
w.set_edit_pos(10)
self.rtest(w,["blah","argh"],(3,1))
w.set_align_mode('right')
w.set_edit_pos(6)
self.rtest(w,["blah","larg"],(0,1))
def test3_AnyWrap(self):
w = urwid.Edit("","blah blah")
w.set_wrap_mode('any')
self.rtest(w,["blah"," bla","h "],(1,2))
def test4_CursorNudge(self):
w = urwid.Edit("","hi",align='right')
w.keypress((4,),'end')
self.rtest(w,[" hi "],(3,0))
w.keypress((4,),'left')
self.rtest(w,[" hi"],(3,0))
|
from __future__ import division
import numpy as np
import chainer
import chainer.functions as F
def _elementwise_softmax_cross_entropy(x, t):
assert x.shape[:-1] == t.shape
shape = t.shape
x = F.reshape(x, (-1, x.shape[-1]))
t = F.flatten(t)
return F.reshape(
F.softmax_cross_entropy(x, t, reduce='no'), shape)
def _hard_negative(x, positive, k):
rank = (x * (positive - 1)).argsort(axis=1).argsort(axis=1)
hard_negative = rank < (positive.sum(axis=1) * k)[:, np.newaxis]
return hard_negative
def multibox_loss(mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k, comm=None):
"""Computes multibox losses.
This is a loss function used in [#]_.
This function returns :obj:`loc_loss` and :obj:`conf_loss`.
:obj:`loc_loss` is a loss for localization and
:obj:`conf_loss` is a loss for classification.
The formulas of these losses can be found in
the equation (2) and (3) in the original paper.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan,
Christian Szegedy, Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Args:
mb_locs (chainer.Variable or array): The offsets and scales
for predicted bounding boxes.
Its shape is :math:`(B, K, 4)`,
where :math:`B` is the number of samples in the batch and
:math:`K` is the number of default bounding boxes.
mb_confs (chainer.Variable or array): The classes of predicted
bounding boxes.
Its shape is :math:`(B, K, n\_class)`.
This function assumes the first class is background (negative).
gt_mb_locs (chainer.Variable or array): The offsets and scales
for ground truth bounding boxes.
Its shape is :math:`(B, K, 4)`.
gt_mb_labels (chainer.Variable or array): The classes of ground truth
bounding boxes.
Its shape is :math:`(B, K)`.
k (float): A coefficient which is used for hard negative mining.
This value determines the ratio between the number of positives
and that of mined negatives. The value used in the original paper
is :obj:`3`.
comm (~chainermn.communicators.CommunicatorBase):
A ChainerMN communicator.
If it is specified, the number of positive examples is computed
among all GPUs.
Returns:
tuple of chainer.Variable:
This function returns two :obj:`chainer.Variable`: :obj:`loc_loss` and
:obj:`conf_loss`.
"""
mb_locs = chainer.as_variable(mb_locs)
mb_confs = chainer.as_variable(mb_confs)
gt_mb_locs = chainer.as_variable(gt_mb_locs)
gt_mb_labels = chainer.as_variable(gt_mb_labels)
xp = chainer.backends.cuda.get_array_module(gt_mb_labels.array)
with chainer.backends.cuda.get_device_from_array(gt_mb_labels.array):
positive = gt_mb_labels.array > 0
n_positive = positive.sum()
if comm:
n_positive = comm.allreduce_obj(n_positive) / comm.size
if n_positive == 0:
z = chainer.Variable(xp.zeros((), dtype=np.float32))
return z, z
loc_loss = F.huber_loss(mb_locs, gt_mb_locs, 1, reduce='no')
loc_loss = F.sum(loc_loss, axis=-1)
loc_loss *= positive.astype(loc_loss.dtype)
loc_loss = F.sum(loc_loss) / n_positive
conf_loss = _elementwise_softmax_cross_entropy(mb_confs, gt_mb_labels)
hard_negative = _hard_negative(conf_loss.array, positive, k)
conf_loss *= xp.logical_or(
positive, hard_negative).astype(conf_loss.dtype)
conf_loss = F.sum(conf_loss) / n_positive
return loc_loss, conf_loss
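# A minimal usage sketch with random inputs of the shapes described in the
# docstring: B=2 samples, K=8 default boxes, 21 classes (class 0 is
# background) and the paper's hard-negative ratio k=3.
if __name__ == '__main__':  # pragma: no cover
    B, K, n_class = 2, 8, 21
    mb_locs = np.random.randn(B, K, 4).astype(np.float32)
    mb_confs = np.random.randn(B, K, n_class).astype(np.float32)
    gt_mb_locs = np.random.randn(B, K, 4).astype(np.float32)
    gt_mb_labels = np.random.randint(0, n_class, size=(B, K)).astype(np.int32)
    loc_loss, conf_loss = multibox_loss(
        mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k=3)
    print(loc_loss.array, conf_loss.array)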
|
import voluptuous as vol
from homeassistant.components.lock import PLATFORM_SCHEMA, LockEntity
from homeassistant.const import (
CONF_NAME,
CONF_OPTIMISTIC,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_LOCKED,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.script import Script
from .const import CONF_AVAILABILITY_TEMPLATE, DOMAIN, PLATFORMS
from .template_entity import TemplateEntity
CONF_LOCK = "lock"
CONF_UNLOCK = "unlock"
DEFAULT_NAME = "Template Lock"
DEFAULT_OPTIMISTIC = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_LOCK): cv.SCRIPT_SCHEMA,
vol.Required(CONF_UNLOCK): cv.SCRIPT_SCHEMA,
vol.Required(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
async def _async_create_entities(hass, config):
"""Create the Template lock."""
device = config.get(CONF_NAME)
value_template = config.get(CONF_VALUE_TEMPLATE)
availability_template = config.get(CONF_AVAILABILITY_TEMPLATE)
return [
TemplateLock(
hass,
device,
value_template,
availability_template,
config.get(CONF_LOCK),
config.get(CONF_UNLOCK),
config.get(CONF_OPTIMISTIC),
config.get(CONF_UNIQUE_ID),
)
]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template lock."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities(await _async_create_entities(hass, config))
class TemplateLock(TemplateEntity, LockEntity):
"""Representation of a template lock."""
def __init__(
self,
hass,
name,
value_template,
availability_template,
command_lock,
command_unlock,
optimistic,
unique_id,
):
"""Initialize the lock."""
super().__init__(availability_template=availability_template)
self._state = None
self._name = name
self._state_template = value_template
domain = __name__.split(".")[-2]
self._command_lock = Script(hass, command_lock, name, domain)
self._command_unlock = Script(hass, command_unlock, name, domain)
self._optimistic = optimistic
self._unique_id = unique_id
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def name(self):
"""Return the name of the lock."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this lock."""
return self._unique_id
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state
@callback
def _update_state(self, result):
super()._update_state(result)
if isinstance(result, TemplateError):
self._state = None
return
if isinstance(result, bool):
self._state = result
return
if isinstance(result, str):
self._state = result.lower() in ("true", STATE_ON, STATE_LOCKED)
return
self._state = False
async def async_added_to_hass(self):
"""Register callbacks."""
self.add_template_attribute(
"_state", self._state_template, None, self._update_state
)
await super().async_added_to_hass()
async def async_lock(self, **kwargs):
"""Lock the device."""
if self._optimistic:
self._state = True
self.async_write_ha_state()
await self._command_lock.async_run(context=self._context)
async def async_unlock(self, **kwargs):
"""Unlock the device."""
if self._optimistic:
self._state = False
self.async_write_ha_state()
await self._command_unlock.async_run(context=self._context)
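# A rough sketch (not part of the platform): the dict form of a YAML
# configuration that PLATFORM_SCHEMA above accepts. The script and entity
# names are made up, and running this requires a Home Assistant checkout.
if __name__ == '__main__':
    print(
        PLATFORM_SCHEMA(
            {
                "platform": "template",
                "name": "Front Door",
                "value_template": "{{ is_state('lock.front_door', 'locked') }}",
                "lock": [{"service": "script.lock_front_door"}],
                "unlock": [{"service": "script.unlock_front_door"}],
            }
        )
    )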
|
from queue import Queue
from threading import Event
from mock import Mock
from mock import patch
from pytest import raises
from paasta_tools.cli.cmds import mark_for_deployment
from paasta_tools.cli.cmds.mark_for_deployment import NoSuchCluster
from paasta_tools.cli.cmds.wait_for_deployment import get_latest_marked_sha
from paasta_tools.cli.cmds.wait_for_deployment import paasta_wait_for_deployment
from paasta_tools.cli.cmds.wait_for_deployment import validate_git_sha_is_latest
from paasta_tools.cli.utils import NoSuchService
from paasta_tools.marathon_tools import MarathonServiceConfig
from paasta_tools.paastaapi import ApiException
from paasta_tools.remote_git import LSRemoteException
from paasta_tools.utils import TimeoutError
class fake_args:
deploy_group = "test_deploy_group"
service = "test_service"
git_url = ""
commit = "d670460b4b4aece5915caf5c68d12f560a9fe3e4"
soa_dir = "fake_soa_dir"
timeout = 0
verbose = False
def mock_status_instance_side_effect(
service, instance, include_smartstack, include_envoy, include_mesos
):
if instance in ["instance1", "instance6", "notaninstance", "api_error"]:
# valid completed instance
mock_mstatus = Mock(
app_count=1,
deploy_status="Running",
expected_instance_count=2,
running_instance_count=2,
)
if instance == "instance2":
# too many marathon apps
mock_mstatus = Mock(
app_count=2,
deploy_status="Running",
expected_instance_count=2,
running_instance_count=2,
)
if instance == "instance3":
# too many running instances
mock_mstatus = Mock(
app_count=1,
deploy_status="Running",
expected_instance_count=2,
running_instance_count=4,
)
if instance == "instance4":
# still Deploying
mock_mstatus = Mock(
app_count=1,
deploy_status="Deploying",
expected_instance_count=2,
running_instance_count=2,
)
if instance == "instance4.1":
# still Deploying
mock_mstatus = Mock(
app_count=1,
deploy_status="Waiting",
expected_instance_count=2,
running_instance_count=2,
)
if instance == "instance5":
# not a marathon instance
mock_mstatus = None
if instance == "instance7":
# paasta stop'd
mock_mstatus = Mock(
app_count=1,
deploy_status="Stopped",
expected_instance_count=0,
running_instance_count=0,
desired_state="stop",
)
if instance == "instance8":
# paasta has autoscaled to 0
mock_mstatus = Mock(
app_count=1,
deploy_status="Stopped",
expected_instance_count=0,
running_instance_count=0,
)
mock_status = Mock()
mock_status.git_sha = "somesha"
if instance == "instance6":
# running the wrong version
mock_status.git_sha = "anothersha"
mock_status.marathon = mock_mstatus
mock_status.kubernetes = None
if instance == "notaninstance":
# not an instance paasta can find
raise ApiException(status=404, reason="")
if instance == "api_error":
# not an instance paasta can find
raise ApiException(status=500, reason="")
return mock_status
@patch("paasta_tools.cli.cmds.mark_for_deployment._log", autospec=True)
@patch(
"paasta_tools.cli.cmds.mark_for_deployment.client.get_paasta_oapi_client",
autospec=True,
)
def test_instances_deployed(mock_get_paasta_oapi_client, mock__log):
mock_paasta_api_client = Mock()
mock_paasta_api_client.api_error = ApiException
mock_get_paasta_oapi_client.return_value = mock_paasta_api_client
mock_paasta_api_client.service.status_instance.side_effect = (
mock_status_instance_side_effect
)
f = mark_for_deployment.instances_deployed
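    # The Event acts as a green light: setting it before the call lets
    # instances_deployed drain the queue once and return, pushing every
    # instance it does not consider fully deployed onto instances_out.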
e = Event()
e.set()
cluster_data = mark_for_deployment.ClusterData(
cluster="cluster",
service="service1",
git_sha="somesha",
instances_queue=Queue(),
)
cluster_data.instances_queue.put(mock_marathon_instance_config("instance1"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.empty()
cluster_data.instances_queue = Queue()
cluster_data.instances_queue.put(mock_marathon_instance_config("instance1"))
cluster_data.instances_queue.put(mock_marathon_instance_config("instance2"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.get(block=False).get_instance() == "instance2"
cluster_data.instances_queue = Queue()
    cluster_data.instances_queue.put(mock_marathon_instance_config("instance3"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.empty()
cluster_data.instances_queue = Queue()
cluster_data.instances_queue.put(mock_marathon_instance_config("instance4"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.empty()
cluster_data.instances_queue = Queue()
cluster_data.instances_queue.put(mock_marathon_instance_config("instance4.1"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.empty()
cluster_data.instances_queue = Queue()
cluster_data.instances_queue.put(mock_marathon_instance_config("instance5"))
cluster_data.instances_queue.put(mock_marathon_instance_config("instance1"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.empty()
cluster_data.instances_queue = Queue()
cluster_data.instances_queue.put(mock_marathon_instance_config("instance6"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.get(block=False).get_instance() == "instance6"
cluster_data.instances_queue = Queue()
cluster_data.instances_queue.put(mock_marathon_instance_config("notaninstance"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.get(block=False).get_instance() == "notaninstance"
cluster_data.instances_queue = Queue()
cluster_data.instances_queue.put(mock_marathon_instance_config("api_error"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.get(block=False).get_instance() == "api_error"
cluster_data.instances_queue = Queue()
cluster_data.instances_queue.put(mock_marathon_instance_config("instance7"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.empty()
cluster_data.instances_queue = Queue()
cluster_data.instances_queue.put(mock_marathon_instance_config("instance8"))
instances_out = Queue()
f(cluster_data, instances_out, e)
assert cluster_data.instances_queue.empty()
assert instances_out.empty()
def instances_deployed_side_effect(cluster_data, instances_out, green_light):
while not cluster_data.instances_queue.empty():
instance_config = cluster_data.instances_queue.get()
if instance_config.get_instance() not in ["instance1", "instance2"]:
instances_out.put(instance_config)
cluster_data.instances_queue.task_done()
@patch(
"paasta_tools.cli.cmds.mark_for_deployment.load_system_paasta_config", autospec=True
)
@patch(
"paasta_tools.cli.cmds.mark_for_deployment.PaastaServiceConfigLoader", autospec=True
)
@patch("paasta_tools.cli.cmds.mark_for_deployment._log", autospec=True)
@patch("paasta_tools.cli.cmds.mark_for_deployment.instances_deployed", autospec=True)
def test_wait_for_deployment(
mock_instances_deployed,
mock__log,
mock_paasta_service_config_loader,
mock_load_system_paasta_config,
):
mock_paasta_service_config_loader.return_value.clusters = ["cluster1"]
mock_paasta_service_config_loader.return_value.instance_configs.return_value = [
mock_marathon_instance_config("instance1"),
mock_marathon_instance_config("instance2"),
mock_marathon_instance_config("instance3"),
]
mock_instances_deployed.side_effect = instances_deployed_side_effect
mock_load_system_paasta_config.return_value.get_api_endpoints.return_value = {
"cluster1": "some_url_1",
"cluster2": "some_url_2",
}
with raises(TimeoutError):
with patch("time.time", side_effect=[0, 0, 2], autospec=True):
with patch("time.sleep", autospec=True):
mark_for_deployment.wait_for_deployment(
"service", "fake_deploy_group", "somesha", "/nail/soa", 1
)
mock_paasta_service_config_loader.return_value.clusters = ["cluster1", "cluster2"]
# TODO: mock clusters_data_to_wait_for instead of this
mock_paasta_service_config_loader.return_value.instance_configs.side_effect = [
[
mock_marathon_instance_config("instance1"),
mock_marathon_instance_config("instance2"),
],
[
mock_marathon_instance_config("instance1"),
mock_marathon_instance_config("instance2"),
],
[],
[],
[],
[],
]
with patch("sys.stdout", autospec=True, flush=Mock()):
assert (
mark_for_deployment.wait_for_deployment(
"service", "fake_deploy_group", "somesha", "/nail/soa", 5
)
== 0
)
mock_paasta_service_config_loader.return_value.clusters = ["cluster1", "cluster2"]
# TODO: mock clusters_data_to_wait_for instead of this
mock_paasta_service_config_loader.return_value.instance_configs.side_effect = [
[
mock_marathon_instance_config("instance1"),
mock_marathon_instance_config("instance2"),
],
[
mock_marathon_instance_config("instance1"),
mock_marathon_instance_config("instance3"),
],
[],
[],
[],
[],
]
with raises(TimeoutError):
mark_for_deployment.wait_for_deployment(
"service", "fake_deploy_group", "somesha", "/nail/soa", 0
)
@patch(
"paasta_tools.cli.cmds.mark_for_deployment.load_system_paasta_config", autospec=True
)
@patch(
"paasta_tools.cli.cmds.mark_for_deployment.PaastaServiceConfigLoader", autospec=True
)
@patch("paasta_tools.cli.cmds.mark_for_deployment._log", autospec=True)
@patch("paasta_tools.cli.cmds.mark_for_deployment.instances_deployed", autospec=True)
def test_wait_for_deployment_raise_no_such_cluster(
mock_instances_deployed,
mock__log,
mock_paasta_service_config_loader,
mock_load_system_paasta_config,
):
mock_load_system_paasta_config.return_value.get_api_endpoints.return_value = {
"cluster1": "some_url_1",
"cluster2": "some_url_2",
}
mock_paasta_service_config_loader.return_value.clusters = ["cluster3"]
with raises(NoSuchCluster):
mark_for_deployment.wait_for_deployment(
"service", "deploy_group_3", "somesha", "/nail/soa", 0
)
@patch("paasta_tools.cli.cmds.wait_for_deployment.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.mark_for_deployment.wait_for_deployment", autospec=True)
def test_paasta_wait_for_deployment_return_1_when_no_such_service(
mock_wait_for_deployment, mock_validate_service_name
):
mock_validate_service_name.side_effect = NoSuchService("Some text")
assert paasta_wait_for_deployment(fake_args) == 1
assert mock_wait_for_deployment.call_args_list == []
assert mock_validate_service_name.called
@patch("paasta_tools.cli.cmds.wait_for_deployment.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.mark_for_deployment.wait_for_deployment", autospec=True)
def test_paasta_wait_for_deployment_return_1_when_deploy_group_not_found(
mock_wait_for_deployment, mock_list_deploy_groups, mock_validate_service_name
):
mock_list_deploy_groups.return_value = {"another_test_deploy_group"}
assert paasta_wait_for_deployment(fake_args) == 1
assert mock_wait_for_deployment.call_args_list == []
assert mock_validate_service_name.called
@patch(
"paasta_tools.cli.cmds.mark_for_deployment.load_system_paasta_config", autospec=True
)
@patch(
"paasta_tools.cli.cmds.mark_for_deployment.PaastaServiceConfigLoader", autospec=True
)
@patch("paasta_tools.cli.cmds.wait_for_deployment.validate_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.wait_for_deployment.validate_git_sha", autospec=True)
@patch(
"paasta_tools.cli.cmds.wait_for_deployment.validate_git_sha_is_latest",
autospec=True,
)
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.mark_for_deployment._log", autospec=True)
@patch("paasta_tools.cli.cmds.wait_for_deployment._log", autospec=True)
def test_paasta_wait_for_deployment_return_0_when_no_instances_in_deploy_group(
mock__log1,
mock__log2,
mock_list_deploy_groups,
mock_validate_git_sha_is_latest,
mock_validate_git_sha,
mock_validate_service_name,
mock_paasta_service_config_loader,
mock_load_system_paasta_config,
system_paasta_config,
):
mock__log1.return_value = None
mock__log2.return_value = None
mock_load_system_paasta_config.return_value = system_paasta_config
mock_paasta_service_config_loader.return_value.instance_configs.return_value = [
mock_marathon_instance_config("some_instance")
]
mock_list_deploy_groups.return_value = {"test_deploy_group"}
assert paasta_wait_for_deployment(fake_args) == 0
assert mock_validate_service_name.called
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs", autospec=True)
def test_get_latest_marked_sha_good(mock_list_remote_refs):
mock_list_remote_refs.return_value = {
"refs/tags/paasta-fake_group1-20161129T203750-deploy": "968b948b3fca457326718dc7b2e278f89ccc5c87",
"refs/tags/paasta-fake_group1-20161117T122449-deploy": "eac9a6d7909d09ffec00538bbc43b64502aa2dc0",
"refs/tags/paasta-fake_group2-20161125T095651-deploy": "a4911648beb2e53886658ba7ea7eb93d582d754c",
"refs/tags/paasta-fake_group1.everywhere-20161109T223959-deploy": "71e97ec397a3f0e7c4ee46e8ea1e2982cbcb0b79",
}
assert (
get_latest_marked_sha("", "fake_group1")
== "968b948b3fca457326718dc7b2e278f89ccc5c87"
)
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs", autospec=True)
def test_get_latest_marked_sha_bad(mock_list_remote_refs):
mock_list_remote_refs.return_value = {
"refs/tags/paasta-fake_group2-20161129T203750-deploy": "968b948b3fca457326718dc7b2e278f89ccc5c87"
}
assert get_latest_marked_sha("", "fake_group1") == ""
@patch("paasta_tools.cli.cmds.wait_for_deployment.list_remote_refs", autospec=True)
def test_validate_deploy_group_when_is_git_not_available(mock_list_remote_refs, capsys):
test_error_message = "Git error"
mock_list_remote_refs.side_effect = LSRemoteException(test_error_message)
assert (
validate_git_sha_is_latest(
"fake sha", "fake_git_url", "fake_group", "fake_service"
)
is None
)
def mock_marathon_instance_config(fake_name) -> "MarathonServiceConfig":
return MarathonServiceConfig(
service="fake_service",
cluster="fake_cluster",
instance=fake_name,
config_dict={"deploy_group": "fake_deploy_group"},
branch_dict=None,
soa_dir="fake_soa_dir",
)
def test_compose_timeout_message():
clusters_data = []
clusters_data.append(
mark_for_deployment.ClusterData(
cluster="cluster1",
service="someservice",
git_sha="somesha",
instances_queue=Queue(),
)
)
clusters_data[0].instances_queue.put(mock_marathon_instance_config("instance1"))
clusters_data[0].instances_queue.put(mock_marathon_instance_config("instance2"))
clusters_data.append(
mark_for_deployment.ClusterData(
cluster="cluster2",
service="someservice",
git_sha="somesha",
instances_queue=Queue(),
)
)
clusters_data[1].instances_queue.put(mock_marathon_instance_config("instance3"))
clusters_data.append(
mark_for_deployment.ClusterData(
cluster="cluster3",
service="someservice",
git_sha="somesha",
instances_queue=Queue(),
)
)
message = mark_for_deployment.compose_timeout_message(
clusters_data, 1, "fake_group", "someservice", "some_git_sha"
)
assert (
" paasta status -c cluster1 -s someservice -i instance1,instance2" in message
)
assert " paasta status -c cluster2 -s someservice -i instance3" in message
assert (
" paasta logs -c cluster1 -s someservice -i instance1,instance2 -C deploy -l 1000"
in message
)
assert (
" paasta logs -c cluster2 -s someservice -i instance3 -C deploy -l 1000"
in message
)
|
from matplotlib.ticker import FuncFormatter
from matplotlib import cm
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import matplotlib.dates as mdates
import numpy as np
import seaborn as sns
import qstrader.statistics.performance as perf
from qstrader.statistics.statistics import Statistics
from qstrader import settings
class TearsheetStatistics(Statistics):
"""
Displays a Matplotlib-generated 'one-pager' as often
found in institutional strategy performance reports.
"""
def __init__(
self,
strategy_equity,
benchmark_equity=None,
title=None,
periods=252
):
self.strategy_equity = strategy_equity
self.benchmark_equity = benchmark_equity
self.title = title
self.periods = periods
def get_results(self, equity_df):
"""
Return a dict with all important results & stats.
"""
# Returns
equity_df["returns"] = equity_df["Equity"].pct_change().fillna(0.0)
        # Cumulative Returns
equity_df["cum_returns"] = np.exp(np.log(1 + equity_df["returns"]).cumsum())
# Drawdown, max drawdown, max drawdown duration
dd_s, max_dd, dd_dur = perf.create_drawdowns(equity_df["cum_returns"])
# Equity statistics
statistics = {}
statistics["sharpe"] = perf.create_sharpe_ratio(
equity_df["returns"], self.periods
)
statistics["drawdowns"] = dd_s
statistics["max_drawdown"] = max_dd
statistics["max_drawdown_pct"] = max_dd
statistics["max_drawdown_duration"] = dd_dur
statistics["equity"] = equity_df["Equity"]
statistics["returns"] = equity_df["returns"]
statistics["cum_returns"] = equity_df["cum_returns"]
return statistics
def _plot_equity(self, strat_stats, bench_stats=None, ax=None, **kwargs):
"""
Plots cumulative rolling returns versus some benchmark.
"""
def format_two_dec(x, pos):
return '%.2f' % x
equity = strat_stats['cum_returns']
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(format_two_dec)
        ax.yaxis.set_major_formatter(y_axis_formatter)
ax.xaxis.set_tick_params(reset=True)
ax.yaxis.grid(linestyle=':')
ax.xaxis.set_major_locator(mdates.YearLocator(1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
ax.xaxis.grid(linestyle=':')
equity.plot(lw=2, color='green', alpha=0.6, x_compat=False,
label='Strategy', ax=ax, **kwargs)
if bench_stats is not None:
bench_stats['cum_returns'].plot(
lw=2, color='gray', alpha=0.6, x_compat=False,
label='Benchmark', ax=ax, **kwargs
)
ax.axhline(1.0, linestyle='--', color='black', lw=1)
ax.set_ylabel('Cumulative returns')
ax.legend(loc='best')
ax.set_xlabel('')
plt.setp(ax.get_xticklabels(), visible=True, rotation=0, ha='center')
return ax
def _plot_drawdown(self, stats, ax=None, **kwargs):
"""
Plots the underwater curve
"""
def format_perc(x, pos):
return '%.0f%%' % x
drawdown = stats['drawdowns']
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(format_perc)
        ax.yaxis.set_major_formatter(y_axis_formatter)
ax.yaxis.grid(linestyle=':')
ax.xaxis.set_tick_params(reset=True)
ax.xaxis.set_major_locator(mdates.YearLocator(1))
ax.xaxis.set_major_formatter(mdates.DateFormatter('%Y'))
ax.xaxis.grid(linestyle=':')
underwater = -100 * drawdown
underwater.plot(ax=ax, lw=2, kind='area', color='red', alpha=0.3, **kwargs)
ax.set_ylabel('')
ax.set_xlabel('')
plt.setp(ax.get_xticklabels(), visible=True, rotation=0, ha='center')
ax.set_title('Drawdown (%)', fontweight='bold')
return ax
def _plot_monthly_returns(self, stats, ax=None, **kwargs):
"""
Plots a heatmap of the monthly returns.
"""
returns = stats['returns']
if ax is None:
ax = plt.gca()
monthly_ret = perf.aggregate_returns(returns, 'monthly')
monthly_ret = monthly_ret.unstack()
monthly_ret = np.round(monthly_ret, 3)
monthly_ret.rename(
columns={1: 'Jan', 2: 'Feb', 3: 'Mar', 4: 'Apr',
5: 'May', 6: 'Jun', 7: 'Jul', 8: 'Aug',
9: 'Sep', 10: 'Oct', 11: 'Nov', 12: 'Dec'},
inplace=True
)
sns.heatmap(
monthly_ret.fillna(0) * 100.0,
annot=True,
fmt="0.1f",
annot_kws={"size": 8},
alpha=1.0,
center=0.0,
cbar=False,
cmap=cm.RdYlGn,
ax=ax, **kwargs)
ax.set_title('Monthly Returns (%)', fontweight='bold')
ax.set_ylabel('')
ax.set_yticklabels(ax.get_yticklabels(), rotation=0)
ax.set_xlabel('')
return ax
def _plot_yearly_returns(self, stats, ax=None, **kwargs):
"""
Plots a barplot of returns by year.
"""
def format_perc(x, pos):
return '%.0f%%' % x
returns = stats['returns']
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(format_perc)
        ax.yaxis.set_major_formatter(y_axis_formatter)
ax.yaxis.grid(linestyle=':')
yly_ret = perf.aggregate_returns(returns, 'yearly') * 100.0
yly_ret.plot(ax=ax, kind="bar")
ax.set_title('Yearly Returns (%)', fontweight='bold')
ax.set_ylabel('')
ax.set_xlabel('')
ax.set_xticklabels(ax.get_xticklabels(), rotation=45)
ax.xaxis.grid(False)
return ax
def _plot_txt_curve(self, stats, bench_stats=None, ax=None, **kwargs):
"""
Outputs the statistics for the equity curve.
"""
def format_perc(x, pos):
return '%.0f%%' % x
if ax is None:
ax = plt.gca()
y_axis_formatter = FuncFormatter(format_perc)
        ax.yaxis.set_major_formatter(y_axis_formatter)
# Strategy statistics
returns = stats["returns"]
cum_returns = stats['cum_returns']
tot_ret = cum_returns[-1] - 1.0
cagr = perf.create_cagr(cum_returns, self.periods)
sharpe = perf.create_sharpe_ratio(returns, self.periods)
sortino = perf.create_sortino_ratio(returns, self.periods)
dd, dd_max, dd_dur = perf.create_drawdowns(cum_returns)
# Benchmark statistics
if bench_stats is not None:
bench_returns = bench_stats["returns"]
bench_cum_returns = bench_stats['cum_returns']
bench_tot_ret = bench_cum_returns[-1] - 1.0
bench_cagr = perf.create_cagr(bench_cum_returns, self.periods)
bench_sharpe = perf.create_sharpe_ratio(bench_returns, self.periods)
bench_sortino = perf.create_sortino_ratio(bench_returns, self.periods)
bench_dd, bench_dd_max, bench_dd_dur = perf.create_drawdowns(bench_cum_returns)
# Strategy Values
ax.text(7.50, 8.2, 'Strategy', fontweight='bold', horizontalalignment='right', fontsize=8, color='green')
ax.text(0.25, 6.9, 'Total Return', fontsize=8)
ax.text(7.50, 6.9, '{:.0%}'.format(tot_ret), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(0.25, 5.9, 'CAGR', fontsize=8)
ax.text(7.50, 5.9, '{:.2%}'.format(cagr), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(0.25, 4.9, 'Sharpe Ratio', fontsize=8)
ax.text(7.50, 4.9, '{:.2f}'.format(sharpe), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(0.25, 3.9, 'Sortino Ratio', fontsize=8)
ax.text(7.50, 3.9, '{:.2f}'.format(sortino), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(0.25, 2.9, 'Annual Volatility', fontsize=8)
ax.text(7.50, 2.9, '{:.2%}'.format(returns.std() * np.sqrt(252)), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(0.25, 1.9, 'Max Daily Drawdown', fontsize=8)
ax.text(7.50, 1.9, '{:.2%}'.format(dd_max), color='red', fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(0.25, 0.9, 'Max Drawdown Duration (Days)', fontsize=8)
ax.text(7.50, 0.9, '{:.0f}'.format(dd_dur), fontweight='bold', horizontalalignment='right', fontsize=8)
# Benchmark Values
if bench_stats is not None:
ax.text(10.0, 8.2, 'Benchmark', fontweight='bold', horizontalalignment='right', fontsize=8, color='gray')
ax.text(10.0, 6.9, '{:.0%}'.format(bench_tot_ret), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(10.0, 5.9, '{:.2%}'.format(bench_cagr), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(10.0, 4.9, '{:.2f}'.format(bench_sharpe), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(10.0, 3.9, '{:.2f}'.format(bench_sortino), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(10.0, 2.9, '{:.2%}'.format(bench_returns.std() * np.sqrt(252)), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(10.0, 1.9, '{:.2%}'.format(bench_dd_max), color='red', fontweight='bold', horizontalalignment='right', fontsize=8)
ax.text(10.0, 0.9, '{:.0f}'.format(bench_dd_dur), fontweight='bold', horizontalalignment='right', fontsize=8)
ax.set_title('Equity Curve', fontweight='bold')
ax.grid(False)
ax.spines['top'].set_linewidth(2.0)
ax.spines['bottom'].set_linewidth(2.0)
ax.spines['right'].set_visible(False)
ax.spines['left'].set_visible(False)
ax.get_yaxis().set_visible(False)
ax.get_xaxis().set_visible(False)
ax.set_ylabel('')
ax.set_xlabel('')
ax.axis([0, 10, 0, 10])
return ax
def plot_results(self, filename=None):
"""
Plot the Tearsheet
"""
if settings.PRINT_EVENTS:
print('Plotting the tearsheet...')
rc = {
'lines.linewidth': 1.0,
'axes.facecolor': '0.995',
'figure.facecolor': '0.97',
'font.family': 'serif',
'font.serif': 'Ubuntu',
'font.monospace': 'Ubuntu Mono',
'font.size': 10,
'axes.labelsize': 10,
'axes.labelweight': 'bold',
'axes.titlesize': 10,
'xtick.labelsize': 8,
'ytick.labelsize': 8,
'legend.fontsize': 10,
'figure.titlesize': 12
}
sns.set_context(rc)
sns.set_style("whitegrid")
sns.set_palette("deep", desat=.6)
vertical_sections = 5
fig = plt.figure(figsize=(16, 12))
fig.suptitle(self.title, y=0.94, weight='bold')
gs = gridspec.GridSpec(vertical_sections, 3, wspace=0.25, hspace=0.5)
stats = self.get_results(self.strategy_equity)
bench_stats = None
if self.benchmark_equity is not None:
bench_stats = self.get_results(self.benchmark_equity)
ax_equity = plt.subplot(gs[:2, :])
ax_drawdown = plt.subplot(gs[2, :])
ax_monthly_returns = plt.subplot(gs[3, :2])
ax_yearly_returns = plt.subplot(gs[3, 2])
ax_txt_curve = plt.subplot(gs[4, 0])
# ax_txt_trade = plt.subplot(gs[4, 1])
# ax_txt_time = plt.subplot(gs[4, 2])
self._plot_equity(stats, bench_stats=bench_stats, ax=ax_equity)
self._plot_drawdown(stats, ax=ax_drawdown)
self._plot_monthly_returns(stats, ax=ax_monthly_returns)
self._plot_yearly_returns(stats, ax=ax_yearly_returns)
self._plot_txt_curve(stats, bench_stats=bench_stats, ax=ax_txt_curve)
# self._plot_txt_trade(stats, ax=ax_txt_trade)
# self._plot_txt_time(stats, ax=ax_txt_time)
# Plot the figure
plt.show()
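# A minimal usage sketch (assumed, not part of qstrader): render the
# tearsheet from a synthetic random-walk equity curve. The index frequency,
# drift and volatility below are arbitrary.
if __name__ == '__main__':
    import pandas as pd
    idx = pd.date_range('2018-01-01', periods=504, freq='B')
    rets = np.random.normal(0.0003, 0.01, len(idx))
    equity = pd.DataFrame(
        {'Equity': 100000.0 * (1.0 + rets).cumprod()}, index=idx)
    TearsheetStatistics(strategy_equity=equity, title='Random Walk').plot_results()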
|
from datetime import timedelta
import logging
from niluclient import (
CO,
CO2,
NO,
NO2,
NOX,
OZONE,
PM1,
PM10,
PM25,
POLLUTION_INDEX,
SO2,
create_location_client,
create_station_client,
lookup_stations_in_area,
)
import voluptuous as vol
from homeassistant.components.air_quality import PLATFORM_SCHEMA, AirQualityEntity
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_SHOW_ON_MAP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_AREA = "area"
ATTR_POLLUTION_INDEX = "nilu_pollution_index"
ATTRIBUTION = "Data provided by luftkvalitet.info and nilu.no"
CONF_AREA = "area"
CONF_STATION = "stations"
DEFAULT_NAME = "NILU"
SCAN_INTERVAL = timedelta(minutes=30)
CONF_ALLOWED_AREAS = [
"Bergen",
"Birkenes",
"Bodø",
"Brumunddal",
"Bærum",
"Drammen",
"Elverum",
"Fredrikstad",
"Gjøvik",
"Grenland",
"Halden",
"Hamar",
"Harstad",
"Hurdal",
"Karasjok",
"Kristiansand",
"Kårvatn",
"Lillehammer",
"Lillesand",
"Lillestrøm",
"Lørenskog",
"Mo i Rana",
"Moss",
"Narvik",
"Oslo",
"Prestebakke",
"Sandve",
"Sarpsborg",
"Stavanger",
"Sør-Varanger",
"Tromsø",
"Trondheim",
"Tustervatn",
"Zeppelinfjellet",
"Ålesund",
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
vol.Exclusive(
CONF_AREA,
"station_collection",
"Can only configure one specific station or "
"stations in a specific area pr sensor. "
"Please only configure station or area.",
): vol.All(cv.string, vol.In(CONF_ALLOWED_AREAS)),
vol.Exclusive(
CONF_STATION,
"station_collection",
"Can only configure one specific station or "
"stations in a specific area pr sensor. "
"Please only configure station or area.",
): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SHOW_ON_MAP, default=False): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NILU air quality sensor."""
name = config.get(CONF_NAME)
area = config.get(CONF_AREA)
stations = config.get(CONF_STATION)
show_on_map = config.get(CONF_SHOW_ON_MAP)
sensors = []
if area:
stations = lookup_stations_in_area(area)
elif not area and not stations:
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
location_client = create_location_client(latitude, longitude)
stations = location_client.station_names
for station in stations:
client = NiluData(create_station_client(station))
client.update()
if client.data.sensors:
sensors.append(NiluSensor(client, name, show_on_map))
else:
_LOGGER.warning("%s didn't give any sensors results", station)
add_entities(sensors, True)
class NiluData:
"""Class for handling the data retrieval."""
def __init__(self, api):
"""Initialize the data object."""
self.api = api
@property
def data(self):
"""Get data cached in client."""
return self.api.data
@Throttle(SCAN_INTERVAL)
def update(self):
"""Get the latest data from nilu API."""
self.api.update()
class NiluSensor(AirQualityEntity):
"""Single nilu station air sensor."""
def __init__(self, api_data: NiluData, name: str, show_on_map: bool):
"""Initialize the sensor."""
self._api = api_data
self._name = f"{name} {api_data.data.name}"
self._max_aqi = None
self._attrs = {}
if show_on_map:
self._attrs[CONF_LATITUDE] = api_data.data.latitude
self._attrs[CONF_LONGITUDE] = api_data.data.longitude
@property
def attribution(self) -> str:
"""Return the attribution."""
return ATTRIBUTION
@property
def device_state_attributes(self) -> dict:
"""Return other details about the sensor state."""
return self._attrs
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def air_quality_index(self) -> str:
"""Return the Air Quality Index (AQI)."""
return self._max_aqi
@property
def carbon_monoxide(self) -> str:
"""Return the CO (carbon monoxide) level."""
return self.get_component_state(CO)
@property
def carbon_dioxide(self) -> str:
"""Return the CO2 (carbon dioxide) level."""
return self.get_component_state(CO2)
@property
    def nitrogen_oxide(self) -> str:
        """Return the NOx (nitrogen oxides) level."""
return self.get_component_state(NOX)
@property
def nitrogen_monoxide(self) -> str:
"""Return the NO (nitrogen monoxide) level."""
return self.get_component_state(NO)
@property
def nitrogen_dioxide(self) -> str:
"""Return the NO2 (nitrogen dioxide) level."""
return self.get_component_state(NO2)
@property
def ozone(self) -> str:
"""Return the O3 (ozone) level."""
return self.get_component_state(OZONE)
@property
def particulate_matter_2_5(self) -> str:
"""Return the particulate matter 2.5 level."""
return self.get_component_state(PM25)
@property
def particulate_matter_10(self) -> str:
"""Return the particulate matter 10 level."""
return self.get_component_state(PM10)
@property
def particulate_matter_0_1(self) -> str:
"""Return the particulate matter 0.1 level."""
return self.get_component_state(PM1)
@property
def sulphur_dioxide(self) -> str:
"""Return the SO2 (sulphur dioxide) level."""
return self.get_component_state(SO2)
def get_component_state(self, component_name: str) -> str:
"""Return formatted value of specified component."""
if component_name in self._api.data.sensors:
sensor = self._api.data.sensors[component_name]
return sensor.value
return None
def update(self) -> None:
"""Update the sensor."""
self._api.update()
sensors = self._api.data.sensors.values()
if sensors:
max_index = max([s.pollution_index for s in sensors])
self._max_aqi = max_index
self._attrs[ATTR_POLLUTION_INDEX] = POLLUTION_INDEX[self._max_aqi]
self._attrs[ATTR_AREA] = self._api.data.area
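# A rough sketch (assumed, needs network access): how the niluclient calls
# above behave outside Home Assistant. Station names and available sensors
# depend on live data from nilu.no.
if __name__ == '__main__':
    for station in lookup_stations_in_area('Oslo'):
        station_client = create_station_client(station)
        station_client.update()
        print(station, sorted(station_client.data.sensors))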
|
import logging
from homeassistant.const import CONF_NAME, STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import ToggleEntity
from .const import DOMAIN, SWITCH_TYPES
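# SWITCH_TYPES (defined in .const) maps a switch key to its display name,
# e.g. {"on_off": "Switch", "turtle_mode": "Turtle Mode"}.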
_LOGGING = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Transmission switch."""
tm_client = hass.data[DOMAIN][config_entry.entry_id]
name = config_entry.data[CONF_NAME]
dev = []
for switch_type, switch_name in SWITCH_TYPES.items():
dev.append(TransmissionSwitch(switch_type, switch_name, tm_client, name))
async_add_entities(dev, True)
class TransmissionSwitch(ToggleEntity):
"""Representation of a Transmission switch."""
def __init__(self, switch_type, switch_name, tm_client, name):
"""Initialize the Transmission switch."""
self._name = switch_name
self.client_name = name
self.type = switch_type
self._tm_client = tm_client
self._state = STATE_OFF
self._data = None
self.unsub_update = None
@property
def name(self):
"""Return the name of the switch."""
return f"{self.client_name} {self._name}"
@property
def unique_id(self):
"""Return the unique id of the entity."""
return f"{self._tm_client.api.host}-{self.name}"
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def should_poll(self):
"""Poll for status regularly."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state == STATE_ON
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._tm_client.api.available
def turn_on(self, **kwargs):
"""Turn the device on."""
if self.type == "on_off":
_LOGGING.debug("Starting all torrents")
self._tm_client.api.start_torrents()
elif self.type == "turtle_mode":
_LOGGING.debug("Turning Turtle Mode of Transmission on")
self._tm_client.api.set_alt_speed_enabled(True)
self._tm_client.api.update()
def turn_off(self, **kwargs):
"""Turn the device off."""
if self.type == "on_off":
_LOGGING.debug("Stopping all torrents")
self._tm_client.api.stop_torrents()
if self.type == "turtle_mode":
_LOGGING.debug("Turning Turtle Mode of Transmission off")
self._tm_client.api.set_alt_speed_enabled(False)
self._tm_client.api.update()
async def async_added_to_hass(self):
"""Handle entity which will be added."""
self.unsub_update = async_dispatcher_connect(
self.hass,
self._tm_client.api.signal_update,
self._schedule_immediate_update,
)
@callback
def _schedule_immediate_update(self):
self.async_schedule_update_ha_state(True)
    async def async_will_remove_from_hass(self):
"""Unsubscribe from update dispatcher."""
if self.unsub_update:
self.unsub_update()
self.unsub_update = None
    def update(self):
        """Get the latest data from Transmission and update the state."""
active = None
if self.type == "on_off":
self._data = self._tm_client.api.data
if self._data:
active = self._data.activeTorrentCount > 0
elif self.type == "turtle_mode":
active = self._tm_client.api.get_alt_speed_enabled()
if active is None:
return
self._state = STATE_ON if active else STATE_OFF
|
import os
from perfkitbenchmarker import linux_packages
SHOC_GIT_URL = 'https://github.com/vetter/shoc.git'
SHOC_DIR = '%s/shoc' % linux_packages.INSTALL_DIR
SHOC_BIN_DIR = os.path.join(SHOC_DIR, 'bin')
SHOC_PATCH = 'shoc_config.patch'
APT_PACKAGES = 'wget automake git zip libopenmpi-dev'
def _IsShocInstalled(vm):
  """Returns whether shoc is installed or not."""
command = os.path.join(SHOC_BIN_DIR, 'shocdriver')
resp, _ = vm.RemoteHostCommand('command -v %s' % command,
ignore_failure=True,
suppress_warning=True)
if resp.rstrip() == "":
return False
return True
def AptInstall(vm):
"""Installs the SHOC benchmark suite on the VM."""
if _IsShocInstalled(vm):
return
vm.InstallPackages(APT_PACKAGES)
vm.Install('cuda_toolkit')
vm.RemoteCommand('cd %s && git clone %s' %
(linux_packages.INSTALL_DIR, SHOC_GIT_URL))
vm.RemoteCommand(('cd %s && ./configure '
'CUDA_CPPFLAGS=-gencode=arch=compute_37,code=compute_37 '
'NVCC=/usr/local/cuda/bin/nvcc')
% SHOC_DIR)
vm.RemoteCommand('cd %s && make -j8 && make install' % SHOC_DIR)
def YumInstall(vm):
"""TODO: PKB currently only supports the installation of SHOC
on Ubuntu.
"""
raise NotImplementedError()
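# Typical entry point (assumed): PerfKitBenchmarker installs this package on
# a VM via vm.Install('shoc'), which dispatches to AptInstall above on
# Debian/Ubuntu images and to YumInstall on RHEL-family images.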
|
from absl import flags
from perfkitbenchmarker import disk
FLAGS = flags.FLAGS
class MesosDisk(disk.BaseDisk):
"""
Base class for Mesos Disks.
"""
def __init__(self, disk_spec):
super(MesosDisk, self).__init__(disk_spec)
self.mount_point = disk_spec.mount_point
def _Create(self):
return
def _Delete(self):
return
def Attach(self, vm):
return
def Detach(self):
return
def GetDevicePath(self):
return None
class LocalDisk(MesosDisk):
"""
Represents DAS (direct-attached storage). For each disk, a new directory
on a host is created and then mounted as a volume to a Docker instance.
"""
def __init__(self, disk_num, disk_spec, name):
super(LocalDisk, self).__init__(disk_spec)
self.name = 'local-disk-%s-%s' % (name, disk_num)
def AttachVolumeInfo(self, container_body):
# Intentionally not using 'volumes' API as it doesn't allow to
# create local volumes automatically - when empty hostPath is passed to
# Marathon's API then the sandbox path is used as a host path. However,
# new directory should be created and used for this specific purpose.
volume_param = {
'key': 'volume',
'value': self.mount_point
}
container_body['docker']['parameters'].append(volume_param)
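# Tiny illustration (assumed shape): after AttachVolumeInfo runs for a disk
# mounted at /scratch, the Marathon container body gains an entry equivalent
# to passing `--volume /scratch` to `docker run`:
#   {'docker': {'parameters': [{'key': 'volume', 'value': '/scratch'}]}}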
|
from django.contrib.auth.models import AnonymousUser
from django.urls import reverse
from shop.models.cart import CartModel, CartItemModel
from shop.models.customer import CustomerModel
from shop.views.catalog import ProductListView, ProductRetrieveView, AddToCartView
import pytest
@pytest.mark.django_db
def test_catalog_list(commodity_factory, rf):
product = commodity_factory()
request = rf.get('/catalog/')
request.current_page = product.cms_pages.first()
response = ProductListView.as_view()(request)
response.render()
assert response.data['count'] == 1
assert response.data['next'] is None
assert len(response.data['results']) == 1
assert response.data['results'][0]['product_url'] == request.path + product.slug
@pytest.mark.django_db
def test_catalog_detail(commodity_factory, customer_factory, rf):
product = commodity_factory()
request = rf.get('/catalog/{}'.format(product.slug))
request.current_page = product.cms_pages.first()
request.customer = customer_factory()
response = ProductRetrieveView.as_view()(request, slug=product.slug)
response.render()
assert response.data['product_code'] == product.product_code
assert response.data['price'] == str(product.unit_price)
assert response.data['slug'] == product.slug
@pytest.mark.django_db
def test_get_add_to_cart(commodity_factory, customer_factory, rf):
product = commodity_factory()
request = rf.get(product.get_absolute_url() + '/add-to-cart')
request.current_page = product.cms_pages.first()
request.customer = customer_factory()
response = AddToCartView.as_view()(request, slug=product.slug)
response.render()
assert response.data['quantity'] == 1
assert response.data['unit_price'] == str(product.unit_price)
assert response.data['product_code'] == product.product_code
assert response.data['is_in_cart'] is False
@pytest.mark.django_db
def test_too_greedy(commodity_factory, customer_factory, rf):
"""
Add more products to the cart than quantity in stock
"""
product = commodity_factory()
data = {'quantity': 10, 'product': product.id}
request = rf.post(product.get_absolute_url() + '/add-to-cart', data=data)
request.current_page = product.cms_pages.first()
request.customer = customer_factory()
response = AddToCartView.as_view()(request, slug=product.slug)
assert response.status_code == 202
assert response.data['quantity'] == 5 # not 10, as requested
assert response.data['unit_price'] == str(product.unit_price)
assert response.data['subtotal'] == str(5 * product.unit_price)
@pytest.mark.django_db
def test_merge_with_cart(registered_customer, api_client, rf, empty_cart, commodity_factory):
# add items to the cart while it belongs to an anonymous customer
assert empty_cart.num_items == 0
product1 = commodity_factory()
CartItemModel.objects.create(cart=empty_cart, quantity=1, product=product1)
product2 = commodity_factory()
CartItemModel.objects.create(cart=empty_cart, quantity=2, product=product2)
assert empty_cart.num_items == 2
# add other items to cart belonging to registered user
other_cart = CartModel.objects.create(customer=registered_customer)
CartItemModel.objects.create(cart=other_cart, quantity=2, product=product2)
product3 = commodity_factory()
CartItemModel.objects.create(cart=other_cart, quantity=3, product=product3)
assert other_cart.num_items == 2
# after logging in, both carts must be merged and assigned to the registered customer
login_url = reverse('shop:login')
data = {
'form_data': {
'username': registered_customer.email,
'password': 'secret',
}
}
assert api_client.post(login_url, data=data, format='json').status_code == 200
request = rf.get('/my-cart')
request.customer = registered_customer
cart = CartModel.objects.get_from_request(request)
assert cart.num_items == 3
item1 = cart.items.get(product=product1)
assert item1.quantity == 1
item2 = cart.items.get(product=product2)
assert item2.quantity == 4
item3 = cart.items.get(product=product3)
assert item3.quantity == 3
@pytest.mark.django_db
def test_add_to_watch_list(commodity_factory, api_client, rf):
# add a product to the cart
product = commodity_factory()
data = {'quantity': 1, 'product': product.id}
response = api_client.post(reverse('shop:watch-list'), data)
assert response.status_code == 201
assert response.data['quantity'] == 0
# verify that the product is in the watch-list
request = rf.get('/my-watch-list')
request.session = api_client.session
request.user = AnonymousUser()
request.customer = CustomerModel.objects.get_from_request(request)
cart = CartModel.objects.get_from_request(request)
assert cart.num_items == 0
items = cart.items.all()
assert items.count() == 1
assert items[0].product == product
assert items[0].quantity == 0
return cart
|