import sys
import coverage
from tests.coveragetest import CoverageTest
class SetupPyTest(CoverageTest):
"""Tests of setup.py"""
run_in_temp_dir = False
def setUp(self):
super(SetupPyTest, self).setUp()
# Force the most restrictive interpretation.
self.set_environ('LC_ALL', 'C')
def test_metadata(self):
status, output = self.run_command_status(
"python setup.py --description --version --url --author"
)
self.assertEqual(status, 0)
out = output.splitlines()
self.assertIn("measurement", out[0])
self.assertEqual(coverage.__version__, out[1])
self.assertIn("github.com/nedbat/coveragepy", out[2])
self.assertIn("Ned Batchelder", out[3])
def test_more_metadata(self):
# Let's be sure we pick up our own setup.py
# CoverageTest restores the original sys.path for us.
sys.path.insert(0, '')
from setup import setup_args
classifiers = setup_args['classifiers']
self.assertGreater(len(classifiers), 7)
self.assert_starts_with(classifiers[-1], "Development Status ::")
self.assertIn("Programming Language :: Python :: %d" % sys.version_info[:1], classifiers)
self.assertIn("Programming Language :: Python :: %d.%d" % sys.version_info[:2], classifiers)
long_description = setup_args['long_description'].splitlines()
self.assertGreater(len(long_description), 7)
self.assertNotEqual(long_description[0].strip(), "")
self.assertNotEqual(long_description[-1].strip(), "")
|
from .util import async_init_integration
async def test_water_heater_create_sensors(hass):
"""Test creation of water heater."""
await async_init_integration(hass)
state = hass.states.get("water_heater.water_heater")
assert state.state == "auto"
expected_attributes = {
"current_temperature": None,
"friendly_name": "Water Heater",
"max_temp": 31.0,
"min_temp": 16.0,
"operation_list": ["auto", "heat", "off"],
"operation_mode": "auto",
"supported_features": 3,
"target_temp_high": None,
"target_temp_low": None,
"temperature": 65.0,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
state = hass.states.get("water_heater.second_water_heater")
assert state.state == "heat"
expected_attributes = {
"current_temperature": None,
"friendly_name": "Second Water Heater",
"max_temp": 31.0,
"min_temp": 16.0,
"operation_list": ["auto", "heat", "off"],
"operation_mode": "heat",
"supported_features": 3,
"target_temp_high": None,
"target_temp_low": None,
"temperature": 30.0,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
|
from homeassistant.components.sonarr.const import (
CONF_UPCOMING_DAYS,
CONF_WANTED_MAX_ITEMS,
DEFAULT_UPCOMING_DAYS,
DEFAULT_WANTED_MAX_ITEMS,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_REAUTH, SOURCE_USER
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_SOURCE, CONF_VERIFY_SSL
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import patch
from tests.components.sonarr import (
HOST,
MOCK_REAUTH_INPUT,
MOCK_USER_INPUT,
_patch_async_setup,
_patch_async_setup_entry,
mock_connection,
mock_connection_error,
mock_connection_invalid_auth,
setup_integration,
)
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_show_user_form(hass: HomeAssistantType) -> None:
"""Test that the user set up form is served."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
)
assert result["step_id"] == "user"
assert result["type"] == RESULT_TYPE_FORM
async def test_cannot_connect(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we show user form on connection error."""
mock_connection_error(aioclient_mock)
user_input = MOCK_USER_INPUT.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_invalid_auth(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we show user form on invalid auth."""
mock_connection_invalid_auth(aioclient_mock)
user_input = MOCK_USER_INPUT.copy()
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "invalid_auth"}
async def test_unknown_error(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test we show user form on unknown error."""
user_input = MOCK_USER_INPUT.copy()
with patch(
"homeassistant.components.sonarr.config_flow.Sonarr.update",
side_effect=Exception,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data=user_input,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
async def test_full_reauth_flow_implementation(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the manual reauth flow from start to finish."""
entry = await setup_integration(hass, aioclient_mock, skip_entry_setup=True)
assert entry
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_REAUTH},
data={"config_entry_id": entry.entry_id, **entry.data},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "reauth_confirm"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
user_input = MOCK_REAUTH_INPUT.copy()
with _patch_async_setup(), _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=user_input
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "reauth_successful"
assert entry.data[CONF_API_KEY] == "test-api-key-reauth"
mock_setup_entry.assert_called_once()
async def test_full_user_flow_implementation(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the full manual user flow from start to finish."""
mock_connection(aioclient_mock)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
user_input = MOCK_USER_INPUT.copy()
with _patch_async_setup(), _patch_async_setup_entry():
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input=user_input,
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == HOST
assert result["data"]
assert result["data"][CONF_HOST] == HOST
async def test_full_user_flow_advanced_options(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the full manual user flow with advanced options."""
mock_connection(aioclient_mock)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER, "show_advanced_options": True}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
user_input = {
**MOCK_USER_INPUT,
CONF_VERIFY_SSL: True,
}
with _patch_async_setup(), _patch_async_setup_entry():
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input=user_input,
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == HOST
assert result["data"]
assert result["data"][CONF_HOST] == HOST
assert result["data"][CONF_VERIFY_SSL]
async def test_options_flow(hass, aioclient_mock: AiohttpClientMocker):
"""Test updating options."""
with patch("homeassistant.components.sonarr.PLATFORMS", []):
entry = await setup_integration(hass, aioclient_mock)
assert entry.options[CONF_UPCOMING_DAYS] == DEFAULT_UPCOMING_DAYS
assert entry.options[CONF_WANTED_MAX_ITEMS] == DEFAULT_WANTED_MAX_ITEMS
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
with _patch_async_setup(), _patch_async_setup_entry():
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_UPCOMING_DAYS: 2, CONF_WANTED_MAX_ITEMS: 100},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_UPCOMING_DAYS] == 2
assert result["data"][CONF_WANTED_MAX_ITEMS] == 100
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from compare_gan.metrics import eval_task
import tensorflow as tf
import tensorflow_gan as tfgan
# Special value returned when the FID computation raised an exception.
FID_CODE_FAILED = 4242.0
class FIDScoreTask(eval_task.EvalTask):
"""Evaluation task for the FID score."""
_LABEL = "fid_score"
def run_after_session(self, fake_dset, real_dset):
logging.info("Calculating FID.")
with tf.Graph().as_default():
fake_activations = tf.convert_to_tensor(fake_dset.activations)
real_activations = tf.convert_to_tensor(real_dset.activations)
fid = tfgan.eval.frechet_classifier_distance_from_activations(
real_activations=real_activations,
generated_activations=fake_activations)
with self._create_session() as sess:
fid = sess.run(fid)
logging.info("Frechet Inception Distance: %.3f.", fid)
return {self._LABEL: fid}
def compute_fid_from_activations(fake_activations, real_activations):
"""Returns the FID based on activations.
Args:
fake_activations: NumPy array with fake activations.
real_activations: NumPy array with real activations.
Returns:
A float, the Frechet Inception Distance.
"""
logging.info("Computing FID score.")
assert fake_activations.shape == real_activations.shape
with tf.Session(graph=tf.Graph()) as sess:
fake_activations = tf.convert_to_tensor(fake_activations)
real_activations = tf.convert_to_tensor(real_activations)
fid = tfgan.eval.frechet_classifier_distance_from_activations(
real_activations=real_activations,
generated_activations=fake_activations)
return sess.run(fid)
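# Usage sketch (not part of the original module): assuming NumPy activation
# matrices of identical shape, e.g. pooled Inception features, the helper
# above can be called directly. Shapes and values here are illustrative.
if __name__ == "__main__":
  import numpy as np  # assumed to be available alongside TensorFlow

  fake = np.random.normal(size=(256, 64)).astype(np.float32)
  real = np.random.normal(size=(256, 64)).astype(np.float32)
  print("FID:", compute_fid_from_activations(fake, real))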
|
import os
from collections import OrderedDict
from glob import glob
from itertools import chain
from typing import List, Optional, Tuple, Union
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from weblate.formats.base import TranslationFormat, TranslationUnit
from weblate.utils.errors import report_error
class TextItem:
"""Actual text unit object."""
def __init__(self, filename, line, text, flags=None):
self.filename = filename
self.line = line
self.text = text
self.flags = flags
@cached_property
def location(self):
return f"{self.filename}:{self.line}"
def getid(self):
return self.location
class TextParser:
"""Simple text parser returning all content as single unit."""
def __init__(self, storefile, filename=None, flags=None):
with open(storefile) as handle:
content = handle.read()
if filename:
self.filename = filename
else:
self.filename = os.path.basename(storefile)
self.units = [TextItem(self.filename, 1, content.strip(), flags)]
class TextSerializer:
def __init__(self, filename, units):
self.units = [unit for unit in units if unit.filename == filename]
def __call__(self, handle):
for unit in self.units:
handle.write(unit.text.encode())
handle.write(b"\n")
class MultiParser:
filenames: Tuple[Tuple[str, str], ...] = ()
def __init__(self, storefile):
if not isinstance(storefile, str):
raise ValueError("Needs string as a storefile!")
self.base = storefile
self.parsers = self.load_parser()
self.units = list(
chain.from_iterable(parser.units for parser in self.parsers.values())
)
def file_key(self, filename):
return filename
def load_parser(self):
result = OrderedDict()
for name, flags in self.filenames:
filename = self.get_filename(name)
for match in sorted(glob(filename), key=self.file_key):
# Needed to allow overlapping globs, more specific first
if match in result:
continue
result[match] = TextParser(
match, os.path.relpath(match, self.base), flags
)
return result
def get_filename(self, name):
return os.path.join(self.base, name)
class AppStoreParser(MultiParser):
filenames = (
("title.txt", "max-length:50"),
("short[_-]description.txt", "max-length:80"),
("full[_-]description.txt", "max-length:4000"),
("subtitle.txt", "max-length:80"),
("description.txt", "max-length:4000"),
("keywords.txt", "max-length:100"),
("video.txt", "max-length:256, url"),
("marketing_url.txt", "max-length:256, url"),
("privacy_url.txt", "max-length:256, url"),
("support_url.txt", "max-length:256, url"),
("changelogs/*.txt", "max-length:500"),
("*.txt", ""),
)
def file_key(self, filename):
parts = filename.rsplit("changelogs/", 1)
if len(parts) == 2:
try:
return -int(parts[1].split(".")[0])
except ValueError:
pass
return filename
class TextUnit(TranslationUnit):
@cached_property
def locations(self):
"""Return comma separated list of locations."""
return self.mainunit.location
@cached_property
def source(self):
"""Return source string from a ttkit unit."""
if self.template is not None:
return self.template.text
return self.unit.text
@cached_property
def target(self):
"""Return target string from a ttkit unit."""
if self.unit is None:
return ""
return self.unit.text
@cached_property
def context(self):
"""Return context of message."""
return self.mainunit.location
@cached_property
def flags(self):
"""Return flags from unit."""
if self.mainunit.flags:
return self.mainunit.flags
return ""
def set_target(self, target):
"""Set translation unit target."""
self._invalidate_target()
self.unit.text = target
def mark_fuzzy(self, fuzzy):
"""Set fuzzy flag on translated unit."""
return
def mark_approved(self, value):
"""Set approved flag on translated unit."""
return
class AppStoreFormat(TranslationFormat):
name = _("App store metadata files")
format_id = "appstore"
can_add_unit = False
monolingual = True
unit_class = TextUnit
simple_filename = False
@classmethod
def load(cls, storefile, template_store):
return AppStoreParser(storefile)
def create_unit(self, key: str, source: Union[str, List[str]]):
raise ValueError("Create not supported")
@classmethod
def create_new_file(cls, filename, language, base):
"""Handle creation of new translation file."""
os.makedirs(filename)
def add_unit(self, ttkit_unit):
"""Add new unit to underlaying store."""
self.store.units.append(ttkit_unit)
def save(self):
"""Save underlaying store to disk."""
for unit in self.store.units:
if not unit.text:
continue
self.save_atomic(
self.store.get_filename(unit.filename),
TextSerializer(unit.filename, self.store.units),
)
def get_filenames(self):
return [self.store.get_filename(unit.filename) for unit in self.store.units]
@classmethod
def get_class(cls):
return None
@classmethod
def is_valid_base_for_new(cls, base, monolingual, errors: Optional[List] = None):
"""Check whether base is valid."""
if not base:
return True
try:
AppStoreParser(base)
return True
except Exception:
report_error(cause="File parse error")
return False
def delete_unit(self, ttkit_unit) -> Optional[str]:
filename = self.store.get_filename(ttkit_unit.filename)
os.unlink(filename)
return filename
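# Usage sketch (illustrative, not part of the original module): given a
# directory laid out like Fastlane metadata, AppStoreParser yields one unit
# per matching file, carrying the flags declared in `filenames` above.
#
#     parser = AppStoreParser("/path/to/metadata/en-US")
#     for unit in parser.units:
#         print(unit.location, unit.flags, unit.text[:40])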
|
from datetime import timedelta
from aioasuswrt.asuswrt import Device
from homeassistant.components import sensor
from homeassistant.components.asuswrt import (
CONF_DNSMASQ,
CONF_INTERFACE,
CONF_MODE,
CONF_PORT,
CONF_PROTOCOL,
CONF_SENSORS,
DOMAIN,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import AsyncMock, patch
from tests.common import async_fire_time_changed
VALID_CONFIG_ROUTER_SSH = {
DOMAIN: {
CONF_DNSMASQ: "/",
CONF_HOST: "fake_host",
CONF_INTERFACE: "eth0",
CONF_MODE: "router",
CONF_PORT: "22",
CONF_PROTOCOL: "ssh",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "fake_pass",
CONF_SENSORS: [
"devices",
"download_speed",
"download",
"upload_speed",
"upload",
],
}
}
MOCK_DEVICES = {
"a1:b1:c1:d1:e1:f1": Device("a1:b1:c1:d1:e1:f1", "192.168.1.2", "Test"),
"a2:b2:c2:d2:e2:f2": Device("a2:b2:c2:d2:e2:f2", "192.168.1.3", "TestTwo"),
"a3:b3:c3:d3:e3:f3": Device("a3:b3:c3:d3:e3:f3", "192.168.1.4", "TestThree"),
}
MOCK_BYTES_TOTAL = [60000000000, 50000000000]
MOCK_CURRENT_TRANSFER_RATES = [20000000, 10000000]
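# The mocked counters above correspond to the sensor states asserted below:
# 20_000_000 B/s and 10_000_000 B/s are 160.0 and 80.0 Mbit/s, while
# 60_000_000_000 B and 50_000_000_000 B are 60.0 and 50.0 decimal gigabytes.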
async def test_sensors(hass: HomeAssistant, mock_device_tracker_conf):
"""Test creating an AsusWRT sensor."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().async_get_connected_devices = AsyncMock(return_value=MOCK_DEVICES)
AsusWrt().async_get_bytes_total = AsyncMock(return_value=MOCK_BYTES_TOTAL)
AsusWrt().async_get_current_transfer_rates = AsyncMock(
return_value=MOCK_CURRENT_TRANSFER_RATES
)
assert await async_setup_component(hass, DOMAIN, VALID_CONFIG_ROUTER_SSH)
await hass.async_block_till_done()
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert (
hass.states.get(f"{sensor.DOMAIN}.asuswrt_devices_connected").state == "3"
)
assert (
hass.states.get(f"{sensor.DOMAIN}.asuswrt_download_speed").state == "160.0"
)
assert hass.states.get(f"{sensor.DOMAIN}.asuswrt_download").state == "60.0"
assert hass.states.get(f"{sensor.DOMAIN}.asuswrt_upload_speed").state == "80.0"
assert hass.states.get(f"{sensor.DOMAIN}.asuswrt_upload").state == "50.0"
|
from homematicip.aio.auth import AsyncAuth
from homematicip.base.base_connection import HmipConnectionError
import pytest
from homeassistant.components.homematicip_cloud import DOMAIN as HMIPC_DOMAIN
from homeassistant.components.homematicip_cloud.const import (
HMIPC_AUTHTOKEN,
HMIPC_HAPID,
HMIPC_NAME,
HMIPC_PIN,
)
from homeassistant.components.homematicip_cloud.errors import HmipcConnectionError
from homeassistant.components.homematicip_cloud.hap import (
HomematicipAuth,
HomematicipHAP,
)
from homeassistant.config_entries import ENTRY_STATE_NOT_LOADED
from homeassistant.exceptions import ConfigEntryNotReady
from .helper import HAPID, HAPPIN
from tests.async_mock import Mock, patch
async def test_auth_setup(hass):
"""Test auth setup for client registration."""
config = {HMIPC_HAPID: "ABC123", HMIPC_PIN: "123", HMIPC_NAME: "hmip"}
hmip_auth = HomematicipAuth(hass, config)
with patch.object(hmip_auth, "get_auth"):
assert await hmip_auth.async_setup()
async def test_auth_setup_connection_error(hass):
"""Test auth setup connection error behaviour."""
config = {HMIPC_HAPID: "ABC123", HMIPC_PIN: "123", HMIPC_NAME: "hmip"}
hmip_auth = HomematicipAuth(hass, config)
with patch.object(hmip_auth, "get_auth", side_effect=HmipcConnectionError):
assert not await hmip_auth.async_setup()
async def test_auth_auth_check_and_register(hass):
"""Test auth client registration."""
config = {HMIPC_HAPID: "ABC123", HMIPC_PIN: "123", HMIPC_NAME: "hmip"}
hmip_auth = HomematicipAuth(hass, config)
hmip_auth.auth = Mock(spec=AsyncAuth)
with patch.object(
hmip_auth.auth, "isRequestAcknowledged", return_value=True
), patch.object(
hmip_auth.auth, "requestAuthToken", return_value="ABC"
), patch.object(
hmip_auth.auth, "confirmAuthToken"
):
assert await hmip_auth.async_checkbutton()
assert await hmip_auth.async_register() == "ABC"
async def test_auth_auth_check_and_register_with_exception(hass):
"""Test auth client registration."""
config = {HMIPC_HAPID: "ABC123", HMIPC_PIN: "123", HMIPC_NAME: "hmip"}
hmip_auth = HomematicipAuth(hass, config)
hmip_auth.auth = Mock(spec=AsyncAuth)
with patch.object(
hmip_auth.auth, "isRequestAcknowledged", side_effect=HmipConnectionError
), patch.object(
hmip_auth.auth, "requestAuthToken", side_effect=HmipConnectionError
):
assert not await hmip_auth.async_checkbutton()
assert await hmip_auth.async_register() is False
async def test_hap_setup_works():
"""Test a successful setup of a accesspoint."""
hass = Mock()
entry = Mock()
home = Mock()
entry.data = {HMIPC_HAPID: "ABC123", HMIPC_AUTHTOKEN: "123", HMIPC_NAME: "hmip"}
hap = HomematicipHAP(hass, entry)
with patch.object(hap, "get_hap", return_value=home):
assert await hap.async_setup()
assert hap.home is home
assert len(hass.config_entries.async_forward_entry_setup.mock_calls) == 8
assert hass.config_entries.async_forward_entry_setup.mock_calls[0][1] == (
entry,
"alarm_control_panel",
)
assert hass.config_entries.async_forward_entry_setup.mock_calls[1][1] == (
entry,
"binary_sensor",
)
async def test_hap_setup_connection_error():
"""Test a failed accesspoint setup."""
hass = Mock()
entry = Mock()
entry.data = {HMIPC_HAPID: "ABC123", HMIPC_AUTHTOKEN: "123", HMIPC_NAME: "hmip"}
hap = HomematicipHAP(hass, entry)
with patch.object(hap, "get_hap", side_effect=HmipcConnectionError), pytest.raises(
ConfigEntryNotReady
):
assert not await hap.async_setup()
assert not hass.async_add_job.mock_calls
assert not hass.config_entries.flow.async_init.mock_calls
async def test_hap_reset_unloads_entry_if_setup(hass, default_mock_hap_factory):
"""Test calling reset while the entry has been setup."""
mock_hap = await default_mock_hap_factory.async_get_mock_hap()
assert hass.data[HMIPC_DOMAIN][HAPID] == mock_hap
config_entries = hass.config_entries.async_entries(HMIPC_DOMAIN)
assert len(config_entries) == 1
# hap_reset is called during unload
await hass.config_entries.async_unload(config_entries[0].entry_id)
# entry is unloaded
assert config_entries[0].state == ENTRY_STATE_NOT_LOADED
assert hass.data[HMIPC_DOMAIN] == {}
async def test_hap_create(hass, hmip_config_entry, simple_mock_home):
"""Mock AsyncHome to execute get_hap."""
hass.config.components.add(HMIPC_DOMAIN)
hap = HomematicipHAP(hass, hmip_config_entry)
assert hap
with patch.object(hap, "async_connect"):
assert await hap.async_setup()
async def test_hap_create_exception(hass, hmip_config_entry, mock_connection_init):
"""Mock AsyncHome to execute get_hap."""
hass.config.components.add(HMIPC_DOMAIN)
hap = HomematicipHAP(hass, hmip_config_entry)
assert hap
with patch(
"homeassistant.components.homematicip_cloud.hap.AsyncHome.get_current_state",
side_effect=Exception,
):
assert not await hap.async_setup()
with patch(
"homeassistant.components.homematicip_cloud.hap.AsyncHome.get_current_state",
side_effect=HmipConnectionError,
), pytest.raises(ConfigEntryNotReady):
await hap.async_setup()
async def test_auth_create(hass, simple_mock_auth):
"""Mock AsyncAuth to execute get_auth."""
config = {HMIPC_HAPID: HAPID, HMIPC_PIN: HAPPIN, HMIPC_NAME: "hmip"}
hmip_auth = HomematicipAuth(hass, config)
assert hmip_auth
with patch(
"homeassistant.components.homematicip_cloud.hap.AsyncAuth",
return_value=simple_mock_auth,
):
assert await hmip_auth.async_setup()
await hass.async_block_till_done()
assert hmip_auth.auth.pin == HAPPIN
async def test_auth_create_exception(hass, simple_mock_auth):
"""Mock AsyncAuth to execute get_auth."""
config = {HMIPC_HAPID: HAPID, HMIPC_PIN: HAPPIN, HMIPC_NAME: "hmip"}
hmip_auth = HomematicipAuth(hass, config)
simple_mock_auth.connectionRequest.side_effect = HmipConnectionError
assert hmip_auth
with patch(
"homeassistant.components.homematicip_cloud.hap.AsyncAuth",
return_value=simple_mock_auth,
):
assert not await hmip_auth.async_setup()
with patch(
"homeassistant.components.homematicip_cloud.hap.AsyncAuth",
return_value=simple_mock_auth,
):
assert not await hmip_auth.get_auth(hass, HAPID, HAPPIN)
|
import json
import pytest
from homeassistant.components import camera
from homeassistant.setup import async_setup_component
from .test_common import (
help_test_availability_when_connection_lost,
help_test_availability_without_topic,
help_test_custom_availability_payload,
help_test_default_availability_payload,
help_test_discovery_broken,
help_test_discovery_removal,
help_test_discovery_update,
help_test_discovery_update_attr,
help_test_discovery_update_unchanged,
help_test_entity_debug_info_message,
help_test_entity_device_info_remove,
help_test_entity_device_info_update,
help_test_entity_device_info_with_connection,
help_test_entity_device_info_with_identifier,
help_test_entity_id_update_discovery_update,
help_test_entity_id_update_subscriptions,
help_test_setting_attribute_via_mqtt_json_message,
help_test_setting_attribute_with_template,
help_test_unique_id,
help_test_update_with_json_attrs_bad_JSON,
help_test_update_with_json_attrs_not_dict,
)
from tests.async_mock import patch
from tests.common import async_fire_mqtt_message
DEFAULT_CONFIG = {
camera.DOMAIN: {"platform": "mqtt", "name": "test", "topic": "test_topic"}
}
async def test_run_camera_setup(hass, aiohttp_client, mqtt_mock):
"""Test that it fetches the given payload."""
topic = "test/camera"
await async_setup_component(
hass,
"camera",
{"camera": {"platform": "mqtt", "topic": topic, "name": "Test Camera"}},
)
await hass.async_block_till_done()
url = hass.states.get("camera.test_camera").attributes["entity_picture"]
async_fire_mqtt_message(hass, topic, "beer")
client = await aiohttp_client(hass.http.app)
resp = await client.get(url)
assert resp.status == 200
body = await resp.text()
assert body == "beer"
async def test_availability_when_connection_lost(hass, mqtt_mock):
"""Test availability after MQTT disconnection."""
await help_test_availability_when_connection_lost(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_availability_without_topic(hass, mqtt_mock):
"""Test availability without defined availability topic."""
await help_test_availability_without_topic(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_default_availability_payload(hass, mqtt_mock):
"""Test availability by default payload with defined topic."""
await help_test_default_availability_payload(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_custom_availability_payload(hass, mqtt_mock):
"""Test availability by custom payload with defined topic."""
await help_test_custom_availability_payload(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_via_mqtt_json_message(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_via_mqtt_json_message(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_setting_attribute_with_template(hass, mqtt_mock):
"""Test the setting of attribute via MQTT with JSON payload."""
await help_test_setting_attribute_with_template(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_not_dict(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_not_dict(
hass, mqtt_mock, caplog, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_update_with_json_attrs_bad_JSON(hass, mqtt_mock, caplog):
"""Test attributes get extracted from a JSON result."""
await help_test_update_with_json_attrs_bad_JSON(
hass, mqtt_mock, caplog, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_discovery_update_attr(hass, mqtt_mock, caplog):
"""Test update of discovered MQTTAttributes."""
await help_test_discovery_update_attr(
hass, mqtt_mock, caplog, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_unique_id(hass, mqtt_mock):
"""Test unique id option only creates one camera per unique_id."""
config = {
camera.DOMAIN: [
{
"platform": "mqtt",
"name": "Test 1",
"topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
{
"platform": "mqtt",
"name": "Test 2",
"topic": "test-topic",
"unique_id": "TOTALLY_UNIQUE",
},
]
}
await help_test_unique_id(hass, mqtt_mock, camera.DOMAIN, config)
async def test_discovery_removal_camera(hass, mqtt_mock, caplog):
"""Test removal of discovered camera."""
data = json.dumps(DEFAULT_CONFIG[camera.DOMAIN])
await help_test_discovery_removal(hass, mqtt_mock, caplog, camera.DOMAIN, data)
async def test_discovery_update_camera(hass, mqtt_mock, caplog):
"""Test update of discovered camera."""
data1 = '{ "name": "Beer", "topic": "test_topic"}'
data2 = '{ "name": "Milk", "topic": "test_topic"}'
await help_test_discovery_update(
hass, mqtt_mock, caplog, camera.DOMAIN, data1, data2
)
async def test_discovery_update_unchanged_camera(hass, mqtt_mock, caplog):
"""Test update of discovered camera."""
data1 = '{ "name": "Beer", "topic": "test_topic"}'
with patch(
"homeassistant.components.mqtt.camera.MqttCamera.discovery_update"
) as discovery_update:
await help_test_discovery_update_unchanged(
hass, mqtt_mock, caplog, camera.DOMAIN, data1, discovery_update
)
@pytest.mark.no_fail_on_log_exception
async def test_discovery_broken(hass, mqtt_mock, caplog):
"""Test handling of bad discovery message."""
data1 = '{ "name": "Beer" }'
data2 = '{ "name": "Milk", "topic": "test_topic"}'
await help_test_discovery_broken(
hass, mqtt_mock, caplog, camera.DOMAIN, data1, data2
)
async def test_entity_device_info_with_connection(hass, mqtt_mock):
"""Test MQTT camera device registry integration."""
await help_test_entity_device_info_with_connection(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_with_identifier(hass, mqtt_mock):
"""Test MQTT camera device registry integration."""
await help_test_entity_device_info_with_identifier(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_update(hass, mqtt_mock):
"""Test device registry update."""
await help_test_entity_device_info_update(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_device_info_remove(hass, mqtt_mock):
"""Test device registry remove."""
await help_test_entity_device_info_remove(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_id_update_subscriptions(hass, mqtt_mock):
"""Test MQTT subscriptions are managed when entity_id is updated."""
await help_test_entity_id_update_subscriptions(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG, ["test_topic"]
)
async def test_entity_id_update_discovery_update(hass, mqtt_mock):
"""Test MQTT discovery update when entity_id is updated."""
await help_test_entity_id_update_discovery_update(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG
)
async def test_entity_debug_info_message(hass, mqtt_mock):
"""Test MQTT debug info."""
await help_test_entity_debug_info_message(
hass, mqtt_mock, camera.DOMAIN, DEFAULT_CONFIG, "test_topic", b"ON"
)
|
import sys
import mne
def run():
"""Run command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option("-s", "--surf", dest="surf",
help="Surface in Freesurfer format", metavar="FILE")
parser.add_option("-f", "--fif", dest="fif",
help="FIF file produced", metavar="FILE")
parser.add_option("-i", "--id", dest="id", default=4,
help=("Surface Id (e.g. 4 sur head surface)"))
options, args = parser.parse_args()
if options.surf is None:
parser.print_help()
sys.exit(1)
print("Converting %s to BEM FIF file." % options.surf)
surf = mne.bem._surfaces_to_bem([options.surf], [int(options.id)],
sigmas=[1])
mne.write_bem_surfaces(options.fif, surf)
mne.utils.run_command_if_main()
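# Example invocation (illustrative; the command name and paths are assumptions
# based on the options defined above):
#
#   $ mne surf2bem --surf $SUBJECTS_DIR/sample/bem/outer_skin.surf \
#         --fif sample-head.fif --id 4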
|
from __future__ import absolute_import
from . import etree
try:
import cssselect as external_cssselect
except ImportError:
raise ImportError(
'cssselect does not seem to be installed. '
'See http://packages.python.org/cssselect/')
SelectorSyntaxError = external_cssselect.SelectorSyntaxError
ExpressionError = external_cssselect.ExpressionError
SelectorError = external_cssselect.SelectorError
__all__ = ['SelectorSyntaxError', 'ExpressionError', 'SelectorError',
'CSSSelector']
class LxmlTranslator(external_cssselect.GenericTranslator):
"""
A custom CSS selector to XPath translator with lxml-specific extensions.
"""
def xpath_contains_function(self, xpath, function):
        # Defined in an early CSS3 Selectors draft, removed in later drafts:
# http://www.w3.org/TR/2001/CR-css3-selectors-20011113/#content-selectors
if function.argument_types() not in (['STRING'], ['IDENT']):
raise ExpressionError(
"Expected a single string or ident for :contains(), got %r"
% function.arguments)
value = function.arguments[0].value
return xpath.add_condition(
'contains(__lxml_internal_css:lower-case(string(.)), %s)'
% self.xpath_literal(value.lower()))
class LxmlHTMLTranslator(LxmlTranslator, external_cssselect.HTMLTranslator):
"""
lxml extensions + HTML support.
"""
def _make_lower_case(context, s):
return s.lower()
ns = etree.FunctionNamespace('http://codespeak.net/lxml/css/')
ns.prefix = '__lxml_internal_css'
ns['lower-case'] = _make_lower_case
class CSSSelector(etree.XPath):
"""A CSS selector.
Usage::
>>> from lxml import etree, cssselect
>>> select = cssselect.CSSSelector("a tag > child")
>>> root = etree.XML("<a><b><c/><tag><child>TEXT</child></tag></b></a>")
>>> [ el.tag for el in select(root) ]
['child']
To use CSS namespaces, you need to pass a prefix-to-namespace
mapping as ``namespaces`` keyword argument::
>>> rdfns = 'http://www.w3.org/1999/02/22-rdf-syntax-ns#'
>>> select_ns = cssselect.CSSSelector('root > rdf|Description',
... namespaces={'rdf': rdfns})
>>> rdf = etree.XML((
... '<root xmlns:rdf="%s">'
... '<rdf:Description>blah</rdf:Description>'
... '</root>') % rdfns)
>>> [(el.tag, el.text) for el in select_ns(rdf)]
[('{http://www.w3.org/1999/02/22-rdf-syntax-ns#}Description', 'blah')]
"""
def __init__(self, css, namespaces=None, translator='xml'):
if translator == 'xml':
translator = LxmlTranslator()
elif translator == 'html':
translator = LxmlHTMLTranslator()
elif translator == 'xhtml':
translator = LxmlHTMLTranslator(xhtml=True)
path = translator.css_to_xpath(css)
etree.XPath.__init__(self, path, namespaces=namespaces)
self.css = css
def __repr__(self):
return '<%s %s for %r>' % (
self.__class__.__name__,
hex(abs(id(self)))[2:],
self.css)
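# Usage sketch (not part of the original module): LxmlTranslator also enables a
# case-insensitive :contains() pseudo-class on top of cssselect, e.g.:
#
#     >>> root = etree.XML("<div><p>Hello World</p><p>bye</p></div>")
#     >>> select = CSSSelector('p:contains("hello")')
#     >>> [el.text for el in select(root)]
#     ['Hello World']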
|
import unittest
from absl import flags
import mock
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import pkb
from perfkitbenchmarker import stages
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
FLAGS.mark_as_parsed()
class TestCreateFailedRunSampleFlag(unittest.TestCase):
def PatchPkbFunction(self, function_name):
patcher = mock.patch(pkb.__name__ + '.' + function_name)
mock_function = patcher.start()
self.addCleanup(patcher.stop)
return mock_function
def setUp(self):
self.flags_mock = self.PatchPkbFunction('FLAGS')
self.provision_mock = self.PatchPkbFunction('DoProvisionPhase')
self.prepare_mock = self.PatchPkbFunction('DoPreparePhase')
self.run_mock = self.PatchPkbFunction('DoRunPhase')
self.cleanup_mock = self.PatchPkbFunction('DoCleanupPhase')
self.teardown_mock = self.PatchPkbFunction('DoTeardownPhase')
self.make_failed_run_sample_mock = self.PatchPkbFunction(
'MakeFailedRunSample')
self.flags_mock.skip_pending_runs_file = None
self.flags_mock.run_stage = [
stages.PROVISION, stages.PREPARE, stages.RUN, stages.CLEANUP,
stages.TEARDOWN
]
self.spec = mock.MagicMock()
self.collector = mock.Mock()
def testCreateProvisionFailedSample(self):
self.flags_mock.create_failed_run_samples = True
error_msg = 'error'
self.provision_mock.side_effect = Exception(error_msg)
self.assertRaises(Exception, pkb.RunBenchmark, self.spec, self.collector)
self.make_failed_run_sample_mock.assert_called_once_with(
self.spec, error_msg, stages.PROVISION)
def testCreatePrepareFailedSample(self):
self.flags_mock.create_failed_run_samples = True
error_msg = 'error'
self.prepare_mock.side_effect = Exception(error_msg)
self.assertRaises(Exception, pkb.RunBenchmark, self.spec, self.collector)
self.make_failed_run_sample_mock.assert_called_once_with(
self.spec, error_msg, stages.PREPARE)
def testCreateRunFailedSample(self):
self.flags_mock.create_failed_run_samples = True
error_msg = 'error'
self.run_mock.side_effect = Exception(error_msg)
self.assertRaises(Exception, pkb.RunBenchmark, self.spec, self.collector)
self.make_failed_run_sample_mock.assert_called_once_with(
self.spec, error_msg, stages.RUN)
def testCreateCleanupFailedSample(self):
self.flags_mock.create_failed_run_samples = True
error_msg = 'error'
self.cleanup_mock.side_effect = Exception(error_msg)
self.assertRaises(Exception, pkb.RunBenchmark, self.spec, self.collector)
self.make_failed_run_sample_mock.assert_called_once_with(
self.spec, error_msg, stages.CLEANUP)
def testCreateTeardownFailedSample(self):
self.flags_mock.create_failed_run_samples = True
error_msg = 'error'
self.teardown_mock.side_effect = Exception(error_msg)
self.assertRaises(Exception, pkb.RunBenchmark, self.spec, self.collector)
self.make_failed_run_sample_mock.assert_called_once_with(
self.spec, error_msg, stages.TEARDOWN)
def testDontCreateFailedRunSample(self):
self.flags_mock.create_failed_run_samples = False
self.run_mock.side_effect = Exception('error')
self.assertRaises(Exception, pkb.RunBenchmark, self.spec, self.collector)
self.make_failed_run_sample_mock.assert_not_called()
class TestMakeFailedRunSample(unittest.TestCase):
@mock.patch('perfkitbenchmarker.sample.Sample')
def testMakeFailedRunSample(self, sample_mock):
error_msg = 'error'
spec = mock.MagicMock()
spec.vms = []
spec.failed_substatus = None
pkb.MakeFailedRunSample(spec, error_msg, stages.PROVISION)
sample_mock.assert_called_once()
sample_mock.assert_called_with('Run Failed', 1, 'Run Failed', {
'error_message': error_msg,
'run_stage': stages.PROVISION,
'flags': '{}'
})
@mock.patch('perfkitbenchmarker.sample.Sample')
def testMakeFailedRunSampleWithTruncation(self, sample_mock):
error_msg = 'This is a long error message that should be truncated.'
spec = mock.MagicMock()
spec.vms = []
spec.failed_substatus = 'QuotaExceeded'
pkb.FLAGS.failed_run_samples_error_length = 7
pkb.MakeFailedRunSample(spec, error_msg, stages.PROVISION)
sample_mock.assert_called_once()
sample_mock.assert_called_with('Run Failed', 1, 'Run Failed', {
'error_message': 'This is',
'run_stage': stages.PROVISION,
'flags': '{}',
'failed_substatus': 'QuotaExceeded'
})
class TestMiscFunctions(pkb_common_test_case.PkbCommonTestCase):
"""Testing for various functions in pkb.py."""
def _MockVm(
self, name: str, remote_command_text: str
) -> linux_virtual_machine.BaseLinuxVirtualMachine:
vm_spec = pkb_common_test_case.CreateTestVmSpec()
vm = pkb_common_test_case.TestLinuxVirtualMachine(vm_spec=vm_spec)
vm.OS_TYPE = 'debian9'
vm.name = name
vm.RemoteCommand = mock.Mock(return_value=(remote_command_text, ''))
return vm
def _MockVmWithVuln(
self, name: str,
cpu_vuln: linux_virtual_machine.CpuVulnerabilities) -> mock.Mock:
vm = mock.Mock(OS_TYPE='debian9')
vm.name = name
type(vm).cpu_vulnerabilities = mock.PropertyMock(return_value=cpu_vuln)
return vm
def testGatherCpuVulnerabilitiesNonLinux(self):
# Windows VMs do not currently have code to detect CPU vulnerabilities
vuln = linux_virtual_machine.CpuVulnerabilities()
vuln.mitigations['a'] = 'b'
vm = self._MockVmWithVuln('vm1', vuln)
vm.OS_TYPE = 'windows'
self.assertLen(pkb._CreateCpuVulnerabilitySamples([vm]), 0)
def testGatherCpuVulnerabilitiesEmpty(self):
    # Even if CpuVulnerabilities is empty, a sample is created.
vm = self._MockVmWithVuln('vm1', linux_virtual_machine.CpuVulnerabilities())
samples = pkb._CreateCpuVulnerabilitySamples([vm])
self.assertEqual({'vm_name': 'vm1'}, samples[0].metadata)
self.assertLen(samples, 1)
def testGatherCpuVulnerabilities(self):
prefix = '/sys/devices/system/cpu/vulnerabilities'
vm0 = self._MockVm('vm0', f"""{prefix}/itlb_multihit:KVM: Vulnerable""")
vm1 = self._MockVm('vm1', f"""{prefix}/l1tf:Mitigation: PTE Inversion""")
samples = pkb._CreateCpuVulnerabilitySamples([vm0, vm1])
self.assertEqual('cpu_vuln', samples[0].metric)
expected_metadata0 = {
'vm_name': 'vm0',
'vulnerabilities': 'itlb_multihit',
'vulnerability_itlb_multihit': 'KVM',
}
expected_metadata1 = {
'vm_name': 'vm1',
'mitigations': 'l1tf',
'mitigation_l1tf': 'PTE Inversion',
}
self.assertEqual(expected_metadata0, samples[0].metadata)
self.assertEqual(expected_metadata1, samples[1].metadata)
self.assertLen(samples, 2)
if __name__ == '__main__':
unittest.main()
|
import asyncio
import logging
import os
from subprocess import PIPE
import sys
import pkg_resources
import pytest
import homeassistant.util.package as package
from tests.async_mock import MagicMock, call, patch
RESOURCE_DIR = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "resources")
)
TEST_NEW_REQ = "pyhelloworld3==1.0.0"
TEST_ZIP_REQ = "file://{}#{}".format(
os.path.join(RESOURCE_DIR, "pyhelloworld3.zip"), TEST_NEW_REQ
)
@pytest.fixture
def mock_sys():
"""Mock sys."""
with patch("homeassistant.util.package.sys", spec=object) as sys_mock:
sys_mock.executable = "python3"
yield sys_mock
@pytest.fixture
def deps_dir():
"""Return path to deps directory."""
return os.path.abspath("/deps_dir")
@pytest.fixture
def lib_dir(deps_dir):
"""Return path to lib directory."""
return os.path.join(deps_dir, "lib_dir")
@pytest.fixture
def mock_popen(lib_dir):
"""Return a Popen mock."""
with patch("homeassistant.util.package.Popen") as popen_mock:
popen_mock.return_value.communicate.return_value = (
bytes(lib_dir, "utf-8"),
b"error",
)
popen_mock.return_value.returncode = 0
yield popen_mock
@pytest.fixture
def mock_env_copy():
"""Mock os.environ.copy."""
with patch("homeassistant.util.package.os.environ.copy") as env_copy:
env_copy.return_value = {}
yield env_copy
@pytest.fixture
def mock_venv():
"""Mock homeassistant.util.package.is_virtual_env."""
with patch("homeassistant.util.package.is_virtual_env") as mock:
mock.return_value = True
yield mock
def mock_async_subprocess():
"""Return an async Popen mock."""
async_popen = MagicMock()
async def communicate(input=None):
"""Communicate mock."""
stdout = bytes("/deps_dir/lib_dir", "utf-8")
return (stdout, None)
async_popen.communicate = communicate
return async_popen
def test_install(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install attempt on a package that doesn't exist."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ, False)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[mock_sys.executable, "-m", "pip", "install", "--quiet", TEST_NEW_REQ],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_upgrade(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an upgrade attempt on a package."""
env = mock_env_copy()
assert package.install_package(TEST_NEW_REQ)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--upgrade",
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target."""
target = "target_folder"
env = mock_env_copy()
env["PYTHONUSERBASE"] = os.path.abspath(target)
mock_venv.return_value = False
mock_sys.platform = "linux"
args = [
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--user",
"--prefix=",
]
assert package.install_package(TEST_NEW_REQ, False, target=target)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_target_venv(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test an install with a target in a virtual environment."""
target = "target_folder"
with pytest.raises(AssertionError):
package.install_package(TEST_NEW_REQ, False, target=target)
def test_install_error(caplog, mock_sys, mock_popen, mock_venv):
"""Test an install with a target."""
caplog.set_level(logging.WARNING)
mock_popen.return_value.returncode = 1
assert not package.install_package(TEST_NEW_REQ)
assert len(caplog.records) == 1
for record in caplog.records:
assert record.levelname == "ERROR"
def test_install_constraint(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test install with constraint file on not installed package."""
env = mock_env_copy()
constraints = "constraints_file.txt"
assert package.install_package(TEST_NEW_REQ, False, constraints=constraints)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--constraint",
constraints,
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
def test_install_find_links(mock_sys, mock_popen, mock_env_copy, mock_venv):
"""Test install with find-links on not installed package."""
env = mock_env_copy()
link = "https://wheels-repository"
assert package.install_package(TEST_NEW_REQ, False, find_links=link)
assert mock_popen.call_count == 1
assert mock_popen.call_args == call(
[
mock_sys.executable,
"-m",
"pip",
"install",
"--quiet",
TEST_NEW_REQ,
"--find-links",
link,
"--prefer-binary",
],
stdin=PIPE,
stdout=PIPE,
stderr=PIPE,
env=env,
)
assert mock_popen.return_value.communicate.call_count == 1
async def test_async_get_user_site(mock_env_copy):
"""Test async get user site directory."""
deps_dir = "/deps_dir"
env = mock_env_copy()
env["PYTHONUSERBASE"] = os.path.abspath(deps_dir)
args = [sys.executable, "-m", "site", "--user-site"]
with patch(
"homeassistant.util.package.asyncio.create_subprocess_exec",
return_value=mock_async_subprocess(),
) as popen_mock:
ret = await package.async_get_user_site(deps_dir)
assert popen_mock.call_count == 1
assert popen_mock.call_args == call(
*args,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
env=env,
)
assert ret == os.path.join(deps_dir, "lib_dir")
def test_check_package_global():
"""Test for an installed package."""
installed_package = list(pkg_resources.working_set)[0].project_name
assert package.is_installed(installed_package)
def test_check_package_zip():
"""Test for an installed zip package."""
assert not package.is_installed(TEST_ZIP_REQ)
|
import flatbuffers
class Interrupt(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsInterrupt(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = Interrupt()
x.Init(buf, n + offset)
return x
# Interrupt
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# Interrupt
def Request(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint64Flags, o + self._tab.Pos)
return 0
# Interrupt
def Mode(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 1
def InterruptStart(builder): builder.StartObject(2)
def InterruptAddRequest(builder, request): builder.PrependUint64Slot(0, request, 0)
def InterruptAddMode(builder, mode): builder.PrependUint8Slot(1, mode, 1)
def InterruptEnd(builder): return builder.EndObject()
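# Usage sketch (not part of the generated code): building and reading back an
# Interrupt table with the helpers above; the field values are illustrative.
if __name__ == "__main__":
    builder = flatbuffers.Builder(0)
    InterruptStart(builder)
    InterruptAddRequest(builder, 1234)
    InterruptAddMode(builder, 2)
    builder.Finish(InterruptEnd(builder))

    interrupt = Interrupt.GetRootAsInterrupt(builder.Output(), 0)
    print(interrupt.Request(), interrupt.Mode())  # 1234 2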
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gin
from six.moves import range
import tensorflow as tf
from tensorflow.contrib.tpu.python.tpu import tpu_function
def cross_replica_concat(value, replica_id, num_replicas):
"""Reduce a concatenation of the `value` across TPU replicas.
Args:
value: Tensor to concatenate.
replica_id: Integer tensor that indicates the index of the replica.
num_replicas: Python integer, total number of replicas.
Returns:
Tensor of the same rank as value with first dimension `num_replicas`
times larger.
Raises:
ValueError: If `value` is a scalar.
"""
if value.shape.ndims < 1:
raise ValueError("Value must have at least rank 1 but got {}.".format(
value.shape.ndims))
if num_replicas <= 1:
return value
with tf.name_scope(None, "tpu_cross_replica_concat"):
# Mask is one hot encoded position of the core_index.
mask = tf.to_float(tf.equal(tf.range(num_replicas), replica_id))
# Expand dims with 1's to match rank of value.
mask = tf.reshape(mask, [num_replicas] + [1] * value.shape.ndims)
if value.dtype in {tf.bfloat16, tf.float32}:
result = mask * value
else:
result = mask * tf.to_float(value)
    # Thanks to broadcasting, result is now set only in the position pointed to
    # by replica_id; the rest of the vector is 0's.
# All these steps are basically implementing tf.scatter_nd which is missing
# in TPU's backend since it doesn't support sparse operations.
# Merge first 2 dimensions.
# This is equivalent to (value.shape[0].value * num_replicas).
# Using [-1] trick to support also scalar input.
result = tf.reshape(result, [-1] + result.shape.as_list()[2:])
# Each core set the "results" in position pointed by replica_id. When we now
# sum across replicas we exchange the information and fill in local 0's with
# values from other cores.
result = tf.contrib.tpu.cross_replica_sum(result)
# Now all the cores see exactly the same data.
return tf.cast(result, dtype=value.dtype)
def cross_replica_mean(inputs, group_size=None):
"""Calculates the average value of inputs tensor across TPU replicas."""
num_replicas = tpu_function.get_tpu_context().number_of_shards
if not group_size:
group_size = num_replicas
if group_size == 1:
return inputs
if group_size != num_replicas:
group_assignment = []
assert num_replicas % group_size == 0
for g in range(num_replicas // group_size):
replica_ids = [g * group_size + i for i in range(group_size)]
group_assignment.append(replica_ids)
else:
group_assignment = None
return tf.contrib.tpu.cross_replica_sum(inputs, group_assignment) / tf.cast(
group_size, inputs.dtype)
@gin.configurable(blacklist=["inputs", "axis"])
def cross_replica_moments(inputs, axis, parallel=True, group_size=None):
"""Compute mean and variance of the inputs tensor across TPU replicas.
Args:
inputs: A tensor with 2 or more dimensions.
axis: Array of ints. Axes along which to compute mean and variance.
    parallel: Use E[x^2] - (E[x])^2 to compute variance. This can then be done
      in parallel with computing the mean, reducing the communication overhead.
    group_size: Integer, the number of replicas to compute moments across.
None or 0 will use all replicas (global).
Returns:
Two tensors with mean and variance.
"""
# Compute local mean and then average across replicas.
mean = tf.math.reduce_mean(inputs, axis=axis)
mean = cross_replica_mean(mean)
if parallel:
# Compute variance using the E[x^2] - (E[x])^2 formula. This is less
# numerically stable than the E[(x-E[x])^2] formula, but allows the two
# cross-replica sums to be computed in parallel, saving communication
# overhead.
mean_of_squares = tf.reduce_mean(tf.square(inputs), axis=axis)
mean_of_squares = cross_replica_mean(mean_of_squares, group_size=group_size)
mean_squared = tf.square(mean)
variance = mean_of_squares - mean_squared
else:
variance = tf.math.reduce_mean(
tf.math.square(inputs - mean), axis=axis)
variance = cross_replica_mean(variance, group_size=group_size)
return mean, variance
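# Numerical sketch of the identity used in the `parallel` branch above
# (illustrative, plain NumPy rather than TPU ops): Var[x] = E[x^2] - (E[x])^2.
if __name__ == "__main__":
  import numpy as np  # assumed available for this standalone check

  x = np.random.normal(loc=2.0, scale=3.0, size=(8, 1024)).astype(np.float64)
  direct = np.mean(np.square(x - np.mean(x, axis=1, keepdims=True)), axis=1)
  parallel = np.mean(np.square(x), axis=1) - np.square(np.mean(x, axis=1))
  print(np.allclose(direct, parallel))  # True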
|
import os
import pytest
from molecule import util
from molecule.command import prepare
@pytest.fixture
def _patched_ansible_prepare(mocker):
return mocker.patch('molecule.provisioner.ansible.Ansible.prepare')
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
def test_execute(mocker, patched_logger_info, _patched_ansible_prepare,
patched_config_validate, config_instance):
pb = os.path.join(config_instance.scenario.directory, 'prepare.yml')
util.write_file(pb, '')
p = prepare.Prepare(config_instance)
p.execute()
x = [
mocker.call("Scenario: 'default'"),
mocker.call("Action: 'prepare'"),
]
assert x == patched_logger_info.mock_calls
_patched_ansible_prepare.assert_called_once_with()
assert config_instance.state.prepared
def test_execute_skips_when_instances_already_prepared(
patched_logger_warn, _patched_ansible_prepare, config_instance):
config_instance.state.change_state('prepared', True)
p = prepare.Prepare(config_instance)
p.execute()
msg = 'Skipping, instances already prepared.'
patched_logger_warn.assert_called_once_with(msg)
assert not _patched_ansible_prepare.called
def test_execute_skips_when_playbook_not_configured(
patched_logger_warn, _patched_ansible_prepare, config_instance):
p = prepare.Prepare(config_instance)
p.execute()
msg = 'Skipping, prepare playbook not configured.'
patched_logger_warn.assert_called_once_with(msg)
assert not _patched_ansible_prepare.called
def test_execute_when_instances_already_prepared_but_force_provided(
mocker, patched_logger_warn, _patched_ansible_prepare,
config_instance):
pb = os.path.join(config_instance.scenario.directory, 'prepare.yml')
util.write_file(pb, '')
config_instance.state.change_state('prepared', True)
config_instance.command_args = {'force': True}
p = prepare.Prepare(config_instance)
p.execute()
_patched_ansible_prepare.assert_called_once_with()
|
import io
import logging
import os
import time
from PIL import Image, ImageDraw, UnidentifiedImageError
from pydoods import PyDOODS
import voluptuous as vol
from homeassistant.components.image_processing import (
CONF_CONFIDENCE,
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
PLATFORM_SCHEMA,
ImageProcessingEntity,
)
from homeassistant.const import CONF_TIMEOUT
from homeassistant.core import split_entity_id
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.util.pil import draw_box
_LOGGER = logging.getLogger(__name__)
ATTR_MATCHES = "matches"
ATTR_SUMMARY = "summary"
ATTR_TOTAL_MATCHES = "total_matches"
ATTR_PROCESS_TIME = "process_time"
CONF_URL = "url"
CONF_AUTH_KEY = "auth_key"
CONF_DETECTOR = "detector"
CONF_LABELS = "labels"
CONF_AREA = "area"
CONF_COVERS = "covers"
CONF_TOP = "top"
CONF_BOTTOM = "bottom"
CONF_RIGHT = "right"
CONF_LEFT = "left"
CONF_FILE_OUT = "file_out"
AREA_SCHEMA = vol.Schema(
{
vol.Optional(CONF_BOTTOM, default=1): cv.small_float,
vol.Optional(CONF_LEFT, default=0): cv.small_float,
vol.Optional(CONF_RIGHT, default=1): cv.small_float,
vol.Optional(CONF_TOP, default=0): cv.small_float,
vol.Optional(CONF_COVERS, default=True): cv.boolean,
}
)
LABEL_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_AREA): AREA_SCHEMA,
vol.Optional(CONF_CONFIDENCE): vol.Range(min=0, max=100),
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_URL): cv.string,
vol.Required(CONF_DETECTOR): cv.string,
vol.Required(CONF_TIMEOUT, default=90): cv.positive_int,
vol.Optional(CONF_AUTH_KEY, default=""): cv.string,
vol.Optional(CONF_FILE_OUT, default=[]): vol.All(cv.ensure_list, [cv.template]),
vol.Optional(CONF_CONFIDENCE, default=0.0): vol.Range(min=0, max=100),
vol.Optional(CONF_LABELS, default=[]): vol.All(
cv.ensure_list, [vol.Any(cv.string, LABEL_SCHEMA)]
),
vol.Optional(CONF_AREA): AREA_SCHEMA,
}
)
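# Example configuration.yaml entry matching the schema above (illustrative;
# host, detector name and labels are assumptions):
#
#   image_processing:
#     - platform: doods
#       url: "http://doods-host:8080"
#       detector: default
#       source:
#         - entity_id: camera.front_door
#       confidence: 60
#       labels:
#         - name: person
#           confidence: 70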
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Doods client."""
url = config[CONF_URL]
auth_key = config[CONF_AUTH_KEY]
detector_name = config[CONF_DETECTOR]
timeout = config[CONF_TIMEOUT]
doods = PyDOODS(url, auth_key, timeout)
response = doods.get_detectors()
if not isinstance(response, dict):
_LOGGER.warning("Could not connect to doods server: %s", url)
return
detector = {}
for server_detector in response["detectors"]:
if server_detector["name"] == detector_name:
detector = server_detector
break
if not detector:
_LOGGER.warning(
"Detector %s is not supported by doods server %s", detector_name, url
)
return
entities = []
for camera in config[CONF_SOURCE]:
entities.append(
Doods(
hass,
camera[CONF_ENTITY_ID],
camera.get(CONF_NAME),
doods,
detector,
config,
)
)
add_entities(entities)
class Doods(ImageProcessingEntity):
"""Doods image processing service client."""
def __init__(self, hass, camera_entity, name, doods, detector, config):
"""Initialize the DOODS entity."""
self.hass = hass
self._camera_entity = camera_entity
if name:
self._name = name
else:
name = split_entity_id(camera_entity)[1]
self._name = f"Doods {name}"
self._doods = doods
self._file_out = config[CONF_FILE_OUT]
self._detector_name = detector["name"]
# detector config and aspect ratio
self._width = None
self._height = None
self._aspect = None
if detector["width"] and detector["height"]:
self._width = detector["width"]
self._height = detector["height"]
self._aspect = self._width / self._height
# the base confidence
dconfig = {}
confidence = config[CONF_CONFIDENCE]
# handle labels and specific detection areas
labels = config[CONF_LABELS]
self._label_areas = {}
self._label_covers = {}
for label in labels:
if isinstance(label, dict):
label_name = label[CONF_NAME]
if label_name not in detector["labels"] and label_name != "*":
_LOGGER.warning("Detector does not support label %s", label_name)
continue
# If label confidence is not specified, use global confidence
label_confidence = label.get(CONF_CONFIDENCE)
if not label_confidence:
label_confidence = confidence
if label_name not in dconfig or dconfig[label_name] > label_confidence:
dconfig[label_name] = label_confidence
# Label area
label_area = label.get(CONF_AREA)
self._label_areas[label_name] = [0, 0, 1, 1]
self._label_covers[label_name] = True
if label_area:
self._label_areas[label_name] = [
label_area[CONF_TOP],
label_area[CONF_LEFT],
label_area[CONF_BOTTOM],
label_area[CONF_RIGHT],
]
self._label_covers[label_name] = label_area[CONF_COVERS]
else:
if label not in detector["labels"] and label != "*":
_LOGGER.warning("Detector does not support label %s", label)
continue
self._label_areas[label] = [0, 0, 1, 1]
self._label_covers[label] = True
if label not in dconfig or dconfig[label] > confidence:
dconfig[label] = confidence
if not dconfig:
dconfig["*"] = confidence
# Handle global detection area
self._area = [0, 0, 1, 1]
self._covers = True
area_config = config.get(CONF_AREA)
if area_config:
self._area = [
area_config[CONF_TOP],
area_config[CONF_LEFT],
area_config[CONF_BOTTOM],
area_config[CONF_RIGHT],
]
self._covers = area_config[CONF_COVERS]
template.attach(hass, self._file_out)
self._dconfig = dconfig
self._matches = {}
self._total_matches = 0
self._last_image = None
self._process_time = 0
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera_entity
@property
def name(self):
"""Return the name of the image processor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
return self._total_matches
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_MATCHES: self._matches,
ATTR_SUMMARY: {
label: len(values) for label, values in self._matches.items()
},
ATTR_TOTAL_MATCHES: self._total_matches,
ATTR_PROCESS_TIME: self._process_time,
}
def _save_image(self, image, matches, paths):
img = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
img_width, img_height = img.size
draw = ImageDraw.Draw(img)
# Draw custom global region/area
if self._area != [0, 0, 1, 1]:
draw_box(
draw, self._area, img_width, img_height, "Detection Area", (0, 255, 255)
)
for label, values in matches.items():
# Draw custom label regions/areas
if label in self._label_areas and self._label_areas[label] != [0, 0, 1, 1]:
box_label = f"{label.capitalize()} Detection Area"
draw_box(
draw,
self._label_areas[label],
img_width,
img_height,
box_label,
(0, 255, 0),
)
# Draw detected objects
for instance in values:
box_label = f'{label} {instance["score"]:.1f}%'
# Already scaled, use 1 for width and height
draw_box(
draw,
instance["box"],
img_width,
img_height,
box_label,
(255, 255, 0),
)
for path in paths:
_LOGGER.info("Saving results image to %s", path)
if not os.path.exists(os.path.dirname(path)):
os.makedirs(os.path.dirname(path), exist_ok=True)
img.save(path)
def process_image(self, image):
"""Process the image."""
try:
img = Image.open(io.BytesIO(bytearray(image))).convert("RGB")
except UnidentifiedImageError:
_LOGGER.warning("Unable to process image, bad data")
return
img_width, img_height = img.size
if self._aspect and abs((img_width / img_height) - self._aspect) > 0.1:
_LOGGER.debug(
"The image aspect: %s and the detector aspect: %s differ by more than 0.1",
(img_width / img_height),
self._aspect,
)
# Run detection
start = time.monotonic()
response = self._doods.detect(
image, dconfig=self._dconfig, detector_name=self._detector_name
)
_LOGGER.debug(
"doods detect: %s response: %s duration: %s",
self._dconfig,
response,
time.monotonic() - start,
)
matches = {}
total_matches = 0
if not response or "error" in response:
if "error" in response:
_LOGGER.error(response["error"])
self._matches = matches
self._total_matches = total_matches
self._process_time = time.monotonic() - start
return
for detection in response["detections"]:
score = detection["confidence"]
boxes = [
detection["top"],
detection["left"],
detection["bottom"],
detection["right"],
]
label = detection["label"]
# Exclude unlisted labels
if "*" not in self._dconfig and label not in self._dconfig:
continue
# Exclude matches outside global area definition
if self._covers:
if (
boxes[0] < self._area[0]
or boxes[1] < self._area[1]
or boxes[2] > self._area[2]
or boxes[3] > self._area[3]
):
continue
else:
if (
boxes[0] > self._area[2]
or boxes[1] > self._area[3]
or boxes[2] < self._area[0]
or boxes[3] < self._area[1]
):
continue
# Exclude matches outside label specific area definition
if self._label_areas.get(label):
if self._label_covers[label]:
if (
boxes[0] < self._label_areas[label][0]
or boxes[1] < self._label_areas[label][1]
or boxes[2] > self._label_areas[label][2]
or boxes[3] > self._label_areas[label][3]
):
continue
else:
if (
boxes[0] > self._label_areas[label][2]
or boxes[1] > self._label_areas[label][3]
or boxes[2] < self._label_areas[label][0]
or boxes[3] < self._label_areas[label][1]
):
continue
if label not in matches:
matches[label] = []
matches[label].append({"score": float(score), "box": boxes})
total_matches += 1
# Save Images
if total_matches and self._file_out:
paths = []
for path_template in self._file_out:
if isinstance(path_template, template.Template):
paths.append(
path_template.render(camera_entity=self._camera_entity)
)
else:
paths.append(path_template)
self._save_image(image, matches, paths)
self._matches = matches
self._total_matches = total_matches
self._process_time = time.monotonic() - start
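# Illustrative, standalone sketch (not part of the original integration) of the
# filtering rule used in process_image() above: with covers=True a detection
# box must lie fully inside the configured area, with covers=False it only
# needs to overlap it. Boxes and areas are [top, left, bottom, right] in
# relative (0..1) coordinates.
def _box_passes_area(box, area, covers):
    """Return True if box passes the area filter under the given mode."""
    if covers:
        # The box must be fully contained in the area.
        return (
            box[0] >= area[0]
            and box[1] >= area[1]
            and box[2] <= area[2]
            and box[3] <= area[3]
        )
    # The box only needs to intersect the area.
    return not (
        box[0] > area[2]
        or box[1] > area[3]
        or box[2] < area[0]
        or box[3] < area[1]
    )
if __name__ == "__main__":
    # Quick sanity check of the sketch: a centered box passes both modes,
    # a box hanging over the edge passes only the "overlap" mode.
    _demo_area = [0.25, 0.25, 0.75, 0.75]
    assert _box_passes_area([0.3, 0.3, 0.6, 0.6], _demo_area, covers=True)
    assert not _box_passes_area([0.1, 0.3, 0.6, 0.6], _demo_area, covers=True)
    assert _box_passes_area([0.1, 0.3, 0.6, 0.6], _demo_area, covers=False)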
|
from itertools import chain
from io import BytesIO
import pyparsing as pp
# TODO input and output state
try:
from lxml import etree
except ImportError:
try:
import xml.etree.ElementTree as etree
except ImportError:
# try:
# import xml.etree.cElementTree as etree
# commented out because xml.etree.cElementTree is giving errors with dictionary attributes
print("Failed to import ElementTree from any known place")
import numpy as np
from pgmpy.models import BayesianModel
from pgmpy.factors.discrete import TabularCPD, State
class XMLBIFReader(object):
"""
Base class for reading network file in XMLBIF format.
"""
def __init__(self, path=None, string=None):
"""
Initialisation of XMLBIFReader object.
Parameters
----------
path : file or str
File of XMLBIF data
string : str
String of XMLBIF data
Examples
--------
# xmlbif_test.xml is the file present in
# http://www.cs.cmu.edu/~fgcozman/Research/InterchangeFormat/
>>> reader = XMLBIFReader("xmlbif_test.xml")
"""
if path:
self.network = etree.ElementTree(file=path).getroot().find("NETWORK")
elif string:
self.network = etree.fromstring(string.encode("utf-8")).find("NETWORK")
else:
raise ValueError("Must specify either path or string")
self.network_name = self.network.find("NAME").text
self.variables = self.get_variables()
self.variable_parents = self.get_parents()
self.edge_list = self.get_edges()
self.variable_states = self.get_states()
self.variable_CPD = self.get_values()
self.variable_property = self.get_property()
self.state_names = self.get_states()
def get_variables(self):
"""
Returns list of variables of the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_variables()
['light-on', 'bowel-problem', 'dog-out', 'hear-bark', 'family-out']
"""
variables = [
variable.find("NAME").text for variable in self.network.findall("VARIABLE")
]
return variables
def get_edges(self):
"""
Returns the edges of the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_edges()
[['family-out', 'light-on'],
['family-out', 'dog-out'],
['bowel-problem', 'dog-out'],
['dog-out', 'hear-bark']]
"""
edge_list = [
[value, key]
for key in self.variable_parents
for value in self.variable_parents[key]
]
return edge_list
def get_states(self):
"""
Returns the states of variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_states()
{'bowel-problem': ['true', 'false'],
'dog-out': ['true', 'false'],
'family-out': ['true', 'false'],
'hear-bark': ['true', 'false'],
'light-on': ['true', 'false']}
"""
variable_states = {
variable.find("NAME").text: [
outcome.text for outcome in variable.findall("OUTCOME")
]
for variable in self.network.findall("VARIABLE")
}
return variable_states
def get_parents(self):
"""
Returns the parents of the variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_parents()
{'bowel-problem': [],
'dog-out': ['family-out', 'bowel-problem'],
'family-out': [],
'hear-bark': ['dog-out'],
'light-on': ['family-out']}
"""
variable_parents = {
definition.find("FOR").text: [
edge.text for edge in definition.findall("GIVEN")
]
for definition in self.network.findall("DEFINITION")
}
return variable_parents
def get_values(self):
"""
Returns the CPD of the variables present in the network
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_values()
{'bowel-problem': array([[ 0.01],
[ 0.99]]),
'dog-out': array([[ 0.99, 0.01, 0.97, 0.03],
[ 0.9 , 0.1 , 0.3 , 0.7 ]]),
'family-out': array([[ 0.15],
[ 0.85]]),
'hear-bark': array([[ 0.7 , 0.3 ],
[ 0.01, 0.99]]),
'light-on': array([[ 0.6 , 0.4 ],
[ 0.05, 0.95]])}
"""
variable_CPD = {
definition.find("FOR").text: list(map(float, table.text.split()))
for definition in self.network.findall("DEFINITION")
for table in definition.findall("TABLE")
}
for variable in variable_CPD:
arr = np.array(variable_CPD[variable])
arr = arr.reshape(
(
len(self.variable_states[variable]),
arr.size // len(self.variable_states[variable]),
),
order="F",
)
variable_CPD[variable] = arr
return variable_CPD
def get_property(self):
"""
Returns the property of the variable
Examples
--------
>>> reader = XMLBIF.XMLBIFReader("xmlbif_test.xml")
>>> reader.get_property()
{'bowel-problem': ['position = (190, 69)'],
'dog-out': ['position = (155, 165)'],
'family-out': ['position = (112, 69)'],
'hear-bark': ['position = (154, 241)'],
'light-on': ['position = (73, 165)']}
"""
variable_property = {
variable.find("NAME").text: [
property.text for property in variable.findall("PROPERTY")
]
for variable in self.network.findall("VARIABLE")
}
return variable_property
def get_model(self, state_name_type=str):
"""
Returns a Bayesian Model instance from the file/string.
Parameters
----------
state_name_type: int, str, or bool (default: str)
The data type to which to convert the state names of the variables.
Returns
-------
BayesianModel instance: The read model.
"""
model = BayesianModel()
model.add_nodes_from(self.variables)
model.add_edges_from(self.edge_list)
model.name = self.network_name
tabular_cpds = []
for var, values in self.variable_CPD.items():
evidence_card = [
len(self.variable_states[evidence_var])
for evidence_var in self.variable_parents[var]
]
cpd = TabularCPD(
var,
len(self.variable_states[var]),
values,
evidence=self.variable_parents[var],
evidence_card=evidence_card,
state_names={
var: list(map(state_name_type, self.state_names[var]))
for var in chain([var], self.variable_parents[var])
},
)
tabular_cpds.append(cpd)
model.add_cpds(*tabular_cpds)
for node, properties in self.variable_property.items():
for prop in properties:
if prop is not None:
prop_name, prop_value = map(lambda t: t.strip(), prop.split("="))
model.nodes[node][prop_name] = prop_value
return model
class XMLBIFWriter(object):
"""
Base class for writing XMLBIF network file format.
"""
def __init__(self, model, encoding="utf-8", prettyprint=True):
"""
Initialise a XMLBIFWriter object.
Parameters
----------
model: BayesianModel Instance
Model to write
encoding: str (optional)
Encoding for text data
prettyprint: Bool(optional)
Indentation in output XML if true
Examples
--------
>>> writer = XMLBIFWriter(model)
"""
if not isinstance(model, BayesianModel):
raise TypeError("model must an instance of BayesianModel")
self.model = model
self.encoding = encoding
self.prettyprint = prettyprint
self.xml = etree.Element("BIF", attrib={"VERSION": "0.3"})
self.network = etree.SubElement(self.xml, "NETWORK")
if self.model.name:
etree.SubElement(self.network, "NAME").text = self.model.name
else:
etree.SubElement(self.network, "NAME").text = "UNTITLED"
self.variables = self.get_variables()
self.states = self.get_states()
self.properties = self.get_properties()
self.definition = self.get_definition()
self.tables = self.get_values()
def __str__(self):
"""
Return the XML as string.
"""
if self.prettyprint:
self.indent(self.xml)
f = BytesIO()
et = etree.ElementTree(self.xml)
et.write(f, encoding=self.encoding, xml_declaration=True)
return f.getvalue().decode(self.encoding)
def indent(self, elem, level=0):
"""
Inplace prettyprint formatter.
"""
i = "\n" + level * " "
if len(elem):
if not elem.text or not elem.text.strip():
elem.text = i + " "
if not elem.tail or not elem.tail.strip():
elem.tail = i
for elem in elem:
self.indent(elem, level + 1)
if not elem.tail or not elem.tail.strip():
elem.tail = i
else:
if level and (not elem.tail or not elem.tail.strip()):
elem.tail = i
def get_variables(self):
"""
Add variables to XMLBIF
Return
------
dict: dict of type {variable: variable tags}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_variables()
{'bowel-problem': <Element VARIABLE at 0x7fe28607dd88>,
'family-out': <Element VARIABLE at 0x7fe28607de08>,
'hear-bark': <Element VARIABLE at 0x7fe28607de48>,
'dog-out': <Element VARIABLE at 0x7fe28607ddc8>,
'light-on': <Element VARIABLE at 0x7fe28607de88>}
"""
variables = self.model.nodes()
variable_tag = {}
for var in sorted(variables):
variable_tag[var] = etree.SubElement(
self.network, "VARIABLE", attrib={"TYPE": "nature"}
)
etree.SubElement(variable_tag[var], "NAME").text = var
return variable_tag
def get_states(self):
"""
Add outcome to variables of XMLBIF
Return
------
dict: dict of type {variable: outcome tags}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_states()
{'dog-out': [<Element OUTCOME at 0x7ffbabfcdec8>, <Element OUTCOME at 0x7ffbabfcdf08>],
'family-out': [<Element OUTCOME at 0x7ffbabfd4108>, <Element OUTCOME at 0x7ffbabfd4148>],
'bowel-problem': [<Element OUTCOME at 0x7ffbabfd4088>, <Element OUTCOME at 0x7ffbabfd40c8>],
'hear-bark': [<Element OUTCOME at 0x7ffbabfcdf48>, <Element OUTCOME at 0x7ffbabfcdf88>],
'light-on': [<Element OUTCOME at 0x7ffbabfcdfc8>, <Element OUTCOME at 0x7ffbabfd4048>]}
"""
outcome_tag = {}
cpds = self.model.get_cpds()
for cpd in cpds:
var = cpd.variable
outcome_tag[var] = []
if cpd.state_names is None or cpd.state_names.get(var) is None:
states = range(cpd.get_cardinality([var])[var])
else:
states = cpd.state_names[var]
for state in states:
state_tag = etree.SubElement(self.variables[var], "OUTCOME")
state_tag.text = self._make_valid_state_name(state)
outcome_tag[var].append(state_tag)
return outcome_tag
def _make_valid_state_name(self, state_name):
"""Transform the input state_name into a valid state in XMLBIF.
        XMLBIF states must start with a letter and only contain letters,
numbers and underscores.
"""
# TODO: Throw a warning that the state names are going to be modified instead of silently modifying it.
s = str(state_name)
s_fixed = (
pp.CharsNotIn(pp.alphanums + "_")
.setParseAction(pp.replaceWith("_"))
.transformString(s)
)
if not s_fixed[0].isalpha():
            s_fixed = "state" + s_fixed
return s_fixed
def get_properties(self):
"""
Add property to variables in XMLBIF
Return
------
dict: dict of type {variable: property tag}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_property()
{'light-on': <Element PROPERTY at 0x7f7a2ffac1c8>,
'family-out': <Element PROPERTY at 0x7f7a2ffac148>,
'hear-bark': <Element PROPERTY at 0x7f7a2ffac188>,
'bowel-problem': <Element PROPERTY at 0x7f7a2ffac0c8>,
'dog-out': <Element PROPERTY at 0x7f7a2ffac108>}
"""
variables = self.model.nodes()
property_tag = {}
for var in sorted(variables):
properties = self.model.nodes[var]
property_tag[var] = etree.SubElement(self.variables[var], "PROPERTY")
for prop, val in properties.items():
property_tag[var].text = str(prop) + " = " + str(val)
return property_tag
def get_definition(self):
"""
Add Definition to XMLBIF
Return
------
dict: dict of type {variable: definition tag}
Examples
--------
>>> writer = XMLBIFWriter(model)
>>> writer.get_definition()
{'hear-bark': <Element DEFINITION at 0x7f1d48977408>,
'family-out': <Element DEFINITION at 0x7f1d489773c8>,
'dog-out': <Element DEFINITION at 0x7f1d48977388>,
'bowel-problem': <Element DEFINITION at 0x7f1d48977348>,
'light-on': <Element DEFINITION at 0x7f1d48977448>}
"""
cpds = self.model.get_cpds()
cpds.sort(key=lambda x: x.variable)
definition_tag = {}
for cpd in cpds:
definition_tag[cpd.variable] = etree.SubElement(self.network, "DEFINITION")
etree.SubElement(definition_tag[cpd.variable], "FOR").text = cpd.variable
for parent in cpd.variables[1:]:
etree.SubElement(definition_tag[cpd.variable], "GIVEN").text = parent
return definition_tag
def get_values(self):
"""
Add Table to XMLBIF.
Return
        ------
dict: dict of type {variable: table tag}
Examples
-------
>>> writer = XMLBIFWriter(model)
>>> writer.get_values()
{'dog-out': <Element TABLE at 0x7f240726f3c8>,
'light-on': <Element TABLE at 0x7f240726f488>,
'bowel-problem': <Element TABLE at 0x7f240726f388>,
'family-out': <Element TABLE at 0x7f240726f408>,
'hear-bark': <Element TABLE at 0x7f240726f448>}
"""
cpds = self.model.get_cpds()
definition_tag = self.definition
table_tag = {}
for cpd in cpds:
table_tag[cpd.variable] = etree.SubElement(
definition_tag[cpd.variable], "TABLE"
)
table_tag[cpd.variable].text = ""
for val in cpd.get_values().ravel(order="F"):
table_tag[cpd.variable].text += str(val) + " "
return table_tag
def write_xmlbif(self, filename):
"""
Write the xml data into the file.
Parameters
----------
filename: Name of the file.
Examples
-------
>>> writer = XMLBIFWriter(model)
>>> writer.write_xmlbif(test_file)
"""
with open(filename, "w") as fout:
fout.write(self.__str__())
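# Illustrative round trip (not part of the original module): build a tiny
# two-node model, write it to XMLBIF and read it back. The file name and the
# CPD numbers are arbitrary placeholders.
if __name__ == "__main__":
    demo_model = BayesianModel([("rain", "wet_grass")])
    cpd_rain = TabularCPD("rain", 2, [[0.8], [0.2]])
    cpd_wet = TabularCPD(
        "wet_grass",
        2,
        [[0.9, 0.1], [0.1, 0.9]],
        evidence=["rain"],
        evidence_card=[2],
    )
    demo_model.add_cpds(cpd_rain, cpd_wet)
    XMLBIFWriter(demo_model).write_xmlbif("demo.xmlbif")
    round_tripped = XMLBIFReader("demo.xmlbif").get_model()
    print(round_tripped.nodes(), round_tripped.edges())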
|
import logging
from brunt import BruntAPI
import voluptuous as vol
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_WINDOW,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
CoverEntity,
)
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
COVER_FEATURES = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
ATTR_REQUEST_POSITION = "request_position"
NOTIFICATION_ID = "brunt_notification"
NOTIFICATION_TITLE = "Brunt Cover Setup"
ATTRIBUTION = "Based on an unofficial Brunt SDK."
CLOSED_POSITION = 0
OPEN_POSITION = 100
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
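# Illustrative configuration.yaml entry for this platform (credentials are
# placeholders):
#
#   cover:
#     - platform: brunt
#       username: YOUR_BRUNT_EMAIL
#       password: YOUR_BRUNT_PASSWORD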
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the brunt platform."""
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
bapi = BruntAPI(username=username, password=password)
try:
things = bapi.getThings()["things"]
if not things:
_LOGGER.error("No things present in account")
else:
add_entities(
[
BruntDevice(bapi, thing["NAME"], thing["thingUri"])
for thing in things
],
True,
)
except (TypeError, KeyError, NameError, ValueError) as ex:
_LOGGER.error("%s", ex)
hass.components.persistent_notification.create(
"Error: {ex}<br />You will need to restart hass after fixing.",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
class BruntDevice(CoverEntity):
"""
Representation of a Brunt cover device.
Contains the common logic for all Brunt devices.
"""
def __init__(self, bapi, name, thing_uri):
"""Init the Brunt device."""
self._bapi = bapi
self._name = name
self._thing_uri = thing_uri
self._state = {}
self._available = None
@property
def name(self):
"""Return the name of the device as reported by tellcore."""
return self._name
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._available
@property
def current_cover_position(self):
"""
Return current position of cover.
None is unknown, 0 is closed, 100 is fully open.
"""
pos = self._state.get("currentPosition")
        return int(pos) if pos is not None else None
@property
def request_cover_position(self):
"""
Return request position of cover.
        The request position is the position of the last request
        sent to Brunt; at times it differs from the current position by 1.
        None is unknown, 0 is closed, 100 is fully open.
"""
pos = self._state.get("requestPosition")
        return int(pos) if pos is not None else None
@property
def move_state(self):
"""
Return current moving state of cover.
None is unknown, 0 when stopped, 1 when opening, 2 when closing
"""
mov = self._state.get("moveState")
        return int(mov) if mov is not None else None
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self.move_state == 1
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self.move_state == 2
@property
def device_state_attributes(self):
"""Return the detailed device state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_REQUEST_POSITION: self.request_cover_position,
}
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_WINDOW
@property
def supported_features(self):
"""Flag supported features."""
return COVER_FEATURES
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return self.current_cover_position == CLOSED_POSITION
def update(self):
"""Poll the current state of the device."""
try:
self._state = self._bapi.getState(thingUri=self._thing_uri).get("thing")
self._available = True
except (TypeError, KeyError, NameError, ValueError) as ex:
_LOGGER.error("%s", ex)
self._available = False
def open_cover(self, **kwargs):
"""Set the cover to the open position."""
self._bapi.changeRequestPosition(OPEN_POSITION, thingUri=self._thing_uri)
def close_cover(self, **kwargs):
"""Set the cover to the closed position."""
self._bapi.changeRequestPosition(CLOSED_POSITION, thingUri=self._thing_uri)
def set_cover_position(self, **kwargs):
"""Set the cover to a specific position."""
self._bapi.changeRequestPosition(
kwargs[ATTR_POSITION], thingUri=self._thing_uri
)
|
from flexx.util.testing import run_tests_if_main, raises
from flexx import app, event
class MyPropClass1(app.PyComponent):
foo = event.IntProp(1, settable=True)
class MyPropClass2(MyPropClass1):
def init(self, foo_val=11):
self.set_foo(foo_val)
def test_launching_with_props():
m = app.launch(MyPropClass1)
assert m.foo == 1
m.session.close()
m = app.App(MyPropClass1, foo=3).launch()
assert m.foo == 3
m.session.close()
def test_launching_with_init_args():
m = app.launch(MyPropClass2)
event.loop.iter()
assert m.foo == 11
m.session.close()
m = app.App(MyPropClass2, 13).launch()
event.loop.iter()
assert m.foo == 13
m.session.close()
run_tests_if_main()
|
import asyncio
from datetime import timedelta
import logging
from aionotion import async_get_client
from aionotion.errors import InvalidCredentialsError, NotionError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import (
aiohttp_client,
config_validation as cv,
device_registry as dr,
)
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DATA_COORDINATOR, DOMAIN
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["binary_sensor", "sensor"]
ATTR_SYSTEM_MODE = "system_mode"
ATTR_SYSTEM_NAME = "system_name"
DEFAULT_ATTRIBUTION = "Data provided by Notion"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=1)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Set up the Notion component."""
hass.data[DOMAIN] = {DATA_COORDINATOR: {}}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: conf[CONF_USERNAME],
CONF_PASSWORD: conf[CONF_PASSWORD],
},
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Set up Notion as a config entry."""
if not entry.unique_id:
hass.config_entries.async_update_entry(
entry, unique_id=entry.data[CONF_USERNAME]
)
session = aiohttp_client.async_get_clientsession(hass)
try:
client = await async_get_client(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD], session
)
except InvalidCredentialsError:
_LOGGER.error("Invalid username and/or password")
return False
except NotionError as err:
_LOGGER.error("Config entry failed: %s", err)
raise ConfigEntryNotReady from err
async def async_update():
"""Get the latest data from the Notion API."""
data = {"bridges": {}, "sensors": {}, "tasks": {}}
tasks = {
"bridges": client.bridge.async_all(),
"sensors": client.sensor.async_all(),
"tasks": client.task.async_all(),
}
results = await asyncio.gather(*tasks.values(), return_exceptions=True)
for attr, result in zip(tasks, results):
if isinstance(result, NotionError):
raise UpdateFailed(
f"There was a Notion error while updating {attr}: {result}"
)
if isinstance(result, Exception):
raise UpdateFailed(
f"There was an unknown error while updating {attr}: {result}"
)
for item in result:
if attr == "bridges" and item["id"] not in data["bridges"]:
# If a new bridge is discovered, register it:
hass.async_create_task(async_register_new_bridge(hass, item, entry))
data[attr][item["id"]] = item
return data
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][
entry.entry_id
] = DataUpdateCoordinator(
hass,
_LOGGER,
name=entry.data[CONF_USERNAME],
update_interval=DEFAULT_SCAN_INTERVAL,
update_method=async_update,
)
await coordinator.async_refresh()
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry) -> bool:
"""Unload a Notion config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, platform)
for platform in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_COORDINATOR].pop(entry.entry_id)
return unload_ok
async def async_register_new_bridge(
hass: HomeAssistant, bridge: dict, entry: ConfigEntry
):
"""Register a new bridge."""
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, bridge["hardware_id"])},
manufacturer="Silicon Labs",
model=bridge["hardware_revision"],
name=bridge["name"] or bridge["id"],
sw_version=bridge["firmware_version"]["wifi"],
)
class NotionEntity(CoordinatorEntity):
"""Define a base Notion entity."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
task_id: str,
sensor_id: str,
bridge_id: str,
system_id: str,
name: str,
device_class: str,
):
"""Initialize the entity."""
super().__init__(coordinator)
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._bridge_id = bridge_id
self._device_class = device_class
self._name = name
self._sensor_id = sensor_id
self._state = None
self._system_id = system_id
self._task_id = task_id
@property
def available(self) -> bool:
"""Return True if entity is available."""
return (
self.coordinator.last_update_success
and self._task_id in self.coordinator.data["tasks"]
)
@property
def device_class(self) -> str:
"""Return the device class."""
return self._device_class
@property
def device_state_attributes(self) -> dict:
"""Return the state attributes."""
return self._attrs
@property
def device_info(self) -> dict:
"""Return device registry information for this entity."""
bridge = self.coordinator.data["bridges"].get(self._bridge_id, {})
sensor = self.coordinator.data["sensors"][self._sensor_id]
return {
"identifiers": {(DOMAIN, sensor["hardware_id"])},
"manufacturer": "Silicon Labs",
"model": sensor["hardware_revision"],
"name": sensor["name"],
"sw_version": sensor["firmware_version"],
"via_device": (DOMAIN, bridge.get("hardware_id")),
}
@property
def name(self) -> str:
"""Return the name of the entity."""
sensor = self.coordinator.data["sensors"][self._sensor_id]
return f'{sensor["name"]}: {self._name}'
@property
def unique_id(self) -> str:
"""Return a unique, unchanging string that represents this entity."""
task = self.coordinator.data["tasks"][self._task_id]
return f'{self._sensor_id}_{task["task_type"]}'
async def _async_update_bridge_id(self) -> None:
"""Update the entity's bridge ID if it has changed.
Sensors can move to other bridges based on signal strength, etc.
"""
sensor = self.coordinator.data["sensors"][self._sensor_id]
# If the sensor's bridge ID is the same as what we had before or if it points
# to a bridge that doesn't exist (which can happen due to a Notion API bug),
# return immediately:
if (
self._bridge_id == sensor["bridge"]["id"]
or sensor["bridge"]["id"] not in self.coordinator.data["bridges"]
):
return
self._bridge_id = sensor["bridge"]["id"]
device_registry = await dr.async_get_registry(self.hass)
bridge = self.coordinator.data["bridges"][self._bridge_id]
        bridge_device = device_registry.async_get_device(
            {(DOMAIN, bridge["hardware_id"])}, set()
        )
        this_device = device_registry.async_get_device(
            {(DOMAIN, sensor["hardware_id"])}, set()
        )
device_registry.async_update_device(
this_device.id, via_device_id=bridge_device.id
)
@callback
def _async_update_from_latest_data(self) -> None:
"""Update the entity from the latest data."""
raise NotImplementedError
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
@callback
def update():
"""Update the state."""
self._async_update_from_latest_data()
self.async_write_ha_state()
self.async_on_remove(self.coordinator.async_add_listener(update))
self._async_update_from_latest_data()
|
import logging
import sys
from typing import Final, Optional
IS_DEBUG: Final[bool] = "--debug" in sys.argv
def is_debug() -> bool:
return IS_DEBUG
def debug_exc_log(lg: logging.Logger, exc: Exception, msg: Optional[str] = None) -> None:
    """Log an exception if the logger's effective level is DEBUG."""
if lg.getEffectiveLevel() <= logging.DEBUG:
if msg is None:
msg = f"{exc}"
lg.exception(msg, exc_info=exc)
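# Illustrative usage (not part of the original helper module):
if __name__ == "__main__":
    logging.basicConfig(level=logging.DEBUG)
    _demo_log = logging.getLogger("demo")
    try:
        1 / 0
    except ZeroDivisionError as err:
        debug_exc_log(_demo_log, err, "Division failed")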
|
import voluptuous as vol
from homeassistant.const import CONF_PLATFORM
from homeassistant.core import HassJob, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_point_in_utc_time
import homeassistant.util.dt as dt_util
# mypy: allow-untyped-defs, no-check-untyped-defs
CONF_NUMBER = "number"
CONF_HELD_MORE_THAN = "held_more_than"
CONF_HELD_LESS_THAN = "held_less_than"
TRIGGER_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "litejet",
vol.Required(CONF_NUMBER): cv.positive_int,
vol.Optional(CONF_HELD_MORE_THAN): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_HELD_LESS_THAN): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
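# Illustrative automation trigger using TRIGGER_SCHEMA above (the switch number
# and durations are placeholders):
#
#   trigger:
#     - platform: litejet
#       number: 4
#       held_more_than: "00:00:01"
#       held_less_than: "00:00:03"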
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for events based on configuration."""
number = config.get(CONF_NUMBER)
held_more_than = config.get(CONF_HELD_MORE_THAN)
held_less_than = config.get(CONF_HELD_LESS_THAN)
pressed_time = None
cancel_pressed_more_than = None
job = HassJob(action)
@callback
def call_action():
"""Call action with right context."""
hass.async_run_hass_job(
job,
{
"trigger": {
CONF_PLATFORM: "litejet",
CONF_NUMBER: number,
CONF_HELD_MORE_THAN: held_more_than,
CONF_HELD_LESS_THAN: held_less_than,
"description": f"litejet switch #{number}",
}
},
)
# held_more_than and held_less_than: trigger on released (if in time range)
# held_more_than: trigger after pressed with calculation
# held_less_than: trigger on released with calculation
# neither: trigger on pressed
@callback
def pressed_more_than_satisfied(now):
"""Handle the LiteJet's switch's button pressed >= held_more_than."""
call_action()
def pressed():
"""Handle the press of the LiteJet switch's button."""
nonlocal cancel_pressed_more_than, pressed_time
nonlocal held_less_than, held_more_than
pressed_time = dt_util.utcnow()
if held_more_than is None and held_less_than is None:
hass.add_job(call_action)
if held_more_than is not None and held_less_than is None:
cancel_pressed_more_than = track_point_in_utc_time(
hass, pressed_more_than_satisfied, dt_util.utcnow() + held_more_than
)
def released():
"""Handle the release of the LiteJet switch's button."""
nonlocal cancel_pressed_more_than, pressed_time
nonlocal held_less_than, held_more_than
# pylint: disable=not-callable
if cancel_pressed_more_than is not None:
cancel_pressed_more_than()
cancel_pressed_more_than = None
held_time = dt_util.utcnow() - pressed_time
if (
held_less_than is not None
and held_time < held_less_than
and (held_more_than is None or held_time > held_more_than)
):
hass.add_job(call_action)
hass.data["litejet_system"].on_switch_pressed(number, pressed)
hass.data["litejet_system"].on_switch_released(number, released)
@callback
def async_remove():
"""Remove all subscriptions used for this trigger."""
return
return async_remove
|
from flexx import flx
# Associate bootstrap CSS with this module
url = "https://maxcdn.bootstrapcdn.com/bootstrap/4.0.0-beta.3/css/bootstrap.min.css"
flx.assets.associate_asset(__name__, url)
class Example(flx.Widget):
persons = flx.TupleProp((), doc=""" People to show cards for""")
first_name = flx.StringProp('', settable=True)
last_name = flx.StringProp('', settable=True)
@flx.action
def add_person(self, name, info):
""" Add a person to our stack.
"""
ppl = list(self.persons)
ppl.append((name, info))
self._mutate_persons(ppl)
def _button_clicked(self, *events):
self.add_person(self.first_name, self.last_name)
def _render_dom(self):
""" This function gets automatically called when needed; Flexx is aware
of what properties are used here.
"""
# Create form elements
form_nodes = [
flx.create_element('div',
{'class': 'form-group mb-2'},
flx.create_element('input',
{'class': 'form-control',
'id': 'inputFirstName',
'oninput': lambda e: self.set_first_name(e.target.value)
},
'First name'
)
),
flx.create_element('div',
{'class': 'form-group mx-sm-3 mb-2'},
flx.create_element('input',
{'class': 'form-control',
'id': 'inputLastName',
'oninput': lambda e: self.set_last_name(e.target.value)
},
'Last name'
)
),
flx.create_element('button',
{'class': 'btn btn-primary mb-2',
'onclick': self._button_clicked
},
'Submit'
),
]
# Create virtual DOM nodes for all persons. We use bootstrap cards
card_nodes = []
for name, info in self.persons:
person_node = flx.create_element('div', {'class': 'card'},
flx.create_element('div', {'class': 'card-body'},
flx.create_element('h5', {'class': 'card-title'}, name),
flx.create_element('p', {'class': 'card-text'}, info),
)
)
card_nodes.append(person_node)
        # Compose the final DOM tree
return flx.create_element('div', {},
flx.create_element('div',
{'class': 'form-inline'},
form_nodes
),
*card_nodes)
if __name__ == '__main__':
m = flx.launch(Example, 'firefox-browser')
flx.run()
|
import json
from absl import flags
from perfkitbenchmarker import container_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import azure
from perfkitbenchmarker.providers.azure import azure_network
from perfkitbenchmarker.providers.azure import service_principal
from perfkitbenchmarker.providers.azure import util
FLAGS = flags.FLAGS
class AzureContainerRegistry(container_service.BaseContainerRegistry):
"""Class for building and storing container images on Azure."""
CLOUD = azure.CLOUD
def __init__(self, registry_spec):
super(AzureContainerRegistry, self).__init__(registry_spec)
self.location = util.GetLocationFromZone(self.zone)
self.resource_group = azure_network.GetResourceGroup(self.location)
self.login_server = None
self.sku = 'Basic'
self._deleted = False
self.acr_id = None
self.service_principal = service_principal.ServicePrincipal.GetInstance()
def _Exists(self):
"""Returns True if the registry exists."""
if self._deleted:
return False
stdout, _, _ = vm_util.IssueCommand([
azure.AZURE_PATH, 'acr', 'show', '--name', self.name,
], suppress_warning=True, raise_on_failure=False)
try:
registry = json.loads(stdout)
self.login_server = registry['loginServer']
self.acr_id = registry['id']
return True
except ValueError:
return False
def _Create(self):
"""Creates the registry."""
if self._Exists():
return
vm_util.IssueCommand([
azure.AZURE_PATH, 'acr', 'create',
'--name', self.name,
'--sku', self.sku
] + self.resource_group.args)
def _Delete(self):
"""Deletes the registry."""
# This will be deleted along with the resource group
self._deleted = True
def _PostCreate(self):
"""Allow the service principle to read from the repository."""
create_role_assignment_cmd = [
azure.AZURE_PATH, 'role', 'assignment', 'create',
'--assignee', self.service_principal.app_id,
'--role', 'Reader',
'--scope', self.acr_id,
]
vm_util.IssueRetryableCommand(create_role_assignment_cmd)
def _CreateDependencies(self):
"""Creates the resource group."""
self.resource_group.Create()
self.service_principal.Create()
def _DeleteDependencies(self):
"""Deletes the resource group."""
self.resource_group.Delete()
self.service_principal.Delete()
def Login(self):
"""Logs in to the registry."""
vm_util.IssueCommand([
azure.AZURE_PATH, 'acr', 'login',
'--name', self.name,
])
def GetFullRegistryTag(self, image):
"""Gets the full tag of the image."""
full_tag = '{login_server}/{name}'.format(
login_server=self.login_server, name=image)
return full_tag
class AksCluster(container_service.KubernetesCluster):
"""Class representing an Azure Kubernetes Service cluster."""
CLOUD = azure.CLOUD
def __init__(self, spec):
"""Initializes the cluster."""
super(AksCluster, self).__init__(spec)
if util.IsZone(spec.vm_spec.zone):
raise errors.Config.InvalidValue(
          'Availability zones are currently not supported by AKS clusters')
self.location = util.GetLocationFromZone(self.zone)
self.resource_group = azure_network.GetResourceGroup(self.location)
self.name = 'pkbcluster%s' % FLAGS.run_uri
# TODO(pclay): replace with built in service principal once I figure out how
# to make it work with ACR
self.service_principal = service_principal.ServicePrincipal.GetInstance()
self.cluster_version = FLAGS.container_cluster_version
self._deleted = False
def GetResourceMetadata(self):
"""Returns a dict containing metadata about the cluster.
Returns:
dict mapping string property key to value.
"""
result = super(AksCluster, self).GetResourceMetadata()
result['container_cluster_version'] = self.cluster_version
result['boot_disk_type'] = self.vm_config.os_disk.disk_type
result['boot_disk_size'] = self.vm_config.os_disk.disk_size
return result
# Creating an AKS cluster with a fresh service principal usually fails due
# to a race condition. Active Directory knows the service principal exists,
# but AKS does not. (https://github.com/Azure/azure-cli/issues/9585)
@vm_util.Retry()
def _Create(self):
"""Creates the AKS cluster."""
cmd = [
azure.AZURE_PATH, 'aks', 'create',
'--name', self.name,
'--node-vm-size', self.vm_config.machine_type,
'--node-count', str(self.num_nodes),
'--location', self.location,
'--dns-name-prefix', 'pkb' + FLAGS.run_uri,
'--ssh-key-value', vm_util.GetPublicKeyPath(),
'--service-principal', self.service_principal.app_id,
# TODO(pclay): avoid logging client secret
'--client-secret', self.service_principal.password,
] + self.resource_group.args
if self.vm_config.os_disk and self.vm_config.os_disk.disk_size:
cmd += ['--node-osdisk-size', str(self.vm_config.os_disk.disk_size)]
if self.cluster_version:
cmd += ['--kubernetes-version', self.cluster_version]
# TODO(pclay): expose quota and capacity errors
vm_util.IssueCommand(cmd, timeout=1800)
def _Exists(self):
"""Returns True if the cluster exists."""
if self._deleted:
return False
stdout, _, _ = vm_util.IssueCommand([
azure.AZURE_PATH, 'aks', 'show', '--name', self.name,
] + self.resource_group.args, raise_on_failure=False)
try:
json.loads(stdout)
return True
except ValueError:
return False
def _Delete(self):
"""Deletes the AKS cluster."""
# This will be deleted along with the resource group
self._deleted = True
def _PostCreate(self):
"""Tags the cluster resource group."""
super(AksCluster, self)._PostCreate()
cluster_resource_group_name = 'MC_%s_%s_%s' % (
self.resource_group.name, self.name, self.zone)
set_tags_cmd = [
azure.AZURE_PATH, 'group', 'update', '-g', cluster_resource_group_name,
'--set', util.GetTagsJson(self.resource_group.timeout_minutes)
]
vm_util.IssueCommand(set_tags_cmd)
def _IsReady(self):
"""Returns True if the cluster is ready."""
vm_util.IssueCommand([
azure.AZURE_PATH, 'aks', 'get-credentials',
'--admin',
'--name', self.name,
'--file', FLAGS.kubeconfig,
] + self.resource_group.args, suppress_warning=True)
version_cmd = [FLAGS.kubectl, '--kubeconfig', FLAGS.kubeconfig, 'version']
_, _, retcode = vm_util.IssueCommand(version_cmd, suppress_warning=True,
raise_on_failure=False)
if retcode:
return False
# POD creation will fail until the default service account is created.
get_cmd = [
FLAGS.kubectl, '--kubeconfig', FLAGS.kubeconfig,
'get', 'serviceAccounts'
]
stdout, _, _ = vm_util.IssueCommand(get_cmd)
return 'default' in stdout
def _CreateDependencies(self):
"""Creates the resource group."""
self.resource_group.Create()
self.service_principal.Create()
def _DeleteDependencies(self):
"""Deletes the resource group."""
self.resource_group.Delete()
self.service_principal.Delete()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
import six
from six.moves import range
BENCHMARK_NAME = 'bonnieplusplus'
BENCHMARK_CONFIG = """
bonnieplusplus:
description: >
Runs Bonnie++. Running this benchmark inside
a container is currently not supported,
since Docker tries to run it as root, which
is not recommended.
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
"""
LATENCY_REGEX = r'([0-9]*\.?[0-9]+)(\w+)'
# Bonnie++ result fields mapping, see man bon_csv2txt for details.
BONNIE_RESULTS_MAPPING = {
'format_version': 0,
'bonnie_version': 1,
'name': 2,
'concurrency': 3,
'seed': 4,
'file_size': 5,
'chunk_size': 6,
'putc': 7,
'putc_cpu': 8,
'put_block': 9,
'put_block_cpu': 10,
'rewrite': 11,
'rewrite_cpu': 12,
'getc': 13,
'getc_cpu': 14,
'get_block': 15,
'get_block_cpu': 16,
'seeks': 17,
'seeks_cpu': 18,
'num_files': 19,
'max_size': 20,
'min_size': 21,
'num_dirs': 22,
'file_chunk_size': 23,
'seq_create': 24,
'seq_create_cpu': 25,
'seq_stat': 26,
'seq_stat_cpu': 27,
'seq_del': 28,
'seq_del_cpu': 29,
'ran_create': 30,
'ran_create_cpu': 31,
'ran_stat': 32,
'ran_stat_cpu': 33,
'ran_del': 34,
'ran_del_cpu': 35,
'putc_latency': 36,
'put_block_latency': 37,
'rewrite_latency': 38,
'getc_latency': 39,
'get_block_latency': 40,
'seeks_latency': 41,
'seq_create_latency': 42,
'seq_stat_latency': 43,
'seq_del_latency': 44,
'ran_create_latency': 45,
'ran_stat_latency': 46,
'ran_del_latency': 47}
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
"""Install Bonnie++ on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('Bonnie++ prepare on %s', vm)
vm.Install('bonnieplusplus')
def IsValueValid(value):
"""Validate the value.
An invalid value is either an empty string or a string of multiple '+'.
Args:
value: string. The value in raw result.
Returns:
A boolean indicates if the value is valid or not.
"""
if value == '' or '+' in value:
return False
return True
def IsCpuField(field):
"""Check if the field is cpu percentage.
Args:
field: string. The name of the field.
Returns:
A boolean indicates if the field contains keyword 'cpu'.
"""
return 'cpu' in field
def IsLatencyField(field):
"""Check if the field is latency.
Args:
field: string. The name of the field.
Returns:
A boolean indicates if the field contains keyword 'latency'.
"""
return 'latency' in field
def ParseLatencyResult(result):
"""Parse latency result into value and unit.
Args:
result: string. Latency value in string format, contains value and unit.
eg. 200ms
Returns:
A tuple of value (float) and unit (string).
"""
match = regex_util.ExtractAllMatches(LATENCY_REGEX, result)[0]
return float(match[0]), match[1]
def UpdateMetadata(metadata, key, value):
"""Check if the value is valid, update metadata with the key, value pair.
Args:
metadata: dict. A dictionary of sample metadata.
key: string. Key that will be added into metadata dictionary.
value: Value that of the key.
"""
if IsValueValid(value):
metadata[key] = value
def CreateSamples(results, start_index, end_index, metadata,
field_index_mapping):
"""Create samples with data in results from start_index to end_index.
Args:
results: A list of string representing bonnie++ results.
start_index: integer. The start index in results list of the samples.
end_index: integer. The end index in results list of the samples.
metadata: dict. A dictionary of metadata added into samples.
field_index_mapping: dict. A dictionary maps field index to field names.
Returns:
A list of sample.Sample instances.
"""
samples = []
for field_index in range(start_index, end_index):
field_name = field_index_mapping[field_index]
value = results[field_index]
if not IsValueValid(value):
continue
if IsCpuField(field_name):
      unit = '%'
elif IsLatencyField(field_name):
value, unit = ParseLatencyResult(value)
else:
unit = 'K/sec'
samples.append(sample.Sample(field_name, float(value), unit, metadata))
return samples
def ParseCSVResults(results):
"""Parse csv format bonnie++ results.
Sample Results:
1.96,1.96,perfkit-7b22f510-0,1,1421800799,7423M,,,,72853,15,47358,5,,,
156821,7,537.7,10,100,,,,,49223,58,+++++,+++,54405,53,2898,97,+++++,+++,
59089,60,,512ms,670ms,,44660us,200ms,3747us,1759us,1643us,33518us,192us,
839us
Args:
results: string. Bonnie++ results.
Returns:
A list of samples in the form of 3 or 4 tuples. The tuples contain
the sample metric (string), value (float), and unit (string).
If a 4th element is included, it is a dictionary of sample
metadata.
"""
field_index_mapping = {}
for field, value in six.iteritems(BONNIE_RESULTS_MAPPING):
field_index_mapping[value] = field
results = results.split(',')
assert len(results) == len(BONNIE_RESULTS_MAPPING)
samples = []
metadata = {}
for field_index in range(BONNIE_RESULTS_MAPPING['format_version'],
BONNIE_RESULTS_MAPPING['chunk_size'] + 1):
UpdateMetadata(metadata, field_index_mapping[field_index],
results[field_index])
for field_index in range(BONNIE_RESULTS_MAPPING['num_files'],
BONNIE_RESULTS_MAPPING['file_chunk_size'] + 1):
UpdateMetadata(metadata, field_index_mapping[field_index],
results[field_index])
samples.extend(CreateSamples(results,
BONNIE_RESULTS_MAPPING['putc'],
BONNIE_RESULTS_MAPPING['num_files'],
metadata, field_index_mapping))
samples.extend(CreateSamples(results,
BONNIE_RESULTS_MAPPING['seq_create'],
BONNIE_RESULTS_MAPPING['ran_del_latency'] + 1,
metadata, field_index_mapping))
return samples
def Run(benchmark_spec):
"""Run Bonnie++ on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of samples in the form of 3 or 4 tuples. The tuples contain
the sample metric (string), value (float), and unit (string).
If a 4th element is included, it is a dictionary of sample
metadata.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('Bonnie++ running on %s', vm)
bonnie_command = ('/usr/sbin/bonnie++ -q -d %s -s %d -n 100 -f' %
(vm.GetScratchDir(),
2 * vm.total_memory_kb / 1024))
logging.info('Bonnie++ Results:')
out, _ = vm.RemoteCommand(bonnie_command, should_log=True)
return ParseCSVResults(out.strip())
def Cleanup(benchmark_spec):
"""Cleanup Bonnie++ on the target vm (by uninstalling).
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
pass
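# Illustrative check of the parsing helpers above (not part of the benchmark
# flow; the values are made up):
if __name__ == '__main__':
  assert IsValueValid('72853') and not IsValueValid('+++++')
  assert ParseLatencyResult('44660us') == (44660.0, 'us')
  print(ParseLatencyResult('512ms'))  # -> (512.0, 'ms')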
|
import locale
import shlex
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, QObject, QProcess,
QProcessEnvironment)
from qutebrowser.utils import message, log
from qutebrowser.browser import qutescheme
class GUIProcess(QObject):
"""An external process which shows notifications in the GUI.
Args:
cmd: The command which was started.
args: A list of arguments which gets passed.
verbose: Whether to show more messages.
_output_messages: Show output as messages.
_started: Whether the underlying process is started.
_proc: The underlying QProcess.
_what: What kind of thing is spawned (process/editor/userscript/...).
Used in messages.
Signals:
error/finished/started signals proxied from QProcess.
"""
error = pyqtSignal(QProcess.ProcessError)
finished = pyqtSignal(int, QProcess.ExitStatus)
started = pyqtSignal()
def __init__(self, what, *, verbose=False, additional_env=None,
output_messages=False, parent=None):
super().__init__(parent)
self._what = what
self.verbose = verbose
self._output_messages = output_messages
self._started = False
self.cmd = None
self.args = None
self._proc = QProcess(self)
self._proc.errorOccurred.connect(self._on_error)
self._proc.errorOccurred.connect(self.error)
self._proc.finished.connect(self._on_finished)
self._proc.finished.connect(self.finished)
self._proc.started.connect(self._on_started)
self._proc.started.connect(self.started)
if additional_env is not None:
procenv = QProcessEnvironment.systemEnvironment()
for k, v in additional_env.items():
procenv.insert(k, v)
self._proc.setProcessEnvironment(procenv)
@pyqtSlot()
def _on_error(self):
"""Show a message if there was an error while spawning."""
msg = self._proc.errorString()
message.error("Error while spawning {}: {}".format(self._what, msg))
@pyqtSlot(int, QProcess.ExitStatus)
def _on_finished(self, code, status):
"""Show a message when the process finished."""
self._started = False
log.procs.debug("Process finished with code {}, status {}.".format(
code, status))
encoding = locale.getpreferredencoding(do_setlocale=False)
stderr = self._proc.readAllStandardError().data().decode(
encoding, 'replace')
stdout = self._proc.readAllStandardOutput().data().decode(
encoding, 'replace')
if self._output_messages:
if stdout:
message.info(stdout.strip())
if stderr:
message.error(stderr.strip())
if status == QProcess.CrashExit:
exitinfo = "{} crashed!".format(self._what.capitalize())
message.error(exitinfo)
elif status == QProcess.NormalExit and code == 0:
exitinfo = "{} exited successfully.".format(
self._what.capitalize())
if self.verbose:
message.info(exitinfo)
else:
assert status == QProcess.NormalExit
# We call this 'status' here as it makes more sense to the user -
# it's actually 'code'.
exitinfo = ("{} exited with status {}, see :messages for "
"details.").format(self._what.capitalize(), code)
message.error(exitinfo)
if stdout:
log.procs.error("Process stdout:\n" + stdout.strip())
if stderr:
log.procs.error("Process stderr:\n" + stderr.strip())
qutescheme.spawn_output = self._spawn_format(exitinfo, stdout, stderr)
def _spawn_format(self, exitinfo, stdout, stderr):
"""Produce a formatted string for spawn output."""
stdout = (stdout or "(No output)").strip()
stderr = (stderr or "(No output)").strip()
spawn_string = ("{}\n"
"\nProcess stdout:\n {}"
"\nProcess stderr:\n {}").format(exitinfo,
stdout, stderr)
return spawn_string
@pyqtSlot()
def _on_started(self):
"""Called when the process started successfully."""
log.procs.debug("Process started.")
assert not self._started
self._started = True
def _pre_start(self, cmd, args):
"""Prepare starting of a QProcess."""
if self._started:
raise ValueError("Trying to start a running QProcess!")
self.cmd = cmd
self.args = args
fake_cmdline = ' '.join(shlex.quote(e) for e in [cmd] + list(args))
log.procs.debug("Executing: {}".format(fake_cmdline))
if self.verbose:
message.info('Executing: ' + fake_cmdline)
def start(self, cmd, args):
"""Convenience wrapper around QProcess::start."""
log.procs.debug("Starting process.")
self._pre_start(cmd, args)
self._proc.start(cmd, args)
self._proc.closeWriteChannel()
def start_detached(self, cmd, args):
"""Convenience wrapper around QProcess::startDetached."""
log.procs.debug("Starting detached.")
self._pre_start(cmd, args)
ok, _pid = self._proc.startDetached(
cmd, args, None) # type: ignore[call-arg]
if not ok:
message.error("Error while spawning {}".format(self._what))
return False
log.procs.debug("Process started.")
self._started = True
return True
def exit_status(self):
return self._proc.exitStatus()
|
import numpy as np
import unittest
from chainercv.utils import assert_is_point
from chainercv.utils import testing
def _random_visible_including_true(n):
while True:
visible = np.random.randint(0, 2, size=n).astype(np.bool)
if visible.any():
return visible
@testing.parameterize(
# no visible and size
{'point': np.random.uniform(-1, 1, size=(1, 10, 2)).astype(np.float32),
'valid': True},
{'point': [((1., 2.), (4., 8.))],
'valid': False},
{'point': np.random.uniform(-1, 1, size=(1, 10, 2)).astype(np.int32),
'valid': False},
{'point': np.random.uniform(-1, 1, size=(1, 10, 3)).astype(np.float32),
'valid': False},
# use visible, no size
{'point': np.random.uniform(-1, 1, size=(1, 10, 2)).astype(np.float32),
'visible': np.random.randint(0, 2, size=(1, 10,)).astype(np.bool),
'valid': True},
{'point': np.random.uniform(-1, 1, size=(1, 4, 2)).astype(np.float32),
'visible': [(True, True, True, True)],
'valid': False},
{'point': np.random.uniform(-1, 1, size=(1, 10, 2)).astype(np.float32),
'visible': np.random.randint(0, 2, size=(1, 10,)).astype(np.int32),
'valid': False},
{'point': np.random.uniform(-1, 1, size=(1, 10, 2)).astype(np.float32),
'visible': np.random.randint(0, 2, size=(1, 10, 2)).astype(np.bool),
'valid': False},
{'point': np.random.uniform(-1, 1, size=(1, 10, 2)).astype(np.float32),
'visible': np.random.randint(0, 2, size=(1, 9,)).astype(np.bool),
'valid': False},
# no visible, use size
{'point': np.random.uniform(0, 32, size=(1, 10, 2)).astype(np.float32),
'size': (32, 32),
'valid': True},
{'point': np.random.uniform(32, 64, size=(1, 10, 2)).astype(np.float32),
'size': (32, 32),
'valid': False},
# use visible and size
{'point': np.random.uniform(0, 32, size=(1, 10, 2)).astype(np.float32),
'visible': np.random.randint(0, 2, size=(1, 10,)).astype(np.bool),
'size': (32, 32),
'valid': True},
{'point': np.random.uniform(32, 64, size=(1, 10, 2)).astype(np.float32),
'visible': [_random_visible_including_true(10)],
'size': (32, 32),
'valid': False},
# check n_point
{'point': np.random.uniform(-1, 1, size=(1, 10, 2)).astype(np.float32),
'visible': np.random.randint(0, 2, size=(1, 10,)).astype(np.bool),
'n_point': 10,
'valid': True},
{'point': np.random.uniform(-1, 1, size=(1, 10, 2)).astype(np.float32),
'visible': np.random.randint(0, 2, size=(1, 10,)).astype(np.bool),
'n_point': 11,
'valid': False,
},
# check different instance size
{'point': np.random.uniform(-1, 1, size=(1, 10, 2)).astype(np.float32),
'visible': np.random.randint(0, 2, size=(2, 10,)).astype(np.bool),
'valid': False},
)
class TestAssertIsPoint(unittest.TestCase):
def setUp(self):
if not hasattr(self, 'visible'):
self.visible = None
if not hasattr(self, 'size'):
self.size = None
if not hasattr(self, 'n_point'):
self.n_point = None
def test_assert_is_point(self):
if self.valid:
assert_is_point(
self.point, self.visible, self.size, self.n_point)
else:
with self.assertRaises(AssertionError):
assert_is_point(
self.point, self.visible, self.size, self.n_point)
testing.run_module(__name__, __file__)
|
import logging
import math
from gensim import interfaces, matutils, utils
logger = logging.getLogger(__name__)
class LogEntropyModel(interfaces.TransformationABC):
r"""Objects of this class realize the transformation between word-document co-occurrence matrix (int)
into a locally/globally weighted matrix (positive floats).
This is done by a log entropy normalization, optionally normalizing the resulting documents to unit length.
    The following formulas explain how to compute the log entropy weight for term :math:`i` in document :math:`j`:
    .. math::
        local\_weight_{i,j} = \log(frequency_{i,j} + 1)
        P_{i,j} = \frac{frequency_{i,j}}{\sum_j frequency_{i,j}}
        global\_weight_i = 1 + \frac{\sum_j P_{i,j} * \log(P_{i,j})}{\log(number\_of\_documents + 1)}
        final\_weight_{i,j} = local\_weight_{i,j} * global\_weight_i
Examples
--------
.. sourcecode:: pycon
>>> from gensim.models import LogEntropyModel
>>> from gensim.test.utils import common_texts
>>> from gensim.corpora import Dictionary
>>>
>>> dct = Dictionary(common_texts) # fit dictionary
>>> corpus = [dct.doc2bow(row) for row in common_texts] # convert to BoW format
>>> model = LogEntropyModel(corpus) # fit model
>>> vector = model[corpus[1]] # apply model to document
"""
def __init__(self, corpus, normalize=True):
"""
Parameters
----------
corpus : iterable of iterable of (int, int)
Input corpus in BoW format.
normalize : bool, optional
            If True, the resulting log-entropy-weighted vector will be normalized to unit length;
            if False, no normalization is applied.
"""
self.normalize = normalize
self.n_docs = 0
self.n_words = 0
self.entr = {}
if corpus is not None:
self.initialize(corpus)
def __str__(self):
return "LogEntropyModel(n_docs=%s, n_words=%s)" % (self.n_docs, self.n_words)
def initialize(self, corpus):
"""Calculates the global weighting for all terms in a given corpus and transforms the simple
count representation into the log entropy normalized space.
Parameters
----------
corpus : iterable of iterable of (int, int)
            Corpus in BoW format.
"""
logger.info("calculating counts")
glob_freq = {}
glob_num_words, doc_no = 0, -1
for doc_no, bow in enumerate(corpus):
if doc_no % 10000 == 0:
logger.info("PROGRESS: processing document #%i", doc_no)
glob_num_words += len(bow)
for term_id, term_count in bow:
glob_freq[term_id] = glob_freq.get(term_id, 0) + term_count
# keep some stats about the training corpus
self.n_docs = doc_no + 1
self.n_words = glob_num_words
# and finally compute the global weights
logger.info(
"calculating global log entropy weights for %i documents and %i features (%i matrix non-zeros)",
self.n_docs, len(glob_freq), self.n_words
)
logger.debug('iterating over corpus')
# initialize doc_no2 index in case corpus is empty
doc_no2 = 0
for doc_no2, bow in enumerate(corpus):
for key, freq in bow:
p = (float(freq) / glob_freq[key]) * math.log(float(freq) / glob_freq[key])
self.entr[key] = self.entr.get(key, 0.0) + p
if doc_no2 != doc_no:
raise ValueError("LogEntropyModel doesn't support generators as training data")
logger.debug('iterating over keys')
for key in self.entr:
self.entr[key] = 1 + self.entr[key] / math.log(self.n_docs + 1)
def __getitem__(self, bow):
"""Get log entropy representation of the input vector and/or corpus.
Parameters
----------
bow : list of (int, int)
Document in BoW format.
Returns
-------
list of (int, float)
Log-entropy vector for passed `bow`.
"""
# if the input vector is in fact a corpus, return a transformed corpus
is_corpus, bow = utils.is_corpus(bow)
if is_corpus:
return self._apply(bow)
# unknown (new) terms will be given zero weight (NOT infinity/huge)
vector = [
(term_id, math.log(tf + 1) * self.entr.get(term_id))
for term_id, tf in bow
if term_id in self.entr
]
if self.normalize:
vector = matutils.unitvec(vector)
return vector
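# ----------------------------------------------------------------------------
# Hedged usage sketch (not part of gensim): a tiny, hand-checkable corpus run
# through the formulas documented above. With normalize=False, the weight of
# term 0 in document 0 works out as
#   local  = log(1 + 1) ~= 0.693
#   global = 1 + (2 * 0.5 * log 0.5) / log(2 + 1) ~= 0.369
#   final  ~= 0.693 * 0.369 ~= 0.256
if __name__ == '__main__':
    toy_corpus = [[(0, 1), (1, 1)], [(0, 1)]]  # term 0 in both docs, term 1 in one
    toy_model = LogEntropyModel(toy_corpus, normalize=False)
    weights = dict(toy_model[toy_corpus[0]])
    print("weight of term 0 in doc 0: %.3f (expected ~0.256)" % weights[0])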
|
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import docker
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cloudsuite_media_streaming'
BENCHMARK_CONFIG = """
cloudsuite_media_streaming:
description: >
Run Cloudsuite media streaming benchmark.
vm_groups:
server:
vm_spec: *default_single_core
vm_count: 1
client:
vm_spec: *default_single_core
vm_count: 1
"""
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
"""Install docker. Pull images. Create datasets. Start Nginx server.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
server = benchmark_spec.vm_groups['server'][0]
client = benchmark_spec.vm_groups['client'][0]
def PrepareCommon(vm):
if not docker.IsInstalled(vm):
vm.Install('docker')
vm.Install('cloudsuite/media-streaming:dataset')
vm.RemoteCommand('sudo docker create --name dataset '
'cloudsuite/media-streaming:dataset')
def PrepareServer(vm):
PrepareCommon(vm)
vm.Install('cloudsuite/media-streaming:server')
vm.RemoteCommand('sudo docker run -d --name server --net host '
'--volumes-from dataset '
'cloudsuite/media-streaming:server')
def PrepareClient(vm):
PrepareCommon(vm)
vm.Install('cloudsuite/media-streaming:client')
target_arg_tuples = [(PrepareServer, [server], {}),
(PrepareClient, [client], {})]
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
def Run(benchmark_spec):
"""Run the media streaming benchmark.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
server = benchmark_spec.vm_groups['server'][0]
client = benchmark_spec.vm_groups['client'][0]
results = []
stdout, _ = client.RemoteCommand('sudo docker run --rm --name client '
'--net host --volumes-from dataset '
'cloudsuite/media-streaming:client %s'
% server.internal_ip)
match = re.search(r'^Requests: (.+)$', stdout, re.MULTILINE)
if match:
results.append(sample.Sample('Requests', float(match.group(1)), ''))
match = re.search(r'^Replies: (.+)$', stdout, re.MULTILINE)
if match:
results.append(sample.Sample('Replies', float(match.group(1)), ''))
match = re.search(r'^Reply rate: (.+)$', stdout, re.MULTILINE)
if match:
results.append(sample.Sample('Reply rate', float(match.group(1)),
'replies/s'))
match = re.search(r'^Reply time: (.+)$', stdout, re.MULTILINE)
if match:
results.append(sample.Sample('Reply time', float(match.group(1)), 'ms'))
match = re.search(r'^Net I/O: (.+)$', stdout, re.MULTILINE)
if match:
results.append(sample.Sample('Net I/O', float(match.group(1)), 'KB/s'))
return results
def Cleanup(benchmark_spec):
"""Stop and remove docker containers. Remove images.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
server = benchmark_spec.vm_groups['server'][0]
client = benchmark_spec.vm_groups['client'][0]
def CleanupCommon(vm):
vm.RemoteCommand('sudo docker rm -v dataset')
def CleanupServer(vm):
    vm.RemoteCommand('sudo docker stop server')
    vm.RemoteCommand('sudo docker rm server')
CleanupCommon(vm)
def CleanupClient(vm):
CleanupCommon(vm)
target_arg_tuples = [(CleanupServer, [server], {}),
(CleanupClient, [client], {})]
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
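# Hedged parsing sketch (not part of PerfKitBenchmarker): the regexes in Run()
# applied to a made-up fragment of client output; the real report format is
# assumed to contain 'Metric: value' summary lines like the ones below.
if __name__ == '__main__':
  _sample = 'Requests: 15000\nReplies: 14870\nReply rate: 247.8\nReply time: 12.4\n'
  _match = re.search(r'^Reply rate: (.+)$', _sample, re.MULTILINE)
  print('parsed reply rate: %s replies/s' % _match.group(1))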
|
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import docker
FLAGS = flags.FLAGS
flags.DEFINE_string('cloudsuite_web_search_server_heap_size',
'3g',
'Java heap size for Solr server in the usual java format.')
flags.DEFINE_integer('cloudsuite_web_search_ramp_up',
90,
'Benchmark ramp up time in seconds.',
lower_bound=1)
flags.DEFINE_integer('cloudsuite_web_search_ramp_down',
60,
'Benchmark ramp down time in seconds.',
lower_bound=1)
flags.DEFINE_integer('cloudsuite_web_search_steady_state',
60,
'Benchmark steady state time in seconds.',
lower_bound=1)
flags.DEFINE_integer('cloudsuite_web_search_scale',
50,
'Number of simulated web search users.',
lower_bound=1)
BENCHMARK_NAME = 'cloudsuite_web_search'
BENCHMARK_CONFIG = """
cloudsuite_web_search:
description: >
Run Cloudsuite Web Search benchmark. Specify the number of
clients with --num_vms.
vm_groups:
servers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
clients:
vm_spec: *default_single_core
vm_count: 1
"""
DISK_PATH = '/scratch'
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['num_vms'].present:
config['vm_groups']['clients']['vm_count'] = FLAGS.num_vms
return config
def Prepare(benchmark_spec):
"""Install docker. Pull the required images from DockerHub.
Start Solr index node and client.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
servers = benchmark_spec.vm_groups['servers'][0]
clients = benchmark_spec.vm_groups['clients']
def PrepareCommon(vm):
if not docker.IsInstalled(vm):
vm.Install('docker')
def PrepareServer(vm):
PrepareCommon(vm)
server_cmd = ('sudo echo \'DOCKER_OPTS="-g %s"\''
'| sudo tee /etc/default/docker > /dev/null' % (DISK_PATH))
stdout, _ = vm.RemoteCommand(server_cmd, should_log=True)
server_cmd = 'sudo service docker restart'
stdout, _ = vm.RemoteCommand(server_cmd, should_log=True)
vm.Install('cloudsuite/web-search:server')
server_cmd = ('sudo docker run -d --net host '
'--name server cloudsuite/web-search:server %s 1' %
(FLAGS.cloudsuite_web_search_server_heap_size))
    stdout, _ = vm.RemoteCommand(server_cmd, should_log=True)
def PrepareClient(vm):
PrepareCommon(vm)
vm.Install('cloudsuite/web-search:client')
PrepareServer(servers)
target_arg_tuples = ([(PrepareClient, [vm], {}) for vm in clients])
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
def Run(benchmark_spec):
"""Run the Web Search benchmark.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
clients = benchmark_spec.vm_groups['clients'][0]
servers = benchmark_spec.vm_groups['servers'][0]
benchmark_cmd = ('sudo docker run --rm --net host --name client '
'cloudsuite/web-search:client %s %d %d %d %d ' %
(servers.internal_ip,
FLAGS.cloudsuite_web_search_scale,
FLAGS.cloudsuite_web_search_ramp_up,
FLAGS.cloudsuite_web_search_steady_state,
FLAGS.cloudsuite_web_search_ramp_down))
stdout, _ = clients.RemoteCommand(benchmark_cmd, should_log=True)
ops_per_sec = re.findall(r'\<metric unit="ops/sec"\>(\d+\.?\d*)', stdout)
num_ops_per_sec = float(ops_per_sec[0])
p90 = re.findall(r'\<p90th\>(\d+\.?\d*)', stdout)
num_p90 = float(p90[0])
p99 = re.findall(r'\<p99th\>(\d+\.?\d*)', stdout)
num_p99 = float(p99[0])
results = []
results.append(sample.Sample('Operations per second', num_ops_per_sec,
'ops/s'))
results.append(sample.Sample('90th percentile latency', num_p90, 's'))
results.append(sample.Sample('99th percentile latency', num_p99, 's'))
return results
def Cleanup(benchmark_spec):
"""Stop and remove docker containers. Remove images.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
servers = benchmark_spec.vm_groups['servers'][0]
clients = benchmark_spec.vm_groups['clients']
def CleanupClient(vm):
vm.RemoteCommand('sudo docker stop client')
vm.RemoteCommand('sudo docker rm client')
def CleanupServer(vm):
vm.RemoteCommand('sudo docker stop server')
vm.RemoteCommand('sudo docker rm server')
target_arg_tuples = ([(CleanupClient, [vm], {}) for vm in clients] +
[(CleanupServer, [servers], {})])
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
|
import os
import sys
import six
IN_PYTHONISTA = sys.executable.find('Pythonista') >= 0
ON_TRAVIS = "TRAVIS" in os.environ
# ========================== PYTHONISTA =======================
if IN_PYTHONISTA:
# ------------- clipboard --------------
import clipboard
def clipboard_get():
"""
Get the clipboard content.
:return: clipboard content
:rtype: six.text_type
"""
return clipboard.get()
def clipboard_set(s):
"""
Set the clipboard content.
:param s: string to set
:type s: six.text_type
"""
# TODO: non-unicode support
assert isinstance(s, six.text_type)
clipboard.set(s)
# -------------- pip ----------------------
if six.PY3:
SITE_PACKAGES_DIR_NAME = "site-packages-3"
else:
SITE_PACKAGES_DIR_NAME = "site-packages-2"
SITE_PACKAGES_DIR_NAME_6 = "site-packages"
SITE_PACKAGES_FOLDER = os.path.expanduser('~/Documents/{}'.format(SITE_PACKAGES_DIR_NAME))
SITE_PACKAGES_FOLDER_6 = os.path.expanduser('~/Documents/{}'.format(SITE_PACKAGES_DIR_NAME_6))
BUNDLED_MODULES = [
'bottle',
'beautifulsoup4',
'pycrypto',
'py-dateutil',
'dropbox',
'ecdsa',
'evernote',
'Faker',
'feedparser',
'flask',
'html2text',
'html5lib',
'httplib2',
'itsdangerous',
'jedi',
'jinja2',
'markdown',
'markdown2',
'matplotlib',
'mechanize',
'midiutil',
'mpmath',
'numpy',
'oauth2',
'paramiko',
'parsedatetime',
'Pillow',
'pycparser',
'pyflakes',
'pygments',
'pyparsing',
'PyPDF2',
'pytz',
'qrcode',
'reportlab',
'requests',
'simpy',
'six',
'sqlalchemy',
'pysqlite',
'sympy',
'thrift',
'werkzeug',
'wsgiref',
'pisa',
'xmltodict',
'PyYAML',
]
# -------------- open in / quicklook ----------------------
import console
from objc_util import on_main_thread
@on_main_thread
def open_in(path):
"""
Open a file in another application.
If possible, let the user decide the application
:param path: path to file
:type path: str
"""
console.open_in(path)
@on_main_thread
def quicklook(path):
"""
Show a preview of the file.
:param path: path to file
:type path: str
"""
console.quicklook(path)
# ======================== DEFAULT / PC / travis =========================
else:
# ------------- clipboard --------------
# travis is a variation of PC
if not ON_TRAVIS:
# use pyperclip
import pyperclip
def clipboard_get():
"""
Get the clipboard content.
:return: clipboard content
:rtype: six.text_type
"""
return pyperclip.paste()
def clipboard_set(s):
"""
Set the clipboard content.
:param s: string to set
:type s: six.text_type
"""
# TODO: non-unicode support
assert isinstance(s, six.text_type)
pyperclip.copy(s)
else:
# use fake implementation
global _CLIPBOARD; _CLIPBOARD = u""
def clipboard_get():
"""
Get the clipboard content.
:return: clipboard content
:rtype: six.text_type
"""
return _CLIPBOARD
def clipboard_set(s):
"""
Set the clipboard content.
:param s: string to set
:type s: six.text_type
"""
global _CLIPBOARD
assert isinstance(s, six.text_type)
_CLIPBOARD = s
# -------------- pip ----------------------
import site
try:
SITE_PACKAGES_FOLDER = site.getsitepackages()[0]
except AttributeError:
        # site.getsitepackages() is unavailable in virtualenv
import stash
SITE_PACKAGES_FOLDER = os.path.dirname(stash.__path__[0])
SITE_PACKAGES_FOLDER_6 = None
BUNDLED_MODULES = [
'six',
]
# -------------- open in / quicklook ----------------------
import webbrowser
def open_in(path):
"""
Open a file in another application.
If possible, let the user decide the application
:param path: path to file
:type path: str
"""
webbrowser.open(path, new=1)
def quicklook(path):
"""
Show a preview of the file.
:param path: path to file
:type path: str
"""
webbrowser.open(path, new=1)
|
import io
import json.decoder
import os
import sys
import shutil
import subprocess
import time
import requests
import pygments
from pygments.lexers import PythonLexer
from pygments.formatters import TerminalFormatter
from nikola.plugin_categories import Command
from nikola import utils
LOGGER = utils.get_logger('plugin')
class CommandPlugin(Command):
"""Manage plugins."""
json = None
name = "plugin"
doc_usage = "[-u url] [--user] [-i name] [-r name] [--upgrade] [-l] [--list-installed]"
doc_purpose = "manage plugins"
output_dir = None
needs_config = False
cmd_options = [
{
'name': 'install',
'short': 'i',
'long': 'install',
'type': str,
'default': '',
'help': 'Install a plugin.',
},
{
'name': 'uninstall',
'long': 'uninstall',
'short': 'r',
'type': str,
'default': '',
'help': 'Uninstall a plugin.'
},
{
'name': 'list',
'short': 'l',
'long': 'list',
'type': bool,
'default': False,
'help': 'Show list of available plugins.'
},
{
'name': 'url',
'short': 'u',
'long': 'url',
'type': str,
'help': "URL for the plugin repository",
'default': 'https://plugins.getnikola.com/v8/plugins.json'
},
{
'name': 'user',
'long': 'user',
'type': bool,
'help': "Install user-wide, available for all sites.",
'default': False
},
{
'name': 'upgrade',
'long': 'upgrade',
'type': bool,
'help': "Upgrade all installed plugins.",
'default': False
},
{
'name': 'list_installed',
'long': 'list-installed',
'type': bool,
'help': "List the installed plugins with their location.",
'default': False
},
]
def _execute(self, options, args):
"""Install plugin into current site."""
url = options['url']
user_mode = options['user']
# See the "mode" we need to operate in
install = options.get('install')
uninstall = options.get('uninstall')
upgrade = options.get('upgrade')
list_available = options.get('list')
list_installed = options.get('list_installed')
show_install_notes = options.get('show_install_notes', True)
command_count = [bool(x) for x in (
install,
uninstall,
upgrade,
list_available,
list_installed)].count(True)
if command_count > 1 or command_count == 0:
print(self.help())
return 2
if options.get('output_dir') is not None:
self.output_dir = options.get('output_dir')
else:
if not self.site.configured and not user_mode and install:
LOGGER.warning('No site found, assuming --user')
user_mode = True
if user_mode:
self.output_dir = os.path.expanduser(os.path.join('~', '.nikola', 'plugins'))
else:
self.output_dir = 'plugins'
if list_available:
return self.list_available(url)
elif list_installed:
return self.list_installed()
elif upgrade:
return self.do_upgrade(url)
elif uninstall:
return self.do_uninstall(uninstall)
elif install:
return self.do_install(url, install, show_install_notes)
def list_available(self, url):
"""List all available plugins."""
data = self.get_json(url)
print("Available Plugins:")
print("------------------")
for plugin in sorted(data.keys()):
print(plugin)
return 0
def list_installed(self):
"""List installed plugins."""
plugins = []
for plugin in self.site.plugin_manager.getAllPlugins():
p = plugin.path
if os.path.isdir(p):
p = p + os.sep
else:
p = p + '.py'
plugins.append([plugin.name, p])
plugins.sort()
print('Installed Plugins:')
print('------------------')
maxlength = max(len(i[0]) for i in plugins)
if self.site.colorful:
formatstring = '\x1b[1m{0:<{2}}\x1b[0m at {1}'
else:
formatstring = '{0:<{2}} at {1}'
for name, path in plugins:
print(formatstring.format(name, path, maxlength))
dp = self.site.config['DISABLED_PLUGINS']
if dp:
print('\n\nAlso, you have disabled these plugins: {}'.format(', '.join(dp)))
else:
print('\n\nNo plugins are disabled.')
return 0
def do_upgrade(self, url):
"""Upgrade all installed plugins."""
LOGGER.warning('This is not very smart, it just reinstalls some plugins and hopes for the best')
data = self.get_json(url)
plugins = []
for plugin in self.site.plugin_manager.getAllPlugins():
p = plugin.path
if os.path.isdir(p):
p = p + os.sep
else:
p = p + '.py'
if plugin.name in data:
plugins.append([plugin.name, p])
print('Will upgrade {0} plugins: {1}'.format(len(plugins), ', '.join(n for n, _ in plugins)))
for name, path in plugins:
print('Upgrading {0}'.format(name))
p = path
while True:
tail, head = os.path.split(path)
if head == 'plugins':
self.output_dir = path
break
elif tail == '':
LOGGER.error("Can't find the plugins folder for path: {0}".format(p))
return 1
else:
path = tail
self.do_install(url, name)
return 0
def do_install(self, url, name, show_install_notes=True):
"""Download and install a plugin."""
data = self.get_json(url)
if name in data:
utils.makedirs(self.output_dir)
url = data[name]
LOGGER.info("Downloading '{0}'".format(url))
try:
zip_data = requests.get(url).content
except requests.exceptions.SSLError:
LOGGER.warning("SSL error, using http instead of https (press ^C to abort)")
time.sleep(1)
url = url.replace('https', 'http', 1)
zip_data = requests.get(url).content
zip_file = io.BytesIO()
zip_file.write(zip_data)
LOGGER.info('Extracting: {0} into {1}/'.format(name, self.output_dir))
utils.extract_all(zip_file, self.output_dir)
dest_path = os.path.join(self.output_dir, name)
else:
LOGGER.error("Can't find plugin " + name)
return 1
reqpath = os.path.join(dest_path, 'requirements.txt')
if os.path.exists(reqpath):
LOGGER.warning('This plugin has Python dependencies.')
LOGGER.info('Installing dependencies with pip...')
try:
subprocess.check_call((sys.executable, '-m', 'pip', 'install', '-r', reqpath))
except subprocess.CalledProcessError:
LOGGER.error('Could not install the dependencies.')
print('Contents of the requirements.txt file:\n')
with io.open(reqpath, 'r', encoding='utf-8-sig') as fh:
print(utils.indent(fh.read(), 4 * ' '))
print('You have to install those yourself or through a '
'package manager.')
else:
LOGGER.info('Dependency installation succeeded.')
reqnpypath = os.path.join(dest_path, 'requirements-nonpy.txt')
if os.path.exists(reqnpypath):
LOGGER.warning('This plugin has third-party '
'dependencies you need to install '
'manually.')
print('Contents of the requirements-nonpy.txt file:\n')
with io.open(reqnpypath, 'r', encoding='utf-8-sig') as fh:
for l in fh.readlines():
i, j = l.split('::')
print(utils.indent(i.strip(), 4 * ' '))
print(utils.indent(j.strip(), 8 * ' '))
print()
print('You have to install those yourself or through a package '
'manager.')
req_plug_path = os.path.join(dest_path, 'requirements-plugins.txt')
if os.path.exists(req_plug_path):
LOGGER.info('This plugin requires other Nikola plugins.')
LOGGER.info('Installing plugins...')
plugin_failure = False
try:
with io.open(req_plug_path, 'r', encoding='utf-8-sig') as inf:
for plugname in inf.readlines():
plugin_failure = self.do_install(url, plugname.strip(), show_install_notes) != 0
except Exception:
plugin_failure = True
if plugin_failure:
LOGGER.error('Could not install a plugin.')
print('Contents of the requirements-plugins.txt file:\n')
with io.open(req_plug_path, 'r', encoding='utf-8-sig') as fh:
print(utils.indent(fh.read(), 4 * ' '))
print('You have to install those yourself manually.')
else:
LOGGER.info('Dependency installation succeeded.')
confpypath = os.path.join(dest_path, 'conf.py.sample')
if os.path.exists(confpypath) and show_install_notes:
LOGGER.warning('This plugin has a sample config file. Integrate it with yours in order to make this plugin work!')
print('Contents of the conf.py.sample file:\n')
with io.open(confpypath, 'r', encoding='utf-8-sig') as fh:
if self.site.colorful:
print(pygments.highlight(fh.read(), PythonLexer(), TerminalFormatter()))
else:
print(fh.read())
return 0
def do_uninstall(self, name):
"""Uninstall a plugin."""
for plugin in self.site.plugin_manager.getAllPlugins(): # FIXME: this is repeated thrice
if name == plugin.name: # Uninstall this one
p = plugin.path
if os.path.isdir(p):
# Plugins that have a package in them need to delete parent
# Issue #2356
p = p + os.sep
p = os.path.abspath(os.path.join(p, os.pardir))
else:
p = os.path.dirname(p)
LOGGER.warning('About to uninstall plugin: {0}'.format(name))
LOGGER.warning('This will delete {0}'.format(p))
sure = utils.ask_yesno('Are you sure?')
if sure:
LOGGER.warning('Removing {0}'.format(p))
shutil.rmtree(p)
return 0
return 1
LOGGER.error('Unknown plugin: {0}'.format(name))
return 1
def get_json(self, url):
"""Download the JSON file with all plugins."""
if self.json is None:
try:
try:
self.json = requests.get(url).json()
except requests.exceptions.SSLError:
LOGGER.warning("SSL error, using http instead of https (press ^C to abort)")
time.sleep(1)
url = url.replace('https', 'http', 1)
self.json = requests.get(url).json()
except json.decoder.JSONDecodeError as e:
LOGGER.error("Failed to decode JSON data in response from server.")
LOGGER.error("JSON error encountered: " + str(e))
LOGGER.error("This issue might be caused by server-side issues, or by to unusual activity in your "
"network (as determined by CloudFlare). Please visit https://plugins.getnikola.com/ in "
"a browser.")
sys.exit(2)
return self.json
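# Hedged note (not part of Nikola): get_json() is assumed to return a mapping
# of plugin names to downloadable zip URLs, e.g. a made-up entry such as
#   {"someplugin": "https://plugins.getnikola.com/v8/someplugin.zip"}
# do_install() looks the requested name up in that mapping and extracts the
# downloaded zip into self.output_dir.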
|
from unittest import TestCase
import numpy as np
from scattertext import TermDocMatrixFromScikit
from scattertext.indexstore import IndexStore
from scattertext.test.test_semioticSquare import get_docs_categories_semiotic
class TestTermDocMatrixFromScikit(TestCase):
def test_build(self):
from sklearn.feature_extraction.text import CountVectorizer
categories, docs = get_docs_categories_semiotic()
idx_store = IndexStore()
y = np.array([idx_store.getidx(c) for c in categories])
count_vectorizer = CountVectorizer()
X_counts = count_vectorizer.fit_transform(docs)
term_doc_mat = TermDocMatrixFromScikit(
X=X_counts,
y=y,
feature_vocabulary=count_vectorizer.vocabulary_,
category_names=idx_store.values()).build()
self.assertEqual(term_doc_mat.get_categories()[:2], ['hamlet', 'jay-z/r. kelly'])
self.assertEqual(term_doc_mat
.get_term_freq_df()
.assign(score=term_doc_mat.get_scaled_f_scores('hamlet'))
.sort_values(by='score', ascending=False).index.tolist()[:5],
['that', 'march', 'did', 'majesty', 'sometimes'])
|
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.auth.models import RefreshToken, User
from homeassistant.components.http.ban import process_success_login, process_wrong_login
from homeassistant.const import __version__
from .connection import ActiveConnection
from .error import Disconnect
# mypy: allow-untyped-calls, allow-untyped-defs
TYPE_AUTH = "auth"
TYPE_AUTH_INVALID = "auth_invalid"
TYPE_AUTH_OK = "auth_ok"
TYPE_AUTH_REQUIRED = "auth_required"
AUTH_MESSAGE_SCHEMA = vol.Schema(
{
vol.Required("type"): TYPE_AUTH,
vol.Exclusive("api_password", "auth"): str,
vol.Exclusive("access_token", "auth"): str,
}
)
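# Hedged illustration (the token values below are placeholders, not real secrets):
#   AUTH_MESSAGE_SCHEMA({"type": "auth", "access_token": "<long-lived token>"})
#       -> passes validation
#   AUTH_MESSAGE_SCHEMA({"type": "auth", "access_token": "x", "api_password": "y"})
#       -> raises vol.Invalid, because the two credentials are vol.Exclusive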
def auth_ok_message():
"""Return an auth_ok message."""
return {"type": TYPE_AUTH_OK, "ha_version": __version__}
def auth_required_message():
"""Return an auth_required message."""
return {"type": TYPE_AUTH_REQUIRED, "ha_version": __version__}
def auth_invalid_message(message):
"""Return an auth_invalid message."""
return {"type": TYPE_AUTH_INVALID, "message": message}
class AuthPhase:
"""Connection that requires client to authenticate first."""
def __init__(self, logger, hass, send_message, request):
"""Initialize the authentiated connection."""
self._hass = hass
self._send_message = send_message
self._logger = logger
self._request = request
self._authenticated = False
self._connection = None
async def async_handle(self, msg):
"""Handle authentication."""
try:
msg = AUTH_MESSAGE_SCHEMA(msg)
except vol.Invalid as err:
error_msg = (
f"Auth message incorrectly formatted: {humanize_error(msg, err)}"
)
self._logger.warning(error_msg)
self._send_message(auth_invalid_message(error_msg))
raise Disconnect from err
if "access_token" in msg:
self._logger.debug("Received access_token")
refresh_token = await self._hass.auth.async_validate_access_token(
msg["access_token"]
)
if refresh_token is not None:
return await self._async_finish_auth(refresh_token.user, refresh_token)
self._send_message(auth_invalid_message("Invalid access token or password"))
await process_wrong_login(self._request)
raise Disconnect
async def _async_finish_auth(
self, user: User, refresh_token: RefreshToken
) -> ActiveConnection:
"""Create an active connection."""
self._logger.debug("Auth OK")
await process_success_login(self._request)
self._send_message(auth_ok_message())
return ActiveConnection(
self._logger, self._hass, self._send_message, user, refresh_token
)
|
import sys, re, string, time, copy, gc
from itertools import *
import time
try:
izip
except NameError:
izip = zip # Py3
def exec_(code, glob):
if sys.version_info[0] >= 3:
exec(code, glob)
else:
exec("exec code in glob")
TREE_FACTOR = 1 # increase tree size with '-l' / '-L' cmd option
_TEXT = "some ASCII text" * TREE_FACTOR
_UTEXT = u"some klingon: \F8D2" * TREE_FACTOR
_ATTRIBUTES = {
'{attr}test1' : _TEXT,
'{attr}test2' : _TEXT,
'bla1' : _TEXT,
'bla2' : _TEXT,
'bla3' : _TEXT
}
def initArgs(argv):
global TREE_FACTOR
try:
argv.remove('-l')
# use large trees
TREE_FACTOR *= 2
except ValueError:
pass
try:
argv.remove('-L')
# use LARGE trees
TREE_FACTOR *= 2
except ValueError:
pass
############################################################
# benchmark decorators
############################################################
def with_attributes(*use_attributes):
"Decorator for benchmarks that use attributes"
vmap = {False : 0, True : 1}
values = [ vmap[bool(v)] for v in use_attributes ]
def set_value(function):
try:
function.ATTRIBUTES.update(values)
except AttributeError:
function.ATTRIBUTES = set(values)
return function
return set_value
def with_text(no_text=False, text=False, utext=False):
"Decorator for benchmarks that use text"
values = []
if no_text:
values.append(0)
if text:
values.append(1)
if utext:
values.append(2)
def set_value(function):
try:
            function.TEXT.update(values)
except AttributeError:
function.TEXT = set(values)
return function
return set_value
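# Hedged example of how the two decorators above combine on a benchmark method
# (the method name is made up):
#   @with_attributes(False, True)
#   @with_text(text=True, utext=True)
#   def bench_append(self, root): ...
# leaves bench_append.ATTRIBUTES == {0, 1} and bench_append.TEXT == {1, 2},
# which benchmarks() below expands into the corresponding tree variants.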
def onlylib(*libs):
"Decorator to restrict benchmarks to specific libraries"
def set_libs(function):
if libs:
function.LIBS = libs
return function
return set_libs
def serialized(function):
"Decorator for benchmarks that require serialized XML data"
function.STRING = True
return function
def children(function):
"Decorator for benchmarks that require a list of root children"
function.CHILDREN = True
return function
def nochange(function):
"Decorator for benchmarks that do not change the XML tree"
function.NO_CHANGE = True
return function
############################################################
# benchmark baseclass
############################################################
class SkippedTest(Exception):
pass
class TreeBenchMark(object):
atoz = string.ascii_lowercase
repeat100 = range(100)
repeat500 = range(500)
repeat1000 = range(1000)
_LIB_NAME_MAP = {
'etree' : 'lxe',
'ElementTree' : 'ET',
'cElementTree' : 'cET'
}
SEARCH_TAG = "{cdefg}a00001"
def __init__(self, etree, etree_parser=None):
self.etree = etree
libname = etree.__name__.split('.')[-1]
self.lib_name = self._LIB_NAME_MAP.get(libname, libname)
if libname == 'etree':
deepcopy = copy.deepcopy
def set_property(root, fname):
xml = self._serialize_tree(root)
if etree_parser is not None:
setattr(self, fname, lambda : etree.XML(xml, etree_parser))
else:
setattr(self, fname, lambda : deepcopy(root))
setattr(self, fname + '_xml', lambda : xml)
setattr(self, fname + '_children', lambda : root[:])
else:
def set_property(root, fname):
setattr(self, fname, self.et_make_clone_factory(root))
xml = self._serialize_tree(root)
setattr(self, fname + '_xml', lambda : xml)
setattr(self, fname + '_children', lambda : root[:])
attribute_list = list(enumerate( [{}, _ATTRIBUTES] ))
text_list = list(enumerate( [None, _TEXT, _UTEXT] ))
build_name = self._tree_builder_name
self.setup_times = []
for tree in self._all_trees():
times = []
self.setup_times.append(times)
setup = getattr(self, '_setup_tree%d' % tree)
for an, attributes in attribute_list:
for tn, text in text_list:
root, t = setup(text, attributes)
times.append(t)
set_property(root, build_name(tree, tn, an))
def _tree_builder_name(self, tree, tn, an):
return '_root%d_T%d_A%d' % (tree, tn, an)
def tree_builder(self, tree, tn, an, serial, children):
name = self._tree_builder_name(tree, tn, an)
if serial:
name += '_xml'
elif children:
name += '_children'
return getattr(self, name)
def _serialize_tree(self, root):
return self.etree.tostring(root, encoding='UTF-8')
def et_make_clone_factory(self, elem):
def generate_elem(append, elem, level):
var = "e" + str(level)
arg = repr(elem.tag)
if elem.attrib:
arg += ", **%r" % elem.attrib
if level == 1:
append(" e1 = Element(%s)" % arg)
else:
append(" %s = SubElement(e%d, %s)" % (var, level-1, arg))
if elem.text:
append(" %s.text = %r" % (var, elem.text))
if elem.tail:
append(" %s.tail = %r" % (var, elem.tail))
for e in elem:
generate_elem(append, e, level+1)
# generate code for a function that creates a tree
output = ["def element_factory():"]
generate_elem(output.append, elem, 1)
output.append(" return e1")
# setup global function namespace
namespace = {
"Element" : self.etree.Element,
"SubElement" : self.etree.SubElement
}
# create function object
exec_("\n".join(output), namespace)
return namespace["element_factory"]
def _all_trees(self):
all_trees = []
for name in dir(self):
if name.startswith('_setup_tree'):
all_trees.append(int(name[11:]))
return all_trees
def _setup_tree1(self, text, attributes):
"tree with 26 2nd level and 520 * TREE_FACTOR 3rd level children"
atoz = self.atoz
SubElement = self.etree.SubElement
current_time = time.time
t = current_time()
root = self.etree.Element('{abc}rootnode')
for ch1 in atoz:
el = SubElement(root, "{abc}"+ch1*5, attributes)
el.text = text
for ch2 in atoz:
tag = "{cdefg}%s00001" % ch2
for i in range(20 * TREE_FACTOR):
SubElement(el, tag).tail = text
t = current_time() - t
return root, t
def _setup_tree2(self, text, attributes):
"tree with 520 * TREE_FACTOR 2nd level and 26 3rd level children"
atoz = self.atoz
SubElement = self.etree.SubElement
current_time = time.time
t = current_time()
root = self.etree.Element('{abc}rootnode')
for ch1 in atoz:
for i in range(20 * TREE_FACTOR):
el = SubElement(root, "{abc}"+ch1*5, attributes)
el.text = text
for ch2 in atoz:
SubElement(el, "{cdefg}%s00001" % ch2).tail = text
t = current_time() - t
return root, t
def _setup_tree3(self, text, attributes):
"tree of depth 8 + TREE_FACTOR with 3 children per node"
SubElement = self.etree.SubElement
current_time = time.time
t = current_time()
root = self.etree.Element('{abc}rootnode')
children = [root]
for i in range(6 + TREE_FACTOR):
children = [ SubElement(c, "{cdefg}a%05d" % (i%8), attributes)
for i,c in enumerate(chain(children, children, children)) ]
for child in children:
child.text = text
child.tail = text
t = current_time() - t
return root, t
def _setup_tree4(self, text, attributes):
"small tree with 26 2nd level and 2 3rd level children"
SubElement = self.etree.SubElement
current_time = time.time
t = current_time()
root = self.etree.Element('{abc}rootnode')
for ch1 in self.atoz:
el = SubElement(root, "{abc}"+ch1*5, attributes)
el.text = text
SubElement(el, "{cdefg}a00001", attributes).tail = text
SubElement(el, "{cdefg}z00000", attributes).tail = text
t = current_time() - t
return root, t
def benchmarks(self):
"""Returns a list of all benchmarks.
A benchmark is a tuple containing a method name and a list of tree
numbers. Trees are prepared by the setup function.
"""
all_trees = self._all_trees()
benchmarks = []
for name in dir(self):
if not name.startswith('bench_'):
continue
method = getattr(self, name)
if hasattr(method, 'LIBS') and self.lib_name not in method.LIBS:
method_call = None
else:
method_call = method
if method.__doc__:
tree_sets = method.__doc__.split()
else:
tree_sets = ()
if tree_sets:
tree_tuples = [list(map(int, tree_set.split(',')))
for tree_set in tree_sets]
else:
try:
arg_count = method.func_code.co_argcount - 1
except AttributeError:
try:
arg_count = method.__code__.co_argcount - 1
except AttributeError:
arg_count = 1
tree_tuples = self._permutations(all_trees, arg_count)
serialized = getattr(method, 'STRING', False)
children = getattr(method, 'CHILDREN', False)
no_change = getattr(method, 'NO_CHANGE', False)
for tree_tuple in tree_tuples:
for tn in sorted(getattr(method, 'TEXT', (0,))):
for an in sorted(getattr(method, 'ATTRIBUTES', (0,))):
benchmarks.append((name, method_call, tree_tuple,
tn, an, serialized, children,
no_change))
return benchmarks
def _permutations(self, seq, count):
def _permutations(prefix, remainder, count):
if count == 0:
return [ prefix[:] ]
count -= 1
perms = []
prefix.append(None)
for pos, el in enumerate(remainder):
new_remainder = remainder[:pos] + remainder[pos+1:]
prefix[-1] = el
perms.extend( _permutations(prefix, new_remainder, count) )
prefix.pop()
return perms
return _permutations([], seq, count)
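    # Hedged example: _permutations([1, 2, 3], 2) enumerates ordered pairs of
    # distinct tree numbers, i.e. [[1, 2], [1, 3], [2, 1], [2, 3], [3, 1], [3, 2]];
    # benchmarks() uses this to pair trees for multi-argument benchmark methods.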
############################################################
# Prepare and run benchmark suites
############################################################
def buildSuites(benchmark_class, etrees, selected):
benchmark_suites = list(map(benchmark_class, etrees))
# sorted by name and tree tuple
benchmarks = [ sorted(b.benchmarks()) for b in benchmark_suites ]
selected = [ re.compile(r).search for r in selected ]
if selected:
benchmarks = [ [ b for b in bs
if [ match for match in selected
if match(b[0]) ] ]
for bs in benchmarks ]
return benchmark_suites, benchmarks
def build_treeset_name(trees, tn, an, serialized, children):
text = {0:'-', 1:'S', 2:'U'}[tn]
attr = {0:'-', 1:'A'}[an]
ser = {True:'X', False:'T'}[serialized]
chd = {True:'C', False:'R'}[children]
return "%s%s%s%s T%s" % (text, attr, ser, chd, ',T'.join(map(str, trees))[:6])
def printSetupTimes(benchmark_suites):
print("Setup times for trees in seconds:")
for b in benchmark_suites:
sys.stdout.write("%-3s: " % b.lib_name)
for an in (0,1):
for tn in (0,1,2):
sys.stdout.write(' %s ' %
build_treeset_name((), tn, an, False, False)[:2])
print('')
for i, tree_times in enumerate(b.setup_times):
print(" T%d: %s" % (i+1, ' '.join("%6.4f" % t for t in tree_times)))
print('')
def runBench(suite, method_name, method_call, tree_set, tn, an,
serial, children, no_change):
if method_call is None:
raise SkippedTest
current_time = time.time
call_repeat = range(10)
tree_builders = [ suite.tree_builder(tree, tn, an, serial, children)
for tree in tree_set ]
rebuild_trees = not no_change and not serial
args = tuple([ build() for build in tree_builders ])
method_call(*args) # run once to skip setup overhead
times = []
for i in range(3):
gc.collect()
gc.disable()
t = -1
for i in call_repeat:
if rebuild_trees:
args = [ build() for build in tree_builders ]
t_one_call = current_time()
method_call(*args)
t_one_call = current_time() - t_one_call
if t < 0:
t = t_one_call
else:
t = min(t, t_one_call)
times.append(1000.0 * t)
gc.enable()
if rebuild_trees:
args = ()
args = ()
gc.collect()
return times
def runBenchmarks(benchmark_suites, benchmarks):
for bench_calls in izip(*benchmarks):
for lib, (bench, benchmark_setup) in enumerate(izip(benchmark_suites, bench_calls)):
bench_name = benchmark_setup[0]
tree_set_name = build_treeset_name(*benchmark_setup[-6:-1])
sys.stdout.write("%-3s: %-28s (%-10s) " % (
bench.lib_name, bench_name[6:34], tree_set_name))
sys.stdout.flush()
try:
result = runBench(bench, *benchmark_setup)
except SkippedTest:
print("skipped")
except KeyboardInterrupt:
print("interrupted by user")
sys.exit(1)
except Exception:
exc_type, exc_value = sys.exc_info()[:2]
print("failed: %s: %s" % (exc_type.__name__, exc_value))
exc_type = exc_value = None
else:
print("%9.4f msec/pass, best of (%s)" % (
min(result), ' '.join("%9.4f" % t for t in result)))
if len(benchmark_suites) > 1:
print('') # empty line between different benchmarks
############################################################
# Main program
############################################################
def main(benchmark_class):
import_lxml = True
callgrind_zero = False
if len(sys.argv) > 1:
try:
sys.argv.remove('-i')
# run benchmark 'inplace'
sys.path.insert(0, 'src')
except ValueError:
pass
try:
sys.argv.remove('-nolxml')
# run without lxml
import_lxml = False
except ValueError:
pass
try:
sys.argv.remove('-z')
# reset callgrind after tree setup
callgrind_zero = True
except ValueError:
pass
initArgs(sys.argv)
_etrees = []
if import_lxml:
from lxml import etree
_etrees.append(etree)
try:
sys.argv.remove('-fel')
except ValueError:
pass
else:
# use fast element creation in lxml.etree
etree.set_element_class_lookup(
etree.ElementDefaultClassLookup())
if len(sys.argv) > 1:
if '-a' in sys.argv or '-c' in sys.argv:
# 'all' or 'C-implementations' ?
try:
sys.argv.remove('-c')
except ValueError:
pass
try:
import cElementTree as cET
_etrees.append(cET)
except ImportError:
try:
import xml.etree.cElementTree as cET
_etrees.append(cET)
except ImportError:
pass
try:
# 'all' ?
sys.argv.remove('-a')
except ValueError:
pass
else:
try:
from elementtree import ElementTree as ET
_etrees.append(ET)
except ImportError:
try:
from xml.etree import ElementTree as ET
_etrees.append(ET)
except ImportError:
pass
if not _etrees:
print("No library to test. Exiting.")
sys.exit(1)
print("Preparing test suites and trees ...")
selected = set( sys.argv[1:] )
benchmark_suites, benchmarks = \
buildSuites(benchmark_class, _etrees, selected)
print("Running benchmark on", ', '.join(b.lib_name
for b in benchmark_suites))
print('')
printSetupTimes(benchmark_suites)
if callgrind_zero:
cmd = open("callgrind.cmd", 'w')
cmd.write('+Instrumentation\n')
cmd.write('Zero\n')
cmd.close()
runBenchmarks(benchmark_suites, benchmarks)
|
try:
from twisted.internet.task import react
except ImportError:
react = lambda _, _reactor=None: None
inlineCallbacks = lambda _: lambda: None
returnValue = lambda _: lambda: None
backend = 'empty'
else:
from twisted.internet.defer import inlineCallbacks
from twisted.internet.defer import returnValue
backend = 'twisted'
class Reactor(object):
fake = False
reactor = Reactor()
coroutine = inlineCallbacks
return_value = returnValue
_issync = backend == 'empty'
_isasync = not _issync
|
import datetime
import aiohttp
import pytest
from homeassistant import config_entries
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.common import MockConfigEntry, async_fire_time_changed
DUMMY_REQUEST_INFO = aiohttp.client.RequestInfo(
url="http://example.com", method="GET", headers={}, real_url="http://example.com"
)
CONNECTION_EXCEPTIONS = [
aiohttp.ClientConnectionError("Mock connection error"),
aiohttp.ClientResponseError(DUMMY_REQUEST_INFO, [], message="Mock response error"),
]
async def async_setup_test_fixture(hass, mock_get_station, initial_value):
"""Create a dummy config entry for testing polling."""
mock_get_station.return_value = initial_value
entry = MockConfigEntry(
version=1,
domain="eafm",
entry_id="VikingRecorder1234",
data={"station": "L1234"},
title="Viking Recorder",
connection_class=config_entries.CONN_CLASS_CLOUD_PUSH,
)
entry.add_to_hass(hass)
assert await async_setup_component(hass, "eafm", {})
assert entry.state == config_entries.ENTRY_STATE_LOADED
await hass.async_block_till_done()
async def poll(value):
mock_get_station.reset_mock(return_value=True, side_effect=True)
if isinstance(value, Exception):
mock_get_station.side_effect = value
else:
mock_get_station.return_value = value
next_update = dt_util.utcnow() + datetime.timedelta(60 * 15)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
return entry, poll
async def test_reading_measures_not_list(hass, mock_get_station):
"""
Test that a measure can be a dict not a list.
E.g. https://environment.data.gov.uk/flood-monitoring/id/stations/751110
"""
_ = await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": {
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
},
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
async def test_reading_no_unit(hass, mock_get_station):
"""
Test that a sensor functions even if its unit is not known.
E.g. https://environment.data.gov.uk/flood-monitoring/id/stations/L0410
"""
_ = await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
}
],
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
async def test_ignore_invalid_latest_reading(hass, mock_get_station):
"""
    Test that a measure whose latestReading is not a reading dict is ignored.
E.g. https://environment.data.gov.uk/flood-monitoring/id/stations/L0410
"""
_ = await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": "http://environment.data.gov.uk/flood-monitoring/data/readings/L0410-level-stage-i-15_min----/2017-02-22T10-30-00Z",
"stationReference": "L0410",
},
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Other",
"latestReading": {"value": 5},
"stationReference": "L0411",
},
],
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state is None
state = hass.states.get("sensor.my_station_other_stage")
assert state.state == "5"
@pytest.mark.parametrize("exception", CONNECTION_EXCEPTIONS)
async def test_reading_unavailable(hass, mock_get_station, exception):
"""Test that a sensor is marked as unavailable if there is a connection error."""
_, poll = await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
}
],
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
await poll(exception)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "unavailable"
@pytest.mark.parametrize("exception", CONNECTION_EXCEPTIONS)
async def test_recover_from_failure(hass, mock_get_station, exception):
"""Test that a sensor recovers from failures."""
_, poll = await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
}
],
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
await poll(exception)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "unavailable"
await poll(
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 56},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
}
],
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "56"
async def test_reading_is_sampled(hass, mock_get_station):
"""Test that a sensor is added and polled."""
await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
}
],
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == "m"
async def test_multiple_readings_are_sampled(hass, mock_get_station):
"""Test that multiple sensors are added and polled."""
await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
},
{
"@id": "really-long-unique-id-2",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Second Stage",
"parameterName": "Water Level",
"latestReading": {"value": 4},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
},
],
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == "m"
state = hass.states.get("sensor.my_station_water_level_second_stage")
assert state.state == "4"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == "m"
async def test_ignore_no_latest_reading(hass, mock_get_station):
"""Test that a measure is ignored if it has no latest reading."""
await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
},
{
"@id": "really-long-unique-id-2",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Second Stage",
"parameterName": "Water Level",
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
},
],
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == "m"
state = hass.states.get("sensor.my_station_water_level_second_stage")
assert state is None
async def test_mark_existing_as_unavailable_if_no_latest(hass, mock_get_station):
"""Test that a measure is marked as unavailable if it has no latest reading."""
_, poll = await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
}
],
},
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == "m"
await poll(
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
}
],
}
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "unavailable"
await poll(
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
}
],
}
)
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
async def test_unload_entry(hass, mock_get_station):
"""Test being able to unload an entry."""
entry, _ = await async_setup_test_fixture(
hass,
mock_get_station,
{
"label": "My station",
"measures": [
{
"@id": "really-long-unique-id",
"label": "York Viking Recorder - level-stage-i-15_min----",
"qualifier": "Stage",
"parameterName": "Water Level",
"latestReading": {"value": 5},
"stationReference": "L1234",
"unit": "http://qudt.org/1.1/vocab/unit#Meter",
"unitName": "m",
}
],
},
)
# And there should be an entity
state = hass.states.get("sensor.my_station_water_level_stage")
assert state.state == "5"
assert await entry.async_unload(hass)
# And the entity should be gone
assert not hass.states.get("sensor.my_station_water_level_stage")
|
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_FORCE_UPDATE, CONF_NAME
import homeassistant.helpers.config_validation as cv
import homeassistant.helpers.event as evt
from . import CONF_ALIASES, CONF_DEVICES, RflinkDevice
CONF_OFF_DELAY = "off_delay"
DEFAULT_FORCE_UPDATE = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(
CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE
): cv.boolean,
vol.Optional(CONF_OFF_DELAY): cv.positive_int,
vol.Optional(CONF_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
}
},
extra=vol.ALLOW_EXTRA,
)
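# Hedged configuration sketch matching the schema above (device ids, names and
# values below are made up):
#
# binary_sensor:
#   - platform: rflink
#     devices:
#       newkaku_0000c6c2_1:
#         name: hallway_motion
#         device_class: motion
#         off_delay: 120
#         aliases:
#           - newkaku_0000c6c2_2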
def devices_from_config(domain_config):
"""Parse configuration and add Rflink sensor devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
device = RflinkBinarySensor(device_id, **config)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Rflink platform."""
async_add_entities(devices_from_config(config))
class RflinkBinarySensor(RflinkDevice, BinarySensorEntity):
"""Representation of an Rflink binary sensor."""
def __init__(
self, device_id, device_class=None, force_update=False, off_delay=None, **kwargs
):
"""Handle sensor specific args and super init."""
self._state = None
self._device_class = device_class
self._force_update = force_update
self._off_delay = off_delay
self._delay_listener = None
super().__init__(device_id, **kwargs)
def _handle_event(self, event):
"""Domain specific event handler."""
command = event["command"]
if command in ["on", "allon"]:
self._state = True
elif command in ["off", "alloff"]:
self._state = False
if self._state and self._off_delay is not None:
def off_delay_listener(now):
"""Switch device off after a delay."""
self._delay_listener = None
self._state = False
self.async_write_ha_state()
if self._delay_listener is not None:
self._delay_listener()
self._delay_listener = evt.async_call_later(
self.hass, self._off_delay, off_delay_listener
)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def force_update(self):
"""Force update."""
return self._force_update
|
from functools import partial
from ...utils import verbose, get_config
from ..utils import (has_dataset, _data_path, _data_path_doc,
_get_version, _version_doc)
has_spm_data = partial(has_dataset, name='spm')
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='spm',
download=download)
data_path.__doc__ = _data_path_doc.format(name='spm',
conf='MNE_DATASETS_SPM_DATA_PATH')
def get_version(): # noqa: D103
return _get_version('spm')
get_version.__doc__ = _version_doc.format(name='spm')
def _skip_spm_data():
skip_testing = (get_config('MNE_SKIP_TESTING_DATASET_TESTS', 'false') ==
'true')
skip = skip_testing or not has_spm_data()
return skip
def requires_spm_data(func):
"""Skip testing data test."""
import pytest
return pytest.mark.skipif(_skip_spm_data(),
reason='Requires spm dataset')(func)
|
import typing
import keras
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine.param_table import ParamTable
from matchzoo.engine import hyper_spaces
class ArcI(BaseModel):
"""
ArcI Model.
Examples:
>>> model = ArcI()
>>> model.params['num_blocks'] = 1
>>> model.params['left_filters'] = [32]
>>> model.params['right_filters'] = [32]
>>> model.params['left_kernel_sizes'] = [3]
>>> model.params['right_kernel_sizes'] = [3]
>>> model.params['left_pool_sizes'] = [2]
>>> model.params['right_pool_sizes'] = [4]
>>> model.params['conv_activation_func'] = 'relu'
>>> model.params['mlp_num_layers'] = 1
>>> model.params['mlp_num_units'] = 64
>>> model.params['mlp_num_fan_out'] = 32
>>> model.params['mlp_activation_func'] = 'relu'
>>> model.params['dropout_rate'] = 0.5
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls) -> ParamTable:
""":return: model default parameters."""
params = super().get_default_params(
with_embedding=True,
with_multi_layer_perceptron=True
)
params['optimizer'] = 'adam'
params.add(Param(name='num_blocks', value=1,
desc="Number of convolution blocks."))
params.add(Param(name='left_filters', value=[32],
desc="The filter size of each convolution "
"blocks for the left input."))
params.add(Param(name='left_kernel_sizes', value=[3],
desc="The kernel size of each convolution "
"blocks for the left input."))
params.add(Param(name='right_filters', value=[32],
desc="The filter size of each convolution "
"blocks for the right input."))
params.add(Param(name='right_kernel_sizes', value=[3],
desc="The kernel size of each convolution "
"blocks for the right input."))
params.add(Param(name='conv_activation_func', value='relu',
desc="The activation function in the "
"convolution layer."))
params.add(Param(name='left_pool_sizes', value=[2],
desc="The pooling size of each convolution "
"blocks for the left input."))
params.add(Param(name='right_pool_sizes', value=[2],
desc="The pooling size of each convolution "
"blocks for the right input."))
params.add(Param(
name='padding',
value='same',
hyper_space=hyper_spaces.choice(
['same', 'valid', 'causal']),
desc="The padding mode in the convolution layer. It should be one"
"of `same`, `valid`, and `causal`."
))
params.add(Param(
'dropout_rate', 0.0,
hyper_space=hyper_spaces.quniform(
low=0.0, high=0.8, q=0.01),
desc="The dropout rate."
))
return params
def build(self):
"""
Build model structure.
        ArcI uses a Siamese architecture.
"""
input_left, input_right = self._make_inputs()
embedding = self._make_embedding_layer()
embed_left = embedding(input_left)
embed_right = embedding(input_right)
for i in range(self._params['num_blocks']):
embed_left = self._conv_pool_block(
embed_left,
self._params['left_filters'][i],
self._params['left_kernel_sizes'][i],
self._params['padding'],
self._params['conv_activation_func'],
self._params['left_pool_sizes'][i]
)
embed_right = self._conv_pool_block(
embed_right,
self._params['right_filters'][i],
self._params['right_kernel_sizes'][i],
self._params['padding'],
self._params['conv_activation_func'],
self._params['right_pool_sizes'][i]
)
rep_left = keras.layers.Flatten()(embed_left)
rep_right = keras.layers.Flatten()(embed_right)
concat = keras.layers.Concatenate(axis=1)([rep_left, rep_right])
dropout = keras.layers.Dropout(
rate=self._params['dropout_rate'])(concat)
mlp = self._make_multi_layer_perceptron_layer()(dropout)
inputs = [input_left, input_right]
x_out = self._make_output_layer()(mlp)
self._backend = keras.Model(inputs=inputs, outputs=x_out)
def _conv_pool_block(
self,
input_: typing.Any,
filters: int,
kernel_size: int,
padding: str,
conv_activation_func: str,
pool_size: int
) -> typing.Any:
output = keras.layers.Conv1D(
filters,
kernel_size,
padding=padding,
activation=conv_activation_func
)(input_)
output = keras.layers.MaxPooling1D(pool_size=pool_size)(output)
return output
|
from django.utils.translation import gettext_lazy as _
from weblate.formats.helpers import CONTROLCHARS
from weblate.trans.autofixes.base import AutoFix
class ReplaceTrailingDotsWithEllipsis(AutoFix):
"""Replace Trailing Dots with an Ellipsis."""
fix_id = "end-ellipsis"
name = _("Trailing ellipsis")
def fix_single_target(self, target, source, unit):
if source and source[-1] == "…" and target.endswith("..."):
return "{}…".format(target[:-3]), True
return target, False
class RemoveZeroSpace(AutoFix):
"""Remove zero width space if there is none in the source."""
fix_id = "zero-width-space"
name = _("Zero-width space")
def fix_single_target(self, target, source, unit):
if unit.translation.language.base_code == "km":
return target, False
if "\u200b" not in source and "\u200b" in target:
return target.replace("\u200b", ""), True
return target, False
class RemoveControlChars(AutoFix):
"""Remove control characters from the string."""
fix_id = "control-chars"
name = _("Control characters")
def fix_single_target(self, target, source, unit):
modified = False
for char in CONTROLCHARS:
if char not in source and char in target:
target = target.replace(char, "")
modified = True
return target, modified
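# Hedged usage sketch (editor's addition, not part of the upstream module):
# demonstrates what ``fix_single_target`` returns for the ellipsis fix above.
# The ``unit`` argument is ignored by that particular fix, so ``None`` is
# passed purely for illustration; real callers pass a translation unit.
def _example_ellipsis_fix():
    fix = ReplaceTrailingDotsWithEllipsis()
    fixed, changed = fix.fix_single_target("Wird geladen...", "Loading…", None)
    # fixed == "Wird geladen…" and changed is True
    return fixed, changed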
|
from stash.tests.stashtest import StashTestCase, requires_network
class SelfupdateTests(StashTestCase):
"""
Tests for the 'selfupdate' command.
I have no idea how to test the actual selfupdate, so the tests
currently only test 'selfupdate --check' and 'selfupdate --help'.
"""
def test_help(self):
"""tests 'selfupdate --help'"""
output = self.run_command("selfupdate --help", exitcode=0)
self.assertIn("selfupdate", output)
self.assertIn("usage", output)
self.assertIn("-n", output)
self.assertIn("--check", output)
self.assertIn("-f", output)
self.assertIn("--force", output)
@requires_network
def test_check_no_download(self):
"""ensures 'selfupdate --check' does not download anything."""
output = self.run_command("selfupdate --check", exitcode=0)
contains_latest_version = ("Already at latest version" in output)
contains_new_version = ("New version available" in output)
assert (contains_latest_version or contains_new_version)
self.assertNotIn("Url: ", output)
self.assertNotIn("Update completed.", output)
self.assertNotIn("Failed to update. Please try again.", output)
def test_default_repo_branch(self):
"""test that selfupdate uses the correct default repo and branch"""
# network may be unavailable, but we are not interested anyway,
# so we ignore the exitcode
output = self.run_command("selfupdate --check", exitcode=None)
self.assertIn("Target: ywangd:master", output)
self.assertNotIn("Target: ywangd:dev", output)
def test_default_repo(self):
"""test that selfupdate uses the correct default repo"""
# network may be unavailable, but we are not interested anyway,
# so we ignore the exitcode
output = self.run_command("selfupdate --check dev", exitcode=None)
self.assertIn("Target: ywangd:dev", output)
self.assertNotIn("Target: ywangd:master", output)
def test_SELFUPDATE_TARGET(self):
"""test that selfupdate uses the correct default repo"""
# network may be unavailable, but we are not interested anyway,
# so we ignore the exitcode
output = self.run_command("SELFUPDATE_TARGET=ywangd:dev selfupdate --check", exitcode=None)
self.assertIn("Target: ywangd:dev", output)
self.assertNotIn("Target: ywangd:master", output)
def test_target_repo(self):
"""test that selfupdate uses the correct default repo"""
# network may be unavailable, but we are not interested anyway,
# so we ignore the exitcode
output = self.run_command("selfupdate --check bennr01:dev", exitcode=None)
self.assertIn("Target: bennr01:dev", output)
self.assertNotIn("Target: ywangd:master", output)
@requires_network
def test_version_check_outdated(self):
"""test the version check on an outdated branch."""
output = self.run_command("selfupdate --check bennr01:selfupdate_test_outdated", exitcode=0)
self.assertIn("Target: bennr01:selfupdate_test_outdated", output)
self.assertNotIn("Target: ywangd:master", output)
self.assertIn("Already at latest version", output)
self.assertNotIn("New version available", output)
self.assertNotIn("Error: ", output)
@requires_network
def test_version_check_update_available(self):
"""test the version check on an outdated branch."""
output = self.run_command("selfupdate --check bennr01:selfupdate_test_future", exitcode=0)
self.assertIn("Target: bennr01:selfupdate_test_future", output)
self.assertNotIn("Target: ywangd:master", output)
self.assertNotIn("Already at latest version", output)
self.assertIn("New version available", output)
self.assertNotIn("Error: ", output)
@requires_network
def test_version_check_does_not_exist(self):
"""test the version check on an nonexistend branch."""
output = self.run_command("selfupdate --check selfupdate_test_does_not_exist", exitcode=0)
self.assertIn("Target: ywangd:selfupdate_test_does_not_exist", output)
self.assertNotIn("Target: ywangd:master", output)
self.assertNotIn("Already at latest version", output)
self.assertNotIn("New version available", output)
self.assertIn("Error: ", output)
|
import os
import cherrypy
from cherrypy import _cpconfig, _cplogging, _cprequest, _cpwsgi, tools
from cherrypy.lib import httputil, reprconf
class Application(object):
"""A CherryPy Application.
Servers and gateways should not instantiate Request objects directly.
Instead, they should ask an Application object for a request object.
An instance of this class may also be used as a WSGI callable
(WSGI application object) for itself.
"""
root = None
"""The top-most container of page handlers for this app. Handlers should
be arranged in a hierarchy of attributes, matching the expected URI
hierarchy; the default dispatcher then searches this hierarchy for a
matching handler. When using a dispatcher other than the default,
this value may be None."""
config = {}
"""A dict of {path: pathconf} pairs, where 'pathconf' is itself a dict
of {key: value} pairs."""
namespaces = reprconf.NamespaceSet()
toolboxes = {'tools': cherrypy.tools}
log = None
"""A LogManager instance. See _cplogging."""
wsgiapp = None
"""A CPWSGIApp instance. See _cpwsgi."""
request_class = _cprequest.Request
response_class = _cprequest.Response
relative_urls = False
def __init__(self, root, script_name='', config=None):
"""Initialize Application with given root."""
self.log = _cplogging.LogManager(id(self), cherrypy.log.logger_root)
self.root = root
self.script_name = script_name
self.wsgiapp = _cpwsgi.CPWSGIApp(self)
self.namespaces = self.namespaces.copy()
self.namespaces['log'] = lambda k, v: setattr(self.log, k, v)
self.namespaces['wsgi'] = self.wsgiapp.namespace_handler
self.config = self.__class__.config.copy()
if config:
self.merge(config)
def __repr__(self):
"""Generate a representation of the Application instance."""
return '%s.%s(%r, %r)' % (self.__module__, self.__class__.__name__,
self.root, self.script_name)
script_name_doc = """The URI "mount point" for this app. A mount point
is that portion of the URI which is constant for all URIs that are
serviced by this application; it does not include scheme, host, or proxy
("virtual host") portions of the URI.
For example, if script_name is "/my/cool/app", then the URL
"http://www.example.com/my/cool/app/page1" might be handled by a
"page1" method on the root object.
The value of script_name MUST NOT end in a slash. If the script_name
refers to the root of the URI, it MUST be an empty string (not "/").
If script_name is explicitly set to None, then the script_name will be
provided for each call from request.wsgi_environ['SCRIPT_NAME'].
"""
@property
def script_name(self): # noqa: D401; irrelevant for properties
"""The URI "mount point" for this app.
A mount point is that portion of the URI which is constant for all URIs
that are serviced by this application; it does not include scheme,
host, or proxy ("virtual host") portions of the URI.
For example, if script_name is "/my/cool/app", then the URL
"http://www.example.com/my/cool/app/page1" might be handled by a
"page1" method on the root object.
The value of script_name MUST NOT end in a slash. If the script_name
refers to the root of the URI, it MUST be an empty string (not "/").
If script_name is explicitly set to None, then the script_name will be
provided for each call from request.wsgi_environ['SCRIPT_NAME'].
"""
if self._script_name is not None:
return self._script_name
# A `_script_name` with a value of None signals that the script name
# should be pulled from WSGI environ.
return cherrypy.serving.request.wsgi_environ['SCRIPT_NAME'].rstrip('/')
@script_name.setter
def script_name(self, value):
if value:
value = value.rstrip('/')
self._script_name = value
def merge(self, config):
"""Merge the given config into self.config."""
_cpconfig.merge(self.config, config)
# Handle namespaces specified in config.
self.namespaces(self.config.get('/', {}))
def find_config(self, path, key, default=None):
"""Return the most-specific value for key along path, or default."""
trail = path or '/'
while trail:
nodeconf = self.config.get(trail, {})
if key in nodeconf:
return nodeconf[key]
lastslash = trail.rfind('/')
if lastslash == -1:
break
elif lastslash == 0 and trail != '/':
trail = '/'
else:
trail = trail[:lastslash]
return default
def get_serving(self, local, remote, scheme, sproto):
"""Create and return a Request and Response object."""
req = self.request_class(local, remote, scheme, sproto)
req.app = self
for name, toolbox in self.toolboxes.items():
req.namespaces[name] = toolbox
resp = self.response_class()
cherrypy.serving.load(req, resp)
cherrypy.engine.publish('acquire_thread')
cherrypy.engine.publish('before_request')
return req, resp
def release_serving(self):
"""Release the current serving (request and response)."""
req = cherrypy.serving.request
cherrypy.engine.publish('after_request')
try:
req.close()
except Exception:
cherrypy.log(traceback=True, severity=40)
cherrypy.serving.clear()
def __call__(self, environ, start_response):
"""Call a WSGI-callable."""
return self.wsgiapp(environ, start_response)
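# Hedged usage sketch (editor's addition, not part of the upstream module):
# illustrates how ``merge`` and ``find_config`` defined above interact. The
# paths and config keys are placeholders chosen only for the example.
def _example_find_config():
    app = Application(root=None, script_name='/api')
    app.merge({'/': {'tools.gzip.on': True},
               '/static': {'tools.gzip.on': False}})
    # The most specific matching path wins: '/static/css' falls back to the
    # '/static' section, so this returns False.
    return app.find_config('/static/css', 'tools.gzip.on', default=None)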
class Tree(object):
"""A registry of CherryPy applications, mounted at diverse points.
An instance of this class may also be used as a WSGI callable
(WSGI application object), in which case it dispatches to all
mounted apps.
"""
apps = {}
"""
A dict of the form {script name: application}, where "script name"
is a string declaring the URI mount point (no trailing slash), and
"application" is an instance of cherrypy.Application (or an arbitrary
WSGI callable if you happen to be using a WSGI server)."""
def __init__(self):
"""Initialize registry Tree."""
self.apps = {}
def mount(self, root, script_name='', config=None):
"""Mount a new app from a root object, script_name, and config.
root
An instance of a "controller class" (a collection of page
handler methods) which represents the root of the application.
This may also be an Application instance, or None if using
a dispatcher other than the default.
script_name
A string containing the "mount point" of the application.
This should start with a slash, and be the path portion of the
URL at which to mount the given root. For example, if root.index()
will handle requests to "http://www.example.com:8080/dept/app1/",
then the script_name argument would be "/dept/app1".
It MUST NOT end in a slash. If the script_name refers to the
root of the URI, it MUST be an empty string (not "/").
config
A file or dict containing application config.
"""
if script_name is None:
raise TypeError(
"The 'script_name' argument may not be None. Application "
'objects may, however, possess a script_name of None (in '
                'order to inspect the WSGI environ for SCRIPT_NAME upon each '
'request). You cannot mount such Applications on this Tree; '
'you must pass them to a WSGI server interface directly.')
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip('/')
if isinstance(root, Application):
app = root
if script_name != '' and script_name != app.script_name:
raise ValueError(
'Cannot specify a different script name and pass an '
'Application instance to cherrypy.mount')
script_name = app.script_name
else:
app = Application(root, script_name)
# If mounted at "", add favicon.ico
needs_favicon = (
script_name == ''
and root is not None
and not hasattr(root, 'favicon_ico')
)
if needs_favicon:
favicon = os.path.join(
os.getcwd(),
os.path.dirname(__file__),
'favicon.ico',
)
root.favicon_ico = tools.staticfile.handler(favicon)
if config:
app.merge(config)
self.apps[script_name] = app
return app
def graft(self, wsgi_callable, script_name=''):
"""Mount a wsgi callable at the given script_name."""
# Next line both 1) strips trailing slash and 2) maps "/" -> "".
script_name = script_name.rstrip('/')
self.apps[script_name] = wsgi_callable
def script_name(self, path=None):
"""Return the script_name of the app at the given path, or None.
If path is None, cherrypy.request is used.
"""
if path is None:
try:
request = cherrypy.serving.request
path = httputil.urljoin(request.script_name,
request.path_info)
except AttributeError:
return None
while True:
if path in self.apps:
return path
if path == '':
return None
# Move one node up the tree and try again.
path = path[:path.rfind('/')]
def __call__(self, environ, start_response):
"""Pre-initialize WSGI env and call WSGI-callable."""
# If you're calling this, then you're probably setting SCRIPT_NAME
# to '' (some WSGI servers always set SCRIPT_NAME to '').
# Try to look up the app using the full path.
env1x = environ
path = httputil.urljoin(env1x.get('SCRIPT_NAME', ''),
env1x.get('PATH_INFO', ''))
sn = self.script_name(path or '/')
if sn is None:
start_response('404 Not Found', [])
return []
app = self.apps[sn]
# Correct the SCRIPT_NAME and PATH_INFO environ entries.
environ = environ.copy()
environ['SCRIPT_NAME'] = sn
environ['PATH_INFO'] = path[len(sn.rstrip('/')):]
return app(environ, start_response)
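# Hedged usage sketch (editor's addition, not part of the upstream module):
# shows the Application/Tree relationship defined above. ``Root`` is a
# hypothetical controller class; real code usually goes through
# ``cherrypy.tree.mount`` or ``cherrypy.quickstart`` instead of building a
# Tree by hand.
def _example_mount_app():
    class Root(object):
        @cherrypy.expose
        def index(self):
            return 'hello'
    tree = Tree()
    app = tree.mount(Root(), script_name='/demo')
    # script_name() walks up the path until it finds a mounted app.
    assert tree.script_name('/demo/index') == '/demo'
    return app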
|
import traceback
from typing import Any, Callable, Iterable, List, Tuple, Union
from PyQt5.QtCore import pyqtSignal, pyqtBoundSignal, QObject
from qutebrowser.utils import usertypes, log
def _log_stack(typ: str, stack: str) -> None:
"""Log the given message stacktrace.
Args:
typ: The type of the message.
stack: An optional stacktrace.
"""
lines = stack.splitlines()
stack_text = '\n'.join(line.rstrip() for line in lines)
log.message.debug("Stack for {} message:\n{}".format(typ, stack_text))
def error(message: str, *, stack: str = None, replace: bool = False) -> None:
"""Display an error message.
Args:
message: The message to show.
stack: The stack trace to show (if any).
replace: Replace existing messages which are still being shown.
"""
if stack is None:
stack = ''.join(traceback.format_stack())
typ = 'error'
else:
typ = 'error (from exception)'
_log_stack(typ, stack)
log.message.error(message)
global_bridge.show(usertypes.MessageLevel.error, message, replace)
def warning(message: str, *, replace: bool = False) -> None:
"""Display a warning message.
Args:
message: The message to show.
replace: Replace existing messages which are still being shown.
"""
_log_stack('warning', ''.join(traceback.format_stack()))
log.message.warning(message)
global_bridge.show(usertypes.MessageLevel.warning, message, replace)
def info(message: str, *, replace: bool = False) -> None:
"""Display an info message.
Args:
message: The message to show.
replace: Replace existing messages which are still being shown.
"""
log.message.info(message)
global_bridge.show(usertypes.MessageLevel.info, message, replace)
def _build_question(title: str,
text: str = None, *,
mode: usertypes.PromptMode,
default: Union[None, bool, str] = None,
abort_on: Iterable[pyqtBoundSignal] = (),
url: str = None,
option: bool = None) -> usertypes.Question:
"""Common function for ask/ask_async."""
question = usertypes.Question()
question.title = title
question.text = text
question.mode = mode
question.default = default
question.url = url
if option is not None:
if mode != usertypes.PromptMode.yesno:
raise ValueError("Can only 'option' with PromptMode.yesno")
if url is None:
raise ValueError("Need 'url' given when 'option' is given")
question.option = option
for sig in abort_on:
sig.connect(question.abort)
return question
def ask(*args: Any, **kwargs: Any) -> Any:
"""Ask a modular question in the statusbar (blocking).
Args:
message: The message to display to the user.
mode: A PromptMode.
default: The default value to display.
text: Additional text to show
option: The option for always/never question answers.
Only available with PromptMode.yesno.
abort_on: A list of signals which abort the question if emitted.
Return:
The answer the user gave or None if the prompt was cancelled.
"""
question = _build_question(*args, **kwargs) # pylint: disable=missing-kwoa
global_bridge.ask(question, blocking=True)
answer = question.answer
question.deleteLater()
return answer
def ask_async(title: str,
mode: usertypes.PromptMode,
handler: Callable[[Any], None],
**kwargs: Any) -> None:
"""Ask an async question in the statusbar.
Args:
message: The message to display to the user.
mode: A PromptMode.
handler: The function to get called with the answer as argument.
default: The default value to display.
text: Additional text to show.
"""
question = _build_question(title, mode=mode, **kwargs)
question.answered.connect(handler)
question.completed.connect(question.deleteLater)
global_bridge.ask(question, blocking=False)
_ActionType = Callable[[], Any]
def confirm_async(*, yes_action: _ActionType,
no_action: _ActionType = None,
cancel_action: _ActionType = None,
**kwargs: Any) -> usertypes.Question:
"""Ask a yes/no question to the user and execute the given actions.
Args:
message: The message to display to the user.
yes_action: Callable to be called when the user answered yes.
no_action: Callable to be called when the user answered no.
cancel_action: Callable to be called when the user cancelled the
question.
default: True/False to set a default value, or None.
option: The option for always/never question answers.
text: Additional text to show.
Return:
The question object.
"""
kwargs['mode'] = usertypes.PromptMode.yesno
question = _build_question(**kwargs) # pylint: disable=missing-kwoa
question.answered_yes.connect(yes_action)
if no_action is not None:
question.answered_no.connect(no_action)
if cancel_action is not None:
question.cancelled.connect(cancel_action)
question.completed.connect(question.deleteLater)
global_bridge.ask(question, blocking=False)
return question
class GlobalMessageBridge(QObject):
"""Global (not per-window) message bridge for errors/infos/warnings.
Attributes:
_connected: Whether a slot is connected and we can show messages.
_cache: Messages shown while we were not connected.
Signals:
show_message: Show a message
arg 0: A MessageLevel member
arg 1: The text to show
arg 2: Whether to replace other messages with
replace=True.
prompt_done: Emitted when a prompt was answered somewhere.
ask_question: Ask a question to the user.
arg 0: The Question object to ask.
arg 1: Whether to block (True) or ask async (False).
IMPORTANT: Slots need to be connected to this signal via
a Qt.DirectConnection!
mode_left: Emitted when a keymode was left in any window.
"""
show_message = pyqtSignal(usertypes.MessageLevel, str, bool)
prompt_done = pyqtSignal(usertypes.KeyMode)
ask_question = pyqtSignal(usertypes.Question, bool)
mode_left = pyqtSignal(usertypes.KeyMode)
clear_messages = pyqtSignal()
def __init__(self, parent: QObject = None) -> None:
super().__init__(parent)
self._connected = False
self._cache: List[Tuple[usertypes.MessageLevel, str, bool]] = []
def ask(self, question: usertypes.Question,
blocking: bool, *,
log_stack: bool = False) -> None:
"""Ask a question to the user.
Note this method doesn't return the answer, it only blocks. The caller
needs to construct a Question object and get the answer.
Args:
question: A Question object.
blocking: Whether to return immediately or wait until the
question is answered.
log_stack: ignored
"""
self.ask_question.emit(question, blocking)
def show(self, level: usertypes.MessageLevel,
text: str,
replace: bool = False) -> None:
"""Show the given message."""
if self._connected:
self.show_message.emit(level, text, replace)
else:
self._cache.append((level, text, replace))
def flush(self) -> None:
"""Flush messages which accumulated while no handler was connected.
This is so we don't miss messages shown during some early init phase.
It needs to be called once the show_message signal is connected.
"""
self._connected = True
for args in self._cache:
self.show(*args)
self._cache = []
global_bridge = GlobalMessageBridge()
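# Hedged usage sketch (editor's addition, not part of the upstream module):
# shows how a caller might use confirm_async() from above. The title, text and
# callbacks are placeholders; global_bridge.ask() only emits ask_question, so
# a prompt widget must be connected to that signal for anything to appear.
def _example_confirm_quit() -> usertypes.Question:
    return confirm_async(
        title='Quit?',
        text='There are downloads running.',
        yes_action=lambda: log.message.info('quitting'),
        no_action=lambda: log.message.info('staying'),
        default=False,
    )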
|
import re
import os
import traceback
_desc_re = re.compile(r'\s*Description:\s*(.*)')
_expect_re = re.compile(r'\s*Expect:\s*(.*)')
_data_expect_re = re.compile(r"entries\[0\]\['[^']+'\](?:\[0\]\['value'\])?\s*==\s*(.*)")
_feed_data_expect_re = re.compile(r"feed\['[^']+'\]\s*==\s*(.*)")
def parse_content(content):
match = _desc_re.search(content)
desc = match.group(1)
match = _expect_re.search(content)
expect = match.group(1)
data = None
for regex in [_data_expect_re, _feed_data_expect_re]:
match = regex.search(expect)
if match:
# Icky, but I'll trust it
data = eval(match.group(1).strip())
break
c = None
for tag in ['content', 'summary', 'title', 'copyright', 'tagline', 'info', 'subtitle', 'fullitem', 'body', 'description', 'content:encoded']:
regex = re.compile(r"<%s.*?>(.*)</%s>" % (tag, tag), re.S)
match = regex.search(content)
if match:
c = match.group(1)
break
assert c is not None
# Seems like body isn't quoted
if tag != 'body':
        c = c.replace('&lt;', '<')
        c = c.replace('&amp;', '&')
# FIXME: I should really do more unescaping...
return {
'Description': desc,
'Expect': expect,
'data': data,
'content': c}
def serialize_content(d):
s = '''\
Description: %(Description)s
Expect: %(Expect)s
Options:
%(content)s
''' % d
if d.get('data') is not None:
s += '----------\n%s' % d['data']
return s
def translate_file(filename):
    # Read as text so the str-based regexes in parse_content() work on Python 3.
    with open(filename) as f:
        c = f.read()
    try:
        output = serialize_content(parse_content(c))
    except Exception:
        print('Bad data in %s:' % filename)
        print(c)
        traceback.print_exc()
        print('-' * 60)
        return
    new = os.path.splitext(filename)[0] + '.data'
    with open(new, 'w') as f:
        f.write(output)
def translate_all(dir):
for fn in os.listdir(dir):
fn = os.path.join(dir, fn)
if fn.endswith('.xml'):
translate_file(fn)
if __name__ == '__main__':
translate_all(os.path.join(os.path.dirname(__file__), 'feedparser-data'))
|
import datetime
import io
import logging
import os
import re
import subprocess
import sys
import time
import unittest
import warnings
import contextlib
import portend
import pytest
from cheroot.test import webtest
import cherrypy
from cherrypy._cpcompat import text_or_bytes, HTTPSConnection, ntob
from cherrypy.lib import httputil
from cherrypy.lib import gctools
log = logging.getLogger(__name__)
thisdir = os.path.abspath(os.path.dirname(__file__))
serverpem = os.path.join(os.getcwd(), thisdir, 'test.pem')
class Supervisor(object):
"""Base class for modeling and controlling servers during testing."""
def __init__(self, **kwargs):
for k, v in kwargs.items():
if k == 'port':
setattr(self, k, int(v))
setattr(self, k, v)
def log_to_stderr(msg, level):
return sys.stderr.write(msg + os.linesep)
class LocalSupervisor(Supervisor):
"""Base class for modeling/controlling servers which run in the same
process.
When the server side runs in a different process, start/stop can dump all
state between each test module easily. When the server side runs in the
same process as the client, however, we have to do a bit more work to
ensure config and mounted apps are reset between tests.
"""
using_apache = False
using_wsgi = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
cherrypy.server.httpserver = self.httpserver_class
# This is perhaps the wrong place for this call but this is the only
# place that i've found so far that I KNOW is early enough to set this.
cherrypy.config.update({'log.screen': False})
engine = cherrypy.engine
if hasattr(engine, 'signal_handler'):
engine.signal_handler.subscribe()
if hasattr(engine, 'console_control_handler'):
engine.console_control_handler.subscribe()
def start(self, modulename=None):
"""Load and start the HTTP server."""
if modulename:
# Unhook httpserver so cherrypy.server.start() creates a new
# one (with config from setup_server, if declared).
cherrypy.server.httpserver = None
cherrypy.engine.start()
self.sync_apps()
def sync_apps(self):
"""Tell the server about any apps which the setup functions mounted."""
pass
def stop(self):
td = getattr(self, 'teardown', None)
if td:
td()
cherrypy.engine.exit()
for name, server in getattr(cherrypy, 'servers', {}).copy().items():
server.unsubscribe()
del cherrypy.servers[name]
class NativeServerSupervisor(LocalSupervisor):
"""Server supervisor for the builtin HTTP server."""
httpserver_class = 'cherrypy._cpnative_server.CPHTTPServer'
using_apache = False
using_wsgi = False
def __str__(self):
return 'Builtin HTTP Server on %s:%s' % (self.host, self.port)
class LocalWSGISupervisor(LocalSupervisor):
"""Server supervisor for the builtin WSGI server."""
httpserver_class = 'cherrypy._cpwsgi_server.CPWSGIServer'
using_apache = False
using_wsgi = True
def __str__(self):
return 'Builtin WSGI Server on %s:%s' % (self.host, self.port)
def sync_apps(self):
"""Hook a new WSGI app into the origin server."""
cherrypy.server.httpserver.wsgi_app = self.get_app()
def get_app(self, app=None):
"""Obtain a new (decorated) WSGI app to hook into the origin server."""
if app is None:
app = cherrypy.tree
if self.validate:
try:
from wsgiref import validate
except ImportError:
warnings.warn(
'Error importing wsgiref. The validator will not run.')
else:
# wraps the app in the validator
app = validate.validator(app)
return app
def get_cpmodpy_supervisor(**options):
from cherrypy.test import modpy
sup = modpy.ModPythonSupervisor(**options)
sup.template = modpy.conf_cpmodpy
return sup
def get_modpygw_supervisor(**options):
from cherrypy.test import modpy
sup = modpy.ModPythonSupervisor(**options)
sup.template = modpy.conf_modpython_gateway
sup.using_wsgi = True
return sup
def get_modwsgi_supervisor(**options):
from cherrypy.test import modwsgi
return modwsgi.ModWSGISupervisor(**options)
def get_modfcgid_supervisor(**options):
from cherrypy.test import modfcgid
return modfcgid.ModFCGISupervisor(**options)
def get_modfastcgi_supervisor(**options):
from cherrypy.test import modfastcgi
return modfastcgi.ModFCGISupervisor(**options)
def get_wsgi_u_supervisor(**options):
cherrypy.server.wsgi_version = ('u', 0)
return LocalWSGISupervisor(**options)
class CPWebCase(webtest.WebCase):
script_name = ''
scheme = 'http'
available_servers = {'wsgi': LocalWSGISupervisor,
'wsgi_u': get_wsgi_u_supervisor,
'native': NativeServerSupervisor,
'cpmodpy': get_cpmodpy_supervisor,
'modpygw': get_modpygw_supervisor,
'modwsgi': get_modwsgi_supervisor,
'modfcgid': get_modfcgid_supervisor,
'modfastcgi': get_modfastcgi_supervisor,
}
default_server = 'wsgi'
@classmethod
def _setup_server(cls, supervisor, conf):
v = sys.version.split()[0]
log.info('Python version used to run this test script: %s' % v)
log.info('CherryPy version: %s' % cherrypy.__version__)
if supervisor.scheme == 'https':
ssl = ' (ssl)'
else:
ssl = ''
log.info('HTTP server version: %s%s' % (supervisor.protocol, ssl))
log.info('PID: %s' % os.getpid())
cherrypy.server.using_apache = supervisor.using_apache
cherrypy.server.using_wsgi = supervisor.using_wsgi
if sys.platform[:4] == 'java':
cherrypy.config.update({'server.nodelay': False})
if isinstance(conf, text_or_bytes):
parser = cherrypy.lib.reprconf.Parser()
conf = parser.dict_from_file(conf).get('global', {})
else:
conf = conf or {}
baseconf = conf.copy()
baseconf.update({'server.socket_host': supervisor.host,
'server.socket_port': supervisor.port,
'server.protocol_version': supervisor.protocol,
'environment': 'test_suite',
})
if supervisor.scheme == 'https':
# baseconf['server.ssl_module'] = 'builtin'
baseconf['server.ssl_certificate'] = serverpem
baseconf['server.ssl_private_key'] = serverpem
# helper must be imported lazily so the coverage tool
# can run against module-level statements within cherrypy.
# Also, we have to do "from cherrypy.test import helper",
# exactly like each test module does, because a relative import
# would stick a second instance of webtest in sys.modules,
# and we wouldn't be able to globally override the port anymore.
if supervisor.scheme == 'https':
webtest.WebCase.HTTP_CONN = HTTPSConnection
return baseconf
@classmethod
def setup_class(cls):
''
# Creates a server
conf = {
'scheme': 'http',
'protocol': 'HTTP/1.1',
'port': 54583,
'host': '127.0.0.1',
'validate': False,
'server': 'wsgi',
}
supervisor_factory = cls.available_servers.get(
conf.get('server', 'wsgi'))
if supervisor_factory is None:
raise RuntimeError('Unknown server in config: %s' % conf['server'])
supervisor = supervisor_factory(**conf)
# Copied from "run_test_suite"
cherrypy.config.reset()
baseconf = cls._setup_server(supervisor, conf)
cherrypy.config.update(baseconf)
setup_client()
if hasattr(cls, 'setup_server'):
# Clear the cherrypy tree and clear the wsgi server so that
# it can be updated with the new root
cherrypy.tree = cherrypy._cptree.Tree()
cherrypy.server.httpserver = None
cls.setup_server()
# Add a resource for verifying there are no refleaks
# to *every* test class.
cherrypy.tree.mount(gctools.GCRoot(), '/gc')
cls.do_gc_test = True
supervisor.start(cls.__module__)
cls.supervisor = supervisor
@classmethod
def teardown_class(cls):
''
if hasattr(cls, 'setup_server'):
cls.supervisor.stop()
do_gc_test = False
def test_gc(self):
if not self.do_gc_test:
return
self.getPage('/gc/stats')
try:
self.assertBody('Statistics:')
except Exception:
'Failures occur intermittently. See #1420'
def prefix(self):
return self.script_name.rstrip('/')
def base(self):
if ((self.scheme == 'http' and self.PORT == 80) or
(self.scheme == 'https' and self.PORT == 443)):
port = ''
else:
port = ':%s' % self.PORT
return '%s://%s%s%s' % (self.scheme, self.HOST, port,
self.script_name.rstrip('/'))
def exit(self):
sys.exit()
def getPage(self, url, *args, **kwargs):
"""Open the url.
"""
if self.script_name:
url = httputil.urljoin(self.script_name, url)
return webtest.WebCase.getPage(self, url, *args, **kwargs)
def skip(self, msg='skipped '):
pytest.skip(msg)
def assertErrorPage(self, status, message=None, pattern=''):
"""Compare the response body with a built in error page.
The function will optionally look for the regexp pattern,
within the exception embedded in the error page."""
# This will never contain a traceback
page = cherrypy._cperror.get_error_page(status, message=message)
# First, test the response body without checking the traceback.
# Stick a match-all group (.*) in to grab the traceback.
def esc(text):
return re.escape(ntob(text))
epage = re.escape(page)
epage = epage.replace(
esc('<pre id="traceback"></pre>'),
esc('<pre id="traceback">') + b'(.*)' + esc('</pre>'))
m = re.match(epage, self.body, re.DOTALL)
if not m:
self._handlewebError(
'Error page does not match; expected:\n' + page)
return
# Now test the pattern against the traceback
if pattern is None:
# Special-case None to mean that there should be *no* traceback.
if m and m.group(1):
self._handlewebError('Error page contains traceback')
else:
if (m is None) or (
not re.search(ntob(re.escape(pattern), self.encoding),
m.group(1))):
msg = 'Error page does not contain %s in traceback'
self._handlewebError(msg % repr(pattern))
date_tolerance = 2
def assertEqualDates(self, dt1, dt2, seconds=None):
"""Assert abs(dt1 - dt2) is within Y seconds."""
if seconds is None:
seconds = self.date_tolerance
if dt1 > dt2:
diff = dt1 - dt2
else:
diff = dt2 - dt1
if not diff < datetime.timedelta(seconds=seconds):
raise AssertionError('%r and %r are not within %r seconds.' %
(dt1, dt2, seconds))
def _test_method_sorter(_, x, y):
"""Monkeypatch the test sorter to always run test_gc last in each suite."""
if x == 'test_gc':
return 1
if y == 'test_gc':
return -1
if x > y:
return 1
if x < y:
return -1
return 0
unittest.TestLoader.sortTestMethodsUsing = _test_method_sorter
def setup_client():
"""Set up the WebCase classes to match the server's socket settings."""
webtest.WebCase.PORT = cherrypy.server.socket_port
webtest.WebCase.HOST = cherrypy.server.socket_host
if cherrypy.server.ssl_certificate:
CPWebCase.scheme = 'https'
# --------------------------- Spawning helpers --------------------------- #
class CPProcess(object):
pid_file = os.path.join(thisdir, 'test.pid')
config_file = os.path.join(thisdir, 'test.conf')
config_template = """[global]
server.socket_host: '%(host)s'
server.socket_port: %(port)s
checker.on: False
log.screen: False
log.error_file: r'%(error_log)s'
log.access_file: r'%(access_log)s'
%(ssl)s
%(extra)s
"""
error_log = os.path.join(thisdir, 'test.error.log')
access_log = os.path.join(thisdir, 'test.access.log')
def __init__(self, wait=False, daemonize=False, ssl=False,
socket_host=None, socket_port=None):
self.wait = wait
self.daemonize = daemonize
self.ssl = ssl
self.host = socket_host or cherrypy.server.socket_host
self.port = socket_port or cherrypy.server.socket_port
def write_conf(self, extra=''):
if self.ssl:
serverpem = os.path.join(thisdir, 'test.pem')
ssl = """
server.ssl_certificate: r'%s'
server.ssl_private_key: r'%s'
""" % (serverpem, serverpem)
else:
ssl = ''
conf = self.config_template % {
'host': self.host,
'port': self.port,
'error_log': self.error_log,
'access_log': self.access_log,
'ssl': ssl,
'extra': extra,
}
with io.open(self.config_file, 'w', encoding='utf-8') as f:
f.write(str(conf))
def start(self, imports=None):
"""Start cherryd in a subprocess."""
portend.free(self.host, self.port, timeout=1)
args = [
'-m',
'cherrypy',
'-c', self.config_file,
'-p', self.pid_file,
]
r"""
Command for running cherryd server with autoreload enabled
Using
```
['-c',
"__requires__ = 'CherryPy'; \
import pkg_resources, re, sys; \
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0]); \
sys.exit(\
pkg_resources.load_entry_point(\
'CherryPy', 'console_scripts', 'cherryd')())"]
```
doesn't work as it's impossible to reconstruct the `-c`'s contents.
Ref: https://github.com/cherrypy/cherrypy/issues/1545
"""
if not isinstance(imports, (list, tuple)):
imports = [imports]
for i in imports:
if i:
args.append('-i')
args.append(i)
if self.daemonize:
args.append('-d')
env = os.environ.copy()
# Make sure we import the cherrypy package in which this module is
# defined.
grandparentdir = os.path.abspath(os.path.join(thisdir, '..', '..'))
if env.get('PYTHONPATH', ''):
env['PYTHONPATH'] = os.pathsep.join(
(grandparentdir, env['PYTHONPATH']))
else:
env['PYTHONPATH'] = grandparentdir
self._proc = subprocess.Popen([sys.executable] + args, env=env)
if self.wait:
self.exit_code = self._proc.wait()
else:
portend.occupied(self.host, self.port, timeout=5)
# Give the engine a wee bit more time to finish STARTING
if self.daemonize:
time.sleep(2)
else:
time.sleep(1)
def get_pid(self):
if self.daemonize:
return int(open(self.pid_file, 'rb').read())
return self._proc.pid
def join(self):
"""Wait for the process to exit."""
if self.daemonize:
return self._join_daemon()
self._proc.wait()
def _join_daemon(self):
with contextlib.suppress(IOError):
os.waitpid(self.get_pid(), 0)
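# Hedged usage sketch (editor's addition, not part of the upstream module):
# shows the intended lifecycle of CPProcess above. This really spawns a
# cherryd subprocess on the configured host/port; the extra config line and
# the terminate/join teardown are illustrative, not prescribed by the module.
def _example_spawn_cherryd():
    proc = CPProcess(wait=False, daemonize=False)
    proc.write_conf(extra="engine.autoreload.on: False")
    proc.start(imports=None)
    try:
        return proc.get_pid()
    finally:
        proc._proc.terminate()
        proc.join()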
|
import numpy as np
from ..annotations import (Annotations, _annotations_starts_stops)
from ..transforms import (quat_to_rot, _average_quats, _angle_between_quats,
apply_trans, _quat_to_affine)
from ..filter import filter_data
from .. import Transform
from ..utils import (_mask_to_onsets_offsets, logger, verbose)
@verbose
def annotate_muscle_zscore(raw, threshold=4, ch_type=None, min_length_good=0.1,
filter_freq=(110, 140), n_jobs=1, verbose=None):
"""Create annotations for segments that likely contain muscle artifacts.
Detects data segments containing activity in the frequency range given by
``filter_freq`` whose envelope magnitude exceeds the specified z-score
threshold, when summed across channels and divided by ``sqrt(n_channels)``.
False-positive transient peaks are prevented by low-pass filtering the
    resulting z-score time series at 4 Hz. Only one channel type is used;
    if ``ch_type`` is ``None``, the first available type among ``mag``,
    ``grad`` and ``eeg`` is selected.
See :footcite:`Muthukumaraswamy2013` for background on choosing
``filter_freq`` and ``threshold``.
Parameters
----------
raw : instance of Raw
Data to estimate segments with muscle artifacts.
threshold : float
The threshold in z-scores for marking segments as containing muscle
activity artifacts.
ch_type : 'mag' | 'grad' | 'eeg' | None
The type of sensors to use. If ``None`` it will take the first type in
``mag``, ``grad``, ``eeg``.
min_length_good : float | None
The shortest allowed duration of "good data" (in seconds) between
adjacent annotations; shorter segments will be incorporated into the
        surrounding annotations. ``None`` is equivalent to ``0``.
Default is ``0.1``.
filter_freq : array-like, shape (2,)
The lower and upper frequencies of the band-pass filter.
Default is ``(110, 140)``.
%(n_jobs)s
%(verbose)s
Returns
-------
annot : mne.Annotations
Periods with muscle artifacts annotated as BAD_muscle.
scores_muscle : array
Z-score values averaged across channels for each sample.
References
----------
.. footbibliography::
"""
from scipy.stats import zscore
from scipy.ndimage.measurements import label
raw_copy = raw.copy()
if ch_type is None:
raw_ch_type = raw_copy.get_channel_types()
if 'mag' in raw_ch_type:
ch_type = 'mag'
elif 'grad' in raw_ch_type:
ch_type = 'grad'
elif 'eeg' in raw_ch_type:
ch_type = 'eeg'
else:
raise ValueError('No M/EEG channel types found, please specify a'
' ch_type or provide M/EEG sensor data')
logger.info('Using %s sensors for muscle artifact detection'
% (ch_type))
if ch_type in ('mag', 'grad'):
raw_copy.pick_types(meg=ch_type, ref_meg=False)
else:
ch_type = {'meg': False, ch_type: True}
raw_copy.pick_types(**ch_type)
raw_copy.filter(filter_freq[0], filter_freq[1], fir_design='firwin',
pad="reflect_limited", n_jobs=n_jobs)
raw_copy.apply_hilbert(envelope=True, n_jobs=n_jobs)
data = raw_copy.get_data(reject_by_annotation="NaN")
nan_mask = ~np.isnan(data[0])
sfreq = raw_copy.info['sfreq']
art_scores = zscore(data[:, nan_mask], axis=1)
art_scores = art_scores.sum(axis=0) / np.sqrt(art_scores.shape[0])
art_scores = filter_data(art_scores, sfreq, None, 4)
scores_muscle = np.zeros(data.shape[1])
scores_muscle[nan_mask] = art_scores
art_mask = scores_muscle > threshold
# return muscle scores with NaNs
scores_muscle[~nan_mask] = np.nan
# remove artifact free periods shorter than min_length_good
min_length_good = 0 if min_length_good is None else min_length_good
min_samps = min_length_good * sfreq
comps, num_comps = label(art_mask == 0)
for com in range(1, num_comps + 1):
l_idx = np.nonzero(comps == com)[0]
if len(l_idx) < min_samps:
art_mask[l_idx] = True
annot = _annotations_from_mask(raw_copy.times, art_mask, 'BAD_muscle')
return annot, scores_muscle
def annotate_movement(raw, pos, rotation_velocity_limit=None,
translation_velocity_limit=None,
mean_distance_limit=None, use_dev_head_trans='average'):
"""Detect segments with movement.
    Detects segments in which the head rotation velocity, the translation
    velocity, or the distance from the mean head position exceeds the
    corresponding limit, and returns annotations marking those bad segments.
Parameters
----------
raw : instance of Raw
Data to compute head position.
pos : array, shape (N, 10)
The position and quaternion parameters from cHPI fitting. Obtained
with `mne.chpi` functions.
rotation_velocity_limit : float
Head rotation velocity limit in radians per second.
translation_velocity_limit : float
        Head translation velocity limit in meters per second.
mean_distance_limit : float
Head position limit from mean recording in meters.
use_dev_head_trans : 'average' (default) | 'info'
Identify the device to head transform used to define the
fixed HPI locations for computing moving distances.
If ``average`` the average device to head transform is
computed using ``compute_average_dev_head_t``.
If ``info``, ``raw.info['dev_head_t']`` is used.
Returns
-------
annot : mne.Annotations
Periods with head motion.
hpi_disp : array
Head position over time with respect to the mean head pos.
See Also
--------
compute_average_dev_head_t
"""
sfreq = raw.info['sfreq']
hp_ts = pos[:, 0].copy()
hp_ts -= raw.first_samp / sfreq
dt = np.diff(hp_ts)
hp_ts = np.concatenate([hp_ts, [hp_ts[-1] + 1. / sfreq]])
annot = Annotations([], [], [], orig_time=None) # rel to data start
# Annotate based on rotational velocity
t_tot = raw.times[-1]
if rotation_velocity_limit is not None:
assert rotation_velocity_limit > 0
# Rotational velocity (radians / sec)
r = _angle_between_quats(pos[:-1, 1:4], pos[1:, 1:4])
r /= dt
bad_mask = (r >= np.deg2rad(rotation_velocity_limit))
onsets, offsets = _mask_to_onsets_offsets(bad_mask)
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(u'Omitting %5.1f%% (%3d segments): '
u'ω >= %5.1f°/s (max: %0.1f°/s)'
% (bad_pct, len(onsets), rotation_velocity_limit,
np.rad2deg(r.max())))
annot += _annotations_from_mask(hp_ts, bad_mask, 'BAD_mov_rotat_vel')
# Annotate based on translational velocity limit
if translation_velocity_limit is not None:
assert translation_velocity_limit > 0
v = np.linalg.norm(np.diff(pos[:, 4:7], axis=0), axis=-1)
v /= dt
bad_mask = (v >= translation_velocity_limit)
onsets, offsets = _mask_to_onsets_offsets(bad_mask)
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(u'Omitting %5.1f%% (%3d segments): '
u'v >= %5.4fm/s (max: %5.4fm/s)'
% (bad_pct, len(onsets), translation_velocity_limit,
v.max()))
annot += _annotations_from_mask(hp_ts, bad_mask, 'BAD_mov_trans_vel')
# Annotate based on displacement from mean head position
disp = []
if mean_distance_limit is not None:
assert mean_distance_limit > 0
# compute dev to head transform for fixed points
use_dev_head_trans = use_dev_head_trans.lower()
if use_dev_head_trans not in ['average', 'info']:
raise ValueError('use_dev_head_trans must be either' +
' \'average\' or \'info\': got \'%s\''
% (use_dev_head_trans,))
if use_dev_head_trans == 'average':
fixed_dev_head_t = compute_average_dev_head_t(raw, pos)
elif use_dev_head_trans == 'info':
fixed_dev_head_t = raw.info['dev_head_t']
# Get static head pos from file, used to convert quat to cartesian
chpi_pos = sorted([d for d in raw.info['hpi_results'][-1]
['dig_points']], key=lambda x: x['ident'])
chpi_pos = np.array([d['r'] for d in chpi_pos])
# Get head pos changes during recording
chpi_pos_mov = np.array([apply_trans(_quat_to_affine(quat), chpi_pos)
for quat in pos[:, 1:7]])
# get fixed position
chpi_pos_fix = apply_trans(fixed_dev_head_t, chpi_pos)
# get movement displacement from mean pos
hpi_disp = chpi_pos_mov - np.tile(chpi_pos_fix, (pos.shape[0], 1, 1))
# get positions above threshold distance
disp = np.sqrt((hpi_disp ** 2).sum(axis=2))
bad_mask = np.any(disp > mean_distance_limit, axis=1)
onsets, offsets = _mask_to_onsets_offsets(bad_mask)
onsets, offsets = hp_ts[onsets], hp_ts[offsets]
bad_pct = 100 * (offsets - onsets).sum() / t_tot
logger.info(u'Omitting %5.1f%% (%3d segments): '
u'disp >= %5.4fm (max: %5.4fm)'
% (bad_pct, len(onsets), mean_distance_limit, disp.max()))
annot += _annotations_from_mask(hp_ts, bad_mask, 'BAD_mov_dist')
return annot, disp
def compute_average_dev_head_t(raw, pos):
"""Get new device to head transform based on good segments.
Segments starting with "BAD" annotations are not included for calculating
the mean head position.
Parameters
----------
raw : instance of Raw
Data to compute head position.
pos : array, shape (N, 10)
The position and quaternion parameters from cHPI fitting.
Returns
-------
dev_head_t : array
New trans matrix using the averaged good head positions.
"""
sfreq = raw.info['sfreq']
seg_good = np.ones(len(raw.times))
trans_pos = np.zeros(3)
hp = pos.copy()
hp_ts = hp[:, 0] - raw._first_time
# Check rounding issues at 0 time
if hp_ts[0] < 0:
hp_ts[0] = 0
assert hp_ts[1] > 1. / sfreq
# Mask out segments if beyond scan time
mask = hp_ts <= raw.times[-1]
if not mask.all():
logger.info(
' Removing %d samples > raw.times[-1] (%s)'
% (np.sum(~mask), raw.times[-1]))
hp = hp[mask]
del mask, hp_ts
# Get time indices
ts = np.concatenate((hp[:, 0], [(raw.last_samp + 1) / sfreq]))
assert (np.diff(ts) > 0).all()
ts -= raw.first_samp / sfreq
idx = raw.time_as_index(ts, use_rounding=True)
del ts
if idx[0] == -1: # annoying rounding errors
idx[0] = 0
assert idx[1] > 0
assert (idx >= 0).all()
assert idx[-1] == len(seg_good)
assert (np.diff(idx) > 0).all()
# Mark times bad that are bad according to annotations
onsets, ends = _annotations_starts_stops(raw, 'bad')
for onset, end in zip(onsets, ends):
seg_good[onset:end] = 0
dt = np.diff(np.cumsum(np.concatenate([[0], seg_good]))[idx])
assert (dt >= 0).all()
dt = dt / sfreq
del seg_good, idx
# Get weighted head pos trans and rot
trans_pos += np.dot(dt, hp[:, 4:7])
rot_qs = hp[:, 1:4]
best_q = _average_quats(rot_qs, weights=dt)
trans = np.eye(4)
trans[:3, :3] = quat_to_rot(best_q)
trans[:3, 3] = trans_pos / dt.sum()
assert np.linalg.norm(trans[:3, 3]) < 1 # less than 1 meter is sane
dev_head_t = Transform('meg', 'head', trans)
return dev_head_t
def _annotations_from_mask(times, art_mask, art_name):
"""Construct annotations from boolean mask of the data."""
from scipy.ndimage.measurements import label
comps, num_comps = label(art_mask)
onsets, durations, desc = [], [], []
n_times = len(times)
for lbl in range(1, num_comps + 1):
l_idx = np.nonzero(comps == lbl)[0]
onsets.append(times[l_idx[0]])
# duration is to the time after the last labeled time
# or to the end of the times.
if 1 + l_idx[-1] < n_times:
durations.append(times[1 + l_idx[-1]] - times[l_idx[0]])
else:
durations.append(times[l_idx[-1]] - times[l_idx[0]])
desc.append(art_name)
return Annotations(onsets, durations, desc)
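# Hedged usage sketch (editor's addition, not part of the upstream module):
# typical call pattern for annotate_muscle_zscore() defined above. ``raw`` is
# assumed to be an already-loaded mne.io.Raw instance with M/EEG data; the
# threshold and filter band simply repeat the documented defaults.
def _example_annotate_muscle(raw):
    annot, scores = annotate_muscle_zscore(
        raw, threshold=4, ch_type=None, min_length_good=0.1,
        filter_freq=(110, 140))
    raw.set_annotations(annot)
    return scores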
|
from numpy.testing import assert_array_equal, assert_equal
import pytest
import numpy as np
from mne.utils import run_tests_if_main
from mne.preprocessing import peak_finder
def test_peak_finder():
"""Test the peak detection method."""
# check for random data
rng = np.random.RandomState(42)
peak_inds, peak_mags = peak_finder(rng.randn(20))
assert_equal(peak_inds.dtype, np.dtype('int64'))
assert_equal(peak_mags.dtype, np.dtype('float64'))
    # check for an empty array as created in #5025
with pytest.raises(ValueError):
peak_finder(np.arange(2, 1, 0.05))
# check for empty array
with pytest.raises(ValueError):
peak_finder([])
# check for monotonic function
peak_inds, peak_mags = peak_finder(np.arange(1, 2, 0.05))
assert_equal(peak_inds.dtype, np.dtype('int64'))
assert_equal(peak_mags.dtype, np.dtype('float64'))
# check for no peaks
peak_inds, peak_mags = peak_finder(np.zeros(20))
assert_equal(peak_inds.dtype, np.dtype('int64'))
assert_equal(peak_mags.dtype, np.dtype('float64'))
# check values
peak_inds, peak_mags = peak_finder([0, 2, 5, 0, 6, -1])
assert_array_equal(peak_inds, [2, 4])
run_tests_if_main()
|
import os
import tempfile
from . import helpers, pprint
def parser(subparsers, _):
"""Adds the history parser to the given subparsers object."""
desc = 'show commit history'
history_parser = subparsers.add_parser(
'history', help=desc, description=desc.capitalize(), aliases=['hs'])
history_parser.add_argument(
'-v', '--verbose', help='be verbose, will output the diffs of the commit',
action='store_true')
history_parser.add_argument(
'-l', '--limit', help='limit number of commits displayed', type=int)
history_parser.add_argument(
'-c', '--compact', help='output history in a compact format',
action='store_true', default=False)
history_parser.add_argument(
'-b', '--branch', nargs='?', metavar='branch_name', dest='b',
help='the branch to show history of (defaults to the current branch)')
history_parser.set_defaults(func=main)
def main(args, repo):
b = helpers.get_branch(args.b, repo) if args.b else repo.current_branch
with tempfile.NamedTemporaryFile(mode='w', delete=False) as tf:
count = 0
for ci in b.history():
if args.limit and count == args.limit:
break
pprint.commit(ci, compact=args.compact, stream=tf.write)
if not args.compact:
pprint.puts(stream=tf.write)
if args.verbose and len(ci.parents) == 1:
for patch in b.diff_commits(ci.parents[0], ci):
pprint.diff(patch, stream=tf.write)
count += 1
helpers.page(tf.name, repo)
os.remove(tf.name)
return True
|
import numpy as np
from numpy.testing import (assert_almost_equal, assert_allclose,
assert_array_equal)
from scipy import stats
import pytest
from mne.stats import fdr_correction, bonferroni_correction
def test_bonferroni_pval_clip():
"""Test that p-values are never exceed 1.0."""
p = (0.2, 0.9)
_, p_corrected = bonferroni_correction(p)
assert p_corrected.max() <= 1.0
def test_multi_pval_correction():
"""Test pval correction for multi comparison (FDR and Bonferroni)."""
rng = np.random.RandomState(0)
X = rng.randn(10, 1000, 10)
X[:, :50, 0] += 4.0 # 50 significant tests
alpha = 0.05
T, pval = stats.ttest_1samp(X, 0)
n_samples = X.shape[0]
n_tests = X.size / n_samples
thresh_uncorrected = stats.t.ppf(1.0 - alpha, n_samples - 1)
reject_bonferroni, pval_bonferroni = bonferroni_correction(pval, alpha)
thresh_bonferroni = stats.t.ppf(1.0 - alpha / n_tests, n_samples - 1)
assert pval_bonferroni.ndim == 2
assert reject_bonferroni.ndim == 2
assert_allclose(pval_bonferroni, (pval * 10000).clip(max=1))
reject_expected = pval_bonferroni < alpha
assert_array_equal(reject_bonferroni, reject_expected)
fwer = np.mean(reject_bonferroni)
assert_almost_equal(fwer, alpha, 1)
reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='indep')
assert pval_fdr.ndim == 2
assert reject_fdr.ndim == 2
thresh_fdr = np.min(np.abs(T)[reject_fdr])
assert 0 <= (reject_fdr.sum() - 50) <= 50 * 1.05
assert thresh_uncorrected <= thresh_fdr <= thresh_bonferroni
pytest.raises(ValueError, fdr_correction, pval, alpha, method='blah')
assert np.all(fdr_correction(pval, alpha=0)[0] == 0)
reject_fdr, pval_fdr = fdr_correction(pval, alpha=alpha, method='negcorr')
thresh_fdr = np.min(np.abs(T)[reject_fdr])
assert 0 <= (reject_fdr.sum() - 50) <= 50 * 1.05
assert thresh_uncorrected <= thresh_fdr <= thresh_bonferroni
|
import os
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from diamond.collector import Collector
from kvm import KVMCollector
##########################################################################
class TestKVMCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('KVMCollector', {
'interval': 10,
})
self.collector = KVMCollector(config, None)
self.collector.PROC = os.path.dirname(__file__) + '/fixtures/'
def test_import(self):
self.assertTrue(KVMCollector)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_synthetic_data(self, publish_mock):
patch_open = patch('__builtin__.open', Mock(return_value=StringIO(
'0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0' +
'\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n0\n'
)))
patch_open.start()
self.collector.collect()
patch_open.stop()
self.assertPublishedMany(publish_mock, {})
self.collector.collect()
metrics = {
'efer_reload': 0.000000,
'exits': 1436135848.000000,
'fpu_reload': 121764903.500000,
'halt_exits': 544586282.600000,
'halt_wakeup': 235093451.400000,
'host_state_reload': 801854250.600000,
'hypercalls': 0.000000,
'insn_emulation': 1314391264.700000,
'insn_emulation_fail': 0.000000,
'invlpg': 0.000000,
'io_exits': 248822813.200000,
'irq_exits': 701647108.400000,
'irq_injections': 986654069.600000,
'irq_window': 162240965.200000,
'largepages': 351789.400000,
'mmio_exits': 20169.400000,
'mmu_cache_miss': 1643.300000,
'mmu_flooded': 0.000000,
'mmu_pde_zapped': 0.000000,
'mmu_pte_updated': 0.000000,
'mmu_pte_write': 11144.000000,
'mmu_recycled': 0.000000,
'mmu_shadow_zapped': 384.700000,
'mmu_unsync': 0.000000,
'nmi_injections': 0.000000,
'nmi_window': 0.000000,
'pf_fixed': 355636.100000,
'pf_guest': 0.000000,
'remote_tlb_flush': 111.200000,
'request_irq': 0.000000,
'signal_exits': 0.000000,
'tlb_flush': 0.000000,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from collections import namedtuple
import datetime
from nessclient import ArmingState, Client
import voluptuous as vol
from homeassistant.components.binary_sensor import DEVICE_CLASSES
from homeassistant.const import (
ATTR_CODE,
ATTR_STATE,
CONF_HOST,
CONF_SCAN_INTERVAL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import async_dispatcher_send
DOMAIN = "ness_alarm"
DATA_NESS = "ness_alarm"
CONF_DEVICE_PORT = "port"
CONF_INFER_ARMING_STATE = "infer_arming_state"
CONF_ZONES = "zones"
CONF_ZONE_NAME = "name"
CONF_ZONE_TYPE = "type"
CONF_ZONE_ID = "id"
ATTR_OUTPUT_ID = "output_id"
DEFAULT_ZONES = []
DEFAULT_SCAN_INTERVAL = datetime.timedelta(minutes=1)
DEFAULT_INFER_ARMING_STATE = False
SIGNAL_ZONE_CHANGED = "ness_alarm.zone_changed"
SIGNAL_ARMING_STATE_CHANGED = "ness_alarm.arming_state_changed"
ZoneChangedData = namedtuple("ZoneChangedData", ["zone_id", "state"])
DEFAULT_ZONE_TYPE = "motion"
ZONE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ZONE_NAME): cv.string,
vol.Required(CONF_ZONE_ID): cv.positive_int,
vol.Optional(CONF_ZONE_TYPE, default=DEFAULT_ZONE_TYPE): vol.In(DEVICE_CLASSES),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_DEVICE_PORT): cv.port,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.positive_time_period,
vol.Optional(CONF_ZONES, default=DEFAULT_ZONES): vol.All(
cv.ensure_list, [ZONE_SCHEMA]
),
vol.Optional(
CONF_INFER_ARMING_STATE, default=DEFAULT_INFER_ARMING_STATE
): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_PANIC = "panic"
SERVICE_AUX = "aux"
SERVICE_SCHEMA_PANIC = vol.Schema({vol.Required(ATTR_CODE): cv.string})
SERVICE_SCHEMA_AUX = vol.Schema(
{
vol.Required(ATTR_OUTPUT_ID): cv.positive_int,
vol.Optional(ATTR_STATE, default=True): cv.boolean,
}
)
async def async_setup(hass, config):
"""Set up the Ness Alarm platform."""
conf = config[DOMAIN]
zones = conf[CONF_ZONES]
host = conf[CONF_HOST]
port = conf[CONF_DEVICE_PORT]
scan_interval = conf[CONF_SCAN_INTERVAL]
infer_arming_state = conf[CONF_INFER_ARMING_STATE]
client = Client(
host=host,
port=port,
loop=hass.loop,
update_interval=scan_interval.total_seconds(),
infer_arming_state=infer_arming_state,
)
hass.data[DATA_NESS] = client
async def _close(event):
await client.close()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close)
hass.async_create_task(
async_load_platform(hass, "binary_sensor", DOMAIN, {CONF_ZONES: zones}, config)
)
hass.async_create_task(
async_load_platform(hass, "alarm_control_panel", DOMAIN, {}, config)
)
def on_zone_change(zone_id: int, state: bool):
"""Receives and propagates zone state updates."""
async_dispatcher_send(
hass, SIGNAL_ZONE_CHANGED, ZoneChangedData(zone_id=zone_id, state=state)
)
def on_state_change(arming_state: ArmingState):
"""Receives and propagates arming state updates."""
async_dispatcher_send(hass, SIGNAL_ARMING_STATE_CHANGED, arming_state)
client.on_zone_change(on_zone_change)
client.on_state_change(on_state_change)
# Force update for current arming status and current zone states
hass.loop.create_task(client.keepalive())
hass.loop.create_task(client.update())
async def handle_panic(call):
await client.panic(call.data[ATTR_CODE])
async def handle_aux(call):
await client.aux(call.data[ATTR_OUTPUT_ID], call.data[ATTR_STATE])
hass.services.async_register(
DOMAIN, SERVICE_PANIC, handle_panic, schema=SERVICE_SCHEMA_PANIC
)
hass.services.async_register(
DOMAIN, SERVICE_AUX, handle_aux, schema=SERVICE_SCHEMA_AUX
)
return True
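# A minimal usage sketch (assumptions: a running Home Assistant instance with
# this integration already set up; the code/output id values are made up).
# These calls are illustrative only and are not executed by this module:
#
#     await hass.services.async_call(
#         DOMAIN, SERVICE_AUX, {ATTR_OUTPUT_ID: 1, ATTR_STATE: True}, blocking=True
#     )
#     await hass.services.async_call(
#         DOMAIN, SERVICE_PANIC, {ATTR_CODE: "1234"}, blocking=True
#     )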
|
from __future__ import absolute_import
from __future__ import print_function
from keras.datasets import mnist
from keras.models import Sequential
from keras.layers.core import Dense, Dropout, Activation
from keras.optimizers import SGD
from keras.utils import np_utils
from elephas.spark_model import SparkModel
from elephas.utils.rdd_utils import to_simple_rdd
from pyspark import SparkContext, SparkConf
# Define basic parameters
batch_size = 64
nb_classes = 10
epochs = 1
# Create Spark context
conf = SparkConf().setAppName('Mnist_Spark_MLP').setMaster('local[8]')
sc = SparkContext(conf=conf)
# Load data
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train.reshape(60000, 784)
x_test = x_test.reshape(10000, 784)
x_train = x_train.astype("float32")
x_test = x_test.astype("float32")
x_train /= 255
x_test /= 255
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# Convert class vectors to binary class matrices
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
model = Sequential()
model.add(Dense(128, input_dim=784))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(128))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Dense(10))
model.add(Activation('softmax'))
sgd = SGD(lr=0.1)
model.compile(sgd, 'categorical_crossentropy', ['acc'])
# Build RDD from numpy features and labels
rdd = to_simple_rdd(sc, x_train, y_train)
# Initialize SparkModel from Keras model and Spark context
spark_model = SparkModel(model, frequency='epoch', mode='asynchronous')
# Train Spark model
spark_model.fit(rdd, epochs=epochs, batch_size=batch_size, verbose=0, validation_split=0.1)
# Evaluate Spark model by evaluating the underlying model
score = spark_model.master_network.evaluate(x_test, y_test, verbose=2)
print('Test accuracy:', score[1])
|
import contextlib
import json
import logging
from dateutil.parser import parse as parse_time
from random import choice
from string import ascii_letters
from datetime import datetime, timedelta
import xml.etree.ElementTree as ET
from typing import ClassVar, Optional, List, Tuple
import aiohttp
import discord
from .errors import (
APIError,
OfflineStream,
InvalidTwitchCredentials,
InvalidYoutubeCredentials,
StreamNotFound,
YoutubeQuotaExceeded,
)
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import humanize_number, humanize_timedelta
TWITCH_BASE_URL = "https://api.twitch.tv"
TWITCH_ID_ENDPOINT = TWITCH_BASE_URL + "/helix/users"
TWITCH_STREAMS_ENDPOINT = TWITCH_BASE_URL + "/helix/streams/"
TWITCH_COMMUNITIES_ENDPOINT = TWITCH_BASE_URL + "/helix/communities"
YOUTUBE_BASE_URL = "https://www.googleapis.com/youtube/v3"
YOUTUBE_CHANNELS_ENDPOINT = YOUTUBE_BASE_URL + "/channels"
YOUTUBE_SEARCH_ENDPOINT = YOUTUBE_BASE_URL + "/search"
YOUTUBE_VIDEOS_ENDPOINT = YOUTUBE_BASE_URL + "/videos"
YOUTUBE_CHANNEL_RSS = "https://www.youtube.com/feeds/videos.xml?channel_id={channel_id}"
_ = Translator("Streams", __file__)
log = logging.getLogger("redbot.cogs.Streams")
def rnd(url):
"""Appends a random parameter to the url to avoid Discord's caching"""
return url + "?rnd=" + "".join([choice(ascii_letters) for _loop_counter in range(6)])
def get_video_ids_from_feed(feed):
    root = ET.fromstring(feed)
for child in root.iter("{http://www.w3.org/2005/Atom}entry"):
for i in child.iter("{http://www.youtube.com/xml/schemas/2015}videoId"):
yield i.text
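# Illustrative input for the parser above (assumption: the namespaces match the
# YouTube channel RSS feed this cog fetches; the video IDs are made up).
# Not executed here:
#
#     _SAMPLE_FEED = (
#         '<feed xmlns="http://www.w3.org/2005/Atom" '
#         'xmlns:yt="http://www.youtube.com/xml/schemas/2015">'
#         '<entry><yt:videoId>abc123</yt:videoId></entry>'
#         '<entry><yt:videoId>def456</yt:videoId></entry>'
#         '</feed>'
#     )
#     list(get_video_ids_from_feed(_SAMPLE_FEED))  # -> ["abc123", "def456"]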
class Stream:
token_name: ClassVar[Optional[str]] = None
def __init__(self, **kwargs):
self.name = kwargs.pop("name", None)
self.channels = kwargs.pop("channels", [])
# self.already_online = kwargs.pop("already_online", False)
self._messages_cache = kwargs.pop("_messages_cache", [])
self.type = self.__class__.__name__
async def is_online(self):
raise NotImplementedError()
def make_embed(self):
raise NotImplementedError()
def export(self):
data = {}
for k, v in self.__dict__.items():
if not k.startswith("_"):
data[k] = v
data["messages"] = []
for m in self._messages_cache:
data["messages"].append({"channel": m.channel.id, "message": m.id})
return data
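    # Illustrative result of export() for a stream with one cached alert
    # message (hypothetical IDs; extra subclass attributes are omitted):
    #
    #     {
    #         "name": "example_stream",
    #         "channels": [111111111111111111],
    #         "type": "YoutubeStream",
    #         "messages": [{"channel": 111111111111111111, "message": 222222222222222222}],
    #     }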
def __repr__(self):
return "<{0.__class__.__name__}: {0.name}>".format(self)
class YoutubeStream(Stream):
token_name = "youtube"
def __init__(self, **kwargs):
self.id = kwargs.pop("id", None)
self._token = kwargs.pop("token", None)
self._config = kwargs.pop("config")
self.not_livestreams: List[str] = []
self.livestreams: List[str] = []
super().__init__(**kwargs)
async def is_online(self):
if not self._token:
raise InvalidYoutubeCredentials("YouTube API key is not set.")
if not self.id:
self.id = await self.fetch_id()
elif not self.name:
self.name = await self.fetch_name()
async with aiohttp.ClientSession() as session:
async with session.get(YOUTUBE_CHANNEL_RSS.format(channel_id=self.id)) as r:
rssdata = await r.text()
if self.not_livestreams:
self.not_livestreams = list(dict.fromkeys(self.not_livestreams))
if self.livestreams:
self.livestreams = list(dict.fromkeys(self.livestreams))
for video_id in get_video_ids_from_feed(rssdata):
if video_id in self.not_livestreams:
log.debug(f"video_id in not_livestreams: {video_id}")
continue
log.debug(f"video_id not in not_livestreams: {video_id}")
params = {
"key": self._token["api_key"],
"id": video_id,
"part": "id,liveStreamingDetails",
}
async with aiohttp.ClientSession() as session:
async with session.get(YOUTUBE_VIDEOS_ENDPOINT, params=params) as r:
data = await r.json()
stream_data = data.get("items", [{}])[0].get("liveStreamingDetails", {})
log.debug(f"stream_data for {video_id}: {stream_data}")
if (
stream_data
and stream_data != "None"
and stream_data.get("actualEndTime", None) is None
):
actual_start_time = stream_data.get("actualStartTime", None)
scheduled = stream_data.get("scheduledStartTime", None)
if scheduled is not None and actual_start_time is None:
scheduled = parse_time(scheduled)
if (
scheduled.replace(tzinfo=None) - datetime.now()
).total_seconds() < -3600:
continue
elif actual_start_time is None:
continue
if video_id not in self.livestreams:
self.livestreams.append(data["items"][0]["id"])
else:
self.not_livestreams.append(data["items"][0]["id"])
if video_id in self.livestreams:
self.livestreams.remove(video_id)
log.debug(f"livestreams for {self.name}: {self.livestreams}")
log.debug(f"not_livestreams for {self.name}: {self.not_livestreams}")
        # This is technically redundant since we already have the info from the
        # RSS feed, but it avoids fully rewriting the embed-building code below
        # and the extra request only costs a 2-unit quota query.
if self.livestreams:
params = {
"key": self._token["api_key"],
"id": self.livestreams[-1],
"part": "snippet,liveStreamingDetails",
}
async with aiohttp.ClientSession() as session:
async with session.get(YOUTUBE_VIDEOS_ENDPOINT, params=params) as r:
data = await r.json()
return await self.make_embed(data)
raise OfflineStream()
async def make_embed(self, data):
vid_data = data["items"][0]
video_url = "https://youtube.com/watch?v={}".format(vid_data["id"])
title = vid_data["snippet"]["title"]
thumbnail = vid_data["snippet"]["thumbnails"]["medium"]["url"]
channel_title = vid_data["snippet"]["channelTitle"]
embed = discord.Embed(title=title, url=video_url)
is_schedule = False
if vid_data["liveStreamingDetails"].get("scheduledStartTime", None) is not None:
if "actualStartTime" not in vid_data["liveStreamingDetails"]:
start_time = parse_time(vid_data["liveStreamingDetails"]["scheduledStartTime"])
start_in = start_time.replace(tzinfo=None) - datetime.now()
if start_in.total_seconds() > 0:
embed.description = _("This stream will start in {time}").format(
time=humanize_timedelta(
timedelta=timedelta(minutes=start_in.total_seconds() // 60)
) # getting rid of seconds
)
else:
embed.description = _(
"This stream was scheduled for {min} minutes ago"
).format(min=round((start_in.total_seconds() * -1) // 60))
embed.timestamp = start_time
is_schedule = True
else:
# repost message
to_remove = []
for message in self._messages_cache:
if message.embeds[0].description is discord.Embed.Empty:
continue
with contextlib.suppress(Exception):
autodelete = await self._config.guild(message.guild).autodelete()
if autodelete:
await message.delete()
to_remove.append(message.id)
self._messages_cache = [x for x in self._messages_cache if x.id not in to_remove]
embed.set_author(name=channel_title)
embed.set_image(url=rnd(thumbnail))
embed.colour = 0x9255A5
return embed, is_schedule
async def fetch_id(self):
return await self._fetch_channel_resource("id")
async def fetch_name(self):
snippet = await self._fetch_channel_resource("snippet")
return snippet["title"]
async def _fetch_channel_resource(self, resource: str):
params = {"key": self._token["api_key"], "part": resource}
if resource == "id":
params["forUsername"] = self.name
else:
params["id"] = self.id
async with aiohttp.ClientSession() as session:
async with session.get(YOUTUBE_CHANNELS_ENDPOINT, params=params) as r:
data = await r.json()
if "error" in data:
error_code = data["error"]["code"]
if error_code == 400 and data["error"]["errors"][0]["reason"] == "keyInvalid":
raise InvalidYoutubeCredentials()
elif error_code == 403 and data["error"]["errors"][0]["reason"] in (
"dailyLimitExceeded",
"quotaExceeded",
"rateLimitExceeded",
):
raise YoutubeQuotaExceeded()
elif "items" in data and len(data["items"]) == 0:
raise StreamNotFound()
elif "items" in data:
return data["items"][0][resource]
elif (
"pageInfo" in data
and "totalResults" in data["pageInfo"]
and data["pageInfo"]["totalResults"] < 1
):
raise StreamNotFound()
raise APIError(data)
def __repr__(self):
return "<{0.__class__.__name__}: {0.name} (ID: {0.id})>".format(self)
class TwitchStream(Stream):
token_name = "twitch"
def __init__(self, **kwargs):
self.id = kwargs.pop("id", None)
self._client_id = kwargs.pop("token", None)
self._bearer = kwargs.pop("bearer", None)
super().__init__(**kwargs)
async def is_online(self):
if not self.id:
self.id = await self.fetch_id()
url = TWITCH_STREAMS_ENDPOINT
header = {"Client-ID": str(self._client_id)}
if self._bearer is not None:
header = {**header, "Authorization": f"Bearer {self._bearer}"}
params = {"user_id": self.id}
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=header, params=params) as r:
data = await r.json(encoding="utf-8")
if r.status == 200:
if not data["data"]:
raise OfflineStream()
self.name = data["data"][0]["user_name"]
data = data["data"][0]
data["game_name"] = None
data["followers"] = None
data["view_count"] = None
data["profile_image_url"] = None
data["login"] = None
game_id = data["game_id"]
if game_id:
params = {"id": game_id}
async with aiohttp.ClientSession() as session:
async with session.get(
"https://api.twitch.tv/helix/games", headers=header, params=params
) as r:
game_data = await r.json(encoding="utf-8")
if game_data:
game_data = game_data["data"][0]
data["game_name"] = game_data["name"]
params = {"to_id": self.id}
async with aiohttp.ClientSession() as session:
async with session.get(
"https://api.twitch.tv/helix/users/follows", headers=header, params=params
) as r:
user_data = await r.json(encoding="utf-8")
if user_data:
followers = user_data["total"]
data["followers"] = followers
params = {"id": self.id}
async with aiohttp.ClientSession() as session:
async with session.get(
"https://api.twitch.tv/helix/users", headers=header, params=params
) as r:
user_profile_data = await r.json(encoding="utf-8")
if user_profile_data:
profile_image_url = user_profile_data["data"][0]["profile_image_url"]
data["profile_image_url"] = profile_image_url
data["view_count"] = user_profile_data["data"][0]["view_count"]
data["login"] = user_profile_data["data"][0]["login"]
is_rerun = False
return self.make_embed(data), is_rerun
elif r.status == 400:
raise InvalidTwitchCredentials()
elif r.status == 404:
raise StreamNotFound()
else:
raise APIError(data)
async def fetch_id(self):
header = {"Client-ID": str(self._client_id)}
if self._bearer is not None:
header = {**header, "Authorization": f"Bearer {self._bearer}"}
url = TWITCH_ID_ENDPOINT
params = {"login": self.name}
async with aiohttp.ClientSession() as session:
async with session.get(url, headers=header, params=params) as r:
data = await r.json()
if r.status == 200:
if not data["data"]:
raise StreamNotFound()
return data["data"][0]["id"]
elif r.status == 400:
raise StreamNotFound()
elif r.status == 401:
raise InvalidTwitchCredentials()
else:
raise APIError(data)
def make_embed(self, data):
is_rerun = data["type"] == "rerun"
url = f"https://www.twitch.tv/{data['login']}" if data["login"] is not None else None
logo = data["profile_image_url"]
if logo is None:
logo = "https://static-cdn.jtvnw.net/jtv_user_pictures/xarth/404_user_70x70.png"
status = data["title"]
if not status:
status = _("Untitled broadcast")
if is_rerun:
status += _(" - Rerun")
embed = discord.Embed(title=status, url=url, color=0x6441A4)
embed.set_author(name=data["user_name"])
embed.add_field(name=_("Followers"), value=humanize_number(data["followers"]))
embed.add_field(name=_("Total views"), value=humanize_number(data["view_count"]))
embed.set_thumbnail(url=logo)
if data["thumbnail_url"]:
embed.set_image(url=rnd(data["thumbnail_url"].format(width=320, height=180)))
if data["game_name"]:
embed.set_footer(text=_("Playing: ") + data["game_name"])
return embed
def __repr__(self):
return "<{0.__class__.__name__}: {0.name} (ID: {0.id})>".format(self)
class HitboxStream(Stream):
    token_name = None  # This streaming service doesn't currently require an API key
async def is_online(self):
url = "https://api.smashcast.tv/media/live/" + self.name
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
# data = await r.json(encoding='utf-8')
data = await r.text()
data = json.loads(data, strict=False)
if "livestream" not in data:
raise StreamNotFound()
elif data["livestream"][0]["media_is_live"] == "0":
# self.already_online = False
raise OfflineStream()
elif data["livestream"][0]["media_is_live"] == "1":
# self.already_online = True
return self.make_embed(data)
raise APIError(data)
def make_embed(self, data):
base_url = "https://edge.sf.hitbox.tv"
livestream = data["livestream"][0]
channel = livestream["channel"]
url = channel["channel_link"]
embed = discord.Embed(title=livestream["media_status"], url=url, color=0x98CB00)
embed.set_author(name=livestream["media_name"])
embed.add_field(name=_("Followers"), value=humanize_number(channel["followers"]))
embed.set_thumbnail(url=base_url + channel["user_logo"])
if livestream["media_thumbnail"]:
embed.set_image(url=rnd(base_url + livestream["media_thumbnail"]))
embed.set_footer(text=_("Playing: ") + livestream["category_name"])
return embed
class PicartoStream(Stream):
    token_name = None  # This streaming service doesn't currently require an API key
async def is_online(self):
url = "https://api.picarto.tv/v1/channel/name/" + self.name
async with aiohttp.ClientSession() as session:
async with session.get(url) as r:
data = await r.text(encoding="utf-8")
if r.status == 200:
data = json.loads(data)
if data["online"] is True:
# self.already_online = True
return self.make_embed(data)
else:
# self.already_online = False
raise OfflineStream()
elif r.status == 404:
raise StreamNotFound()
else:
raise APIError(data)
def make_embed(self, data):
avatar = rnd(
"https://picarto.tv/user_data/usrimg/{}/dsdefault.jpg".format(data["name"].lower())
)
url = "https://picarto.tv/" + data["name"]
thumbnail = data["thumbnails"]["web"]
embed = discord.Embed(title=data["title"], url=url, color=0x4C90F3)
embed.set_author(name=data["name"])
embed.set_image(url=rnd(thumbnail))
embed.add_field(name=_("Followers"), value=humanize_number(data["followers"]))
embed.add_field(name=_("Total views"), value=humanize_number(data["viewers_total"]))
embed.set_thumbnail(url=avatar)
data["tags"] = ", ".join(data["tags"])
if not data["tags"]:
data["tags"] = _("None")
if data["adult"]:
data["adult"] = _("NSFW | ")
else:
data["adult"] = ""
embed.set_footer(text=_("{adult}Category: {category} | Tags: {tags}").format(**data))
return embed
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import logging
import posixpath
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import hpc_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import openfoam
from perfkitbenchmarker.linux_packages import openmpi
_DEFAULT_CASE = 'motorbike'
_CASE_PATHS = {
'motorbike': 'tutorials/incompressible/simpleFoam/motorBike',
}
assert _DEFAULT_CASE in _CASE_PATHS
FLAGS = flags.FLAGS
flags.DEFINE_enum('openfoam_case', _DEFAULT_CASE,
sorted(list(_CASE_PATHS.keys())),
'Name of the OpenFOAM case to run.')
flags.DEFINE_list('openfoam_dimensions', ['20_8_8'], 'Dimensions of the case.')
flags.DEFINE_integer(
'openfoam_num_threads_per_vm', None,
'The number of threads per VM to run OpenFOAM with. If None, defaults to '
'half the total number of vCPUs available.')
flags.DEFINE_string(
'openfoam_mpi_mapping', 'core:SPAN',
'Mpirun process mapping to use as arguments to "mpirun --map-by".')
flags.DEFINE_enum(
'openfoam_decomp_method', 'scotch', ['scotch', 'hierarchical', 'simple'],
'Decomposition method to use in decomposePar. See: '
'https://cfd.direct/openfoam/user-guide/v7-running-applications-parallel/')
flags.DEFINE_integer(
'openfoam_max_global_cells', 200 * 1000 * 1000,
    'The maximum number of refinement cells to use in snappyHexMeshDict. See: '
'https://cfd.direct/openfoam/user-guide/v6-snappyhexmesh/')
BENCHMARK_NAME = 'openfoam'
_BENCHMARK_ROOT = '$HOME/OpenFOAM/run'
BENCHMARK_CONFIG = """
openfoam:
description: Runs an OpenFOAM benchmark.
vm_groups:
default:
vm_spec:
GCP:
machine_type: c2-standard-8
zone: us-east1-c
boot_disk_size: 100
Azure:
machine_type: Standard_F8s_v2
zone: eastus2
boot_disk_size: 100
AWS:
machine_type: c5.2xlarge
zone: us-east-1f
boot_disk_size: 100
os_type: ubuntu1604
vm_count: 2
disk_spec:
GCP:
disk_type: nfs
nfs_managed: False
mount_point: {path}
Azure:
disk_type: nfs
nfs_managed: False
mount_point: {path}
AWS:
disk_type: nfs
nfs_managed: False
mount_point: {path}
""".format(path=_BENCHMARK_ROOT)
_MACHINE_FILE = posixpath.join(_BENCHMARK_ROOT, 'MACHINEFILE')
_RUN_SCRIPT = 'Allrun'
_BLOCK_MESH_DICT = 'system/blockMeshDict'
_DECOMPOSE_DICT = 'system/decomposeParDict'
_SNAPPY_HEX_MESH_DICT = 'system/snappyHexMeshDict'
_SSH_CONFIG_CMD = ('echo "LogLevel ERROR\nHost *\n IdentitiesOnly yes\n" | '
'tee -a $HOME/.ssh/config')
_RUN_SCRIPT_EXCLUDED_PREFIXES = ['#', '.', 'cd']
_RUN_SCRIPT_VALID_COMMANDS = [
'cp', 'surfaceFeatures', 'blockMesh', 'decomposePar', 'snappyHexMesh',
'patchSummary', 'potentialFoam', '$(getApplication)', 'reconstructParMesh',
'reconstructPar'
]
def GetConfig(user_config):
"""Returns the configuration of a benchmark."""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['num_vms'].present:
config['vm_groups']['default']['vm_count'] = FLAGS.num_vms
return config
@flags.validator('openfoam_dimensions')
def _CheckDimensions(dimensions_list):
# throws InvalidValue if an entry is not correct
for dimensions in dimensions_list:
_ParseDimensions(dimensions)
return True
def _ParseDimensions(dimensions):
"""Parse and validate an individual dimensions entry.
Args:
dimensions: String formatted as "_" separated integers like: '80_20_20'.
Returns:
Parsed dimensions like: '80 20 20'.
Raises:
errors.Config.InvalidValue: If input dimensions are incorrectly formatted.
"""
dimensions = dimensions.split('_')
if not all(value.isdigit() for value in dimensions):
raise errors.Config.InvalidValue(
'Expected list of ints separated by "_" in --openfoam_dimensions '
'but received %s.' % dimensions)
return ' '.join(dimensions)
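# Illustrative behaviour of the parser above (not executed here):
#
#     _ParseDimensions('80_20_20')  # -> '80 20 20'
#     _ParseDimensions('80x20x20')  # raises errors.Config.InvalidValue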
def Prepare(benchmark_spec):
"""Prepares the VMs and other resources for running the benchmark.
This is a good place to download binaries onto the VMs, create any data files
needed for a benchmark run, etc.
Args:
benchmark_spec: The benchmark spec for this sample benchmark.
"""
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: vm.Install('openfoam'), vms)
# Allow ssh access to other vms.
vm_util.RunThreaded(lambda vm: vm.AuthenticateVm(), vms)
# Avoids printing ssh warnings and prevents too many auth errors.
vm_util.RunThreaded(lambda vm: vm.RemoteCommand(_SSH_CONFIG_CMD), vms)
# Tell mpirun about other nodes.
hpc_util.CreateMachineFile(vms, remote_path=_MACHINE_FILE)
def _GetSample(line):
"""Parse a single output line into a performance sample.
Input format:
real 100.00
Args:
line: A single line from the OpenFOAM timing output.
Returns:
    A single performance sample, with times in seconds.
"""
runtime_category, runtime_output = line.split()
try:
runtime_seconds = int(float(runtime_output))
  except ValueError:
raise ValueError(
'Output "%s" does not match expected format "real 100.00".' % line)
logging.info('Runtime of %s seconds from [%s, %s]', runtime_seconds,
runtime_category, runtime_output)
runtime_category = 'time_' + runtime_category
return sample.Sample(runtime_category, runtime_seconds, 'seconds')
def _GetSamples(output):
"""Parse the output and return performance samples.
Output is in the format (example numbers):
real 100.00
user 60.55
sys 99.31
Args:
output: The output from running the OpenFOAM benchmark.
Returns:
A list of performance samples.
"""
return [_GetSample(line) for line in output.strip().splitlines()]
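# Illustrative parse of a `time -p` output block (made-up numbers); the result
# is roughly:
#
#     _GetSamples('real 100.00\nuser 60.55\nsys 99.31')
#     # -> [Sample('time_real', 100, 'seconds'),
#     #     Sample('time_user', 60, 'seconds'),
#     #     Sample('time_sys', 99, 'seconds')]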
def _GetOpenfoamVersion(vm):
"""Get the installed OpenFOAM version from the vm."""
return vm.RemoteCommand('echo $WM_PROJECT_VERSION')[0].rstrip()
def _GetWorkingDirPath():
"""Get the base directory name of the case being run."""
case_dir_name = posixpath.basename(_CASE_PATHS[FLAGS.openfoam_case])
return posixpath.join(_BENCHMARK_ROOT, case_dir_name)
def _GetPath(openfoam_file):
"""Get the absolute path to the file in the working directory."""
return posixpath.join(_GetWorkingDirPath(), openfoam_file)
def _SetDictEntry(vm, key, value, dict_file_name):
"""Sets an entry in an OpenFOAM dictionary file.
Args:
vm: The VM to set the entry on.
key: String; name of the key to set (like hierarchicalCoeffs.n).
value: String; the value to set.
dict_file_name: String; name of the file to set the specified entry. This
file should be in the working directory. Example: system/snappyHexMeshDict
"""
vm.RemoteCommand('foamDictionary -entry {key} -set "{value}" {file}'.format(
key=key, value=value, file=_GetPath(dict_file_name)))
def _UseMpi(vm, num_processes, mapping):
"""Configure OpenFOAM to use MPI if running with more than 1 VM.
This function looks for the word "runParallel" in the run script and replaces
it with an mpirun command.
Args:
vm: The worker VM to use MPI on.
num_processes: An integer representing the total number of processes for the
MPI job.
mapping: A string for the mpirun --map-by flag.
"""
run_script = _GetPath(_RUN_SCRIPT)
vm_util.ReplaceText(
vm, 'runParallel', 'mpirun '
'-hostfile {machinefile} '
'-mca btl ^openib '
'--map-by {mapping} '
'-np {num_processes}'.format(
machinefile=_MACHINE_FILE,
mapping=mapping,
num_processes=num_processes), run_script, '|')
vm_util.ReplaceText(vm, '^mpirun.*', '& -parallel', run_script)
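# Illustrative effect on one Allrun line (hypothetical command and process
# count; the MACHINEFILE path comes from _MACHINE_FILE):
#
#     before: runParallel snappyHexMesh -overwrite
#     after:  mpirun -hostfile $HOME/OpenFOAM/run/MACHINEFILE -mca btl ^openib
#             --map-by core:SPAN -np 16 snappyHexMesh -overwrite -parallel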
def _GetBaseCommand(command):
"""Returns a base OpenFOAM command.
Example:
command "mpirun -hostfile /home/perfkit/OpenFOAM/run/MACHINEFILE -mca btl
^openib --map-by core:SPAN -np 16 potentialFoam -parallel"
returns "potentialFoam"
Args:
command: String, the command to parse.
Returns:
The base OpenFOAM command from _RUN_SCRIPT_VALID_COMMANDS.
"""
for base_command in _RUN_SCRIPT_VALID_COMMANDS:
if base_command in command:
return base_command
raise ValueError('Unrecognized command in "%s", please add it to '
'_RUN_SCRIPT_VALID_COMMANDS' % command)
def _RunCommand(vm, command):
"""Runs a valid OpenFOAM command, returning samples."""
_, output = vm.RemoteCommand('cd %s && time -p %s' %
(_GetWorkingDirPath(), command))
results = _GetSamples(output)
for result in results:
result.metadata['full_command'] = command
result.metadata['command'] = _GetBaseCommand(command)
return results
def _IsValidCommand(command):
if not command:
return False
for prefix in _RUN_SCRIPT_EXCLUDED_PREFIXES:
if command.startswith(prefix):
return False
return True
def _ParseRunCommands(vm, remote_run_file):
"""Parses OpenFOAM run commands from a case's Allrun file."""
local_destination = vm_util.PrependTempDir(_RUN_SCRIPT)
vm.PullFile(local_destination, remote_run_file)
commands = []
for command in open(local_destination):
command = command.strip('\n')
if _IsValidCommand(command):
commands.append(command)
logging.info('Parsed run commands from %s:\n%s', remote_run_file, commands)
return commands
def _GenerateFullRuntimeSamples(samples):
"""Append the full runtime results to samples."""
assert samples, '%s should not be an empty list' % samples
counts = collections.Counter()
for s in samples:
counts[s.metric] += s.value
for metric in ('time_real', 'time_user', 'time_sys'):
samples.append(sample.Sample(metric, counts[metric], 'seconds'))
def _RunCase(master_vm, dimensions):
"""Runs the case with the given dimensions.
This function automatically looks for the "Allrun" script in the working
directory.
Args:
master_vm: The vm to run the case commands on. If using the default NFS
server, it doesn't actually matter which vm this is.
dimensions: A string of the dimensions to run with. Like "100 24 24".
Returns:
A list of performance samples for the given dimensions.
"""
dims_entry = ('( hex ( 0 1 2 3 4 5 6 7 ) ( {dimensions} ) '
'simpleGrading ( 1 1 1 ) )').format(
dimensions=_ParseDimensions(dimensions))
_SetDictEntry(master_vm, 'blocks', dims_entry, _BLOCK_MESH_DICT)
master_vm.RemoteCommand('cd %s && ./Allclean' % _GetWorkingDirPath())
results = []
run_script_path = _GetPath(_RUN_SCRIPT)
for command in _ParseRunCommands(master_vm, run_script_path):
command_results = _RunCommand(master_vm, command)
results.extend(command_results)
_GenerateFullRuntimeSamples(results)
# Update every run with run-specific metadata.
for result in results:
result.metadata['dimensions'] = dimensions
return results
def Run(benchmark_spec):
"""Runs the benchmark and returns a dict of performance data.
It must be possible to run the benchmark multiple times after the Prepare
stage. This method runs a single case with multiple dimensions.
Args:
benchmark_spec: The benchmark spec for the OpenFOAM benchmark.
Returns:
A list of performance samples.
"""
vms = benchmark_spec.vms
master_vm = vms[0]
num_vms = len(vms)
# Run configuration metadata:
num_cpus_available = num_vms * master_vm.NumCpusForBenchmark()
if FLAGS.openfoam_num_threads_per_vm is None:
num_cpus_to_use = num_cpus_available // 2
else:
num_cpus_to_use = num_vms * FLAGS.openfoam_num_threads_per_vm
case_name = FLAGS.openfoam_case
mpi_mapping = FLAGS.openfoam_mpi_mapping
decomp_method = FLAGS.openfoam_decomp_method
max_global_cells = FLAGS.openfoam_max_global_cells
openfoam_version = _GetOpenfoamVersion(master_vm)
openmpi_version = openmpi.GetMpiVersion(master_vm)
common_metadata = {
'case_name': case_name,
'decomp_method': decomp_method,
'max_global_cells': max_global_cells,
'mpi_mapping': mpi_mapping,
'openfoam_version': openfoam_version,
'openmpi_version': openmpi_version,
'total_cpus_available': num_cpus_available,
'total_cpus_used': num_cpus_to_use,
}
logging.info('Running %s case on %s/%s cores on %s vms', case_name,
num_cpus_to_use, num_cpus_available, num_vms)
logging.info('Common metadata: %s', common_metadata)
# Copy the run directory.
master_vm.RemoteCommand('cp -r {case_path} {destination}'.format(
case_path=posixpath.join(openfoam.OPENFOAM_ROOT, _CASE_PATHS[case_name]),
destination=_BENCHMARK_ROOT))
# Configure common parameters.
_SetDictEntry(master_vm, 'method', decomp_method, _DECOMPOSE_DICT)
_SetDictEntry(master_vm, 'numberOfSubdomains', num_cpus_to_use,
_DECOMPOSE_DICT)
_SetDictEntry(master_vm, 'hierarchicalCoeffs.n',
'({} 1 1)'.format(num_cpus_to_use), _DECOMPOSE_DICT)
_SetDictEntry(master_vm, 'castellatedMeshControls.maxGlobalCells',
max_global_cells, _SNAPPY_HEX_MESH_DICT)
_UseMpi(master_vm, num_cpus_to_use, mpi_mapping)
# Run and gather samples.
samples = []
for dimensions in FLAGS.openfoam_dimensions:
results = _RunCase(master_vm, dimensions)
# Update every case run with common metadata.
for result in results:
result.metadata.update(common_metadata)
samples.extend(results)
return samples
def Cleanup(benchmark_spec):
"""Cleans up after the benchmark completes.
The state of the VMs should be equivalent to the state before Prepare was
called.
Args:
benchmark_spec: The benchmark spec for the OpenFOAM benchmark.
"""
del benchmark_spec
|
import aiohttp
from homeassistant.components.elgato.const import DOMAIN
from homeassistant.config_entries import ENTRY_STATE_SETUP_RETRY
from homeassistant.core import HomeAssistant
from tests.components.elgato import init_integration
from tests.test_util.aiohttp import AiohttpClientMocker
async def test_config_entry_not_ready(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the Elgato Key Light configuration entry not ready."""
aioclient_mock.get(
"http://1.2.3.4:9123/elgato/accessory-info", exc=aiohttp.ClientError
)
entry = await init_integration(hass, aioclient_mock)
assert entry.state == ENTRY_STATE_SETUP_RETRY
async def test_unload_config_entry(
hass: HomeAssistant, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the Elgato Key Light configuration entry unloading."""
entry = await init_integration(hass, aioclient_mock)
assert hass.data[DOMAIN]
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert not hass.data.get(DOMAIN)
|
import unittest
from sklearn import datasets
from rgf.sklearn import RGFClassifier
class TestRGF(unittest.TestCase):
def test_classifier(self):
iris = datasets.load_iris()
X, y = iris.data, iris.target
rgf = RGFClassifier(max_leaf=400,
algorithm="RGF_Sib",
test_interval=100,
n_iter=1)
        rgf.fit(X, y)
|
from __future__ import division
import numbers
import numpy as np
import six
from chainer.backends import cuda
from chainer import function
from chainer.utils import type_check
from chainercv.functions.ps_roi_average_pooling_2d import _outsize
class PSROIMaxPooling2D(function.Function):
def __init__(self, outsize, spatial_scale, group_size):
out_c, out_h, out_w = _outsize(outsize)
if out_c is not None and \
not (isinstance(out_c, numbers.Integral) and out_c > 0):
raise TypeError(
'outsize[0] must be positive integer: {}, {}'
.format(type(out_c), out_c))
if not (isinstance(out_h, numbers.Integral) and out_h > 0):
raise TypeError(
'outsize[1] must be positive integer: {}, {}'
.format(type(out_h), out_h))
if not (isinstance(out_w, numbers.Integral) and out_w > 0):
raise TypeError(
'outsize[2] must be positive integer: {}, {}'
.format(type(out_w), out_w))
if isinstance(spatial_scale, numbers.Integral):
spatial_scale = float(spatial_scale)
if not (isinstance(spatial_scale, numbers.Real)
and spatial_scale > 0):
raise TypeError(
'spatial_scale must be a positive float number: {}, {}'
.format(type(spatial_scale), spatial_scale))
if not (isinstance(group_size, numbers.Integral) and group_size > 0):
raise TypeError(
'group_size must be positive integer: {}, {}'
.format(type(group_size), group_size))
self.out_c, self.out_h, self.out_w = out_c, out_h, out_w
self.spatial_scale = spatial_scale
self.group_size = group_size
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 3)
x_type, roi_type, roi_index_type = in_types
type_check.expect(
x_type.dtype == np.float32,
x_type.ndim == 4,
roi_type.dtype == np.float32,
roi_type.ndim == 2,
roi_type.shape[1] == 4,
roi_index_type.dtype == np.int32,
roi_index_type.ndim == 1,
roi_type.shape[0] == roi_index_type.shape[0]
)
def forward_cpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
                    'input channel must be divisible by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = np.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = np.empty(top_data.shape, dtype=np.int32)
spatial_scale = self.spatial_scale
pooled_height = self.out_h
pooled_width = self.out_w
group_size = self.group_size
for i in six.moves.range(top_data.size):
n, ctop, ph, pw = np.unravel_index(i, top_data.shape)
roi_batch_ind = bottom_roi_indices[n]
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
hstart = int(np.floor(ph * bin_size_h + roi_start_h))
wstart = int(np.floor(pw * bin_size_w + roi_start_w))
hend = int(np.ceil((ph + 1) * bin_size_h + roi_start_h))
wend = int(np.ceil((pw + 1) * bin_size_w + roi_start_w))
hstart = min(max(hstart, 0), height)
wstart = min(max(wstart, 0), width)
hend = min(max(hend, 0), height)
wend = min(max(wend, 0), width)
gh = int(np.floor(ph * group_size / pooled_height))
gw = int(np.floor(pw * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
maxval = - np.inf
maxidx = -1
for y in six.moves.range(hstart, hend):
for x in six.moves.range(wstart, wend):
tmpval = bottom_data[roi_batch_ind, c, y, x]
bottom_index = y * width + x
if (tmpval > maxval):
maxval = tmpval
maxidx = bottom_index
top_data[n, ctop, ph, pw] = maxval
self.argmax_data[n, ctop, ph, pw] = maxidx
return top_data,
def forward_gpu(self, inputs):
self.retain_inputs((1, 2))
self._bottom_data_shape = inputs[0].shape
bottom_data, bottom_rois, bottom_roi_indices = inputs
channel, height, width = bottom_data.shape[1:]
if self.out_c is None:
if channel % (self.group_size * self.group_size) != 0:
raise ValueError(
                    'input channel must be divisible by group_size * group_size:'
'{} % {} != 0'
.format(channel, self.group_size * self.group_size))
out_c = channel // (self.group_size * self.group_size)
else:
if channel != self.out_c * self.group_size * self.group_size:
raise ValueError(
'input channel must be equal to '
'outsize[0] * group_size * group_size: {} != {}'
.format(channel,
self.out_c * self.group_size * self.group_size))
out_c = self.out_c
n_roi = bottom_rois.shape[0]
top_data = cuda.cupy.empty(
(n_roi, out_c, self.out_h, self.out_w), dtype=np.float32)
self.argmax_data = cuda.cupy.empty(top_data.shape, np.int32)
cuda.elementwise(
'''
raw T bottom_data, raw T bottom_rois,
raw int32 bottom_roi_indices,
T spatial_scale, int32 channels,
int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size
''',
'T top_data, int32 argmax_data',
'''
// pos in output filter
int ph = (i / pooled_width) % pooled_height;
int pw = i % pooled_width;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw) * bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
wstart = min(max(wstart, 0), width);
hend = min(max(hend, 0), height);
wend = min(max(wend, 0), width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int data_offset = (roi_batch_ind * channels + c) * height * width;
T maxval = - (T) (1.0 / 0.0);
int maxidx = -1;
for (int h = hstart; h < hend; ++h){
for (int w = wstart; w < wend; ++w){
int bottom_index = h * width + w;
float tmpval = bottom_data[data_offset + bottom_index];
if (tmpval > maxval) {
maxval = tmpval;
maxidx = bottom_index;
}
}
}
top_data = maxval;
argmax_data = maxidx;
''', 'ps_roi_max_pooling_2d_fwd'
)(bottom_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channel, height, width,
out_c, self.out_h, self.out_w, self.group_size,
top_data, self.argmax_data)
return top_data,
def backward_cpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
top_diff = gy[0]
height, width = self._bottom_data_shape[2:]
bottom_diff = np.zeros(self._bottom_data_shape, np.float32)
spatial_scale = self.spatial_scale
pooled_height = self.out_h
pooled_width = self.out_w
group_size = self.group_size
for i in six.moves.range(top_diff.size):
n, ctop, ph, pw = np.unravel_index(i, top_diff.shape)
roi_batch_ind = int(bottom_roi_indices[n])
roi_start_h = bottom_rois[n, 0] * spatial_scale
roi_start_w = bottom_rois[n, 1] * spatial_scale
roi_end_h = bottom_rois[n, 2] * spatial_scale
roi_end_w = bottom_rois[n, 3] * spatial_scale
roi_height = max(roi_end_h - roi_start_h, 0.1)
roi_width = max(roi_end_w - roi_start_w, 0.1)
bin_size_h = roi_height / pooled_height
bin_size_w = roi_width / pooled_width
hstart = int(np.floor(ph * bin_size_h + roi_start_h))
wstart = int(np.floor(pw * bin_size_w + roi_start_w))
hend = int(np.ceil((ph + 1) * bin_size_h + roi_start_h))
wend = int(np.ceil((pw + 1) * bin_size_w + roi_start_w))
hstart = min(max(hstart, 0), height)
wstart = min(max(wstart, 0), width)
hend = min(max(hend, 0), height)
wend = min(max(wend, 0), width)
gh = int(np.floor(ph * group_size / pooled_height))
gw = int(np.floor(pw * group_size / pooled_width))
gh = min(max(gh, 0), group_size - 1)
gw = min(max(gw, 0), group_size - 1)
c = (ctop * group_size + gh) * group_size + gw
maxidx = self.argmax_data[n, ctop, ph, pw]
if maxidx != -1:
y = int(maxidx / width)
x = maxidx % width
bottom_diff[roi_batch_ind, c, y, x] \
+= top_diff[n, ctop, ph, pw]
return bottom_diff, None, None
def backward_gpu(self, inputs, gy):
_, bottom_rois, bottom_roi_indices = inputs
channels, height, width = self._bottom_data_shape[1:]
out_c, out_h, out_w = gy[0].shape[1:]
bottom_diff = cuda.cupy.zeros(self._bottom_data_shape, np.float32)
cuda.elementwise(
'''
raw T top_diff, raw int32 argmax_data,
raw T bottom_rois, raw int32 bottom_roi_indices,
T spatial_scale, int32 channels, int32 height, int32 width,
int32 pooled_dim, int32 pooled_height, int32 pooled_width,
int32 group_size
''',
'raw T bottom_diff',
'''
int ph = (i / pooled_width) % pooled_height;
int pw = i % pooled_width;
int ctop = (i / pooled_width / pooled_height) % pooled_dim;
int n = i / pooled_width / pooled_height / pooled_dim;
// [start, end) interval for spatial sampling
int roi_batch_ind = bottom_roi_indices[n];
T roi_start_h = bottom_rois[n * 4 + 0] * spatial_scale;
T roi_start_w = bottom_rois[n * 4 + 1] * spatial_scale;
T roi_end_h = bottom_rois[n * 4 + 2] * spatial_scale;
T roi_end_w = bottom_rois[n * 4 + 3] * spatial_scale;
// Force too small ROIs to be 1x1
T roi_height = max(roi_end_h - roi_start_h, 0.1);
T roi_width = max(roi_end_w - roi_start_w, 0.1); // avoid 0
// Compute w and h at bottom
T bin_size_h = roi_height / static_cast<T>(pooled_height);
T bin_size_w = roi_width / static_cast<T>(pooled_width);
int hstart = floor(
static_cast<T>(ph) * bin_size_h + roi_start_h);
int wstart = floor(
static_cast<T>(pw) * bin_size_w + roi_start_w);
int hend = ceil(
static_cast<T>(ph + 1.0) * bin_size_h + roi_start_h);
int wend = ceil(
static_cast<T>(pw + 1.0) * bin_size_w + roi_start_w);
// Add roi offsets and clip to input boundaries
hstart = min(max(hstart, 0), height);
wstart = min(max(wstart, 0), width);
hend = min(max(hend, 0), height);
wend = min(max(wend, 0), width);
// Compute c at bottom
int gh = floor(
static_cast<T>(ph) * group_size / pooled_height);
int gw = floor(
static_cast<T>(pw) * group_size / pooled_width);
gh = min(max(gh, 0), group_size - 1);
gw = min(max(gw, 0), group_size - 1);
int c = (ctop * group_size + gh) * group_size + gw;
int bottom_diff_offset = (roi_batch_ind * channels + c);
bottom_diff_offset = bottom_diff_offset * height * width;
int top_diff_offset =
(n * pooled_dim + ctop) * pooled_height * pooled_width;
int maxidx =
argmax_data[top_diff_offset + ph * pooled_width + pw];
if (maxidx != -1) {
atomicAdd(
&bottom_diff[bottom_diff_offset + maxidx],
top_diff[top_diff_offset + ph * pooled_width + pw]);
}
''', 'ps_roi_max_pooling_2d_bwd'
)(gy[0], self.argmax_data, bottom_rois, bottom_roi_indices,
self.spatial_scale, channels, height, width,
out_c, out_h, out_w, self.group_size, bottom_diff,
size=gy[0].size)
return bottom_diff, None, None
def ps_roi_max_pooling_2d(
x, rois, roi_indices, outsize,
spatial_scale, group_size
):
"""Position Sensitive Region of Interest (ROI) Max pooling function.
This function computes position sensitive max of input spatial patch
with the given region of interests. Each ROI is splitted into
:math:`(group\_size, group\_size)` regions, and position sensitive values
in each region is computed.
Args:
x (~chainer.Variable): Input variable. The shape is expected to be
            4 dimensional: (n: batch, c: channel, h: height, w: width).
rois (array): Input roi. The shape is expected to
be :math:`(R, 4)`, and each datum is set as below:
(y_min, x_min, y_max, x_max). The dtype is :obj:`numpy.float32`.
roi_indices (array): Input roi indices. The shape is expected to
be :math:`(R, )`. The dtype is :obj:`numpy.int32`.
outsize ((int, int, int) or (int, int) or int): Expected output size
            after pooling: (channel, height, width) or (height, width)
or outsize. ``outsize=o`` and ``outsize=(o, o)`` are equivalent.
Channel parameter is used to assert the input shape.
        spatial_scale (float): Scale by which the ROI is resized.
group_size (int): Position sensitive group size.
Returns:
~chainer.Variable: Output variable.
See the original paper proposing PSROIPooling:
`R-FCN <https://arxiv.org/abs/1605.06409>`_.
"""
return PSROIMaxPooling2D(outsize, spatial_scale,
group_size)(x, rois, roi_indices)
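# A minimal CPU smoke-test sketch (assumptions: the channel count must equal
# outsize[0] * group_size**2, i.e. 2 * 7 * 7 = 98 below; the data is random
# and this block is illustrative rather than part of the library API).
if __name__ == '__main__':
    x = np.random.uniform(size=(1, 98, 24, 24)).astype(np.float32)
    # Each ROI is given as (y_min, x_min, y_max, x_max) in the input scale.
    rois = np.array([[0.0, 0.0, 12.0, 12.0]], dtype=np.float32)
    roi_indices = np.array([0], dtype=np.int32)
    y = ps_roi_max_pooling_2d(
        x, rois, roi_indices, outsize=(2, 7, 7),
        spatial_scale=1.0, group_size=7)
    # One ROI pooled into an (out_c, out_h, out_w) = (2, 7, 7) map.
    print(y.shape)  # (1, 2, 7, 7)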
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import mock
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import s3
class S3Test(unittest.TestCase):
def setUp(self):
super(S3Test, self).setUp()
flag_values = {
'timeout_minutes': 0,
'persistent_timeout_minutes': 0}
p = mock.patch.object(s3, 'FLAGS')
flags_mock = p.start()
flags_mock.configure_mock(**flag_values)
self.mock_command = mock.patch.object(vm_util, 'IssueCommand').start()
self.mock_retryable_command = mock.patch.object(
vm_util, 'IssueRetryableCommand').start()
self.s3_service = s3.S3Service()
self.s3_service.PrepareService(None) # will use s3.DEFAULT_AWS_REGION
def tearDown(self):
super(S3Test, self).tearDown()
mock.patch.stopall()
def test_make_bucket(self):
self.mock_command.return_value = (None, None, None)
self.s3_service.MakeBucket(bucket_name='test_bucket')
self.mock_command.assert_called_once_with([
'aws', 's3', 'mb', 's3://test_bucket',
'--region={}'.format(s3.DEFAULT_AWS_REGION)], raise_on_failure=False)
self.mock_retryable_command.assert_called_once_with([
'aws', 's3api', 'put-bucket-tagging', '--bucket', 'test_bucket',
'--tagging', 'TagSet=[]', '--region={}'.format(s3.DEFAULT_AWS_REGION)])
if __name__ == '__main__':
unittest.main()
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.setup import async_setup_component
from tests.async_mock import ANY
from tests.common import async_fire_mqtt_message, async_mock_service, mock_component
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass, mqtt_mock):
"""Initialize components."""
mock_component(hass, "group")
async def test_if_fires_on_topic_match(hass, calls):
"""Test if message is fired on topic match."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "mqtt", "topic": "test-topic"},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.platform }} - {{ trigger.topic }}"
" - {{ trigger.payload }} - "
"{{ trigger.payload_json.hello }}"
},
},
}
},
)
async_fire_mqtt_message(hass, "test-topic", '{ "hello": "world" }')
await hass.async_block_till_done()
assert len(calls) == 1
assert 'mqtt - test-topic - { "hello": "world" } - world' == calls[0].data["some"]
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
async_fire_mqtt_message(hass, "test-topic", "test_payload")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_fires_on_topic_and_payload_match(hass, calls):
"""Test if message is fired on topic and payload match."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "mqtt",
"topic": "test-topic",
"payload": "hello",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_mqtt_message(hass, "test-topic", "hello")
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_on_topic_but_no_payload_match(hass, calls):
"""Test if message is not fired on topic but no payload."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "mqtt",
"topic": "test-topic",
"payload": "hello",
},
"action": {"service": "test.automation"},
}
},
)
async_fire_mqtt_message(hass, "test-topic", "no-hello")
await hass.async_block_till_done()
assert len(calls) == 0
async def test_encoding_default(hass, calls, mqtt_mock):
"""Test default encoding."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "mqtt", "topic": "test-topic"},
"action": {"service": "test.automation"},
}
},
)
mqtt_mock.async_subscribe.assert_called_once_with("test-topic", ANY, 0, "utf-8")
async def test_encoding_custom(hass, calls, mqtt_mock):
"""Test default encoding."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "mqtt", "topic": "test-topic", "encoding": ""},
"action": {"service": "test.automation"},
}
},
)
mqtt_mock.async_subscribe.assert_called_once_with("test-topic", ANY, 0, None)
|
import argparse
import datetime
import sys
import time
import a_sync
from paasta_tools.mesos_tools import get_mesos_master
from paasta_tools.utils import load_system_paasta_config
OUTPUT_FORMAT = "{:<30} {:<8} {:<20} {:<27} {}"
FRAMEWORK_NAME = "marathon"
MAX_BOUNCE_TIME_IN_HOURS = 4
def parse_args():
parser = argparse.ArgumentParser(
description="Find all containers serving previous push versions."
)
parser.add_argument(
"--bounce-time",
dest="bounce_time",
type=int,
default=MAX_BOUNCE_TIME_IN_HOURS,
help=(
"Ignore versions that were launched in the last BOUNCE_TIME hours "
"because they probably are still bouncing."
),
)
return parser.parse_args()
def get_mesos_state():
state = a_sync.block(get_mesos_master(use_mesos_cache=True).state)
return state
def marathon_tasks(state):
for framework in state.get("frameworks", []):
if framework["name"].lower().startswith(FRAMEWORK_NAME):
for task in framework.get("tasks", []):
yield task
def create_slave_id_to_hostname_dict(state):
res = {}
for slave in state["slaves"]:
res[slave["id"]] = slave["hostname"]
return res
def group_running_tasks_by_id_and_gitsha(state):
res = {}
for t in marathon_tasks(state):
if t["state"] == "TASK_RUNNING":
task_id = t["name"][: t["name"].find(".", t["name"].find(".") + 1)]
gitsha = t["name"][len(task_id) + 1 : t["name"].find(".", len(task_id) + 1)]
res.setdefault(task_id, {}).setdefault(gitsha, []).append(t)
return res
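# Illustrative split of a Marathon task name (hypothetical name): the task id
# is everything up to the second dot and the gitsha sits between the second
# and third dots:
#
#     name    = "myservice.main.gitdeadbeef.config12345678.uuid"
#     task_id = "myservice.main"
#     gitsha  = "gitdeadbeef"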
def detect_outdated_gitshas(versions, max_bounce_time_in_hours):
"""Find versions that should have drained more than 'max_bounce_time_in_hours' ago"""
if len(versions) < 2:
return []
deploy_time = {}
latest_deploy = 0
for version, tasks in versions.items():
deploy_time[version] = sum(t["statuses"][0]["timestamp"] for t in tasks) / len(
tasks
)
if (
deploy_time[version] > latest_deploy
and time.time() - deploy_time[version] > max_bounce_time_in_hours * 3600
):
latest_deploy = deploy_time[version]
return [version for version, dtime in deploy_time.items() if dtime < latest_deploy]
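# Illustrative outcome (hypothetical data): with a 4h bounce window, a version
# whose tasks launched ~10h ago is reported once a newer version launched
# ~5h ago has become the latest "settled" deploy:
#
#     versions = {"gitoldsha0": [<tasks launched ~10h ago>],
#                 "gitnewsha1": [<tasks launched ~5h ago>]}
#     detect_outdated_gitshas(versions, max_bounce_time_in_hours=4)
#     # -> ["gitoldsha0"]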
def report_outdated_instances(task_id, gitsha, tasks, slave_id2hostname):
output = []
remedy = []
for t in tasks:
deploy_time = datetime.datetime.fromtimestamp(
int(t["statuses"][0]["timestamp"])
).strftime("%Y-%m-%d %H:%M:%S")
container_name = "mesos-{}.{}".format(
t["slave_id"], t["statuses"][0]["container_status"]["container_id"]["value"]
)
hostname = slave_id2hostname[t["slave_id"]]
hostname = hostname[: hostname.find(".")]
service_instance = task_id.replace("--", "_")
output.append(
OUTPUT_FORMAT.format(
service_instance[:30], gitsha[3:], deploy_time, hostname, container_name
)
)
remedy.append(
'ssh {0} "sudo hadown {1}; sleep 10; sudo docker stop {2}; sudo haup {1}"'.format(
hostname, service_instance, container_name
)
)
return output, remedy
def check_mesos_tasks(max_bounce_time_in_hours=MAX_BOUNCE_TIME_IN_HOURS):
output = []
remedy = []
state = get_mesos_state()
aggregated_tasks = group_running_tasks_by_id_and_gitsha(state)
slave_id2hostname = create_slave_id_to_hostname_dict(state)
for task_id, versions in aggregated_tasks.items():
for gitsha in detect_outdated_gitshas(versions, max_bounce_time_in_hours):
temp_output, temp_remedy = report_outdated_instances(
task_id, gitsha, versions[gitsha], slave_id2hostname
)
output.extend(temp_output)
remedy.extend(temp_remedy)
return output, remedy
def main():
args = parse_args()
cluster = load_system_paasta_config().get_cluster()
output, remedy = check_mesos_tasks(args.bounce_time)
if output:
print(
"CRITICAL - There are {} tasks running in {} that are more than {}h older than their"
" last bounce.".format(len(output), cluster, args.bounce_time)
)
print(
OUTPUT_FORMAT.format(
"SERVICE.INSTANCE", "COMMIT", "CREATED", "HOSTNAME", "CONTAINER"
)
)
print("\n".join(output))
print("")
print("Run the following commands to terminate them:")
print("{code}")
print("\n".join(remedy))
print("{code}")
return 1
else:
print(f"OK - There are no outdated tasks in {cluster}")
return 0
if __name__ == "__main__":
sys.exit(main())
|
import sys
import os
import tempfile
import time
import shutil
from stash.tests.stashtest import StashTestCase, ON_TRAVIS
class GetstashTests(StashTestCase):
"""
Test for the installation using getstash.py
"""
def get_source_path(self):
"""
Return the path to the StaSh root directory
:return: path of $STASH_ROOT
:rtype: str
"""
cp = sys.modules[self.stash.__module__].__file__
rp = os.path.dirname(cp)
return rp
def get_getstash_path(self):
"""
Return the path of getstash.py
:return: the path of getstash.py
:rtype: str
"""
rp = self.get_source_path()
gsp = os.path.join(rp, "getstash.py")
return gsp
def load_getstash(self):
"""
Load and compile getstash.py
:return: the compiled code
:rtype: Code
"""
p = self.get_getstash_path()
with open(p, "r") as fin:
content = fin.read()
code = compile(content, p, "exec", dont_inherit=True)
return code
def run_getstash(self, repo=None, branch=None, install_path=None, launcher_path=None, zippath=None, dist=None, dryrun=False, asuser=False):
"""
Run getstash with the specified arguments.
Not all arguments may be available for all installation types.
:param repo: repo to pass to getstash.py
:type repo: str
:param branch: branch to pass to getstash.py
:type branch: str
:param install_path: path to install into
:type install_path: str
:param launcher_path: path to install launcher to
:type launcher_path: str
:param zippath: alternative path to zipfile to install from
:type zippath: str
:param dist: install type to force
:type dist: str
:param dryrun: if True, tell the installer to not actually do anything
:type dryrun: bool
:param asuser: if True, install for user
:type asuser: bool
"""
# build namespace to run installer in
ns = {
"__name__": "__main__",
}
# we add the keys only when they are specified so getstash assumes default values.
if repo is not None:
ns["_owner"] = repo
if branch is not None:
ns["_branch"] = branch
if install_path is not None:
ns["_target"] = install_path
if launcher_path is not None:
ns["_launcher_path"] = launcher_path
if zippath is not None:
ns["_zippath"] = zippath
if dist is not None:
ns["_force_dist"] = dist
if dryrun:
ns["_dryrun"] = True
if asuser:
ns["_asuser"] = True
code = self.load_getstash()
exec(code, ns, ns)
def get_new_tempdir(self, create=True):
"""
Create a temporary directory and return the path to it.
:param create: if True, create the directory
:type create: bool
:return: path to a temporary directory
:rtype: str
"""
tp = tempfile.gettempdir()
p = os.path.join(tp, "stash_test_getstash" + str(time.time()))
if not os.path.exists(p) and create:
os.makedirs(p)
return p
def create_stash_zipfile(self):
"""
Create a github-like zipfile from this source and return the path.
:return: path to zipfile
:rtype: str
"""
tp = self.get_new_tempdir(create=True)
toplevel_name = "stash-testing"
toplevel = os.path.join(tp, toplevel_name)
zipname = "{}.zip".format(toplevel)
zippath = os.path.join(tp, zipname)
zippath_wo_ext = os.path.splitext(zippath)[0]
sourcepath = self.get_source_path()
shutil.copytree(sourcepath, toplevel)
shutil.make_archive(zippath_wo_ext, "zip", tp, toplevel_name)
return zippath
def test_getstash_exists(self):
"""
Check that getstash.py exists in the right repository
You should NOT modify this test. 'getstash.py' **must** be in the root directory for selfupdate to work.
"""
p = self.get_getstash_path()
self.assertTrue(os.path.exists(p), "getstash.py not in StaSh root directory!")
def test_getstash_compiles(self):
"""
Test that getstash.py successfully compiles.
"""
self.load_getstash()
def test_install_pythonista(self):
"""
Run a dummy install for pythonista.
"""
zp = self.create_stash_zipfile()
td = self.get_new_tempdir()
sd = os.path.join(td, "stash")
lp = os.path.join(td, "launch_stash.py")
self.run_getstash(install_path=sd, launcher_path=lp, zippath=zp, dist="pythonista")
expected = [
"bin",
"system",
"man",
"lib",
]
self.assertTrue(os.path.exists(sd), "StaSh base directory not found after install!")
self.assertTrue(os.path.exists(lp), "'launch_stash.py' not found after install!")
for fn in expected:
p = os.path.join(sd, fn)
self.assertTrue(os.path.exists(sd), "'{}' not found after install!".format(p))
def test_install_setup(self):
"""
Run a dummy install using setup.py install
"""
zp = self.create_stash_zipfile()
td = self.get_new_tempdir()
sd = os.path.join(td, "stash")
asuser = (not ON_TRAVIS)
self.run_getstash(zippath=zp, dist="setup", asuser=asuser, dryrun=True)
|
import json
from lemur import database
from lemur.common.utils import truthiness
from lemur.extensions import metrics
from lemur.authorities.models import Authority
from lemur.certificates.models import Certificate
from lemur.roles import service as role_service
from lemur.certificates.service import upload
def update(authority_id, description, owner, active, roles):
"""
Update an authority with new values.
    :param authority_id:
    :param description:
    :param owner:
    :param active:
    :param roles: roles that are allowed to use this authority
    :return:
"""
authority = get(authority_id)
authority.roles = roles
authority.active = active
authority.description = description
authority.owner = owner
return database.update(authority)
def update_options(authority_id, options):
"""
Update an authority with new options.
:param authority_id:
:param options: the new options to be saved into the authority
:return:
"""
authority = get(authority_id)
authority.options = options
return database.update(authority)
def mint(**kwargs):
"""
Creates the authority based on the plugin provided.
"""
issuer = kwargs["plugin"]["plugin_object"]
values = issuer.create_authority(kwargs)
# support older plugins
if len(values) == 3:
body, chain, roles = values
private_key = None
elif len(values) == 4:
body, private_key, chain, roles = values
roles = create_authority_roles(
roles,
kwargs["owner"],
kwargs["plugin"]["plugin_object"].title,
kwargs["creator"],
)
return body, private_key, chain, roles
def create_authority_roles(roles, owner, plugin_title, creator):
"""
Creates all of the necessary authority roles.
    :param roles:
    :param owner:
    :param plugin_title:
    :param creator:
    :return:
"""
role_objs = []
for r in roles:
role = role_service.get_by_name(r["name"])
if not role:
role = role_service.create(
r["name"],
password=r["password"],
description="Auto generated role for {0}".format(plugin_title),
username=r["username"],
)
# the user creating the authority should be able to administer it
if role.username == "admin":
creator.roles.append(role)
role_objs.append(role)
    # create a role for the owner and assign it
owner_role = role_service.get_by_name(owner)
if not owner_role:
owner_role = role_service.create(
owner, description="Auto generated role based on owner: {0}".format(owner)
)
role_objs.append(owner_role)
return role_objs
def create(**kwargs):
"""
Creates a new authority.
"""
body, private_key, chain, roles = mint(**kwargs)
kwargs["creator"].roles = list(set(list(kwargs["creator"].roles) + roles))
kwargs["body"] = body
kwargs["private_key"] = private_key
kwargs["chain"] = chain
if kwargs.get("roles"):
kwargs["roles"] += roles
else:
kwargs["roles"] = roles
cert = upload(**kwargs)
kwargs["authority_certificate"] = cert
if kwargs.get("plugin", {}).get("plugin_options", []):
kwargs["options"] = json.dumps(kwargs["plugin"]["plugin_options"])
authority = Authority(**kwargs)
authority = database.create(authority)
kwargs["creator"].authorities.append(authority)
metrics.send(
"authority_created", "counter", 1, metric_tags=dict(owner=authority.owner)
)
return authority
def get_all():
"""
Get all authorities that are currently in Lemur.
:rtype : List
:return:
"""
query = database.session_query(Authority)
return database.find_all(query, Authority, {}).all()
def get(authority_id):
"""
    Retrieves an authority given its ID.
:param authority_id:
:return:
"""
return database.get(Authority, authority_id)
def get_by_name(authority_name):
"""
    Retrieves an authority given its name.
:param authority_name:
:return:
"""
return database.get(Authority, authority_name, field="name")
def get_authority_role(ca_name, creator=None):
"""
    Attempts to get the authority role for a given CA, using current_user
    as a basis for accomplishing that.
    :param ca_name:
    :param creator:
"""
if creator:
if creator.is_admin:
return role_service.get_by_name("{0}_admin".format(ca_name))
return role_service.get_by_name("{0}_operator".format(ca_name))
def render(args):
"""
Helper that helps us render the REST Api responses.
:param args:
:return:
"""
query = database.session_query(Authority)
filt = args.pop("filter")
if filt:
terms = filt.split(";")
if "active" in filt:
query = query.filter(Authority.active == truthiness(terms[1]))
elif "cn" in filt:
term = "%{0}%".format(terms[1])
sub_query = (
database.session_query(Certificate.root_authority_id)
.filter(Certificate.cn.ilike(term))
.subquery()
)
query = query.filter(Authority.id.in_(sub_query))
else:
query = database.filter(query, Authority, terms)
    # we make sure that a user can only use an authority they either own or are a member of - admins can see all
if not args["user"].is_admin:
authority_ids = []
for authority in args["user"].authorities:
authority_ids.append(authority.id)
for role in args["user"].roles:
for authority in role.authorities:
authority_ids.append(authority.id)
query = query.filter(Authority.id.in_(authority_ids))
return database.sort_and_page(query, Authority, args)
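# Illustrative sketch (not part of Lemur): render() above expects ``filter`` to
# be a single "<field>;<value>" string, e.g. "active;true" or "cn;example.com".
# The helper below only demonstrates how such a string is split into the terms
# render() matches on; it performs no database queries.
def _example_filter_terms(filt="cn;example.com"):
    terms = filt.split(";")
    return {"field": terms[0], "value": terms[1]}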
|
from __future__ import division, absolute_import
# import compatibility functions and utilities
from .utils import _range
# to inherit from the tqdm class
from .std import tqdm as std_tqdm
from .std import TqdmExperimentalWarning
from warnings import warn
__author__ = {"github.com/": ["casperdcl", "lrq3000"]}
__all__ = ['tqdm_gui', 'tgrange', 'tqdm', 'trange']
class tqdm_gui(std_tqdm): # pragma: no cover
"""
Experimental GUI version of tqdm!
"""
# TODO: @classmethod: write() on GUI?
def __init__(self, *args, **kwargs):
import matplotlib as mpl
import matplotlib.pyplot as plt
from collections import deque
kwargs['gui'] = True
super(tqdm_gui, self).__init__(*args, **kwargs)
# Initialize the GUI display
if self.disable or not kwargs['gui']:
return
warn('GUI is experimental/alpha', TqdmExperimentalWarning, stacklevel=2)
self.mpl = mpl
self.plt = plt
self.sp = None
# Remember if external environment uses toolbars
self.toolbar = self.mpl.rcParams['toolbar']
self.mpl.rcParams['toolbar'] = 'None'
self.mininterval = max(self.mininterval, 0.5)
self.fig, ax = plt.subplots(figsize=(9, 2.2))
# self.fig.subplots_adjust(bottom=0.2)
total = len(self)
if total is not None:
self.xdata = []
self.ydata = []
self.zdata = []
else:
self.xdata = deque([])
self.ydata = deque([])
self.zdata = deque([])
self.line1, = ax.plot(self.xdata, self.ydata, color='b')
self.line2, = ax.plot(self.xdata, self.zdata, color='k')
ax.set_ylim(0, 0.001)
if total is not None:
ax.set_xlim(0, 100)
ax.set_xlabel('percent')
self.fig.legend((self.line1, self.line2), ('cur', 'est'),
loc='center right')
# progressbar
self.hspan = plt.axhspan(0, 0.001,
xmin=0, xmax=0, color='g')
else:
# ax.set_xlim(-60, 0)
ax.set_xlim(0, 60)
ax.invert_xaxis()
ax.set_xlabel('seconds')
ax.legend(('cur', 'est'), loc='lower left')
ax.grid()
# ax.set_xlabel('seconds')
ax.set_ylabel((self.unit if self.unit else 'it') + '/s')
if self.unit_scale:
plt.ticklabel_format(style='sci', axis='y',
scilimits=(0, 0))
ax.yaxis.get_offset_text().set_x(-0.15)
# Remember if external environment is interactive
self.wasion = plt.isinteractive()
plt.ion()
self.ax = ax
def __iter__(self):
# TODO: somehow allow the following:
# if not self.gui:
# return super(tqdm_gui, self).__iter__()
iterable = self.iterable
if self.disable:
for obj in iterable:
yield obj
return
# ncols = self.ncols
mininterval = self.mininterval
maxinterval = self.maxinterval
miniters = self.miniters
dynamic_miniters = self.dynamic_miniters
last_print_t = self.last_print_t
last_print_n = self.last_print_n
n = self.n
# dynamic_ncols = self.dynamic_ncols
smoothing = self.smoothing
avg_time = self.avg_time
time = self._time
for obj in iterable:
yield obj
# Update and possibly print the progressbar.
# Note: does not call self.update(1) for speed optimisation.
n += 1
# check counter first to avoid calls to time()
if n - last_print_n >= self.miniters:
miniters = self.miniters # watch monitoring thread changes
delta_t = time() - last_print_t
if delta_t >= mininterval:
cur_t = time()
delta_it = n - last_print_n
# EMA (not just overall average)
if smoothing and delta_t and delta_it:
rate = delta_t / delta_it
avg_time = self.ema(rate, avg_time, smoothing)
self.avg_time = avg_time
self.n = n
self.display()
# If no `miniters` was specified, adjust automatically
# to the max iteration rate seen so far between 2 prints
if dynamic_miniters:
if maxinterval and delta_t >= maxinterval:
# Adjust miniters to time interval by rule of 3
if mininterval:
# Set miniters to correspond to mininterval
miniters = delta_it * mininterval / delta_t
else:
# Set miniters to correspond to maxinterval
miniters = delta_it * maxinterval / delta_t
elif smoothing:
# EMA-weight miniters to converge
# towards the timeframe of mininterval
rate = delta_it
if mininterval and delta_t:
rate *= mininterval / delta_t
miniters = self.ema(rate, miniters, smoothing)
else:
# Maximum nb of iterations between 2 prints
miniters = max(miniters, delta_it)
# Store old values for next call
self.n = self.last_print_n = last_print_n = n
self.last_print_t = last_print_t = cur_t
self.miniters = miniters
# Closing the progress bar.
# Update some internal variables for close().
self.last_print_n = last_print_n
self.n = n
self.miniters = miniters
self.close()
def update(self, n=1):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
if n < 0:
self.last_print_n += n # for auto-refresh logic to work
self.n += n
# check counter first to reduce calls to time()
if self.n - self.last_print_n >= self.miniters:
delta_t = self._time() - self.last_print_t
if delta_t >= self.mininterval:
cur_t = self._time()
delta_it = self.n - self.last_print_n # >= n
# elapsed = cur_t - self.start_t
# EMA (not just overall average)
if self.smoothing and delta_t and delta_it:
rate = delta_t / delta_it
self.avg_time = self.ema(
rate, self.avg_time, self.smoothing)
self.display()
# If no `miniters` was specified, adjust automatically to the
# maximum iteration rate seen so far between two prints.
# e.g.: After running `tqdm.update(5)`, subsequent
# calls to `tqdm.update()` will only cause an update after
# at least 5 more iterations.
if self.dynamic_miniters:
if self.maxinterval and delta_t >= self.maxinterval:
if self.mininterval:
self.miniters = delta_it * self.mininterval \
/ delta_t
else:
self.miniters = delta_it * self.maxinterval \
/ delta_t
elif self.smoothing:
self.miniters = self.smoothing * delta_it * \
(self.mininterval / delta_t
if self.mininterval and delta_t
else 1) + \
(1 - self.smoothing) * self.miniters
else:
self.miniters = max(self.miniters, delta_it)
# Store old values for next call
self.last_print_n = self.n
self.last_print_t = cur_t
def close(self):
# if not self.gui:
# return super(tqdm_gui, self).close()
if self.disable:
return
self.disable = True
with self.get_lock():
self._instances.remove(self)
# Restore toolbars
self.mpl.rcParams['toolbar'] = self.toolbar
# Return to non-interactive mode
if not self.wasion:
self.plt.ioff()
if not self.leave:
self.plt.close(self.fig)
def display(self):
n = self.n
cur_t = self._time()
elapsed = cur_t - self.start_t
delta_it = n - self.last_print_n
delta_t = cur_t - self.last_print_t
# Inline due to multiple calls
total = self.total
xdata = self.xdata
ydata = self.ydata
zdata = self.zdata
ax = self.ax
line1 = self.line1
line2 = self.line2
# instantaneous rate
y = delta_it / delta_t
# overall rate
z = n / elapsed
# update line data
xdata.append(n * 100.0 / total if total else cur_t)
ydata.append(y)
zdata.append(z)
# Discard old values
# xmin, xmax = ax.get_xlim()
# if (not total) and elapsed > xmin * 1.1:
if (not total) and elapsed > 66:
xdata.popleft()
ydata.popleft()
zdata.popleft()
ymin, ymax = ax.get_ylim()
if y > ymax or z > ymax:
ymax = 1.1 * y
ax.set_ylim(ymin, ymax)
ax.figure.canvas.draw()
if total:
line1.set_data(xdata, ydata)
line2.set_data(xdata, zdata)
try:
poly_lims = self.hspan.get_xy()
except AttributeError:
self.hspan = self.plt.axhspan(
0, 0.001, xmin=0, xmax=0, color='g')
poly_lims = self.hspan.get_xy()
poly_lims[0, 1] = ymin
poly_lims[1, 1] = ymax
poly_lims[2] = [n / total, ymax]
poly_lims[3] = [poly_lims[2, 0], ymin]
if len(poly_lims) > 4:
poly_lims[4, 1] = ymin
self.hspan.set_xy(poly_lims)
else:
t_ago = [cur_t - i for i in xdata]
line1.set_data(t_ago, ydata)
line2.set_data(t_ago, zdata)
ax.set_title(self.format_meter(
n, total, elapsed, 0,
self.desc, self.ascii, self.unit, self.unit_scale,
1 / self.avg_time if self.avg_time else None, self.bar_format,
self.postfix, self.unit_divisor),
fontname="DejaVu Sans Mono", fontsize=11)
self.plt.pause(1e-9)
def tgrange(*args, **kwargs):
"""
A shortcut for `tqdm.gui.tqdm(xrange(*args), **kwargs)`.
On Python3+, `range` is used instead of `xrange`.
"""
return tqdm_gui(_range(*args), **kwargs)
# Aliases
tqdm = tqdm_gui
trange = tgrange
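# Minimal usage sketch (illustrative, not part of tqdm): requires matplotlib and
# an interactive display, and tqdm_gui is experimental, as warned above.
def _example_gui_progress():
    from time import sleep
    for _ in tqdm_gui(range(100), desc="demo"):
        sleep(0.01)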
|
import itertools
import platform
import threading
from http.client import HTTPConnection
import cherrypy
from cherrypy._cpcompat import HTTPSConnection
from cherrypy.test import helper
data = object()
class ReferenceTests(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self, *args, **kwargs):
cherrypy.request.thing = data
return 'Hello world!'
cherrypy.tree.mount(Root())
def test_threadlocal_garbage(self):
if platform.system() == 'Darwin':
self.skip('queue issues; see #1474')
success = itertools.count()
def getpage():
host = '%s:%s' % (self.interface(), self.PORT)
if self.scheme == 'https':
c = HTTPSConnection(host)
else:
c = HTTPConnection(host)
try:
c.putrequest('GET', '/')
c.endheaders()
response = c.getresponse()
body = response.read()
self.assertEqual(response.status, 200)
self.assertEqual(body, b'Hello world!')
finally:
c.close()
next(success)
ITERATIONS = 25
ts = [
threading.Thread(target=getpage)
for _ in range(ITERATIONS)
]
for t in ts:
t.start()
for t in ts:
t.join()
self.assertEqual(next(success), ITERATIONS)
|
from __future__ import (absolute_import, division)
from copy import deepcopy
import math
import sys
import numpy as np
from numpy import dot, zeros, eye
from filterpy.stats import logpdf
from filterpy.common import pretty_str, reshape_z
class InformationFilter(object):
"""
    Create a linear Information filter. Information filters work with the
    inverse of the state covariance (the information matrix), which makes it
    easy to denote having no information at initialization.
You are responsible for setting the various state variables to reasonable
values; the defaults below will not give you a functional filter.
Parameters
----------
dim_x : int
Number of state variables for the filter. For example, if you
are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
This is used to set the default size of P, Q, and u
dim_z : int
        Number of measurement inputs. For example, if the sensor
provides you with position in (x,y), dim_z would be 2.
dim_u : int (optional)
size of the control input, if it is being used.
Default value of 0 indicates it is not used.
Attributes
----------
x : numpy.array(dim_x, 1)
State estimate vector
P_inv : numpy.array(dim_x, dim_x)
inverse state covariance matrix
x_prior : numpy.array(dim_x, 1)
Prior (predicted) state estimate. The *_prior and *_post attributes
        are for convenience; they store the prior and posterior of the
current epoch. Read Only.
P_inv_prior : numpy.array(dim_x, dim_x)
Inverse prior (predicted) state covariance matrix. Read Only.
x_post : numpy.array(dim_x, 1)
Posterior (updated) state estimate. Read Only.
P_inv_post : numpy.array(dim_x, dim_x)
Inverse posterior (updated) state covariance matrix. Read Only.
z : ndarray
Last measurement used in update(). Read only.
R_inv : numpy.array(dim_z, dim_z)
inverse of measurement noise matrix
Q : numpy.array(dim_x, dim_x)
Process noise matrix
H : numpy.array(dim_z, dim_x)
Measurement function
y : numpy.array
Residual of the update step. Read only.
K : numpy.array(dim_x, dim_z)
Kalman gain of the update step. Read only.
S : numpy.array
        System uncertainty projected into measurement space. Read only.
log_likelihood : float
log-likelihood of the last measurement. Read only.
likelihood : float
        likelihood of the last measurement. Read only.
Computed from the log-likelihood. The log-likelihood can be very
small, meaning a large negative value such as -28000. Taking the
exp() of that results in 0.0, which can break typical algorithms
which multiply by this value, so by default we always return a
number >= sys.float_info.min.
inv : function, default numpy.linalg.inv
If you prefer another inverse function, such as the Moore-Penrose
pseudo inverse, set it to that instead: kf.inv = np.linalg.pinv
Examples
--------
See my book Kalman and Bayesian Filters in Python
https://github.com/rlabbe/Kalman-and-Bayesian-Filters-in-Python
"""
def __init__(self, dim_x, dim_z, dim_u=0, compute_log_likelihood=True):
if dim_x < 1:
raise ValueError('dim_x must be 1 or greater')
if dim_z < 1:
raise ValueError('dim_z must be 1 or greater')
if dim_u < 0:
raise ValueError('dim_u must be 0 or greater')
self.dim_x = dim_x
self.dim_z = dim_z
self.dim_u = dim_u
self.x = zeros((dim_x, 1)) # state
        self.P_inv = eye(dim_x)            # inverse of the state covariance
self.Q = eye(dim_x) # process uncertainty
self.B = 0. # control transition matrix
self._F = 0. # state transition matrix
self._F_inv = 0. # state transition matrix
self.H = np.zeros((dim_z, dim_x)) # Measurement function
        self.R_inv = eye(dim_z)            # inverse of the measurement noise
self.z = np.array([[None]*self.dim_z]).T
# gain and residual are computed during the innovation step. We
# save them so that in case you want to inspect them for various
# purposes
self.K = 0. # kalman gain
self.y = zeros((dim_z, 1))
self.z = zeros((dim_z, 1))
self.S = 0. # system uncertainty in measurement space
# identity matrix. Do not alter this.
self._I = np.eye(dim_x)
self._no_information = False
self.compute_log_likelihood = compute_log_likelihood
self.log_likelihood = math.log(sys.float_info.min)
self.likelihood = sys.float_info.min
self.inv = np.linalg.inv
# save priors and posteriors
self.x_prior = np.copy(self.x)
self.P_inv_prior = np.copy(self.P_inv)
self.x_post = np.copy(self.x)
self.P_inv_post = np.copy(self.P_inv)
def update(self, z, R_inv=None):
"""
Add a new measurement (z) to the kalman filter. If z is None, nothing
is changed.
Parameters
----------
z : np.array
measurement for this update.
        R_inv : np.array, scalar, or None
            Optionally provide R_inv to override the inverse measurement noise
            for this one call, otherwise self.R_inv will be used.
"""
if z is None:
self.z = None
self.x_post = self.x.copy()
self.P_inv_post = self.P_inv.copy()
return
if R_inv is None:
R_inv = self.R_inv
elif np.isscalar(R_inv):
R_inv = eye(self.dim_z) * R_inv
# rename for readability and a tiny extra bit of speed
H = self.H
H_T = H.T
P_inv = self.P_inv
x = self.x
if self._no_information:
self.x = dot(P_inv, x) + dot(H_T, R_inv).dot(z)
self.P_inv = P_inv + dot(H_T, R_inv).dot(H)
self.log_likelihood = math.log(sys.float_info.min)
self.likelihood = sys.float_info.min
else:
# y = z - Hx
# error (residual) between measurement and prediction
self.y = z - dot(H, x)
# S = HPH' + R
# project system uncertainty into measurement space
self.S = P_inv + dot(H_T, R_inv).dot(H)
self.K = dot(self.inv(self.S), H_T).dot(R_inv)
# x = x + Ky
# predict new x with residual scaled by the kalman gain
self.x = x + dot(self.K, self.y)
self.P_inv = P_inv + dot(H_T, R_inv).dot(H)
self.z = np.copy(reshape_z(z, self.dim_z, np.ndim(self.x)))
if self.compute_log_likelihood:
self.log_likelihood = logpdf(x=self.y, cov=self.S)
self.likelihood = math.exp(self.log_likelihood)
if self.likelihood == 0:
self.likelihood = sys.float_info.min
# save measurement and posterior state
self.z = deepcopy(z)
self.x_post = self.x.copy()
self.P_inv_post = self.P_inv.copy()
def predict(self, u=0):
""" Predict next position.
Parameters
----------
u : ndarray
Optional control vector. If non-zero, it is multiplied by B
to create the control input into the system.
"""
# x = Fx + Bu
A = dot(self._F_inv.T, self.P_inv).dot(self._F_inv)
#pylint: disable=bare-except
try:
AI = self.inv(A)
invertable = True
if self._no_information:
try:
self.x = dot(self.inv(self.P_inv), self.x)
except:
self.x = dot(0, self.x)
self._no_information = False
except:
invertable = False
self._no_information = True
if invertable:
self.x = dot(self._F, self.x) + dot(self.B, u)
self.P_inv = self.inv(AI + self.Q)
# save priors
self.P_inv_prior = np.copy(self.P_inv)
self.x_prior = np.copy(self.x)
else:
I_PF = self._I - dot(self.P_inv, self._F_inv)
FTI = self.inv(self._F.T)
FTIX = dot(FTI, self.x)
AQI = self.inv(A + self.Q)
self.x = dot(FTI, dot(I_PF, AQI).dot(FTIX))
# save priors
self.x_prior = np.copy(self.x)
self.P_inv_prior = np.copy(AQI)
def batch_filter(self, zs, Rs=None, update_first=False, saver=None):
""" Batch processes a sequences of measurements.
Parameters
----------
zs : list-like
list of measurements at each time step `self.dt` Missing
measurements must be represented by 'None'.
Rs : list-like, optional
optional list of values to use for the measurement error
covariance; a value of None in any position will cause the filter
to use `self.R` for that time step.
update_first : bool, optional,
controls whether the order of operations is update followed by
predict, or predict followed by update. Default is predict->update.
saver : filterpy.common.Saver, optional
filterpy.common.Saver object. If provided, saver.save() will be
called after every epoch
Returns
-------
means: np.array((n,dim_x,1))
array of the state for each time step. Each entry is an np.array.
In other words `means[k,:]` is the state at step `k`.
covariance: np.array((n,dim_x,dim_x))
array of the covariances for each time step. In other words
`covariance[k,:,:]` is the covariance at step `k`.
"""
raise NotImplementedError("this is not implemented yet")
#pylint: disable=unreachable, no-member
# this is a copy of the code from kalman_filter, it has not been
# turned into the information filter yet. DO NOT USE.
n = np.size(zs, 0)
if Rs is None:
Rs = [None] * n
# mean estimates from Kalman Filter
means = zeros((n, self.dim_x, 1))
# state covariances from Kalman Filter
covariances = zeros((n, self.dim_x, self.dim_x))
if update_first:
for i, (z, r) in enumerate(zip(zs, Rs)):
self.update(z, r)
means[i, :] = self.x
covariances[i, :, :] = self._P
self.predict()
if saver is not None:
saver.save()
else:
for i, (z, r) in enumerate(zip(zs, Rs)):
self.predict()
self.update(z, r)
means[i, :] = self.x
covariances[i, :, :] = self._P
if saver is not None:
saver.save()
return (means, covariances)
@property
def F(self):
"""State Transition matrix"""
return self._F
@F.setter
def F(self, value):
"""State Transition matrix"""
self._F = value
self._F_inv = self.inv(self._F)
@property
def P(self):
"""State covariance matrix"""
return self.inv(self.P_inv)
def __repr__(self):
return '\n'.join([
'InformationFilter object',
pretty_str('dim_x', self.dim_x),
pretty_str('dim_z', self.dim_z),
pretty_str('dim_u', self.dim_u),
pretty_str('x', self.x),
pretty_str('P_inv', self.P_inv),
pretty_str('x_prior', self.x_prior),
pretty_str('P_inv_prior', self.P_inv_prior),
pretty_str('F', self.F),
pretty_str('_F_inv', self._F_inv),
pretty_str('Q', self.Q),
pretty_str('R_inv', self.R_inv),
pretty_str('H', self.H),
pretty_str('K', self.K),
pretty_str('y', self.y),
pretty_str('z', self.z),
pretty_str('S', self.S),
pretty_str('B', self.B),
pretty_str('log-likelihood', self.log_likelihood),
pretty_str('likelihood', self.likelihood),
pretty_str('inv', self.inv)
])
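# Minimal usage sketch (illustrative, not part of filterpy): a constant-velocity
# model tracking a 1-D position. All values are placeholders; log-likelihood
# computation is disabled because it is not needed for this sketch.
def _example_information_filter():
    inf = InformationFilter(dim_x=2, dim_z=1, compute_log_likelihood=False)
    inf.x = np.array([[0.], [0.]])        # position and velocity
    inf.F = np.array([[1., 1.],
                      [0., 1.]])          # constant-velocity transition
    inf.H = np.array([[1., 0.]])          # only the position is measured
    inf.R_inv = np.array([[1. / 5.]])     # inverse of the measurement variance
    inf.Q = np.eye(2) * 0.01              # small process noise
    for z in (1.0, 2.1, 2.9):
        inf.predict()
        inf.update(np.array([[z]]))
    return inf.x, inf.P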
|
from typing import Type, Text
import tensornetwork as tn
import numpy as np
import jax
#enable double precision in JAX
jax.config.update('jax_enable_x64', True)
def blocksparse_XXZ_mpo(Jz: np.ndarray,
Jxy: np.ndarray,
Bz: np.ndarray,
dtype: Type[np.number] = np.float64) -> tn.FiniteMPO:
"""
Prepare a symmetric MPO.
Args:
Jz, Jxy, Bz: Hamiltonian parameters.
dtype: data type.
Returns:
`tn.FiniteMPO`: The mpo of the XXZ Heisenberg model with U(1) symmetry.
"""
dense_mpo = tn.FiniteXXZ(Jz, Jxy, Bz, dtype=dtype).tensors
ileft = tn.Index(tn.U1Charge(np.array([0])), False)
iright = ileft.flip_flow()
i1 = tn.Index(tn.U1Charge(np.array([0, -1, 1, 0, 0])), False)
i2 = tn.Index(tn.U1Charge(np.array([0, -1, 1, 0, 0])), True)
i3 = tn.Index(tn.U1Charge(np.array([0, 1])), False)
i4 = tn.Index(tn.U1Charge(np.array([0, 1])), True)
mpotensors = [tn.BlockSparseTensor.fromdense(
[ileft, i2, i3, i4], dense_mpo[0])] + [
tn.BlockSparseTensor.fromdense([i1, i2, i3, i4], tensor)
for tensor in dense_mpo[1:-1]
] + [tn.BlockSparseTensor.fromdense([i1, iright, i3, i4], dense_mpo[-1])]
return tn.FiniteMPO(mpotensors, backend='symmetric')
def blocksparse_halffilled_spin_mps(N: int,
D: int,
B: int = 5,
dtype: Type[np.number] = np.float64):
"""
Prepare a U(1) symmetric spin 1/2 MPS at zero total magnetization.
Args:
N: Number of spins.
D: The bond dimension.
B: The number of symmetry sectors on each ancillary link.
dtype: The data type of the MPS.
Returns:
`tn.FiniteMPS`: A U(1) symmetric spin 1/2 mps at zero total magnetization.
"""
auxcharges = [tn.U1Charge([0])] + [
tn.U1Charge.random(D, n // 2, n // 2 + B) for n in range(N - 1)
] + [tn.U1Charge([N // 2])]
tensors = [
tn.BlockSparseTensor.random([
tn.Index(auxcharges[n], False),
tn.Index(tn.U1Charge([0, 1]), False),
tn.Index(auxcharges[n + 1], True)
],
dtype=dtype) for n in range(N)
]
return tn.FiniteMPS(tensors, canonicalize=True, backend='symmetric')
def initialize_spin_mps(N: int, D: int, dtype: Type[np.number], backend: Text):
"""
Helper function to initialize an MPS for a given backend.
Args:
N: Number of spins.
D: The bond dimension.
    dtype: The data type of the MPS.
    backend: The backend.
Returns:
`tn.FiniteMPS`: A spin 1/2 mps for the corresponding backend.
"""
if backend == 'symmetric':
return blocksparse_halffilled_spin_mps(N=N, D=D, B=5, dtype=dtype)
return tn.FiniteMPS.random([2] * N, [D] * (N - 1), dtype=dtype, backend=backend)
def initialize_XXZ_mpo(Jz: np.ndarray, Jxy: np.ndarray, Bz: np.ndarray,
dtype: Type[np.number], backend: Text):
"""
Helper function to initialize the XXZ Heisenberg MPO
for a given backend.
Args:
Jz, Jxy, Bz: Hamiltonian parameters.
dtype: data type.
backend: The backend.
Returns:
`tn.FiniteMPS`: A spin 1/2 mps for the corresponding backend.
"""
if backend == 'symmetric':
return blocksparse_XXZ_mpo(Jz=Jz, Jxy=Jxy, Bz=Bz, dtype=dtype)
return tn.FiniteXXZ(Jz, Jxy, Bz, dtype=dtype, backend=backend)
def run_twosite_dmrg(N: int, D: int, dtype: Type[np.number], Jz: np.ndarray,
Jxy: np.ndarray, Bz: np.ndarray, num_sweeps: int,
backend: Text):
"""
Run two-site dmrg for the XXZ Heisenberg model using a given backend.
Args:
N: Number of spins.
D: The bond dimension.
dtype: The data type of the MPS.
Jz, Jxy, Bz: Hamiltonian parameters.
num_sweeps: Number of DMRG sweeps to perform.
backend: The backend.
Returns:
float/complex: The energy upon termination of DMRG.
"""
mps = initialize_spin_mps(N, 32, dtype, backend)
mpo = initialize_XXZ_mpo(Jz, Jxy, Bz, dtype, backend)
dmrg = tn.FiniteDMRG(mps, mpo)
return dmrg.run_two_site(
max_bond_dim=D, num_sweeps=num_sweeps, num_krylov_vecs=10, verbose=1)
if __name__ == '__main__':
# Run two-site DMRG for the XXZ Heisenberg model for
# different backends.
#
# change parameters to simulate larger systems and bigger
# bond dimensions
#
  # Notes: The JAX backend performs jit (just-in-time) compilation of
# operations. This results in an overhead whenever the computation
# encounters a bond dimension it has not seen before.
# In two-site DMRG this happens when the bond dimensions
# are ramped up during the simulation close to the boundaries.
#
  # For small bond dimensions the symmetric backend is typically slower
  # than the other backends, due to its inherent book-keeping overhead.
  # Compared with numpy, the symmetric backend typically reaches the same
  # speed at a bond dimension of D ~ 100. For values of D >~ 400, the
  # symmetric backend is typically substantially faster than numpy,
  # pytorch or jax on CPU.
num_sites, bond_dim, datatype = 20, 16, np.float64
jz = np.ones(num_sites - 1)
jxy = np.ones(num_sites - 1)
bz = np.zeros(num_sites)
n_sweeps = 5
energies = {}
backends = ('jax', 'numpy', 'symmetric', 'pytorch')
for be in backends:
print(f'\nrunning DMRG for {be} backend')
energies[be] = run_twosite_dmrg(
num_sites,
bond_dim,
datatype,
jz,
jxy,
bz,
num_sweeps=n_sweeps,
backend=be)
text = [
f"\nenergy for backend {backend}: {e}" for backend, e in energies.items()
]
print(''.join(text))
|
from Handler import Handler
import urllib2
class HttpPostHandler(Handler):
    # Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.batch_size = int(self.config['batch'])
self.url = self.config.get('url')
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(HttpPostHandler, self).get_default_config_help()
config.update({
'url': 'Fully qualified url to send metrics to',
            'batch': 'How many metrics to batch before POSTing to the url',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(HttpPostHandler, self).get_default_config()
config.update({
'url': 'http://localhost/blah/blah/blah',
'batch': 100,
})
return config
# Join batched metrics and push to url mentioned in config
def process(self, metric):
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
self.post()
# Overriding flush to post metrics for every collector.
def flush(self):
"""Flush metrics in queue"""
self.post()
def post(self):
req = urllib2.Request(self.url, "\n".join(self.metrics))
urllib2.urlopen(req)
self.metrics = []
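# Illustrative sketch (not part of diamond): constructing the handler directly
# with an explicit config dict. The url is a placeholder; in a real deployment
# these keys come from the diamond handler configuration file instead.
def _example_handler_usage():
    handler = HttpPostHandler(config={
        'url': 'http://metrics.example.com/collect',
        'batch': 100,
    })
    # Metrics are buffered in memory; a single POST of newline-joined metrics
    # is made once 'batch' metrics have accumulated (or when flush() is called).
    handler.process("servers.web01.cpu.total.idle 99 1425588479")
    return handler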
|
from homeassistant.components.homeassistant import (
DOMAIN as HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
)
from homeassistant.components.smart_meter_texas.const import DOMAIN
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_ERROR,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.setup import async_setup_component
from .conftest import TEST_ENTITY_ID, setup_integration
from tests.async_mock import patch
async def test_setup_with_no_config(hass):
"""Test that no config is successful."""
assert await async_setup_component(hass, DOMAIN, {}) is True
await hass.async_block_till_done()
# Assert no flows were started.
assert len(hass.config_entries.flow.async_progress()) == 0
async def test_auth_failure(hass, config_entry, aioclient_mock):
"""Test if user's username or password is not accepted."""
await setup_integration(hass, config_entry, aioclient_mock, auth_fail=True)
assert config_entry.state == ENTRY_STATE_SETUP_ERROR
async def test_api_timeout(hass, config_entry, aioclient_mock):
"""Test that a timeout results in ConfigEntryNotReady."""
await setup_integration(hass, config_entry, aioclient_mock, auth_timeout=True)
assert config_entry.state == ENTRY_STATE_SETUP_RETRY
async def test_update_failure(hass, config_entry, aioclient_mock):
"""Test that the coordinator handles a bad response."""
await setup_integration(hass, config_entry, aioclient_mock, bad_reading=True)
await async_setup_component(hass, HA_DOMAIN, {})
await hass.async_block_till_done()
with patch("smart_meter_texas.Meter.read_meter") as updater:
await hass.services.async_call(
HA_DOMAIN,
SERVICE_UPDATE_ENTITY,
{ATTR_ENTITY_ID: TEST_ENTITY_ID},
blocking=True,
)
await hass.async_block_till_done()
updater.assert_called_once()
async def test_unload_config_entry(hass, config_entry, aioclient_mock):
"""Test entry unloading."""
await setup_integration(hass, config_entry, aioclient_mock)
config_entries = hass.config_entries.async_entries(DOMAIN)
assert len(config_entries) == 1
assert config_entries[0] is config_entry
assert config_entry.state == ENTRY_STATE_LOADED
await hass.config_entries.async_unload(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state == ENTRY_STATE_NOT_LOADED
|
import io
from apns2.errors import Unregistered
import pytest
import yaml
import homeassistant.components.apns.notify as apns
import homeassistant.components.notify as notify
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, mock_open, patch
from tests.common import assert_setup_component
CONFIG = {
notify.DOMAIN: {
"platform": "apns",
"name": "test_app",
"topic": "testapp.appname",
"cert_file": "test_app.pem",
}
}
@pytest.fixture(scope="module", autouse=True)
def mock_apns_notify_open():
"""Mock builtins.open for apns.notfiy."""
with patch("homeassistant.components.apns.notify.open", mock_open(), create=True):
yield
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def _setup_notify(hass_):
assert isinstance(apns.load_yaml_config_file, Mock), "Found unmocked load_yaml"
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass_, notify.DOMAIN, CONFIG)
assert handle_config[notify.DOMAIN]
@patch("os.path.isfile", return_value=True)
@patch("os.access", return_value=True)
async def test_apns_setup_full(mock_access, mock_isfile, hass):
"""Test setup with all data."""
config = {
"notify": {
"platform": "apns",
"name": "test_app",
"sandbox": "True",
"topic": "testapp.appname",
"cert_file": "test_app.pem",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
assert handle_config[notify.DOMAIN]
async def test_apns_setup_missing_name(hass):
"""Test setup with missing name."""
config = {
"notify": {
"platform": "apns",
"topic": "testapp.appname",
"cert_file": "test_app.pem",
}
}
with assert_setup_component(0) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
assert not handle_config[notify.DOMAIN]
async def test_apns_setup_missing_certificate(hass):
"""Test setup with missing certificate."""
config = {
"notify": {
"platform": "apns",
"name": "test_app",
"topic": "testapp.appname",
}
}
with assert_setup_component(0) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
assert not handle_config[notify.DOMAIN]
async def test_apns_setup_missing_topic(hass):
"""Test setup with missing topic."""
config = {
"notify": {
"platform": "apns",
"name": "test_app",
"cert_file": "test_app.pem",
}
}
with assert_setup_component(0) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
assert not handle_config[notify.DOMAIN]
@patch("homeassistant.components.apns.notify._write_device")
async def test_register_new_device(mock_write, hass):
"""Test registering a new device with a name."""
yaml_file = {5678: {"name": "test device 2"}}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
await _setup_notify(hass)
assert await hass.services.async_call(
apns.DOMAIN,
"apns_test_app",
{"push_id": "1234", "name": "test device"},
blocking=True,
)
assert len(written_devices) == 1
assert written_devices[0].name == "test device"
@patch("homeassistant.components.apns.notify._write_device")
async def test_register_device_without_name(mock_write, hass):
"""Test registering a without a name."""
yaml_file = {
1234: {"name": "test device 1", "tracking_device_id": "tracking123"},
5678: {"name": "test device 2", "tracking_device_id": "tracking456"},
}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
await _setup_notify(hass)
assert await hass.services.async_call(
apns.DOMAIN, "apns_test_app", {"push_id": "1234"}, blocking=True
)
devices = {dev.push_id: dev for dev in written_devices}
test_device = devices.get("1234")
assert test_device is not None
assert test_device.name is None
@patch("homeassistant.components.apns.notify._write_device")
async def test_update_existing_device(mock_write, hass):
"""Test updating an existing device."""
yaml_file = {1234: {"name": "test device 1"}, 5678: {"name": "test device 2"}}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
await _setup_notify(hass)
assert await hass.services.async_call(
apns.DOMAIN,
"apns_test_app",
{"push_id": "1234", "name": "updated device 1"},
blocking=True,
)
devices = {dev.push_id: dev for dev in written_devices}
test_device_1 = devices.get("1234")
test_device_2 = devices.get("5678")
assert test_device_1 is not None
assert test_device_2 is not None
assert "updated device 1" == test_device_1.name
@patch("homeassistant.components.apns.notify._write_device")
async def test_update_existing_device_with_tracking_id(mock_write, hass):
"""Test updating an existing device that has a tracking id."""
yaml_file = {
1234: {"name": "test device 1", "tracking_device_id": "tracking123"},
5678: {"name": "test device 2", "tracking_device_id": "tracking456"},
}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
await _setup_notify(hass)
assert await hass.services.async_call(
apns.DOMAIN,
"apns_test_app",
{"push_id": "1234", "name": "updated device 1"},
blocking=True,
)
devices = {dev.push_id: dev for dev in written_devices}
test_device_1 = devices.get("1234")
test_device_2 = devices.get("5678")
assert test_device_1 is not None
assert test_device_2 is not None
assert "tracking123" == test_device_1.tracking_device_id
assert "tracking456" == test_device_2.tracking_device_id
@patch("homeassistant.components.apns.notify.APNsClient")
async def test_send(mock_client, hass):
"""Test updating an existing device."""
send = mock_client.return_value.send_notification
yaml_file = {1234: {"name": "test device 1"}}
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
await _setup_notify(hass)
assert await hass.services.async_call(
"notify",
"test_app",
{
"message": "Hello",
"data": {"badge": 1, "sound": "test.mp3", "category": "testing"},
},
blocking=True,
)
assert send.called
assert 1 == len(send.mock_calls)
target = send.mock_calls[0][1][0]
payload = send.mock_calls[0][1][1]
assert "1234" == target
assert "Hello" == payload.alert
assert 1 == payload.badge
assert "test.mp3" == payload.sound
assert "testing" == payload.category
@patch("homeassistant.components.apns.notify.APNsClient")
async def test_send_when_disabled(mock_client, hass):
"""Test updating an existing device."""
send = mock_client.return_value.send_notification
yaml_file = {1234: {"name": "test device 1", "disabled": True}}
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
await _setup_notify(hass)
assert await hass.services.async_call(
"notify",
"test_app",
{
"message": "Hello",
"data": {"badge": 1, "sound": "test.mp3", "category": "testing"},
},
blocking=True,
)
assert not send.called
@patch("homeassistant.components.apns.notify.APNsClient")
async def test_send_with_state(mock_client, hass):
"""Test updating an existing device."""
send = mock_client.return_value.send_notification
yaml_file = {
1234: {"name": "test device 1", "tracking_device_id": "tracking123"},
5678: {"name": "test device 2", "tracking_device_id": "tracking456"},
}
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
), patch("os.path.isfile", Mock(return_value=True)):
notify_service = await hass.async_add_executor_job(
apns.ApnsNotificationService,
hass,
"test_app",
"testapp.appname",
False,
"test_app.pem",
)
notify_service.device_state_changed_listener(
"device_tracker.tracking456",
State("device_tracker.tracking456", None),
State("device_tracker.tracking456", "home"),
)
notify_service.send_message(message="Hello", target="home")
assert send.called
assert 1 == len(send.mock_calls)
target = send.mock_calls[0][1][0]
payload = send.mock_calls[0][1][1]
assert "5678" == target
assert "Hello" == payload.alert
@patch("homeassistant.components.apns.notify.APNsClient")
@patch("homeassistant.components.apns.notify._write_device")
async def test_disable_when_unregistered(mock_write, mock_client, hass):
"""Test disabling a device when it is unregistered."""
send = mock_client.return_value.send_notification
send.side_effect = Unregistered()
yaml_file = {
1234: {"name": "test device 1", "tracking_device_id": "tracking123"},
5678: {"name": "test device 2", "tracking_device_id": "tracking456"},
}
written_devices = []
def fake_write(_out, device):
"""Fake write_device."""
written_devices.append(device)
mock_write.side_effect = fake_write
with patch(
"homeassistant.components.apns.notify.load_yaml_config_file",
Mock(return_value=yaml_file),
):
await _setup_notify(hass)
assert await hass.services.async_call(
"notify", "test_app", {"message": "Hello"}, blocking=True
)
devices = {dev.push_id: dev for dev in written_devices}
test_device_1 = devices.get("1234")
assert test_device_1 is not None
assert test_device_1.disabled is True
async def test_write_device():
"""Test writing device."""
out = io.StringIO()
device = apns.ApnsDevice("123", "name", "track_id", True)
apns._write_device(out, device)
data = yaml.safe_load(out.getvalue())
assert data == {
123: {"name": "name", "tracking_device_id": "track_id", "disabled": True}
}
|
import logging
import re
import struct
from typing import Final, Optional
import aiohttp
from ..abc import MixinMeta
from ..cog_utils import CompositeMetaClass
log = logging.getLogger("red.cogs.Audio.cog.Utilities.Parsing")
STREAM_TITLE: Final[re.Pattern] = re.compile(br"StreamTitle='([^']*)';")
class ParsingUtilities(MixinMeta, metaclass=CompositeMetaClass):
async def icyparser(self, url: str) -> Optional[str]:
try:
async with self.session.get(url, headers={"Icy-MetaData": "1"}) as resp:
metaint = int(resp.headers["icy-metaint"])
for _ in range(5):
await resp.content.readexactly(metaint)
metadata_length = struct.unpack("B", await resp.content.readexactly(1))[0] * 16
metadata = await resp.content.readexactly(metadata_length)
m = re.search(STREAM_TITLE, metadata.rstrip(b"\0"))
if m:
title = m.group(1)
if title:
title = title.decode("utf-8", errors="replace")
return title
else:
return None
except (KeyError, aiohttp.ClientConnectionError, aiohttp.ClientResponseError):
return None
|
from blinkstick import blinkstick
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
CONF_SERIAL = "serial"
DEFAULT_NAME = "Blinkstick"
SUPPORT_BLINKSTICK = SUPPORT_BRIGHTNESS | SUPPORT_COLOR
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SERIAL): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Blinkstick device specified by serial number."""
name = config[CONF_NAME]
serial = config[CONF_SERIAL]
stick = blinkstick.find_by_serial(serial)
add_entities([BlinkStickLight(stick, name)], True)
class BlinkStickLight(LightEntity):
"""Representation of a BlinkStick light."""
def __init__(self, stick, name):
"""Initialize the light."""
self._stick = stick
self._name = name
self._serial = stick.get_serial()
self._hs_color = None
self._brightness = None
@property
def name(self):
"""Return the name of the light."""
return self._name
@property
def brightness(self):
"""Read back the brightness of the light."""
return self._brightness
@property
def hs_color(self):
"""Read back the color of the light."""
return self._hs_color
@property
def is_on(self):
"""Return True if entity is on."""
return self._brightness > 0
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BLINKSTICK
def update(self):
"""Read back the device state."""
rgb_color = self._stick.get_color()
hsv = color_util.color_RGB_to_hsv(*rgb_color)
self._hs_color = hsv[:2]
self._brightness = hsv[2]
def turn_on(self, **kwargs):
"""Turn the device on."""
if ATTR_HS_COLOR in kwargs:
self._hs_color = kwargs[ATTR_HS_COLOR]
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
else:
self._brightness = 255
rgb_color = color_util.color_hsv_to_RGB(
self._hs_color[0], self._hs_color[1], self._brightness / 255 * 100
)
self._stick.set_color(red=rgb_color[0], green=rgb_color[1], blue=rgb_color[2])
def turn_off(self, **kwargs):
"""Turn the device off."""
self._stick.turn_off()
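# Illustrative configuration sketch (serial number and name are placeholders,
# and the snippet is not verified against current Home Assistant docs). The
# platform is set up from configuration.yaml, matching PLATFORM_SCHEMA above:
#
#   light:
#     - platform: blinkstick
#       serial: "BS000000-0.0"
#       name: Kitchen BlinkStick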
|
import re
from typing import Any
from typing import Callable
from typing import Dict
ConstraintState = Dict[str, Dict[str, Any]]
ConstraintOp = Callable[[str, str, str, ConstraintState], bool]
def max_per(constraint_value, offer_value, attribute, state: ConstraintState):
if not constraint_value:
constraint_value = 1
state_value = state.get("MAX_PER", {}).get(attribute, {}).get(offer_value, 0)
return state_value <= int(constraint_value)
# lambda arg: [constraint value, offer value, attribute, state]
# example constraint: ['pool', 'MAX_PER', 5]
# constraint value: 5
# offer value: default
# attribute: pool
# state: {'MAX_PER' => {'pool' => {'default' => 6}}}
CONS_OPS: Dict[str, ConstraintOp] = {
"EQUALS": lambda cv, ov, *_: cv == ov,
"LIKE": lambda cv, ov, *_: bool(re.match(cv, ov)),
"UNLIKE": lambda cv, ov, *_: not (re.match(cv, ov)),
"MAX_PER": max_per,
"UNIQUE": max_per,
}
def nested_inc(op, _, attr_val, attr_name, state, step=1):
"""Increments relevant counter by step from args array"""
oph = state.setdefault(op, {})
nameh = oph.setdefault(attr_name, {})
nameh.setdefault(attr_val, 0)
nameh[attr_val] += step
return state
# lambda args same as CONS_OPS + update step
UPDATE_OPS = {
"EQUALS": lambda *_: None,
"LIKE": lambda *_: None,
"UNLIKE": lambda *_: None,
"MAX_PER": lambda *args: nested_inc("MAX_PER", *args),
"UNIQUE": lambda *args: nested_inc("MAX_PER", *args),
}
def check_offer_constraints(offer, constraints, state):
"""Returns True if all constraints are satisfied by offer's attributes,
    returns False otherwise. Prints an error message and re-raises if an error
was thrown."""
for (attr, op, val) in constraints:
try:
offer_attr = next((x for x in offer.attributes if x.name == attr), None)
if offer_attr is None:
print("Attribute not found for a constraint: %s" % attr)
return False
elif not (CONS_OPS[op](val, offer_attr.text.value, offer_attr.name, state)):
print(
"Constraint not satisfied: [{} {} {}] for {} with {}".format(
attr, op, val, offer_attr.text.value, state
)
)
return False
except Exception as err:
print(
"Error while matching constraint: [{} {} {}] {}".format(
attr, op, val, str(err)
)
)
raise err
return True
def update_constraint_state(offer, constraints, state, step=1):
"""Mutates state for each offer attribute found in constraints by calling
relevant UPDATE_OP lambda"""
for (attr, op, val) in constraints:
for oa in offer.attributes:
if attr == oa.name:
UPDATE_OPS[op](val, oa.text.value, attr, state, step)
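# Illustrative sketch (not part of this module): exercising the MAX_PER
# constraint with a minimal stand-in for a Mesos offer. The attribute name and
# values below are placeholders.
def _example_max_per_constraint():
    from types import SimpleNamespace
    offer = SimpleNamespace(
        attributes=[SimpleNamespace(name="pool", text=SimpleNamespace(value="default"))]
    )
    constraints = [["pool", "MAX_PER", 1]]
    state: ConstraintState = {}
    before = check_offer_constraints(offer, constraints, state)  # nothing placed yet
    update_constraint_state(offer, constraints, state)           # record one placement
    update_constraint_state(offer, constraints, state)           # record a second one
    after = check_offer_constraints(offer, constraints, state)   # count now exceeds the limit
    return before, after  # (True, False)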
|
from pyowm.exceptions.api_call_error import APICallError
from pyowm.exceptions.api_response_error import UnauthorizedError
from homeassistant import data_entry_flow
from homeassistant.components.openweathermap.const import (
CONF_LANGUAGE,
DEFAULT_FORECAST_MODE,
DEFAULT_LANGUAGE,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_IMPORT, SOURCE_USER
from homeassistant.const import (
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_MODE,
CONF_NAME,
)
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry
CONFIG = {
CONF_NAME: "openweathermap",
CONF_API_KEY: "foo",
CONF_LATITUDE: 50,
CONF_LONGITUDE: 40,
CONF_MODE: DEFAULT_FORECAST_MODE,
CONF_LANGUAGE: DEFAULT_LANGUAGE,
}
VALID_YAML_CONFIG = {CONF_API_KEY: "foo"}
async def test_form(hass):
"""Test that the form is served with valid input."""
mocked_owm = _create_mocked_owm(True)
with patch(
"pyowm.weatherapi25.owm25.OWM25",
return_value=mocked_owm,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
assert result["errors"] == {}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
await hass.async_block_till_done()
conf_entries = hass.config_entries.async_entries(DOMAIN)
entry = conf_entries[0]
assert entry.state == "loaded"
await hass.config_entries.async_unload(conf_entries[0].entry_id)
await hass.async_block_till_done()
assert entry.state == "not_loaded"
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == CONFIG[CONF_NAME]
assert result["data"][CONF_LATITUDE] == CONFIG[CONF_LATITUDE]
assert result["data"][CONF_LONGITUDE] == CONFIG[CONF_LONGITUDE]
assert result["data"][CONF_API_KEY] == CONFIG[CONF_API_KEY]
async def test_form_import(hass):
"""Test we can import yaml config."""
mocked_owm = _create_mocked_owm(True)
with patch("pyowm.weatherapi25.owm25.OWM25", return_value=mocked_owm), patch(
"homeassistant.components.openweathermap.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.openweathermap.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=VALID_YAML_CONFIG.copy(),
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_LATITUDE] == hass.config.latitude
assert result["data"][CONF_LONGITUDE] == hass.config.longitude
assert result["data"][CONF_API_KEY] == VALID_YAML_CONFIG[CONF_API_KEY]
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_options(hass):
"""Test that the options form."""
mocked_owm = _create_mocked_owm(True)
with patch(
"pyowm.weatherapi25.owm25.OWM25",
return_value=mocked_owm,
):
config_entry = MockConfigEntry(
domain=DOMAIN, unique_id="openweathermap_unique_id", data=CONFIG
)
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
assert config_entry.state == "loaded"
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_MODE: "daily"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_MODE: "daily",
CONF_LANGUAGE: DEFAULT_LANGUAGE,
}
await hass.async_block_till_done()
assert config_entry.state == "loaded"
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_MODE: "freedaily"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert config_entry.options == {
CONF_MODE: "freedaily",
CONF_LANGUAGE: DEFAULT_LANGUAGE,
}
await hass.async_block_till_done()
assert config_entry.state == "loaded"
async def test_form_invalid_api_key(hass):
"""Test that the form is served with no input."""
mocked_owm = _create_mocked_owm(True)
with patch(
"pyowm.weatherapi25.owm25.OWM25",
return_value=mocked_owm,
side_effect=UnauthorizedError(""),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["errors"] == {"base": "invalid_api_key"}
async def test_form_api_call_error(hass):
"""Test setting up with api call error."""
mocked_owm = _create_mocked_owm(True)
with patch(
"pyowm.weatherapi25.owm25.OWM25",
return_value=mocked_owm,
side_effect=APICallError(""),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["errors"] == {"base": "cannot_connect"}
async def test_form_api_offline(hass):
"""Test setting up with api call error."""
mocked_owm = _create_mocked_owm(False)
with patch(
"homeassistant.components.openweathermap.config_flow.OWM",
return_value=mocked_owm,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["errors"] == {"base": "invalid_api_key"}
def _create_mocked_owm(is_api_online: bool):
mocked_owm = MagicMock()
mocked_owm.is_API_online.return_value = is_api_online
weather = MagicMock()
weather.get_temperature.return_value.get.return_value = 10
weather.get_pressure.return_value.get.return_value = 10
weather.get_humidity.return_value = 10
weather.get_wind.return_value.get.return_value = 0
weather.get_clouds.return_value = "clouds"
weather.get_rain.return_value = []
weather.get_snow.return_value = 3
weather.get_detailed_status.return_value = "status"
weather.get_weather_code.return_value = 803
mocked_owm.weather_at_coords.return_value.get_weather.return_value = weather
one_day_forecast = MagicMock()
one_day_forecast.get_reference_time.return_value = 10
one_day_forecast.get_temperature.return_value.get.return_value = 10
one_day_forecast.get_rain.return_value.get.return_value = 0
one_day_forecast.get_snow.return_value.get.return_value = 0
one_day_forecast.get_wind.return_value.get.return_value = 0
one_day_forecast.get_weather_code.return_value = 803
mocked_owm.three_hours_forecast_at_coords.return_value.get_forecast.return_value.get_weathers.return_value = [
one_day_forecast
]
return mocked_owm
|
import os
import stat
import time
import datetime
import sys
import fnmatch
EXTENSIONS = ['*.py']
EXECUTABLE = 'nosetests test/'
DEFAULTARGS = '--with-color -exe' # -w tests'
def check_sum():
"""
Return a long which can be used to know if any .py files have changed.
"""
val = 0
for root, dirs, files in os.walk(os.getcwd()):
for extension in EXTENSIONS:
for f in fnmatch.filter(files, extension):
stats = os.stat(os.path.join(root, f))
val += stats[stat.ST_SIZE] + stats[stat.ST_MTIME]
return val
if __name__ == '__main__':
val = 0
try:
while True:
if check_sum() != val:
val = check_sum()
os.system('%s %s %s' % (EXECUTABLE, DEFAULTARGS, ' '.join(sys.argv[1:])))
print(datetime.datetime.now().__str__())
print('=' * 77)
time.sleep(1)
except KeyboardInterrupt:
print('Goodbye')
|
import unittest
import urwid
class PaddingTest(unittest.TestCase):
def ptest(self, desc, align, width, maxcol, left, right,min_width=None):
p = urwid.Padding(None, align, width, min_width)
l, r = p.padding_values((maxcol,),False)
assert (l,r)==(left,right), "%s expected %s but got %s"%(
desc, (left,right), (l,r))
def petest(self, desc, align, width):
self.assertRaises(urwid.PaddingError, lambda:
urwid.Padding(None, align, width))
def test_create(self):
self.petest("invalid pad",6,5)
self.petest("invalid pad type",('bad',2),5)
self.petest("invalid width",'center','42')
self.petest("invalid width type",'center',('gouranga',4))
def test_values(self):
self.ptest("left align 5 7",'left',5,7,0,2)
self.ptest("left align 7 7",'left',7,7,0,0)
self.ptest("left align 9 7",'left',9,7,0,0)
self.ptest("right align 5 7",'right',5,7,2,0)
self.ptest("center align 5 7",'center',5,7,1,1)
self.ptest("fixed left",('fixed left',3),5,10,3,2)
self.ptest("fixed left reduce",('fixed left',3),8,10,2,0)
self.ptest("fixed left shrink",('fixed left',3),18,10,0,0)
self.ptest("fixed left, right",
('fixed left',3),('fixed right',4),17,3,4)
self.ptest("fixed left, right, min_width",
('fixed left',3),('fixed right',4),10,3,2,5)
self.ptest("fixed left, right, min_width 2",
('fixed left',3),('fixed right',4),10,2,0,8)
self.ptest("fixed right",('fixed right',3),5,10,2,3)
self.ptest("fixed right reduce",('fixed right',3),8,10,0,2)
self.ptest("fixed right shrink",('fixed right',3),18,10,0,0)
self.ptest("fixed right, left",
('fixed right',3),('fixed left',4),17,4,3)
self.ptest("fixed right, left, min_width",
('fixed right',3),('fixed left',4),10,2,3,5)
self.ptest("fixed right, left, min_width 2",
('fixed right',3),('fixed left',4),10,0,2,8)
self.ptest("relative 30",('relative',30),5,10,1,4)
self.ptest("relative 50",('relative',50),5,10,2,3)
self.ptest("relative 130 edge",('relative',130),5,10,5,0)
self.ptest("relative -10 edge",('relative',-10),4,10,0,6)
self.ptest("center relative 70",'center',('relative',70),
10,1,2)
self.ptest("center relative 70 grow 8",'center',('relative',70),
10,1,1,8)
def mctest(self, desc, left, right, size, cx, innercx):
class Inner:
def __init__(self, desc, innercx):
self.desc = desc
self.innercx = innercx
def move_cursor_to_coords(self,size,cx,cy):
assert cx==self.innercx, desc
i = Inner(desc,innercx)
p = urwid.Padding(i, ('fixed left',left),
('fixed right',right))
p.move_cursor_to_coords(size, cx, 0)
def test_cursor(self):
self.mctest("cursor left edge",2,2,(10,2),2,0)
self.mctest("cursor left edge-1",2,2,(10,2),1,0)
self.mctest("cursor right edge",2,2,(10,2),7,5)
self.mctest("cursor right edge+1",2,2,(10,2),8,5)
def test_reduced_padding_cursor(self):
# FIXME: This is at least consistent now, but I don't like it.
# pack() on an Edit should leave room for the cursor
# fixing this gets deep into things like Edit._shift_view_to_cursor
# though, so this might not get fixed for a while
p = urwid.Padding(urwid.Edit(u'',u''), width='pack', left=4)
self.assertEqual(p.render((10,), True).cursor, None)
self.assertEqual(p.get_cursor_coords((10,)), None)
self.assertEqual(p.render((4,), True).cursor, None)
self.assertEqual(p.get_cursor_coords((4,)), None)
p = urwid.Padding(urwid.Edit(u'',u''), width=('relative', 100), left=4)
self.assertEqual(p.render((10,), True).cursor, (4, 0))
self.assertEqual(p.get_cursor_coords((10,)), (4, 0))
self.assertEqual(p.render((4,), True).cursor, None)
self.assertEqual(p.get_cursor_coords((4,)), None)
class FillerTest(unittest.TestCase):
def ftest(self, desc, valign, height, maxrow, top, bottom,
min_height=None):
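        """Check that Filler(valign, height) produces (top, bottom) filler rows for maxrow."""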
f = urwid.Filler(None, valign, height, min_height)
t, b = f.filler_values((20,maxrow), False)
assert (t,b)==(top,bottom), "%s expected %s but got %s"%(
desc, (top,bottom), (t,b))
def fetest(self, desc, valign, height):
self.assertRaises(urwid.FillerError, lambda:
urwid.Filler(None, valign, height))
def test_create(self):
self.fetest("invalid pad",6,5)
self.fetest("invalid pad type",('bad',2),5)
self.fetest("invalid width",'middle','42')
self.fetest("invalid width type",'middle',('gouranga',4))
self.fetest("invalid combination",('relative',20),
('fixed bottom',4))
self.fetest("invalid combination 2",('relative',20),
('fixed top',4))
def test_values(self):
self.ftest("top align 5 7",'top',5,7,0,2)
self.ftest("top align 7 7",'top',7,7,0,0)
self.ftest("top align 9 7",'top',9,7,0,0)
self.ftest("bottom align 5 7",'bottom',5,7,2,0)
self.ftest("middle align 5 7",'middle',5,7,1,1)
self.ftest("fixed top",('fixed top',3),5,10,3,2)
self.ftest("fixed top reduce",('fixed top',3),8,10,2,0)
self.ftest("fixed top shrink",('fixed top',3),18,10,0,0)
self.ftest("fixed top, bottom",
('fixed top',3),('fixed bottom',4),17,3,4)
self.ftest("fixed top, bottom, min_width",
('fixed top',3),('fixed bottom',4),10,3,2,5)
self.ftest("fixed top, bottom, min_width 2",
('fixed top',3),('fixed bottom',4),10,2,0,8)
self.ftest("fixed bottom",('fixed bottom',3),5,10,2,3)
self.ftest("fixed bottom reduce",('fixed bottom',3),8,10,0,2)
self.ftest("fixed bottom shrink",('fixed bottom',3),18,10,0,0)
self.ftest("fixed bottom, top",
('fixed bottom',3),('fixed top',4),17,4,3)
self.ftest("fixed bottom, top, min_height",
('fixed bottom',3),('fixed top',4),10,2,3,5)
self.ftest("fixed bottom, top, min_height 2",
('fixed bottom',3),('fixed top',4),10,0,2,8)
self.ftest("relative 30",('relative',30),5,10,1,4)
self.ftest("relative 50",('relative',50),5,10,2,3)
self.ftest("relative 130 edge",('relative',130),5,10,5,0)
self.ftest("relative -10 edge",('relative',-10),4,10,0,6)
self.ftest("middle relative 70",'middle',('relative',70),
10,1,2)
self.ftest("middle relative 70 grow 8",'middle',('relative',70),
10,1,1,8)
def test_repr(self):
repr(urwid.Filler(urwid.Text(u'hai')))
|
from os import path
from homeassistant import config as hass_config
import homeassistant.components.notify as notify
from homeassistant.components.rest import DOMAIN
from homeassistant.const import SERVICE_RELOAD
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_reload_notify(hass):
"""Verify we can reload the notify service."""
assert await async_setup_component(
hass,
notify.DOMAIN,
{
notify.DOMAIN: [
{
"name": DOMAIN,
"platform": DOMAIN,
"resource": "http://127.0.0.1/off",
},
]
},
)
await hass.async_block_till_done()
assert hass.services.has_service(notify.DOMAIN, DOMAIN)
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"rest/configuration.yaml",
)
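    # Point the loaded YAML config at the fixture file and trigger a reload of the rest notify platform.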
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert not hass.services.has_service(notify.DOMAIN, DOMAIN)
assert hass.services.has_service(notify.DOMAIN, "rest_reloaded")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
import logging
import posixpath
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import ycsb
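# Runs YCSB against Google Cloud Firestore via the googlefirestore YCSB
# binding; credentials come from --google_firestore_keyfile and
# --google_firestore_project_id.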
BENCHMARK_NAME = 'cloud_firestore_ycsb'
BENCHMARK_CONFIG = """
cloud_firestore_ycsb:
description: >
    Run YCSB against Google Cloud Firestore.
Configure the number of VMs via --num-vms.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: 1"""
YCSB_BINDING_TAR_URL = ('https://github.com/charlie-lobo/YCSB/releases'
'/download/0.17.0fs/'
'ycsb-googlefirestore-binding-0.17.0.tar.gz')
YCSB_BINDING_LIB_DIR = posixpath.join(ycsb.YCSB_DIR, 'lib')
PRIVATE_KEYFILE_DIR = '/tmp/key.json'
FLAGS = flags.FLAGS
flags.DEFINE_string('google_firestore_keyfile', None,
'The path to Google API JSON private key file')
flags.DEFINE_string('google_firestore_project_id', None,
'The project ID that has Cloud Firestore service')
flags.DEFINE_string('google_firestore_debug', 'false',
'The logging level when running YCSB')
def GetConfig(user_config):
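  """Load the benchmark config, applying the --ycsb_client_vms override."""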
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['ycsb_client_vms'].present:
config['vm_groups']['default']['vm_count'] = FLAGS.ycsb_client_vms
return config
def CheckPrerequisites(benchmark_config):
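  """Verify that the required credential flags are set."""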
# Before YCSB Cloud Firestore supports Application Default Credential,
# we should always make sure valid credential flags are set.
if not FLAGS.google_firestore_keyfile:
raise ValueError('"google_firestore_keyfile" must be set')
if not FLAGS.google_firestore_project_id:
raise ValueError('"google_firestore_project_id" must be set ')
def Prepare(benchmark_spec):
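  """Prepare the VMs: install YCSB with the Firestore binding and copy the key file."""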
benchmark_spec.always_call_cleanup = True
ycsb.SetYcsbTarUrl(YCSB_BINDING_TAR_URL)
vms = benchmark_spec.vms
# Install required packages and copy credential files
vm_util.RunThreaded(_Install, vms)
# Restore YCSB_TAR_URL
ycsb.SetYcsbTarUrl(None)
benchmark_spec.executor = ycsb.YCSBExecutor('googlefirestore')
def Run(benchmark_spec):
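  """Run the YCSB load and run phases against Cloud Firestore and return the samples."""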
vms = benchmark_spec.vms
run_kwargs = {
'googlefirestore.projectId': FLAGS.google_firestore_project_id,
'googlefirestore.serviceAccountKey': PRIVATE_KEYFILE_DIR,
      # google_firestore_debug is a string flag, so compare its value explicitly.
      'log4j.rootLogger': 'DEBUG' if FLAGS.google_firestore_debug.lower() == 'true' else 'INFO',
}
load_kwargs = run_kwargs.copy()
if FLAGS['ycsb_preload_threads'].present:
    load_kwargs['threads'] = FLAGS.ycsb_preload_threads  # pass the flag's value, not the Flag object
samples = list(
benchmark_spec.executor.LoadAndRun(
vms, load_kwargs=load_kwargs, run_kwargs=run_kwargs))
return samples
def Cleanup(benchmark_spec):
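  """Clean up benchmark resources (currently manual only, see warning below)."""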
# TODO: support automatic cleanup.
logging.warning(
'For now, we can only manually delete all the entries via GCP portal.')
def _Install(vm):
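  """Install YCSB on the VM and copy the service account key file to it."""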
vm.Install('ycsb')
# Copy private key file to VM
vm.RemoteCopy(FLAGS.google_firestore_keyfile, PRIVATE_KEYFILE_DIR)
|
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.entity import Entity
ATTR_NATIVE_BALANCE = "Balance in native currency"
CURRENCY_ICONS = {
"BTC": "mdi:currency-btc",
"ETH": "mdi:currency-eth",
"EUR": "mdi:currency-eur",
"LTC": "mdi:litecoin",
"USD": "mdi:currency-usd",
}
DEFAULT_COIN_ICON = "mdi:currency-usd-circle"
ATTRIBUTION = "Data provided by coinbase.com"
DATA_COINBASE = "coinbase_cache"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Coinbase sensors."""
if discovery_info is None:
return
if "account" in discovery_info:
account = discovery_info["account"]
sensor = AccountSensor(
hass.data[DATA_COINBASE], account["name"], account["balance"]["currency"]
)
if "exchange_currency" in discovery_info:
sensor = ExchangeRateSensor(
hass.data[DATA_COINBASE],
discovery_info["exchange_currency"],
discovery_info["native_currency"],
)
add_entities([sensor], True)
class AccountSensor(Entity):
"""Representation of a Coinbase.com sensor."""
def __init__(self, coinbase_data, name, currency):
"""Initialize the sensor."""
self._coinbase_data = coinbase_data
self._name = f"Coinbase {name}"
self._state = None
self._unit_of_measurement = currency
self._native_balance = None
self._native_currency = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return CURRENCY_ICONS.get(self._unit_of_measurement, DEFAULT_COIN_ICON)
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_NATIVE_BALANCE: f"{self._native_balance} {self._native_currency}",
}
def update(self):
"""Get the latest state of the sensor."""
self._coinbase_data.update()
for account in self._coinbase_data.accounts["data"]:
if self._name == f"Coinbase {account['name']}":
self._state = account["balance"]["amount"]
self._native_balance = account["native_balance"]["amount"]
self._native_currency = account["native_balance"]["currency"]
class ExchangeRateSensor(Entity):
"""Representation of a Coinbase.com sensor."""
def __init__(self, coinbase_data, exchange_currency, native_currency):
"""Initialize the sensor."""
self._coinbase_data = coinbase_data
self.currency = exchange_currency
self._name = f"{exchange_currency} Exchange Rate"
self._state = None
self._unit_of_measurement = native_currency
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return CURRENCY_ICONS.get(self.currency, DEFAULT_COIN_ICON)
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest state of the sensor."""
self._coinbase_data.update()
rate = self._coinbase_data.exchange_rates.rates[self.currency]
self._state = round(1 / float(rate), 2)
|
from homeassistant.const import (
CURRENCY_EURO,
ELECTRICAL_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
VOLT,
VOLUME_CUBIC_METERS,
)
def dsmr_transform(value):
"""Transform DSMR version value to right format."""
if value.isdigit():
return float(value) / 10
return value
def tariff_transform(value):
"""Transform tariff from number to description."""
if value == "1":
return "low"
return "high"
DEFINITIONS = {
"dsmr/reading/electricity_delivered_1": {
"name": "Low tariff usage",
"icon": "mdi:flash",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/reading/electricity_returned_1": {
"name": "Low tariff returned",
"icon": "mdi:flash-outline",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/reading/electricity_delivered_2": {
"name": "High tariff usage",
"icon": "mdi:flash",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/reading/electricity_returned_2": {
"name": "High tariff returned",
"icon": "mdi:flash-outline",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/reading/electricity_currently_delivered": {
"name": "Current power usage",
"icon": "mdi:flash",
"unit": "kW",
},
"dsmr/reading/electricity_currently_returned": {
"name": "Current power return",
"icon": "mdi:flash-outline",
"unit": "kW",
},
"dsmr/reading/phase_currently_delivered_l1": {
"name": "Current power usage L1",
"icon": "mdi:flash",
"unit": "kW",
},
"dsmr/reading/phase_currently_delivered_l2": {
"name": "Current power usage L2",
"icon": "mdi:flash",
"unit": "kW",
},
"dsmr/reading/phase_currently_delivered_l3": {
"name": "Current power usage L3",
"icon": "mdi:flash",
"unit": "kW",
},
"dsmr/reading/phase_currently_returned_l1": {
"name": "Current power return L1",
"icon": "mdi:flash-outline",
"unit": "kW",
},
"dsmr/reading/phase_currently_returned_l2": {
"name": "Current power return L2",
"icon": "mdi:flash-outline",
"unit": "kW",
},
"dsmr/reading/phase_currently_returned_l3": {
"name": "Current power return L3",
"icon": "mdi:flash-outline",
"unit": "kW",
},
"dsmr/reading/extra_device_delivered": {
"name": "Gas meter usage",
"icon": "mdi:fire",
"unit": VOLUME_CUBIC_METERS,
},
"dsmr/reading/phase_voltage_l1": {
"name": "Current voltage L1",
"icon": "mdi:flash",
"unit": VOLT,
},
"dsmr/reading/phase_voltage_l2": {
"name": "Current voltage L2",
"icon": "mdi:flash",
"unit": VOLT,
},
"dsmr/reading/phase_voltage_l3": {
"name": "Current voltage L3",
"icon": "mdi:flash",
"unit": VOLT,
},
"dsmr/reading/phase_power_current_l1": {
"name": "Phase power current L1",
"icon": "mdi:flash",
"unit": ELECTRICAL_CURRENT_AMPERE,
},
"dsmr/reading/phase_power_current_l2": {
"name": "Phase power current L2",
"icon": "mdi:flash",
"unit": ELECTRICAL_CURRENT_AMPERE,
},
"dsmr/reading/phase_power_current_l3": {
"name": "Phase power current L3",
"icon": "mdi:flash",
"unit": ELECTRICAL_CURRENT_AMPERE,
},
"dsmr/consumption/gas/delivered": {
"name": "Gas usage",
"icon": "mdi:fire",
"unit": VOLUME_CUBIC_METERS,
},
"dsmr/consumption/gas/currently_delivered": {
"name": "Current gas usage",
"icon": "mdi:fire",
"unit": VOLUME_CUBIC_METERS,
},
"dsmr/consumption/gas/read_at": {
"name": "Gas meter read",
"icon": "mdi:clock",
"unit": "",
},
"dsmr/day-consumption/electricity1": {
"name": "Low tariff usage",
"icon": "mdi:counter",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/day-consumption/electricity2": {
"name": "High tariff usage",
"icon": "mdi:counter",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/day-consumption/electricity1_returned": {
"name": "Low tariff return",
"icon": "mdi:counter",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/day-consumption/electricity2_returned": {
"name": "High tariff return",
"icon": "mdi:counter",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/day-consumption/electricity_merged": {
"name": "Power usage total",
"icon": "mdi:counter",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/day-consumption/electricity_returned_merged": {
"name": "Power return total",
"icon": "mdi:counter",
"unit": ENERGY_KILO_WATT_HOUR,
},
"dsmr/day-consumption/electricity1_cost": {
"name": "Low tariff cost",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/day-consumption/electricity2_cost": {
"name": "High tariff cost",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/day-consumption/electricity_cost_merged": {
"name": "Power total cost",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/day-consumption/gas": {
"name": "Gas usage",
"icon": "mdi:counter",
"unit": VOLUME_CUBIC_METERS,
},
"dsmr/day-consumption/gas_cost": {
"name": "Gas cost",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/day-consumption/total_cost": {
"name": "Total cost",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/day-consumption/energy_supplier_price_electricity_delivered_1": {
"name": "Low tariff delivered price",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/day-consumption/energy_supplier_price_electricity_delivered_2": {
"name": "High tariff delivered price",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/day-consumption/energy_supplier_price_electricity_returned_1": {
"name": "Low tariff returned price",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/day-consumption/energy_supplier_price_electricity_returned_2": {
"name": "High tariff returned price",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/day-consumption/energy_supplier_price_gas": {
"name": "Gas price",
"icon": "mdi:currency-eur",
"unit": CURRENCY_EURO,
},
"dsmr/meter-stats/dsmr_version": {
"name": "DSMR version",
"icon": "mdi:alert-circle",
"transform": dsmr_transform,
},
"dsmr/meter-stats/electricity_tariff": {
"name": "Electricity tariff",
"icon": "mdi:flash",
"transform": tariff_transform,
},
"dsmr/meter-stats/power_failure_count": {
"name": "Power failure count",
"icon": "mdi:flash",
},
"dsmr/meter-stats/long_power_failure_count": {
"name": "Long power failure count",
"icon": "mdi:flash",
},
"dsmr/meter-stats/voltage_sag_count_l1": {
"name": "Voltage sag L1",
"icon": "mdi:flash",
},
"dsmr/meter-stats/voltage_sag_count_l2": {
"name": "Voltage sag L2",
"icon": "mdi:flash",
},
"dsmr/meter-stats/voltage_sag_count_l3": {
"name": "Voltage sag L3",
"icon": "mdi:flash",
},
"dsmr/meter-stats/voltage_swell_count_l1": {
"name": "Voltage swell L1",
"icon": "mdi:flash",
},
"dsmr/meter-stats/voltage_swell_count_l2": {
"name": "Voltage swell L2",
"icon": "mdi:flash",
},
"dsmr/meter-stats/voltage_swell_count_l3": {
"name": "Voltage swell L3",
"icon": "mdi:flash",
},
"dsmr/meter-stats/rejected_telegrams": {
"name": "Rejected telegrams",
"icon": "mdi:flash",
},
}
|
from unittest import mock
import pytest
import requests
import requests_mock
import voluptuous as vol
from homeassistant.components.device_tracker import DOMAIN
import homeassistant.components.tomato.device_tracker as tomato
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PLATFORM,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
def mock_session_response(*args, **kwargs):
"""Mock data generation for session response."""
class MockSessionResponse:
def __init__(self, text, status_code):
self.text = text
self.status_code = status_code
# Username: foo
# Password: bar
if args[0].headers["Authorization"] != "Basic Zm9vOmJhcg==":
return MockSessionResponse(None, 401)
if "gimmie_bad_data" in args[0].body:
return MockSessionResponse("This shouldn't (wldev = be here.;", 200)
if "gimmie_good_data" in args[0].body:
return MockSessionResponse(
"wldev = [ ['eth1','F4:F5:D8:AA:AA:AA',"
"-42,5500,1000,7043,0],['eth1','58:EF:68:00:00:00',"
"-42,5500,1000,7043,0]];\n"
"dhcpd_lease = [ ['chromecast','172.10.10.5','F4:F5:D8:AA:AA:AA',"
"'0 days, 16:17:08'],['wemo','172.10.10.6','58:EF:68:00:00:00',"
"'0 days, 12:09:08']];",
200,
)
return MockSessionResponse(None, 200)
@pytest.fixture
def mock_exception_logger():
"""Mock pyunifi."""
with mock.patch(
"homeassistant.components.tomato.device_tracker._LOGGER.exception"
) as mock_exception_logger:
yield mock_exception_logger
@pytest.fixture
def mock_session_send():
"""Mock requests.Session().send."""
with mock.patch("requests.Session.send") as mock_session_send:
yield mock_session_send
def test_config_missing_optional_params(hass, mock_session_send):
"""Test the setup without optional parameters."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_USERNAME: "foo",
CONF_PASSWORD: "password",
tomato.CONF_HTTP_ID: "1234567890",
}
)
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "http://tomato-router:80/update.cgi"
assert result.req.headers == {
"Content-Length": "32",
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": "Basic Zm9vOnBhc3N3b3Jk",
}
assert "_http_id=1234567890" in result.req.body
assert "exec=devlist" in result.req.body
@mock.patch("os.access", return_value=True)
@mock.patch("os.path.isfile", mock.Mock(return_value=True))
def test_config_default_nonssl_port(hass, mock_session_send):
"""Test the setup without a default port set without ssl enabled."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_USERNAME: "foo",
CONF_PASSWORD: "password",
tomato.CONF_HTTP_ID: "1234567890",
}
)
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "http://tomato-router:80/update.cgi"
@mock.patch("os.access", return_value=True)
@mock.patch("os.path.isfile", mock.Mock(return_value=True))
def test_config_default_ssl_port(hass, mock_session_send):
"""Test the setup without a default port set with ssl enabled."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_SSL: True,
CONF_USERNAME: "foo",
CONF_PASSWORD: "password",
tomato.CONF_HTTP_ID: "1234567890",
}
)
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "https://tomato-router:443/update.cgi"
@mock.patch("os.access", return_value=True)
@mock.patch("os.path.isfile", mock.Mock(return_value=True))
def test_config_verify_ssl_but_no_ssl_enabled(hass, mock_session_send):
"""Test the setup with a string with ssl_verify but ssl not enabled."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_PORT: 1234,
CONF_SSL: False,
CONF_VERIFY_SSL: "/test/tomato.crt",
CONF_USERNAME: "foo",
CONF_PASSWORD: "password",
tomato.CONF_HTTP_ID: "1234567890",
}
)
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "http://tomato-router:1234/update.cgi"
assert result.req.headers == {
"Content-Length": "32",
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": "Basic Zm9vOnBhc3N3b3Jk",
}
assert "_http_id=1234567890" in result.req.body
assert "exec=devlist" in result.req.body
assert mock_session_send.call_count == 1
assert mock_session_send.mock_calls[0] == mock.call(result.req, timeout=3)
@mock.patch("os.access", return_value=True)
@mock.patch("os.path.isfile", mock.Mock(return_value=True))
def test_config_valid_verify_ssl_path(hass, mock_session_send):
"""Test the setup with a string for ssl_verify.
Representing the absolute path to a CA certificate bundle.
"""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "/test/tomato.crt",
CONF_USERNAME: "bar",
CONF_PASSWORD: "foo",
tomato.CONF_HTTP_ID: "0987654321",
}
)
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "https://tomato-router:1234/update.cgi"
assert result.req.headers == {
"Content-Length": "32",
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": "Basic YmFyOmZvbw==",
}
assert "_http_id=0987654321" in result.req.body
assert "exec=devlist" in result.req.body
assert mock_session_send.call_count == 1
assert mock_session_send.mock_calls[0] == mock.call(
result.req, timeout=3, verify="/test/tomato.crt"
)
def test_config_valid_verify_ssl_bool(hass, mock_session_send):
"""Test the setup with a bool for ssl_verify."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: "bar",
CONF_PASSWORD: "foo",
tomato.CONF_HTTP_ID: "0987654321",
}
)
}
result = tomato.get_scanner(hass, config)
assert result.req.url == "https://tomato-router:1234/update.cgi"
assert result.req.headers == {
"Content-Length": "32",
"Content-Type": "application/x-www-form-urlencoded",
"Authorization": "Basic YmFyOmZvbw==",
}
assert "_http_id=0987654321" in result.req.body
assert "exec=devlist" in result.req.body
assert mock_session_send.call_count == 1
assert mock_session_send.mock_calls[0] == mock.call(
result.req, timeout=3, verify=False
)
def test_config_errors():
"""Test for configuration errors."""
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
# No Host,
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: "bar",
CONF_PASSWORD: "foo",
tomato.CONF_HTTP_ID: "0987654321",
}
)
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_PORT: -123456789, # Bad Port
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: "bar",
CONF_PASSWORD: "foo",
tomato.CONF_HTTP_ID: "0987654321",
}
)
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
# No Username
CONF_PASSWORD: "foo",
tomato.CONF_HTTP_ID: "0987654321",
}
)
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: "bar",
# No Password
tomato.CONF_HTTP_ID: "0987654321",
}
)
with pytest.raises(vol.Invalid):
tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_PORT: 1234,
CONF_SSL: True,
CONF_VERIFY_SSL: "False",
CONF_USERNAME: "bar",
CONF_PASSWORD: "foo",
# No HTTP_ID
}
)
@mock.patch("requests.Session.send", side_effect=mock_session_response)
def test_config_bad_credentials(hass, mock_exception_logger):
"""Test the setup with bad credentials."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_USERNAME: "i_am",
CONF_PASSWORD: "an_imposter",
tomato.CONF_HTTP_ID: "1234",
}
)
}
tomato.get_scanner(hass, config)
assert mock_exception_logger.call_count == 1
assert mock_exception_logger.mock_calls[0] == mock.call(
"Failed to authenticate, please check your username and password"
)
@mock.patch("requests.Session.send", side_effect=mock_session_response)
def test_bad_response(hass, mock_exception_logger):
"""Test the setup with bad response from router."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_USERNAME: "foo",
CONF_PASSWORD: "bar",
tomato.CONF_HTTP_ID: "gimmie_bad_data",
}
)
}
tomato.get_scanner(hass, config)
assert mock_exception_logger.call_count == 1
assert mock_exception_logger.mock_calls[0] == mock.call(
"Failed to parse response from router"
)
@mock.patch("requests.Session.send", side_effect=mock_session_response)
def test_scan_devices(hass, mock_exception_logger):
"""Test scanning for new devices."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_USERNAME: "foo",
CONF_PASSWORD: "bar",
tomato.CONF_HTTP_ID: "gimmie_good_data",
}
)
}
scanner = tomato.get_scanner(hass, config)
assert scanner.scan_devices() == ["F4:F5:D8:AA:AA:AA", "58:EF:68:00:00:00"]
@mock.patch("requests.Session.send", side_effect=mock_session_response)
def test_bad_connection(hass, mock_exception_logger):
"""Test the router with a connection error."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_USERNAME: "foo",
CONF_PASSWORD: "bar",
tomato.CONF_HTTP_ID: "gimmie_good_data",
}
)
}
with requests_mock.Mocker() as adapter:
adapter.register_uri(
"POST",
"http://tomato-router:80/update.cgi",
exc=requests.exceptions.ConnectionError,
        )
tomato.get_scanner(hass, config)
assert mock_exception_logger.call_count == 1
assert mock_exception_logger.mock_calls[0] == mock.call(
"Failed to connect to the router or invalid http_id supplied"
)
@mock.patch("requests.Session.send", side_effect=mock_session_response)
def test_router_timeout(hass, mock_exception_logger):
"""Test the router with a timeout error."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_USERNAME: "foo",
CONF_PASSWORD: "bar",
tomato.CONF_HTTP_ID: "gimmie_good_data",
}
)
}
with requests_mock.Mocker() as adapter:
adapter.register_uri(
"POST",
"http://tomato-router:80/update.cgi",
exc=requests.exceptions.Timeout,
        )
tomato.get_scanner(hass, config)
assert mock_exception_logger.call_count == 1
assert mock_exception_logger.mock_calls[0] == mock.call(
"Connection to the router timed out"
)
@mock.patch("requests.Session.send", side_effect=mock_session_response)
def test_get_device_name(hass, mock_exception_logger):
"""Test getting device names."""
config = {
DOMAIN: tomato.PLATFORM_SCHEMA(
{
CONF_PLATFORM: tomato.DOMAIN,
CONF_HOST: "tomato-router",
CONF_USERNAME: "foo",
CONF_PASSWORD: "bar",
tomato.CONF_HTTP_ID: "gimmie_good_data",
}
)
}
scanner = tomato.get_scanner(hass, config)
assert scanner.get_device_name("F4:F5:D8:AA:AA:AA") == "chromecast"
assert scanner.get_device_name("58:EF:68:00:00:00") == "wemo"
assert scanner.get_device_name("AA:BB:CC:00:00:00") is None
|
import asyncio
from collections import defaultdict
import logging
import socket
import sys
import async_timeout
from mysensors import mysensors
import voluptuous as vol
from homeassistant.const import CONF_OPTIMISTIC, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.setup import async_setup_component
from .const import (
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAYS,
CONF_NODES,
CONF_PERSISTENCE,
CONF_PERSISTENCE_FILE,
CONF_RETAIN,
CONF_TCP_PORT,
CONF_TOPIC_IN_PREFIX,
CONF_TOPIC_OUT_PREFIX,
CONF_VERSION,
DOMAIN,
MYSENSORS_GATEWAY_READY,
MYSENSORS_GATEWAYS,
)
from .handler import HANDLERS
from .helpers import discover_mysensors_platform, validate_child, validate_node
_LOGGER = logging.getLogger(__name__)
GATEWAY_READY_TIMEOUT = 15.0
MQTT_COMPONENT = "mqtt"
def is_serial_port(value):
"""Validate that value is a windows serial port or a unix device."""
if sys.platform.startswith("win"):
ports = (f"COM{idx + 1}" for idx in range(256))
if value in ports:
return value
raise vol.Invalid(f"{value} is not a serial port")
return cv.isdevice(value)
def is_socket_address(value):
"""Validate that value is a valid address."""
try:
socket.getaddrinfo(value, None)
return value
except OSError as err:
raise vol.Invalid("Device is not a valid domain name or ip address") from err
def get_mysensors_gateway(hass, gateway_id):
"""Return MySensors gateway."""
if MYSENSORS_GATEWAYS not in hass.data:
hass.data[MYSENSORS_GATEWAYS] = {}
gateways = hass.data.get(MYSENSORS_GATEWAYS)
return gateways.get(gateway_id)
async def setup_gateways(hass, config):
"""Set up all gateways."""
conf = config[DOMAIN]
gateways = {}
for index, gateway_conf in enumerate(conf[CONF_GATEWAYS]):
persistence_file = gateway_conf.get(
CONF_PERSISTENCE_FILE,
hass.config.path(f"mysensors{index + 1}.pickle"),
)
ready_gateway = await _get_gateway(hass, config, gateway_conf, persistence_file)
if ready_gateway is not None:
gateways[id(ready_gateway)] = ready_gateway
return gateways
async def _get_gateway(hass, config, gateway_conf, persistence_file):
"""Return gateway after setup of the gateway."""
conf = config[DOMAIN]
persistence = conf[CONF_PERSISTENCE]
version = conf[CONF_VERSION]
device = gateway_conf[CONF_DEVICE]
baud_rate = gateway_conf[CONF_BAUD_RATE]
tcp_port = gateway_conf[CONF_TCP_PORT]
in_prefix = gateway_conf.get(CONF_TOPIC_IN_PREFIX, "")
out_prefix = gateway_conf.get(CONF_TOPIC_OUT_PREFIX, "")
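    # Determine the gateway type: MQTT component, serial device, or TCP socket address.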
if device == MQTT_COMPONENT:
if not await async_setup_component(hass, MQTT_COMPONENT, config):
return None
mqtt = hass.components.mqtt
retain = conf[CONF_RETAIN]
def pub_callback(topic, payload, qos, retain):
"""Call MQTT publish function."""
mqtt.async_publish(topic, payload, qos, retain)
def sub_callback(topic, sub_cb, qos):
"""Call MQTT subscribe function."""
@callback
def internal_callback(msg):
"""Call callback."""
sub_cb(msg.topic, msg.payload, msg.qos)
hass.async_create_task(mqtt.async_subscribe(topic, internal_callback, qos))
gateway = mysensors.AsyncMQTTGateway(
pub_callback,
sub_callback,
in_prefix=in_prefix,
out_prefix=out_prefix,
retain=retain,
loop=hass.loop,
event_callback=None,
persistence=persistence,
persistence_file=persistence_file,
protocol_version=version,
)
else:
try:
await hass.async_add_executor_job(is_serial_port, device)
gateway = mysensors.AsyncSerialGateway(
device,
baud=baud_rate,
loop=hass.loop,
event_callback=None,
persistence=persistence,
persistence_file=persistence_file,
protocol_version=version,
)
except vol.Invalid:
try:
await hass.async_add_executor_job(is_socket_address, device)
# valid ip address
gateway = mysensors.AsyncTCPGateway(
device,
port=tcp_port,
loop=hass.loop,
event_callback=None,
persistence=persistence,
persistence_file=persistence_file,
protocol_version=version,
)
except vol.Invalid:
# invalid ip address
return None
gateway.metric = hass.config.units.is_metric
gateway.optimistic = conf[CONF_OPTIMISTIC]
gateway.device = device
gateway.event_callback = _gw_callback_factory(hass, config)
gateway.nodes_config = gateway_conf[CONF_NODES]
if persistence:
await gateway.start_persistence()
return gateway
async def finish_setup(hass, hass_config, gateways):
"""Load any persistent devices and platforms and start gateway."""
discover_tasks = []
start_tasks = []
for gateway in gateways.values():
discover_tasks.append(_discover_persistent_devices(hass, hass_config, gateway))
start_tasks.append(_gw_start(hass, gateway))
if discover_tasks:
# Make sure all devices and platforms are loaded before gateway start.
await asyncio.wait(discover_tasks)
if start_tasks:
await asyncio.wait(start_tasks)
async def _discover_persistent_devices(hass, hass_config, gateway):
"""Discover platforms for devices loaded via persistence file."""
tasks = []
new_devices = defaultdict(list)
for node_id in gateway.sensors:
if not validate_node(gateway, node_id):
continue
node = gateway.sensors[node_id]
for child in node.children.values():
validated = validate_child(gateway, node_id, child)
for platform, dev_ids in validated.items():
new_devices[platform].extend(dev_ids)
for platform, dev_ids in new_devices.items():
tasks.append(discover_mysensors_platform(hass, hass_config, platform, dev_ids))
if tasks:
await asyncio.wait(tasks)
async def _gw_start(hass, gateway):
"""Start the gateway."""
# Don't use hass.async_create_task to avoid holding up setup indefinitely.
connect_task = hass.loop.create_task(gateway.start())
@callback
def gw_stop(event):
"""Trigger to stop the gateway."""
hass.async_create_task(gateway.stop())
if not connect_task.done():
connect_task.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gw_stop)
if gateway.device == "mqtt":
        # Gateways connected via MQTT don't send a gateway ready message.
return
gateway_ready = asyncio.Future()
gateway_ready_key = MYSENSORS_GATEWAY_READY.format(id(gateway))
hass.data[gateway_ready_key] = gateway_ready
try:
with async_timeout.timeout(GATEWAY_READY_TIMEOUT):
await gateway_ready
except asyncio.TimeoutError:
_LOGGER.warning(
"Gateway %s not ready after %s secs so continuing with setup",
gateway.device,
GATEWAY_READY_TIMEOUT,
)
finally:
hass.data.pop(gateway_ready_key, None)
def _gw_callback_factory(hass, hass_config):
"""Return a new callback for the gateway."""
@callback
def mysensors_callback(msg):
"""Handle messages from a MySensors gateway."""
_LOGGER.debug("Node update: node %s child %s", msg.node_id, msg.child_id)
msg_type = msg.gateway.const.MessageType(msg.type)
msg_handler = HANDLERS.get(msg_type.name)
if msg_handler is None:
return
hass.async_create_task(msg_handler(hass, hass_config, msg))
return mysensors_callback
|
_stash = globals()["_stash"]
def get_all_bg_colors():
"""
Return a list of all known bg colors
"""
return _stash.renderer.BG_COLORS.keys()
def get_all_fg_colors():
"""
Return a list of all known fg colors
"""
return _stash.renderer.FG_COLORS.keys()
def main():
"""
The main function
"""
print("============ COLOR TEST ===================")
bg_colors = get_all_bg_colors()
fg_colors = get_all_fg_colors()
print("------------ available colors -------------")
print("Known FG colors: " + ", ".join(fg_colors))
print("Known BG colors: " + ", ".join(bg_colors))
print("------- showing all combinations ----------")
for fg in _stash.renderer.FG_COLORS:
for bg in _stash.renderer.BG_COLORS:
for bold in (False, True):
for italics in (False, True):
for underscore in (False, True):
for strikethrough in (False, True):
for reverse in (False, True):
traits = []
if bold:
traits.append("bold")
if italics:
traits.append("italic")
if underscore:
traits.append("underline")
if strikethrough:
traits.append("strikethrough")
desc = "{}-{}{}{}".format(fg, bg, ("-" if len(traits) > 0 else ""), "-".join(traits))
s = _stash.text_style(
desc,
dict(
color=fg,
bgcolor=bg,
traits=traits,
)
)
print(s)
print("================= Done =====================")
if __name__ == "__main__":
main()
|
import logging
from pulsectl import Pulse, PulseError
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
import homeassistant.helpers.config_validation as cv
DOMAIN = "pulseaudio_loopback"
_LOGGER = logging.getLogger(__name__)
CONF_SINK_NAME = "sink_name"
CONF_SOURCE_NAME = "source_name"
DEFAULT_NAME = "paloopback"
DEFAULT_PORT = 4713
IGNORED_SWITCH_WARN = "Switch is already in the desired state. Ignoring."
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SINK_NAME): cv.string,
vol.Required(CONF_SOURCE_NAME): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Read in all of our configuration, and initialize the loopback switch."""
name = config.get(CONF_NAME)
sink_name = config.get(CONF_SINK_NAME)
source_name = config.get(CONF_SOURCE_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
hass.data.setdefault(DOMAIN, {})
    server_id = f"{host}:{port}"
if host:
connect_to_server = server_id
else:
connect_to_server = None
if server_id in hass.data[DOMAIN]:
server = hass.data[DOMAIN][server_id]
else:
server = Pulse(server=connect_to_server, connect=False, threading_lock=True)
hass.data[DOMAIN][server_id] = server
add_entities([PALoopbackSwitch(name, server, sink_name, source_name)], True)
class PALoopbackSwitch(SwitchEntity):
"""Representation the presence or absence of a PA loopback module."""
def __init__(self, name, pa_server, sink_name, source_name):
"""Initialize the Pulseaudio switch."""
self._module_idx = None
self._name = name
self._sink_name = sink_name
self._source_name = source_name
self._pa_svr = pa_server
def _get_module_idx(self):
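        """Return the index of the loopback module matching the configured sink/source, or None."""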
try:
self._pa_svr.connect()
for module in self._pa_svr.module_list():
if not module.name == "module-loopback":
continue
if f"sink={self._sink_name}" not in module.argument:
continue
if f"source={self._source_name}" not in module.argument:
continue
return module.index
except PulseError:
return None
return None
@property
def available(self):
"""Return true when connected to server."""
return self._pa_svr.connected
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._module_idx is not None
def turn_on(self, **kwargs):
"""Turn the device on."""
if not self.is_on:
self._pa_svr.module_load(
"module-loopback",
args=f"sink={self._sink_name} source={self._source_name}",
)
else:
_LOGGER.warning(IGNORED_SWITCH_WARN)
def turn_off(self, **kwargs):
"""Turn the device off."""
if self.is_on:
self._pa_svr.module_unload(self._module_idx)
else:
_LOGGER.warning(IGNORED_SWITCH_WARN)
def update(self):
"""Refresh state in case an alternate process modified this data."""
self._module_idx = self._get_module_idx()
|
from collections import Counter
from re import split
from sys import version_info
import pandas as pd
from scattertext.Common import GENERAL_INQUIRER_URL
from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc
class FeatsFromGeneralInquirer(FeatsFromSpacyDoc):
def __init__(self,
use_lemmas=False,
entity_types_to_censor=set(),
tag_types_to_censor=set(),
strip_final_period=False,
**kwargs):
'''
Parameters
----------
        use_lemmas : bool, default False
        entity_types_to_censor : set, default set()
        tag_types_to_censor : set, default set()
        strip_final_period : bool, default False

        All parameters are passed through to FeatsFromSpacyDoc.__init__.
'''
self._lexicon_df = self._download_and_parse_general_inquirer()
super(FeatsFromGeneralInquirer, self).__init__(use_lemmas,
entity_types_to_censor,
tag_types_to_censor,
strip_final_period)
def _download_and_parse_general_inquirer(self):
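        # Download the General Inquirer spreadsheet and reshape it into a
        # DataFrame of categories indexed by lowercase term.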
df = pd.read_csv(GENERAL_INQUIRER_URL, sep='\t')
return (df.T[2:-4].apply(lambda x: list(df
.Entry
.apply(lambda x: x.split('#')[0])
.loc[x.dropna().index]
.drop_duplicates()
.apply(str.lower)),
axis=1)
.apply(pd.Series)
.stack()
.reset_index()[['level_0', 0]]
.rename(columns={'level_0': 'cat', 0: 'term'})
.set_index('term'))
def _analyze(self, doc):
text_df = (pd.DataFrame(pd.Series(Counter(t for t in split(r"(\W)", doc.lower()) if t.strip())))
.join(self._lexicon_df)
.dropna()
.groupby('cat')
.sum()
)
return text_df
def get_definitions(self):
'''
These definitions are from http://apmc.newmdsx.com/CATA/block3/General%20Inquirer%20Categories.txt
:return: dict
'''
return {
'Positiv': '1,915 words of positive outlook. (It does not contain words for yes, which has been made a separate category of 20 entries.)',
'Negativ': '2,291 words of negative outlook (not including the separate category no in the sense of refusal).',
'Pstv': '1045 positive words, an earlier version of Positiv.',
'Affil': 'A subset of 557 Pstv words are also tagged for indicating affiliation or supportiveness.',
'Ngtv': '1160 negative words, an earlier version of Negativ.',
'Hostile': 'A subset of Ngtv 833 words are also tagged Hostile for words indicating an attitude or concern with hostility or aggressiveness.',
'Strong': '1902 words implying strength.',
'Power': 'A subset of 689 Strong words indicating a concern with power, control or authority.',
'Weak': '755 words implying weakness.',
'Submit': 'A subset of 284 Weak words connoting submission to authority or power, dependence on others, vulnerability to others, or withdrawal.',
'Active': '2045 words implying an active orientation.',
'Passive': '911 words indicating a passive orientation',
'Pleasur': '168 words indicating the enjoyment of a feeling, including words indicating confidence, interest and commitment.',
'Pain': '254 words indicating suffering, lack of confidence, or commitment.',
'Feel': '49 words describing particular feelings, including gratitude, apathy, and optimism, not those of pain or pleasure.',
'Arousal': '166 words indicating excitation, aside from pleasures or pains, but including arousal of affiliation and hostility.',
'EMOT': '311 words related to emotion that are used as a disambiguation category, but also available for general use.',
'Virtue': '719 words indicating an assessment of moral approval or good fortune, especially from the perspective of middle-class society.',
'Vice': '685 words indicating an assessment of moral disapproval or misfortune.',
'Ovrst': '"Overstated", 696 words indicating emphasis in realms of speed, frequency, causality, inclusiveness, quantity or quasi-quantity, accuracy, validity, scope, size, clarity, exceptionality, intensity, likelihood, certainty and extremity.',
'Undrst': '"Understated", 319 words indicating de-emphasis and caution in these realms.',
'Academ': '153 words relating to academic, intellectual or educational matters, including the names of major fields of study.',
'Doctrin': '217 words referring to organized systems of belief or knowledge, including those of applied knowledge, mystical beliefs, and arts that academics study.',
'Econ@': '510 words of an economic, commercial, industrial, or business orientation, including roles, collectivities, acts, abstract ideas, and symbols, including references to money. Includes names of common commodities in business.',
'Exch': '60 words concerned with buying, selling and trading.',
'ECON': '502 words (269 in common with Econ@) that is used by the General Inquirer in disambiguating.',
'Exprsv': '205 words associated with the arts, sports, and self-expression.',
'Legal': '192 words relating to legal, judicial, or police matters.',
'Milit': '88 words relating to military matters.',
'Polit@': '263 words having a clear political character, including political roles, collectivities, acts, ideas, ideologies, and symbols.',
'POLIT': 'broader category than Polit@ of 507 words that is used in disambiguation.',
'Relig': '103 words pertaining to religious, metaphysical, supernatural or relevant philosophical matters.',
'Role': '569 words referring to identifiable and standardized individual human behavior patterns, as used by sociologists.',
'COLL': '191 words referring to all human collectivities (not animal). Used in disambiguation.',
'Work': '261 words for socially defined ways for doing work.',
'Ritual': '134 words for non-work social rituals.',
'SocRel': '577 words for socially-defined interpersonal processes (formerly called "IntRel", for interpersonal relations).',
'Race': '15 words (with important use of words senses) referring to racial or ethnic characteristics.',
'Kin@': '50 terms denoting kinship.',
'MALE': '56 words referring to men and social roles associated with men. (Also used as a marker in disambiguation)',
'Female': '43 words referring to women and social roles associated with women.',
#'NonAdlt': '25 words associated with infants through adolescents.',
'HU': '795 general references to humans, including roles',
'ANI': '72 references to animals, fish, birds, and insects, including their collectivities.',
#'Place': 'category with 318 words subdivided',
'Social': '111 words for created locations that typically provide for social interaction and occupy limited space',
'Region': '61 words',
'Route': '23 words',
'Aquatic': '20 words',
'Land': '63 words for places occurring in nature, such as desert or beach',
'Sky': '34 words for all aerial conditions, natural vapors and objects in outer space',
'Object': 'category with 661 words subdivided into',
'Tool': '318 word for tools',
'Food': '80 words for food',
'Vehicle': '39 words for vehcile',
'BldgPt': '46 words for buildings, rooms in buildings, and other building parts',
'ComnObj': '104 words for the tools of communication',
'NatObj': '61 words for natural objects including plants, minerals and other objects occurring in nature other than people or animals)',
'BodyPt': 'a list of 80 parts of the body',
'ComForm': '895 words relating to the form, format or media of the communication transaction.',
'COM': '412 communications words used in disambiguation.',
'Say': '4 words for say and tell.',
'Need': '76 words related to the expression of need or intent.',
'Goal': '53 names of end-states towards which muscular or mental striving is directed.',
'Try': '70 words indicating activities taken to reach a goal, but not including words indicating that the goals have been achieved.',
'Means': '244 words denoting objects, acts or methods utilized in attaining goals. Only 16 words overlap with Lasswell dictionary 77-word category MeansLw.',
'Persist': '64 words indicating "stick to it" and endurance.',
'Complet': '81 words indicating that goals have been achieved, apart from whether the action may continue. The termination of action is indicated by the category Finish.',
'Fail': '137 words indicating that goals have not been achieved.',
'NatrPro': '217 words for processes found in nature, birth to death.',
'Begin': '56 words',
'Vary': '98 words indicating change without connotation of increase, decrease, beginning or ending',
'Increas': 'increase, 111 words',
'Decreas': 'decrease, 82 words',
'Finish': '87 words Terminiation action of completion',
'Stay': '25 movement words relating to staying',
'Rise': '25 movement words relating to rising',
'Exert': '194 movement words relating to exertion',
'Fetch': '79 words, includes carrying',
'Travel': '209 words for all physical movement and travel from one place to another in a horizontal plane',
'Fall': '42 words referring to falling movement',
'Think': '81 words referring to the presence or absence of rational thought processes.',
'Know': '348 words indicating awareness or unawareness, certainty or uncertainty, similarity or difference, generality or specificity, importance or unimportance, presence or absence, as well as components of mental classes, concepts or ideas.',
'Causal': '112 words denoting presumption that occurrence of one phenomenon is necessarily preceded, accompanied or followed by the occurrence of another.',
'Ought': '26 words indicating moral imperative.',
'Perceiv': '192 words referring to the perceptual process of recognizing or identifying something by means of the senses.',
'Compare': '21 words of comparison.',
'Eval@': '205 words which imply judgment and evaluation, whether positive or negative, including means-ends judgments.',
'Solve': '189 words (mostly verbs) referring to the mental processes associated with problem solving.',
'Abs@': '185 words reflecting tendency to use abstract vocabulary. There is also an ABS category (276 words) used as a marker.',
'Quality': '344 words indicating qualities or degrees of qualities which can be detected or measured by the human senses. Virtues and vices are separate.',
'Quan': '314 words indicating the assessment of quantity, including the use of numbers. Numbers are also identified by the NUMBcategory (51 words) which in turn divides into ORDof 15 ordinal words and CARDfor 36 cardinal words.',
'FREQ': '46 words indicating an assessment of frequency or pattern of recurrences, as well as words indicating an assessment of nonoccurrence or low frequency. (Also used in disambiguation)',
'DIST': '19 words referring to distance and its measures. (Used in disambiguation)',
'Time@': '273 words indicating a time consciousness, including when events take place and time taken in an action. Includes velocity words as well. There is also a more restrictive TIME category (75 words) used as a marker for disambiguation.',
'Space': '302 words indicating a consciousness of location in space and spatial relationships. There are also two more specialized marker categories for disambiguation POS (35 words for position) and DIM (49 words for dimension).',
#'Rel1': '36 words indicating a consciousness of abstract relationships between people, places, objects and ideas, apart from relations in space and time.',
'COLOR': '21 words of color, used in disambiguation.',
'Self': '7 pronouns referring to the singular self',
'Our': '6 pronouns referring to the inclusive self ("we", etc.)',
'You': '9 pronouns indicating another person is being addressed directly.',
'Yes': '20 words directly indicating agreement, including word senses "of course", "to say the least", "all right".',
'No': '7 words directly indicating disagreement, with the word "no" itself disambiguated to separately identify absence or negation.',
'Negate': '217 words that refer to reversal or negation, including about 20 "dis" words, 40 "in" words, and 100 "un" words, as well as several senses of the word "no" itself; generally signals a downside view.',
'Intrj': '42 words and includes exclamations as well as casual and slang references, words categorized "yes" and "no" such as "amen" or "nope", as well as other words like "damn" and "farewell".',
'IAV': '1947 verbs giving an interpretative explanation of an action, such as "encourage, mislead, flatter".',
'DAV': '540 straight descriptive verbs of an action or feature of an action, such as "run, walk, write, read".',
'SV': '102 state verbs describing mental or emotional states. usually detached from specific observable events, such as "love, trust, abhor".',
'IPadj': '117 adjectives referring to relations between people, such as "unkind, aloof, supportive".',
'IndAdj': '637 adjectives describing people apart from their relations to one another, such as "thrifty, restless"',
'PowGain': 'Power Gain, 65 words about power increasing',
'PowLoss': 'Power Loss, 109 words of power decreasing.',
'PowEnds': 'Power Ends, 30 words about the goals of the power process.',
'PowAren': 'Power Arenas, 53 words referring to political places and environments except nation-states.',
'PowCon': 'Power conflict, 228 words for ways of conflicting.',
'PowCoop': 'Power cooperation, 118 words for ways of cooperating',
'PowAuPt': 'Power authoritative participants, 134 words for individual and collective actors in power process',
'PowPt': 'Power ordinary participants, 81 words for non-authoritative actors (such as followers) in the power process.',
'PowDoct': 'Power doctrine, 42 words for recognized ideas about power relations and practices.',
'PowAuth': 'Authoritative power, 79 words concerned with a tools or forms of invoking formal power.',
'PowOth': 'Residual category of 332 power words not in other subcategories',
'PowTot': '1,266 words for the whole domain',
'RcEthic': 'Ethics, 151 words of values concerning the social order.',
'RcRelig': 'Religion, 83 words that invoke transcendental, mystical or supernatural grounds for rectitude.',
'RcGain': 'Rectitude gain, 30 words such as worship and forgiveness.',
'RcLoss': 'Rectitude loss, 12 words such as sin and denounce.',
'RcEnds': 'Rectitude ends, 33 words including heaven and the high-frequency word "ought".',
'RcTot': 'Rectitude total, 310 words for the whole domain.',
'RspGain': '26 words for the garnering of respect, such as congratulations',
'RspLoss': '38 words for the losing of respect, such as shame.',
'RspOth': '182 words regarding respect that are neither gain nor loss',
'RspTot': '245 words in the domain.',
'AffGain': '35 words for reaping affect.',
'AffLoss': '11 words for affect loss and indifference',
'AffPt': 'Affect participant, 55 words for friends and family.',
'AffOth': '96 affect words not in other categories',
'AffTot': '196 words in the affect domain',
'WltPt': 'Wealth participant, 52 words for various roles in business and commerce.',
'WltTran': 'Wealth transaction, 53 words for pursuit of wealth, such as buying and selling.',
'WltOth': '271 wealth-related words not in the above, including economic domains and commodities.',
'WltTot': '378 words in wealth domain.',
'WlbGain': '37 various words related to a gain in well being.',
'WlbLoss': '60 words related to a loss in a state of well being, including being upset.',
'WlbPhys': '226 words connoting the physical aspects of well being, including its absence.',
'WlbPsyc': '139 words connoting the psychological aspects of well being, including its absence.',
'WlbPt': '27 roles that evoke a concern for well-being, including infants, doctors, and vacationers.',
'WlbTot': '487 words in well-being domain.',
'EnlGain': 'Enlightenment gain, 146 words likely to reflect a gain in enlightenment through thought, education, etc.',
'EnlLoss': 'Enlightenment loss, 27 words reflecting misunderstanding, being misguided, or oversimplified.',
'EnlEnds': 'Enlightenment ends, 18 words "denoting pursuit of intrinsic enlightenment ideas."',
'EnlPt': 'Enlightenment participant, 61 words referring to roles in the secular enlightenment sphere.',
'EnlOth': '585 other enlightenment words',
'EnlTot': 'total of about 835 words',
'SklAsth': 'Skill aesthetic, 35 words mostly of the arts',
'SklPt': 'Skill participant, 64 words mainly about trades and professions.',
'SklOth': '158 other skill-related words',
'SklTot': '257 skill words in all.',
'TrnGain': 'Transaction gain, 129 general words of accomplishment',
'TrnLoss': 'Transaction loss, 113 general words of not accomplishing, but having setbacks instead.',
'TranLw': '334 words of transaction or exchange in a broad sense, but not necessarily of gain or loss.',
'MeansLw': 'The Lasswell Means category, 78 general words referring to means and utility or lack of same. Overlaps little with Means category.',
'EndsLw': '270 words of desired or undesired ends or goals.',
'ArenaLw': '34 words for settings, other than power related arenas in PowAren.',
'PtLw': 'A list of 68 actors not otherwise defined by the dictionary.',
'Nation': 'A list of 169 nations, which needs updating.',
'Anomie': '30 words that usually show "a negation of value preference", nihilism, disappointment and futility.',
'NegAff': '193 words of negative affect "denoting negative feelings and emotional rejection.',
'PosAff': '126 words of positive affect "denoting positive feelings, acceptance, appreciation and emotional support."',
'SureLw': '175 words indicating "a feeling of sureness, certainty and firmness."',
'If': '132 words "denoting feelings of uncertainty, doubt and vagueness."',
'NotLw': '25 words "that show the denial of one sort or another. "'}
#'TimeSpc': '"a general space-time category" with 428 words,',
#'FormLw': '368 words referring to formats, standards, tools and conventions of communication. almost entirely a subset of the 895 words in ConForm category'}
def get_doc_metadata(self, doc, prefix=''):
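        # Tally General Inquirer category scores for the document and return a
        # Counter keyed by (optionally prefixed) category name.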
topic_counter = Counter()
if version_info[0] >= 3:
doc = str(doc)
for topic_category, score in self._analyze(doc).to_dict()[0].items():
topic_counter[prefix + topic_category] = int(score)
return topic_counter
def has_metadata_term_list(self):
return True
def get_top_model_term_lists(self):
return self._lexicon_df.reset_index().groupby('cat')['term'].apply(list).to_dict()
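# Hypothetical usage sketch (not part of the original module); the class and
# constructor name below are assumptions about how the surrounding library
# exposes this feature extractor:
#
#     feats = FeatsFromGeneralInquirer()          # name assumed
#     counts = feats.get_doc_metadata("We bought cheap stock and felt happy.")
#     # -> Counter mapping each General Inquirer category (optionally prefixed)
#     #    to the integer score returned by self._analyze for that document
#     term_lists = feats.get_top_model_term_lists()
#     # -> {'PosAff': ['happy', ...], 'WltTran': ['buy', ...], ...}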
|
import logging
from time import monotonic, sleep
from homeassistant.components.lock import LockEntity
from homeassistant.const import ATTR_CODE, STATE_LOCKED, STATE_UNLOCKED
from . import CONF_CODE_DIGITS, CONF_DEFAULT_LOCK_CODE, CONF_LOCKS, HUB as hub
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Verisure lock platform."""
locks = []
if int(hub.config.get(CONF_LOCKS, 1)):
hub.update_overview()
locks.extend(
[
VerisureDoorlock(device_label)
for device_label in hub.get("$.doorLockStatusList[*].deviceLabel")
]
)
add_entities(locks)
class VerisureDoorlock(LockEntity):
"""Representation of a Verisure doorlock."""
def __init__(self, device_label):
"""Initialize the Verisure lock."""
self._device_label = device_label
self._state = None
self._digits = hub.config.get(CONF_CODE_DIGITS)
self._changed_by = None
self._change_timestamp = 0
self._default_lock_code = hub.config.get(CONF_DEFAULT_LOCK_CODE)
@property
def name(self):
"""Return the name of the lock."""
return hub.get_first(
"$.doorLockStatusList[?(@.deviceLabel=='%s')].area", self._device_label
)
@property
def state(self):
"""Return the state of the lock."""
return self._state
@property
def available(self):
"""Return True if entity is available."""
return (
hub.get_first(
"$.doorLockStatusList[?(@.deviceLabel=='%s')]", self._device_label
)
is not None
)
@property
def changed_by(self):
"""Last change triggered by."""
return self._changed_by
@property
def code_format(self):
"""Return the required six digit code."""
return "^\\d{%s}$" % self._digits
def update(self):
"""Update lock status."""
if monotonic() - self._change_timestamp < 10:
return
hub.update_overview()
status = hub.get_first(
"$.doorLockStatusList[?(@.deviceLabel=='%s')].lockedState",
self._device_label,
)
if status == "UNLOCKED":
self._state = STATE_UNLOCKED
elif status == "LOCKED":
self._state = STATE_LOCKED
elif status != "PENDING":
_LOGGER.error("Unknown lock state %s", status)
self._changed_by = hub.get_first(
"$.doorLockStatusList[?(@.deviceLabel=='%s')].userString",
self._device_label,
)
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state == STATE_LOCKED
def unlock(self, **kwargs):
"""Send unlock command."""
if self._state is None:
return
code = kwargs.get(ATTR_CODE, self._default_lock_code)
if code is None:
_LOGGER.error("Code required but none provided")
return
self.set_lock_state(code, STATE_UNLOCKED)
def lock(self, **kwargs):
"""Send lock command."""
if self._state == STATE_LOCKED:
return
code = kwargs.get(ATTR_CODE, self._default_lock_code)
if code is None:
_LOGGER.error("Code required but none provided")
return
self.set_lock_state(code, STATE_LOCKED)
def set_lock_state(self, code, state):
"""Send set lock state command."""
lock_state = "lock" if state == STATE_LOCKED else "unlock"
transaction_id = hub.session.set_lock_state(
code, self._device_label, lock_state
)["doorLockStateChangeTransactionId"]
_LOGGER.debug("Verisure doorlock %s", state)
transaction = {}
attempts = 0
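        # Poll the pending transaction until Verisure reports a result; give up
        # after 30 attempts, sleeping 0.5 s between retries (roughly 15 s total).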
while "result" not in transaction:
transaction = hub.session.get_lock_state_transaction(transaction_id)
attempts += 1
if attempts == 30:
break
if attempts > 1:
sleep(0.5)
if transaction["result"] == "OK":
self._state = state
self._change_timestamp = monotonic()
|
from functools import partial
from itertools import repeat
from importlib import import_module
from multiprocessing.dummy import Pool as ThreadPool
from multiprocessing import Pool, cpu_count
import pygogo as gogo
from riko.utils import multiplex, multi_try
from riko.bado import coroutine, return_value
from riko.bado import util, itertools as ait
from meza.process import merge
logger = gogo.Gogo(__name__, monolog=True).logger
class PyPipe(object):
"""A riko module fetching object"""
def __init__(self, name=None, source=None, parallel=False, **kwargs):
self.name = name
self.parallel = parallel
if kwargs.pop('listize', False) and source:
self.source = list(source)
else:
self.source = source or []
self.kwargs = kwargs
def __call__(self, **kwargs):
self.kwargs = kwargs
return self
class SyncPipe(PyPipe):
"""A synchronous Pipe object"""
def __init__(self, name=None, source=None, workers=None, **kwargs):
super(SyncPipe, self).__init__(name, source, **kwargs)
chunksize = kwargs.get('chunksize')
self.threads = kwargs.get('threads', True)
self.reuse_pool = kwargs.get('reuse_pool', True)
self.pool = kwargs.get('pool')
if self.name:
self.pipe = import_module('riko.modules.%s' % self.name).pipe
self.is_processor = self.pipe.__dict__.get('type') == 'processor'
self.mapify = self.is_processor and self.source
self.parallelize = self.parallel and self.mapify
else:
self.pipe = lambda source, **kw: source
self.mapify = False
self.parallelize = False
if self.parallelize:
ordered = kwargs.get('ordered')
length = lenish(self.source)
def_pool = ThreadPool if self.threads else Pool
self.workers = workers or get_worker_cnt(length, self.threads)
self.chunksize = chunksize or get_chunksize(length, self.workers)
self.pool = self.pool or def_pool(self.workers)
self.map = self.pool.imap if ordered else self.pool.imap_unordered
else:
self.workers = workers
self.chunksize = chunksize
self.map = map
def __getattr__(self, name):
kwargs = {
'parallel': self.parallel,
'threads': self.threads,
'pool': self.pool if self.reuse_pool else None,
'reuse_pool': self.reuse_pool,
'workers': self.workers}
return SyncPipe(name, source=self.output, **kwargs)
@property
def output(self):
pipeline = partial(self.pipe, **self.kwargs)
if self.parallelize:
zipped = zip(self.source, repeat(pipeline))
mapped = self.map(listpipe, zipped, chunksize=self.chunksize)
elif self.mapify:
mapped = self.map(pipeline, self.source)
if self.parallelize and not self.reuse_pool:
self.pool.close()
self.pool.join()
return multiplex(mapped) if self.mapify else pipeline(self.source)
@property
def list(self):
return list(self.output)
class PyCollection(object):
"""A riko bulk url fetching object"""
def __init__(self, sources, parallel=False, workers=None, **kwargs):
self.parallel = parallel
conf = kwargs.get('conf', {})
self.zargs = zip(sources, repeat(conf))
self.length = lenish(sources)
self.workers = workers or get_worker_cnt(self.length)
class SyncCollection(PyCollection):
"""A synchronous PyCollection object"""
def __init__(self, *args, **kwargs):
super(SyncCollection, self).__init__(*args, **kwargs)
if self.parallel:
self.chunksize = get_chunksize(self.length, self.workers)
self.pool = ThreadPool(self.workers)
self.map = self.pool.imap_unordered
else:
self.map = map
def fetch(self):
"""Fetch all source urls"""
kwargs = {'chunksize': self.chunksize} if self.parallel else {}
mapped = self.map(getpipe, self.zargs, **kwargs)
return multiplex(mapped)
def pipe(self, **kwargs):
"""Return a SyncPipe primed with the source feed"""
return SyncPipe(source=self.fetch(), **kwargs)
@property
def list(self):
return list(self.fetch())
class AsyncPipe(PyPipe):
"""An asynchronous PyPipe object"""
def __init__(self, name=None, source=None, connections=16, **kwargs):
super(AsyncPipe, self).__init__(name, source, **kwargs)
self.connections = connections
if self.name:
self.module = import_module('riko.modules.%s' % self.name)
self.async_pipe = self.module.async_pipe
pipe_type = self.async_pipe.__dict__.get('type')
self.is_processor = pipe_type == 'processor'
self.mapify = self.is_processor and self.source
else:
self.async_pipe = lambda source, **kw: util.async_return(source)
self.mapify = False
def __getattr__(self, name):
return AsyncPipe(name, source=self.output, connections=self.connections)
@property
@coroutine
def output(self):
source = yield self.source
async_pipeline = partial(self.async_pipe, **self.kwargs)
if self.mapify:
args = (async_pipeline, source, self.connections)
mapped = yield ait.async_map(*args)
output = multiplex(mapped)
else:
output = yield async_pipeline(source)
return_value(output)
@property
@coroutine
def list(self):
output = yield self.output
return_value(list(output))
class AsyncCollection(PyCollection):
"""An asynchronous PyCollection object"""
def __init__(self, sources, connections=16, **kwargs):
super(AsyncCollection, self).__init__(sources, **kwargs)
self.connections = connections
@coroutine
def async_fetch(self):
"""Fetch all source urls"""
args = (async_get_pipe, self.zargs, self.connections)
mapped = yield ait.async_map(*args)
return_value(multiplex(mapped))
def async_pipe(self, **kwargs):
"""Return an AsyncPipe primed with the source feed"""
return AsyncPipe(source=self.async_fetch(), **kwargs)
@property
@coroutine
def list(self):
result = yield self.async_fetch()
return_value(list(result))
def get_chunksize(length, workers):
return (length // (workers * 4)) or 1
def get_worker_cnt(length, threads=True):
multiplier = 2 if threads else 1
return min(length or 1, cpu_count() * multiplier)
def lenish(source, default=50):
funcs = (len, lambda x: x.__length_hint__())
errors = (TypeError, AttributeError)
zipped = list(zip(funcs, errors))
return multi_try(source, zipped, default)
def listpipe(args):
source, pipeline = args
return list(pipeline(source))
def getpipe(args, pipe=SyncPipe):
source, conf = args
ptype = source.get('type', 'fetch')
return pipe(ptype, conf=merge([conf, source])).output
@coroutine
def async_list_pipe(args):
source, async_pipeline = args
output = yield async_pipeline(source)
return_value(list(output))
async_get_pipe = partial(getpipe, pipe=AsyncPipe)
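# --- Hypothetical usage sketch (not part of the original module) ---
# SyncPipe chains riko modules via attribute access: each lookup in __getattr__
# builds a new SyncPipe whose source is the previous pipe's lazily evaluated
# `output` generator, e.g.
#
#     flow = SyncPipe('fetch', conf={'url': 'http://example.com/feed'})  # url is illustrative
#     items = flow.list
#
# The pool-sizing helpers above are plain arithmetic; a quick check:
if __name__ == '__main__':  # pragma: no cover
    length = 100
    workers = get_worker_cnt(length, threads=True)  # min(length, cpu_count() * 2)
    chunksize = get_chunksize(length, workers)      # roughly length // (workers * 4), at least 1
    logger.info('workers=%s chunksize=%s', workers, chunksize)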
|
import json
import os
import pkgutil
from collections import Counter
from glob import glob
import yaml
from jsonschema import Draft4Validator
from jsonschema import exceptions
from jsonschema import FormatChecker
from jsonschema import ValidationError
from paasta_tools.cli.utils import failure
from paasta_tools.cli.utils import get_file_contents
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.cli.utils import guess_service_name
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import PaastaColors
from paasta_tools.cli.utils import success
from paasta_tools.kubernetes_tools import sanitise_kubernetes_name
from paasta_tools.secret_tools import get_secret_name_from_ref
from paasta_tools.secret_tools import is_secret_ref
from paasta_tools.secret_tools import is_shared_secret
from paasta_tools.tron_tools import list_tron_clusters
from paasta_tools.tron_tools import validate_complete_config
from paasta_tools.utils import get_service_instance_list
from paasta_tools.utils import list_all_instances_for_service
from paasta_tools.utils import list_clusters
from paasta_tools.utils import list_services
from paasta_tools.utils import load_system_paasta_config
SCHEMA_VALID = success("Successfully validated schema")
SCHEMA_ERROR = failure(
"Failed to load schema.",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
SCHEMA_INVALID = failure(
"Failed to validate schema. More info:",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
SCHEMA_NOT_FOUND = failure(
"Failed to find schema to validate against. More info:",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
FAILED_READING_FILE = failure(
"Failed to read file. More info:",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
UNKNOWN_SERVICE = (
"Unable to determine service to validate.\n"
"Please supply the %s name you wish to "
"validate with the %s option."
% (PaastaColors.cyan("SERVICE"), PaastaColors.cyan("-s"))
)
def invalid_tron_namespace(cluster, output, filename):
return failure(
"%s is invalid:\n %s\n " "More info:" % (filename, output),
"http://tron.readthedocs.io/en/latest/jobs.html",
)
def valid_tron_namespace(cluster, filename):
return success(f"{filename} is valid.")
def duplicate_instance_names_message(service, cluster, instance_names):
instance_name_list = "\n\t".join(instance_names)
message = (
f"Service {service} uses the following duplicate instance names for "
f"cluster {cluster}:\n\t{instance_name_list}\n"
)
return failure(
message, "https://paasta.readthedocs.io/en/latest/yelpsoa_configs.html"
)
def no_duplicate_instance_names_message(service, cluster):
return success(f"All {service}'s instance names in cluster {cluster} are unique")
def get_schema(file_type):
"""Get the correct schema to use for validation
:param file_type: what schema type should we validate against
"""
schema_path = "schemas/%s_schema.json" % file_type
try:
schema = pkgutil.get_data("paasta_tools.cli", schema_path).decode()
except IOError:
return None
return json.loads(schema)
def validate_instance_names(config_file_object, file_path):
errors = []
for instance_name in config_file_object:
if (
not instance_name.startswith("_")
and len(sanitise_kubernetes_name(instance_name)) > 63
):
errors.append(instance_name)
if errors:
error_string = "\n".join(errors)
print(
failure(
f"Length of instance name \n{error_string}\n should be no more than 63."
+ " Note _ is replaced with -- due to Kubernetes restriction",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return len(errors) == 0
def validate_service_name(service):
if len(sanitise_kubernetes_name(service)) > 63:
print(
failure(
f"Length of service name {service} should be no more than 63."
+ " Note _ is replaced with - due to Kubernetes restriction",
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return False
return True
def get_config_file_dict(file_path):
basename = os.path.basename(file_path)
extension = os.path.splitext(basename)[1]
try:
config_file = get_file_contents(file_path)
if extension == ".yaml":
return yaml.safe_load(config_file)
elif extension == ".json":
return json.loads(config_file)
else:
return config_file
except Exception:
print(f"{FAILED_READING_FILE}: {file_path}")
raise
def validate_schema(file_path, file_type):
"""Check if the specified config file has a valid schema
:param file_path: path to file to validate
:param file_type: what schema type should we validate against
"""
try:
schema = get_schema(file_type)
except Exception as e:
print(f"{SCHEMA_ERROR}: {file_type}, error: {e!r}")
return
if schema is None:
print(f"{SCHEMA_NOT_FOUND}: {file_path}")
return
validator = Draft4Validator(schema, format_checker=FormatChecker())
basename = os.path.basename(file_path)
config_file_object = get_config_file_dict(file_path)
try:
validator.validate(config_file_object)
if file_type == "kubernetes" and not validate_instance_names(
config_file_object, file_path
):
return
except ValidationError:
print(f"{SCHEMA_INVALID}: {file_path}")
errors = validator.iter_errors(config_file_object)
print(" Validation Message: %s" % exceptions.best_match(errors).message)
except Exception as e:
print(f"{SCHEMA_ERROR}: {file_type}, error: {e!r}")
return
else:
print(f"{SCHEMA_VALID}: {basename}")
return True
def validate_all_schemas(service_path):
"""Finds all recognized config files in service directory,
and validates their schema.
:param service_path: path to location of configuration files
"""
path = os.path.join(service_path, "*.yaml")
returncode = True
for file_name in glob(path):
if os.path.islink(file_name):
continue
basename = os.path.basename(file_name)
for file_type in ["marathon", "adhoc", "tron", "kubernetes"]:
if basename.startswith(file_type):
if not validate_schema(file_name, file_type):
returncode = False
return returncode
def add_subparser(subparsers):
validate_parser = subparsers.add_parser(
"validate",
description="Execute 'paasta validate' from service repo root",
help="Validate that all paasta config files in pwd are correct",
)
validate_parser.add_argument(
"-s",
"--service",
required=False,
help="Service that you want to validate. Like 'example_service'.",
).completer = lazy_choices_completer(list_services)
validate_parser.add_argument(
"-y",
"--yelpsoa-config-root",
dest="yelpsoa_config_root",
default=os.getcwd(),
required=False,
help="Path to root of yelpsoa-configs checkout",
)
validate_parser.set_defaults(command=paasta_validate)
def check_service_path(service_path):
"""Check that the specified path exists and has yaml files
:param service_path: Path to directory that should contain yaml files
"""
if not service_path or not os.path.isdir(service_path):
print(
failure(
"%s is not a directory" % service_path,
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return False
if not glob(os.path.join(service_path, "*.yaml")):
print(
failure(
"%s does not contain any .yaml files" % service_path,
"http://paasta.readthedocs.io/en/latest/yelpsoa_configs.html",
)
)
return False
return True
def get_service_path(service, soa_dir):
"""Determine the path of the directory containing the conf files
:param service: Name of service
:param soa_dir: Directory containing soa configs for all services
"""
if service:
service_path = os.path.join(soa_dir, service)
else:
if soa_dir == os.getcwd():
service_path = os.getcwd()
else:
print(UNKNOWN_SERVICE)
return None
return service_path
def path_to_soa_dir_service(service_path):
"""Split a service_path into its soa_dir and service name components"""
soa_dir = os.path.dirname(service_path)
service = os.path.basename(service_path)
return soa_dir, service
def validate_tron(service_path):
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
for cluster in list_tron_clusters(service, soa_dir):
if not validate_tron_namespace(service, cluster, soa_dir):
returncode = False
return returncode
def validate_tron_namespace(service, cluster, soa_dir, tron_dir=False):
if tron_dir:
display_name = f"{cluster}/{service}.yaml"
else:
display_name = f"tron-{cluster}.yaml"
messages = validate_complete_config(service, cluster, soa_dir)
returncode = len(messages) == 0
if messages:
print(invalid_tron_namespace(cluster, "\n ".join(messages), display_name))
else:
print(valid_tron_namespace(cluster, display_name))
return returncode
def validate_paasta_objects(service_path):
soa_dir, service = path_to_soa_dir_service(service_path)
returncode = True
messages = []
for cluster in list_clusters(service, soa_dir):
for instance in list_all_instances_for_service(
service=service, clusters=[cluster], soa_dir=soa_dir
):
instance_config = get_instance_config(
service=service,
instance=instance,
cluster=cluster,
load_deployments=False,
soa_dir=soa_dir,
)
messages.extend(instance_config.validate())
returncode = len(messages) == 0
if messages:
errors = "\n".join(messages)
print(failure((f"There were failures validating {service}: {errors}"), ""))
else:
print(success(f"All PaaSTA Instances for are valid for all clusters"))
return returncode
def validate_unique_instance_names(service_path):
"""Check that the service does not use the same instance name more than once"""
soa_dir, service = path_to_soa_dir_service(service_path)
check_passed = True
for cluster in list_clusters(service, soa_dir):
service_instances = get_service_instance_list(
service=service, cluster=cluster, soa_dir=soa_dir
)
instance_names = [service_instance[1] for service_instance in service_instances]
instance_name_to_count = Counter(instance_names)
duplicate_instance_names = [
instance_name
for instance_name, count in instance_name_to_count.items()
if count > 1
]
if duplicate_instance_names:
check_passed = False
print(
duplicate_instance_names_message(
service, cluster, duplicate_instance_names
)
)
else:
print(no_duplicate_instance_names_message(service, cluster))
return check_passed
def validate_autoscaling_configs(service_path):
"""Validate new autoscaling configurations that are not validated by jsonschema for the service of interest.
:param service_path: Path to directory containing soa conf yaml files for service
"""
path = os.path.join(service_path, "*.yaml")
returncode = True
instances = {}
# Read and store all instance configuration in instances dict
for file_name in glob(path):
if os.path.islink(file_name):
continue
basename = os.path.basename(file_name)
if basename.startswith("kubernetes"):
            cluster = basename[len("kubernetes-") : -len(".yaml")]
instances[cluster] = get_config_file_dict(file_name)
# Validate autoscaling configurations for all instances
for cluster_name, cluster in instances.items():
for instance_name, instance in cluster.items():
for metric, params in instance.get("new_autoscaling", {}).items():
if len(metric) > 63:
returncode = False
print(f"length of metric name {metric} exceeds 63")
continue
if metric in {"http", "uwsgi"} and "dimensions" in params:
for k, v in params["dimensions"].items():
if len(k) > 128:
returncode = False
print(
f"length of dimension key {k} of instance {instance_name} in {cluster_name} cannot exceed 128"
)
if len(v) > 256:
returncode = False
print(
f"length of dimension value {v} of instance {instance_name} in {cluster_name} cannot exceed 256"
)
return returncode
def check_secrets_for_instance(instance_config_dict, soa_dir, service_path, vault_env):
return_value = True
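    # Env values that look like secret references (paasta's SECRET(...) /
    # SHARED_SECRET(...) syntax, per secret_tools) must have a matching secret
    # file containing a ciphertext for the cluster's vault environment.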
for env_value in instance_config_dict.get("env", {}).values():
if is_secret_ref(env_value):
secret_name = get_secret_name_from_ref(env_value)
if is_shared_secret(env_value):
secret_file_name = f"{soa_dir}/_shared/secrets/{secret_name}.json"
else:
secret_file_name = f"{service_path}/secrets/{secret_name}.json"
if os.path.isfile(secret_file_name):
secret_json = get_config_file_dict(secret_file_name)
if "ciphertext" not in secret_json["environments"].get(vault_env, {}):
print(
failure(
f"Secret {secret_name} not defined for ecosystem {vault_env} on secret file {secret_file_name}",
"",
)
)
return_value = False
else:
print(failure(f"Secret file {secret_file_name} not defined", ""))
return_value = False
return return_value
def validate_secrets(service_path):
soa_dir, service = path_to_soa_dir_service(service_path)
system_paasta_config = load_system_paasta_config()
vault_cluster_map = system_paasta_config.get_vault_cluster_config()
return_value = True
for cluster in list_clusters(service, soa_dir):
vault_env = vault_cluster_map.get(cluster)
if not vault_env:
print(failure(f"{cluster} not found on vault_cluster_map", ""))
return_value = False
continue
for instance in list_all_instances_for_service(
service=service, clusters=[cluster], soa_dir=soa_dir
):
instance_config = get_instance_config(
service=service,
instance=instance,
cluster=cluster,
load_deployments=False,
soa_dir=soa_dir,
)
if not check_secrets_for_instance(
instance_config.config_dict, soa_dir, service_path, vault_env
):
return_value = False
if return_value:
print(success("No orphan secrets found"))
return return_value
def paasta_validate_soa_configs(service, service_path):
"""Analyze the service in service_path to determine if the conf files are valid
:param service_path: Path to directory containing soa conf yaml files for service
"""
if not check_service_path(service_path):
return False
if not validate_service_name(service):
return False
returncode = True
if not validate_all_schemas(service_path):
returncode = False
if not validate_tron(service_path):
returncode = False
if not validate_paasta_objects(service_path):
returncode = False
if not validate_unique_instance_names(service_path):
returncode = False
if not validate_autoscaling_configs(service_path):
returncode = False
if not validate_secrets(service_path):
returncode = False
return returncode
def paasta_validate(args):
"""Generate a service_path from the provided args and call paasta_validate_soa_configs
:param args: argparse.Namespace obj created from sys.args by cli
"""
service_path = get_service_path(args.service, args.yelpsoa_config_root)
service = args.service or guess_service_name()
if not paasta_validate_soa_configs(service, service_path):
return 1
|
from ipaddress import ip_address
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.core import callback
from homeassistant.util.network import is_link_local
from .const import (
CONF_MODEL,
CONF_STREAM_PROFILE,
DEFAULT_STREAM_PROFILE,
DOMAIN as AXIS_DOMAIN,
)
from .device import get_device
from .errors import AuthenticationRequired, CannotConnect
AXIS_OUI = {"00408C", "ACCC8E", "B8A44F"}
CONFIG_FILE = "axis.conf"
EVENT_TYPES = ["motion", "vmd3", "pir", "sound", "daynight", "tampering", "input"]
PLATFORMS = ["camera"]
AXIS_INCLUDE = EVENT_TYPES + PLATFORMS
DEFAULT_PORT = 80
class AxisFlowHandler(config_entries.ConfigFlow, domain=AXIS_DOMAIN):
"""Handle a Axis config flow."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return AxisOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the Axis config flow."""
self.device_config = {}
self.discovery_schema = {}
self.import_schema = {}
async def async_step_user(self, user_input=None):
"""Handle a Axis config flow start.
Manage device specific parameters.
"""
errors = {}
if user_input is not None:
try:
device = await get_device(
self.hass,
host=user_input[CONF_HOST],
port=user_input[CONF_PORT],
username=user_input[CONF_USERNAME],
password=user_input[CONF_PASSWORD],
)
await self.async_set_unique_id(device.vapix.serial_number)
self._abort_if_unique_id_configured(
updates={
CONF_HOST: user_input[CONF_HOST],
CONF_PORT: user_input[CONF_PORT],
}
)
self.device_config = {
CONF_HOST: user_input[CONF_HOST],
CONF_PORT: user_input[CONF_PORT],
CONF_USERNAME: user_input[CONF_USERNAME],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_MAC: device.vapix.serial_number,
CONF_MODEL: device.vapix.product_number,
}
return await self._create_entry()
except AuthenticationRequired:
errors["base"] = "invalid_auth"
except CannotConnect:
errors["base"] = "cannot_connect"
data = self.discovery_schema or {
vol.Required(CONF_HOST): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
}
return self.async_show_form(
step_id="user",
description_placeholders=self.device_config,
data_schema=vol.Schema(data),
errors=errors,
)
async def _create_entry(self):
"""Create entry for device.
Generate a name to be used as a prefix for device entities.
"""
model = self.device_config[CONF_MODEL]
same_model = [
entry.data[CONF_NAME]
for entry in self.hass.config_entries.async_entries(AXIS_DOMAIN)
if entry.data[CONF_MODEL] == model
]
name = model
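        # Pick the first free numeric suffix among entries of the same model,
        # e.g. "M1065-LW 0", "M1065-LW 1", ... (model string is illustrative).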
for idx in range(len(same_model) + 1):
name = f"{model} {idx}"
if name not in same_model:
break
self.device_config[CONF_NAME] = name
title = f"{model} - {self.device_config[CONF_MAC]}"
return self.async_create_entry(title=title, data=self.device_config)
async def async_step_zeroconf(self, discovery_info):
"""Prepare configuration for a discovered Axis device."""
serial_number = discovery_info["properties"]["macaddress"]
if serial_number[:6] not in AXIS_OUI:
return self.async_abort(reason="not_axis_device")
if is_link_local(ip_address(discovery_info[CONF_HOST])):
return self.async_abort(reason="link_local_address")
await self.async_set_unique_id(serial_number)
self._abort_if_unique_id_configured(
updates={
CONF_HOST: discovery_info[CONF_HOST],
CONF_PORT: discovery_info[CONF_PORT],
}
)
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
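        # Zeroconf hostnames end in ".local." (7 characters); strip that suffix
        # so only the device's short name is shown in the title placeholder.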
self.context["title_placeholders"] = {
CONF_NAME: discovery_info["hostname"][:-7],
CONF_HOST: discovery_info[CONF_HOST],
}
self.discovery_schema = {
vol.Required(CONF_HOST, default=discovery_info[CONF_HOST]): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Required(CONF_PORT, default=discovery_info[CONF_PORT]): int,
}
return await self.async_step_user()
class AxisOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle Axis device options."""
def __init__(self, config_entry):
"""Initialize Axis device options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
self.device = None
async def async_step_init(self, user_input=None):
"""Manage the Axis device options."""
self.device = self.hass.data[AXIS_DOMAIN][self.config_entry.unique_id]
return await self.async_step_configure_stream()
async def async_step_configure_stream(self, user_input=None):
"""Manage the Axis device options."""
if user_input is not None:
self.options.update(user_input)
return self.async_create_entry(title="", data=self.options)
profiles = [DEFAULT_STREAM_PROFILE]
for profile in self.device.api.vapix.streaming_profiles:
profiles.append(profile.name)
return self.async_show_form(
step_id="configure_stream",
data_schema=vol.Schema(
{
vol.Optional(
CONF_STREAM_PROFILE, default=self.device.option_stream_profile
): vol.In(profiles)
}
),
)
|
from __future__ import division, print_function
from .display_common import BaseScreen
import time
class LCDScreen(BaseScreen):
def set_terminal_properties(self, colors=None, bright_is_bold=None,
has_underline=None):
pass
def set_mouse_tracking(self, enable=True):
pass
def set_input_timeouts(self, *args):
pass
def reset_default_terminal_palette(self, *args):
pass
def draw_screen(self, size, r ):
pass
def clear(self):
pass
def get_cols_rows(self):
return self.DISPLAY_SIZE
class CFLCDScreen(LCDScreen):
"""
Common methods for Crystal Fontz LCD displays
"""
KEYS = [None, # no key with code 0
'up_press', 'down_press', 'left_press',
'right_press', 'enter_press', 'exit_press',
'up_release', 'down_release', 'left_release',
'right_release', 'enter_release', 'exit_release',
'ul_press', 'ur_press', 'll_press', 'lr_press',
'ul_release', 'ur_release', 'll_release', 'lr_release']
CMD_PING = 0
CMD_VERSION = 1
CMD_CLEAR = 6
CMD_CGRAM = 9
CMD_CURSOR_POSITION = 11 # data = [col, row]
CMD_CURSOR_STYLE = 12 # data = [style (0-4)]
CMD_LCD_CONTRAST = 13 # data = [contrast (0-255)]
CMD_BACKLIGHT = 14 # data = [power (0-100)]
CMD_LCD_DATA = 31 # data = [col, row] + text
CMD_GPO = 34 # data = [pin(0-12), value(0-100)]
# sent from device
CMD_KEY_ACTIVITY = 0x80
CMD_ACK = 0x40 # in high two bits ie. & 0xc0
CURSOR_NONE = 0
CURSOR_BLINKING_BLOCK = 1
CURSOR_UNDERSCORE = 2
CURSOR_BLINKING_BLOCK_UNDERSCORE = 3
CURSOR_INVERTING_BLINKING_BLOCK = 4
MAX_PACKET_DATA_LENGTH = 22
colors = 1
has_underline = False
def __init__(self, device_path, baud):
"""
device_path -- eg. '/dev/ttyUSB0'
baud -- baud rate
"""
super(CFLCDScreen, self).__init__()
self.device_path = device_path
from serial import Serial
self._device = Serial(device_path, baud, timeout=0)
self._unprocessed = ""
@classmethod
def get_crc(cls, buf):
# This seed makes the output of this shift based algorithm match
# the table based algorithm. The center 16 bits of the 32-bit
# "newCRC" are used for the CRC. The MSB of the lower byte is used
# to see what bit was shifted out of the center 16 bit CRC
# accumulator ("carry flag analog");
newCRC = 0x00F32100
for byte in buf:
# Push this byte’s bits through a software
# implementation of a hardware shift & xor.
for bit_count in range(8):
# Shift the CRC accumulator
newCRC >>= 1
# The new MSB of the CRC accumulator comes
# from the LSB of the current data byte.
if ord(byte) & (0x01 << bit_count):
newCRC |= 0x00800000
# If the low bit of the current CRC accumulator was set
# before the shift, then we need to XOR the accumulator
# with the polynomial (center 16 bits of 0x00840800)
if newCRC & 0x00000080:
newCRC ^= 0x00840800
# All the data has been done. Do 16 more bits of 0 data.
for bit_count in range(16):
# Shift the CRC accumulator
newCRC >>= 1
# If the low bit of the current CRC accumulator was set
# before the shift we need to XOR the accumulator with
# 0x00840800.
if newCRC & 0x00000080:
newCRC ^= 0x00840800
# Return the center 16 bits, making this CRC match the one’s
# complement that is sent in the packet.
return ((~newCRC)>>8) & 0xffff
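    # Packet framing (used by _send_packet and _parse_data below):
    #   [command byte][data length byte][data ...][CRC low byte][CRC high byte]
    # where the 16-bit CRC from get_crc() covers the command, length and data bytes.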
def _send_packet(self, command, data):
"""
low-level packet sending.
Following the protocol requires waiting for ack packet between
sending each packet to the device.
"""
buf = chr(command) + chr(len(data)) + data
crc = self.get_crc(buf)
buf = buf + chr(crc & 0xff) + chr(crc >> 8)
self._device.write(buf)
def _read_packet(self):
"""
low-level packet reading.
returns (command/report code, data) or None
        This method stores the data read so far and tries to resync when bad data
is received.
"""
# pull in any new data available
self._unprocessed = self._unprocessed + self._device.read()
while True:
try:
command, data, unprocessed = self._parse_data(self._unprocessed)
self._unprocessed = unprocessed
return command, data
except self.MoreDataRequired:
return
except self.InvalidPacket:
# throw out a byte and try to parse again
self._unprocessed = self._unprocessed[1:]
class InvalidPacket(Exception):
pass
class MoreDataRequired(Exception):
pass
@classmethod
def _parse_data(cls, data):
"""
Try to read a packet from the start of data, returning
(command/report code, packet_data, remaining_data)
or raising InvalidPacket or MoreDataRequired
"""
if len(data) < 2:
raise cls.MoreDataRequired
command = ord(data[0])
plen = ord(data[1])
if plen > cls.MAX_PACKET_DATA_LENGTH:
raise cls.InvalidPacket("length value too large")
if len(data) < plen + 4:
raise cls.MoreDataRequired
crc = cls.get_crc(data[:2 + plen])
pcrc = ord(data[2 + plen]) + (ord(data[3 + plen]) << 8 )
if crc != pcrc:
raise cls.InvalidPacket("CRC doesn't match")
return (command, data[2:2 + plen], data[4 + plen:])
class KeyRepeatSimulator(object):
"""
Provide simulated repeat key events when given press and
release events.
If two or more keys are pressed disable repeating until all
keys are released.
"""
def __init__(self, repeat_delay, repeat_next):
"""
repeat_delay -- seconds to wait before starting to repeat keys
repeat_next -- time between each repeated key
"""
self.repeat_delay = repeat_delay
self.repeat_next = repeat_next
self.pressed = {}
self.multiple_pressed = False
def press(self, key):
if self.pressed:
self.multiple_pressed = True
self.pressed[key] = time.time()
def release(self, key):
if key not in self.pressed:
return # ignore extra release events
del self.pressed[key]
if not self.pressed:
self.multiple_pressed = False
def next_event(self):
"""
Return (remaining, key) where remaining is the number of seconds
(float) until the key repeat event should be sent, or None if no
events are pending.
"""
if len(self.pressed) != 1 or self.multiple_pressed:
return
for key in self.pressed:
return max(0, self.pressed[key] + self.repeat_delay
- time.time()), key
def sent_event(self):
"""
        Call this method when you have sent a key repeat event so the
timer will be reset for the next event
"""
if len(self.pressed) != 1:
return # ignore event that shouldn't have been sent
for key in self.pressed:
self.pressed[key] = (
time.time() - self.repeat_delay + self.repeat_next)
return
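# Hypothetical usage sketch for KeyRepeatSimulator (not part of the original module):
#
#     sim = KeyRepeatSimulator(repeat_delay=0.5, repeat_next=0.125)
#     sim.press('enter')
#     remaining, key = sim.next_event()  # seconds until 'enter' should repeat
#     sim.sent_event()                   # reset the timer after emitting the repeat
#     sim.release('enter')               # no events while zero or 2+ keys are held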
class CF635Screen(CFLCDScreen):
u"""
Crystal Fontz 635 display
20x4 character display + cursor
no foreground/background colors or settings supported
see CGROM for list of close unicode matches to characters available
6 button input
up, down, left, right, enter (check mark), exit (cross)
"""
DISPLAY_SIZE = (20, 4)
# ① through ⑧ are programmable CGRAM (chars 0-7, repeated at 8-15)
# double arrows (⇑⇓) appear as double arrowheads (chars 18, 19)
# ⑴ resembles a bell
# ⑵ resembles a filled-in "Y"
# ⑶ is the letters "Pt" together
# partial blocks (▇▆▄▃▁) are actually shorter versions of (▉▋▌▍▏)
# both groups are intended to draw horizontal bars with pixel
# precision, use ▇*[▆▄▃▁]? for a thin bar or ▉*[▋▌▍▏]? for a thick bar
CGROM = (
u"①②③④⑤⑥⑦⑧①②③④⑤⑥⑦⑧"
u"►◄⇑⇓«»↖↗↙↘▲▼↲^ˇ█"
u" !\"#¤%&'()*+,-./"
u"0123456789:;<=>?"
u"¡ABCDEFGHIJKLMNO"
u"PQRSTUVWXYZÄÖÑܧ"
u"¿abcdefghijklmno"
u"pqrstuvwxyzäöñüà"
u"⁰¹²³⁴⁵⁶⁷⁸⁹½¼±≥≤μ"
u"♪♫⑴♥♦⑵⌜⌟“”()αɛδ∞"
u"@£$¥èéùìòÇᴾØøʳÅå"
u"⌂¢ΦτλΩπΨΣθΞ♈ÆæßÉ"
u"ΓΛΠϒ_ÈÊêçğŞşİι~◊"
u"▇▆▄▃▁ƒ▉▋▌▍▏⑶◽▪↑→"
u"↓←ÁÍÓÚÝáíóúýÔôŮů"
u"ČĔŘŠŽčĕřšž[\]{|}")
cursor_style = CFLCDScreen.CURSOR_INVERTING_BLINKING_BLOCK
def __init__(self, device_path, baud=115200,
repeat_delay=0.5, repeat_next=0.125,
key_map=['up', 'down', 'left', 'right', 'enter', 'esc']):
"""
device_path -- eg. '/dev/ttyUSB0'
baud -- baud rate
repeat_delay -- seconds to wait before starting to repeat keys
repeat_next -- time between each repeated key
key_map -- the keys to send for this device's buttons
"""
super(CF635Screen, self).__init__(device_path, baud)
self.repeat_delay = repeat_delay
self.repeat_next = repeat_next
self.key_repeat = KeyRepeatSimulator(repeat_delay, repeat_next)
self.key_map = key_map
self._last_command = None
self._last_command_time = 0
self._command_queue = []
self._screen_buf = None
self._previous_canvas = None
self._update_cursor = False
def get_input_descriptors(self):
"""
return the fd from our serial device so we get called
on input and responses
"""
return [self._device.fd]
def get_input_nonblocking(self):
"""
Return a (next_input_timeout, keys_pressed, raw_keycodes)
tuple.
The protocol for our device requires waiting for acks between
each command, so this method responds to those as well as key
press and release events.
Key repeat events are simulated here as the device doesn't send
any for us.
raw_keycodes are the bytes of messages we received, which might
not seem to have any correspondence to keys_pressed.
"""
input = []
raw_input = []
timeout = None
while True:
packet = self._read_packet()
if not packet:
break
command, data = packet
if command == self.CMD_KEY_ACTIVITY and data:
d0 = ord(data[0])
if 1 <= d0 <= 12:
release = d0 > 6
keycode = d0 - (release * 6) - 1
key = self.key_map[keycode]
if release:
self.key_repeat.release(key)
else:
input.append(key)
self.key_repeat.press(key)
raw_input.append(d0)
elif command & 0xc0 == 0x40: # "ACK"
if command & 0x3f == self._last_command:
self._send_next_command()
next_repeat = self.key_repeat.next_event()
if next_repeat:
timeout, key = next_repeat
if not timeout:
input.append(key)
self.key_repeat.sent_event()
timeout = None
        return timeout, input, raw_input
def _send_next_command(self):
"""
send out the next command in the queue
"""
if not self._command_queue:
self._last_command = None
return
command, data = self._command_queue.pop(0)
self._send_packet(command, data)
self._last_command = command # record command for ACK
self._last_command_time = time.time()
def queue_command(self, command, data):
self._command_queue.append((command, data))
# not waiting? send away!
if self._last_command is None:
self._send_next_command()
def draw_screen(self, size, canvas):
assert size == self.DISPLAY_SIZE
if self._screen_buf:
osb = self._screen_buf
else:
osb = []
sb = []
y = 0
for row in canvas.content():
text = []
for a, cs, run in row:
text.append(run)
if not osb or osb[y] != text:
self.queue_command(self.CMD_LCD_DATA, chr(0) + chr(y) +
"".join(text))
sb.append(text)
y += 1
if (self._previous_canvas and
self._previous_canvas.cursor == canvas.cursor and
(not self._update_cursor or not canvas.cursor)):
pass
elif canvas.cursor is None:
self.queue_command(self.CMD_CURSOR_STYLE, chr(self.CURSOR_NONE))
else:
x, y = canvas.cursor
self.queue_command(self.CMD_CURSOR_POSITION, chr(x) + chr(y))
self.queue_command(self.CMD_CURSOR_STYLE, chr(self.cursor_style))
self._update_cursor = False
self._screen_buf = sb
self._previous_canvas = canvas
def program_cgram(self, index, data):
"""
Program character data. Characters available as chr(0) through
chr(7), and repeated as chr(8) through chr(15).
index -- 0 to 7 index of character to program
data -- list of 8, 6-bit integer values top to bottom with MSB
on the left side of the character.
"""
assert 0 <= index <= 7
assert len(data) == 8
self.queue_command(self.CMD_CGRAM, chr(index) +
"".join([chr(x) for x in data]))
def set_cursor_style(self, style):
"""
style -- CURSOR_BLINKING_BLOCK, CURSOR_UNDERSCORE,
CURSOR_BLINKING_BLOCK_UNDERSCORE or
CURSOR_INVERTING_BLINKING_BLOCK
"""
assert 1 <= style <= 4
self.cursor_style = style
self._update_cursor = True
def set_backlight(self, value):
"""
Set backlight brightness
value -- 0 to 100
"""
assert 0 <= value <= 100
self.queue_command(self.CMD_BACKLIGHT, chr(value))
def set_lcd_contrast(self, value):
"""
value -- 0 to 255
"""
assert 0 <= value <= 255
self.queue_command(self.CMD_LCD_CONTRAST, chr(value))
def set_led_pin(self, led, rg, value):
"""
led -- 0 to 3
rg -- 0 for red, 1 for green
value -- 0 to 100
"""
assert 0 <= led <= 3
assert rg in (0, 1)
assert 0 <= value <= 100
self.queue_command(self.CMD_GPO, chr(12 - 2 * led - rg) +
chr(value))
|
from __future__ import print_function, with_statement
import logging
import os
import sys
import time
import bz2
import itertools
import numpy as np
import scipy.linalg
import gensim
try:
from sparsesvd import sparsesvd
except ImportError:
# no SVDLIBC: install with `easy_install sparsesvd` if you want SVDLIBC results as well
sparsesvd = None
sparsesvd = None # don't use SVDLIBC
FACTORS = [300] # which num_topics to try
CHUNKSIZE = [10000, 1000] # which chunksize to try
POWER_ITERS = [0, 1, 2, 4, 6] # extra power iterations for the randomized algo
# when reporting reconstruction error, also report spectral norm error? (very slow)
COMPUTE_NORM2 = False
def norm2(a):
"""Spectral norm ("norm 2") of a symmetric matrix `a`."""
if COMPUTE_NORM2:
logging.info("computing spectral norm of a %s matrix", str(a.shape))
return scipy.linalg.eigvalsh(a).max() # much faster than np.linalg.norm(2)
else:
return np.nan
def rmse(diff):
return np.sqrt(1.0 * np.multiply(diff, diff).sum() / diff.size)
def print_error(name, aat, u, s, ideal_nf, ideal_n2):
err = -np.dot(u, np.dot(np.diag(s), u.T))
err += aat
nf, n2 = np.linalg.norm(err), norm2(err)
print(
'%s error: norm_frobenius=%f (/ideal=%g), norm2=%f (/ideal=%g), RMSE=%g' %
(name, nf, nf / ideal_nf, n2, n2 / ideal_n2, rmse(err))
)
sys.stdout.flush()
class ClippedCorpus:
def __init__(self, corpus, max_docs, max_terms):
self.corpus = corpus
self.max_docs, self.max_terms = max_docs, max_terms
def __iter__(self):
for doc in itertools.islice(self.corpus, self.max_docs):
yield [(f, w) for f, w in doc if f < self.max_terms]
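# Usage sketch (inferred from the argument handling below, not an official CLI):
#   python <this script> <corpus.mm[.bz2]> [num_docs] [num_terms]
# benchmarks SVDLIBC (if available), one-pass incremental LSI and multipass LSI
# against the exact eigendecomposition of corpus * corpus^T.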
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
logging.info("running %s", " ".join(sys.argv))
program = os.path.basename(sys.argv[0])
# do we have enough cmd line arguments?
if len(sys.argv) < 2:
print(globals()["__doc__"] % locals())
sys.exit(1)
fname = sys.argv[1]
if fname.endswith('bz2'):
mm = gensim.corpora.MmCorpus(bz2.BZ2File(fname))
else:
mm = gensim.corpora.MmCorpus(fname)
# extra cmd parameters = use a subcorpus (fewer docs, smaller vocab)
if len(sys.argv) > 2:
n = int(sys.argv[2])
else:
n = mm.num_docs
if len(sys.argv) > 3:
m = int(sys.argv[3])
else:
m = mm.num_terms
logging.info("using %i documents and %i features", n, m)
corpus = ClippedCorpus(mm, n, m)
id2word = gensim.utils.FakeDict(m)
logging.info("computing corpus * corpus^T") # eigenvalues of this matrix are singular values of `corpus`, squared
aat = np.zeros((m, m), dtype=np.float64)
for chunk in gensim.utils.grouper(corpus, chunksize=5000):
num_nnz = sum(len(doc) for doc in chunk)
chunk = gensim.matutils.corpus2csc(chunk, num_nnz=num_nnz, num_terms=m, num_docs=len(chunk), dtype=np.float32)
chunk = chunk * chunk.T
chunk = chunk.toarray()
aat += chunk
del chunk
logging.info("computing full decomposition of corpus * corpus^t")
aat = aat.astype(np.float32)
spectrum_s, spectrum_u = scipy.linalg.eigh(aat)
spectrum_s = spectrum_s[::-1] # re-order to descending eigenvalue order
spectrum_u = spectrum_u.T[::-1].T
np.save(fname + '.spectrum.npy', spectrum_s)
for factors in FACTORS:
err = -np.dot(spectrum_u[:, :factors], np.dot(np.diag(spectrum_s[:factors]), spectrum_u[:, :factors].T))
err += aat
ideal_fro = np.linalg.norm(err)
del err
ideal_n2 = spectrum_s[factors + 1]
print('*' * 40, "%i factors, ideal error norm_frobenius=%f, norm_2=%f" % (factors, ideal_fro, ideal_n2))
print("*" * 30, end="")
print_error("baseline", aat,
np.zeros((m, factors)), np.zeros((factors)), ideal_fro, ideal_n2)
if sparsesvd:
logging.info("computing SVDLIBC SVD for %i factors", factors)
taken = time.time()
corpus_ram = gensim.matutils.corpus2csc(corpus, num_terms=m)
ut, s, vt = sparsesvd(corpus_ram, factors)
taken = time.time() - taken
del corpus_ram
del vt
u, s = ut.T.astype(np.float32), s.astype(np.float32)**2 # convert singular values to eigenvalues
del ut
print("SVDLIBC SVD for %i factors took %s s (spectrum %f .. %f)"
% (factors, taken, s[0], s[-1]))
print_error("SVDLIBC", aat, u, s, ideal_fro, ideal_n2)
del u
for power_iters in POWER_ITERS:
for chunksize in CHUNKSIZE:
logging.info(
"computing incremental SVD for %i factors, %i power iterations, chunksize %i",
factors, power_iters, chunksize
)
taken = time.time()
gensim.models.lsimodel.P2_EXTRA_ITERS = power_iters
model = gensim.models.LsiModel(
corpus, id2word=id2word, num_topics=factors,
chunksize=chunksize, power_iters=power_iters
)
taken = time.time() - taken
u, s = model.projection.u.astype(np.float32), model.projection.s.astype(np.float32)**2
del model
print(
"incremental SVD for %i factors, %i power iterations, "
"chunksize %i took %s s (spectrum %f .. %f)" %
(factors, power_iters, chunksize, taken, s[0], s[-1])
)
print_error('incremental SVD', aat, u, s, ideal_fro, ideal_n2)
del u
logging.info("computing multipass SVD for %i factors, %i power iterations", factors, power_iters)
taken = time.time()
model = gensim.models.LsiModel(
corpus, id2word=id2word, num_topics=factors, chunksize=2000,
onepass=False, power_iters=power_iters
)
taken = time.time() - taken
u, s = model.projection.u.astype(np.float32), model.projection.s.astype(np.float32)**2
del model
print(
"multipass SVD for %i factors, "
"%i power iterations took %s s (spectrum %f .. %f)" %
(factors, power_iters, taken, s[0], s[-1])
)
print_error('multipass SVD', aat, u, s, ideal_fro, ideal_n2)
del u
logging.info("finished running %s", program)
|
import logging
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import ATTR_LOCATION
from homeassistant.helpers.reload import setup_reload_service
from . import DOMAIN as TELEGRAM_DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
DOMAIN = "telegram_bot"
ATTR_KEYBOARD = "keyboard"
ATTR_INLINE_KEYBOARD = "inline_keyboard"
ATTR_PHOTO = "photo"
ATTR_VIDEO = "video"
ATTR_DOCUMENT = "document"
CONF_CHAT_ID = "chat_id"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Required(CONF_CHAT_ID): vol.Coerce(int)})
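# Example configuration.yaml entry (sketch; keys follow PLATFORM_SCHEMA above,
# the chat_id value is an illustrative placeholder):
#
#   notify:
#     - platform: telegram
#       name: telegram_notifier
#       chat_id: 123456789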
def get_service(hass, config, discovery_info=None):
"""Get the Telegram notification service."""
setup_reload_service(hass, TELEGRAM_DOMAIN, PLATFORMS)
chat_id = config.get(CONF_CHAT_ID)
return TelegramNotificationService(hass, chat_id)
class TelegramNotificationService(BaseNotificationService):
"""Implement the notification service for Telegram."""
def __init__(self, hass, chat_id):
"""Initialize the service."""
self._chat_id = chat_id
self.hass = hass
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
service_data = {ATTR_TARGET: kwargs.get(ATTR_TARGET, self._chat_id)}
if ATTR_TITLE in kwargs:
service_data.update({ATTR_TITLE: kwargs.get(ATTR_TITLE)})
if message:
service_data.update({ATTR_MESSAGE: message})
data = kwargs.get(ATTR_DATA)
# Get keyboard info
if data is not None and ATTR_KEYBOARD in data:
keys = data.get(ATTR_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
service_data.update(keyboard=keys)
elif data is not None and ATTR_INLINE_KEYBOARD in data:
keys = data.get(ATTR_INLINE_KEYBOARD)
keys = keys if isinstance(keys, list) else [keys]
service_data.update(inline_keyboard=keys)
# Send a photo, video, document, or location
if data is not None and ATTR_PHOTO in data:
photos = data.get(ATTR_PHOTO)
photos = photos if isinstance(photos, list) else [photos]
for photo_data in photos:
service_data.update(photo_data)
self.hass.services.call(DOMAIN, "send_photo", service_data=service_data)
return
if data is not None and ATTR_VIDEO in data:
videos = data.get(ATTR_VIDEO)
videos = videos if isinstance(videos, list) else [videos]
for video_data in videos:
service_data.update(video_data)
self.hass.services.call(DOMAIN, "send_video", service_data=service_data)
return
if data is not None and ATTR_LOCATION in data:
service_data.update(data.get(ATTR_LOCATION))
return self.hass.services.call(
DOMAIN, "send_location", service_data=service_data
)
if data is not None and ATTR_DOCUMENT in data:
service_data.update(data.get(ATTR_DOCUMENT))
return self.hass.services.call(
DOMAIN, "send_document", service_data=service_data
)
# Send message
_LOGGER.debug(
"TELEGRAM NOTIFIER calling %s.send_message with %s", DOMAIN, service_data
)
return self.hass.services.call(
DOMAIN, "send_message", service_data=service_data
)
|
import logging
from pyjoin import (
get_devices,
ring_device,
send_file,
send_notification,
send_sms,
send_url,
set_wallpaper,
)
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_NAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "joaoapps_join"
CONF_DEVICE_ID = "device_id"
CONF_DEVICE_IDS = "device_ids"
CONF_DEVICE_NAMES = "device_names"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_DEVICE_ID): cv.string,
vol.Optional(CONF_DEVICE_IDS): cv.string,
vol.Optional(CONF_DEVICE_NAMES): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
],
)
},
extra=vol.ALLOW_EXTRA,
)
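# Example configuration.yaml entry (sketch; keys follow CONFIG_SCHEMA above,
# api_key and device_id values are illustrative placeholders):
#
#   joaoapps_join:
#     - api_key: YOUR_API_KEY
#       device_id: abc123
#       name: Phone
#
# With name "Phone", setup() registers services such as joaoapps_join.phone_ring
# and joaoapps_join.phone_send_sms (see register_device below).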
def register_device(hass, api_key, name, device_id, device_ids, device_names):
"""Register services for each join device listed."""
def ring_service(service):
"""Service to ring devices."""
ring_device(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
)
def set_wallpaper_service(service):
"""Service to set wallpaper on devices."""
set_wallpaper(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
url=service.data.get("url"),
)
def send_file_service(service):
"""Service to send files to devices."""
send_file(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
url=service.data.get("url"),
)
def send_url_service(service):
"""Service to open url on devices."""
send_url(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
url=service.data.get("url"),
)
def send_tasker_service(service):
"""Service to open url on devices."""
send_notification(
api_key=api_key,
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
text=service.data.get("command"),
)
def send_sms_service(service):
"""Service to send sms from devices."""
send_sms(
device_id=device_id,
device_ids=device_ids,
device_names=device_names,
sms_number=service.data.get("number"),
sms_text=service.data.get("message"),
api_key=api_key,
)
hass.services.register(DOMAIN, f"{name}ring", ring_service)
hass.services.register(DOMAIN, f"{name}set_wallpaper", set_wallpaper_service)
hass.services.register(DOMAIN, f"{name}send_sms", send_sms_service)
hass.services.register(DOMAIN, f"{name}send_file", send_file_service)
hass.services.register(DOMAIN, f"{name}send_url", send_url_service)
hass.services.register(DOMAIN, f"{name}send_tasker", send_tasker_service)
def setup(hass, config):
"""Set up the Join services."""
for device in config[DOMAIN]:
api_key = device.get(CONF_API_KEY)
device_id = device.get(CONF_DEVICE_ID)
device_ids = device.get(CONF_DEVICE_IDS)
device_names = device.get(CONF_DEVICE_NAMES)
name = device.get(CONF_NAME)
name = f"{name.lower().replace(' ', '_')}_" if name else ""
if api_key:
if not get_devices(api_key):
_LOGGER.error("Error connecting to Join, check API key")
return False
if device_id is None and device_ids is None and device_names is None:
_LOGGER.error(
"No device was provided. Please specify device_id"
", device_ids, or device_names"
)
return False
register_device(hass, api_key, name, device_id, device_ids, device_names)
return True
|
from datetime import timedelta
import logging
from httpcore import ConnectError, ConnectTimeout
from wolf_smartset.token_auth import InvalidAuth
from wolf_smartset.wolf_client import WolfClient
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
COORDINATOR,
DEVICE_GATEWAY,
DEVICE_ID,
DEVICE_NAME,
DOMAIN,
PARAMETERS,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Wolf SmartSet Service component."""
hass.data[DOMAIN] = {}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Wolf SmartSet Service from a config entry."""
username = entry.data[CONF_USERNAME]
password = entry.data[CONF_PASSWORD]
device_name = entry.data[DEVICE_NAME]
device_id = entry.data[DEVICE_ID]
gateway_id = entry.data[DEVICE_GATEWAY]
_LOGGER.debug(
"Setting up wolflink integration for device: %s (id: %s, gateway: %s)",
device_name,
device_id,
gateway_id,
)
wolf_client = WolfClient(username, password)
parameters = await fetch_parameters(wolf_client, gateway_id, device_id)
async def async_update_data():
"""Update all stored entities for Wolf SmartSet."""
try:
values = await wolf_client.fetch_value(gateway_id, device_id, parameters)
return {v.value_id: v.value for v in values}
except ConnectError as exception:
raise UpdateFailed(
f"Error communicating with API: {exception}"
) from exception
except InvalidAuth as exception:
raise UpdateFailed("Invalid authentication during update.") from exception
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="wolflink",
update_method=async_update_data,
update_interval=timedelta(minutes=1),
)
await coordinator.async_refresh()
hass.data[DOMAIN][entry.entry_id] = {}
hass.data[DOMAIN][entry.entry_id][PARAMETERS] = parameters
hass.data[DOMAIN][entry.entry_id][COORDINATOR] = coordinator
hass.data[DOMAIN][entry.entry_id][DEVICE_ID] = device_id
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = await hass.config_entries.async_forward_entry_unload(entry, "sensor")
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def fetch_parameters(client: WolfClient, gateway_id: int, device_id: int):
"""
Fetch all available parameters with usage of WolfClient.
    By default, the Reglertyp entity is removed because the API will not provide a value for this parameter.
"""
try:
fetched_parameters = await client.fetch_parameters(gateway_id, device_id)
return [param for param in fetched_parameters if param.name != "Reglertyp"]
except (ConnectError, ConnectTimeout) as exception:
raise UpdateFailed(f"Error communicating with API: {exception}") from exception
except InvalidAuth as exception:
raise UpdateFailed("Invalid authentication during update") from exception
|
from random import randrange
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MAXIMUM,
CONF_MINIMUM,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
ATTR_MAXIMUM = "maximum"
ATTR_MINIMUM = "minimum"
DEFAULT_NAME = "Random Sensor"
DEFAULT_MIN = 0
DEFAULT_MAX = 20
ICON = "mdi:hanger"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_MAXIMUM, default=DEFAULT_MAX): cv.positive_int,
vol.Optional(CONF_MINIMUM, default=DEFAULT_MIN): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Random number sensor."""
name = config.get(CONF_NAME)
minimum = config.get(CONF_MINIMUM)
maximum = config.get(CONF_MAXIMUM)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
async_add_entities([RandomSensor(name, minimum, maximum, unit)], True)
class RandomSensor(Entity):
"""Representation of a Random number sensor."""
def __init__(self, name, minimum, maximum, unit_of_measurement):
"""Initialize the Random sensor."""
self._name = name
self._minimum = minimum
self._maximum = maximum
self._unit_of_measurement = unit_of_measurement
self._state = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the attributes of the sensor."""
return {ATTR_MAXIMUM: self._maximum, ATTR_MINIMUM: self._minimum}
async def async_update(self):
"""Get a new number and updates the states."""
self._state = randrange(self._minimum, self._maximum + 1)
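# Illustrative example (hedged; the platform key and all values are
# assumptions, not taken from this file): a configuration.yaml entry matching
# PLATFORM_SCHEMA above might look like
#
#   sensor:
#     - platform: random
#       name: "Dice roll"
#       minimum: 1
#       maximum: 6
#       unit_of_measurement: "pips"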
|
try:
from setuptools import setup
from setuptools.command.install import install
from setuptools.command.egg_info import egg_info
except ImportError:
from distutils.core import setup
import sys
import os
import subprocess
import errno
import versioneer
cmdclass = versioneer.get_cmdclass()
version = versioneer.get_version()
if sys.version_info[:2] <= (2, 6):
raise RuntimeError(
"You're using Python <= 2.6, but this package requires either Python "
"2.7, or 3.3 or above, so you can't use it unless you upgrade your "
"Python version."
)
if sys.version_info[:2] <= (3, 5):
dependencies = ['ordered-set<4.0.0']
else:
dependencies = ['ordered-set']
extras = {
'docs': ['sphinx'],
'matrices': ['numpy'],
'matplotlib': ['matplotlib'],
'quantities': ['quantities', 'numpy'],
'testing': ['flake8<3.0.0', 'pep8-naming==0.8.2',
'flake8_docstrings==1.3.0', 'pycodestyle==2.0.0',
'pydocstyle==3.0.0', 'pyflakes==1.2.3', 'pytest>=4.6',
'flake8-putty',
'coverage', 'pytest-cov'],
'packaging': ['twine'],
'convert_to_py2': ['3to2', 'future>=0.15.2'],
}
if sys.version_info[0] == 3:
source_dir = '.'
if sys.version_info < (3, 4):
del extras['docs']
extras['matplotlib'] = ['matplotlib<2.0.0']
extras['matrices'] = ['numpy<1.12.0']
extras['quantities'][1] = 'numpy<1.12.0'
else:
source_dir = 'python2_source'
dependencies.append('future>=0.15.2')
PY2_CONVERTED = False
extras['all'] = list(set([req for reqs in extras.values() for req in reqs]))
# Automatically convert the source from Python 3 to Python 2 if we need to.
class CustomInstall(install):
def run(self):
convert_to_py2()
install.run(self)
class CustomEggInfo(egg_info):
def initialize_options(self):
convert_to_py2()
egg_info.initialize_options(self)
def convert_to_py2():
global PY2_CONVERTED
if source_dir == 'python2_source' and not PY2_CONVERTED:
pylatex_exists = os.path.exists(os.path.join(source_dir, 'pylatex'))
if '+' not in version and pylatex_exists:
            # This is an official release; just use the pre-existing
            # python2_source dir
return
try:
# Check if 3to2 exists
subprocess.check_output(['3to2', '--help'])
subprocess.check_output(['pasteurize', '--help'])
except OSError as e:
if e.errno != errno.ENOENT:
raise
if not pylatex_exists:
                raise ImportError('3to2 and future need to be installed '
                                  'before installing PyLaTeX for Python 2.7 '
                                  'when it is not installed using one of '
                                  'the pip releases.')
else:
converter = os.path.dirname(os.path.realpath(__file__)) \
+ '/convert_to_py2.sh'
subprocess.check_call([converter])
PY2_CONVERTED = True
cmdclass['install'] = CustomInstall
cmdclass['egg_info'] = CustomEggInfo
setup(name='PyLaTeX',
version=version,
author='Jelte Fennema',
author_email='[email protected]',
description='A Python library for creating LaTeX files and snippets',
long_description=open('README.rst').read(),
package_dir={'': source_dir},
packages=['pylatex', 'pylatex.base_classes'],
url='https://github.com/JelteF/PyLaTeX',
license='MIT',
install_requires=dependencies,
extras_require=extras,
cmdclass=cmdclass,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Environment :: Console',
'Intended Audience :: Developers',
'Intended Audience :: Education',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Topic :: Software Development :: Code Generators',
'Topic :: Text Processing :: Markup :: LaTeX',
]
)
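# Usage sketch (hedged): the extras declared above can be combined at install
# time, for example
#
#   pip install .[matrices,quantities]   # numpy-backed features
#   pip install .[all]                   # the aggregated 'all' extra
#
# The exact pip invocation depends on the environment and is illustrative only.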
|
import unittest
from Tests.utils.utils import get_test_path
from kalliope.core.ConfigurationManager.YAMLLoader import YAMLFileNotFound, YAMLLoader
class TestYAMLLoader(unittest.TestCase):
"""
Class to test YAMLLoader
"""
def setUp(self):
pass
def test_get_config(self):
valid_file_path_to_test = get_test_path("brains/brain_test.yml")
invalid_file_path = "brains/non_existing_brain.yml"
expected_result = [
{'signals': [{'order': 'test_order'}],
'neurons': [{'say': {'message': ['test message']}}],
'name': 'test'},
{'signals': [{'order': 'test_order_2'}],
'neurons': [{'say': {'message': ['test message']}}],
'name': 'test2'},
{'signals': [{'order': 'order_for_int'}],
'neurons': [{'sleep': {'seconds': 60}}],
'name': 'testint'},
{'includes': ['included_brain_test.yml']},
{'signals': [{'order': 'test_order_3'}],
'neurons': [{'say': {'message': ['test message']}}],
'name': 'test3'}
]
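        # For reference (a hedged reconstruction, not copied from the test
        # fixture): brain_test.yml presumably serializes the structure above,
        # roughly:
        #
        #   - name: test
        #     signals:
        #       - order: test_order
        #     neurons:
        #       - say:
        #           message:
        #             - test message
        #   - includes:
        #       - included_brain_test.yml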
with self.assertRaises(YAMLFileNotFound):
YAMLLoader.get_config(invalid_file_path)
self.assertEqual(YAMLLoader.get_config(valid_file_path_to_test), expected_result)
if __name__ == '__main__':
unittest.main()
|
import argparse
import importlib
from pathlib import Path
import sys
from . import error, util
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
return util.get_base_arg_parser().parse_known_args()[0]
def main():
"""Run a translation script."""
if not Path("requirements_all.txt").is_file():
print("Run from project root")
return 1
args = get_arguments()
module = importlib.import_module(f".{args.action}", "script.translations")
return module.run()
if __name__ == "__main__":
try:
sys.exit(main())
except error.ExitApp as err:
print()
print(f"Fatal Error: {err.reason}")
sys.exit(err.exit_code)
except (KeyboardInterrupt, EOFError):
print()
print("Aborted!")
sys.exit(2)
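# Usage sketch (hedged): run as a module from the repository root, e.g.
# "python3 -m script.translations <action>". The action name is resolved by
# importlib above, so valid values are whatever submodules exist in
# script.translations; no concrete action names are assumed here.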
|
from homeassistant.components.switch import SwitchEntity
from .base_class import TradfriBaseDevice
from .const import CONF_GATEWAY_ID, DEVICES, DOMAIN, KEY_API
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Load Tradfri switches based on a config entry."""
gateway_id = config_entry.data[CONF_GATEWAY_ID]
tradfri_data = hass.data[DOMAIN][config_entry.entry_id]
api = tradfri_data[KEY_API]
devices = tradfri_data[DEVICES]
switches = [dev for dev in devices if dev.has_socket_control]
if switches:
async_add_entities(
TradfriSwitch(switch, api, gateway_id) for switch in switches
)
class TradfriSwitch(TradfriBaseDevice, SwitchEntity):
"""The platform class required by Home Assistant."""
def __init__(self, device, api, gateway_id):
"""Initialize a switch."""
super().__init__(device, api, gateway_id)
self._unique_id = f"{gateway_id}-{device.id}"
def _refresh(self, device):
"""Refresh the switch data."""
super()._refresh(device)
# Caching of switch control and switch object
self._device_control = device.socket_control
self._device_data = device.socket_control.sockets[0]
@property
def is_on(self):
"""Return true if switch is on."""
return self._device_data.state
async def async_turn_off(self, **kwargs):
"""Instruct the switch to turn off."""
await self._api(self._device_control.set_state(False))
async def async_turn_on(self, **kwargs):
"""Instruct the switch to turn on."""
await self._api(self._device_control.set_state(True))
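# Usage sketch (hedged): once the config entry is loaded, the entity responds
# to the generic switch services, e.g. from an automation:
#
#   service: switch.turn_on
#   target:
#     entity_id: switch.tradfri_outlet   # entity_id is an assumption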
|
from datetime import datetime
from functools import partial
import logging
from pyhomematic import HMConnection
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_MODE,
ATTR_NAME,
CONF_HOST,
CONF_HOSTS,
CONF_PASSWORD,
CONF_PLATFORM,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from .const import (
ATTR_ADDRESS,
ATTR_CHANNEL,
ATTR_DEVICE_TYPE,
ATTR_DISCOVER_DEVICES,
ATTR_DISCOVERY_TYPE,
ATTR_ERRORCODE,
ATTR_INTERFACE,
ATTR_LOW_BAT,
ATTR_LOWBAT,
ATTR_MESSAGE,
ATTR_PARAM,
ATTR_PARAMSET,
ATTR_PARAMSET_KEY,
ATTR_RX_MODE,
ATTR_TIME,
ATTR_UNIQUE_ID,
ATTR_VALUE,
ATTR_VALUE_TYPE,
CONF_CALLBACK_IP,
CONF_CALLBACK_PORT,
CONF_INTERFACES,
CONF_JSONPORT,
CONF_LOCAL_IP,
CONF_LOCAL_PORT,
CONF_PATH,
CONF_PORT,
CONF_RESOLVENAMES,
CONF_RESOLVENAMES_OPTIONS,
DATA_CONF,
DATA_HOMEMATIC,
DATA_STORE,
DISCOVER_BATTERY,
DISCOVER_BINARY_SENSORS,
DISCOVER_CLIMATE,
DISCOVER_COVER,
DISCOVER_LIGHTS,
DISCOVER_LOCKS,
DISCOVER_SENSORS,
DISCOVER_SWITCHES,
DOMAIN,
EVENT_ERROR,
EVENT_IMPULSE,
EVENT_KEYPRESS,
HM_DEVICE_TYPES,
HM_IGNORE_DISCOVERY_NODE,
HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS,
HM_IMPULSE_EVENTS,
HM_PRESS_EVENTS,
SERVICE_PUT_PARAMSET,
SERVICE_RECONNECT,
SERVICE_SET_DEVICE_VALUE,
SERVICE_SET_INSTALL_MODE,
SERVICE_SET_VARIABLE_VALUE,
SERVICE_VIRTUALKEY,
)
from .entity import HMHub
_LOGGER = logging.getLogger(__name__)
DEFAULT_LOCAL_IP = "0.0.0.0"
DEFAULT_LOCAL_PORT = 0
DEFAULT_RESOLVENAMES = False
DEFAULT_JSONPORT = 80
DEFAULT_PORT = 2001
DEFAULT_PATH = ""
DEFAULT_USERNAME = "Admin"
DEFAULT_PASSWORD = ""
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = False
DEFAULT_CHANNEL = 1
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_PLATFORM): "homematic",
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_ADDRESS): cv.string,
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_DEVICE_TYPE): cv.string,
vol.Optional(ATTR_CHANNEL, default=DEFAULT_CHANNEL): vol.Coerce(int),
vol.Optional(ATTR_PARAM): cv.string,
vol.Optional(ATTR_UNIQUE_ID): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_INTERFACES, default={}): {
cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(
CONF_RESOLVENAMES, default=DEFAULT_RESOLVENAMES
): vol.In(CONF_RESOLVENAMES_OPTIONS),
vol.Optional(CONF_JSONPORT, default=DEFAULT_JSONPORT): cv.port,
vol.Optional(
CONF_USERNAME, default=DEFAULT_USERNAME
): cv.string,
vol.Optional(
CONF_PASSWORD, default=DEFAULT_PASSWORD
): cv.string,
vol.Optional(CONF_CALLBACK_IP): cv.string,
vol.Optional(CONF_CALLBACK_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(
CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL
): cv.boolean,
}
},
vol.Optional(CONF_HOSTS, default={}): {
cv.match_all: {
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(
CONF_USERNAME, default=DEFAULT_USERNAME
): cv.string,
vol.Optional(
CONF_PASSWORD, default=DEFAULT_PASSWORD
): cv.string,
}
},
vol.Optional(CONF_LOCAL_IP, default=DEFAULT_LOCAL_IP): cv.string,
vol.Optional(CONF_LOCAL_PORT): cv.port,
}
)
},
extra=vol.ALLOW_EXTRA,
)
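# Illustrative example (hedged; interface name, host and resolvenames value are
# assumptions): a configuration.yaml entry accepted by CONFIG_SCHEMA above
# could look like
#
#   homematic:
#     interfaces:
#       rf:
#         host: 192.168.1.20
#         port: 2001
#         resolvenames: json
#         username: Admin
#         password: ""
#
# Valid resolvenames options are defined by CONF_RESOLVENAMES_OPTIONS in const.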
SCHEMA_SERVICE_VIRTUALKEY = vol.Schema(
{
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): cv.string,
vol.Optional(ATTR_INTERFACE): cv.string,
}
)
SCHEMA_SERVICE_SET_VARIABLE_VALUE = vol.Schema(
{
vol.Required(ATTR_NAME): cv.string,
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
}
)
SCHEMA_SERVICE_SET_DEVICE_VALUE = vol.Schema(
{
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_CHANNEL): vol.Coerce(int),
vol.Required(ATTR_PARAM): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_VALUE): cv.match_all,
vol.Optional(ATTR_VALUE_TYPE): vol.In(
["boolean", "dateTime.iso8601", "double", "int", "string"]
),
vol.Optional(ATTR_INTERFACE): cv.string,
}
)
SCHEMA_SERVICE_RECONNECT = vol.Schema({})
SCHEMA_SERVICE_SET_INSTALL_MODE = vol.Schema(
{
vol.Required(ATTR_INTERFACE): cv.string,
vol.Optional(ATTR_TIME, default=60): cv.positive_int,
vol.Optional(ATTR_MODE, default=1): vol.All(vol.Coerce(int), vol.In([1, 2])),
vol.Optional(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
}
)
SCHEMA_SERVICE_PUT_PARAMSET = vol.Schema(
{
vol.Required(ATTR_INTERFACE): cv.string,
vol.Required(ATTR_ADDRESS): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET_KEY): vol.All(cv.string, vol.Upper),
vol.Required(ATTR_PARAMSET): dict,
vol.Optional(ATTR_RX_MODE): vol.All(cv.string, vol.Upper),
}
)
def setup(hass, config):
"""Set up the Homematic component."""
conf = config[DOMAIN]
hass.data[DATA_CONF] = remotes = {}
hass.data[DATA_STORE] = set()
# Create hosts-dictionary for pyhomematic
for rname, rconfig in conf[CONF_INTERFACES].items():
remotes[rname] = {
"ip": rconfig.get(CONF_HOST),
"port": rconfig.get(CONF_PORT),
"path": rconfig.get(CONF_PATH),
"resolvenames": rconfig.get(CONF_RESOLVENAMES),
"jsonport": rconfig.get(CONF_JSONPORT),
"username": rconfig.get(CONF_USERNAME),
"password": rconfig.get(CONF_PASSWORD),
"callbackip": rconfig.get(CONF_CALLBACK_IP),
"callbackport": rconfig.get(CONF_CALLBACK_PORT),
"ssl": rconfig[CONF_SSL],
"verify_ssl": rconfig.get(CONF_VERIFY_SSL),
"connect": True,
}
for sname, sconfig in conf[CONF_HOSTS].items():
remotes[sname] = {
"ip": sconfig.get(CONF_HOST),
"port": sconfig[CONF_PORT],
"username": sconfig.get(CONF_USERNAME),
"password": sconfig.get(CONF_PASSWORD),
"connect": False,
}
# Create server thread
bound_system_callback = partial(_system_callback_handler, hass, config)
hass.data[DATA_HOMEMATIC] = homematic = HMConnection(
local=config[DOMAIN].get(CONF_LOCAL_IP),
localport=config[DOMAIN].get(CONF_LOCAL_PORT, DEFAULT_LOCAL_PORT),
remotes=remotes,
systemcallback=bound_system_callback,
interface_id="homeassistant",
)
# Start server thread, connect to hosts, initialize to receive events
homematic.start()
    # Stop the server when Home Assistant is shutting down
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, hass.data[DATA_HOMEMATIC].stop)
# Init homematic hubs
entity_hubs = []
for hub_name in conf[CONF_HOSTS]:
entity_hubs.append(HMHub(hass, homematic, hub_name))
def _hm_service_virtualkey(service):
"""Service to handle virtualkey servicecalls."""
address = service.data.get(ATTR_ADDRESS)
channel = service.data.get(ATTR_CHANNEL)
param = service.data.get(ATTR_PARAM)
# Device not found
hmdevice = _device_from_servicecall(hass, service)
if hmdevice is None:
_LOGGER.error("%s not found for service virtualkey!", address)
return
# Parameter doesn't exist for device
if param not in hmdevice.ACTIONNODE:
_LOGGER.error("%s not datapoint in hm device %s", param, address)
return
# Channel doesn't exist for device
if channel not in hmdevice.ACTIONNODE[param]:
_LOGGER.error("%i is not a channel in hm device %s", channel, address)
return
# Call parameter
hmdevice.actionNodeData(param, True, channel)
hass.services.register(
DOMAIN,
SERVICE_VIRTUALKEY,
_hm_service_virtualkey,
schema=SCHEMA_SERVICE_VIRTUALKEY,
)
def _service_handle_value(service):
"""Service to call setValue method for HomeMatic system variable."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
name = service.data[ATTR_NAME]
value = service.data[ATTR_VALUE]
if entity_ids:
entities = [
entity for entity in entity_hubs if entity.entity_id in entity_ids
]
else:
entities = entity_hubs
if not entities:
_LOGGER.error("No HomeMatic hubs available")
return
for hub in entities:
hub.hm_set_variable(name, value)
hass.services.register(
DOMAIN,
SERVICE_SET_VARIABLE_VALUE,
_service_handle_value,
schema=SCHEMA_SERVICE_SET_VARIABLE_VALUE,
)
def _service_handle_reconnect(service):
"""Service to reconnect all HomeMatic hubs."""
homematic.reconnect()
hass.services.register(
DOMAIN,
SERVICE_RECONNECT,
_service_handle_reconnect,
schema=SCHEMA_SERVICE_RECONNECT,
)
def _service_handle_device(service):
"""Service to call setValue method for HomeMatic devices."""
address = service.data.get(ATTR_ADDRESS)
channel = service.data.get(ATTR_CHANNEL)
param = service.data.get(ATTR_PARAM)
value = service.data.get(ATTR_VALUE)
value_type = service.data.get(ATTR_VALUE_TYPE)
# Convert value into correct XML-RPC Type.
# https://docs.python.org/3/library/xmlrpc.client.html#xmlrpc.client.ServerProxy
if value_type:
if value_type == "int":
value = int(value)
elif value_type == "double":
value = float(value)
elif value_type == "boolean":
value = bool(value)
elif value_type == "dateTime.iso8601":
value = datetime.strptime(value, "%Y%m%dT%H:%M:%S")
else:
# Default is 'string'
value = str(value)
# Device not found
hmdevice = _device_from_servicecall(hass, service)
if hmdevice is None:
_LOGGER.error("%s not found!", address)
return
hmdevice.setValue(param, value, channel)
hass.services.register(
DOMAIN,
SERVICE_SET_DEVICE_VALUE,
_service_handle_device,
schema=SCHEMA_SERVICE_SET_DEVICE_VALUE,
)
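    # Illustrative service call (hedged; address and values are assumptions)
    # matching SCHEMA_SERVICE_SET_DEVICE_VALUE, e.g. from Developer Tools:
    #
    #   service: homematic.set_device_value
    #   data:
    #     address: LEQ1234567
    #     channel: 1
    #     param: STATE
    #     value: true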
def _service_handle_install_mode(service):
"""Service to set interface into install mode."""
interface = service.data.get(ATTR_INTERFACE)
mode = service.data.get(ATTR_MODE)
time = service.data.get(ATTR_TIME)
address = service.data.get(ATTR_ADDRESS)
homematic.setInstallMode(interface, t=time, mode=mode, address=address)
hass.services.register(
DOMAIN,
SERVICE_SET_INSTALL_MODE,
_service_handle_install_mode,
schema=SCHEMA_SERVICE_SET_INSTALL_MODE,
)
def _service_put_paramset(service):
"""Service to call the putParamset method on a HomeMatic connection."""
interface = service.data.get(ATTR_INTERFACE)
address = service.data.get(ATTR_ADDRESS)
paramset_key = service.data.get(ATTR_PARAMSET_KEY)
# When passing in the paramset from a YAML file we get an OrderedDict
# here instead of a dict, so add this explicit cast.
# The service schema makes sure that this cast works.
paramset = dict(service.data.get(ATTR_PARAMSET))
rx_mode = service.data.get(ATTR_RX_MODE)
_LOGGER.debug(
"Calling putParamset: %s, %s, %s, %s, %s",
interface,
address,
paramset_key,
paramset,
rx_mode,
)
homematic.putParamset(interface, address, paramset_key, paramset, rx_mode)
hass.services.register(
DOMAIN,
SERVICE_PUT_PARAMSET,
_service_put_paramset,
schema=SCHEMA_SERVICE_PUT_PARAMSET,
)
return True
def _system_callback_handler(hass, config, src, *args):
"""System callback handler."""
# New devices available at hub
if src == "newDevices":
(interface_id, dev_descriptions) = args
interface = interface_id.split("-")[-1]
# Device support active?
if not hass.data[DATA_CONF][interface]["connect"]:
return
addresses = []
for dev in dev_descriptions:
address = dev["ADDRESS"].split(":")[0]
if address not in hass.data[DATA_STORE]:
hass.data[DATA_STORE].add(address)
addresses.append(address)
# Register EVENTS
# Search all devices with an EVENTNODE that includes data
bound_event_callback = partial(_hm_event_handler, hass, interface)
for dev in addresses:
hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(dev)
if hmdevice.EVENTNODE:
hmdevice.setEventCallback(callback=bound_event_callback, bequeath=True)
# Create Home Assistant entities
if addresses:
for component_name, discovery_type in (
("switch", DISCOVER_SWITCHES),
("light", DISCOVER_LIGHTS),
("cover", DISCOVER_COVER),
("binary_sensor", DISCOVER_BINARY_SENSORS),
("sensor", DISCOVER_SENSORS),
("climate", DISCOVER_CLIMATE),
("lock", DISCOVER_LOCKS),
("binary_sensor", DISCOVER_BATTERY),
):
# Get all devices of a specific type
found_devices = _get_devices(hass, discovery_type, addresses, interface)
# When devices of this type are found
# they are setup in Home Assistant and a discovery event is fired
if found_devices:
discovery.load_platform(
hass,
component_name,
DOMAIN,
{
ATTR_DISCOVER_DEVICES: found_devices,
ATTR_DISCOVERY_TYPE: discovery_type,
},
config,
)
# Homegear error message
elif src == "error":
_LOGGER.error("Error: %s", args)
(interface_id, errorcode, message) = args
hass.bus.fire(EVENT_ERROR, {ATTR_ERRORCODE: errorcode, ATTR_MESSAGE: message})
def _get_devices(hass, discovery_type, keys, interface):
"""Get the HomeMatic devices for given discovery_type."""
device_arr = []
for key in keys:
device = hass.data[DATA_HOMEMATIC].devices[interface][key]
class_name = device.__class__.__name__
metadata = {}
# Class not supported by discovery type
if (
discovery_type != DISCOVER_BATTERY
and class_name not in HM_DEVICE_TYPES[discovery_type]
):
continue
# Load metadata needed to generate a parameter list
if discovery_type == DISCOVER_SENSORS:
metadata.update(device.SENSORNODE)
elif discovery_type == DISCOVER_BINARY_SENSORS:
metadata.update(device.BINARYNODE)
elif discovery_type == DISCOVER_BATTERY:
if ATTR_LOWBAT in device.ATTRIBUTENODE:
metadata.update({ATTR_LOWBAT: device.ATTRIBUTENODE[ATTR_LOWBAT]})
elif ATTR_LOW_BAT in device.ATTRIBUTENODE:
metadata.update({ATTR_LOW_BAT: device.ATTRIBUTENODE[ATTR_LOW_BAT]})
else:
continue
else:
metadata.update({None: device.ELEMENT})
# Generate options for 1...n elements with 1...n parameters
for param, channels in metadata.items():
if (
param in HM_IGNORE_DISCOVERY_NODE
and class_name not in HM_IGNORE_DISCOVERY_NODE_EXCEPTIONS.get(param, [])
):
continue
if discovery_type == DISCOVER_SWITCHES and class_name == "IPKeySwitchLevel":
channels.remove(8)
channels.remove(12)
if discovery_type == DISCOVER_LIGHTS and class_name == "IPKeySwitchLevel":
channels.remove(4)
# Add devices
_LOGGER.debug(
"%s: Handling %s: %s: %s", discovery_type, key, param, channels
)
for channel in channels:
name = _create_ha_id(
name=device.NAME, channel=channel, param=param, count=len(channels)
)
unique_id = _create_ha_id(
name=key, channel=channel, param=param, count=len(channels)
)
device_dict = {
CONF_PLATFORM: "homematic",
ATTR_ADDRESS: key,
ATTR_INTERFACE: interface,
ATTR_NAME: name,
ATTR_DEVICE_TYPE: class_name,
ATTR_CHANNEL: channel,
ATTR_UNIQUE_ID: unique_id,
}
if param is not None:
device_dict[ATTR_PARAM] = param
# Add new device
try:
DEVICE_SCHEMA(device_dict)
device_arr.append(device_dict)
except vol.MultipleInvalid as err:
_LOGGER.error("Invalid device config: %s", str(err))
return device_arr
def _create_ha_id(name, channel, param, count):
"""Generate a unique entity id."""
# HMDevice is a simple device
if count == 1 and param is None:
return name
# Has multiple elements/channels
if count > 1 and param is None:
return f"{name} {channel}"
# With multiple parameters on first channel
if count == 1 and param is not None:
return f"{name} {param}"
# Multiple parameters with multiple channels
if count > 1 and param is not None:
return f"{name} {channel} {param}"
def _hm_event_handler(hass, interface, device, caller, attribute, value):
"""Handle all pyhomematic device events."""
try:
channel = int(device.split(":")[1])
address = device.split(":")[0]
hmdevice = hass.data[DATA_HOMEMATIC].devices[interface].get(address)
except (TypeError, ValueError):
_LOGGER.error("Event handling channel convert error!")
return
# Return if not an event supported by device
if attribute not in hmdevice.EVENTNODE:
return
_LOGGER.debug("Event %s for %s channel %i", attribute, hmdevice.NAME, channel)
# Keypress event
if attribute in HM_PRESS_EVENTS:
hass.bus.fire(
EVENT_KEYPRESS,
{ATTR_NAME: hmdevice.NAME, ATTR_PARAM: attribute, ATTR_CHANNEL: channel},
)
return
# Impulse event
if attribute in HM_IMPULSE_EVENTS:
hass.bus.fire(EVENT_IMPULSE, {ATTR_NAME: hmdevice.NAME, ATTR_CHANNEL: channel})
return
_LOGGER.warning("Event is unknown and not forwarded")
def _device_from_servicecall(hass, service):
"""Extract HomeMatic device from service call."""
address = service.data.get(ATTR_ADDRESS)
interface = service.data.get(ATTR_INTERFACE)
if address == "BIDCOS-RF":
address = "BidCoS-RF"
if interface:
return hass.data[DATA_HOMEMATIC].devices[interface].get(address)
for devices in hass.data[DATA_HOMEMATIC].devices.values():
if address in devices:
return devices[address]
|