from homeassistant.components.sensor import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
)
from homeassistant.const import DEGREE, PRESSURE_MBAR, TEMP_CELSIUS
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import WiffiEntity
from .const import CREATE_ENTITY_SIGNAL
from .wiffi_strings import (
WIFFI_UOM_DEGREE,
WIFFI_UOM_LUX,
WIFFI_UOM_MILLI_BAR,
WIFFI_UOM_PERCENT,
WIFFI_UOM_TEMP_CELSIUS,
)
# map to determine HA device class from wiffi's unit of measurement
UOM_TO_DEVICE_CLASS_MAP = {
WIFFI_UOM_TEMP_CELSIUS: DEVICE_CLASS_TEMPERATURE,
WIFFI_UOM_PERCENT: DEVICE_CLASS_HUMIDITY,
WIFFI_UOM_MILLI_BAR: DEVICE_CLASS_PRESSURE,
WIFFI_UOM_LUX: DEVICE_CLASS_ILLUMINANCE,
}
# map to convert wiffi unit of measurements to common HA uom's
UOM_MAP = {
WIFFI_UOM_DEGREE: DEGREE,
WIFFI_UOM_TEMP_CELSIUS: TEMP_CELSIUS,
WIFFI_UOM_MILLI_BAR: PRESSURE_MBAR,
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up platform for a new integration.
Called by the HA framework after async_forward_entry_setup has been called
during initialization of a new integration (= wiffi).
"""
@callback
def _create_entity(device, metric):
"""Create platform specific entities."""
entities = []
if metric.is_number:
entities.append(NumberEntity(device, metric, config_entry.options))
elif metric.is_string:
entities.append(StringEntity(device, metric, config_entry.options))
async_add_entities(entities)
async_dispatcher_connect(hass, CREATE_ENTITY_SIGNAL, _create_entity)
class NumberEntity(WiffiEntity):
"""Entity for wiffi metrics which have a number value."""
def __init__(self, device, metric, options):
"""Initialize the entity."""
super().__init__(device, metric, options)
self._device_class = UOM_TO_DEVICE_CLASS_MAP.get(metric.unit_of_measurement)
self._unit_of_measurement = UOM_MAP.get(
metric.unit_of_measurement, metric.unit_of_measurement
)
self._value = metric.value
self.reset_expiration_date()
@property
def device_class(self):
"""Return the automatically determined device class."""
return self._device_class
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return self._unit_of_measurement
@property
def state(self):
"""Return the value of the entity."""
return self._value
@callback
def _update_value_callback(self, device, metric):
"""Update the value of the entity.
Called if a new message has been received from the wiffi device.
"""
self.reset_expiration_date()
self._unit_of_measurement = UOM_MAP.get(
metric.unit_of_measurement, metric.unit_of_measurement
)
self._value = metric.value
self.async_write_ha_state()
class StringEntity(WiffiEntity):
"""Entity for wiffi metrics which have a string value."""
def __init__(self, device, metric, options):
"""Initialize the entity."""
super().__init__(device, metric, options)
self._value = metric.value
self.reset_expiration_date()
@property
def state(self):
"""Return the value of the entity."""
return self._value
@callback
def _update_value_callback(self, device, metric):
"""Update the value of the entity.
Called if a new message has been received from the wiffi device.
"""
self.reset_expiration_date()
self._value = metric.value
self.async_write_ha_state()
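# Resolution sketch (illustration only, not executed by Home Assistant): for a metric
# reporting WIFFI_UOM_TEMP_CELSIUS, the maps above yield
#   UOM_TO_DEVICE_CLASS_MAP.get(...)  -> DEVICE_CLASS_TEMPERATURE
#   UOM_MAP.get(..., ...)             -> TEMP_CELSIUS
# while an unmapped unit string keeps the wiffi unit unchanged and gets no device class.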
|
import asyncio
from enum import Enum
from functools import wraps
import logging
from typing import Any, Union
import zigpy.exceptions
from homeassistant.core import callback
from .. import typing as zha_typing
from ..const import (
ATTR_ARGS,
ATTR_ATTRIBUTE_ID,
ATTR_ATTRIBUTE_NAME,
ATTR_CLUSTER_ID,
ATTR_COMMAND,
ATTR_UNIQUE_ID,
ATTR_VALUE,
CHANNEL_ZDO,
SIGNAL_ATTR_UPDATED,
)
from ..helpers import LogMixin, safe_read
_LOGGER = logging.getLogger(__name__)
def parse_and_log_command(channel, tsn, command_id, args):
"""Parse and log a zigbee cluster command."""
cmd = channel.cluster.server_commands.get(command_id, [command_id])[0]
channel.debug(
"received '%s' command with %s args on cluster_id '%s' tsn '%s'",
cmd,
args,
channel.cluster.cluster_id,
tsn,
)
return cmd
def decorate_command(channel, command):
"""Wrap a cluster command to make it safe."""
@wraps(command)
async def wrapper(*args, **kwds):
try:
result = await command(*args, **kwds)
channel.debug(
"executed '%s' command with args: '%s' kwargs: '%s' result: %s",
command.__name__,
args,
kwds,
result,
)
return result
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
channel.debug(
"command failed: '%s' args: '%s' kwargs '%s' exception: '%s'",
command.__name__,
args,
kwds,
str(ex),
)
return ex
return wrapper
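# Behaviour sketch: cluster commands reached through ZigbeeChannel.__getattr__ below
# (e.g. channel.read_attributes(...)) are wrapped by decorate_command, so Zigbee and
# timeout failures are logged via channel.debug() and returned as the exception object
# rather than raised to the caller.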
class ChannelStatus(Enum):
"""Status of a channel."""
CREATED = 1
CONFIGURED = 2
INITIALIZED = 3
class ZigbeeChannel(LogMixin):
"""Base channel for a Zigbee cluster."""
REPORT_CONFIG = ()
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
) -> None:
"""Initialize ZigbeeChannel."""
self._generic_id = f"channel_0x{cluster.cluster_id:04x}"
self._channel_name = getattr(cluster, "ep_attribute", self._generic_id)
self._ch_pool = ch_pool
self._cluster = cluster
self._id = f"{ch_pool.id}:0x{cluster.cluster_id:04x}"
unique_id = ch_pool.unique_id.replace("-", ":")
self._unique_id = f"{unique_id}:0x{cluster.cluster_id:04x}"
self._report_config = self.REPORT_CONFIG
if not hasattr(self, "_value_attribute") and len(self._report_config) > 0:
attr = self._report_config[0].get("attr")
if isinstance(attr, str):
self.value_attribute = self.cluster.attridx.get(attr)
else:
self.value_attribute = attr
self._status = ChannelStatus.CREATED
self._cluster.add_listener(self)
@property
def id(self) -> str:
"""Return channel id unique for this device only."""
return self._id
@property
def generic_id(self):
"""Return the generic id for this channel."""
return self._generic_id
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the zigpy cluster for this channel."""
return self._cluster
@property
def name(self) -> str:
"""Return friendly name."""
return self._channel_name
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def async_send_signal(self, signal: str, *args: Any) -> None:
"""Send a signal through hass dispatcher."""
self._ch_pool.async_send_signal(signal, *args)
async def bind(self):
"""Bind a zigbee cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
try:
res = await self.cluster.bind()
self.debug("bound '%s' cluster: %s", self.cluster.ep_attribute, res[0])
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"Failed to bind '%s' cluster: %s", self.cluster.ep_attribute, str(ex)
)
async def configure_reporting(self) -> None:
"""Configure attribute reporting for a cluster.
This also swallows ZigbeeException exceptions that are thrown when
devices are unreachable.
"""
kwargs = {}
if self.cluster.cluster_id >= 0xFC00 and self._ch_pool.manufacturer_code:
kwargs["manufacturer"] = self._ch_pool.manufacturer_code
for report in self._report_config:
attr = report["attr"]
attr_name = self.cluster.attributes.get(attr, [attr])[0]
min_report_int, max_report_int, reportable_change = report["config"]
try:
res = await self.cluster.configure_reporting(
attr, min_report_int, max_report_int, reportable_change, **kwargs
)
self.debug(
"reporting '%s' attr on '%s' cluster: %d/%d/%d: Result: '%s'",
attr_name,
self.cluster.ep_attribute,
min_report_int,
max_report_int,
reportable_change,
res,
)
except (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError) as ex:
self.debug(
"failed to set reporting for '%s' attr on '%s' cluster: %s",
attr_name,
self.cluster.ep_attribute,
str(ex),
)
async def async_configure(self):
"""Set cluster binding and attribute reporting."""
if not self._ch_pool.skip_configuration:
await self.bind()
if self.cluster.is_server:
await self.configure_reporting()
self.debug("finished channel configuration")
else:
self.debug("skipping channel configuration")
self._status = ChannelStatus.CONFIGURED
async def async_initialize(self, from_cache):
"""Initialize channel."""
if not from_cache and self._ch_pool.skip_configuration:
self._status = ChannelStatus.INITIALIZED
return
self.debug("initializing channel: from_cache: %s", from_cache)
attributes = []
for report_config in self._report_config:
attributes.append(report_config["attr"])
if len(attributes) > 0:
await self.get_attributes(attributes, from_cache=from_cache)
self._status = ChannelStatus.INITIALIZED
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
attrid,
self.cluster.attributes.get(attrid, [attrid])[0],
value,
)
@callback
def zdo_command(self, *args, **kwargs):
"""Handle ZDO commands on this cluster."""
@callback
def zha_send_event(self, command: str, args: Union[int, dict]) -> None:
"""Relay events to hass."""
self._ch_pool.zha_send_event(
{
ATTR_UNIQUE_ID: self.unique_id,
ATTR_CLUSTER_ID: self.cluster.cluster_id,
ATTR_COMMAND: command,
ATTR_ARGS: args,
}
)
async def async_update(self):
"""Retrieve latest state from cluster."""
async def get_attribute_value(self, attribute, from_cache=True):
"""Get the value for an attribute."""
manufacturer = None
manufacturer_code = self._ch_pool.manufacturer_code
if self.cluster.cluster_id >= 0xFC00 and manufacturer_code:
manufacturer = manufacturer_code
result = await safe_read(
self._cluster,
[attribute],
allow_cache=from_cache,
only_cache=from_cache and not self._ch_pool.is_mains_powered,
manufacturer=manufacturer,
)
return result.get(attribute)
async def get_attributes(self, attributes, from_cache=True):
"""Get the values for a list of attributes."""
manufacturer = None
manufacturer_code = self._ch_pool.manufacturer_code
if self.cluster.cluster_id >= 0xFC00 and manufacturer_code:
manufacturer = manufacturer_code
try:
result, _ = await self.cluster.read_attributes(
attributes,
allow_cache=from_cache,
only_cache=from_cache and not self._ch_pool.is_mains_powered,
manufacturer=manufacturer,
)
return result
except (asyncio.TimeoutError, zigpy.exceptions.ZigbeeException) as ex:
self.debug(
"failed to get attributes '%s' on '%s' cluster: %s",
attributes,
self.cluster.ep_attribute,
str(ex),
)
return {}
def log(self, level, msg, *args):
"""Log a message."""
msg = f"[%s:%s]: {msg}"
args = (self._ch_pool.nwk, self._id) + args
_LOGGER.log(level, msg, *args)
def __getattr__(self, name):
"""Get attribute or a decorated cluster command."""
if hasattr(self._cluster, name) and callable(getattr(self._cluster, name)):
command = getattr(self._cluster, name)
command.__name__ = name
return decorate_command(self, command)
return self.__getattribute__(name)
class ZDOChannel(LogMixin):
"""Channel for ZDO events."""
def __init__(self, cluster, device):
"""Initialize ZDOChannel."""
self.name = CHANNEL_ZDO
self._cluster = cluster
self._zha_device = device
self._status = ChannelStatus.CREATED
self._unique_id = "{}:{}_ZDO".format(str(device.ieee), device.name)
self._cluster.add_listener(self)
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def cluster(self):
"""Return the aigpy cluster for this channel."""
return self._cluster
@property
def status(self):
"""Return the status of the channel."""
return self._status
@callback
def device_announce(self, zigpy_device):
"""Device announce handler."""
@callback
def permit_duration(self, duration):
"""Permit handler."""
async def async_initialize(self, from_cache):
"""Initialize channel."""
self._status = ChannelStatus.INITIALIZED
async def async_configure(self):
"""Configure channel."""
self._status = ChannelStatus.CONFIGURED
def log(self, level, msg, *args):
"""Log a message."""
msg = f"[%s:ZDO](%s): {msg}"
args = (self._zha_device.nwk, self._zha_device.model) + args
_LOGGER.log(level, msg, *args)
class ClientChannel(ZigbeeChannel):
"""Channel listener for Zigbee client (output) clusters."""
@callback
def attribute_updated(self, attrid, value):
"""Handle an attribute updated on this cluster."""
self.zha_send_event(
SIGNAL_ATTR_UPDATED,
{
ATTR_ATTRIBUTE_ID: attrid,
ATTR_ATTRIBUTE_NAME: self._cluster.attributes.get(attrid, ["Unknown"])[
0
],
ATTR_VALUE: value,
},
)
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle a cluster command received on this cluster."""
if (
self._cluster.server_commands is not None
and self._cluster.server_commands.get(command_id) is not None
):
self.zha_send_event(self._cluster.server_commands.get(command_id)[0], args)
|
import pytest
from yandextank.plugins.DataUploader.plugin import BackendTypes
class TestBackendTypes(object):
@pytest.mark.parametrize('api_address, section_name, expected_type', [
('lunapark.foo-bar.ru', 'uploader', BackendTypes.LUNAPARK),
('lunapark.test.foo-bar.ru', 'overload', BackendTypes.LUNAPARK),
('overload.yandex.net', 'uploade', BackendTypes.OVERLOAD),
('localhost', 'lunapark', BackendTypes.LUNAPARK)
])
def test_identify(self, api_address, section_name, expected_type):
assert BackendTypes.identify_backend(api_address, section_name) == expected_type
|
import math
from iglo import Lamp
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
LightEntity,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
DEFAULT_NAME = "iGlo Light"
DEFAULT_PORT = 8080
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the iGlo lights."""
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
port = config.get(CONF_PORT)
add_entities([IGloLamp(name, host, port)], True)
class IGloLamp(LightEntity):
"""Representation of an iGlo light."""
def __init__(self, name, host, port):
"""Initialize the light."""
self._name = name
self._lamp = Lamp(0, host, port)
@property
def name(self):
"""Return the name of the light."""
return self._name
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return int((self._lamp.state()["brightness"] / 200.0) * 255)
@property
def color_temp(self):
"""Return the color temperature."""
return color_util.color_temperature_kelvin_to_mired(self._lamp.state()["white"])
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return math.ceil(
color_util.color_temperature_kelvin_to_mired(self._lamp.max_kelvin)
)
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return math.ceil(
color_util.color_temperature_kelvin_to_mired(self._lamp.min_kelvin)
)
@property
def hs_color(self):
"""Return the hs value."""
return color_util.color_RGB_to_hs(*self._lamp.state()["rgb"])
@property
def effect(self):
"""Return the current effect."""
return self._lamp.state()["effect"]
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._lamp.effect_list()
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR | SUPPORT_EFFECT
@property
def is_on(self):
"""Return true if light is on."""
return self._lamp.state()["on"]
def turn_on(self, **kwargs):
"""Turn the light on."""
if not self.is_on:
self._lamp.switch(True)
if ATTR_BRIGHTNESS in kwargs:
brightness = int((kwargs[ATTR_BRIGHTNESS] / 255.0) * 200.0)
self._lamp.brightness(brightness)
return
if ATTR_HS_COLOR in kwargs:
rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
self._lamp.rgb(*rgb)
return
if ATTR_COLOR_TEMP in kwargs:
kelvin = int(
color_util.color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
)
self._lamp.white(kelvin)
return
if ATTR_EFFECT in kwargs:
effect = kwargs[ATTR_EFFECT]
self._lamp.effect(effect)
return
def turn_off(self, **kwargs):
"""Turn the light off."""
self._lamp.switch(False)
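# Brightness scaling sketch (illustrative values, not called by Home Assistant):
# Home Assistant uses 0..255 while the iGlo lamp expects 0..200, so turn_on()
# scales down and the brightness property scales back up.
#   int((128 / 255.0) * 200.0) == 100   # HA value -> lamp value
#   int((100 / 200.0) * 255.0) == 127   # lamp value -> HA value (truncation loses 1)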
|
from numbers import Number
from homeassistant.const import (
LENGTH,
LENGTH_FEET,
LENGTH_KILOMETERS,
LENGTH_METERS,
LENGTH_MILES,
UNIT_NOT_RECOGNIZED_TEMPLATE,
)
VALID_UNITS = [LENGTH_KILOMETERS, LENGTH_MILES, LENGTH_FEET, LENGTH_METERS]
def convert(value: float, unit_1: str, unit_2: str) -> float:
"""Convert one unit of measurement to another."""
if unit_1 not in VALID_UNITS:
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(unit_1, LENGTH))
if unit_2 not in VALID_UNITS:
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(unit_2, LENGTH))
if not isinstance(value, Number):
raise TypeError(f"{value} is not of numeric type")
if unit_1 == unit_2 or unit_1 not in VALID_UNITS:
return value
meters: float = value
if unit_1 == LENGTH_MILES:
meters = __miles_to_meters(value)
elif unit_1 == LENGTH_FEET:
meters = __feet_to_meters(value)
elif unit_1 == LENGTH_KILOMETERS:
meters = __kilometers_to_meters(value)
result = meters
if unit_2 == LENGTH_MILES:
result = __meters_to_miles(meters)
elif unit_2 == LENGTH_FEET:
result = __meters_to_feet(meters)
elif unit_2 == LENGTH_KILOMETERS:
result = __meters_to_kilometers(meters)
return result
def __miles_to_meters(miles: float) -> float:
"""Convert miles to meters."""
return miles * 1609.344
def __feet_to_meters(feet: float) -> float:
"""Convert feet to meters."""
return feet * 0.3048
def __kilometers_to_meters(kilometers: float) -> float:
"""Convert kilometers to meters."""
return kilometers * 1000
def __meters_to_miles(meters: float) -> float:
"""Convert meters to miles."""
return meters * 0.000621371
def __meters_to_feet(meters: float) -> float:
"""Convert meters to feet."""
return meters * 3.28084
def __meters_to_kilometers(meters: float) -> float:
"""Convert meters to kilometers."""
return meters * 0.001
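if __name__ == "__main__":
    # Minimal usage sketch (assumption: the module is run directly, outside Home Assistant).
    print(convert(5, LENGTH_MILES, LENGTH_KILOMETERS))  # ~8.04672
    print(convert(1000, LENGTH_METERS, LENGTH_FEET))    # ~3280.84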
|
import botocore
from flask import current_app
from retrying import retry
from lemur.extensions import metrics, sentry
from lemur.exceptions import InvalidListener
from lemur.plugins.lemur_aws.sts import sts_client
def retry_throttled(exception):
"""
Determines if this exception is due to throttling
:param exception:
:return:
"""
# Log details about the exception
try:
raise exception
except Exception as e:
current_app.logger.error("ELB retry_throttled triggered", exc_info=True)
metrics.send("elb_retry", "counter", 1, metric_tags={"exception": str(e)})
sentry.captureException()
if isinstance(exception, botocore.exceptions.ClientError):
if exception.response["Error"]["Code"] == "LoadBalancerNotFound":
return False
if exception.response["Error"]["Code"] == "CertificateNotFound":
return False
return True
def is_valid(listener_tuple):
"""
There are a few rules that aws has when creating listeners,
this function ensures those rules are met before we try and create
or update a listener.
    While these could be caught with boto exception handling, I would
    rather be nice and catch these early before we send them out to aws.
    It also gives us an opportunity to create nice user warnings.
    This validity check should also happen in the frontend,
    but it must also be enforced by the server.
:param listener_tuple:
"""
lb_port, i_port, lb_protocol, arn = listener_tuple
if lb_protocol.lower() in ["ssl", "https"]:
if not arn:
raise InvalidListener
return listener_tuple
def get_all_elbs(**kwargs):
"""
Fetches all elbs for a given account/region
:param kwargs:
:return:
"""
elbs = []
try:
while True:
response = get_elbs(**kwargs)
elbs += response["LoadBalancerDescriptions"]
if not response.get("NextMarker"):
return elbs
else:
kwargs.update(dict(Marker=response["NextMarker"]))
except Exception as e: # noqa
metrics.send("get_all_elbs_error", "counter", 1)
sentry.captureException()
raise
def get_all_elbs_v2(**kwargs):
"""
Fetches all elbs for a given account/region
:param kwargs:
:return:
"""
elbs = []
try:
while True:
response = get_elbs_v2(**kwargs)
elbs += response["LoadBalancers"]
if not response.get("NextMarker"):
return elbs
else:
kwargs.update(dict(Marker=response["NextMarker"]))
except Exception as e: # noqa
metrics.send("get_all_elbs_v2_error", "counter", 1)
sentry.captureException()
raise
@sts_client("elbv2")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
def get_listener_arn_from_endpoint(endpoint_name, endpoint_port, **kwargs):
"""
Get a listener ARN from an endpoint.
:param endpoint_name:
:param endpoint_port:
:return:
"""
try:
client = kwargs.pop("client")
elbs = client.describe_load_balancers(Names=[endpoint_name])
for elb in elbs["LoadBalancers"]:
listeners = client.describe_listeners(
LoadBalancerArn=elb["LoadBalancerArn"]
)
for listener in listeners["Listeners"]:
if listener["Port"] == endpoint_port:
return listener["ListenerArn"]
except Exception as e: # noqa
metrics.send(
"get_listener_arn_from_endpoint_error",
"counter",
1,
metric_tags={
"error": str(e),
"endpoint_name": endpoint_name,
"endpoint_port": endpoint_port,
},
)
sentry.captureException(
extra={
"endpoint_name": str(endpoint_name),
"endpoint_port": str(endpoint_port),
}
)
raise
@sts_client("elbv2")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=5)
def get_load_balancer_arn_from_endpoint(endpoint_name, **kwargs):
"""
Get a load balancer ARN from an endpoint.
:param endpoint_name:
:return:
"""
try:
client = kwargs.pop("client")
elbs = client.describe_load_balancers(Names=[endpoint_name])
if "LoadBalancers" in elbs and elbs["LoadBalancers"]:
return elbs["LoadBalancers"][0]["LoadBalancerArn"]
except Exception as e: # noqa
metrics.send(
"get_load_balancer_arn_from_endpoint",
"counter",
1,
metric_tags={
"error": str(e),
"endpoint_name": endpoint_name,
},
)
sentry.captureException(
extra={
"endpoint_name": str(endpoint_name),
}
)
raise
@sts_client("elb")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
def get_elbs(**kwargs):
"""
    Fetches one page of elb objects for a given account and region.
"""
try:
client = kwargs.pop("client")
return client.describe_load_balancers(**kwargs)
except Exception as e: # noqa
metrics.send("get_elbs_error", "counter", 1, metric_tags={"error": str(e)})
sentry.captureException()
raise
@sts_client("elbv2")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
def get_elbs_v2(**kwargs):
"""
Fetches one page of elb objects for a given account and region.
:param kwargs:
:return:
"""
try:
client = kwargs.pop("client")
return client.describe_load_balancers(**kwargs)
except Exception as e: # noqa
metrics.send("get_elbs_v2_error", "counter", 1, metric_tags={"error": str(e)})
sentry.captureException()
raise
@sts_client("elbv2")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
def describe_listeners_v2(**kwargs):
"""
Fetches one page of listener objects for a given elb arn.
:param kwargs:
:return:
"""
try:
client = kwargs.pop("client")
return client.describe_listeners(**kwargs)
except Exception as e: # noqa
metrics.send(
"describe_listeners_v2_error", "counter", 1, metric_tags={"error": str(e)}
)
sentry.captureException()
raise
@sts_client("elb")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
def describe_load_balancer_policies(load_balancer_name, policy_names, **kwargs):
"""
Fetching all policies currently associated with an ELB.
:param load_balancer_name:
:return:
"""
try:
return kwargs["client"].describe_load_balancer_policies(
LoadBalancerName=load_balancer_name, PolicyNames=policy_names
)
except Exception as e: # noqa
metrics.send(
"describe_load_balancer_policies_error",
"counter",
1,
metric_tags={
"load_balancer_name": load_balancer_name,
"policy_names": policy_names,
"error": str(e),
},
)
sentry.captureException(
extra={
"load_balancer_name": str(load_balancer_name),
"policy_names": str(policy_names),
}
)
raise
@sts_client("elbv2")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
def describe_ssl_policies_v2(policy_names, **kwargs):
"""
    Fetches details for the requested SSL policies.
:param policy_names:
:return:
"""
try:
return kwargs["client"].describe_ssl_policies(Names=policy_names)
except Exception as e: # noqa
metrics.send(
"describe_ssl_policies_v2_error",
"counter",
1,
metric_tags={"policy_names": policy_names, "error": str(e)},
)
sentry.captureException(extra={"policy_names": str(policy_names)})
raise
@sts_client("elb")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
def describe_load_balancer_types(policies, **kwargs):
"""
Describe the policies with policy details.
:param policies:
:return:
"""
return kwargs["client"].describe_load_balancer_policy_types(
PolicyTypeNames=policies
)
@sts_client("elb")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
def attach_certificate(name, port, certificate_id, **kwargs):
"""
Attaches a certificate to a listener, throws exception
if certificate specified does not exist in a particular account.
:param name:
:param port:
:param certificate_id:
"""
try:
return kwargs["client"].set_load_balancer_listener_ssl_certificate(
LoadBalancerName=name,
LoadBalancerPort=port,
SSLCertificateId=certificate_id,
)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "LoadBalancerNotFound":
current_app.logger.warning("Loadbalancer does not exist.")
else:
raise e
@sts_client("elbv2")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=20)
def attach_certificate_v2(listener_arn, port, certificates, **kwargs):
"""
Attaches a certificate to a listener, throws exception
if certificate specified does not exist in a particular account.
:param listener_arn:
:param port:
:param certificates:
"""
try:
return kwargs["client"].modify_listener(
ListenerArn=listener_arn, Port=port, Certificates=certificates
)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] == "LoadBalancerNotFound":
current_app.logger.warning("Loadbalancer does not exist.")
else:
raise e
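# Retry-predicate sketch (hypothetical error payloads, no AWS call is made): for a
# botocore ClientError whose Error.Code is "LoadBalancerNotFound" or
# "CertificateNotFound", retry_throttled() returns False and the @retry decorators
# above give up immediately; every other exception returns True and is retried with
# the configured 2 second wait.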
|
import logging
from avri.api import Avri, AvriException
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_ID, DEVICE_CLASS_TIMESTAMP
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN, ICON
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Avri Waste platform."""
client = hass.data[DOMAIN][entry.entry_id]
integration_id = entry.data[CONF_ID]
try:
each_upcoming = await hass.async_add_executor_job(client.upcoming_of_each)
except AvriException as ex:
raise PlatformNotReady from ex
else:
entities = [
AvriWasteUpcoming(client, upcoming.name, integration_id)
for upcoming in each_upcoming
]
async_add_entities(entities, True)
class AvriWasteUpcoming(Entity):
"""Avri Waste Sensor."""
def __init__(self, client: Avri, waste_type: str, integration_id: str):
"""Initialize the sensor."""
self._waste_type = waste_type
self._name = f"{self._waste_type}".title()
self._state = None
self._client = client
self._state_available = False
self._integration_id = integration_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return (f"{self._integration_id}" f"-{self._waste_type}").replace(" ", "")
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return True if entity is available."""
return self._state_available
@property
def device_class(self):
"""Return the device class of the sensor."""
return DEVICE_CLASS_TIMESTAMP
@property
def icon(self):
"""Icon to use in the frontend."""
return ICON
async def async_update(self):
"""Update the data."""
if not self.enabled:
return
try:
pickup_events = self._client.upcoming_of_each()
except AvriException as ex:
_LOGGER.error(
"There was an error retrieving upcoming garbage pickups: %s", ex
)
self._state_available = False
self._state = None
else:
self._state_available = True
matched_events = list(
filter(lambda event: event.name == self._waste_type, pickup_events)
)
if not matched_events:
self._state = None
else:
self._state = matched_events[0].day.date()
|
import dedupe
import unittest
class ParsingTest(unittest.TestCase):
def setUp(self):
self.index = dedupe.tfidf.TfIdfIndex()
def test_keywords(self):
self.index.index(('AND', 'OR', 'EOF', 'NOT'))
self.index._index.initSearch()
assert self.index.search(('AND', 'OR', 'EOF', 'NOT'))[0] == 1
def test_keywords_title(self):
self.index.index(('And', 'Or', 'Eof', 'Not'))
self.index._index.initSearch()
assert self.index.search(('And', 'Or', 'Eof', 'Not'))[0] == 1
def test_empty_search(self):
self.index._index.initSearch()
assert self.index.search(()) == []
def test_wildcards(self):
self.index.index((r'f\o',))
self.index.index(('f*',))
self.index._index.initSearch()
assert len(self.index.search(('f*',))) == 1
if __name__ == "__main__":
unittest.main()
|
import os
import cherrypy
thisdir = os.path.dirname(os.path.abspath(__file__))
class Root:
pass
if __name__ == '__main__':
conf = {'/base': {'tools.staticdir.root': thisdir,
# Obsolete key.
'throw_errors': True,
},
# This entry should be OK.
'/base/static': {'tools.staticdir.on': True,
'tools.staticdir.dir': 'static'},
# Warn on missing folder.
'/base/js': {'tools.staticdir.on': True,
'tools.staticdir.dir': 'js'},
# Warn on dir with an abs path even though we provide root.
'/base/static2': {'tools.staticdir.on': True,
'tools.staticdir.dir': '/static'},
# Warn on dir with a relative path with no root.
'/static3': {'tools.staticdir.on': True,
'tools.staticdir.dir': 'static'},
# Warn on unknown namespace
'/unknown': {'toobles.gzip.on': True},
# Warn special on cherrypy.<known ns>.*
'/cpknown': {'cherrypy.tools.encode.on': True},
# Warn on mismatched types
'/conftype': {'request.show_tracebacks': 14},
# Warn on unknown tool.
'/web': {'tools.unknown.on': True},
# Warn on server.* in app config.
'/app1': {'server.socket_host': '0.0.0.0'},
# Warn on 'localhost'
'global': {'server.socket_host': 'localhost'},
# Warn on '[name]'
'[/extra_brackets]': {},
}
cherrypy.quickstart(Root(), config=conf)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import posixpath
import xml.etree.ElementTree
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import gluster
import six
FLAGS = flags.FLAGS
BENCHMARKS = ['VDI', 'DATABASE', 'SWBUILD', 'VDA', 'EDA']
flags.DEFINE_string(
'specsfs2014_config', None,
'This flag can be used to specify an alternate SPEC config file to use. '
'If this option is specified, none of the other benchmark specific flags '
'which operate on the config file will be used (since the default config '
'file will be replaced by this one).')
flags.DEFINE_list('specsfs2014_benchmarks', BENCHMARKS,
'The SPEC SFS 2014 benchmarks to run.')
flags.register_validator(
'specsfs2014_benchmarks',
lambda benchmarks: benchmarks and set(benchmarks).issubset(BENCHMARKS),
'Invalid benchmarks list. specsfs2014_benchmarks must be a subset of ' +
', '.join(BENCHMARKS))
flag_util.DEFINE_integerlist(
'specsfs2014_load', [1],
'The starting load in units of SPEC "business metrics". The meaning of '
'business metric varies depending on the SPEC benchmark (e.g. VDI has '
'load measured in virtual desktops).', module_name=__name__)
flags.DEFINE_integer(
'specsfs2014_incr_load', 1,
'The amount to increment "load" by for each run.',
lower_bound=1)
flags.DEFINE_integer(
'specsfs2014_num_runs', 1,
'The total number of SPEC runs. The load for the nth run is '
'"load" + n * "specsfs_incr_load".',
lower_bound=1)
flags.DEFINE_boolean(
'specsfs2014_auto_mode', False,
'If True, automatically find the max passing score for each benchmark. '
'This ignores other flags such as specsfs2014_load, specsfs2014_incr_load, '
'and specsfs2014_num_runs.')
BENCHMARK_NAME = 'specsfs2014'
BENCHMARK_CONFIG = """
specsfs2014:
description: >
Run SPEC SFS 2014. For a full explanation of all benchmark modes
see http://www.spec.org/sfs2014/. In order to run this benchmark
copy your 'SPECsfs2014_SP2.iso' and 'netmist_license_key' files
into the data/ directory.
vm_groups:
clients:
vm_spec: *default_single_core
vm_count: null
gluster_servers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: 3
"""
_SPEC_SFS_2014_ISO = 'SPECsfs2014_SP2.iso'
_SPEC_SFS_2014_LICENSE = 'netmist_license_key'
_SPEC_DIR = 'spec'
_SPEC_CONFIG = 'sfs_rc'
_VOLUME_NAME = 'gv0'
_MOUNT_POINT = '/scratch'
_MOUNTPOINTS_FILE = 'mountpoints.txt'
_PUBLISHED_METRICS = frozenset([
'achieved rate', 'average latency', 'overall throughput',
'read throughput', 'write throughput'
])
_METADATA_KEYS = frozenset([
'op rate', 'run time', 'processes per client', 'file size',
'client data set size', 'starting data set size', 'initial file space',
'maximum file space'
])
BENCHMARK_DATA = {
_SPEC_SFS_2014_ISO:
'666d3f79e9184211736c32c825edb007c6a5ad88eeceb3c99aa01acf733c6fb3'
}
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(unused_benchmark_config):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
data.ResourcePath(_SPEC_SFS_2014_LICENSE)
if FLAGS.specsfs2014_config:
data.ResourcePath(FLAGS.specsfs2014_config)
def _PrepareSpec(vm):
"""Prepares a SPEC client by copying SPEC to the VM."""
mount_dir = 'spec_mnt'
vm.RemoteCommand('mkdir %s' % mount_dir)
vm.RemoteCommand('mkdir %s' % _SPEC_DIR)
vm.InstallPreprovisionedBenchmarkData('specsfs2014', [_SPEC_SFS_2014_ISO],
'~/')
vm.PushFile(data.ResourcePath(_SPEC_SFS_2014_LICENSE), _SPEC_DIR)
vm.RemoteCommand('sudo mount -t iso9660 -o loop %s %s' %
(_SPEC_SFS_2014_ISO, mount_dir))
vm.RemoteCommand('cp -r %s/* %s' % (mount_dir, _SPEC_DIR))
vm.RemoteCommand('sudo umount {0} && sudo rm -rf {0}'.format(mount_dir))
def _ConfigureSpec(prime_client, clients, benchmark,
load=None, num_runs=None, incr_load=None):
"""Configures SPEC SFS 2014 on the prime client.
This function modifies the default configuration file (sfs_rc) which
can be found either in the SPEC SFS 2014 user guide or within the iso.
It also creates a file containing the client mountpoints so that SPEC
can run in a distributed manner.
Args:
prime_client: The VM from which SPEC will be controlled.
clients: A list of SPEC client VMs (including the prime_client).
benchmark: The sub-benchmark to run.
load: List of ints. The LOAD parameter to SPECSFS.
num_runs: The NUM_RUNS parameter to SPECSFS.
incr_load: The INCR_LOAD parameter to SPECSFS.
"""
config_path = posixpath.join(_SPEC_DIR, _SPEC_CONFIG)
prime_client.RemoteCommand('sudo cp {0}.bak {0}'.format(config_path))
stdout, _ = prime_client.RemoteCommand('pwd')
exec_path = posixpath.join(stdout.strip(), _SPEC_DIR, 'binaries',
'linux', 'x86_64', 'netmist')
load = load or FLAGS.specsfs2014_load
num_runs = num_runs or FLAGS.specsfs2014_num_runs
incr_load = incr_load or FLAGS.specsfs2014_incr_load
configuration_overrides = {
'USER': prime_client.user_name,
'EXEC_PATH': exec_path.replace('/', r'\/'),
'CLIENT_MOUNTPOINTS': _MOUNTPOINTS_FILE,
'BENCHMARK': benchmark,
'LOAD': ' '.join([str(x) for x in load]),
'NUM_RUNS': num_runs,
'INCR_LOAD': incr_load,
'WARMUP_TIME': 60,
}
# Any special characters in the overrides dictionary should be escaped so
# that they don't interfere with sed.
sed_expressions = ' '.join([
'-e "s/{0}=.*/{0}={1}/"'.format(k, v)
for k, v in six.iteritems(configuration_overrides)
])
sed_cmd = 'sudo sed -i {0} {1}'.format(sed_expressions, config_path)
prime_client.RemoteCommand(sed_cmd)
mount_points = [f'{client.internal_ip} {_MOUNT_POINT}' for client in clients]
vm_util.CreateRemoteFile(prime_client, '\n'.join(mount_points),
posixpath.join(_SPEC_DIR, _MOUNTPOINTS_FILE))
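# Substitution sketch (hypothetical override): an entry such as {'BENCHMARK': 'VDI'} in
# configuration_overrides above becomes the sed expression -e "s/BENCHMARK=.*/BENCHMARK=VDI/",
# which rewrites the matching BENCHMARK= line of sfs_rc in place on the prime client.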
def Prepare(benchmark_spec):
"""Install SPEC SFS 2014.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
gluster_servers = benchmark_spec.vm_groups['gluster_servers']
clients = benchmark_spec.vm_groups['clients']
prime_client = clients[0]
# Set up Gluster
if gluster_servers:
gluster.ConfigureServers(gluster_servers, _VOLUME_NAME)
args = [((client, gluster_servers[0], _VOLUME_NAME, _MOUNT_POINT), {})
for client in clients]
vm_util.RunThreaded(gluster.MountGluster, args)
# Set up SPEC
vm_util.RunThreaded(_PrepareSpec, clients)
# Create a backup of the config file.
prime_client.RemoteCommand('cp {0} {0}.bak'.format(
posixpath.join(_SPEC_DIR, _SPEC_CONFIG)))
prime_client.AuthenticateVm()
# Make sure any Static VMs are setup correctly.
for client in clients:
prime_client.TestAuthentication(client)
def _ParseSpecSfsOutput(output, extra_metadata=None):
"""Returns samples generated from the output of SPEC SFS 2014.
Args:
output: The stdout from running SPEC.
extra_metadata: Dict of metadata to include with results.
Returns:
List of sample.Sample objects.
This parses the contents of the results xml file and creates samples
from the achieved operation rate, latency, and throughput metrics.
The samples are annotated with metadata collected from the xml file
including information about the benchmark name, the load, and data size.
"""
root = xml.etree.ElementTree.fromstring(output)
samples = []
for run in root.findall('run'):
metadata = {
'benchmark': run.find('benchmark').attrib['name'],
'business_metric': run.find('business_metric').text
}
if extra_metadata:
metadata.update(extra_metadata)
for key in _METADATA_KEYS:
element = run.find('metric[@name="%s"]' % key)
units = element.attrib.get('units')
label = '%s (%s)' % (key, units) if units else key
metadata[label] = element.text
if run.find('valid_run').text == 'INVALID_RUN':
metadata['valid_run'] = False
else:
metadata['valid_run'] = True
for metric in run.findall('metric'):
name = metric.attrib['name']
if name in _PUBLISHED_METRICS:
samples.append(sample.Sample(name,
float(metric.text),
metric.attrib.get('units', ''),
metadata))
return samples
def _RunSpecSfs(benchmark_spec):
"""Run SPEC SFS 2014 once.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
prime_client = benchmark_spec.vm_groups['clients'][0]
run_cmd = 'cd {0} && ./sfsmanager -r sfs_rc {1}'.format(
_SPEC_DIR, '-a' if FLAGS.specsfs2014_auto_mode else '')
prime_client.RobustRemoteCommand(run_cmd, ignore_failure=True)
results_file = posixpath.join(_SPEC_DIR, 'results', 'sfssum_sfs2014_SP2.xml')
output, _ = prime_client.RemoteCommand('cat %s' % results_file)
if benchmark_spec.vm_groups['gluster_servers']:
gluster_metadata = {
'gluster_stripes': FLAGS.gluster_stripes,
'gluster_replicas': FLAGS.gluster_replicas
}
else:
gluster_metadata = {}
return _ParseSpecSfsOutput(output, extra_metadata=gluster_metadata)
def Run(benchmark_spec):
"""Run SPEC SFS 2014 for each configuration.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
clients = benchmark_spec.vm_groups['clients']
prime_client = clients[0]
results = []
if FLAGS.specsfs2014_config:
prime_client.PushFile(
data.ResourcePath(FLAGS.specsfs2014_config),
posixpath.join(_SPEC_DIR, _SPEC_CONFIG))
results += _RunSpecSfs(benchmark_spec)
else:
for benchmark in FLAGS.specsfs2014_benchmarks:
_ConfigureSpec(prime_client, clients, benchmark)
results += _RunSpecSfs(benchmark_spec)
return results
def Cleanup(benchmark_spec):
"""Cleanup SPEC SFS 2014.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
clients = benchmark_spec.vm_groups['clients']
gluster_servers = benchmark_spec.vm_groups['gluster_servers']
for client in clients:
client.RemoteCommand('sudo umount %s' % _MOUNT_POINT)
client.RemoteCommand(
'rm %s && sudo rm -rf %s' % (_SPEC_SFS_2014_ISO, _SPEC_DIR))
if gluster_servers:
gluster.DeleteVolume(gluster_servers[0], _VOLUME_NAME)
|
from flexx import flx
class MyApp(flx.JsComponent):
""" This the root of the app, accessible via self.root on any component.
It functions as a central data-store. In this case it is a JsComponent,
but it can also be a PyComponent if that makes more sense.
"""
first_name = flx.StringProp(settable=True)
last_name = flx.StringProp(settable=True)
def init(self):
View()
class MyPersonLabel(flx.Widget):
""" A simple widget that renders the name.
"""
def _render_dom(self):
return [self.root.first_name + ' ' + self.root.last_name]
class View(flx.Widget):
""" This displays the person's name, as well as the input fields to update it.
"""
def init(self):
with flx.VBox():
with flx.HBox():
self.first_edit = flx.LineEdit(placeholder_text='first name',
text='Jane')
self.last_edit = flx.LineEdit(placeholder_text='last name',
text='Doe')
flx.Widget(flex=1) # spacer
with flx.HBox():
flx.Label(text=lambda: self.root.first_name,
style='border:1px solid red')
flx.Label(text=lambda: self.root.last_name,
style='border:1px solid red')
flx.Widget(flex=1) # spacer
MyPersonLabel(style='border:1px solid blue')
flx.Widget(flex=1) # spacer
@flx.reaction
def _update_name(self):
self.root.set_first_name(self.first_edit.text)
self.root.set_last_name(self.last_edit.text)
if __name__ == '__main__':
m = flx.launch(MyApp)
flx.run()
|
from __future__ import unicode_literals
from base64 import b64encode
def b64_encode(item):
"""base64 encode"""
try:
return (b64encode(item.encode('utf-8'))).decode()
    except Exception:
return ''
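if __name__ == '__main__':
    # Minimal usage sketch (assumption: the module is run directly, not via the library):
    print(b64_encode('hello'))  # aGVsbG8=
    print(b64_encode(None))     # '' (the encode() call fails and is swallowed)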
|
import flatbuffers
class AuthCryptosignRequest(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsAuthCryptosignRequest(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = AuthCryptosignRequest()
x.Init(buf, n + offset)
return x
# AuthCryptosignRequest
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# AuthCryptosignRequest
def Pubkey(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.String(o + self._tab.Pos)
return None
# AuthCryptosignRequest
def ChannelBinding(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(6))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
def AuthCryptosignRequestStart(builder): builder.StartObject(2)
def AuthCryptosignRequestAddPubkey(builder, pubkey): builder.PrependUOffsetTRelativeSlot(0, flatbuffers.number_types.UOffsetTFlags.py_type(pubkey), 0)
def AuthCryptosignRequestAddChannelBinding(builder, channelBinding): builder.PrependUint8Slot(1, channelBinding, 0)
def AuthCryptosignRequestEnd(builder): return builder.EndObject()
|
import pandas as pd
import pytest
import pytz
from qstrader.portcon.optimiser.equal_weight import (
EqualWeightPortfolioOptimiser
)
class DataHandlerMock(object):
pass
@pytest.mark.parametrize(
"scale,initial_weights,expected_weights",
[
(
1.0,
{
'EQ:ABCD': 0.25,
'EQ:DEFG': 0.75
},
{
'EQ:ABCD': 0.5,
'EQ:DEFG': 0.5
},
),
(
2.0,
{
'EQ:HIJK': 0.15,
'EQ:LMNO': 0.45,
'EQ:PQRS': 0.40
},
{
'EQ:HIJK': 2 / 3.0,
'EQ:LMNO': 2 / 3.0,
'EQ:PQRS': 2 / 3.0
}
)
]
)
def test_equal_weight_optimiser(scale, initial_weights, expected_weights):
    """
    Tests initialisation and equal-weighting behaviour of
    EqualWeightPortfolioOptimiser.
    """
dt = pd.Timestamp('2019-01-01 00:00:00', tz=pytz.UTC)
data_handler = DataHandlerMock()
fwo = EqualWeightPortfolioOptimiser(scale=scale, data_handler=data_handler)
assert fwo(dt, initial_weights) == expected_weights
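# Worked check for the second case above (illustration): with three assets and
# scale=2.0 the expected allocation is 2.0 * (1 / 3) == 2 / 3 per asset, consistent
# with an equal weighting of scale / n regardless of the initial weights passed in.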
|
import base64
import contextlib
import json
import logging
import time
from pathlib import Path
from typing import TYPE_CHECKING, List, Mapping, MutableMapping, Optional, Tuple, Union
import aiohttp
from redbot.core import Config
from redbot.core.bot import Red
from redbot.core.commands import Cog, Context
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
from ..errors import SpotifyFetchError
if TYPE_CHECKING:
from .. import Audio
_ = Translator("Audio", Path(__file__))
log = logging.getLogger("red.cogs.Audio.api.Spotify")
CATEGORY_ENDPOINT = "https://api.spotify.com/v1/browse/categories"
TOKEN_ENDPOINT = "https://accounts.spotify.com/api/token"
ALBUMS_ENDPOINT = "https://api.spotify.com/v1/albums"
TRACKS_ENDPOINT = "https://api.spotify.com/v1/tracks"
PLAYLISTS_ENDPOINT = "https://api.spotify.com/v1/playlists"
class SpotifyWrapper:
"""Wrapper for the Spotify API."""
def __init__(
self, bot: Red, config: Config, session: aiohttp.ClientSession, cog: Union["Audio", Cog]
):
self.bot = bot
self.config = config
self.session = session
self.spotify_token: Optional[MutableMapping] = None
self.client_id: Optional[str] = None
self.client_secret: Optional[str] = None
self._token: Mapping[str, str] = {}
self.cog = cog
@staticmethod
def spotify_format_call(query_type: str, key: str) -> Tuple[str, MutableMapping]:
"""Format the spotify endpoint."""
params: MutableMapping = {}
if query_type == "album":
query = f"{ALBUMS_ENDPOINT}/{key}/tracks"
elif query_type == "track":
query = f"{TRACKS_ENDPOINT}/{key}"
else:
query = f"{PLAYLISTS_ENDPOINT}/{key}/tracks"
return query, params
async def get_spotify_track_info(
self, track_data: MutableMapping, ctx: Context
) -> Tuple[str, ...]:
"""Extract track info from spotify response."""
prefer_lyrics = await self.cog.get_lyrics_status(ctx)
track_name = track_data["name"]
if prefer_lyrics:
track_name = f"{track_name} - lyrics"
artist_name = track_data["artists"][0]["name"]
track_info = f"{track_name} {artist_name}"
song_url = track_data.get("external_urls", {}).get("spotify")
uri = track_data["uri"]
_id = track_data["id"]
_type = track_data["type"]
return song_url, track_info, uri, artist_name, track_name, _id, _type
@staticmethod
async def is_access_token_valid(token: MutableMapping) -> bool:
"""Check if current token is not too old."""
return (token["expires_at"] - int(time.time())) < 60
@staticmethod
def make_auth_header(
client_id: Optional[str], client_secret: Optional[str]
) -> MutableMapping[str, Union[str, int]]:
"""Make Authorization header for spotify token."""
if client_id is None:
client_id = ""
if client_secret is None:
client_secret = ""
auth_header = base64.b64encode(f"{client_id}:{client_secret}".encode("ascii"))
return {"Authorization": f"Basic {auth_header.decode('ascii')}"}
async def get(
self, url: str, headers: MutableMapping = None, params: MutableMapping = None
) -> MutableMapping[str, str]:
"""Make a GET request to the spotify API."""
if params is None:
params = {}
async with self.session.request("GET", url, params=params, headers=headers) as r:
data = await r.json(loads=json.loads)
if r.status != 200:
log.debug(f"Issue making GET request to {url}: [{r.status}] {data}")
return data
async def update_token(self, new_token: Mapping[str, str]):
self._token = new_token
async def get_token(self) -> None:
"""Get the stored spotify tokens."""
if not self._token:
self._token = await self.bot.get_shared_api_tokens("spotify")
self.client_id = self._token.get("client_id", "")
self.client_secret = self._token.get("client_secret", "")
async def get_country_code(self, ctx: Context = None) -> str:
return (
(
await self.config.user(ctx.author).country_code()
or await self.config.guild(ctx.guild).country_code()
)
if ctx
else "US"
)
async def request_access_token(self) -> MutableMapping:
"""Make a spotify call to get the auth token."""
await self.get_token()
payload = {"grant_type": "client_credentials"}
headers = self.make_auth_header(self.client_id, self.client_secret)
r = await self.post(TOKEN_ENDPOINT, payload=payload, headers=headers)
return r
async def get_access_token(self) -> Optional[str]:
"""Get the access_token."""
if self.spotify_token and not await self.is_access_token_valid(self.spotify_token):
return self.spotify_token["access_token"]
token = await self.request_access_token()
if token is None:
log.debug("Requested a token from Spotify, did not end up getting one.")
try:
token["expires_at"] = int(time.time()) + int(token["expires_in"])
except KeyError:
return None
self.spotify_token = token
log.debug(f"Created a new access token for Spotify: {token}")
return self.spotify_token["access_token"]
async def post(
self, url: str, payload: MutableMapping, headers: MutableMapping = None
) -> MutableMapping:
"""Make a POST call to spotify."""
async with self.session.post(url, data=payload, headers=headers) as r:
data = await r.json(loads=json.loads)
if r.status != 200:
log.debug(f"Issue making POST request to {url}: [{r.status}] {data}")
return data
async def make_get_call(self, url: str, params: MutableMapping) -> MutableMapping:
"""Make a Get call to spotify."""
token = await self.get_access_token()
return await self.get(url, params=params, headers={"Authorization": f"Bearer {token}"})
async def get_categories(self, ctx: Context = None) -> List[MutableMapping]:
"""Get the spotify categories."""
country_code = await self.get_country_code(ctx=ctx)
params: MutableMapping = {"country": country_code} if country_code else {}
result = await self.make_get_call(CATEGORY_ENDPOINT, params=params)
with contextlib.suppress(KeyError):
if result["error"]["status"] == 401:
raise SpotifyFetchError(
message=_(
"The Spotify API key or client secret has not been set properly. "
"\nUse `{prefix}audioset spotifyapi` for instructions."
)
)
categories = result.get("categories", {}).get("items", [])
return [{c["name"]: c["id"]} for c in categories if c]
async def get_playlist_from_category(self, category: str, ctx: Context = None):
"""Get spotify playlists for the specified category."""
url = f"{CATEGORY_ENDPOINT}/{category}/playlists"
country_code = await self.get_country_code(ctx=ctx)
params: MutableMapping = {"country": country_code} if country_code else {}
result = await self.make_get_call(url, params=params)
playlists = result.get("playlists", {}).get("items", [])
return [
{
"name": c["name"],
"uri": c["uri"],
"url": c.get("external_urls", {}).get("spotify"),
"tracks": c.get("tracks", {}).get("total", "Unknown"),
}
async for c in AsyncIter(playlists)
if c
]
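# Endpoint mapping sketch (hypothetical keys, no request is made):
#   SpotifyWrapper.spotify_format_call("track", "some_track_id")
#     -> ("https://api.spotify.com/v1/tracks/some_track_id", {})
#   SpotifyWrapper.spotify_format_call("album", "some_album_id")
#     -> ("https://api.spotify.com/v1/albums/some_album_id/tracks", {})
# Any other query_type falls through to the playlists endpoint.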
|
from test import CollectorTestCase
from test import get_collector_config
from mock import patch
import os
from diamond.collector import Collector
from nagiosperfdata import NagiosPerfdataCollector
class TestNagiosPerfdataCollector(CollectorTestCase):
def setUp(self):
"""Set up the fixtures for the test
"""
fixtures_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'fixtures'))
config = get_collector_config('NagiosPerfdataCollector', {
'perfdata_dir': fixtures_dir
})
self.collector = NagiosPerfdataCollector(config, None)
self.fixtures = os.listdir(fixtures_dir)
def test_import(self):
"""Test that import works correctly
"""
self.assertTrue(NagiosPerfdataCollector)
@patch.object(NagiosPerfdataCollector, '_process_file')
def test_collect_should_list_fixtures(self, process_mock):
"""Test that collect() finds our test fixtures
"""
self.collector.collect()
self.assertTrue(process_mock.called)
def test_extract_fields_should_extract_fields(self):
"""Test that extract_fields() actually extracts fields
"""
s = "KEY1::VALUE1\tKEY2::VALUE2 KEY3::VALUE3"
fields = self.collector._extract_fields(s)
self.assertEqual(fields.get('KEY1'), 'VALUE1')
self.assertFalse('KEY2' in fields)
def test_fields_valid_should_not_validate_invalid_datatype(self):
fields = {'DATATYPE': 'BAD HOSTPERFDATA',
'HOSTNAME': 'testhost',
'HOSTPERFDATA': '',
'TIMET': 5304577351}
self.assertFalse(self.collector._fields_valid(fields))
def test_fields_valid_should_validate_complete_host_fields(self):
fields = {'DATATYPE': 'HOSTPERFDATA',
'HOSTNAME': 'testhost',
'HOSTPERFDATA': '',
'TIMET': 5304577351}
self.assertTrue(self.collector._fields_valid(fields))
def test_fields_valid_should_not_validate_incomplete_host_fields(self):
fields = {'DATATYPE': 'HOSTPERFDATA',
'HOSTNAME': 'testhost',
'TIMET': 5304577351}
self.assertFalse(self.collector._fields_valid(fields))
def test_fields_valid_should_validate_complete_service_fields(self):
fields = {'DATATYPE': 'SERVICEPERFDATA',
'HOSTNAME': 'testhost',
'TIMET': 5304577351,
'SERVICEDESC': 'Ping',
'SERVICEPERFDATA': ''}
self.assertTrue(self.collector._fields_valid(fields))
def test_fields_valid_should_not_validate_incomplete_service_fields(self):
fields = {'DATATYPE': 'SERVICEPERFDATA',
'HOSTNAME': 'testhost',
'TIMET': 5304577351,
'SERVICEDESC': 'Ping'}
self.assertFalse(self.collector._fields_valid(fields))
def test_normalize_to_unit_should_normalize(self):
self.assertEqual(self.collector._normalize_to_unit(1, None), 1.0)
self.assertEqual(self.collector._normalize_to_unit(1, 'KB'), 1024.0)
def test_parse_perfdata_should_parse_valid_perfdata(self):
perf = self.collector._parse_perfdata(
'rta=0.325ms;300.000;500.000;0; pl=0%;20;60;;')
expected_result = [('rta', 0.000325), ('pl', 0.0)]
self.assertEqual(perf, expected_result)
def test_parse_perfdata_should_not_parse_invalid_perfdata(self):
perf = self.collector._parse_perfdata(
'something with spaces=0.325ms;300.000;500.000;0; pl=0%;20;60;;')
unexpected_result = [('something with spaces', 0.000325), ('pl', 0.0)]
self.assertNotEqual(perf, unexpected_result)
@patch('os.remove')
@patch.object(Collector, 'publish')
def test_process_file_should_work_with_real_host_perfdata(
self, publish_mock, remove_mock):
path = self.getFixturePath('host-perfdata.0')
self.collector._process_file(path)
expected = {
'nagios.testhost.host.pl': 0,
'nagios.testhost.host.rta': 0.000325
}
self.assertPublishedMany(publish_mock, expected)
@patch('os.remove')
@patch.object(Collector, 'publish')
def test_process_file_should_work_with_real_service_perfdata(
self, publish_mock, remove_mock):
path = self.getFixturePath('service-perfdata.0')
self.collector._process_file(path)
expected = {
'nagios.testhost.nrpe_._home': 705181 * 1024 * 1024,
'nagios.testhost.nrpe_._data': 6090266 * 1024 * 1024,
'nagios.testhost.nrpe_._tmp': 6090266 * 1024 * 1024
}
self.assertPublishedMany(publish_mock, expected)
def test_sanitize_should_sanitize(self):
orig1 = 'myhost.mydomain'
sani1 = self.collector._sanitize(orig1)
self.assertEqual(sani1, 'myhost_mydomain')
orig2 = '/test/path'
sani2 = self.collector._sanitize(orig2)
self.assertEqual(sani2, '_test_path')
|
from __future__ import absolute_import
from pyspark.sql import SQLContext
from pyspark.mllib.regression import LabeledPoint
from ..utils.rdd_utils import from_labeled_point, to_labeled_point, lp_to_simple_rdd
from pyspark.mllib.linalg import Vector as MLLibVector, Vectors as MLLibVectors
def to_data_frame(sc, features, labels, categorical=False):
"""Convert numpy arrays of features and labels into Spark DataFrame
"""
lp_rdd = to_labeled_point(sc, features, labels, categorical)
sql_context = SQLContext(sc)
df = sql_context.createDataFrame(lp_rdd)
return df
def from_data_frame(df, categorical=False, nb_classes=None):
"""Convert DataFrame back to pair of numpy arrays
"""
lp_rdd = df.rdd.map(lambda row: LabeledPoint(row.label, row.features))
features, labels = from_labeled_point(lp_rdd, categorical, nb_classes)
return features, labels
def df_to_simple_rdd(df, categorical=False, nb_classes=None, features_col='features', label_col='label'):
"""Convert DataFrame into RDD of pairs
"""
sql_context = df.sql_ctx
sql_context.registerDataFrameAsTable(df, "temp_table")
selected_df = sql_context.sql(
"SELECT {0} AS features, {1} as label from temp_table".format(features_col, label_col))
if isinstance(selected_df.first().features, MLLibVector):
lp_rdd = selected_df.rdd.map(
lambda row: LabeledPoint(row.label, row.features))
else:
lp_rdd = selected_df.rdd.map(lambda row: LabeledPoint(
row.label, MLLibVectors.fromML(row.features)))
rdd = lp_to_simple_rdd(lp_rdd, categorical, nb_classes)
return rdd
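# Usage sketch (assumes a live SparkContext `sc` plus numpy arrays `features` and
# `labels`; the names are placeholders, not defined in this module):
#   df = to_data_frame(sc, features, labels, categorical=True)
#   rdd = df_to_simple_rdd(df, categorical=True, nb_classes=10)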
|
from homeassistant.helpers.entity import ToggleEntity
from .const import (
ADVANTAGE_AIR_STATE_OFF,
ADVANTAGE_AIR_STATE_ON,
DOMAIN as ADVANTAGE_AIR_DOMAIN,
)
from .entity import AdvantageAirEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up AdvantageAir toggle platform."""
instance = hass.data[ADVANTAGE_AIR_DOMAIN][config_entry.entry_id]
entities = []
for ac_key, ac_device in instance["coordinator"].data["aircons"].items():
if ac_device["info"]["freshAirStatus"] != "none":
entities.append(AdvantageAirFreshAir(instance, ac_key))
async_add_entities(entities)
class AdvantageAirFreshAir(AdvantageAirEntity, ToggleEntity):
"""Representation of Advantage Air fresh air control."""
@property
def name(self):
"""Return the name."""
return f'{self._ac["name"]} Fresh Air'
@property
def unique_id(self):
"""Return a unique id."""
return f'{self.coordinator.data["system"]["rid"]}-{self.ac_key}-freshair'
@property
def is_on(self):
"""Return the fresh air status."""
return self._ac["freshAirStatus"] == ADVANTAGE_AIR_STATE_ON
@property
def icon(self):
"""Return a representative icon of the fresh air switch."""
return "mdi:air-filter"
async def async_turn_on(self, **kwargs):
"""Turn fresh air on."""
await self.async_change(
{self.ac_key: {"info": {"freshAirStatus": ADVANTAGE_AIR_STATE_ON}}}
)
async def async_turn_off(self, **kwargs):
"""Turn fresh air off."""
await self.async_change(
{self.ac_key: {"info": {"freshAirStatus": ADVANTAGE_AIR_STATE_OFF}}}
)
|
import asyncio
from datetime import timedelta
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.const import CONF_DOMAIN, CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "google_domains"
INTERVAL = timedelta(minutes=5)
DEFAULT_TIMEOUT = 10
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
},
extra=vol.ALLOW_EXTRA,
)
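# Example configuration.yaml entry (illustrative; keys mirror CONFIG_SCHEMA
# above, values are placeholders):
#
#   google_domains:
#     domain: subdomain.example.com
#     username: generated_username
#     password: generated_password
#     timeout: 10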
async def async_setup(hass, config):
"""Initialize the Google Domains component."""
domain = config[DOMAIN].get(CONF_DOMAIN)
user = config[DOMAIN].get(CONF_USERNAME)
password = config[DOMAIN].get(CONF_PASSWORD)
timeout = config[DOMAIN].get(CONF_TIMEOUT)
session = hass.helpers.aiohttp_client.async_get_clientsession()
result = await _update_google_domains(
hass, session, domain, user, password, timeout
)
if not result:
return False
async def update_domain_interval(now):
"""Update the Google Domains entry."""
await _update_google_domains(hass, session, domain, user, password, timeout)
hass.helpers.event.async_track_time_interval(update_domain_interval, INTERVAL)
return True
async def _update_google_domains(hass, session, domain, user, password, timeout):
"""Update Google Domains."""
url = f"https://{user}:{password}@domains.google.com/nic/update"
params = {"hostname": domain}
try:
with async_timeout.timeout(timeout):
resp = await session.get(url, params=params)
body = await resp.text()
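            # "good" and "nochg" are the dyndns2-style success responses
            # returned by the Google Domains update endpoint.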
if body.startswith("good") or body.startswith("nochg"):
return True
_LOGGER.warning("Updating Google Domains failed: %s => %s", domain, body)
except aiohttp.ClientError:
_LOGGER.warning("Can't connect to Google Domains API")
except asyncio.TimeoutError:
_LOGGER.warning("Timeout from Google Domains API for domain: %s", domain)
return False
|
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
ATTRIBUTION,
DOMAIN,
MANUFACTURER,
SIGNAL_THERMOSTAT_UPDATE,
SIGNAL_ZONE_UPDATE,
)
class NexiaEntity(CoordinatorEntity):
"""Base class for nexia entities."""
def __init__(self, coordinator, name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator)
self._unique_id = unique_id
self._name = name
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name."""
return self._name
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
}
class NexiaThermostatEntity(NexiaEntity):
"""Base class for nexia devices attached to a thermostat."""
def __init__(self, coordinator, thermostat, name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator, name, unique_id)
self._thermostat = thermostat
@property
def device_info(self):
"""Return the device_info of the device."""
return {
"identifiers": {(DOMAIN, self._thermostat.thermostat_id)},
"name": self._thermostat.get_name(),
"model": self._thermostat.get_model(),
"sw_version": self._thermostat.get_firmware(),
"manufacturer": MANUFACTURER,
}
async def async_added_to_hass(self):
"""Listen for signals for services."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SIGNAL_THERMOSTAT_UPDATE}-{self._thermostat.thermostat_id}",
self.async_write_ha_state,
)
)
class NexiaThermostatZoneEntity(NexiaThermostatEntity):
"""Base class for nexia devices attached to a thermostat."""
def __init__(self, coordinator, zone, name, unique_id):
"""Initialize the entity."""
super().__init__(coordinator, zone.thermostat, name, unique_id)
self._zone = zone
@property
def device_info(self):
"""Return the device_info of the device."""
data = super().device_info
data.update(
{
"identifiers": {(DOMAIN, self._zone.zone_id)},
"name": self._zone.get_name(),
"via_device": (DOMAIN, self._zone.thermostat.thermostat_id),
}
)
return data
async def async_added_to_hass(self):
"""Listen for signals for services."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{SIGNAL_ZONE_UPDATE}-{self._zone.zone_id}",
self.async_write_ha_state,
)
)
|
from requests.exceptions import RequestException
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.vera import CONF_CONTROLLER, CONF_LEGACY_UNIQUE_ID, DOMAIN
from homeassistant.const import CONF_EXCLUDE, CONF_LIGHTS, CONF_SOURCE
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry, mock_registry
async def test_async_step_user_success(hass: HomeAssistant) -> None:
"""Test user step success."""
with patch("pyvera.VeraController") as vera_controller_class_mock:
controller = MagicMock()
controller.refresh_data = MagicMock()
controller.serial_number = "serial_number_0"
vera_controller_class_mock.return_value = controller
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == config_entries.SOURCE_USER
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={
CONF_CONTROLLER: "http://127.0.0.1:123/",
CONF_LIGHTS: "12 13",
CONF_EXCLUDE: "14 15",
},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "http://127.0.0.1:123"
assert result["data"] == {
CONF_CONTROLLER: "http://127.0.0.1:123",
CONF_SOURCE: config_entries.SOURCE_USER,
CONF_LIGHTS: [12, 13],
CONF_EXCLUDE: [14, 15],
CONF_LEGACY_UNIQUE_ID: False,
}
assert result["result"].unique_id == controller.serial_number
entries = hass.config_entries.async_entries(DOMAIN)
assert entries
async def test_async_step_import_success(hass: HomeAssistant) -> None:
"""Test import step success."""
with patch("pyvera.VeraController") as vera_controller_class_mock:
controller = MagicMock()
controller.refresh_data = MagicMock()
controller.serial_number = "serial_number_1"
vera_controller_class_mock.return_value = controller
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_CONTROLLER: "http://127.0.0.1:123/"},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "http://127.0.0.1:123"
assert result["data"] == {
CONF_CONTROLLER: "http://127.0.0.1:123",
CONF_SOURCE: config_entries.SOURCE_IMPORT,
CONF_LEGACY_UNIQUE_ID: False,
}
assert result["result"].unique_id == controller.serial_number
async def test_async_step_import_success_with_legacy_unique_id(
hass: HomeAssistant,
) -> None:
"""Test import step success with legacy unique id."""
entity_registry = mock_registry(hass)
entity_registry.async_get_or_create(
domain="switch", platform=DOMAIN, unique_id="12"
)
with patch("pyvera.VeraController") as vera_controller_class_mock:
controller = MagicMock()
controller.refresh_data = MagicMock()
controller.serial_number = "serial_number_1"
vera_controller_class_mock.return_value = controller
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_CONTROLLER: "http://127.0.0.1:123/"},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "http://127.0.0.1:123"
assert result["data"] == {
CONF_CONTROLLER: "http://127.0.0.1:123",
CONF_SOURCE: config_entries.SOURCE_IMPORT,
CONF_LEGACY_UNIQUE_ID: True,
}
assert result["result"].unique_id == controller.serial_number
async def test_async_step_finish_error(hass: HomeAssistant) -> None:
"""Test finish step with error."""
with patch("pyvera.VeraController") as vera_controller_class_mock:
controller = MagicMock()
controller.refresh_data = MagicMock(side_effect=RequestException())
vera_controller_class_mock.return_value = controller
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_CONTROLLER: "http://127.0.0.1:123/"},
)
assert result["type"] == "abort"
assert result["reason"] == "cannot_connect"
assert result["description_placeholders"] == {
"base_url": "http://127.0.0.1:123"
}
async def test_options(hass: HomeAssistant) -> None:
"""Test updating options."""
base_url = "http://127.0.0.1/"
entry = MockConfigEntry(
domain=DOMAIN,
title=base_url,
data={CONF_CONTROLLER: "http://127.0.0.1/"},
options={CONF_LIGHTS: [1, 2, 3]},
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(
entry.entry_id, context={"source": "test"}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_LIGHTS: "1,2;3 4 5_6bb7",
CONF_EXCLUDE: "8,9;10 11 12_13bb14",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_LIGHTS: [1, 2, 3, 4, 5, 6, 7],
CONF_EXCLUDE: [8, 9, 10, 11, 12, 13, 14],
}
|
from flask import current_app
class LemurException(Exception):
def __init__(self, *args, **kwargs):
current_app.logger.exception(self)
class DuplicateError(LemurException):
def __init__(self, key):
self.key = key
def __str__(self):
return repr("Duplicate found! Could not create: {0}".format(self.key))
class InvalidListener(LemurException):
def __str__(self):
return repr(
"Invalid listener, ensure you select a certificate if you are using a secure protocol"
)
class AttrNotFound(LemurException):
def __init__(self, field):
self.field = field
def __str__(self):
return repr("The field '{0}' is not sortable or filterable".format(self.field))
class InvalidConfiguration(Exception):
pass
class InvalidAuthority(Exception):
pass
class UnknownProvider(Exception):
pass
|
import asyncio
import pydeconz
from homeassistant.components.deconz.config_flow import (
CONF_MANUAL_INPUT,
CONF_SERIAL,
DECONZ_MANUFACTURERURL,
)
from homeassistant.components.deconz.const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_DECONZ_GROUPS,
CONF_ALLOW_NEW_DEVICES,
CONF_MASTER_GATEWAY,
DOMAIN,
)
from homeassistant.components.ssdp import (
ATTR_SSDP_LOCATION,
ATTR_UPNP_MANUFACTURER_URL,
ATTR_UPNP_SERIAL,
)
from homeassistant.config_entries import SOURCE_HASSIO, SOURCE_SSDP, SOURCE_USER
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT, CONTENT_TYPE_JSON
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from .test_gateway import API_KEY, BRIDGEID, setup_deconz_integration
from tests.async_mock import patch
BAD_BRIDGEID = "0000000000000000"
async def test_flow_discovered_bridges(hass, aioclient_mock):
"""Test that config flow works for discovered bridges."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[
{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80},
{"id": "1234E567890A", "internalipaddress": "5.6.7.8", "internalport": 80},
],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4"}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_flow_manual_configuration_decision(hass, aioclient_mock):
"""Test that config flow for one discovered bridge works."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80}],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: CONF_MANUAL_INPUT}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_flow_manual_configuration(hass, aioclient_mock):
"""Test that config flow works with manual configuration after no discovered bridges."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_manual_configuration_after_discovery_timeout(hass, aioclient_mock):
"""Test failed discovery fallbacks to manual configuration."""
aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=asyncio.TimeoutError)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
assert not hass.config_entries.flow._progress[result["flow_id"]].bridges
async def test_manual_configuration_after_discovery_ResponseError(hass, aioclient_mock):
"""Test failed discovery fallbacks to manual configuration."""
aioclient_mock.get(pydeconz.utils.URL_DISCOVER, exc=pydeconz.errors.ResponseError)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
assert not hass.config_entries.flow._progress[result["flow_id"]].bridges
async def test_manual_configuration_update_configuration(hass, aioclient_mock):
"""Test that manual configuration can update existing config entry."""
config_entry = await setup_deconz_integration(hass)
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "2.3.4.5", CONF_PORT: 80},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://2.3.4.5:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://2.3.4.5:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert config_entry.data[CONF_HOST] == "2.3.4.5"
async def test_manual_configuration_dont_update_configuration(hass, aioclient_mock):
    """Test that manual configuration does not update an already configured entry."""
await setup_deconz_integration(hass)
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config",
json={"bridgeid": BRIDGEID},
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_manual_configuration_timeout_get_bridge(hass, aioclient_mock):
"""Test that _create_entry handles a timeout."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "manual_input"
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={CONF_HOST: "1.2.3.4", CONF_PORT: 80},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
f"http://1.2.3.4:80/api/{API_KEY}/config", exc=asyncio.TimeoutError
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "no_bridges"
async def test_link_get_api_key_ResponseError(hass, aioclient_mock):
"""Test config flow should abort if no API key was possible to retrieve."""
aioclient_mock.get(
pydeconz.utils.URL_DISCOVER,
json=[{"id": BRIDGEID, "internalipaddress": "1.2.3.4", "internalport": 80}],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_HOST: "1.2.3.4"}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post("http://1.2.3.4:80/api", exc=pydeconz.errors.ResponseError)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
assert result["errors"] == {"base": "no_key"}
async def test_flow_ssdp_discovery(hass, aioclient_mock):
"""Test that config flow for one discovered bridge works."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": SOURCE_SSDP},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == BRIDGEID
assert result["data"] == {
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
async def test_flow_ssdp_discovery_bad_bridge_id_aborts(hass, aioclient_mock):
"""Test that config flow aborts if deCONZ signals no radio hardware available."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ATTR_UPNP_SERIAL: BAD_BRIDGEID,
},
context={"source": SOURCE_SSDP},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "link"
aioclient_mock.post(
"http://1.2.3.4:80/api",
json=[{"success": {"username": API_KEY}}],
headers={"content-type": CONTENT_TYPE_JSON},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "no_hardware_available"
async def test_ssdp_discovery_not_deconz_bridge(hass):
"""Test a non deconz bridge being discovered over ssdp."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={ATTR_UPNP_MANUFACTURER_URL: "not deconz bridge"},
context={"source": SOURCE_SSDP},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "not_deconz_bridge"
async def test_ssdp_discovery_update_configuration(hass):
    """Test that a discovered bridge that is already configured is updated with new attributes."""
config_entry = await setup_deconz_integration(hass)
with patch(
"homeassistant.components.deconz.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ATTR_SSDP_LOCATION: "http://2.3.4.5:80/",
ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": SOURCE_SSDP},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert config_entry.data[CONF_HOST] == "2.3.4.5"
assert len(mock_setup_entry.mock_calls) == 1
async def test_ssdp_discovery_dont_update_configuration(hass):
"""Test if a discovered bridge has already been configured."""
config_entry = await setup_deconz_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": SOURCE_SSDP},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert config_entry.data[CONF_HOST] == "1.2.3.4"
async def test_ssdp_discovery_dont_update_existing_hassio_configuration(hass):
    """Test to ensure the SSDP discovery does not update a Hass.io entry."""
config_entry = await setup_deconz_integration(hass, source=SOURCE_HASSIO)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
ATTR_SSDP_LOCATION: "http://1.2.3.4:80/",
ATTR_UPNP_MANUFACTURER_URL: DECONZ_MANUFACTURERURL,
ATTR_UPNP_SERIAL: BRIDGEID,
},
context={"source": SOURCE_SSDP},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert config_entry.data[CONF_HOST] == "1.2.3.4"
async def test_flow_hassio_discovery(hass):
"""Test hassio discovery flow works."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
"addon": "Mock Addon",
CONF_HOST: "mock-deconz",
CONF_PORT: 80,
CONF_SERIAL: BRIDGEID,
CONF_API_KEY: API_KEY,
},
context={"source": SOURCE_HASSIO},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "hassio_confirm"
assert result["description_placeholders"] == {"addon": "Mock Addon"}
with patch(
"homeassistant.components.deconz.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.deconz.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={}
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["result"].data == {
CONF_HOST: "mock-deconz",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_hassio_discovery_update_configuration(hass):
"""Test we can update an existing config entry."""
config_entry = await setup_deconz_integration(hass)
with patch(
"homeassistant.components.deconz.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
CONF_HOST: "2.3.4.5",
CONF_PORT: 8080,
CONF_API_KEY: "updated",
CONF_SERIAL: BRIDGEID,
},
context={"source": SOURCE_HASSIO},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert config_entry.data[CONF_HOST] == "2.3.4.5"
assert config_entry.data[CONF_PORT] == 8080
assert config_entry.data[CONF_API_KEY] == "updated"
assert len(mock_setup_entry.mock_calls) == 1
async def test_hassio_discovery_dont_update_configuration(hass):
    """Test we do not update an existing config entry."""
await setup_deconz_integration(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
data={
CONF_HOST: "1.2.3.4",
CONF_PORT: 80,
CONF_API_KEY: API_KEY,
CONF_SERIAL: BRIDGEID,
},
context={"source": SOURCE_HASSIO},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_option_flow(hass):
"""Test config flow options."""
config_entry = await setup_deconz_integration(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "deconz_devices"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_ALLOW_CLIP_SENSOR: False,
CONF_ALLOW_DECONZ_GROUPS: False,
CONF_ALLOW_NEW_DEVICES: False,
},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_ALLOW_CLIP_SENSOR: False,
CONF_ALLOW_DECONZ_GROUPS: False,
CONF_ALLOW_NEW_DEVICES: False,
CONF_MASTER_GATEWAY: True,
}
|
import os
from ... import event, app
from . import Widget
def _load_bokeh(ext):
import bokeh.resources
dev = os.environ.get('BOKEH_RESOURCES', '') == 'relative-dev'
res = bokeh.resources.bokehjsdir()
if dev:
res = os.path.abspath(os.path.join(bokeh.__file__,
'..', '..', 'bokehjs', 'build'))
modname = 'bokeh' if dev else 'bokeh.min'
filename = os.path.join(res, ext, modname + '.' + ext)
    with open(filename, 'rb') as f:
        return f.read().decode()
def _load_bokeh_js():
return _load_bokeh('js')
def _load_bokeh_css():
return _load_bokeh('css')
# Associate Bokeh asset, but in a "lazy" way, so that we don't attempt to
# import bokeh until the user actually instantiates a BokehWidget.
app.assets.associate_asset(__name__, 'bokeh.js', _load_bokeh_js)
app.assets.associate_asset(__name__, 'bokeh.css', _load_bokeh_css)
def make_bokeh_widget(plot, **kwargs):
from bokeh.models import Plot
from bokeh.embed import components
# Set plot prop
if not isinstance(plot, Plot):
raise ValueError('plot must be a Bokeh plot object.')
# The sizing_mode is fixed by default, but that's silly in this context
if plot.sizing_mode == 'fixed':
plot.sizing_mode = 'stretch_both'
# Get components and apply to widget
script, div = components(plot)
script = '\n'.join(script.strip().split('\n')[1:-1])
widget = BokehWidget(**kwargs)
widget.set_plot_components(
dict(script=script, div=div, id=plot.ref['id']))
return widget
class BokehWidget(Widget):
""" A widget that shows a Bokeh plot object.
    For Bokeh 0.12 and up. The plot's ``sizing_mode`` property is set to
    ``stretch_both`` unless it was already set to something other than
    ``fixed``. The other responsive modes are 'scale_width', 'scale_height'
    and 'scale_both', which all keep the aspect ratio while being responsive
    in a certain direction.
This widget is, like all widgets, a JsComponent; it lives in the browser,
while the Bokeh plot is a Python object. Therefore we cannot simply use
a property to set the plot. Use ``ui.BokehWidget.from_plot(plot)`` to
instantiate the widget from Python.
"""
DEFAULT_MIN_SIZE = 100, 100
CSS = """
.flx-BokehWidget > .plotdiv {
overflow: hidden;
}
"""
@classmethod
def from_plot(cls, plot, **kwargs):
""" Create a BokehWidget using a Bokeh plot.
"""
return make_bokeh_widget(plot, **kwargs)
plot = event.Attribute(doc="""The JS-side of the Bokeh plot object.""")
def _render_dom(self):
return None
@event.action
def set_plot_components(self, d):
""" Set the plot using its script/html components.
"""
global window
# Embed div
self.node.innerHTML = d.div # We put trust in d.div
# "exec" code
el = window.document.createElement('script')
el.innerHTML = d.script
self.node.appendChild(el)
# Get plot from id in next event-loop iter
def getplot():
self._plot = window.Bokeh.index[d.id]
self.__resize_plot()
window.setTimeout(getplot, 10)
@event.reaction('size')
def __resize_plot(self, *events):
if self.plot and self.parent:
if self.plot.resize:
self.plot.resize()
else:
self.plot.model.document.resize() # older
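# Hedged usage sketch (not part of this module; assumes Bokeh is installed and
# follows the ``from_plot`` hint in the class docstring above):
#
#     from bokeh.plotting import figure
#     from flexx import app, ui
#
#     p = figure(title="demo")
#     p.line([1, 2, 3], [4, 6, 5])
#
#     class Example(ui.Widget):
#         def init(self):
#             ui.BokehWidget.from_plot(p)
#
#     app.launch(Example)
#     app.run()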
|
import numpy as np
from ..utils import logger, verbose
@verbose
def is_equal(first, second, verbose=None):
"""Check if 2 python structures are the same.
Designed to handle dict, list, np.ndarray etc.
"""
all_equal = True
# Check all keys in first dict
if type(first) != type(second):
all_equal = False
if isinstance(first, dict):
for key in first.keys():
if (key not in second):
logger.info("Missing key %s in %s" % (key, second))
all_equal = False
else:
if not is_equal(first[key], second[key]):
all_equal = False
elif isinstance(first, np.ndarray):
if not np.allclose(first, second):
all_equal = False
    elif isinstance(first, list):
        if len(first) != len(second):
            logger.info('Lists have different lengths: %d and %d'
                        % (len(first), len(second)))
            all_equal = False
        for a, b in zip(first, second):
            if not is_equal(a, b):
                logger.info('%s and\n%s are different' % (a, b))
                all_equal = False
else:
if first != second:
logger.info('%s and\n%s are different' % (first, second))
all_equal = False
return all_equal
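# Illustrative calls (not part of the original module):
#   is_equal({'a': np.array([1.0, 2.0])}, {'a': np.array([1.0, 2.0])})  # -> True
#   is_equal([1, 2, 3], [1, 2, 4])                                      # -> False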
|
import logging
import re
from Handler import Handler
from diamond.collector import get_hostname
from configobj import Section
try:
import raven.handlers.logging
except ImportError:
raven = None
__author__ = 'Bruno Clermont'
__email__ = '[email protected]'
class InvalidRule(ValueError):
"""
invalid rule
"""
pass
class BaseResult(object):
"""
Base class for a Rule minimum/maximum check result
"""
adjective = None
def __init__(self, value, threshold):
"""
@type value: float
@param value: metric value
@type threshold: float
        @param threshold: value that triggers a warning
"""
self.value = value
self.threshold = threshold
@property
def verbose_message(self):
"""return more complete message"""
if self.threshold is None:
return 'No threshold'
return '%.1f is %s than %.1f' % (self.value,
self.adjective,
self.threshold)
@property
def _is_error(self):
raise NotImplementedError('_is_error')
@property
def is_error(self):
"""
        Under Python 2, comparisons against None silently succeed:
        >>> 1.0 > None
        True
        >>> 1.0 < None
        False
        so we explicitly check that the threshold is not None before
        returning _is_error.
"""
if self.threshold is None:
return False
return self._is_error
def __str__(self):
name = self.__class__.__name__.lower()
if self.threshold is None:
return '%s: %.1f no threshold' % (name, self.value)
return '%.1f (%s: %.1f)' % (self.value, name, self.threshold)
class Minimum(BaseResult):
"""
Minimum result
"""
adjective = 'lower'
@property
def _is_error(self):
"""if it's too low"""
return self.value < self.threshold
class Maximum(BaseResult):
"""
Maximum result
"""
adjective = 'higher'
@property
def _is_error(self):
"""if it's too high"""
return self.value > self.threshold
class Rule(object):
"""
Alert rule
"""
def __init__(self, name, path, min=None, max=None):
"""
@type name: string
@param name: rule name, used to identify this rule in Sentry
@type path: string
@param path: un-compiled regular expression of the path of the rule
@type min: string of float/int, int or float. will be convert to float
@param min: optional minimal value that if value goes below it send
an alert to Sentry
@type max: string of float/int, int or float. will be convert to float
@param max: optional maximal value that if value goes over it send
an alert to Sentry
"""
self.name = name
# counters that can be used to debug rule
self.counter_errors = 0
self.counter_pass = 0
# force min and max to be float
try:
self.min = float(min)
except TypeError:
self.min = None
try:
self.max = float(max)
except TypeError:
self.max = None
if self.min is None and self.max is None:
raise InvalidRule("%s: %s: both min and max are unset or invalid"
% (name, path))
if self.min is not None and self.max is not None:
if self.min > self.max:
raise InvalidRule("min %.1f is larger than max %.1f" % (
self.min, self.max))
# compile path regular expression
self.regexp = re.compile(r'(?P<prefix>.*)\.(?P<path>%s)$' % path)
def process(self, metric, handler):
"""
process a single diamond metric
@type metric: diamond.metric.Metric
@param metric: metric to process
@type handler: diamond.handler.sentry.SentryHandler
@param handler: configured Sentry graphite handler
@rtype None
"""
match = self.regexp.match(metric.path)
if match:
minimum = Minimum(metric.value, self.min)
maximum = Maximum(metric.value, self.max)
if minimum.is_error or maximum.is_error:
self.counter_errors += 1
message = "%s Warning on %s: %.1f" % (self.name,
handler.hostname,
metric.value)
culprit = "%s %s" % (handler.hostname, match.group('path'))
handler.raven_logger.error(message, extra={
'culprit': culprit,
'data': {
'metric prefix': match.group('prefix'),
'metric path': match.group('path'),
'minimum check': minimum.verbose_message,
'maximum check': maximum.verbose_message,
'metric original path': metric.path,
'metric value': metric.value,
'metric precision': metric.precision,
'metric timestamp': metric.timestamp,
'minimum threshold': self.min,
'maximum threshold': self.max,
'path regular expression': self.regexp.pattern,
'total errors': self.counter_errors,
'total pass': self.counter_pass,
'hostname': handler.hostname
}
}
)
else:
self.counter_pass += 1
def __repr__(self):
return '%s: min:%s max:%s %s' % (self.name, self.min, self.max,
self.regexp.pattern)
class SentryHandler(Handler):
"""
    Diamond handler that checks if a metric goes too low or too high
"""
# valid key name in rules sub-section
VALID_RULES_KEYS = ('name', 'path', 'min', 'max')
def __init__(self, config=None):
"""
@type config: configobj.ConfigObj
"""
Handler.__init__(self, config)
        if not raven:
            self.log.error('raven.handlers.logging import failed. '
                           'Handler disabled')
            self.enabled = False
            return
# init sentry/raven
self.sentry_log_handler = raven.handlers.logging.SentryHandler(
self.config['dsn'])
self.raven_logger = logging.getLogger(self.__class__.__name__)
self.raven_logger.addHandler(self.sentry_log_handler)
self.configure_sentry_errors()
self.rules = self.compile_rules()
self.hostname = get_hostname(self.config)
        if not self.rules:
            self.log.warning("No rules configured, this Sentry handler is unused")
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(SentryHandler, self).get_default_config_help()
config.update({
'dsn': '',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(SentryHandler, self).get_default_config()
config.update({
'dsn': '',
})
return config
def compile_rules(self):
"""
Compile alert rules
@rtype list of Rules
"""
output = []
# validate configuration, skip invalid section
for key_name, section in self.config.items():
rule = self.compile_section(section)
if rule is not None:
output.append(rule)
return output
def compile_section(self, section):
"""
Validate if a section is a valid rule
@type section: configobj.Section
@param section: section to validate
@rtype Rule or None
@return None if invalid
"""
if section.__class__ != Section:
# not a section, just skip
return
# name and path are mandatory
keys = section.keys()
for key in ('name', 'path'):
if key not in keys:
                self.log.warning("section %s is missing key '%s', ignoring",
                                 section.name, key)
return
# just warn if invalid key in section
for key in keys:
if key not in self.VALID_RULES_KEYS:
self.log.warning("invalid key %s in section %s",
key, section.name)
# need at least a min or a max
if 'min' not in keys and 'max' not in keys:
            self.log.warning("neither 'min' nor 'max' is defined in %s",
                             section.name)
return
# add rule to the list
kwargs = {
'name': section['name'],
'path': section['path']
}
for argument in ('min', 'max'):
try:
kwargs[argument] = section[argument]
except KeyError:
pass
# init rule
try:
return Rule(**kwargs)
except InvalidRule as err:
self.log.error(str(err))
def configure_sentry_errors(self):
"""
Configure sentry.errors to use the same loggers as the root handler
@rtype: None
"""
sentry_errors_logger = logging.getLogger('sentry.errors')
root_logger = logging.getLogger()
for handler in root_logger.handlers:
sentry_errors_logger.addHandler(handler)
def process(self, metric):
"""
process a single metric
@type metric: diamond.metric.Metric
@param metric: metric to process
@rtype None
"""
for rule in self.rules:
rule.process(metric, self)
def __repr__(self):
return "SentryHandler '%s' %d rules" % (
self.sentry_log_handler.client.servers, len(self.rules))
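# Illustrative handler configuration (configobj syntax; the DSN and the rule
# section below are placeholders, and the exact section nesting depends on how
# diamond loads handler configs). Only 'dsn' plus per-rule name/path/min/max
# are read by this handler:
#
#   [[SentryHandler]]
#   dsn = https://public_key:[email protected]/1
#
#   [[[load_average]]]
#   name = High load average
#   path = loadavg\.01
#   max = 8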
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import copy
import itertools
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import linux_benchmarks
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import os_types
from perfkitbenchmarker import windows_benchmarks
from perfkitbenchmarker import windows_packages
import six
from six.moves import zip
FLAGS = flags.FLAGS
flags.DEFINE_string('flag_matrix', None,
'The name of the flag matrix to run.')
flags.DEFINE_string('flag_zip', None,
'The name of the flag zip to run.')
flags.DEFINE_integer('num_benchmark_copies', 1,
'The number of copies of each benchmark config to run.')
MESSAGE = 'message'
BENCHMARK_LIST = 'benchmark_list'
STANDARD_SET = 'standard_set'
BENCHMARK_SETS = {
STANDARD_SET: {
MESSAGE: ('The standard_set is a community agreed upon set of '
'benchmarks to measure Cloud performance.'),
BENCHMARK_LIST: [
'aerospike',
'block_storage_workload',
'cassandra_stress',
'cluster_boot',
'copy_throughput',
'coremark',
'fio',
'hadoop_terasort',
'hpcc',
'iperf',
'mesh_network',
'mongodb_ycsb',
'netperf',
'object_storage_service',
'ping',
'redis',
'speccpu2006',
'sysbench',
'unixbench',
]
},
'arm_set': {
MESSAGE: 'ARM benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'alicloud_set': {
MESSAGE: 'AliCloud benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'broadcom_set': {
MESSAGE: 'Broadcom benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'canonical_set': {
MESSAGE: 'Canonical benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'centurylinkcloud_set': {
MESSAGE:
'This benchmark set is supported on CenturyLink Cloud.',
BENCHMARK_LIST: [
'cassandra_stress',
'copy_throughput',
'hpcc',
'iperf',
'mesh_network',
'mongodb_ycsb',
'ping',
'redis',
'sysbench',
'unixbench',
]
},
'cisco_set': {
MESSAGE: 'Cisco benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'cloudharmony_set': {
MESSAGE: 'CloudHarmony benchmark set.',
BENCHMARK_LIST: [
'speccpu2006',
'unixbench',
]
},
'cloudspectator_set': {
MESSAGE: 'CloudSpectator benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'google_set': {
MESSAGE: ('This benchmark set is maintained by Google Cloud Platform '
'Performance Team.'),
BENCHMARK_LIST: [
'aerospike_ycsb',
'bidirectional_network',
'block_storage_workload',
'cassandra_stress',
'cassandra_ycsb',
'cluster_boot',
'copy_throughput',
'fio',
'gpu_pcie_bandwidth',
'hadoop_terasort',
'horovod',
'hpcc',
'hpcg',
'inception3',
'iperf',
'mesh_network',
'mlperf',
'mnist',
'mongodb_ycsb',
'multichase',
'mxnet',
'netperf',
'object_storage_service',
'oldisim',
'pgbench',
'ping',
'redis_ycsb',
'resnet',
'stencil2d',
'speccpu2006',
'sysbench',
'tensorflow',
'tensorflow_serving',
'tomcat_wrk',
'unixbench',
]
},
'intel_set': {
MESSAGE:
'Intel benchmark set.',
BENCHMARK_LIST: [
'fio',
'iperf',
'unixbench',
'hpcc',
'cluster_boot',
'redis',
'cassandra_stress',
'object_storage_service',
'sysbench',
]
},
'kubernetes_set': {
MESSAGE:
'Kubernetes benchmark set.',
BENCHMARK_LIST: [
'block_storage_workload',
'cassandra_ycsb',
'cassandra_stress',
'cluster_boot',
'fio',
'iperf',
'mesh_network',
'mongodb_ycsb',
'netperf',
'redis',
'sysbench',
]
},
'mellanox_set': {
MESSAGE: 'Mellanox benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'microsoft_set': {
MESSAGE: 'Microsoft benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'qualcomm_technologies_set': {
MESSAGE: 'Qualcomm Technologies, Inc. benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'rackspace_set': {
MESSAGE:
'Rackspace benchmark set.',
BENCHMARK_LIST: [
'aerospike',
'block_storage_workload',
'cassandra_stress',
'cluster_boot',
'copy_throughput',
'fio',
'hpcc',
'iperf',
'mesh_network',
'mongodb_ycsb',
'netperf',
'oldisim',
'ping',
'redis',
'silo',
'sysbench',
'unixbench',
]
},
'red_hat_set': {
MESSAGE: 'Red Hat benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'tradeworx_set': {
MESSAGE: 'Tradeworx Inc. benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'thesys_technologies_set': {
MESSAGE: 'Thesys Technologies LLC. benchmark set.',
BENCHMARK_LIST: [STANDARD_SET]
},
'stanford_set': {
MESSAGE: 'Stanford University benchmark set.',
BENCHMARK_LIST: [STANDARD_SET, 'oldisim']
},
'mit_set': {
MESSAGE: 'Massachusetts Institute of Technology benchmark set.',
BENCHMARK_LIST: [STANDARD_SET, 'silo']
},
'cloudsuite_set': {
MESSAGE:
'CloudSuite benchmark set.',
BENCHMARK_LIST: [
'cloudsuite_data_analytics',
'cloudsuite_data_caching',
'cloudsuite_graph_analytics',
'cloudsuite_in_memory_analytics',
'cloudsuite_media_streaming',
'cloudsuite_web_search',
'cloudsuite_web_serving',
]
}
}
class FlagMatrixNotFoundException(Exception):
pass
class FlagZipNotFoundException(Exception):
pass
def _GetValidBenchmarks():
"""Returns a dict mapping valid benchmark names to their modules."""
if FLAGS.os_type in os_types.WINDOWS_OS_TYPES:
return windows_benchmarks.VALID_BENCHMARKS
return linux_benchmarks.VALID_BENCHMARKS
def _GetValidPackages():
"""Returns a dict mapping valid package names to their modules."""
if FLAGS.os_type in os_types.WINDOWS_OS_TYPES:
return windows_packages.PACKAGES
return linux_packages.PACKAGES
def BenchmarkModule(benchmark_name):
"""Finds the module for a benchmark by name.
Args:
benchmark_name: The name of the benchmark.
Returns:
The benchmark's module, or None if the benchmark is invalid.
"""
valid_benchmarks = _GetValidBenchmarks()
return valid_benchmarks.get(benchmark_name)
def PackageModule(package_name):
"""Finds the module for a package by name.
Args:
package_name: The name of the package.
Returns:
The package's module, or None if the package_name is invalid.
"""
packages = _GetValidPackages()
return packages.get(package_name)
def _GetBenchmarksFromUserConfig(user_config):
"""Returns a list of benchmark module, config tuples."""
benchmarks = user_config.get('benchmarks', [])
valid_benchmarks = _GetValidBenchmarks()
benchmark_config_list = []
for entry in benchmarks:
name, user_config = entry.popitem()
try:
benchmark_module = valid_benchmarks[name]
except KeyError:
raise ValueError('Benchmark "%s" not valid on os_type "%s"' %
(name, FLAGS.os_type))
benchmark_config_list.append((benchmark_module, user_config))
return benchmark_config_list
def _GetConfigForAxis(benchmark_config, flag_config):
  """Returns a copy of benchmark_config with the given flag settings applied."""
  config = copy.copy(benchmark_config)
config_local_flags = config.get('flags', {})
config['flags'] = copy.deepcopy(configs.GetConfigFlags())
config['flags'].update(config_local_flags)
for setting in flag_config:
config['flags'].update(setting)
return config
def _AssertZipAxesHaveSameLength(axes):
  """Raises ValueError if the zipped flag axes do not all have the same length."""
  expected_length = len(axes[0])
for axis in axes[1:]:
if len(axis) != expected_length:
raise ValueError('flag_zip axes must all be the same length')
def _AssertFlagMatrixAndZipDefsExist(benchmark_config,
flag_matrix_name,
flag_zip_name):
"""Asserts that specified flag_matrix and flag_zip exist.
Both flag_matrix_name and flag_zip_name can be None, meaning that the user
(or the benchmark_config) did not specify them.
Args:
benchmark_config: benchmark_config
flag_matrix_name: name of the flag_matrix_def specified by the user via a
flag, specified in the benchmark_config, or None.
flag_zip_name: name of the flag_zip_def specified by the user via a flag,
specified in the benchmark_config, or None.
Raises:
FlagMatrixNotFoundException: if flag_matrix_name is not None, and is not
found in the flag_matrix_defs section of the benchmark_config.
FlagZipNotFoundException: if flag_zip_name is not None, and is not
found in the flag_zip_defs section of the benchmark_config.
"""
if (flag_matrix_name and
flag_matrix_name not in
benchmark_config.get('flag_matrix_defs', {})):
raise FlagMatrixNotFoundException('No flag_matrix with name {0}'
.format(flag_matrix_name))
if (flag_zip_name and
flag_zip_name not in
benchmark_config.get('flag_zip_defs', {})):
raise FlagZipNotFoundException('No flag_zip with name {0}'
.format(flag_zip_name))
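# Illustrative user-config fragment (YAML; the benchmark and flag names are
# placeholders) showing how a flag matrix crosses its axes while a flag zip
# pairs them position-wise:
#
#   netperf:
#     flag_matrix: cross
#     flag_matrix_defs:
#       cross:
#         netperf_benchmarks: [TCP_RR, TCP_STREAM]
#         ip_addresses: [INTERNAL, EXTERNAL]     # 2 x 2 = 4 configs
#     flag_zip_defs:
#       paired:
#         netperf_benchmarks: [TCP_RR, TCP_STREAM]
#         ip_addresses: [INTERNAL, EXTERNAL]     # zipped = 2 configs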
def GetBenchmarksFromFlags():
"""Returns a list of benchmarks to run based on the benchmarks flag.
If no benchmarks (or sets) are specified, this will return the standard set.
If multiple sets or mixes of sets and benchmarks are specified, this will
return the union of all sets and individual benchmarks.
Raises:
ValueError: when benchmark_name is not valid for os_type supplied
"""
user_config = configs.GetUserConfig()
benchmark_config_list = _GetBenchmarksFromUserConfig(user_config)
if benchmark_config_list and not FLAGS['benchmarks'].present:
return benchmark_config_list
benchmark_queue = collections.deque(FLAGS.benchmarks)
benchmark_names = []
benchmark_set = set()
while benchmark_queue:
benchmark = benchmark_queue.popleft()
if benchmark in benchmark_set:
continue
benchmark_set.add(benchmark)
if benchmark in BENCHMARK_SETS:
benchmark_queue.extendleft(BENCHMARK_SETS[benchmark][BENCHMARK_LIST])
else:
benchmark_names.append(benchmark)
valid_benchmarks = _GetValidBenchmarks()
# create a list of module, config tuples to return
benchmark_config_list = []
for benchmark_name in benchmark_names:
benchmark_config = user_config.get(benchmark_name, {})
benchmark_name = benchmark_config.get('name', benchmark_name)
benchmark_module = valid_benchmarks.get(benchmark_name)
if benchmark_module is None:
raise ValueError('Benchmark "%s" not valid on os_type "%s"' %
(benchmark_name, FLAGS.os_type))
flag_matrix_name = (
FLAGS.flag_matrix or benchmark_config.get('flag_matrix', None)
)
flag_zip_name = (
FLAGS.flag_zip or benchmark_config.get('flag_zip', None)
)
_AssertFlagMatrixAndZipDefsExist(benchmark_config,
flag_matrix_name,
flag_zip_name)
# We need to remove the 'flag_matrix', 'flag_matrix_defs', 'flag_zip',
# 'flag_zip_defs', and 'flag_matrix_filters' keys from the config
# dictionary since they aren't actually part of the config spec and will
# cause errors if they are left in.
benchmark_config.pop('flag_matrix', None)
benchmark_config.pop('flag_zip', None)
flag_matrix = benchmark_config.pop(
'flag_matrix_defs', {}).get(flag_matrix_name, {})
flag_matrix_filter = benchmark_config.pop(
'flag_matrix_filters', {}).get(flag_matrix_name, {})
flag_zip = benchmark_config.pop(
'flag_zip_defs', {}).get(flag_zip_name, {})
zipped_axes = []
crossed_axes = []
if flag_zip:
flag_axes = []
for flag, values in six.iteritems(flag_zip):
flag_axes.append([{flag: v} for v in values])
_AssertZipAxesHaveSameLength(flag_axes)
for flag_config in zip(*flag_axes):
config = _GetConfigForAxis(benchmark_config, flag_config)
zipped_axes.append((benchmark_module, config))
crossed_axes.append([benchmark_tuple[1]['flags'] for
benchmark_tuple in zipped_axes])
for flag, values in sorted(six.iteritems(flag_matrix)):
crossed_axes.append([{flag: v} for v in values])
for flag_config in itertools.product(*crossed_axes):
config = _GetConfigForAxis(benchmark_config, flag_config)
if (flag_matrix_filter and not eval(
flag_matrix_filter, {}, config['flags'])):
continue
benchmark_config_list.extend([(benchmark_module, config)] *
FLAGS.num_benchmark_copies)
return benchmark_config_list
|
import json
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from homeassistant.config import DATA_CUSTOMIZE
from tests.async_mock import patch
async def test_get_entity(hass, hass_client):
"""Test getting entity."""
with patch.object(config, "SECTIONS", ["customize"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
def mock_read(path):
"""Mock reading data."""
return {"hello.beer": {"free": "beer"}, "other.entity": {"do": "something"}}
hass.data[DATA_CUSTOMIZE] = {"hello.beer": {"cold": "beer"}}
with patch("homeassistant.components.config._read", mock_read):
resp = await client.get("/api/config/customize/config/hello.beer")
assert resp.status == 200
result = await resp.json()
assert result == {"local": {"free": "beer"}, "global": {"cold": "beer"}}
async def test_update_entity(hass, hass_client):
"""Test updating entity."""
with patch.object(config, "SECTIONS", ["customize"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = {
"hello.beer": {"ignored": True},
"other.entity": {"polling_intensity": 2},
}
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
hass.states.async_set("hello.world", "state", {"a": "b"})
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
), patch(
"homeassistant.config.async_hass_config_yaml",
return_value={},
):
resp = await client.post(
"/api/config/customize/config/hello.world",
data=json.dumps(
{"name": "Beer", "entities": ["light.top", "light.bottom"]}
),
)
await hass.async_block_till_done()
assert resp.status == 200
result = await resp.json()
assert result == {"result": "ok"}
state = hass.states.get("hello.world")
assert state.state == "state"
assert dict(state.attributes) == {
"a": "b",
"name": "Beer",
"entities": ["light.top", "light.bottom"],
}
orig_data["hello.world"]["name"] = "Beer"
orig_data["hello.world"]["entities"] = ["light.top", "light.bottom"]
assert written[0] == orig_data
async def test_update_entity_invalid_key(hass, hass_client):
    """Test updating an entity with an invalid entity id."""
with patch.object(config, "SECTIONS", ["customize"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
resp = await client.post(
"/api/config/customize/config/not_entity", data=json.dumps({"name": "YO"})
)
assert resp.status == 400
async def test_update_entity_invalid_json(hass, hass_client):
    """Test updating an entity with invalid JSON."""
with patch.object(config, "SECTIONS", ["customize"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
resp = await client.post("/api/config/customize/config/hello.beer", data="not json")
assert resp.status == 400
|
import asyncio
import logging
import pywilight
import requests
from homeassistant.const import CONF_HOST, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
_LOGGER = logging.getLogger(__name__)
class WiLightParent:
"""Manages a single WiLight Parent Device."""
def __init__(self, hass, config_entry):
"""Initialize the system."""
self._host = config_entry.data[CONF_HOST]
self._hass = hass
self._api = None
@property
def host(self):
"""Return the host of this parent."""
return self._host
@property
def api(self):
"""Return the api of this parent."""
return self._api
async def async_setup(self):
"""Set up a WiLight Parent Device based on host parameter."""
host = self._host
hass = self._hass
api_device = await hass.async_add_executor_job(create_api_device, host)
if api_device is None:
return False
@callback
def disconnected():
            # Signal platforms that the device has become unavailable.
_LOGGER.warning("WiLight %s disconnected", api_device.device_id)
async_dispatcher_send(
hass, f"wilight_device_available_{api_device.device_id}", False
)
@callback
def reconnected():
            # Signal platforms that the device is available again.
_LOGGER.warning("WiLight %s reconnect", api_device.device_id)
async_dispatcher_send(
hass, f"wilight_device_available_{api_device.device_id}", True
)
async def connect(api_device):
# Set up connection and hook it into HA for reconnect/shutdown.
_LOGGER.debug("Initiating connection to %s", api_device.device_id)
client = await api_device.config_client(
disconnect_callback=disconnected,
reconnect_callback=reconnected,
loop=asyncio.get_running_loop(),
logger=_LOGGER,
)
# handle shutdown of WiLight asyncio transport
hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, lambda x: client.stop()
)
_LOGGER.info("Connected to WiLight device: %s", api_device.device_id)
await connect(api_device)
self._api = api_device
return True
async def async_reset(self):
"""Reset api."""
# If the initialization was wrong.
if self._api is None:
return True
self._api.client.stop()
def create_api_device(host):
"""Create an API Device."""
try:
device = pywilight.device_from_host(host)
except (
requests.exceptions.ConnectionError,
requests.exceptions.Timeout,
) as err:
_LOGGER.error("Unable to access WiLight at %s (%s)", host, err)
return None
return device
|
from datetime import timedelta
from london_tube_status import TubeData
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
ATTRIBUTION = "Powered by TfL Open Data"
CONF_LINE = "line"
ICON = "mdi:subway"
SCAN_INTERVAL = timedelta(seconds=30)
TUBE_LINES = [
"Bakerloo",
"Central",
"Circle",
"District",
"DLR",
"Hammersmith & City",
"Jubilee",
"London Overground",
"Metropolitan",
"Northern",
"Piccadilly",
"TfL Rail",
"Victoria",
"Waterloo & City",
]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_LINE): vol.All(cv.ensure_list, [vol.In(list(TUBE_LINES))])}
)
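# Example platform configuration (illustrative; the platform name is assumed
# to be the london_underground integration this sensor belongs to):
#
#   sensor:
#     - platform: london_underground
#       line:
#         - Central
#         - Victoria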
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Tube sensor."""
data = TubeData()
data.update()
sensors = []
for line in config.get(CONF_LINE):
sensors.append(LondonTubeSensor(line, data))
add_entities(sensors, True)
class LondonTubeSensor(Entity):
"""Sensor that reads the status of a line from Tube Data."""
def __init__(self, name, data):
"""Initialize the London Underground sensor."""
self._data = data
self._description = None
self._name = name
self._state = None
self.attrs = {ATTR_ATTRIBUTION: ATTRIBUTION}
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
self.attrs["Description"] = self._description
return self.attrs
def update(self):
"""Update the sensor."""
self._data.update()
self._state = self._data.data[self.name]["State"]
self._description = self._data.data[self.name]["Description"]
|
import json
import logging
from absl import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker import spark_service
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
DEFAULT_MACHINE_TYPE = 'm3.xlarge'
RELEASE_LABEL = 'emr-5.23.0'
READY_CHECK_SLEEP = 30
READY_CHECK_TRIES = 60
READY_STATE = 'WAITING'
JOB_WAIT_SLEEP = 30
DELETED_STATES = ['TERMINATED_WITH_ERRORS', 'TERMINATED']
MANAGER_SG = 'EmrManagedMasterSecurityGroup'
WORKER_SG = 'EmrManagedSlaveSecurityGroup'
# Certain machine types require a subnet.
NEEDS_SUBNET = ['m4', 'c4', 'm5', 'c5']
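# For example, an 'm4.xlarge' or 'c5.2xlarge' worker (prefixes 'm4' / 'c5') makes the
# constructor below set up a network/subnet for the cluster.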
class AwsSecurityGroup(resource.BaseResource):
"""Object representing a AWS Security Group.
A security group is created automatically when an Amazon EMR cluster
  is created. It is not deleted automatically, and the subnet and VPC
cannot be deleted until the security group is deleted.
Because of this, there's no _Create method, only a _Delete and an
_Exists method.
"""
def __init__(self, cmd_prefix, group_id):
super(AwsSecurityGroup, self).__init__()
self.created = True
self.group_id = group_id
self.cmd_prefix = cmd_prefix
def _Delete(self):
cmd = self.cmd_prefix + ['ec2', 'delete-security-group',
'--group-id=' + self.group_id]
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _Exists(self):
cmd = self.cmd_prefix + ['ec2', 'describe-security-groups',
'--group-id=' + self.group_id]
_, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
# if the security group doesn't exist, the describe command gives an error.
return retcode == 0
def _Create(self):
if not self.created:
      raise NotImplementedError()
class AwsEMR(spark_service.BaseSparkService):
"""Object representing a AWS EMR cluster.
Attributes:
cluster_id: Cluster identifier, set in superclass.
project: Enclosing project for the cluster.
cmd_prefix: emr prefix, including region
network: network to use; set if needed by machine type
bucket_to_delete: bucket name to delete when cluster is
terminated.
"""
CLOUD = aws.CLOUD
SPARK_SAMPLE_LOCATION = '/usr/lib/spark/lib/spark-examples.jar'
SERVICE_NAME = 'emr'
def __init__(self, spark_service_spec):
super(AwsEMR, self).__init__(spark_service_spec)
# TODO(hildrum) use availability zone when appropriate
worker_machine_type = self.spec.worker_group.vm_spec.machine_type
leader_machine_type = self.spec.master_group.vm_spec.machine_type
self.cmd_prefix = list(util.AWS_PREFIX)
if self.zone:
region = util.GetRegionFromZone(self.zone)
self.cmd_prefix += ['--region', region]
# Certain machine types require subnets.
if (self.spec.static_cluster_id is None and
(worker_machine_type[0:2] in NEEDS_SUBNET or
leader_machine_type[0:2] in NEEDS_SUBNET)):
# GetNetwork is supposed to take a VM, but all it uses
# from the VM is the zone attribute, which self has.
self.network = aws_network.AwsNetwork.GetNetwork(self)
else:
self.network = None
self.bucket_to_delete = None
def _CreateLogBucket(self):
bucket_name = 's3://pkb-{0}-emr'.format(FLAGS.run_uri)
cmd = self.cmd_prefix + ['s3', 'mb', bucket_name]
_, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
raise Exception('Error creating logs bucket')
self.bucket_to_delete = bucket_name
return bucket_name
def _Create(self):
"""Creates the cluster."""
name = 'pkb_' + FLAGS.run_uri
logs_bucket = FLAGS.aws_emr_loguri or self._CreateLogBucket()
instance_groups = []
for group_type, group_spec in [
('CORE', self.spec.worker_group),
('MASTER', self.spec.master_group)]:
instance_properties = {'InstanceCount': group_spec.vm_count,
'InstanceGroupType': group_type,
'InstanceType': group_spec.vm_spec.machine_type,
'Name': group_type + ' group'}
if group_spec.disk_spec:
# Make sure nothing we are ignoring is included in the disk spec
assert group_spec.disk_spec.device_path is None
assert group_spec.disk_spec.disk_number is None
assert group_spec.disk_spec.mount_point is None
assert group_spec.disk_spec.iops is None
ebs_configuration = {'EbsBlockDeviceConfigs': [
{'VolumeSpecification':
{'SizeInGB': group_spec.disk_spec.disk_size,
'VolumeType': group_spec.disk_spec.disk_type},
'VolumesPerInstance':
group_spec.disk_spec.num_striped_disks}]}
instance_properties.update({'EbsConfiguration': ebs_configuration})
instance_groups.append(instance_properties)
# we need to store the cluster id.
cmd = self.cmd_prefix + ['emr', 'create-cluster', '--name', name,
'--release-label', RELEASE_LABEL,
'--use-default-roles',
'--instance-groups',
json.dumps(instance_groups),
'--application', 'Name=Spark',
'Name=Hadoop',
'--log-uri', logs_bucket]
if self.network:
cmd += ['--ec2-attributes', 'SubnetId=' + self.network.subnet.id]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
self.cluster_id = result['ClusterId']
logging.info('Cluster created with id %s', self.cluster_id)
for tag_key, tag_value in util.MakeDefaultTags().items():
self._AddTag(tag_key, tag_value)
def _AddTag(self, key, value):
"""Add the key value pair as a tag to the emr cluster."""
cmd = self.cmd_prefix + ['emr', 'add-tags',
'--resource-id', self.cluster_id,
'--tag',
'{}={}'.format(key, value)]
vm_util.IssueCommand(cmd)
def _DeleteSecurityGroups(self):
"""Delete the security groups associated with this cluster."""
cmd = self.cmd_prefix + ['emr', 'describe-cluster',
'--cluster-id', self.cluster_id]
stdout, _, _ = vm_util.IssueCommand(cmd)
cluster_desc = json.loads(stdout)
sec_object = cluster_desc['Cluster']['Ec2InstanceAttributes']
manager_sg = sec_object[MANAGER_SG]
worker_sg = sec_object[WORKER_SG]
# the manager group and the worker group reference each other, so neither
# can be deleted. First we delete the references to the manager group in
# the worker group. Then we delete the manager group, and then, finally the
# worker group.
# remove all references to the manager group from the worker group.
for proto, port in [('tcp', '0-65535'), ('udp', '0-65535'), ('icmp', '-1')]:
for group1, group2 in [(worker_sg, manager_sg), (manager_sg, worker_sg)]:
cmd = self.cmd_prefix + ['ec2', 'revoke-security-group-ingress',
'--group-id=' + group1,
'--source-group=' + group2,
'--protocol=' + proto,
'--port=' + port]
vm_util.IssueCommand(cmd)
# Now we need to delete the manager, then the worker.
for group in manager_sg, worker_sg:
sec_group = AwsSecurityGroup(self.cmd_prefix, group)
sec_group.Delete()
def _Delete(self):
"""Deletes the cluster."""
cmd = self.cmd_prefix + ['emr', 'terminate-clusters', '--cluster-ids',
self.cluster_id]
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _DeleteDependencies(self):
if self.network:
self._DeleteSecurityGroups()
if self.bucket_to_delete:
bucket_del_cmd = self.cmd_prefix + ['s3', 'rb', '--force',
self.bucket_to_delete]
vm_util.IssueCommand(bucket_del_cmd)
def _Exists(self):
"""Check to see whether the cluster exists."""
cmd = self.cmd_prefix + ['emr', 'describe-cluster',
'--cluster-id', self.cluster_id]
stdout, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
return False
result = json.loads(stdout)
if result['Cluster']['Status']['State'] in DELETED_STATES:
return False
else:
return True
def _IsReady(self):
"""Check to see if the cluster is ready."""
cmd = self.cmd_prefix + ['emr', 'describe-cluster', '--cluster-id',
self.cluster_id]
stdout, _, rc = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
if result['Cluster']['Status']['State'] == 'TERMINATED_WITH_ERRORS':
reason = result['Cluster']['Status']['StateChangeReason']['Message']
message = reason
if reason.startswith('Subnet is required'):
message = ('Cluster creation failed because this machine type requires '
'a subnet. To ensure PKB creates a subnet for this machine '
'type, update the NEEDS_SUBNET variable of '
'providers/aws/aws_emr.py to contain prefix of this machine '
'type. Raw AWS message={0}'.format(reason))
raise Exception(message)
return result['Cluster']['Status']['State'] == READY_STATE
def _GetLogBase(self):
"""Gets the base uri for the logs."""
cmd = self.cmd_prefix + ['emr', 'describe-cluster', '--cluster-id',
self.cluster_id]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
if 'LogUri' in result['Cluster']:
self.logging_enabled = True
log_uri = result['Cluster']['LogUri']
if log_uri.startswith('s3n'):
log_uri = 's3' + log_uri[3:]
return log_uri
else:
return None
def _CheckForFile(self, filename):
"""Wait for file to appear on s3."""
cmd = self.cmd_prefix + ['s3', 'ls', filename]
_, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
return retcode == 0
def _IsStepDone(self, step_id):
"""Determine whether the step is done.
Args:
step_id: The step id to query.
Returns:
      A dictionary describing the step if the step is complete,
None otherwise.
"""
cmd = self.cmd_prefix + ['emr', 'describe-step', '--cluster-id',
self.cluster_id, '--step-id', step_id]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
state = result['Step']['Status']['State']
if state == "COMPLETED" or state == "FAILED":
return result
else:
return None
def _MakeHadoopStep(self, jarfile, classname, job_arguments):
"""Construct an EMR step with a type CUSTOM_JAR"""
step_list = ['Type=CUSTOM_JAR', 'Jar=' + jarfile]
if classname:
step_list.append('MainClass=' + classname)
if job_arguments:
arg_string = '[' + ','.join(job_arguments) + ']'
step_list.append('Args=' + arg_string)
return step_list
def _MakeSparkStep(self, jarfile, classname, job_arguments):
arg_list = ['--class', classname, jarfile]
if job_arguments:
arg_list += job_arguments
arg_string = '[' + ','.join(arg_list) + ']'
step_list = ['Type=Spark', 'Args=' + arg_string]
return step_list
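  # Illustrative only (jar path, class name and argument are made up): with
  # jarfile='s3://bucket/app.jar', classname='com.example.Main' and
  # job_arguments=['10'], _MakeSparkStep returns
  #   ['Type=Spark', 'Args=[--class,com.example.Main,s3://bucket/app.jar,10]']
  # and SubmitJob below joins the list with ',' before passing it to
  # 'emr add-steps --steps ...'.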
def SubmitJob(self, jarfile, classname, job_poll_interval=JOB_WAIT_SLEEP,
job_arguments=None, job_stdout_file=None,
job_type=spark_service.SPARK_JOB_TYPE):
"""Submit the job.
Submit the job and wait for it to complete. If job_stdout_file is not
    None, also wait for the job's stdout to appear and put that in
job_stdout_file.
Args:
jarfile: Jar file containing the class to submit.
classname: Name of the class.
job_poll_interval: Submit job will poll until the job is done; this is
the time between checks.
job_arguments: Arguments to pass to the job.
job_stdout_file: Name of a file in which to put the job's standard out.
If there is data here already, it will be overwritten.
"""
@vm_util.Retry(poll_interval=job_poll_interval, fuzz=0)
def WaitForFile(filename):
if not self._CheckForFile(filename):
raise Exception('File not found yet')
@vm_util.Retry(timeout=FLAGS.aws_emr_job_wait_time,
poll_interval=job_poll_interval, fuzz=0)
def WaitForStep(step_id):
result = self._IsStepDone(step_id)
if result is None:
raise Exception('Step {0} not complete.'.format(step_id))
return result
if job_type == spark_service.SPARK_JOB_TYPE:
step_list = self._MakeSparkStep(jarfile, classname, job_arguments)
elif job_type == spark_service.HADOOP_JOB_TYPE:
step_list = self._MakeHadoopStep(jarfile, classname, job_arguments)
else:
raise Exception('Job type %s unsupported for EMR' % job_type)
step_string = ','.join(step_list)
cmd = self.cmd_prefix + ['emr', 'add-steps', '--cluster-id',
self.cluster_id, '--steps', step_string]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
step_id = result['StepIds'][0]
metrics = {}
result = WaitForStep(step_id)
pending_time = result['Step']['Status']['Timeline']['CreationDateTime']
start_time = result['Step']['Status']['Timeline']['StartDateTime']
end_time = result['Step']['Status']['Timeline']['EndDateTime']
metrics[spark_service.WAITING] = start_time - pending_time
metrics[spark_service.RUNTIME] = end_time - start_time
step_state = result['Step']['Status']['State']
metrics[spark_service.SUCCESS] = step_state == "COMPLETED"
# Now we need to take the standard out and put it in the designated path,
# if appropriate.
if job_stdout_file:
log_base = self._GetLogBase()
if log_base is None:
        logging.warning('SubmitJob requested output, but EMR cluster was not '
                        'created with logging')
return metrics
# log_base ends in a slash.
s3_stdout = '{0}{1}/steps/{2}/stdout.gz'.format(log_base,
self.cluster_id,
step_id)
WaitForFile(s3_stdout)
dest_file = '{0}.gz'.format(job_stdout_file)
cp_cmd = ['aws', 's3', 'cp', s3_stdout, dest_file]
_, _, retcode = vm_util.IssueCommand(cp_cmd, raise_on_failure=False)
if retcode == 0:
uncompress_cmd = ['gunzip', '-f', dest_file]
vm_util.IssueCommand(uncompress_cmd)
return metrics
def SetClusterProperty(self):
pass
def ExecuteOnMaster(self, script_path, script_args):
raise NotImplementedError()
def CopyFromMaster(self, remote_path, local_path):
raise NotImplementedError()
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from openvz import OpenvzCollector
###############################################################################
class TestOpenvzCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('OpenvzCollector', {})
self.collector = OpenvzCollector(config, None)
def test_import(self):
self.assertTrue(OpenvzCollector)
@patch.object(Collector, 'publish')
def test_parse_values(self, publish_mock):
collector_mock = patch.object(OpenvzCollector, 'poll', Mock(
return_value=self.getFixture('vzlist.json').getvalue()))
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
# DNS
'dns_home_loc.kmemsize.held': 5151725,
'dns_home_loc.uptime': 1316250.125,
'dns_home_loc.laverage.01': 0.01,
'dns_home_loc.laverage.05': 0.05,
'dns_home_loc.laverage.15': 0.15,
# MQTT
'mqtt_home_loc.kmemsize.held': 4930969,
'mqtt_home_loc.uptime': 126481.188,
'mqtt_home_loc.laverage.01': 0.1,
'mqtt_home_loc.laverage.05': 0.5,
'mqtt_home_loc.laverage.15': 1.5,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
import json
import logging
import http.client
import tempfile
from collections import namedtuple
from .common import *
logger = logging.getLogger(__name__)
ChangeSet = namedtuple('Changes', ['nodes', 'purged_nodes', 'checkpoint', 'reset'])
class MetadataMixin(object):
def get_node_list(self, **params) -> list:
""":param params: may include tempLink='True'"""
return self.BOReq.paginated_get(self.metadata_url + 'nodes', params)
def get_file_list(self) -> list:
return self.get_node_list(filters='kind:FILE')
def get_folder_list(self) -> list:
return self.get_node_list(filters='kind:FOLDER')
def get_asset_list(self) -> list:
return self.get_node_list(filters='kind:ASSET')
def get_trashed_folders(self) -> list:
return self.get_node_list(filters='status:TRASH AND kind:FOLDER')
def get_trashed_files(self) -> list:
return self.get_node_list(filters='status:TRASH AND kind:FILE')
def get_changes(self, checkpoint='', include_purged=False, silent=True, file=None):
"""Writes changes into a (temporary) file. See
`<https://developer.amazon.com/public/apis/experience/cloud-drive/content/changes>`_.
"""
logger.info('Getting changes with checkpoint "%s".' % checkpoint)
body = {}
if checkpoint:
body['checkpoint'] = checkpoint
if include_purged:
body['includePurged'] = 'true'
r = self.BOReq.post(self.metadata_url + 'changes', data=json.dumps(body), stream=True)
if r.status_code not in OK_CODES:
r.close()
raise RequestError(r.status_code, r.text)
if file:
tmp = open(file, 'w+b')
else:
tmp = tempfile.TemporaryFile('w+b')
try:
for line in r.iter_lines(chunk_size=10 * 1024 ** 2, decode_unicode=False):
if line:
tmp.write(line + b'\n')
if not silent:
print('.', end='', flush=True)
if not silent:
print()
except (http.client.IncompleteRead, requests.exceptions.ChunkedEncodingError) as e:
logger.info(str(e))
raise RequestError(RequestError.CODE.INCOMPLETE_RESULT,
'[acd_api] reading changes terminated prematurely.')
except:
raise
finally:
r.close()
tmp.seek(0)
return tmp
@staticmethod
def _iter_changes_lines(f) -> 'Generator[ChangeSet]':
"""Generates a ChangeSet per line in passed file
the expected return format should be:
{"checkpoint": str, "reset": bool, "nodes": []}
{"checkpoint": str, "reset": false, "nodes": []}
{"end": true}
:arg f: opened file with current position at the beginning of a changeset
:throws: RequestError
"""
end = False
pages = -1
while True:
line = f.readline()
if not line:
break
reset = False
pages += 1
nodes = []
purged_nodes = []
try:
o = json.loads(line.decode('utf-8'))
except ValueError:
raise RequestError(RequestError.CODE.INCOMPLETE_RESULT,
'[acd_api] Invalid JSON in change set, page %i.' % pages)
try:
if o['end']:
end = True
continue
except KeyError:
pass
if o['reset']:
logger.info('Found "reset" tag in changes.')
reset = True
# could this actually happen?
if o['statusCode'] not in OK_CODES:
raise RequestError(RequestError.CODE.FAILED_SUBREQUEST,
'[acd_api] Partial failure in change request.')
for node in o['nodes']:
if node['status'] == 'PURGED':
purged_nodes.append(node['id'])
else:
nodes.append(node)
checkpoint = o['checkpoint']
logger.debug('Checkpoint: %s' % checkpoint)
yield ChangeSet(nodes, purged_nodes, checkpoint, reset)
logger.info('%i page(s) in changes.' % pages)
if not end:
logger.warning('End of change request not reached.')
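    # Illustrative only (variable names are assumptions): callers typically pair the two
    # methods above, e.g.
    #   f = acd.get_changes(checkpoint=last_checkpoint, include_purged=True)
    #   for changeset in acd._iter_changes_lines(f):
    #       apply_changes(changeset.nodes, changeset.purged_nodes)
    #       last_checkpoint = changeset.checkpoint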
def get_metadata(self, node_id: str, assets=False, temp_link=True) -> dict:
"""Gets a node's metadata.
:arg assets: also include asset info (e.g. thumbnails) if the node is a file
:arg temp_link: include a temporary download link if the node is a file
"""
params = {'tempLink': 'true' if temp_link else 'false',
'asset': 'ALL' if assets else 'NONE'}
r = self.BOReq.get(self.metadata_url + 'nodes/' + node_id, params=params)
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()
# this will increment the node's version attribute
def update_metadata(self, node_id: str, properties: dict) -> dict:
"""Update a node's properties like name, description, status, parents, ..."""
body = json.dumps(properties)
r = self.BOReq.patch(self.metadata_url + 'nodes/' + node_id, data=body)
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()
def get_root_node(self) -> dict:
"""Gets the root node metadata"""
params = {'filters': 'isRoot:true'}
r = self.BOReq.get(self.metadata_url + 'nodes', params=params)
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
data = r.json()
return data['data'][0]
def get_root_id(self) -> str:
"""Gets the ID of the root node
:returns: the topmost folder id"""
        node = self.get_root_node()
        if 'id' in node:
            return node['id']
def list_children(self, node_id: str) -> list:
l = self.BOReq.paginated_get(self.metadata_url + 'nodes/' + node_id + '/children')
return l
def list_child_folders(self, node_id: str) -> list:
l = self.BOReq.paginated_get(self.metadata_url + 'nodes/' + node_id + '/children',
params={'filters': 'kind:FOLDER'})
return l
def add_child(self, parent_id: str, child_id: str) -> dict:
"""Adds node with ID *child_id* to folder with ID *parent_id*.
:returns: updated child node dict"""
r = self.BOReq.put(self.metadata_url + 'nodes/' + parent_id + '/children/' + child_id)
if r.status_code not in OK_CODES:
logger.error('Adding child failed.')
raise RequestError(r.status_code, r.text)
return r.json()
def remove_child(self, parent_id: str, child_id: str) -> dict:
""":returns: updated child node dict"""
r = self.BOReq.delete(
self.metadata_url + 'nodes/' + parent_id + "/children/" + child_id)
# contrary to response code stated in API doc (202 ACCEPTED)
if r.status_code not in OK_CODES:
logger.error('Removing child failed.')
raise RequestError(r.status_code, r.text)
return r.json()
def move_node_from(self, node_id: str, old_parent_id: str, new_parent_id: str) -> dict:
"""Moves node with given ID from old parent to new parent.
Not tested with multi-parent nodes.
:returns: changed node dict"""
data = {'fromParent': old_parent_id, 'childId': node_id}
r = self.BOReq.post(self.metadata_url + 'nodes/' + new_parent_id + '/children',
data=json.dumps(data))
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()
def move_node(self, node_id: str, parent_id: str) -> dict:
return self.update_metadata(node_id, {'parents': [parent_id]})
def rename_node(self, node_id: str, new_name: str) -> dict:
properties = {'name': new_name}
return self.update_metadata(node_id, properties)
def set_available(self, node_id: str) -> dict:
"""Sets node status from 'PENDING' to 'AVAILABLE'."""
properties = {'status': 'AVAILABLE'}
return self.update_metadata(node_id, properties)
def get_owner_id(self):
"""Provisional function for retrieving the security profile's name, a.k.a. owner id."""
node = self.create_file('acd_cli_get_owner_id')
self.move_to_trash(node['id'])
return node['createdBy']
def list_properties(self, node_id: str, owner_id: str) -> dict:
"""This will always return an empty dict if the accessor is not the owner.
:param owner_id: owner ID (return status 404 if empty)"""
r = self.BOReq.get(self.metadata_url + 'nodes/' + node_id + '/properties/' + owner_id)
if r.status_code not in OK_CODES:
raise RequestError(r.status_code, r.text)
return r.json()['data']
def add_property(self, node_id: str, owner_id: str, key: str, value: str) -> dict:
"""Adds or overwrites *key* property with *content*. Maximum number of keys per owner is 10.
:param value: string of length <= 500
:raises: RequestError: 404, <UnknownOperationException/> if owner is empty
RequestError: 400, {...} if maximum of allowed properties is reached
        :returns dict: {'key': '<KEY>', 'location': '<NODE_ADDRESS>/properties/<OWNER_ID>/<KEY>',
'value': '<VALUE>'}"""
ok_codes = [requests.codes.CREATED]
r = self.BOReq.put(self.metadata_url + 'nodes/' + node_id +
'/properties/' + owner_id + '/' + key,
data=json.dumps({'value': value}), acc_codes=ok_codes)
if r.status_code not in ok_codes:
raise RequestError(r.status_code, r.text)
return r.json()
def delete_property(self, node_id: str, owner_id: str, key: str):
"""Deletes *key* property from node with ID *node_id*."""
ok_codes = [requests.codes.NO_CONTENT]
r = self.BOReq.delete(self.metadata_url + 'nodes/' + node_id +
'/properties/' + owner_id + '/' + key, acc_codes=ok_codes)
if r.status_code not in ok_codes:
raise RequestError(r.status_code, r.text)
def delete_properties(self, node_id: str, owner_id: str):
"""Deletes all of the owner's properties. Uses multiple requests."""
ok_codes = [requests.codes.NO_CONTENT]
prop_dict = self.list_properties(node_id, owner_id)
for key in prop_dict:
r = self.BOReq.delete('%s/nodes/%s/properties/%s/%s'
% (self.metadata_url, node_id, owner_id, key), acc_codes=ok_codes)
if r.status_code not in ok_codes:
raise RequestError(r.status_code, r.text)
def resolve_folder_path(self, path: str) -> 'List[dict]':
"""Resolves a non-trash folder path to a list of folder entries."""
segments = list(filter(bool, path.split('/')))
folder_chain = []
root = self.get_root_node()
folder_chain.append(root)
if not segments:
return folder_chain
for i, segment in enumerate(segments):
dir_entries = self.list_child_folders(folder_chain[-1]['id'])
for ent in dir_entries:
if ent['status'] == 'AVAILABLE' and ent['name'] == segment:
folder_chain.append(ent)
break
if len(folder_chain) != i + 2:
return []
return folder_chain
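    # Illustrative only (folder names are made up): resolve_folder_path('/photos/2016')
    # fetches the root node, looks for an AVAILABLE child folder named 'photos', then
    # for a child of that folder named '2016', and returns [root, photos, 2016];
    # an empty list is returned if any segment cannot be resolved.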
|
import argparse
import functools
import re
from pathlib import Path
from typing import Final, MutableMapping, Optional, Pattern, Tuple, Union
import discord
from redbot.core import commands
from redbot.core.bot import Red
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
from .apis.api_utils import standardize_scope
from .apis.playlist_interface import get_all_playlist_converter
from .errors import NoMatchesFound, TooManyMatches
from .utils import PlaylistScope
_ = Translator("Audio", Path(__file__))
__all__ = [
"ComplexScopeParser",
"PlaylistConverter",
"ScopeParser",
"LazyGreedyConverter",
"standardize_scope",
"get_lazy_converter",
"get_playlist_converter",
]
T_ = _
_ = lambda s: s
_SCOPE_HELP: Final[str] = _(
"""
Scope must be a valid version of one of the following:
Global
Guild
User
"""
)
_USER_HELP: Final[str] = _(
"""
Author must be a valid version of one of the following:
User ID
User Mention
User Name#123
"""
)
_GUILD_HELP: Final[str] = _(
"""
Guild must be a valid version of one of the following:
Guild ID
Exact guild name
"""
)
_ = T_
MENTION_RE: Final[Pattern] = re.compile(r"^<?(?:(?:@[!&]?)?|#)(\d{15,21})>?$")
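# Matches bare numeric IDs ("123456789012345678") as well as user, role and channel
# mentions such as "<@!123456789012345678>", "<@&123456789012345678>" or
# "<#123456789012345678>", capturing the numeric ID in group 1.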
def _match_id(arg: str) -> Optional[int]:
m = MENTION_RE.match(arg)
if m:
return int(m.group(1))
return None
async def global_unique_guild_finder(ctx: commands.Context, arg: str) -> discord.Guild:
bot: Red = ctx.bot
_id = _match_id(arg)
if _id is not None:
guild: discord.Guild = bot.get_guild(_id)
if guild is not None:
return guild
maybe_matches = []
async for obj in AsyncIter(bot.guilds):
if obj.name == arg or str(obj) == arg:
maybe_matches.append(obj)
if not maybe_matches:
raise NoMatchesFound(
_(
'"{arg}" was not found. It must be the ID or '
"complete name of a server which the bot can see."
).format(arg=arg)
)
elif len(maybe_matches) == 1:
return maybe_matches[0]
else:
raise TooManyMatches(
_(
'"{arg}" does not refer to a unique server. '
"Please use the ID for the server you're trying to specify."
).format(arg=arg)
)
async def global_unique_user_finder(
    ctx: commands.Context, arg: str, guild: discord.Guild = None
) -> discord.abc.User:
bot: Red = ctx.bot
guild = guild or ctx.guild
_id = _match_id(arg)
if _id is not None:
user: discord.User = bot.get_user(_id)
if user is not None:
return user
maybe_matches = []
async for user in AsyncIter(bot.users).filter(lambda u: u.name == arg or f"{u}" == arg):
maybe_matches.append(user)
if guild is not None:
async for member in AsyncIter(guild.members).filter(
lambda m: m.nick == arg and not any(obj.id == m.id for obj in maybe_matches)
):
maybe_matches.append(member)
if not maybe_matches:
raise NoMatchesFound(
_(
'"{arg}" was not found. It must be the ID or name or '
"mention a user which the bot can see."
).format(arg=arg)
)
elif len(maybe_matches) == 1:
return maybe_matches[0]
else:
raise TooManyMatches(
_(
'"{arg}" does not refer to a unique server. '
"Please use the ID for the server you're trying to specify."
).format(arg=arg)
)
class PlaylistConverter(commands.Converter):
async def convert(self, ctx: commands.Context, arg: str) -> MutableMapping:
"""Get playlist for all scopes that match the argument user provided"""
cog = ctx.cog
user_matches = []
guild_matches = []
global_matches = []
if cog:
global_matches = await get_all_playlist_converter(
PlaylistScope.GLOBAL.value,
ctx.bot,
cog.playlist_api,
arg,
guild=ctx.guild,
author=ctx.author,
)
guild_matches = await get_all_playlist_converter(
PlaylistScope.GUILD.value,
ctx.bot,
cog.playlist_api,
arg,
guild=ctx.guild,
author=ctx.author,
)
user_matches = await get_all_playlist_converter(
PlaylistScope.USER.value,
ctx.bot,
cog.playlist_api,
arg,
guild=ctx.guild,
author=ctx.author,
)
if not user_matches and not guild_matches and not global_matches:
raise commands.BadArgument(_("Could not match '{}' to a playlist.").format(arg))
return {
PlaylistScope.GLOBAL.value: global_matches,
PlaylistScope.GUILD.value: guild_matches,
PlaylistScope.USER.value: user_matches,
"all": [*global_matches, *guild_matches, *user_matches],
"arg": arg,
}
class NoExitParser(argparse.ArgumentParser):
def error(self, message):
raise commands.BadArgument()
class ScopeParser(commands.Converter):
async def convert(
self, ctx: commands.Context, argument: str
) -> Tuple[Optional[str], discord.User, Optional[discord.Guild], bool]:
target_scope: Optional[str] = None
target_user: Optional[Union[discord.Member, discord.User]] = None
target_guild: Optional[discord.Guild] = None
specified_user = False
argument = argument.replace("—", "--")
command, *arguments = argument.split(" -- ")
if arguments:
argument = " -- ".join(arguments)
else:
command = ""
parser = NoExitParser(description="Playlist Scope Parsing.", add_help=False)
parser.add_argument("--scope", nargs="*", dest="scope", default=[])
parser.add_argument("--guild", nargs="*", dest="guild", default=[])
parser.add_argument("--server", nargs="*", dest="guild", default=[])
parser.add_argument("--author", nargs="*", dest="author", default=[])
parser.add_argument("--user", nargs="*", dest="author", default=[])
parser.add_argument("--member", nargs="*", dest="author", default=[])
if not command:
parser.add_argument("command", nargs="*")
try:
vals = vars(parser.parse_args(argument.split()))
except Exception as exc:
raise commands.BadArgument() from exc
if vals["scope"]:
scope_raw = " ".join(vals["scope"]).strip()
scope = scope_raw.upper().strip()
valid_scopes = PlaylistScope.list() + [
"GLOBAL",
"GUILD",
"AUTHOR",
"USER",
"SERVER",
"MEMBER",
"BOT",
]
if scope not in valid_scopes:
raise commands.ArgParserFailure("--scope", scope_raw, custom_help=_(_SCOPE_HELP))
target_scope = standardize_scope(scope)
elif "--scope" in argument and not vals["scope"]:
raise commands.ArgParserFailure("--scope", _("Nothing"), custom_help=_(_SCOPE_HELP))
is_owner = await ctx.bot.is_owner(ctx.author)
guild = vals.get("guild", None) or vals.get("server", None)
if is_owner and guild:
server_error = ""
target_guild = None
guild_raw = " ".join(guild).strip()
try:
target_guild = await global_unique_guild_finder(ctx, guild_raw)
except TooManyMatches as err:
server_error = f"{err}\n"
except NoMatchesFound as err:
server_error = f"{err}\n"
if target_guild is None:
raise commands.ArgParserFailure(
"--guild", guild_raw, custom_help=f"{server_error}{_(_GUILD_HELP)}"
)
elif not is_owner and (guild or any(x in argument for x in ["--guild", "--server"])):
raise commands.BadArgument(_("You cannot use `--guild`"))
elif any(x in argument for x in ["--guild", "--server"]):
raise commands.ArgParserFailure("--guild", _("Nothing"), custom_help=_(_GUILD_HELP))
author = vals.get("author", None) or vals.get("user", None) or vals.get("member", None)
if author:
user_error = ""
target_user = None
user_raw = " ".join(author).strip()
try:
target_user = await global_unique_user_finder(ctx, user_raw, guild=target_guild)
specified_user = True
except TooManyMatches as err:
user_error = f"{err}\n"
except NoMatchesFound as err:
user_error = f"{err}\n"
if target_user is None:
raise commands.ArgParserFailure(
"--author", user_raw, custom_help=f"{user_error}{_(_USER_HELP)}"
)
elif any(x in argument for x in ["--author", "--user", "--member"]):
raise commands.ArgParserFailure("--scope", _("Nothing"), custom_help=_(_USER_HELP))
target_scope: Optional[str] = target_scope or None
target_user: Union[discord.Member, discord.User] = target_user or ctx.author
target_guild: discord.Guild = target_guild or ctx.guild
return target_scope, target_user, target_guild, specified_user
class ComplexScopeParser(commands.Converter):
async def convert(
self, ctx: commands.Context, argument: str
) -> Tuple[
str,
discord.User,
Optional[discord.Guild],
bool,
str,
discord.User,
Optional[discord.Guild],
bool,
]:
target_scope: Optional[str] = None
target_user: Optional[Union[discord.Member, discord.User]] = None
target_guild: Optional[discord.Guild] = None
specified_target_user = False
source_scope: Optional[str] = None
source_user: Optional[Union[discord.Member, discord.User]] = None
source_guild: Optional[discord.Guild] = None
specified_source_user = False
argument = argument.replace("—", "--")
command, *arguments = argument.split(" -- ")
if arguments:
argument = " -- ".join(arguments)
else:
command = ""
parser = NoExitParser(description="Playlist Scope Parsing.", add_help=False)
parser.add_argument("--to-scope", nargs="*", dest="to_scope", default=[])
parser.add_argument("--to-guild", nargs="*", dest="to_guild", default=[])
parser.add_argument("--to-server", nargs="*", dest="to_server", default=[])
parser.add_argument("--to-author", nargs="*", dest="to_author", default=[])
parser.add_argument("--to-user", nargs="*", dest="to_user", default=[])
parser.add_argument("--to-member", nargs="*", dest="to_member", default=[])
parser.add_argument("--from-scope", nargs="*", dest="from_scope", default=[])
parser.add_argument("--from-guild", nargs="*", dest="from_guild", default=[])
parser.add_argument("--from-server", nargs="*", dest="from_server", default=[])
parser.add_argument("--from-author", nargs="*", dest="from_author", default=[])
parser.add_argument("--from-user", nargs="*", dest="from_user", default=[])
parser.add_argument("--from-member", nargs="*", dest="from_member", default=[])
if not command:
parser.add_argument("command", nargs="*")
try:
vals = vars(parser.parse_args(argument.split()))
except Exception as exc:
raise commands.BadArgument() from exc
is_owner = await ctx.bot.is_owner(ctx.author)
valid_scopes = PlaylistScope.list() + [
"GLOBAL",
"GUILD",
"AUTHOR",
"USER",
"SERVER",
"MEMBER",
"BOT",
]
if vals["to_scope"]:
to_scope_raw = " ".join(vals["to_scope"]).strip()
to_scope = to_scope_raw.upper().strip()
if to_scope not in valid_scopes:
raise commands.ArgParserFailure(
"--to-scope", to_scope_raw, custom_help=_SCOPE_HELP
)
target_scope = standardize_scope(to_scope)
elif "--to-scope" in argument and not vals["to_scope"]:
raise commands.ArgParserFailure("--to-scope", _("Nothing"), custom_help=_(_SCOPE_HELP))
if vals["from_scope"]:
from_scope_raw = " ".join(vals["from_scope"]).strip()
from_scope = from_scope_raw.upper().strip()
if from_scope not in valid_scopes:
raise commands.ArgParserFailure(
"--from-scope", from_scope_raw, custom_help=_SCOPE_HELP
)
source_scope = standardize_scope(from_scope)
elif "--from-scope" in argument and not vals["to_scope"]:
raise commands.ArgParserFailure("--to-scope", _("Nothing"), custom_help=_(_SCOPE_HELP))
to_guild = vals.get("to_guild", None) or vals.get("to_server", None)
if is_owner and to_guild:
target_server_error = ""
target_guild = None
to_guild_raw = " ".join(to_guild).strip()
try:
target_guild = await global_unique_guild_finder(ctx, to_guild_raw)
except TooManyMatches as err:
target_server_error = f"{err}\n"
except NoMatchesFound as err:
target_server_error = f"{err}\n"
if target_guild is None:
raise commands.ArgParserFailure(
"--to-guild",
to_guild_raw,
custom_help=f"{target_server_error}{_(_GUILD_HELP)}",
)
elif not is_owner and (
to_guild or any(x in argument for x in ["--to-guild", "--to-server"])
):
raise commands.BadArgument(_("You cannot use `--to-server`"))
elif any(x in argument for x in ["--to-guild", "--to-server"]):
raise commands.ArgParserFailure(
"--to-server", _("Nothing"), custom_help=_(_GUILD_HELP)
)
from_guild = vals.get("from_guild", None) or vals.get("from_server", None)
if is_owner and from_guild:
source_server_error = ""
source_guild = None
from_guild_raw = " ".join(from_guild).strip()
try:
source_guild = await global_unique_guild_finder(ctx, from_guild_raw)
except TooManyMatches as err:
source_server_error = f"{err}\n"
except NoMatchesFound as err:
source_server_error = f"{err}\n"
if source_guild is None:
raise commands.ArgParserFailure(
"--from-guild",
from_guild_raw,
custom_help=f"{source_server_error}{_(_GUILD_HELP)}",
)
elif not is_owner and (
from_guild or any(x in argument for x in ["--from-guild", "--from-server"])
):
raise commands.BadArgument(_("You cannot use `--from-server`"))
elif any(x in argument for x in ["--from-guild", "--from-server"]):
raise commands.ArgParserFailure(
"--from-server", _("Nothing"), custom_help=_(_GUILD_HELP)
)
to_author = (
vals.get("to_author", None) or vals.get("to_user", None) or vals.get("to_member", None)
)
if to_author:
target_user_error = ""
target_user = None
to_user_raw = " ".join(to_author).strip()
try:
target_user = await global_unique_user_finder(ctx, to_user_raw, guild=target_guild)
specified_target_user = True
except TooManyMatches as err:
target_user_error = f"{err}\n"
except NoMatchesFound as err:
target_user_error = f"{err}\n"
if target_user is None:
raise commands.ArgParserFailure(
"--to-author", to_user_raw, custom_help=f"{target_user_error}{_(_USER_HELP)}"
)
elif any(x in argument for x in ["--to-author", "--to-user", "--to-member"]):
raise commands.ArgParserFailure("--to-user", _("Nothing"), custom_help=_(_USER_HELP))
from_author = (
vals.get("from_author", None)
or vals.get("from_user", None)
or vals.get("from_member", None)
)
if from_author:
source_user_error = ""
source_user = None
from_user_raw = " ".join(from_author).strip()
try:
source_user = await global_unique_user_finder(
ctx, from_user_raw, guild=target_guild
)
                specified_source_user = True
except TooManyMatches as err:
source_user_error = f"{err}\n"
except NoMatchesFound as err:
source_user_error = f"{err}\n"
if source_user is None:
raise commands.ArgParserFailure(
"--from-author",
from_user_raw,
custom_help=f"{source_user_error}{_(_USER_HELP)}",
)
elif any(x in argument for x in ["--from-author", "--from-user", "--from-member"]):
raise commands.ArgParserFailure("--from-user", _("Nothing"), custom_help=_(_USER_HELP))
target_scope = target_scope or PlaylistScope.GUILD.value
target_user = target_user or ctx.author
target_guild = target_guild or ctx.guild
source_scope = source_scope or PlaylistScope.GUILD.value
source_user = source_user or ctx.author
source_guild = source_guild or ctx.guild
return (
source_scope,
source_user,
source_guild,
specified_source_user,
target_scope,
target_user,
target_guild,
specified_target_user,
)
class LazyGreedyConverter(commands.Converter):
def __init__(self, splitter: str):
self.splitter_Value = splitter
async def convert(self, ctx: commands.Context, argument: str) -> str:
full_message = ctx.message.content.partition(f" {argument} ")
if len(full_message) == 1:
full_message = (
(argument if argument not in full_message else "") + " " + full_message[0]
)
elif len(full_message) > 1:
full_message = (
(argument if argument not in full_message else "") + " " + full_message[-1]
)
greedy_output = (" " + full_message.replace("—", "--")).partition(
f" {self.splitter_Value}"
)[0]
return f"{greedy_output}".strip()
def get_lazy_converter(splitter: str) -> type:
"""Returns a typechecking safe `LazyGreedyConverter` suitable for use with discord.py."""
class PartialMeta(type(LazyGreedyConverter)):
__call__ = functools.partialmethod(type(LazyGreedyConverter).__call__, splitter)
class ValidatedConverter(LazyGreedyConverter, metaclass=PartialMeta):
pass
return ValidatedConverter
def get_playlist_converter() -> type:
"""Returns a typechecking safe `PlaylistConverter` suitable for use with discord.py."""
class PartialMeta(type(PlaylistConverter)):
__call__ = functools.partialmethod(type(PlaylistConverter).__call__)
class ValidatedConverter(PlaylistConverter, metaclass=PartialMeta):
pass
return ValidatedConverter
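# Illustrative only (the command and parameter names below are assumptions): the two
# factories above are meant to produce annotation types for discord.py commands, e.g.
#
#   LazyConverter = get_lazy_converter("--")
#   PlaylistArg = get_playlist_converter()
#
#   @commands.command()
#   async def playlist_info(self, ctx, matches: PlaylistArg, *, query: LazyConverter):
#       ...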
|
import os
from nikola.plugin_categories import Taxonomy
from nikola import utils, hierarchy_utils
class ClassifyCategories(Taxonomy):
"""Classify the posts by categories."""
name = "classify_categories"
classification_name = "category"
overview_page_variable_name = "categories"
overview_page_items_variable_name = "cat_items"
overview_page_hierarchy_variable_name = "cat_hierarchy"
more_than_one_classifications_per_post = False
has_hierarchy = True
include_posts_from_subhierarchies = True
include_posts_into_hierarchy_root = False
show_list_as_subcategories_list = False
template_for_classification_overview = "tags.tmpl"
always_disable_rss = False
always_disable_atom = False
apply_to_posts = True
apply_to_pages = False
minimum_post_count_per_classification_in_overview = 1
omit_empty_classifications = True
add_other_languages_variable = True
path_handler_docstrings = {
'category_index': """A link to the category index.
Example:
link://category_index => /categories/index.html""",
'category': """A link to a category. Takes page number as optional keyword argument.
Example:
link://category/dogs => /categories/dogs.html""",
'category_atom': """A link to a category's Atom feed.
Example:
link://category_atom/dogs => /categories/dogs.atom""",
'category_rss': """A link to a category's RSS feed.
Example:
link://category_rss/dogs => /categories/dogs.xml""",
}
def set_site(self, site):
"""Set site, which is a Nikola instance."""
super().set_site(site)
self.show_list_as_index = self.site.config['CATEGORY_PAGES_ARE_INDEXES']
self.template_for_single_list = "tagindex.tmpl" if self.show_list_as_index else "tag.tmpl"
self.translation_manager = utils.ClassificationTranslationManager()
# Needed to undo names for CATEGORY_PAGES_FOLLOW_DESTPATH
self.destpath_names_reverse = {}
for lang in self.site.config['TRANSLATIONS']:
self.destpath_names_reverse[lang] = {}
for k, v in self.site.config['CATEGORY_DESTPATH_NAMES'](lang).items():
self.destpath_names_reverse[lang][v] = k
self.destpath_names_reverse = utils.TranslatableSetting(
'_CATEGORY_DESTPATH_NAMES_REVERSE', self.destpath_names_reverse,
self.site.config['TRANSLATIONS'])
def is_enabled(self, lang=None):
"""Return True if this taxonomy is enabled, or False otherwise."""
return True
def classify(self, post, lang):
"""Classify the given post for the given language."""
cat = post.meta('category', lang=lang).strip()
return [cat] if cat else []
def get_classification_friendly_name(self, classification, lang, only_last_component=False):
"""Extract a friendly name from the classification."""
classification = self.extract_hierarchy(classification)
return classification[-1] if classification else ''
def get_overview_path(self, lang, dest_type='page'):
"""Return a path for the list of all classifications."""
if self.site.config['CATEGORIES_INDEX_PATH'](lang):
path = self.site.config['CATEGORIES_INDEX_PATH'](lang)
append_index = 'never'
else:
path = self.site.config['CATEGORY_PATH'](lang)
append_index = 'always'
return [component for component in path.split('/') if component], append_index
def slugify_tag_name(self, name, lang):
"""Slugify a tag name."""
if self.site.config['SLUG_TAG_PATH']:
name = utils.slugify(name, lang)
return name
def slugify_category_name(self, path, lang):
"""Slugify a category name."""
if self.site.config['CATEGORY_OUTPUT_FLAT_HIERARCHY']:
path = path[-1:] # only the leaf
result = [self.slugify_tag_name(part, lang) for part in path]
result[0] = self.site.config['CATEGORY_PREFIX'] + result[0]
if not self.site.config['PRETTY_URLS']:
result = ['-'.join(result)]
return result
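    # Illustrative only (config values are assumptions): with CATEGORY_PREFIX='cat_'
    # and PRETTY_URLS disabled, the hierarchical category ['software', 'python'] is
    # slugified to ['cat_software-python']; with PRETTY_URLS enabled it becomes
    # ['cat_software', 'python'].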
def get_path(self, classification, lang, dest_type='page'):
"""Return a path for the given classification."""
cat_string = '/'.join(classification)
classification_raw = classification # needed to undo CATEGORY_DESTPATH_NAMES
destpath_names_reverse = self.destpath_names_reverse(lang)
if self.site.config['CATEGORY_PAGES_FOLLOW_DESTPATH']:
base_dir = None
for post in self.site.posts_per_category[cat_string]:
if post.category_from_destpath:
base_dir = post.folder_base(lang)
# Handle CATEGORY_DESTPATH_NAMES
if cat_string in destpath_names_reverse:
cat_string = destpath_names_reverse[cat_string]
classification_raw = cat_string.split('/')
break
if not self.site.config['CATEGORY_DESTPATH_TRIM_PREFIX']:
# If prefixes are not trimmed, we'll already have the base_dir in classification_raw
base_dir = ''
if base_dir is None:
# fallback: first POSTS entry + classification
base_dir = self.site.config['POSTS'][0][1]
base_dir_list = base_dir.split(os.sep)
sub_dir = [self.slugify_tag_name(part, lang) for part in classification_raw]
return [_f for _f in (base_dir_list + sub_dir) if _f], 'auto'
else:
return [_f for _f in [self.site.config['CATEGORY_PATH'](lang)] if _f] + self.slugify_category_name(
classification, lang), 'auto'
def extract_hierarchy(self, classification):
"""Given a classification, return a list of parts in the hierarchy."""
return hierarchy_utils.parse_escaped_hierarchical_category_name(classification)
def recombine_classification_from_hierarchy(self, hierarchy):
"""Given a list of parts in the hierarchy, return the classification string."""
return hierarchy_utils.join_hierarchical_category_path(hierarchy)
def provide_overview_context_and_uptodate(self, lang):
"""Provide data for the context and the uptodate list for the list of all classifiations."""
kw = {
'category_path': self.site.config['CATEGORY_PATH'],
'category_prefix': self.site.config['CATEGORY_PREFIX'],
"category_pages_are_indexes": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],
"tzinfo": self.site.tzinfo,
"category_descriptions": self.site.config['CATEGORY_DESCRIPTIONS'],
"category_titles": self.site.config['CATEGORY_TITLES'],
}
context = {
"title": self.site.MESSAGES[lang]["Categories"],
"description": self.site.MESSAGES[lang]["Categories"],
"pagekind": ["list", "tags_page"],
}
kw.update(context)
return context, kw
def provide_context_and_uptodate(self, classification, lang, node=None):
"""Provide data for the context and the uptodate list for the list of the given classifiation."""
cat_path = self.extract_hierarchy(classification)
kw = {
'category_path': self.site.config['CATEGORY_PATH'],
'category_prefix': self.site.config['CATEGORY_PREFIX'],
"category_pages_are_indexes": self.site.config['CATEGORY_PAGES_ARE_INDEXES'],
"tzinfo": self.site.tzinfo,
"category_descriptions": self.site.config['CATEGORY_DESCRIPTIONS'],
"category_titles": self.site.config['CATEGORY_TITLES'],
}
posts = self.site.posts_per_classification[self.classification_name][lang]
if node is None:
children = []
else:
children = [child for child in node.children if len([post for post in posts.get(child.classification_name, []) if self.site.config['SHOW_UNTRANSLATED_POSTS'] or post.is_translation_available(lang)]) > 0]
subcats = [(child.name, self.site.link(self.classification_name, child.classification_name, lang)) for child in children]
friendly_name = self.get_classification_friendly_name(classification, lang)
context = {
"title": self.site.config['CATEGORY_TITLES'].get(lang, {}).get(classification, self.site.MESSAGES[lang]["Posts about %s"] % friendly_name),
"description": self.site.config['CATEGORY_DESCRIPTIONS'].get(lang, {}).get(classification),
"pagekind": ["tag_page", "index" if self.show_list_as_index else "list"],
"tag": friendly_name,
"category": classification,
"category_path": cat_path,
"subcategories": subcats,
}
kw.update(context)
return context, kw
def get_other_language_variants(self, classification, lang, classifications_per_language):
"""Return a list of variants of the same category in other languages."""
return self.translation_manager.get_translations_as_list(classification, lang, classifications_per_language)
def postprocess_posts_per_classification(self, posts_per_classification_per_language, flat_hierarchy_per_lang=None, hierarchy_lookup_per_lang=None):
"""Rearrange, modify or otherwise use the list of posts per classification and per language."""
self.translation_manager.read_from_config(self.site, 'CATEGORY', posts_per_classification_per_language, False)
def should_generate_classification_page(self, classification, post_list, lang):
"""Only generates list of posts for classification if this function returns True."""
if self.site.config["CATEGORY_PAGES_FOLLOW_DESTPATH"]:
# In destpath mode, allow users to replace the default category index with a custom page.
classification_hierarchy = self.extract_hierarchy(classification)
dest_list, _ = self.get_path(classification_hierarchy, lang)
short_destination = os.sep.join(dest_list + [self.site.config["INDEX_FILE"]])
if short_destination in self.site.post_per_file:
return False
return True
def should_generate_atom_for_classification_page(self, classification, post_list, lang):
"""Only generates Atom feed for list of posts for classification if this function returns True."""
return True
def should_generate_rss_for_classification_page(self, classification, post_list, lang):
"""Only generates RSS feed for list of posts for classification if this function returns True."""
return True
|
from copy import deepcopy
from homeassistant.components.axis.const import DOMAIN as AXIS_DOMAIN
from homeassistant.components.light import ATTR_BRIGHTNESS, DOMAIN as LIGHT_DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
from .test_device import API_DISCOVERY_RESPONSE, NAME, setup_axis_integration
from tests.async_mock import patch
API_DISCOVERY_LIGHT_CONTROL = {
"id": "light-control",
"version": "1.1",
"name": "Light Control",
}
EVENT_ON = {
"operation": "Initialized",
"topic": "tns1:Device/tnsaxis:Light/Status",
"source": "id",
"source_idx": "0",
"type": "state",
"value": "ON",
}
EVENT_OFF = {
"operation": "Initialized",
"topic": "tns1:Device/tnsaxis:Light/Status",
"source": "id",
"source_idx": "0",
"type": "state",
"value": "OFF",
}
async def test_platform_manually_configured(hass):
"""Test that nothing happens when platform is manually configured."""
assert await async_setup_component(
hass, LIGHT_DOMAIN, {LIGHT_DOMAIN: {"platform": AXIS_DOMAIN}}
)
assert AXIS_DOMAIN not in hass.data
async def test_no_lights(hass):
"""Test that no light events in Axis results in no light entities."""
await setup_axis_integration(hass)
assert not hass.states.async_entity_ids(LIGHT_DOMAIN)
async def test_lights(hass):
"""Test that lights are loaded properly."""
api_discovery = deepcopy(API_DISCOVERY_RESPONSE)
api_discovery["data"]["apiList"].append(API_DISCOVERY_LIGHT_CONTROL)
with patch.dict(API_DISCOVERY_RESPONSE, api_discovery):
config_entry = await setup_axis_integration(hass)
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
# Add light
with patch(
"axis.light_control.LightControl.get_current_intensity",
return_value={"data": {"intensity": 100}},
), patch(
"axis.light_control.LightControl.get_valid_intensity",
return_value={"data": {"ranges": [{"high": 150}]}},
):
device.api.event.process_event(EVENT_ON)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids(LIGHT_DOMAIN)) == 1
entity_id = f"{LIGHT_DOMAIN}.{NAME}_ir_light_0"
light_0 = hass.states.get(entity_id)
assert light_0.state == STATE_ON
assert light_0.name == f"{NAME} IR Light 0"
# Turn on, set brightness, light already on
with patch(
"axis.light_control.LightControl.activate_light"
) as mock_activate, patch(
"axis.light_control.LightControl.set_manual_intensity"
) as mock_set_intensity, patch(
"axis.light_control.LightControl.get_current_intensity",
return_value={"data": {"intensity": 100}},
):
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: entity_id, ATTR_BRIGHTNESS: 50},
blocking=True,
)
mock_activate.assert_not_awaited()
mock_set_intensity.assert_called_once_with("led0", 29)
# Turn off
with patch(
"axis.light_control.LightControl.deactivate_light"
) as mock_deactivate, patch(
"axis.light_control.LightControl.get_current_intensity",
return_value={"data": {"intensity": 100}},
):
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
mock_deactivate.assert_called_once()
# Event turn off light
device.api.event.process_event(EVENT_OFF)
await hass.async_block_till_done()
light_0 = hass.states.get(entity_id)
assert light_0.state == STATE_OFF
# Turn on, set brightness
with patch(
"axis.light_control.LightControl.activate_light"
) as mock_activate, patch(
"axis.light_control.LightControl.set_manual_intensity"
) as mock_set_intensity, patch(
"axis.light_control.LightControl.get_current_intensity",
return_value={"data": {"intensity": 100}},
):
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
mock_activate.assert_called_once()
mock_set_intensity.assert_not_called()
# Turn off, light already off
with patch(
"axis.light_control.LightControl.deactivate_light"
) as mock_deactivate, patch(
"axis.light_control.LightControl.get_current_intensity",
return_value={"data": {"intensity": 100}},
):
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: entity_id},
blocking=True,
)
mock_deactivate.assert_not_called()
|
from collections import OrderedDict
import os
from typing import List, NamedTuple, Optional
import attr
import voluptuous as vol
from homeassistant import loader
from homeassistant.config import (
CONF_CORE,
CONF_PACKAGES,
CORE_CONFIG_SCHEMA,
YAML_CONFIG_FILE,
_format_config_error,
config_per_platform,
extract_domain_configs,
load_yaml_config_file,
merge_packages_config,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType
from homeassistant.requirements import (
RequirementsNotFound,
async_get_integration_with_requirements,
)
import homeassistant.util.yaml.loader as yaml_loader
class CheckConfigError(NamedTuple):
"""Configuration check error."""
message: str
domain: Optional[str]
config: Optional[ConfigType]
@attr.s
class HomeAssistantConfig(OrderedDict):
"""Configuration result with errors attribute."""
errors: List[CheckConfigError] = attr.ib(factory=list)
def add_error(
self,
message: str,
domain: Optional[str] = None,
config: Optional[ConfigType] = None,
) -> "HomeAssistantConfig":
"""Add a single error."""
self.errors.append(CheckConfigError(str(message), domain, config))
return self
@property
def error_str(self) -> str:
"""Return errors as a string."""
return "\n".join([err.message for err in self.errors])
async def async_check_ha_config_file(hass: HomeAssistant) -> HomeAssistantConfig:
"""Load and check if Home Assistant configuration file is valid.
This method is a coroutine.
"""
result = HomeAssistantConfig()
def _pack_error(
package: str, component: str, config: ConfigType, message: str
) -> None:
"""Handle errors from packages: _log_pkg_error."""
message = f"Package {package} setup failed. Component {component} {message}"
domain = f"homeassistant.packages.{package}.{component}"
pack_config = core_config[CONF_PACKAGES].get(package, config)
result.add_error(message, domain, pack_config)
def _comp_error(ex: Exception, domain: str, config: ConfigType) -> None:
"""Handle errors from components: async_log_exception."""
result.add_error(_format_config_error(ex, domain, config), domain, config)
# Load configuration.yaml
config_path = hass.config.path(YAML_CONFIG_FILE)
try:
if not await hass.async_add_executor_job(os.path.isfile, config_path):
return result.add_error("File configuration.yaml not found.")
config = await hass.async_add_executor_job(load_yaml_config_file, config_path)
except FileNotFoundError:
return result.add_error(f"File not found: {config_path}")
except HomeAssistantError as err:
return result.add_error(f"Error loading {config_path}: {err}")
finally:
yaml_loader.clear_secret_cache()
# Extract and validate core [homeassistant] config
try:
core_config = config.pop(CONF_CORE, {})
core_config = CORE_CONFIG_SCHEMA(core_config)
result[CONF_CORE] = core_config
except vol.Invalid as err:
result.add_error(err, CONF_CORE, core_config)
core_config = {}
# Merge packages
await merge_packages_config(
hass, config, core_config.get(CONF_PACKAGES, {}), _pack_error
)
core_config.pop(CONF_PACKAGES, None)
# Filter out repeating config sections
components = {key.split(" ")[0] for key in config.keys()}
# Process and validate config
for domain in components:
try:
integration = await async_get_integration_with_requirements(hass, domain)
except (RequirementsNotFound, loader.IntegrationNotFound) as ex:
result.add_error(f"Component error: {domain} - {ex}")
continue
try:
component = integration.get_component()
except ImportError as ex:
result.add_error(f"Component error: {domain} - {ex}")
continue
config_schema = getattr(component, "CONFIG_SCHEMA", None)
if config_schema is not None:
try:
config = config_schema(config)
result[domain] = config[domain]
except vol.Invalid as ex:
_comp_error(ex, domain, config)
continue
component_platform_schema = getattr(
component,
"PLATFORM_SCHEMA_BASE",
getattr(component, "PLATFORM_SCHEMA", None),
)
if component_platform_schema is None:
continue
platforms = []
for p_name, p_config in config_per_platform(config, domain):
# Validate component specific platform schema
try:
p_validated = component_platform_schema(p_config)
except vol.Invalid as ex:
_comp_error(ex, domain, config)
continue
            # Not all platform components follow the same pattern for platforms,
            # so if p_name is None we are not going to validate the platform
            # (the automation component is one of them).
if p_name is None:
platforms.append(p_validated)
continue
try:
p_integration = await async_get_integration_with_requirements(
hass, p_name
)
platform = p_integration.get_platform(domain)
except (
loader.IntegrationNotFound,
RequirementsNotFound,
ImportError,
) as ex:
result.add_error(f"Platform error {domain}.{p_name} - {ex}")
continue
# Validate platform specific schema
platform_schema = getattr(platform, "PLATFORM_SCHEMA", None)
if platform_schema is not None:
try:
p_validated = platform_schema(p_validated)
except vol.Invalid as ex:
_comp_error(ex, f"{domain}.{p_name}", p_validated)
continue
platforms.append(p_validated)
# Remove config for current component and add validated config back in.
for filter_comp in extract_domain_configs(config, domain):
del config[filter_comp]
result[domain] = platforms
return result
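# Hedged usage sketch (not part of the original module): one way a caller might
# run the check and report problems. It assumes `result.errors` holds the
# (message, domain, config) entries populated by `add_error` above; the helper
# name is illustrative only.
async def _example_report_config_errors(hass: HomeAssistant) -> None:
    """Illustrative only: print every error found by async_check_ha_config_file."""
    result = await async_check_ha_config_file(hass)
    for error in result.errors:
        print(f"{error.domain or 'general'}: {error.message}")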
|
from homeassistant.components.switch import SwitchEntity
from . import LUTRON_CONTROLLER, LUTRON_DEVICES, LutronDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Lutron switches."""
devs = []
# Add Lutron Switches
for (area_name, device) in hass.data[LUTRON_DEVICES]["switch"]:
dev = LutronSwitch(area_name, device, hass.data[LUTRON_CONTROLLER])
devs.append(dev)
# Add the indicator LEDs for scenes (keypad buttons)
for scene_data in hass.data[LUTRON_DEVICES]["scene"]:
(area_name, keypad_name, scene, led) = scene_data
if led is not None:
led = LutronLed(
area_name, keypad_name, scene, led, hass.data[LUTRON_CONTROLLER]
)
devs.append(led)
add_entities(devs, True)
class LutronSwitch(LutronDevice, SwitchEntity):
"""Representation of a Lutron Switch."""
def __init__(self, area_name, lutron_device, controller):
"""Initialize the switch."""
self._prev_state = None
super().__init__(area_name, lutron_device, controller)
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._lutron_device.level = 100
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._lutron_device.level = 0
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {"lutron_integration_id": self._lutron_device.id}
@property
def is_on(self):
"""Return true if device is on."""
return self._lutron_device.last_level() > 0
def update(self):
"""Call when forcing a refresh of the device."""
if self._prev_state is None:
self._prev_state = self._lutron_device.level > 0
class LutronLed(LutronDevice, SwitchEntity):
"""Representation of a Lutron Keypad LED."""
def __init__(self, area_name, keypad_name, scene_device, led_device, controller):
"""Initialize the switch."""
self._keypad_name = keypad_name
self._scene_name = scene_device.name
super().__init__(area_name, led_device, controller)
def turn_on(self, **kwargs):
"""Turn the LED on."""
self._lutron_device.state = 1
def turn_off(self, **kwargs):
"""Turn the LED off."""
self._lutron_device.state = 0
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
"keypad": self._keypad_name,
"scene": self._scene_name,
"led": self._lutron_device.name,
}
@property
def is_on(self):
"""Return true if device is on."""
return self._lutron_device.last_state
@property
def name(self):
"""Return the name of the LED."""
return f"{self._area_name} {self._keypad_name}: {self._scene_name} LED"
def update(self):
"""Call when forcing a refresh of the device."""
if self._lutron_device.last_state is not None:
return
# The following property getter actually triggers an update in Lutron
self._lutron_device.state # pylint: disable=pointless-statement
|
from datetime import timedelta
import logging
from typing import Any, Callable, Dict, List, Optional
from elgato import Elgato, ElgatoError, Info, State
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_ON,
ATTR_SOFTWARE_VERSION,
ATTR_TEMPERATURE,
DATA_ELGATO_CLIENT,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
PARALLEL_UPDATES = 1
SCAN_INTERVAL = timedelta(seconds=10)
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Elgato Key Light based on a config entry."""
elgato: Elgato = hass.data[DOMAIN][entry.entry_id][DATA_ELGATO_CLIENT]
info = await elgato.info()
async_add_entities([ElgatoLight(entry.entry_id, elgato, info)], True)
class ElgatoLight(LightEntity):
"""Defines a Elgato Key Light."""
def __init__(
self,
entry_id: str,
elgato: Elgato,
info: Info,
):
"""Initialize Elgato Key Light."""
self._brightness: Optional[int] = None
self._info: Info = info
self._state: Optional[bool] = None
self._temperature: Optional[int] = None
self._available = True
self.elgato = elgato
@property
def name(self) -> str:
"""Return the name of the entity."""
# Return the product name, if display name is not set
if not self._info.display_name:
return self._info.product_name
return self._info.display_name
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return self._info.serial_number
@property
def brightness(self) -> Optional[int]:
"""Return the brightness of this light between 1..255."""
return self._brightness
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self._temperature
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 143
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 344
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
@property
def is_on(self) -> bool:
"""Return the state of the light."""
return bool(self._state)
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the light."""
await self.async_turn_on(on=False)
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on the light."""
data = {}
data[ATTR_ON] = True
if ATTR_ON in kwargs:
data[ATTR_ON] = kwargs[ATTR_ON]
if ATTR_COLOR_TEMP in kwargs:
data[ATTR_TEMPERATURE] = kwargs[ATTR_COLOR_TEMP]
if ATTR_BRIGHTNESS in kwargs:
data[ATTR_BRIGHTNESS] = round((kwargs[ATTR_BRIGHTNESS] / 255) * 100)
try:
await self.elgato.light(**data)
except ElgatoError:
_LOGGER.error("An error occurred while updating the Elgato Key Light")
self._available = False
async def async_update(self) -> None:
"""Update Elgato entity."""
try:
state: State = await self.elgato.state()
except ElgatoError:
if self._available:
_LOGGER.error("An error occurred while updating the Elgato Key Light")
self._available = False
return
self._available = True
self._brightness = round((state.brightness * 255) / 100)
self._state = state.on
self._temperature = state.temperature
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this Elgato Key Light."""
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._info.serial_number)},
ATTR_NAME: self._info.product_name,
ATTR_MANUFACTURER: "Elgato",
ATTR_MODEL: self._info.product_name,
ATTR_SOFTWARE_VERSION: f"{self._info.firmware_version} ({self._info.firmware_build_number})",
}
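# Hedged helper sketch (not part of the original module): the brightness scaling
# used in async_turn_on / async_update above. Home Assistant expresses brightness
# on a 0-255 scale, while the Elgato API works in whole percent (0-100); the
# function names are illustrative only.
def _example_ha_to_elgato_brightness(brightness: int) -> int:
    """Convert Home Assistant brightness (0-255) to an Elgato percentage (0-100)."""
    return round((brightness / 255) * 100)


def _example_elgato_to_ha_brightness(percent: int) -> int:
    """Convert an Elgato percentage (0-100) back to Home Assistant brightness (0-255)."""
    return round((percent * 255) / 100)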
|
from datetime import datetime, timedelta
import pytest
import homeassistant.util.dt as dt_util
DEFAULT_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
TEST_TIME_ZONE = "America/Los_Angeles"
def teardown():
"""Stop everything that was started."""
dt_util.set_default_time_zone(DEFAULT_TIME_ZONE)
def test_get_time_zone_retrieves_valid_time_zone():
"""Test getting a time zone."""
time_zone = dt_util.get_time_zone(TEST_TIME_ZONE)
assert time_zone is not None
assert TEST_TIME_ZONE == time_zone.zone
def test_get_time_zone_returns_none_for_garbage_time_zone():
"""Test getting a non existing time zone."""
time_zone = dt_util.get_time_zone("Non existing time zone")
assert time_zone is None
def test_set_default_time_zone():
"""Test setting default time zone."""
time_zone = dt_util.get_time_zone(TEST_TIME_ZONE)
dt_util.set_default_time_zone(time_zone)
# We cannot compare the timezones directly because of DST
assert time_zone.zone == dt_util.now().tzinfo.zone
def test_utcnow():
"""Test the UTC now method."""
assert abs(dt_util.utcnow().replace(tzinfo=None) - datetime.utcnow()) < timedelta(
seconds=1
)
def test_now():
"""Test the now method."""
dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
assert abs(
dt_util.as_utc(dt_util.now()).replace(tzinfo=None) - datetime.utcnow()
) < timedelta(seconds=1)
def test_as_utc_with_naive_object():
"""Test the now method."""
utcnow = datetime.utcnow()
assert utcnow == dt_util.as_utc(utcnow).replace(tzinfo=None)
def test_as_utc_with_utc_object():
"""Test UTC time with UTC object."""
utcnow = dt_util.utcnow()
assert utcnow == dt_util.as_utc(utcnow)
def test_as_utc_with_local_object():
"""Test the UTC time with local object."""
dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
localnow = dt_util.now()
utcnow = dt_util.as_utc(localnow)
assert localnow == utcnow
assert localnow.tzinfo != utcnow.tzinfo
def test_as_local_with_naive_object():
"""Test local time with native object."""
now = dt_util.now()
assert abs(now - dt_util.as_local(datetime.utcnow())) < timedelta(seconds=1)
def test_as_local_with_local_object():
"""Test local with local object."""
now = dt_util.now()
assert now == dt_util.as_local(now)
def test_as_local_with_utc_object():
"""Test local time with UTC object."""
dt_util.set_default_time_zone(dt_util.get_time_zone(TEST_TIME_ZONE))
utcnow = dt_util.utcnow()
localnow = dt_util.as_local(utcnow)
assert localnow == utcnow
assert localnow.tzinfo != utcnow.tzinfo
def test_utc_from_timestamp():
"""Test utc_from_timestamp method."""
assert datetime(1986, 7, 9, tzinfo=dt_util.UTC) == dt_util.utc_from_timestamp(
521251200
)
def test_as_timestamp():
"""Test as_timestamp method."""
ts = 1462401234
utc_dt = dt_util.utc_from_timestamp(ts)
assert ts == dt_util.as_timestamp(utc_dt)
utc_iso = utc_dt.isoformat()
assert ts == dt_util.as_timestamp(utc_iso)
# confirm the ability to handle a string passed in
delta = dt_util.as_timestamp("2016-01-01 12:12:12")
delta -= dt_util.as_timestamp("2016-01-01 12:12:11")
assert delta == 1
def test_parse_datetime_converts_correctly():
"""Test parse_datetime converts strings."""
assert datetime(1986, 7, 9, 12, 0, 0, tzinfo=dt_util.UTC) == dt_util.parse_datetime(
"1986-07-09T12:00:00Z"
)
utcnow = dt_util.utcnow()
assert utcnow == dt_util.parse_datetime(utcnow.isoformat())
def test_parse_datetime_returns_none_for_incorrect_format():
"""Test parse_datetime returns None if incorrect format."""
assert dt_util.parse_datetime("not a datetime string") is None
def test_get_age():
"""Test get_age."""
diff = dt_util.now() - timedelta(seconds=0)
assert dt_util.get_age(diff) == "0 seconds"
diff = dt_util.now() - timedelta(seconds=1)
assert dt_util.get_age(diff) == "1 second"
diff = dt_util.now() - timedelta(seconds=30)
assert dt_util.get_age(diff) == "30 seconds"
diff = dt_util.now() - timedelta(minutes=5)
assert dt_util.get_age(diff) == "5 minutes"
diff = dt_util.now() - timedelta(minutes=1)
assert dt_util.get_age(diff) == "1 minute"
diff = dt_util.now() - timedelta(minutes=300)
assert dt_util.get_age(diff) == "5 hours"
diff = dt_util.now() - timedelta(minutes=320)
assert dt_util.get_age(diff) == "5 hours"
diff = dt_util.now() - timedelta(minutes=1.6 * 60 * 24)
assert dt_util.get_age(diff) == "2 days"
diff = dt_util.now() - timedelta(minutes=2 * 60 * 24)
assert dt_util.get_age(diff) == "2 days"
diff = dt_util.now() - timedelta(minutes=32 * 60 * 24)
assert dt_util.get_age(diff) == "1 month"
diff = dt_util.now() - timedelta(minutes=365 * 60 * 24)
assert dt_util.get_age(diff) == "1 year"
def test_parse_time_expression():
"""Test parse_time_expression."""
assert [x for x in range(60)] == dt_util.parse_time_expression("*", 0, 59)
assert [x for x in range(60)] == dt_util.parse_time_expression(None, 0, 59)
assert [x for x in range(0, 60, 5)] == dt_util.parse_time_expression("/5", 0, 59)
assert [1, 2, 3] == dt_util.parse_time_expression([2, 1, 3], 0, 59)
assert [x for x in range(24)] == dt_util.parse_time_expression("*", 0, 23)
assert [42] == dt_util.parse_time_expression(42, 0, 59)
assert [42] == dt_util.parse_time_expression("42", 0, 59)
with pytest.raises(ValueError):
dt_util.parse_time_expression(61, 0, 60)
def test_find_next_time_expression_time_basic():
"""Test basic stuff for find_next_time_expression_time."""
def find(dt, hour, minute, second):
"""Call test_find_next_time_expression_time."""
seconds = dt_util.parse_time_expression(second, 0, 59)
minutes = dt_util.parse_time_expression(minute, 0, 59)
hours = dt_util.parse_time_expression(hour, 0, 23)
return dt_util.find_next_time_expression_time(dt, seconds, minutes, hours)
assert datetime(2018, 10, 7, 10, 30, 0) == find(
datetime(2018, 10, 7, 10, 20, 0), "*", "/30", 0
)
assert datetime(2018, 10, 7, 10, 30, 0) == find(
datetime(2018, 10, 7, 10, 30, 0), "*", "/30", 0
)
assert datetime(2018, 10, 7, 12, 0, 30) == find(
datetime(2018, 10, 7, 10, 30, 0), "/3", "/30", [30, 45]
)
assert datetime(2018, 10, 8, 5, 0, 0) == find(
datetime(2018, 10, 7, 10, 30, 0), 5, 0, 0
)
assert find(datetime(2018, 10, 7, 10, 30, 0, 999999), "*", "/30", 0) == datetime(
2018, 10, 7, 10, 30, 0
)
def test_find_next_time_expression_time_dst():
"""Test daylight saving time for find_next_time_expression_time."""
tz = dt_util.get_time_zone("Europe/Vienna")
dt_util.set_default_time_zone(tz)
def find(dt, hour, minute, second):
"""Call test_find_next_time_expression_time."""
seconds = dt_util.parse_time_expression(second, 0, 59)
minutes = dt_util.parse_time_expression(minute, 0, 59)
hours = dt_util.parse_time_expression(hour, 0, 23)
return dt_util.find_next_time_expression_time(dt, seconds, minutes, hours)
# Entering DST, clocks are rolled forward
assert tz.localize(datetime(2018, 3, 26, 2, 30, 0)) == find(
tz.localize(datetime(2018, 3, 25, 1, 50, 0)), 2, 30, 0
)
assert tz.localize(datetime(2018, 3, 26, 2, 30, 0)) == find(
tz.localize(datetime(2018, 3, 25, 3, 50, 0)), 2, 30, 0
)
assert tz.localize(datetime(2018, 3, 26, 2, 30, 0)) == find(
tz.localize(datetime(2018, 3, 26, 1, 50, 0)), 2, 30, 0
)
# Leaving DST, clocks are rolled back
assert tz.localize(datetime(2018, 10, 28, 2, 30, 0), is_dst=False) == find(
tz.localize(datetime(2018, 10, 28, 2, 5, 0), is_dst=False), 2, 30, 0
)
assert tz.localize(datetime(2018, 10, 28, 2, 30, 0), is_dst=False) == find(
tz.localize(datetime(2018, 10, 28, 2, 55, 0), is_dst=True), 2, 30, 0
)
assert tz.localize(datetime(2018, 10, 28, 4, 30, 0), is_dst=False) == find(
tz.localize(datetime(2018, 10, 28, 2, 55, 0), is_dst=True), 4, 30, 0
)
assert tz.localize(datetime(2018, 10, 28, 2, 30, 0), is_dst=True) == find(
tz.localize(datetime(2018, 10, 28, 2, 5, 0), is_dst=True), 2, 30, 0
)
assert tz.localize(datetime(2018, 10, 29, 2, 30, 0)) == find(
tz.localize(datetime(2018, 10, 28, 2, 55, 0), is_dst=False), 2, 30, 0
)
|
from __future__ import annotations
from contextlib import contextmanager
from pathlib import Path
from typing import Generator, Union
import apsw
__all__ = ["APSWConnectionWrapper"]
# TODO (mikeshardmind): make this inherit typing_extensions.Protocol
# long term: mypy; short term: removing the pylint disables below
class ProvidesCursor:
def cursor(self) -> apsw.Cursor:
...
class ContextManagerMixin(ProvidesCursor):
@contextmanager
def with_cursor(self) -> Generator[apsw.Cursor, None, None]:
"""
apsw cursors are relatively cheap, and are gc safe.
In most cases, it's fine not to use this.
"""
c = self.cursor() # pylint: disable=assignment-from-no-return
try:
yield c
finally:
c.close()
@contextmanager
def transaction(self) -> Generator[apsw.Cursor, None, None]:
"""
Wraps a cursor as a context manager for a transaction
which is rolled back on unhandled exception,
or committed on non-exception exit
"""
c = self.cursor() # pylint: disable=assignment-from-no-return
try:
c.execute("BEGIN TRANSACTION")
yield c
except Exception:
c.execute("ROLLBACK TRANSACTION")
raise
else:
c.execute("COMMIT TRANSACTION")
finally:
c.close()
class APSWConnectionWrapper(apsw.Connection, ContextManagerMixin):
"""
Provides a few convenience methods, and allows a path object for construction
"""
def __init__(self, filename: Union[Path, str], *args, **kwargs):
super().__init__(str(filename), *args, **kwargs)
# TODO (mikeshardmind): asyncio friendly ThreadedAPSWConnection class
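# Hedged usage sketch (not part of the original module): demonstrates the two
# context managers defined above against an in-memory database; the table and
# column names are illustrative only.
def _example_usage(filename: Union[Path, str] = ":memory:") -> None:
    """Illustrative only: write inside a transaction, then read with a plain cursor."""
    conn = APSWConnectionWrapper(filename)
    with conn.transaction() as cursor:
        cursor.execute("CREATE TABLE IF NOT EXISTS kv (k TEXT PRIMARY KEY, v TEXT)")
        cursor.execute("INSERT OR REPLACE INTO kv (k, v) VALUES (?, ?)", ("answer", "42"))
    with conn.with_cursor() as cursor:
        for key, value in cursor.execute("SELECT k, v FROM kv"):
            print(key, value)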
|
from __future__ import unicode_literals
import itertools
from lib.fun.decorator import magic
from lib.fun.fun import countchecker
def get_chunk_dic(objflag):
countchecker(len(objflag))
@magic
def chunk():
for item in itertools.permutations(objflag):
yield "".join(item)
|
import pytest
from homeassistant.components.mill.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
@pytest.fixture(name="mill_setup", autouse=True)
def mill_setup_fixture():
"""Patch mill setup entry."""
with patch("homeassistant.components.mill.async_setup_entry", return_value=True):
yield
async def test_show_config_form(hass):
"""Test show configuration form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
async def test_create_entry(hass):
"""Test create entry from user input."""
test_data = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pswd",
}
with patch("mill.Mill.connect", return_value=True):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=test_data
)
assert result["type"] == "create_entry"
assert result["title"] == test_data[CONF_USERNAME]
assert result["data"] == test_data
async def test_flow_entry_already_exists(hass):
"""Test user input for config_entry that already exists."""
test_data = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pswd",
}
first_entry = MockConfigEntry(
domain="mill",
data=test_data,
unique_id=test_data[CONF_USERNAME],
)
first_entry.add_to_hass(hass)
with patch("mill.Mill.connect", return_value=True):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=test_data
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_connection_error(hass):
"""Test connection error."""
test_data = {
CONF_USERNAME: "user",
CONF_PASSWORD: "pswd",
}
first_entry = MockConfigEntry(
domain="mill",
data=test_data,
unique_id=test_data[CONF_USERNAME],
)
first_entry.add_to_hass(hass)
with patch("mill.Mill.connect", return_value=False):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}, data=test_data
)
assert result["type"] == "form"
assert result["errors"]["cannot_connect"] == "cannot_connect"
|
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from .const import DOMAIN
SUPPORT_FAN = ["Auto", "Low", "Medium", "High", "Boost 10", "Boost 20", "Boost 30"]
SUPPORT_HVAC = [HVAC_MODE_HEAT, HVAC_MODE_COOL]
HA_STATE_TO_SPIDER = {
HVAC_MODE_COOL: "Cool",
HVAC_MODE_HEAT: "Heat",
HVAC_MODE_OFF: "Idle",
}
SPIDER_STATE_TO_HA = {value: key for key, value in HA_STATE_TO_SPIDER.items()}
async def async_setup_entry(hass, config, async_add_entities):
"""Initialize a Spider thermostat."""
api = hass.data[DOMAIN][config.entry_id]
entities = [SpiderThermostat(api, entity) for entity in api.get_thermostats()]
async_add_entities(entities)
class SpiderThermostat(ClimateEntity):
"""Representation of a thermostat."""
def __init__(self, api, thermostat):
"""Initialize the thermostat."""
self.api = api
self.thermostat = thermostat
@property
def supported_features(self):
"""Return the list of supported features."""
supports = SUPPORT_TARGET_TEMPERATURE
if self.thermostat.has_fan_mode:
supports |= SUPPORT_FAN_MODE
return supports
@property
def unique_id(self):
"""Return the id of the thermostat, if any."""
return self.thermostat.id
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self.thermostat.name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self.thermostat.current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self.thermostat.target_temperature
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self.thermostat.temperature_steps
@property
def min_temp(self):
"""Return the minimum temperature."""
return self.thermostat.minimum_temperature
@property
def max_temp(self):
"""Return the maximum temperature."""
return self.thermostat.maximum_temperature
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return SPIDER_STATE_TO_HA[self.thermostat.operation_mode]
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return SUPPORT_HVAC
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self.thermostat.set_temperature(temperature)
def set_hvac_mode(self, hvac_mode):
"""Set new target operation mode."""
self.thermostat.set_operation_mode(HA_STATE_TO_SPIDER.get(hvac_mode))
@property
def fan_mode(self):
"""Return the fan setting."""
return self.thermostat.current_fan_speed
def set_fan_mode(self, fan_mode):
"""Set fan mode."""
self.thermostat.set_fan_speed(fan_mode)
@property
def fan_modes(self):
"""List of available fan modes."""
return SUPPORT_FAN
def update(self):
"""Get the latest data."""
self.thermostat = self.api.get_thermostat(self.unique_id)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
import mock
from perfkitbenchmarker import resource
from perfkitbenchmarker.providers.gcp import util
import six
_GCLOUD_PATH = 'path/gcloud'
class GceResource(resource.BaseResource):
def __init__(self, **kwargs):
for k, v in six.iteritems(kwargs):
setattr(self, k, v)
def _Create(self):
raise NotImplementedError()
def _Delete(self):
raise NotImplementedError()
class GcloudCommandTestCase(unittest.TestCase):
def setUp(self):
super(GcloudCommandTestCase, self).setUp()
p = mock.patch(util.__name__ + '.FLAGS')
self.mock_flags = p.start()
self.addCleanup(p.stop)
self.mock_flags.gcloud_path = _GCLOUD_PATH
def testCommonFlagsWithoutOptionalFlags(self):
gce_resource = GceResource(project=None)
cmd = util.GcloudCommand(gce_resource, 'compute', 'images', 'list')
self.assertEqual(cmd.GetCommand(), [
'path/gcloud', 'compute', 'images', 'list', '--format', 'json',
'--quiet'
])
def testCommonFlagsWithOptionalFlags(self):
gce_resource = GceResource(project='test-project', zone='test-zone')
cmd = util.GcloudCommand(gce_resource, 'compute', 'images', 'list')
self.assertEqual(cmd.GetCommand(), [
'path/gcloud', 'compute', 'images', 'list', '--format', 'json',
'--project', 'test-project', '--quiet', '--zone', 'test-zone'
])
def testListValue(self):
gce_resource = GceResource(project=None)
cmd = util.GcloudCommand(gce_resource, 'compute', 'instances', 'create')
cmd.flags['local-ssd'] = ['interface=nvme', 'interface=SCSI']
self.assertEqual(cmd.GetCommand(), [
'path/gcloud',
'compute',
'instances',
'create',
'--format',
'json',
'--local-ssd',
'interface=nvme',
'--local-ssd',
'interface=SCSI',
'--quiet',
])
def testIssue(self):
gce_resource = GceResource(project=None)
cmd = util.GcloudCommand(gce_resource, 'compute', 'images', 'list')
mock_issue_return_value = ('issue-return-value', 'stderr', 0)
p = mock.patch(util.__name__ + '.vm_util.IssueCommand',
return_value=mock_issue_return_value)
with p as mock_issue:
return_value = cmd.Issue()
mock_issue.assert_called_with(['path/gcloud', 'compute', 'images', 'list',
'--format', 'json', '--quiet'])
self.assertEqual(return_value, mock_issue_return_value)
def testIssueWarningSuppressed(self):
gce_resource = GceResource(project=None)
cmd = util.GcloudCommand(gce_resource, 'compute', 'images', 'list')
mock_issue_return_value = ('issue-return-value', 'stderr', 0)
p = mock.patch(util.__name__ + '.vm_util.IssueCommand',
return_value=mock_issue_return_value)
with p as mock_issue:
return_value = cmd.Issue(suppress_warning=True)
mock_issue.assert_called_with(
['path/gcloud', 'compute', 'images', 'list', '--format', 'json',
'--quiet'],
suppress_warning=True)
self.assertEqual(return_value, mock_issue_return_value)
def testIssueRetryable(self):
gce_resource = GceResource(project=None)
cmd = util.GcloudCommand(gce_resource, 'compute', 'images', 'list')
mock_issue_return_value = ('issue-return-value', 'stderr', 0)
p = mock.patch(util.__name__ + '.vm_util.IssueRetryableCommand',
return_value=mock_issue_return_value)
with p as mock_issue:
return_value = cmd.IssueRetryable()
mock_issue.assert_called_with(['path/gcloud', 'compute', 'images', 'list',
'--format', 'json', '--quiet'])
self.assertEqual(return_value, mock_issue_return_value)
def testGetRegionFromZone(self):
zone = 'us-central1-xyz'
self.assertEqual(util.GetRegionFromZone(zone), 'us-central1')
if __name__ == '__main__':
unittest.main()
|
import asyncio
from typing import Any, Dict, List, Optional, Tuple, Union
import zigpy.zcl.clusters.closures
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_send
from . import ( # noqa: F401 # pylint: disable=unused-import
base,
closures,
general,
homeautomation,
hvac,
lighting,
lightlink,
manufacturerspecific,
measurement,
protocol,
security,
smartenergy,
)
from .. import (
const,
device as zha_core_device,
discovery as zha_disc,
registries as zha_regs,
typing as zha_typing,
)
ChannelsDict = Dict[str, zha_typing.ChannelType]
class Channels:
"""All discovered channels of a device."""
def __init__(self, zha_device: zha_typing.ZhaDeviceType) -> None:
"""Initialize instance."""
self._pools: List[zha_typing.ChannelPoolType] = []
self._power_config = None
self._identify = None
self._semaphore = asyncio.Semaphore(3)
self._unique_id = str(zha_device.ieee)
self._zdo_channel = base.ZDOChannel(zha_device.device.endpoints[0], zha_device)
self._zha_device = zha_device
@property
def pools(self) -> List["ChannelPool"]:
"""Return channel pools list."""
return self._pools
@property
def power_configuration_ch(self) -> zha_typing.ChannelType:
"""Return power configuration channel."""
return self._power_config
@power_configuration_ch.setter
def power_configuration_ch(self, channel: zha_typing.ChannelType) -> None:
"""Power configuration channel setter."""
if self._power_config is None:
self._power_config = channel
@property
def identify_ch(self) -> zha_typing.ChannelType:
"""Return power configuration channel."""
return self._identify
@identify_ch.setter
def identify_ch(self, channel: zha_typing.ChannelType) -> None:
"""Power configuration channel setter."""
if self._identify is None:
self._identify = channel
@property
def semaphore(self) -> asyncio.Semaphore:
"""Return semaphore for concurrent tasks."""
return self._semaphore
@property
def zdo_channel(self) -> zha_typing.ZDOChannelType:
"""Return ZDO channel."""
return self._zdo_channel
@property
def zha_device(self) -> zha_typing.ZhaDeviceType:
"""Return parent zha device."""
return self._zha_device
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def zigbee_signature(self) -> Dict[int, Dict[str, Any]]:
"""Get the zigbee signatures for the pools in channels."""
return {
signature[0]: signature[1]
for signature in [pool.zigbee_signature for pool in self.pools]
}
@classmethod
def new(cls, zha_device: zha_typing.ZhaDeviceType) -> "Channels":
"""Create new instance."""
channels = cls(zha_device)
for ep_id in sorted(zha_device.device.endpoints):
channels.add_pool(ep_id)
return channels
def add_pool(self, ep_id: int) -> None:
"""Add channels for a specific endpoint."""
if ep_id == 0:
return
self._pools.append(ChannelPool.new(self, ep_id))
async def async_initialize(self, from_cache: bool = False) -> None:
"""Initialize claimed channels."""
await self.zdo_channel.async_initialize(from_cache)
self.zdo_channel.debug("'async_initialize' stage succeeded")
await asyncio.gather(
*(pool.async_initialize(from_cache) for pool in self.pools)
)
async def async_configure(self) -> None:
"""Configure claimed channels."""
await self.zdo_channel.async_configure()
self.zdo_channel.debug("'async_configure' stage succeeded")
await asyncio.gather(*(pool.async_configure() for pool in self.pools))
@callback
def async_new_entity(
self,
component: str,
entity_class: zha_typing.CALLABLE_T,
unique_id: str,
channels: List[zha_typing.ChannelType],
):
"""Signal new entity addition."""
if self.zha_device.status == zha_core_device.DeviceStatus.INITIALIZED:
return
self.zha_device.hass.data[const.DATA_ZHA][component].append(
(entity_class, (unique_id, self.zha_device, channels))
)
@callback
def async_send_signal(self, signal: str, *args: Any) -> None:
"""Send a signal through hass dispatcher."""
async_dispatcher_send(self.zha_device.hass, signal, *args)
@callback
def zha_send_event(self, event_data: Dict[str, Union[str, int]]) -> None:
"""Relay events to hass."""
self.zha_device.hass.bus.async_fire(
"zha_event",
{
const.ATTR_DEVICE_IEEE: str(self.zha_device.ieee),
const.ATTR_UNIQUE_ID: self.unique_id,
**event_data,
},
)
class ChannelPool:
"""All channels of an endpoint."""
def __init__(self, channels: Channels, ep_id: int):
"""Initialize instance."""
self._all_channels: ChannelsDict = {}
self._channels: Channels = channels
self._claimed_channels: ChannelsDict = {}
self._id: int = ep_id
self._client_channels: Dict[str, zha_typing.ClientChannelType] = {}
self._unique_id: str = f"{channels.unique_id}-{ep_id}"
@property
def all_channels(self) -> ChannelsDict:
"""All server channels of an endpoint."""
return self._all_channels
@property
def claimed_channels(self) -> ChannelsDict:
"""Channels in use."""
return self._claimed_channels
@property
def client_channels(self) -> Dict[str, zha_typing.ClientChannelType]:
"""Return a dict of client channels."""
return self._client_channels
@property
def endpoint(self) -> zha_typing.ZigpyEndpointType:
"""Return endpoint of zigpy device."""
return self._channels.zha_device.device.endpoints[self.id]
@property
def id(self) -> int:
"""Return endpoint id."""
return self._id
@property
def nwk(self) -> int:
"""Device NWK for logging."""
return self._channels.zha_device.nwk
@property
def is_mains_powered(self) -> bool:
"""Device is_mains_powered."""
return self._channels.zha_device.is_mains_powered
@property
def manufacturer(self) -> Optional[str]:
"""Return device manufacturer."""
return self._channels.zha_device.manufacturer
@property
def manufacturer_code(self) -> Optional[int]:
"""Return device manufacturer."""
return self._channels.zha_device.manufacturer_code
@property
def hass(self):
"""Return hass."""
return self._channels.zha_device.hass
@property
def model(self) -> Optional[str]:
"""Return device model."""
return self._channels.zha_device.model
@property
def skip_configuration(self) -> bool:
"""Return True if device does not require channel configuration."""
return self._channels.zha_device.skip_configuration
@property
def unique_id(self):
"""Return the unique id for this channel."""
return self._unique_id
@property
def zigbee_signature(self) -> Tuple[int, Dict[str, Any]]:
"""Get the zigbee signature for the endpoint this pool represents."""
return (
self.endpoint.endpoint_id,
{
const.ATTR_PROFILE_ID: self.endpoint.profile_id,
const.ATTR_DEVICE_TYPE: f"0x{self.endpoint.device_type:04x}"
if self.endpoint.device_type is not None
else "",
const.ATTR_IN_CLUSTERS: [
f"0x{cluster_id:04x}"
for cluster_id in sorted(self.endpoint.in_clusters)
],
const.ATTR_OUT_CLUSTERS: [
f"0x{cluster_id:04x}"
for cluster_id in sorted(self.endpoint.out_clusters)
],
},
)
@classmethod
def new(cls, channels: Channels, ep_id: int) -> "ChannelPool":
"""Create new channels for an endpoint."""
pool = cls(channels, ep_id)
pool.add_all_channels()
pool.add_client_channels()
zha_disc.PROBE.discover_entities(pool)
return pool
@callback
def add_all_channels(self) -> None:
"""Create and add channels for all input clusters."""
for cluster_id, cluster in self.endpoint.in_clusters.items():
channel_class = zha_regs.ZIGBEE_CHANNEL_REGISTRY.get(
cluster_id, base.ZigbeeChannel
)
# really ugly hack to deal with xiaomi using the door lock cluster
# incorrectly.
if (
hasattr(cluster, "ep_attribute")
and cluster_id == zigpy.zcl.clusters.closures.DoorLock.cluster_id
and cluster.ep_attribute == "multistate_input"
):
channel_class = general.MultistateInput
# end of ugly hack
channel = channel_class(cluster, self)
if channel.name == const.CHANNEL_POWER_CONFIGURATION:
if (
self._channels.power_configuration_ch
or self._channels.zha_device.is_mains_powered
):
# one power configuration channel per device
continue
self._channels.power_configuration_ch = channel
elif channel.name == const.CHANNEL_IDENTIFY:
self._channels.identify_ch = channel
self.all_channels[channel.id] = channel
@callback
def add_client_channels(self) -> None:
"""Create client channels for all output clusters if in the registry."""
for cluster_id, channel_class in zha_regs.CLIENT_CHANNELS_REGISTRY.items():
cluster = self.endpoint.out_clusters.get(cluster_id)
if cluster is not None:
channel = channel_class(cluster, self)
self.client_channels[channel.id] = channel
async def async_initialize(self, from_cache: bool = False) -> None:
"""Initialize claimed channels."""
await self._execute_channel_tasks("async_initialize", from_cache)
async def async_configure(self) -> None:
"""Configure claimed channels."""
await self._execute_channel_tasks("async_configure")
async def _execute_channel_tasks(self, func_name: str, *args: Any) -> None:
"""Add a throttled channel task and swallow exceptions."""
async def _throttle(coro):
async with self._channels.semaphore:
return await coro
channels = [*self.claimed_channels.values(), *self.client_channels.values()]
tasks = [_throttle(getattr(ch, func_name)(*args)) for ch in channels]
results = await asyncio.gather(*tasks, return_exceptions=True)
for channel, outcome in zip(channels, results):
if isinstance(outcome, Exception):
channel.warning("'%s' stage failed: %s", func_name, str(outcome))
continue
channel.debug("'%s' stage succeeded", func_name)
@callback
def async_new_entity(
self,
component: str,
entity_class: zha_typing.CALLABLE_T,
unique_id: str,
channels: List[zha_typing.ChannelType],
):
"""Signal new entity addition."""
self._channels.async_new_entity(component, entity_class, unique_id, channels)
@callback
def async_send_signal(self, signal: str, *args: Any) -> None:
"""Send a signal through hass dispatcher."""
self._channels.async_send_signal(signal, *args)
@callback
def claim_channels(self, channels: List[zha_typing.ChannelType]) -> None:
"""Claim a channel."""
self.claimed_channels.update({ch.id: ch for ch in channels})
@callback
def unclaimed_channels(self) -> List[zha_typing.ChannelType]:
"""Return a list of available (unclaimed) channels."""
claimed = set(self.claimed_channels)
available = set(self.all_channels)
return [self.all_channels[chan_id] for chan_id in (available - claimed)]
@callback
def zha_send_event(self, event_data: Dict[str, Union[str, int]]) -> None:
"""Relay events to hass."""
self._channels.zha_send_event(
{
const.ATTR_UNIQUE_ID: self.unique_id,
const.ATTR_ENDPOINT_ID: self.id,
**event_data,
}
)
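# Hedged standalone sketch (not part of the original module): the throttling
# pattern used by _execute_channel_tasks above, reduced to plain asyncio. A shared
# semaphore bounds how many coroutines run at once, and return_exceptions=True
# keeps one failing task from cancelling the rest; the helper name is illustrative.
async def _example_throttled_gather(coros, limit: int = 3):
    """Illustrative only: run coroutines with at most `limit` in flight."""
    semaphore = asyncio.Semaphore(limit)

    async def _throttle(coro):
        async with semaphore:
            return await coro

    return await asyncio.gather(*(_throttle(coro) for coro in coros), return_exceptions=True)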
|
import arrow
import warnings
import ipaddress
from flask import current_app
from datetime import datetime as dt
from cryptography import x509
from marshmallow import utils
from marshmallow.fields import Field
from marshmallow.exceptions import ValidationError
from lemur.common import validators
class Hex(Field):
"""
A hex formatted string.
"""
def _serialize(self, value, attr, obj):
if value:
value = hex(int(value))[2:].upper()
return value
class ArrowDateTime(Field):
"""A formatted datetime string in UTC.
Example: ``'2014-12-22T03:12:58.019077+00:00'``
Timezone-naive `datetime` objects are converted to
UTC (+00:00) by :meth:`Schema.dump <marshmallow.Schema.dump>`.
:meth:`Schema.load <marshmallow.Schema.load>` returns `datetime`
objects that are timezone-aware.
:param str format: Either ``"rfc"`` (for RFC822), ``"iso"`` (for ISO8601),
or a date format string. If `None`, defaults to "iso".
:param kwargs: The same keyword arguments that :class:`Field` receives.
"""
DATEFORMAT_SERIALIZATION_FUNCS = {
"iso": utils.isoformat,
"iso8601": utils.isoformat,
"rfc": utils.rfcformat,
"rfc822": utils.rfcformat,
}
DATEFORMAT_DESERIALIZATION_FUNCS = {
"iso": utils.from_iso,
"iso8601": utils.from_iso,
"rfc": utils.from_rfc,
"rfc822": utils.from_rfc,
}
DEFAULT_FORMAT = "iso"
localtime = False
default_error_messages = {
"invalid": "Not a valid datetime.",
"format": '"{input}" cannot be formatted as a datetime.',
}
def __init__(self, format=None, **kwargs):
super(ArrowDateTime, self).__init__(**kwargs)
# Allow this to be None. It may be set later in the ``_serialize``
# or ``_deserialize`` methods. This allows a Schema to dynamically set the
# dateformat, e.g. from a Meta option
self.dateformat = format
def _add_to_schema(self, field_name, schema):
super(ArrowDateTime, self)._add_to_schema(field_name, schema)
self.dateformat = self.dateformat or schema.opts.dateformat
def _serialize(self, value, attr, obj):
if value is None:
return None
self.dateformat = self.dateformat or self.DEFAULT_FORMAT
format_func = self.DATEFORMAT_SERIALIZATION_FUNCS.get(self.dateformat, None)
if format_func:
try:
return format_func(value, localtime=self.localtime)
except (AttributeError, ValueError) as err:
self.fail("format", input=value)
else:
return value.strftime(self.dateformat)
def _deserialize(self, value, attr, data):
if not value: # Falsy values, e.g. '', None, [] are not valid
raise self.fail("invalid")
self.dateformat = self.dateformat or self.DEFAULT_FORMAT
func = self.DATEFORMAT_DESERIALIZATION_FUNCS.get(self.dateformat)
if func:
try:
return arrow.get(func(value))
except (TypeError, AttributeError, ValueError):
raise self.fail("invalid")
elif self.dateformat:
try:
return dt.strptime(value, self.dateformat)
except (TypeError, AttributeError, ValueError):
raise self.fail("invalid")
elif utils.dateutil_available:
try:
return arrow.get(utils.from_datestring(value))
except TypeError:
raise self.fail("invalid")
else:
warnings.warn(
"It is recommended that you install python-dateutil "
"for improved datetime deserialization."
)
raise self.fail("invalid")
class KeyUsageExtension(Field):
"""An x509.KeyUsage ExtensionType object
Dict of KeyUsage names/values are deserialized into an x509.KeyUsage object
and back.
:param kwargs: The same keyword arguments that :class:`Field` receives.
"""
def _serialize(self, value, attr, obj):
return {
"useDigitalSignature": value.digital_signature,
"useNonRepudiation": value.content_commitment,
"useKeyEncipherment": value.key_encipherment,
"useDataEncipherment": value.data_encipherment,
"useKeyAgreement": value.key_agreement,
"useKeyCertSign": value.key_cert_sign,
"useCRLSign": value.crl_sign,
"useEncipherOnly": value._encipher_only,
"useDecipherOnly": value._decipher_only,
}
def _deserialize(self, value, attr, data):
keyusages = {
"digital_signature": False,
"content_commitment": False,
"key_encipherment": False,
"data_encipherment": False,
"key_agreement": False,
"key_cert_sign": False,
"crl_sign": False,
"encipher_only": False,
"decipher_only": False,
}
for k, v in value.items():
if k == "useDigitalSignature":
keyusages["digital_signature"] = v
elif k == "useNonRepudiation":
keyusages["content_commitment"] = v
elif k == "useKeyEncipherment":
keyusages["key_encipherment"] = v
elif k == "useDataEncipherment":
keyusages["data_encipherment"] = v
elif k == "useKeyCertSign":
keyusages["key_cert_sign"] = v
elif k == "useCRLSign":
keyusages["crl_sign"] = v
elif k == "useKeyAgreement":
keyusages["key_agreement"] = v
elif k == "useEncipherOnly" and v:
keyusages["encipher_only"] = True
keyusages["key_agreement"] = True
elif k == "useDecipherOnly" and v:
keyusages["decipher_only"] = True
keyusages["key_agreement"] = True
if keyusages["encipher_only"] and keyusages["decipher_only"]:
raise ValidationError(
"A certificate cannot have both Encipher Only and Decipher Only Extended Key Usages."
)
return x509.KeyUsage(
digital_signature=keyusages["digital_signature"],
content_commitment=keyusages["content_commitment"],
key_encipherment=keyusages["key_encipherment"],
data_encipherment=keyusages["data_encipherment"],
key_agreement=keyusages["key_agreement"],
key_cert_sign=keyusages["key_cert_sign"],
crl_sign=keyusages["crl_sign"],
encipher_only=keyusages["encipher_only"],
decipher_only=keyusages["decipher_only"],
)
class ExtendedKeyUsageExtension(Field):
"""An x509.ExtendedKeyUsage ExtensionType object
Dict of ExtendedKeyUsage names/values are deserialized into an x509.ExtendedKeyUsage object
and back.
:param kwargs: The same keyword arguments that :class:`Field` receives.
"""
def _serialize(self, value, attr, obj):
usages = value._usages
usage_list = {}
for usage in usages:
if usage == x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH:
usage_list["useClientAuthentication"] = True
elif usage == x509.oid.ExtendedKeyUsageOID.SERVER_AUTH:
usage_list["useServerAuthentication"] = True
elif usage == x509.oid.ExtendedKeyUsageOID.CODE_SIGNING:
usage_list["useCodeSigning"] = True
elif usage == x509.oid.ExtendedKeyUsageOID.EMAIL_PROTECTION:
usage_list["useEmailProtection"] = True
elif usage == x509.oid.ExtendedKeyUsageOID.TIME_STAMPING:
usage_list["useTimestamping"] = True
elif usage == x509.oid.ExtendedKeyUsageOID.OCSP_SIGNING:
usage_list["useOCSPSigning"] = True
elif usage.dotted_string == "1.3.6.1.5.5.7.3.14":
usage_list["useEapOverLAN"] = True
elif usage.dotted_string == "1.3.6.1.5.5.7.3.13":
usage_list["useEapOverPPP"] = True
elif usage.dotted_string == "1.3.6.1.4.1.311.20.2.2":
usage_list["useSmartCardLogon"] = True
else:
current_app.logger.warning(
"Unable to serialize ExtendedKeyUsage with OID: {usage}".format(
usage=usage.dotted_string
)
)
return usage_list
def _deserialize(self, value, attr, data):
usage_oids = []
for k, v in value.items():
if k == "useClientAuthentication" and v:
usage_oids.append(x509.oid.ExtendedKeyUsageOID.CLIENT_AUTH)
elif k == "useServerAuthentication" and v:
usage_oids.append(x509.oid.ExtendedKeyUsageOID.SERVER_AUTH)
elif k == "useCodeSigning" and v:
usage_oids.append(x509.oid.ExtendedKeyUsageOID.CODE_SIGNING)
elif k == "useEmailProtection" and v:
usage_oids.append(x509.oid.ExtendedKeyUsageOID.EMAIL_PROTECTION)
elif k == "useTimestamping" and v:
usage_oids.append(x509.oid.ExtendedKeyUsageOID.TIME_STAMPING)
elif k == "useOCSPSigning" and v:
usage_oids.append(x509.oid.ExtendedKeyUsageOID.OCSP_SIGNING)
elif k == "useEapOverLAN" and v:
usage_oids.append(x509.oid.ObjectIdentifier("1.3.6.1.5.5.7.3.14"))
elif k == "useEapOverPPP" and v:
usage_oids.append(x509.oid.ObjectIdentifier("1.3.6.1.5.5.7.3.13"))
elif k == "useSmartCardLogon" and v:
usage_oids.append(x509.oid.ObjectIdentifier("1.3.6.1.4.1.311.20.2.2"))
else:
current_app.logger.warning(
"Unable to deserialize ExtendedKeyUsage with name: {key}".format(
key=k
)
)
return x509.ExtendedKeyUsage(usage_oids)
class BasicConstraintsExtension(Field):
"""An x509.BasicConstraints ExtensionType object
Dict of CA boolean and a path_length integer names/values are deserialized into an x509.BasicConstraints object
and back.
:param kwargs: The same keyword arguments that :class:`Field` receives.
"""
def _serialize(self, value, attr, obj):
return {"ca": value.ca, "path_length": value.path_length}
def _deserialize(self, value, attr, data):
ca = value.get("ca", False)
path_length = value.get("path_length", None)
if ca:
if not isinstance(path_length, (type(None), int)):
raise ValidationError(
"A CA certificate path_length (for BasicConstraints) must be None or an integer."
)
return x509.BasicConstraints(ca=True, path_length=path_length)
else:
return x509.BasicConstraints(ca=False, path_length=None)
class SubjectAlternativeNameExtension(Field):
"""An x509.SubjectAlternativeName ExtensionType object
Lists of general name dicts (``nameType``/``value`` pairs) are deserialized into an x509.SubjectAlternativeName object
and back.
:param kwargs: The same keyword arguments that :class:`Field` receives.
"""
def _serialize(self, value, attr, obj):
general_names = []
name_type = None
if value:
for name in value._general_names:
value = name.value
if isinstance(name, x509.DNSName):
name_type = "DNSName"
elif isinstance(name, x509.IPAddress):
if isinstance(value, ipaddress.IPv4Network):
name_type = "IPNetwork"
else:
name_type = "IPAddress"
value = str(value)
elif isinstance(name, x509.UniformResourceIdentifier):
name_type = "uniformResourceIdentifier"
elif isinstance(name, x509.DirectoryName):
name_type = "directoryName"
elif isinstance(name, x509.RFC822Name):
name_type = "rfc822Name"
elif isinstance(name, x509.RegisteredID):
name_type = "registeredID"
value = value.dotted_string
else:
current_app.logger.warning(
"Unknown SubAltName type: {name}".format(name=name)
)
continue
general_names.append({"nameType": name_type, "value": value})
return general_names
def _deserialize(self, value, attr, data):
general_names = []
for name in value:
if name["nameType"] == "DNSName":
validators.sensitive_domain(name["value"])
general_names.append(x509.DNSName(name["value"]))
elif name["nameType"] == "IPAddress":
general_names.append(
x509.IPAddress(ipaddress.ip_address(name["value"]))
)
elif name["nameType"] == "IPNetwork":
general_names.append(
x509.IPAddress(ipaddress.ip_network(name["value"]))
)
elif name["nameType"] == "uniformResourceIdentifier":
general_names.append(x509.UniformResourceIdentifier(name["value"]))
elif name["nameType"] == "directoryName":
# TODO: Need to parse a string in name['value'] like:
# 'CN=Common Name, O=Org Name, OU=OrgUnit Name, C=US, ST=ST, L=City/[email protected]'
# or
# 'CN=Common Name/O=Org Name/OU=OrgUnit Name/C=US/ST=NH/L=City/[email protected]'
# and turn it into something like:
# x509.Name([
# x509.NameAttribute(x509.OID_COMMON_NAME, "Common Name"),
# x509.NameAttribute(x509.OID_ORGANIZATION_NAME, "Org Name"),
# x509.NameAttribute(x509.OID_ORGANIZATIONAL_UNIT_NAME, "OrgUnit Name"),
# x509.NameAttribute(x509.OID_COUNTRY_NAME, "US"),
# x509.NameAttribute(x509.OID_STATE_OR_PROVINCE_NAME, "NH"),
# x509.NameAttribute(x509.OID_LOCALITY_NAME, "City"),
# x509.NameAttribute(x509.OID_EMAIL_ADDRESS, "[email protected]")
# ]
# general_names.append(x509.DirectoryName(x509.Name(BLAH))))
pass
elif name["nameType"] == "rfc822Name":
general_names.append(x509.RFC822Name(name["value"]))
elif name["nameType"] == "registeredID":
general_names.append(
x509.RegisteredID(x509.ObjectIdentifier(name["value"]))
)
elif name["nameType"] == "otherName":
# This has two inputs (type and value), so it doesn't fit the mold of the rest of these GeneralName entities.
# general_names.append(x509.OtherName(name['type'], bytes(name['value']), 'utf-8'))
pass
elif name["nameType"] == "x400Address":
# The Python Cryptography library doesn't support x400Address types (yet?)
pass
elif name["nameType"] == "EDIPartyName":
# The Python Cryptography library doesn't support EDIPartyName types (yet?)
pass
else:
current_app.logger.warning(
"Unable to deserialize SubAltName with type: {name_type}".format(
name_type=name["nameType"]
)
)
return x509.SubjectAlternativeName(general_names)
|
import pytest
import voluptuous_serialize
import homeassistant.components.automation as automation
from homeassistant.components.climate import DOMAIN, const, device_trigger
from homeassistant.const import TEMP_CELSIUS
from homeassistant.helpers import config_validation as cv, device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a climate device."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
entity_id = f"{DOMAIN}.test_5678"
hass.states.async_set(
entity_id,
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_ACTION: const.CURRENT_HVAC_IDLE,
const.ATTR_CURRENT_HUMIDITY: 23,
const.ATTR_CURRENT_TEMPERATURE: 18,
},
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "hvac_mode_changed",
"device_id": device_entry.id,
"entity_id": entity_id,
},
{
"platform": "device",
"domain": DOMAIN,
"type": "current_temperature_changed",
"device_id": device_entry.id,
"entity_id": entity_id,
},
{
"platform": "device",
"domain": DOMAIN,
"type": "current_humidity_changed",
"device_id": device_entry.id,
"entity_id": entity_id,
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_COOL,
{
const.ATTR_HVAC_ACTION: const.CURRENT_HVAC_IDLE,
const.ATTR_CURRENT_HUMIDITY: 23,
const.ATTR_CURRENT_TEMPERATURE: 18,
},
)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "hvac_mode_changed",
"to": const.HVAC_MODE_AUTO,
},
"action": {
"service": "test.automation",
"data_template": {"some": "hvac_mode_changed"},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "current_temperature_changed",
"above": 20,
},
"action": {
"service": "test.automation",
"data_template": {"some": "current_temperature_changed"},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "climate.entity",
"type": "current_humidity_changed",
"below": 10,
},
"action": {
"service": "test.automation",
"data_template": {"some": "current_humidity_changed"},
},
},
]
},
)
# Fake that the HVAC mode is changing
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_AUTO,
{
const.ATTR_HVAC_ACTION: const.CURRENT_HVAC_COOL,
const.ATTR_CURRENT_HUMIDITY: 23,
const.ATTR_CURRENT_TEMPERATURE: 18,
},
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "hvac_mode_changed"
# Fake that the temperature is changing
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_AUTO,
{
const.ATTR_HVAC_ACTION: const.CURRENT_HVAC_COOL,
const.ATTR_CURRENT_HUMIDITY: 23,
const.ATTR_CURRENT_TEMPERATURE: 23,
},
)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "current_temperature_changed"
# Fake that the humidity is changing
hass.states.async_set(
"climate.entity",
const.HVAC_MODE_AUTO,
{
const.ATTR_HVAC_ACTION: const.CURRENT_HVAC_COOL,
const.ATTR_CURRENT_HUMIDITY: 7,
const.ATTR_CURRENT_TEMPERATURE: 23,
},
)
await hass.async_block_till_done()
assert len(calls) == 3
assert calls[2].data["some"] == "current_humidity_changed"
async def test_get_trigger_capabilities_hvac_mode(hass):
"""Test we get the expected capabilities from a climate trigger."""
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": "climate",
"type": "hvac_mode_changed",
"entity_id": "climate.upstairs",
"to": "heat",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [{"name": "for", "optional": True, "type": "positive_time_period_dict"}]
@pytest.mark.parametrize(
"type", ["current_temperature_changed", "current_humidity_changed"]
)
async def test_get_trigger_capabilities_temp_humid(hass, type):
"""Test we get the expected capabilities from a climate trigger."""
capabilities = await device_trigger.async_get_trigger_capabilities(
hass,
{
"platform": "device",
"domain": "climate",
"type": "current_temperature_changed",
"entity_id": "climate.upstairs",
"above": "23",
},
)
assert capabilities and "extra_fields" in capabilities
assert voluptuous_serialize.convert(
capabilities["extra_fields"], custom_serializer=cv.custom_serializer
) == [
{
"description": {"suffix": TEMP_CELSIUS},
"name": "above",
"optional": True,
"type": "float",
},
{
"description": {"suffix": TEMP_CELSIUS},
"name": "below",
"optional": True,
"type": "float",
},
{"name": "for", "optional": True, "type": "positive_time_period_dict"},
]
|
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.components.device_tracker.config_entry import TrackerEntity
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
)
from homeassistant.core import callback
from homeassistant.helpers import device_registry
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as GPL_DOMAIN, TRACKER_UPDATE
from .const import (
ATTR_ACTIVITY,
ATTR_ALTITUDE,
ATTR_DIRECTION,
ATTR_PROVIDER,
ATTR_SPEED,
)
async def async_setup_entry(hass: HomeAssistantType, entry, async_add_entities):
"""Configure a dispatcher connection based on a config entry."""
@callback
def _receive_data(device, gps, battery, accuracy, attrs):
"""Receive set location."""
if device in hass.data[GPL_DOMAIN]["devices"]:
return
hass.data[GPL_DOMAIN]["devices"].add(device)
async_add_entities([GPSLoggerEntity(device, gps, battery, accuracy, attrs)])
hass.data[GPL_DOMAIN]["unsub_device_tracker"][
entry.entry_id
] = async_dispatcher_connect(hass, TRACKER_UPDATE, _receive_data)
# Restore previously loaded devices
dev_reg = await device_registry.async_get_registry(hass)
dev_ids = {
identifier[1]
for device in dev_reg.devices.values()
for identifier in device.identifiers
if identifier[0] == GPL_DOMAIN
}
if not dev_ids:
return
entities = []
for dev_id in dev_ids:
hass.data[GPL_DOMAIN]["devices"].add(dev_id)
entity = GPSLoggerEntity(dev_id, None, None, None, None)
entities.append(entity)
async_add_entities(entities)
class GPSLoggerEntity(TrackerEntity, RestoreEntity):
"""Represent a tracked device."""
def __init__(self, device, location, battery, accuracy, attributes):
"""Set up Geofency entity."""
self._accuracy = accuracy
self._attributes = attributes
self._name = device
self._battery = battery
self._location = location
self._unsub_dispatcher = None
self._unique_id = device
@property
def battery_level(self):
"""Return battery value of the device."""
return self._battery
@property
def device_state_attributes(self):
"""Return device specific attributes."""
return self._attributes
@property
def latitude(self):
"""Return latitude value of the device."""
return self._location[0]
@property
def longitude(self):
"""Return longitude value of the device."""
return self._location[1]
@property
def location_accuracy(self):
"""Return the gps accuracy of the device."""
return self._accuracy
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return the unique ID."""
return self._unique_id
@property
def device_info(self):
"""Return the device info."""
return {"name": self._name, "identifiers": {(GPL_DOMAIN, self._unique_id)}}
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_GPS
async def async_added_to_hass(self):
"""Register state update callback."""
await super().async_added_to_hass()
self._unsub_dispatcher = async_dispatcher_connect(
self.hass, TRACKER_UPDATE, self._async_receive_data
)
# don't restore if we got created with data
if self._location is not None:
return
state = await self.async_get_last_state()
if state is None:
self._location = (None, None)
self._accuracy = None
self._attributes = {
ATTR_ALTITUDE: None,
ATTR_ACTIVITY: None,
ATTR_DIRECTION: None,
ATTR_PROVIDER: None,
ATTR_SPEED: None,
}
self._battery = None
return
attr = state.attributes
self._location = (attr.get(ATTR_LATITUDE), attr.get(ATTR_LONGITUDE))
self._accuracy = attr.get(ATTR_GPS_ACCURACY)
self._attributes = {
ATTR_ALTITUDE: attr.get(ATTR_ALTITUDE),
ATTR_ACTIVITY: attr.get(ATTR_ACTIVITY),
ATTR_DIRECTION: attr.get(ATTR_DIRECTION),
ATTR_PROVIDER: attr.get(ATTR_PROVIDER),
ATTR_SPEED: attr.get(ATTR_SPEED),
}
self._battery = attr.get(ATTR_BATTERY_LEVEL)
async def async_will_remove_from_hass(self):
"""Clean up after entity before removal."""
await super().async_will_remove_from_hass()
self._unsub_dispatcher()
@callback
def _async_receive_data(self, device, location, battery, accuracy, attributes):
"""Mark the device as seen."""
if device != self.name:
return
self._location = location
self._battery = battery
self._accuracy = accuracy
self._attributes.update(attributes)
self.async_write_ha_state()
|
import logging
from Plugwise_Smile.Smile import Smile
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT, CONF_SCAN_INTERVAL
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.typing import DiscoveryInfoType
from .const import ( # pylint:disable=unused-import
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
ZEROCONF_MAP,
)
_LOGGER = logging.getLogger(__name__)
def _base_gw_schema(discovery_info):
"""Generate base schema for gateways."""
base_gw_schema = {}
if not discovery_info:
base_gw_schema[vol.Required(CONF_HOST)] = str
base_gw_schema[vol.Optional(CONF_PORT, default=DEFAULT_PORT)] = int
base_gw_schema[vol.Required(CONF_PASSWORD)] = str
return vol.Schema(base_gw_schema)
async def validate_gw_input(hass: core.HomeAssistant, data):
"""
    Validate whether the user input allows us to connect to the gateway.
Data has the keys from _base_gw_schema() with values provided by the user.
"""
websession = async_get_clientsession(hass, verify_ssl=False)
api = Smile(
host=data[CONF_HOST],
password=data[CONF_PASSWORD],
port=data[CONF_PORT],
timeout=30,
websession=websession,
)
try:
await api.connect()
except Smile.InvalidAuthentication as err:
raise InvalidAuth from err
except Smile.PlugwiseError as err:
raise CannotConnect from err
return api
# PLACEHOLDER USB connection validation
class PlugwiseConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Plugwise Smile."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the Plugwise config flow."""
self.discovery_info = {}
async def async_step_zeroconf(self, discovery_info: DiscoveryInfoType):
"""Prepare configuration for a discovered Plugwise Smile."""
self.discovery_info = discovery_info
_properties = self.discovery_info.get("properties")
unique_id = self.discovery_info.get("hostname").split(".")[0]
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
_product = _properties.get("product", None)
_version = _properties.get("version", "n/a")
_name = f"{ZEROCONF_MAP.get(_product, _product)} v{_version}"
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context["title_placeholders"] = {
CONF_HOST: discovery_info[CONF_HOST],
CONF_PORT: discovery_info.get(CONF_PORT, DEFAULT_PORT),
"name": _name,
}
return await self.async_step_user()
async def async_step_user_gateway(self, user_input=None):
"""Handle the initial step for gateways."""
errors = {}
if user_input is not None:
if self.discovery_info:
user_input[CONF_HOST] = self.discovery_info[CONF_HOST]
user_input[CONF_PORT] = self.discovery_info.get(CONF_PORT, DEFAULT_PORT)
for entry in self._async_current_entries():
if entry.data.get(CONF_HOST) == user_input[CONF_HOST]:
return self.async_abort(reason="already_configured")
try:
api = await validate_gw_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if not errors:
await self.async_set_unique_id(
api.smile_hostname or api.gateway_id, raise_on_progress=False
)
self._abort_if_unique_id_configured()
return self.async_create_entry(title=api.smile_name, data=user_input)
return self.async_show_form(
step_id="user_gateway",
data_schema=_base_gw_schema(self.discovery_info),
errors=errors or {},
)
    # PLACEHOLDER USB async_step_user_usb and async_step_user_usb_manual_path
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
# PLACEHOLDER USB vs Gateway Logic
return await self.async_step_user_gateway()
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return PlugwiseOptionsFlowHandler(config_entry)
class PlugwiseOptionsFlowHandler(config_entries.OptionsFlow):
"""Plugwise option flow."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the Plugwise options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
api = self.hass.data[DOMAIN][self.config_entry.entry_id]["api"]
interval = DEFAULT_SCAN_INTERVAL[api.smile_type]
data = {
vol.Optional(
CONF_SCAN_INTERVAL,
default=self.config_entry.options.get(CONF_SCAN_INTERVAL, interval),
): int
}
return self.async_show_form(step_id="init", data_schema=vol.Schema(data))
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
import logging
import voluptuous as vol
from homeassistant.components import zabbix
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
_CONF_TRIGGERS = "triggers"
_CONF_HOSTIDS = "hostids"
_CONF_INDIVIDUAL = "individual"
_ZABBIX_ID_LIST_SCHEMA = vol.Schema([int])
_ZABBIX_TRIGGER_SCHEMA = vol.Schema(
{
vol.Optional(_CONF_HOSTIDS, default=[]): _ZABBIX_ID_LIST_SCHEMA,
vol.Optional(_CONF_INDIVIDUAL, default=False): cv.boolean,
vol.Optional(CONF_NAME): cv.string,
}
)
# SCAN_INTERVAL = 30
#
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(_CONF_TRIGGERS): vol.Any(_ZABBIX_TRIGGER_SCHEMA, None)}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Zabbix sensor platform."""
sensors = []
zapi = hass.data[zabbix.DOMAIN]
if not zapi:
_LOGGER.error("zapi is None. Zabbix integration hasn't been loaded?")
return False
_LOGGER.info("Connected to Zabbix API Version %s", zapi.api_version())
trigger_conf = config.get(_CONF_TRIGGERS)
# The following code seems overly complex. Need to think about this...
if trigger_conf:
hostids = trigger_conf.get(_CONF_HOSTIDS)
individual = trigger_conf.get(_CONF_INDIVIDUAL)
name = trigger_conf.get(CONF_NAME)
if individual:
# Individual sensor per host
if not hostids:
# We need hostids
_LOGGER.error("If using 'individual', must specify hostids")
return False
for hostid in hostids:
_LOGGER.debug("Creating Zabbix Sensor: %s", str(hostid))
sensor = ZabbixSingleHostTriggerCountSensor(zapi, [hostid], name)
sensors.append(sensor)
else:
if not hostids:
# Single sensor that provides the total count of triggers.
_LOGGER.debug("Creating Zabbix Sensor")
sensor = ZabbixTriggerCountSensor(zapi, name)
else:
# Single sensor that sums total issues for all hosts
_LOGGER.debug("Creating Zabbix Sensor group: %s", str(hostids))
sensor = ZabbixMultipleHostTriggerCountSensor(zapi, hostids, name)
sensors.append(sensor)
else:
# Single sensor that provides the total count of triggers.
_LOGGER.debug("Creating Zabbix Sensor")
sensor = ZabbixTriggerCountSensor(zapi)
sensors.append(sensor)
add_entities(sensors)
class ZabbixTriggerCountSensor(Entity):
"""Get the active trigger count for all Zabbix monitored hosts."""
def __init__(self, zApi, name="Zabbix"):
"""Initialize Zabbix sensor."""
self._name = name
self._zapi = zApi
self._state = None
self._attributes = {}
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return "issues"
def _call_zabbix_api(self):
return self._zapi.trigger.get(
output="extend", only_true=1, monitored=1, filter={"value": 1}
)
def update(self):
"""Update the sensor."""
_LOGGER.debug("Updating ZabbixTriggerCountSensor: %s", str(self._name))
triggers = self._call_zabbix_api()
self._state = len(triggers)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._attributes
class ZabbixSingleHostTriggerCountSensor(ZabbixTriggerCountSensor):
"""Get the active trigger count for a single Zabbix monitored host."""
def __init__(self, zApi, hostid, name=None):
"""Initialize Zabbix sensor."""
super().__init__(zApi, name)
self._hostid = hostid
if not name:
self._name = self._zapi.host.get(hostids=self._hostid, output="extend")[0][
"name"
]
self._attributes["Host ID"] = self._hostid
def _call_zabbix_api(self):
return self._zapi.trigger.get(
hostids=self._hostid,
output="extend",
only_true=1,
monitored=1,
filter={"value": 1},
)
class ZabbixMultipleHostTriggerCountSensor(ZabbixTriggerCountSensor):
"""Get the active trigger count for specified Zabbix monitored hosts."""
def __init__(self, zApi, hostids, name=None):
"""Initialize Zabbix sensor."""
super().__init__(zApi, name)
self._hostids = hostids
if not name:
host_names = self._zapi.host.get(hostids=self._hostids, output="extend")
self._name = " ".join(name["name"] for name in host_names)
self._attributes["Host IDs"] = self._hostids
def _call_zabbix_api(self):
return self._zapi.trigger.get(
hostids=self._hostids,
output="extend",
only_true=1,
monitored=1,
filter={"value": 1},
)
|
import io
import os.path
SCHEME = 'file'
URI_EXAMPLES = (
'./local/path/file',
'~/local/path/file',
'local/path/file',
'./local/path/file.gz',
'file:///home/user/file',
'file:///home/user/file.bz2',
)
open = io.open
def parse_uri(uri_as_string):
local_path = extract_local_path(uri_as_string)
return dict(scheme=SCHEME, uri_path=local_path)
def open_uri(uri_as_string, mode, transport_params):
parsed_uri = parse_uri(uri_as_string)
fobj = io.open(parsed_uri['uri_path'], mode)
return fobj
def extract_local_path(uri_as_string):
if uri_as_string.startswith('file://'):
local_path = uri_as_string.replace('file://', '', 1)
else:
local_path = uri_as_string
return os.path.expanduser(local_path)
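# A minimal usage sketch of the transport above (the file name is hypothetical;
# transport_params are ignored by this transport, so an empty dict is passed):
if __name__ == '__main__':
    demo_path = '/tmp/smart_open_local_demo.txt'
    print(parse_uri('file://' + demo_path))  # {'scheme': 'file', 'uri_path': '/tmp/...'}
    with open_uri('file://' + demo_path, 'w', {}) as fout:
        fout.write('hello')
    with open_uri(demo_path, 'r', {}) as fin:
        print(fin.read())  # -> hello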
|
import logging
import os
import sys
import numpy as np
import unittest
from gensim.models.wrappers import varembed
from gensim.test.utils import datapath
try:
import morfessor # noqa: F401
except ImportError:
raise unittest.SkipTest("Test requires Morfessor to be installed, which is not available")
varembed_model_vector_file = datapath('varembed_vectors.pkl')
varembed_model_morfessor_file = datapath('varembed_morfessor.bin')
AZURE = bool(os.environ.get('PIPELINE_WORKSPACE'))
@unittest.skipIf(AZURE, 'see <https://github.com/RaRe-Technologies/gensim/pull/2836>')
class TestVarembed(unittest.TestCase):
def testLoadVarembedFormat(self):
"""Test storing/loading the entire model."""
model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
self.model_sanity(model)
def testSimilarity(self):
"""Test n_similarity for vocab words"""
model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
self.assertTrue(model.n_similarity(['result'], ['targets']) == model.similarity('result', 'targets'))
def model_sanity(self, model):
"""Check vocabulary and vector size"""
self.assertEqual(model.vectors.shape, (model.vocab_size, model.vector_size))
self.assertTrue(model.vectors.shape[0] == len(model))
@unittest.skipIf(sys.version_info < (2, 7), 'Supported only on Python 2.7 and above')
def testAddMorphemesToEmbeddings(self):
"""Test add morphemes to Embeddings
Test only in Python 2.7 and above. Add Morphemes is not supported in earlier versions.
"""
model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
model_with_morphemes = varembed.VarEmbed.load_varembed_format(
vectors=varembed_model_vector_file, morfessor_model=varembed_model_morfessor_file)
self.model_sanity(model_with_morphemes)
# Check syn0 is different for both models.
self.assertFalse(np.allclose(model.vectors, model_with_morphemes.vectors))
def testLookup(self):
"""Test lookup of vector for a particular word and list"""
model = varembed.VarEmbed.load_varembed_format(vectors=varembed_model_vector_file)
self.assertTrue(np.allclose(model['language'], model[['language']]))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
from datetime import timedelta
import logging
import operator
from typing import Dict, Optional
from pyicloud import PyiCloudService
from pyicloud.exceptions import (
PyiCloudFailedLoginException,
PyiCloudNoDevicesException,
PyiCloudServiceNotActivatedException,
)
from pyicloud.services.findmyiphone import AppleDevice
from homeassistant.components.zone import async_active_zone
from homeassistant.config_entries import SOURCE_REAUTH, ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, CONF_USERNAME
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import track_point_in_utc_time
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import slugify
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.dt import utcnow
from homeassistant.util.location import distance
from .const import (
DEVICE_BATTERY_LEVEL,
DEVICE_BATTERY_STATUS,
DEVICE_CLASS,
DEVICE_DISPLAY_NAME,
DEVICE_ID,
DEVICE_LOCATION,
DEVICE_LOCATION_HORIZONTAL_ACCURACY,
DEVICE_LOCATION_LATITUDE,
DEVICE_LOCATION_LONGITUDE,
DEVICE_LOST_MODE_CAPABLE,
DEVICE_LOW_POWER_MODE,
DEVICE_NAME,
DEVICE_PERSON_ID,
DEVICE_RAW_DEVICE_MODEL,
DEVICE_STATUS,
DEVICE_STATUS_CODES,
DEVICE_STATUS_SET,
DOMAIN,
)
ATTRIBUTION = "Data provided by Apple iCloud"
# entity attributes
ATTR_ACCOUNT_FETCH_INTERVAL = "account_fetch_interval"
ATTR_BATTERY = "battery"
ATTR_BATTERY_STATUS = "battery_status"
ATTR_DEVICE_NAME = "device_name"
ATTR_DEVICE_STATUS = "device_status"
ATTR_LOW_POWER_MODE = "low_power_mode"
ATTR_OWNER_NAME = "owner_fullname"
# services
SERVICE_ICLOUD_PLAY_SOUND = "play_sound"
SERVICE_ICLOUD_DISPLAY_MESSAGE = "display_message"
SERVICE_ICLOUD_LOST_DEVICE = "lost_device"
SERVICE_ICLOUD_UPDATE = "update"
ATTR_ACCOUNT = "account"
ATTR_LOST_DEVICE_MESSAGE = "message"
ATTR_LOST_DEVICE_NUMBER = "number"
ATTR_LOST_DEVICE_SOUND = "sound"
_LOGGER = logging.getLogger(__name__)
class IcloudAccount:
"""Representation of an iCloud account."""
def __init__(
self,
hass: HomeAssistantType,
username: str,
password: str,
icloud_dir: Store,
with_family: bool,
max_interval: int,
gps_accuracy_threshold: int,
config_entry: ConfigEntry,
):
"""Initialize an iCloud account."""
self.hass = hass
self._username = username
self._password = password
self._with_family = with_family
self._fetch_interval = max_interval
self._max_interval = max_interval
self._gps_accuracy_threshold = gps_accuracy_threshold
self._icloud_dir = icloud_dir
self.api: Optional[PyiCloudService] = None
self._owner_fullname = None
self._family_members_fullname = {}
self._devices = {}
self._retried_fetch = False
self._config_entry = config_entry
self.listeners = []
def setup(self) -> None:
"""Set up an iCloud account."""
try:
self.api = PyiCloudService(
self._username,
self._password,
self._icloud_dir.path,
with_family=self._with_family,
)
except PyiCloudFailedLoginException:
self.api = None
# Login failed which means credentials need to be updated.
_LOGGER.error(
(
"Your password for '%s' is no longer working. Go to the "
"Integrations menu and click on Configure on the discovered Apple "
"iCloud card to login again."
),
self._config_entry.data[CONF_USERNAME],
)
self.hass.add_job(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_REAUTH},
data={
**self._config_entry.data,
"unique_id": self._config_entry.unique_id,
},
)
)
return
try:
api_devices = self.api.devices
            # Get the device owner's info
user_info = api_devices.response["userInfo"]
except (
PyiCloudServiceNotActivatedException,
PyiCloudNoDevicesException,
) as err:
_LOGGER.error("No iCloud device found")
raise ConfigEntryNotReady from err
self._owner_fullname = f"{user_info['firstName']} {user_info['lastName']}"
self._family_members_fullname = {}
if user_info.get("membersInfo") is not None:
for prs_id, member in user_info["membersInfo"].items():
self._family_members_fullname[
prs_id
] = f"{member['firstName']} {member['lastName']}"
self._devices = {}
self.update_devices()
def update_devices(self) -> None:
"""Update iCloud devices."""
if self.api is None:
return
api_devices = {}
try:
api_devices = self.api.devices
except Exception as err: # pylint: disable=broad-except
_LOGGER.error("Unknown iCloud error: %s", err)
self._fetch_interval = 2
dispatcher_send(self.hass, self.signal_device_update)
track_point_in_utc_time(
self.hass,
self.keep_alive,
utcnow() + timedelta(minutes=self._fetch_interval),
)
return
        # Get info for each device
new_device = False
for device in api_devices:
status = device.status(DEVICE_STATUS_SET)
device_id = status[DEVICE_ID]
device_name = status[DEVICE_NAME]
if (
status[DEVICE_BATTERY_STATUS] == "Unknown"
or status.get(DEVICE_BATTERY_LEVEL) is None
):
continue
if self._devices.get(device_id) is not None:
# Seen device -> updating
_LOGGER.debug("Updating iCloud device: %s", device_name)
self._devices[device_id].update(status)
else:
# New device, should be unique
_LOGGER.debug(
"Adding iCloud device: %s [model: %s]",
device_name,
status[DEVICE_RAW_DEVICE_MODEL],
)
self._devices[device_id] = IcloudDevice(self, device, status)
self._devices[device_id].update(status)
new_device = True
if (
DEVICE_STATUS_CODES.get(list(api_devices)[0][DEVICE_STATUS]) == "pending"
and not self._retried_fetch
):
_LOGGER.debug("Pending devices, trying again in 15s")
self._fetch_interval = 0.25
self._retried_fetch = True
else:
self._fetch_interval = self._determine_interval()
self._retried_fetch = False
dispatcher_send(self.hass, self.signal_device_update)
if new_device:
dispatcher_send(self.hass, self.signal_device_new)
track_point_in_utc_time(
self.hass,
self.keep_alive,
utcnow() + timedelta(minutes=self._fetch_interval),
)
def _determine_interval(self) -> int:
"""Calculate new interval between two API fetch (in minutes)."""
intervals = {"default": self._max_interval}
for device in self._devices.values():
# Max interval if no location
if device.location is None:
continue
current_zone = run_callback_threadsafe(
self.hass.loop,
async_active_zone,
self.hass,
device.location[DEVICE_LOCATION_LATITUDE],
device.location[DEVICE_LOCATION_LONGITUDE],
device.location[DEVICE_LOCATION_HORIZONTAL_ACCURACY],
).result()
# Max interval if in zone
if current_zone is not None:
continue
zones = (
self.hass.states.get(entity_id)
for entity_id in sorted(self.hass.states.entity_ids("zone"))
)
distances = []
for zone_state in zones:
zone_state_lat = zone_state.attributes[DEVICE_LOCATION_LATITUDE]
zone_state_long = zone_state.attributes[DEVICE_LOCATION_LONGITUDE]
zone_distance = distance(
device.location[DEVICE_LOCATION_LATITUDE],
device.location[DEVICE_LOCATION_LONGITUDE],
zone_state_lat,
zone_state_long,
)
distances.append(round(zone_distance / 1000, 1))
# Max interval if no zone
if not distances:
continue
mindistance = min(distances)
            # Estimate how long (in minutes) it would take the device to drive
            # to the nearest zone at 120 km/h:
interval = round(mindistance / 2, 0)
# Never poll more than once per minute
interval = max(interval, 1)
if interval > 180:
# Three hour drive?
# This is far enough that they might be flying
interval = self._max_interval
if (
device.battery_level is not None
and device.battery_level <= 33
and mindistance > 3
):
# Low battery - let's check half as often
interval = interval * 2
intervals[device.name] = interval
return max(
int(min(intervals.items(), key=operator.itemgetter(1))[1]),
self._max_interval,
)
def keep_alive(self, now=None) -> None:
"""Keep the API alive."""
if self.api is None:
self.setup()
if self.api is None:
return
self.api.authenticate()
self.update_devices()
def get_devices_with_name(self, name: str) -> [any]:
"""Get devices by name."""
result = []
name_slug = slugify(name.replace(" ", "", 99))
for device in self.devices.values():
if slugify(device.name.replace(" ", "", 99)) == name_slug:
result.append(device)
if not result:
raise Exception(f"No device with name {name}")
return result
@property
def username(self) -> str:
"""Return the account username."""
return self._username
@property
def owner_fullname(self) -> str:
"""Return the account owner fullname."""
return self._owner_fullname
@property
def family_members_fullname(self) -> Dict[str, str]:
"""Return the account family members fullname."""
return self._family_members_fullname
@property
def fetch_interval(self) -> int:
"""Return the account fetch interval."""
return self._fetch_interval
@property
def devices(self) -> Dict[str, any]:
"""Return the account devices."""
return self._devices
@property
def signal_device_new(self) -> str:
"""Event specific per Freebox entry to signal new device."""
return f"{DOMAIN}-{self._username}-device-new"
@property
def signal_device_update(self) -> str:
"""Event specific per Freebox entry to signal updates in devices."""
return f"{DOMAIN}-{self._username}-device-update"
class IcloudDevice:
"""Representation of a iCloud device."""
def __init__(self, account: IcloudAccount, device: AppleDevice, status):
"""Initialize the iCloud device."""
self._account = account
self._device = device
self._status = status
self._name = self._status[DEVICE_NAME]
self._device_id = self._status[DEVICE_ID]
self._device_class = self._status[DEVICE_CLASS]
self._device_model = self._status[DEVICE_DISPLAY_NAME]
if self._status[DEVICE_PERSON_ID]:
owner_fullname = account.family_members_fullname[
self._status[DEVICE_PERSON_ID]
]
else:
owner_fullname = account.owner_fullname
self._battery_level = None
self._battery_status = None
self._location = None
self._attrs = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_ACCOUNT_FETCH_INTERVAL: self._account.fetch_interval,
ATTR_DEVICE_NAME: self._device_model,
ATTR_DEVICE_STATUS: None,
ATTR_OWNER_NAME: owner_fullname,
}
def update(self, status) -> None:
"""Update the iCloud device."""
self._status = status
self._status[ATTR_ACCOUNT_FETCH_INTERVAL] = self._account.fetch_interval
device_status = DEVICE_STATUS_CODES.get(self._status[DEVICE_STATUS], "error")
self._attrs[ATTR_DEVICE_STATUS] = device_status
self._battery_status = self._status[DEVICE_BATTERY_STATUS]
self._attrs[ATTR_BATTERY_STATUS] = self._battery_status
device_battery_level = self._status.get(DEVICE_BATTERY_LEVEL, 0)
if self._battery_status != "Unknown" and device_battery_level is not None:
self._battery_level = int(device_battery_level * 100)
self._attrs[ATTR_BATTERY] = self._battery_level
self._attrs[ATTR_LOW_POWER_MODE] = self._status[DEVICE_LOW_POWER_MODE]
if (
self._status[DEVICE_LOCATION]
and self._status[DEVICE_LOCATION][DEVICE_LOCATION_LATITUDE]
):
location = self._status[DEVICE_LOCATION]
if self._location is None:
dispatcher_send(self._account.hass, self._account.signal_device_new)
self._location = location
def play_sound(self) -> None:
"""Play sound on the device."""
if self._account.api is None:
return
self._account.api.authenticate()
_LOGGER.debug("Playing sound for %s", self.name)
self.device.play_sound()
def display_message(self, message: str, sound: bool = False) -> None:
"""Display a message on the device."""
if self._account.api is None:
return
self._account.api.authenticate()
_LOGGER.debug("Displaying message for %s", self.name)
self.device.display_message("Subject not working", message, sound)
def lost_device(self, number: str, message: str) -> None:
"""Make the device in lost state."""
if self._account.api is None:
return
self._account.api.authenticate()
if self._status[DEVICE_LOST_MODE_CAPABLE]:
_LOGGER.debug("Make device lost for %s", self.name)
self.device.lost_device(number, message, None)
else:
_LOGGER.error("Cannot make device lost for %s", self.name)
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._device_id
@property
def name(self) -> str:
"""Return the Apple device name."""
return self._name
@property
def device(self) -> AppleDevice:
"""Return the Apple device."""
return self._device
@property
def device_class(self) -> str:
"""Return the Apple device class."""
return self._device_class
@property
def device_model(self) -> str:
"""Return the Apple device model."""
return self._device_model
@property
def battery_level(self) -> int:
"""Return the Apple device battery level."""
return self._battery_level
@property
def battery_status(self) -> str:
"""Return the Apple device battery status."""
return self._battery_status
@property
def location(self) -> Dict[str, any]:
"""Return the Apple device location."""
return self._location
@property
def state_attributes(self) -> Dict[str, any]:
"""Return the attributes."""
return self._attrs
|
import asyncio
from datetime import datetime
import logging
from smhi.smhi_lib import APIURL_TEMPLATE, SmhiForecastException
from homeassistant.components.smhi import weather as weather_smhi
from homeassistant.components.smhi.const import ATTR_SMHI_CLOUDINESS
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_WEATHER_ATTRIBUTION,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_VISIBILITY,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
DOMAIN as WEATHER_DOMAIN,
)
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import MockConfigEntry, load_fixture
_LOGGER = logging.getLogger(__name__)
TEST_CONFIG = {"name": "test", "longitude": "17.84197", "latitude": "59.32624"}
async def test_setup_hass(hass: HomeAssistant, aioclient_mock) -> None:
"""Test for successfully setting up the smhi platform.
    This test is more deeply integrated with the core. Since only the
    config_flow is used, the component is set up with
    "async_forward_entry_setup". The actual result is verified against
    the entity state rather than with per-function unit tests.
"""
uri = APIURL_TEMPLATE.format(TEST_CONFIG["longitude"], TEST_CONFIG["latitude"])
api_response = load_fixture("smhi.json")
aioclient_mock.get(uri, text=api_response)
entry = MockConfigEntry(domain="smhi", data=TEST_CONFIG)
await hass.config_entries.async_forward_entry_setup(entry, WEATHER_DOMAIN)
await hass.async_block_till_done()
assert aioclient_mock.call_count == 1
    # Test the actual entity state for deeper coverage
    # than a normal unit test
state = hass.states.get("weather.smhi_test")
assert state.state == "sunny"
assert state.attributes[ATTR_SMHI_CLOUDINESS] == 50
assert state.attributes[ATTR_WEATHER_ATTRIBUTION].find("SMHI") >= 0
assert state.attributes[ATTR_WEATHER_HUMIDITY] == 55
assert state.attributes[ATTR_WEATHER_PRESSURE] == 1024
assert state.attributes[ATTR_WEATHER_TEMPERATURE] == 17
assert state.attributes[ATTR_WEATHER_VISIBILITY] == 50
assert state.attributes[ATTR_WEATHER_WIND_SPEED] == 7
assert state.attributes[ATTR_WEATHER_WIND_BEARING] == 134
_LOGGER.error(state.attributes)
assert len(state.attributes["forecast"]) == 4
forecast = state.attributes["forecast"][1]
assert forecast[ATTR_FORECAST_TIME] == "2018-09-02T12:00:00"
assert forecast[ATTR_FORECAST_TEMP] == 21
assert forecast[ATTR_FORECAST_TEMP_LOW] == 6
assert forecast[ATTR_FORECAST_PRECIPITATION] == 0
assert forecast[ATTR_FORECAST_CONDITION] == "partlycloudy"
def test_properties_no_data(hass: HomeAssistant) -> None:
"""Test properties when no API data available."""
weather = weather_smhi.SmhiWeather("name", "10", "10")
weather.hass = hass
assert weather.name == "name"
assert weather.should_poll is True
assert weather.temperature is None
assert weather.humidity is None
assert weather.wind_speed is None
assert weather.wind_bearing is None
assert weather.visibility is None
assert weather.pressure is None
assert weather.cloudiness is None
assert weather.condition is None
assert weather.forecast is None
assert weather.temperature_unit == TEMP_CELSIUS
# pylint: disable=protected-access
def test_properties_unknown_symbol() -> None:
"""Test behaviour when unknown symbol from API."""
hass = Mock()
data = Mock()
data.temperature = 5
data.mean_precipitation = 0.5
data.total_precipitation = 1
data.humidity = 5
data.wind_speed = 10
data.wind_direction = 180
data.horizontal_visibility = 6
data.pressure = 1008
data.cloudiness = 52
data.symbol = 100 # Faulty symbol
data.valid_time = datetime(2018, 1, 1, 0, 1, 2)
data2 = Mock()
data2.temperature = 5
data2.mean_precipitation = 0.5
data2.total_precipitation = 1
data2.humidity = 5
data2.wind_speed = 10
data2.wind_direction = 180
data2.horizontal_visibility = 6
data2.pressure = 1008
data2.cloudiness = 52
data2.symbol = 100 # Faulty symbol
data2.valid_time = datetime(2018, 1, 1, 12, 1, 2)
data3 = Mock()
data3.temperature = 5
data3.mean_precipitation = 0.5
data3.total_precipitation = 1
data3.humidity = 5
data3.wind_speed = 10
data3.wind_direction = 180
data3.horizontal_visibility = 6
data3.pressure = 1008
data3.cloudiness = 52
data3.symbol = 100 # Faulty symbol
data3.valid_time = datetime(2018, 1, 2, 12, 1, 2)
testdata = [data, data2, data3]
weather = weather_smhi.SmhiWeather("name", "10", "10")
weather.hass = hass
weather._forecasts = testdata
assert weather.condition is None
forecast = weather.forecast[0]
assert forecast[ATTR_FORECAST_CONDITION] is None
# pylint: disable=protected-access
async def test_refresh_weather_forecast_exceeds_retries(hass) -> None:
"""Test the refresh weather forecast function."""
with patch.object(
hass.helpers.event, "async_call_later"
) as call_later, patch.object(
weather_smhi.SmhiWeather,
"get_weather_forecast",
side_effect=SmhiForecastException(),
):
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
weather._fail_count = 2
await weather.async_update()
assert weather._forecasts is None
assert not call_later.mock_calls
async def test_refresh_weather_forecast_timeout(hass) -> None:
"""Test timeout exception."""
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
with patch.object(
hass.helpers.event, "async_call_later"
) as call_later, patch.object(
weather_smhi.SmhiWeather, "retry_update"
), patch.object(
weather_smhi.SmhiWeather,
"get_weather_forecast",
side_effect=asyncio.TimeoutError,
):
await weather.async_update()
assert len(call_later.mock_calls) == 1
# Assert we are going to wait RETRY_TIMEOUT seconds
assert call_later.mock_calls[0][1][0] == weather_smhi.RETRY_TIMEOUT
async def test_refresh_weather_forecast_exception() -> None:
"""Test any exception."""
hass = Mock()
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
with patch.object(
hass.helpers.event, "async_call_later"
) as call_later, patch.object(
weather,
"get_weather_forecast",
side_effect=SmhiForecastException(),
):
await weather.async_update()
assert len(call_later.mock_calls) == 1
# Assert we are going to wait RETRY_TIMEOUT seconds
assert call_later.mock_calls[0][1][0] == weather_smhi.RETRY_TIMEOUT
async def test_retry_update():
"""Test retry function of refresh forecast."""
hass = Mock()
weather = weather_smhi.SmhiWeather("name", "17.0022", "62.0022")
weather.hass = hass
with patch.object(weather, "async_update", AsyncMock()) as update:
await weather.retry_update(None)
assert len(update.mock_calls) == 1
def test_condition_class():
"""Test condition class."""
def get_condition(index: int) -> str:
"""Return condition given index."""
return [k for k, v in weather_smhi.CONDITION_CLASSES.items() if index in v][0]
# SMHI definitions as follows, see
# http://opendata.smhi.se/apidocs/metfcst/parameters.html
# 1. Clear sky
assert get_condition(1) == "sunny"
# 2. Nearly clear sky
assert get_condition(2) == "sunny"
# 3. Variable cloudiness
assert get_condition(3) == "partlycloudy"
# 4. Halfclear sky
assert get_condition(4) == "partlycloudy"
# 5. Cloudy sky
assert get_condition(5) == "cloudy"
# 6. Overcast
assert get_condition(6) == "cloudy"
# 7. Fog
assert get_condition(7) == "fog"
# 8. Light rain showers
assert get_condition(8) == "rainy"
# 9. Moderate rain showers
assert get_condition(9) == "rainy"
# 18. Light rain
assert get_condition(18) == "rainy"
# 19. Moderate rain
assert get_condition(19) == "rainy"
# 10. Heavy rain showers
assert get_condition(10) == "pouring"
# 20. Heavy rain
assert get_condition(20) == "pouring"
# 21. Thunder
assert get_condition(21) == "lightning"
# 11. Thunderstorm
assert get_condition(11) == "lightning-rainy"
# 15. Light snow showers
assert get_condition(15) == "snowy"
# 16. Moderate snow showers
assert get_condition(16) == "snowy"
# 17. Heavy snow showers
assert get_condition(17) == "snowy"
# 25. Light snowfall
assert get_condition(25) == "snowy"
# 26. Moderate snowfall
assert get_condition(26) == "snowy"
# 27. Heavy snowfall
assert get_condition(27) == "snowy"
# 12. Light sleet showers
assert get_condition(12) == "snowy-rainy"
# 13. Moderate sleet showers
assert get_condition(13) == "snowy-rainy"
# 14. Heavy sleet showers
assert get_condition(14) == "snowy-rainy"
# 22. Light sleet
assert get_condition(22) == "snowy-rainy"
# 23. Moderate sleet
assert get_condition(23) == "snowy-rainy"
# 24. Heavy sleet
assert get_condition(24) == "snowy-rainy"
|
from pyVmomi import vmodl, vim
##
## @brief Exception class to represent when a task is blocked (e.g.
## waiting for an answer to a question).
##
class TaskBlocked(Exception):
"""
    Exception class to represent when a task is blocked (e.g. waiting
    for an answer to a question).
"""
pass
#
# TaskUpdates
# verbose information about task progress
#
def TaskUpdatesVerbose(task, progress):
if isinstance(task.info.progress, int):
info = task.info
if not isinstance(progress, str):
progress = '%d%% (%s)' % (info.progress, info.state)
print('Task %s (key:%s, desc:%s) - %s' % (
info.name.info.name, info.key, info.description, progress))
globalTaskUpdate = None
def SetTasksVerbose(verbose=True):
global globalTaskUpdate
if verbose:
globalTaskUpdate = TaskUpdatesVerbose
else:
globalTaskUpdate = None
##
## @param raiseOnError [in] Any exception thrown is thrown up to the caller if
## raiseOnError is set to true
## @param si [in] ServiceInstance to use. If set to None, use the default one.
## @param pc [in] property collector to use else retrieve one from cache
## @param onProgressUpdate [in] callable to call with task progress updates.
## For example:
##
## def OnTaskProgressUpdate(task, percentDone):
## sys.stderr.write('# Task %s: %d%% complete ...\n' % (task, percentDone))
##
## Given a task object and a service instance, wait for the task completion
##
## @return state as either "success" or "error". To look at any errors, the
## user should reexamine the task object.
##
## NOTE: This is a blocking call.
##
def WaitForTask(task,
raiseOnError=True,
si=None,
pc=None,
onProgressUpdate=None):
"""
Wait for task to complete.
@type raiseOnError : bool
@param raiseOnError : Any exception thrown is thrown up to the caller
if raiseOnError is set to true.
@type si : ManagedObjectReference to a ServiceInstance.
@param si : ServiceInstance to use. If None, use the
information from the task.
@type pc : ManagedObjectReference to a PropertyCollector.
@param pc : Property collector to use. If None, get it from
the ServiceInstance.
@type onProgressUpdate : callable
@param onProgressUpdate : Callable to call with task progress updates.
For example::
def OnTaskProgressUpdate(task, percentDone):
          print('Task %s is %d%% complete.' % (task, percentDone))
"""
if si is None:
si = vim.ServiceInstance("ServiceInstance", task._stub)
if pc is None:
pc = si.content.propertyCollector
progressUpdater = ProgressUpdater(task, onProgressUpdate)
progressUpdater.Update('created')
filter = CreateFilter(pc, task)
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while state not in (vim.TaskInfo.State.success, vim.TaskInfo.State.error):
try:
version, state = GetTaskStatus(task, version, pc)
progressUpdater.UpdateIfNeeded()
except vmodl.fault.ManagedObjectNotFound as e:
print("Task object has been deleted: %s" % e.obj)
break
filter.Destroy()
if state == "error":
progressUpdater.Update('error: %s' % str(task.info.error))
if raiseOnError:
raise task.info.error
else:
print("Task reported error: " + str(task.info.error))
else:
progressUpdater.Update('completed')
return state
## Wait for multiple tasks to complete
# See WaitForTask for detail
#
# Difference: WaitForTasks won't return the state of tasks. User can check
# tasks state directly with task.info.state
#
# TODO: Did not check for question pending
def WaitForTasks(tasks,
raiseOnError=True,
si=None,
pc=None,
onProgressUpdate=None,
results=None):
"""
    Wait for multiple tasks to complete. Much faster than calling WaitForTask
    N times.
"""
if not tasks:
return
if si is None:
si = vim.ServiceInstance("ServiceInstance", tasks[0]._stub)
if pc is None:
pc = si.content.propertyCollector
if results is None:
results = []
progressUpdaters = {}
for task in tasks:
progressUpdater = ProgressUpdater(task, onProgressUpdate)
progressUpdater.Update('created')
progressUpdaters[str(task)] = progressUpdater
filter = CreateTasksFilter(pc, tasks)
try:
version, state = None, None
# Loop looking for updates till the state moves to a completed state.
while len(progressUpdaters):
update = pc.WaitForUpdates(version)
for filterSet in update.filterSet:
for objSet in filterSet.objectSet:
task = objSet.obj
taskId = str(task)
for change in objSet.changeSet:
if change.name == 'info':
state = change.val.state
elif change.name == 'info.state':
state = change.val
else:
continue
progressUpdater = progressUpdaters.get(taskId)
if not progressUpdater:
continue
if state == vim.TaskInfo.State.success:
progressUpdater.Update('completed')
progressUpdaters.pop(taskId)
# cache the results, as task objects could expire if one
# of the tasks take a longer time to complete
results.append(task.info.result)
elif state == vim.TaskInfo.State.error:
err = task.info.error
progressUpdater.Update('error: %s' % str(err))
if raiseOnError:
raise err
else:
print("Task %s reported error: %s" % (taskId, str(err)))
progressUpdaters.pop(taskId)
else:
if onProgressUpdate:
progressUpdater.UpdateIfNeeded()
# Move to next version
version = update.version
finally:
if filter:
filter.Destroy()
return
def GetTaskStatus(task, version, pc):
update = pc.WaitForUpdates(version)
state = task.info.state
if (state == 'running' and task.info.name is not None and task.info.name.info.name != "Destroy"
and task.info.name.info.name != "Relocate"):
CheckForQuestionPending(task)
return update.version, state
def CreateFilter(pc, task):
""" Create property collector filter for task """
return CreateTasksFilter(pc, [task])
def CreateTasksFilter(pc, tasks):
""" Create property collector filter for tasks """
if not tasks:
return None
# First create the object specification as the task object.
objspecs = [vmodl.query.PropertyCollector.ObjectSpec(obj=task)
for task in tasks]
# Next, create the property specification as the state.
propspec = vmodl.query.PropertyCollector.PropertySpec(
type=vim.Task, pathSet=[], all=True)
# Create a filter spec with the specified object and property spec.
filterspec = vmodl.query.PropertyCollector.FilterSpec()
filterspec.objectSet = objspecs
filterspec.propSet = [propspec]
# Create the filter
return pc.CreateFilter(filterspec, True)
def CheckForQuestionPending(task):
"""
Check to see if VM needs to ask a question, throw exception
"""
vm = task.info.entity
if vm is not None and isinstance(vm, vim.VirtualMachine):
qst = vm.runtime.question
if qst is not None:
raise TaskBlocked("Task blocked, User Intervention required")
##
## @brief Class that keeps track of task percentage complete and calls
## a provided callback when it changes.
##
class ProgressUpdater(object):
"""
Class that keeps track of task percentage complete and calls a
provided callback when it changes.
"""
def __init__(self, task, onProgressUpdate):
self.task = task
self.onProgressUpdate = onProgressUpdate
self.prevProgress = 0
self.progress = 0
def Update(self, state):
global globalTaskUpdate
taskUpdate = globalTaskUpdate
if self.onProgressUpdate:
taskUpdate = self.onProgressUpdate
if taskUpdate:
taskUpdate(self.task, state)
def UpdateIfNeeded(self):
self.progress = self.task.info.progress
if self.progress != self.prevProgress:
self.Update(self.progress)
self.prevProgress = self.progress
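# A minimal usage sketch of the helpers above (hypothetical vCenter host, credentials
# and VM name; SmartConnect/Disconnect come from pyVim.connect, which this module does
# not import itself). SetTasksVerbose(True) would enable TaskUpdatesVerbose for calls
# that do not pass their own onProgressUpdate callback.
if __name__ == '__main__':
    from pyVim.connect import SmartConnect, Disconnect

    si = SmartConnect(host='vcenter.example.com', user='administrator', pwd='secret')
    try:
        vm = si.content.searchIndex.FindByDnsName(None, 'test-vm', True)
        task = vm.PowerOff()
        state = WaitForTask(
            task, si=si,
            onProgressUpdate=lambda t, p: print('Task %s: %s' % (t, p)))
        print('Task finished with state:', state)
    finally:
        Disconnect(si)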
|
from homeassistant.components.light.reproduce_state import DEPRECATION_WARNING
from homeassistant.core import State
from tests.common import async_mock_service
VALID_BRIGHTNESS = {"brightness": 180}
VALID_WHITE_VALUE = {"white_value": 200}
VALID_FLASH = {"flash": "short"}
VALID_EFFECT = {"effect": "random"}
VALID_TRANSITION = {"transition": 15}
VALID_COLOR_NAME = {"color_name": "red"}
VALID_COLOR_TEMP = {"color_temp": 240}
VALID_HS_COLOR = {"hs_color": (345, 75)}
VALID_KELVIN = {"kelvin": 4000}
VALID_PROFILE = {"profile": "relax"}
VALID_RGB_COLOR = {"rgb_color": (255, 63, 111)}
VALID_XY_COLOR = {"xy_color": (0.59, 0.274)}
async def test_reproducing_states(hass, caplog):
"""Test reproducing Light states."""
hass.states.async_set("light.entity_off", "off", {})
hass.states.async_set("light.entity_bright", "on", VALID_BRIGHTNESS)
hass.states.async_set("light.entity_white", "on", VALID_WHITE_VALUE)
hass.states.async_set("light.entity_flash", "on", VALID_FLASH)
hass.states.async_set("light.entity_effect", "on", VALID_EFFECT)
hass.states.async_set("light.entity_trans", "on", VALID_TRANSITION)
hass.states.async_set("light.entity_name", "on", VALID_COLOR_NAME)
hass.states.async_set("light.entity_temp", "on", VALID_COLOR_TEMP)
hass.states.async_set("light.entity_hs", "on", VALID_HS_COLOR)
hass.states.async_set("light.entity_kelvin", "on", VALID_KELVIN)
hass.states.async_set("light.entity_profile", "on", VALID_PROFILE)
hass.states.async_set("light.entity_rgb", "on", VALID_RGB_COLOR)
hass.states.async_set("light.entity_xy", "on", VALID_XY_COLOR)
turn_on_calls = async_mock_service(hass, "light", "turn_on")
turn_off_calls = async_mock_service(hass, "light", "turn_off")
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[
State("light.entity_off", "off"),
State("light.entity_bright", "on", VALID_BRIGHTNESS),
State("light.entity_white", "on", VALID_WHITE_VALUE),
State("light.entity_flash", "on", VALID_FLASH),
State("light.entity_effect", "on", VALID_EFFECT),
State("light.entity_trans", "on", VALID_TRANSITION),
State("light.entity_name", "on", VALID_COLOR_NAME),
State("light.entity_temp", "on", VALID_COLOR_TEMP),
State("light.entity_hs", "on", VALID_HS_COLOR),
State("light.entity_kelvin", "on", VALID_KELVIN),
State("light.entity_profile", "on", VALID_PROFILE),
State("light.entity_rgb", "on", VALID_RGB_COLOR),
State("light.entity_xy", "on", VALID_XY_COLOR),
]
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("light.entity_off", "not_supported")]
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("light.entity_xy", "off"),
State("light.entity_off", "on", VALID_BRIGHTNESS),
State("light.entity_bright", "on", VALID_WHITE_VALUE),
State("light.entity_white", "on", VALID_FLASH),
State("light.entity_flash", "on", VALID_EFFECT),
State("light.entity_effect", "on", VALID_TRANSITION),
State("light.entity_trans", "on", VALID_COLOR_NAME),
State("light.entity_name", "on", VALID_COLOR_TEMP),
State("light.entity_temp", "on", VALID_HS_COLOR),
State("light.entity_hs", "on", VALID_KELVIN),
State("light.entity_kelvin", "on", VALID_PROFILE),
State("light.entity_profile", "on", VALID_RGB_COLOR),
State("light.entity_rgb", "on", VALID_XY_COLOR),
],
)
assert len(turn_on_calls) == 12
expected_calls = []
expected_off = VALID_BRIGHTNESS
expected_off["entity_id"] = "light.entity_off"
expected_calls.append(expected_off)
expected_bright = VALID_WHITE_VALUE
expected_bright["entity_id"] = "light.entity_bright"
expected_calls.append(expected_bright)
expected_white = VALID_FLASH
expected_white["entity_id"] = "light.entity_white"
expected_calls.append(expected_white)
expected_flash = VALID_EFFECT
expected_flash["entity_id"] = "light.entity_flash"
expected_calls.append(expected_flash)
expected_effect = VALID_TRANSITION
expected_effect["entity_id"] = "light.entity_effect"
expected_calls.append(expected_effect)
expected_trans = VALID_COLOR_NAME
expected_trans["entity_id"] = "light.entity_trans"
expected_calls.append(expected_trans)
expected_name = VALID_COLOR_TEMP
expected_name["entity_id"] = "light.entity_name"
expected_calls.append(expected_name)
expected_temp = VALID_HS_COLOR
expected_temp["entity_id"] = "light.entity_temp"
expected_calls.append(expected_temp)
expected_hs = VALID_KELVIN
expected_hs["entity_id"] = "light.entity_hs"
expected_calls.append(expected_hs)
expected_kelvin = VALID_PROFILE
expected_kelvin["entity_id"] = "light.entity_kelvin"
expected_calls.append(expected_kelvin)
expected_profile = VALID_RGB_COLOR
expected_profile["entity_id"] = "light.entity_profile"
expected_calls.append(expected_profile)
expected_rgb = VALID_XY_COLOR
expected_rgb["entity_id"] = "light.entity_rgb"
expected_calls.append(expected_rgb)
for call in turn_on_calls:
assert call.domain == "light"
found = False
for expected in expected_calls:
if call.data["entity_id"] == expected["entity_id"]:
# We found the matching entry
assert call.data == expected
found = True
break
# No entry found
assert found
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "light"
assert turn_off_calls[0].data == {"entity_id": "light.entity_xy"}
async def test_deprecation_warning(hass, caplog):
"""Test deprecation warning."""
hass.states.async_set("light.entity_off", "off", {})
turn_on_calls = async_mock_service(hass, "light", "turn_on")
await hass.helpers.state.async_reproduce_state(
[State("light.entity_off", "on", {"brightness_pct": 80})]
)
assert len(turn_on_calls) == 1
assert DEPRECATION_WARNING % ["brightness_pct"] in caplog.text
|
import numpy as np
import pandas as pd
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
class BM25Difference(CorpusBasedTermScorer):
'''
Designed for use only in the term_scorer argument.
    This should really inherit from a specific type.
term_scorer = (BM25Difference(corpus, k1 = 1.4, b=0.9)
.set_categories('Positive', ['Negative'], ['Plot']))
html = st.produce_frequency_explorer(
corpus,
category='Positive',
not_categories=['Negative'],
neutral_categories=['Plot'],
term_scorer=term_scorer,
metadata=rdf['movie_name'],
grey_threshold=0,
show_neutral=True
)
file_name = 'output/rotten_fresh_bm25.html'
open(file_name, 'wb').write(html.encode('utf-8'))
IFrame(src=file_name, width=1300, height=700)
'''
def _set_scorer_args(self, **kwargs):
self.k1 = kwargs.get('k1', 1.2)
self.b = kwargs.get('b', 0.95)
def get_scores(self, *args):
'''
In this case, args aren't used, since this information is taken
directly from the corpus categories.
Returns
-------
np.array, scores
'''
if self.tdf_ is None:
raise Exception("Use set_category_name('category name', ['not category name', ...]) " +
"to set the category of interest")
avgdl = self.tdf_.sum(axis=0).mean()
def idf(cat):
# Number of categories with term
n_q = (self.tdf_ > 0).astype(int).max(axis=1).sum()
N = len(self.tdf_)
return (N - n_q + 0.5) / (n_q + 0.5)
def length_adjusted_tf(cat):
tf = self.tdf_[cat]
dl = self.tdf_[cat].sum()
return ((tf * (self.k1 + 1))
/ (tf + self.k1 * (1 - self.b + self.b * (dl / avgdl))))
def bm25_score(cat):
return - length_adjusted_tf(cat) * np.log(idf(cat))
scores = bm25_score('cat') - bm25_score('ncat')
return scores
def get_name(self):
return 'BM25 difference'
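# A minimal numeric sketch of the same BM25 arithmetic used in get_scores(), on
# hypothetical toy counts rather than a real corpus (in the class, tdf_ is built by
# CorpusBasedTermScorer and holds per-term counts with one column per category group):
if __name__ == '__main__':
    toy_tdf = pd.DataFrame({'cat': [3., 0., 5.], 'ncat': [1., 2., 0.]},
                           index=['a', 'b', 'c'])
    k1, b = 1.2, 0.95
    avgdl = toy_tdf.sum(axis=0).mean()
    # Mirrors idf() above: counts terms present in at least one column
    n_q = (toy_tdf > 0).astype(int).max(axis=1).sum()
    idf = (len(toy_tdf) - n_q + 0.5) / (n_q + 0.5)

    def toy_length_adjusted_tf(col):
        tf = toy_tdf[col]
        dl = toy_tdf[col].sum()
        return (tf * (k1 + 1)) / (tf + k1 * (1 - b + b * (dl / avgdl)))

    def toy_bm25(col):
        return -toy_length_adjusted_tf(col) * np.log(idf)

    print(toy_bm25('cat') - toy_bm25('ncat'))  # per-term difference scores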
|
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
_data_path_doc_accept)
has_brainstorm_data = partial(has_dataset, name='brainstorm.bst_auditory')
_description = u"""
URL: http://neuroimage.usc.edu/brainstorm/DatasetAuditory
- One subject, two acquisition runs of 6 minutes each
- Subject stimulated binaurally with intra-aural earphones
(air tubes+transducers)
- Each run contains:
- 200 regular beeps (440Hz)
- 40 easy deviant beeps (554.4Hz, 4 semitones higher)
    - Random inter-stimulus interval: between 0.7 and 1.7 seconds, uniformly
distributed
- The subject presses a button when detecting a deviant with the right
index finger
- Auditory stimuli generated with the Matlab Psychophysics toolbox
"""
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
*, accept=False, verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='brainstorm',
download=download, archive_name='bst_auditory.tar.gz',
accept=accept)
_data_path_doc = _data_path_doc_accept.format(
name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH')
_data_path_doc = _data_path_doc.replace('brainstorm dataset',
'brainstorm (bst_auditory) dataset')
data_path.__doc__ = _data_path_doc
def get_version(): # noqa: D103
return _get_version('brainstorm.bst_auditory')
get_version.__doc__ = _version_doc.format(name='brainstorm')
def description():
"""Get description of brainstorm (bst_auditory) dataset."""
for desc in _description.splitlines():
print(desc)
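# A minimal usage sketch (download=False only reports the configured local path and
# skips any download; pass accept=True when you are willing to accept the dataset's
# license terms and actually fetch it):
if __name__ == '__main__':
    print('Local path:', data_path(download=False))
    print('Version:', get_version())
    description()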
|
import pytest
from PyQt5.QtCore import QUrl, QDateTime
from PyQt5.QtNetwork import QNetworkDiskCache, QNetworkCacheMetaData
from qutebrowser.browser.webkit import cache
@pytest.fixture
def disk_cache(tmpdir, config_stub):
return cache.DiskCache(str(tmpdir))
def preload_cache(cache, url='http://www.example.com/', content=b'foobar'):
metadata = QNetworkCacheMetaData()
metadata.setUrl(QUrl(url))
assert metadata.isValid()
device = cache.prepare(metadata)
assert device is not None
device.write(content)
cache.insert(device)
def test_cache_config_change_cache_size(config_stub, tmpdir):
"""Change cache size and emit signal to trigger on_config_changed."""
max_cache_size = 1024
config_stub.val.content.cache.size = max_cache_size
disk_cache = cache.DiskCache(str(tmpdir))
assert disk_cache.maximumCacheSize() == max_cache_size
config_stub.val.content.cache.size = max_cache_size * 2
assert disk_cache.maximumCacheSize() == max_cache_size * 2
def test_cache_size_leq_max_cache_size(config_stub, tmpdir):
"""Test cacheSize <= MaximumCacheSize when cache is activated."""
limit = 100
config_stub.val.content.cache.size = limit
disk_cache = cache.DiskCache(str(tmpdir))
assert disk_cache.maximumCacheSize() == limit
preload_cache(disk_cache, 'http://www.example.com/')
preload_cache(disk_cache, 'http://qutebrowser.org')
preload_cache(disk_cache, 'http://foo.xxx')
preload_cache(disk_cache, 'http://bar.net')
assert disk_cache.expire() < limit
# Add a threshold to the limit due to unforeseeable Qt internals
assert disk_cache.cacheSize() < limit + 100
def test_cache_existing_metadata_file(tmpdir, disk_cache):
"""Test querying existing meta data file from activated cache."""
url = 'http://qutebrowser.org'
content = b'foobar'
metadata = QNetworkCacheMetaData()
metadata.setUrl(QUrl(url))
assert metadata.isValid()
device = disk_cache.prepare(metadata)
assert device is not None
device.write(content)
disk_cache.insert(device)
disk_cache.updateMetaData(metadata)
files = list(tmpdir.visit(fil=lambda path: path.isfile()))
assert len(files) == 1
assert disk_cache.fileMetaData(str(files[0])) == metadata
def test_cache_nonexistent_metadata_file(disk_cache):
"""Test querying nonexistent meta data file from activated cache."""
cache_file = disk_cache.fileMetaData("nosuchfile")
assert not cache_file.isValid()
def test_cache_get_nonexistent_data(disk_cache):
"""Test querying some data that was never inserted."""
preload_cache(disk_cache, 'https://qutebrowser.org')
assert disk_cache.data(QUrl('http://qutebrowser.org')) is None
def test_cache_insert_data(disk_cache):
"""Test if entries inserted into the cache are actually there."""
url = 'http://qutebrowser.org'
content = b'foobar'
assert disk_cache.cacheSize() == 0
preload_cache(disk_cache, url, content)
assert disk_cache.cacheSize() != 0
assert disk_cache.data(QUrl(url)).readAll() == content
def test_cache_remove_data(disk_cache):
"""Test if a previously inserted entry can be removed from the cache."""
url = 'http://qutebrowser.org'
preload_cache(disk_cache, url)
assert disk_cache.cacheSize() > 0
assert disk_cache.remove(QUrl(url))
assert disk_cache.cacheSize() == 0
def test_cache_clear_activated(disk_cache):
"""Test if cache is empty after clearing it."""
assert disk_cache.cacheSize() == 0
preload_cache(disk_cache)
assert disk_cache.cacheSize() != 0
disk_cache.clear()
assert disk_cache.cacheSize() == 0
def test_cache_metadata(disk_cache):
"""Ensure that DiskCache.metaData() returns exactly what was inserted."""
url = 'http://qutebrowser.org'
metadata = QNetworkCacheMetaData()
metadata.setUrl(QUrl(url))
assert metadata.isValid()
device = disk_cache.prepare(metadata)
device.write(b'foobar')
disk_cache.insert(device)
assert disk_cache.metaData(QUrl(url)) == metadata
def test_cache_update_metadata(disk_cache):
"""Test updating the meta data for an existing cache entry."""
url = 'http://qutebrowser.org'
preload_cache(disk_cache, url, b'foo')
assert disk_cache.cacheSize() > 0
metadata = QNetworkCacheMetaData()
metadata.setUrl(QUrl(url))
assert metadata.isValid()
disk_cache.updateMetaData(metadata)
assert disk_cache.metaData(QUrl(url)) == metadata
def test_cache_full(tmpdir):
"""Do a sanity test involving everything."""
disk_cache = QNetworkDiskCache()
disk_cache.setCacheDirectory(str(tmpdir))
url = 'http://qutebrowser.org'
content = b'cutebowser'
preload_cache(disk_cache, url, content)
url2 = 'https://qutebrowser.org'
content2 = b'ohmycert'
preload_cache(disk_cache, url2, content2)
metadata = QNetworkCacheMetaData()
metadata.setUrl(QUrl(url))
soon = QDateTime.currentDateTime().addMonths(4)
assert soon.isValid()
metadata.setLastModified(soon)
assert metadata.isValid()
disk_cache.updateMetaData(metadata)
disk_cache.remove(QUrl(url2))
assert disk_cache.metaData(QUrl(url)).lastModified() == soon
assert disk_cache.data(QUrl(url)).readAll() == content
|
from unittest import TestCase
import numpy as np
import pandas as pd
from scattertext.ScatterChartData import ScatterChartData
from scattertext.TermCategoryFrequencies import TermCategoryFrequencies
class TestTermCategoryFrequencies(TestCase):
def setUp(self):
df = pd.DataFrame(
{'democrat': {'ago': 82, 'builds on': 1, 'filled': 3, 've got': 15, 'of natural': 2, 'and forged': 1,
'have built': 2, 's army': 4, 's protected': 1, 'the most': 28, 'gas alone': 1, 'you what': 9,
'few years': 8, 'gut education': 1, 's left': 2, 'for most': 1, 'raise': 18, 'problem can': 1,
'we the': 5, 'change will': 2},
'republican': {'ago': 39, 'builds on': 0, 'filled': 5, 've got': 16, 'of natural': 0, 'and forged': 0,
'have built': 1, 's army': 0, 's protected': 0, 'the most': 23, 'gas alone': 0,
'you what': 8, 'few years': 13, 'gut education': 0, 's left': 1, 'for most': 2, 'raise': 11,
'problem can': 0, 'we the': 5, 'change will': 0}}
)
self.term_cat_freq = TermCategoryFrequencies(df)
def test_get_num_terms(self):
self.assertEqual(self.term_cat_freq.get_num_terms(), 20)
def test_get_categories(self):
self.assertEqual(self.term_cat_freq.get_categories(), ['democrat', 'republican'])
def test_get_scaled_f_scores_vs_background(self):
df = self.term_cat_freq.get_scaled_f_scores_vs_background()
self.assertGreater(len(df), 20)
self.assertEqual(sum(df.corpus > 0), 3)
self.assertEqual(set(df.columns), {'corpus', 'background', 'Scaled f-score'})
def test_get_term_and_background_counts(self):
df = self.term_cat_freq.get_term_and_background_counts()
self.assertGreater(len(df), 20)
self.assertEqual(sum(df.corpus > 0), 3)
self.assertEqual(set(df.columns), {'corpus', 'background'})
def test_get_term_category_frequencies(self):
df = self.term_cat_freq.get_term_category_frequencies(ScatterChartData())
self.assertEqual(len(df), self.term_cat_freq.get_num_terms())
self.assertEqual(set(df.columns), {'democrat freq', 'republican freq'})
self.assertEqual(df.index.name, 'term')
def test_docs(self):
df = pd.DataFrame(
{'democrat': {'ago': 82, 'builds on': 1, 'filled': 3, 've got': 15, 'of natural': 2, 'and forged': 1,
'have built': 2, 's army': 4, 's protected': 1, 'the most': 28, 'gas alone': 1, 'you what': 9,
'few years': 8, 'gut education': 1, 's left': 2, 'for most': 1, 'raise': 18, 'problem can': 1,
'we the': 5, 'change will': 2},
'republican': {'ago': 39, 'builds on': 0, 'filled': 5, 've got': 16, 'of natural': 0, 'and forged': 0,
'have built': 1, 's army': 0, 's protected': 0, 'the most': 23, 'gas alone': 0,
'you what': 8, 'few years': 13, 'gut education': 0, 's left': 1, 'for most': 2, 'raise': 11,
'problem can': 0, 'we the': 5, 'change will': 0}}
)
doc_df = pd.DataFrame({'text': ['Blah blah gut education ve got filled ago',
'builds on most natural gas alone you what blah',
"change will 's army the most"],
'category': ['republican', 'republican', 'democrat']})
with self.assertRaises(AssertionError):
TermCategoryFrequencies(df, doc_df.rename(columns={'text': 'te'}))
with self.assertRaises(AssertionError):
TermCategoryFrequencies(df, doc_df.rename(columns={'category': 'te'}))
term_cat_freq = TermCategoryFrequencies(df, doc_df)
np.testing.assert_array_equal(term_cat_freq.get_doc_indices(),
[term_cat_freq.get_categories().index('republican'),
term_cat_freq.get_categories().index('republican'),
term_cat_freq.get_categories().index('democrat')])
np.testing.assert_array_equal(term_cat_freq.get_texts(),
['Blah blah gut education ve got filled ago',
'builds on most natural gas alone you what blah',
"change will 's army the most"])
def test_no_docs(self):
np.testing.assert_array_equal(self.term_cat_freq.get_doc_indices(),
[])
np.testing.assert_array_equal(self.term_cat_freq.get_texts(),
[])
|
import errno
import logging
import os
from homeassistant.components.camera import Camera
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from . import CONF_SMARTCAM, HUB as hub
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Verisure Camera."""
if not int(hub.config.get(CONF_SMARTCAM, 1)):
return False
directory_path = hass.config.config_dir
if not os.access(directory_path, os.R_OK):
_LOGGER.error("file path %s is not readable", directory_path)
return False
hub.update_overview()
smartcams = []
smartcams.extend(
[
VerisureSmartcam(hass, device_label, directory_path)
for device_label in hub.get("$.customerImageCameras[*].deviceLabel")
]
)
add_entities(smartcams)
class VerisureSmartcam(Camera):
"""Representation of a Verisure camera."""
def __init__(self, hass, device_label, directory_path):
"""Initialize Verisure File Camera component."""
super().__init__()
self._device_label = device_label
self._directory_path = directory_path
self._image = None
self._image_id = None
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, self.delete_image)
def camera_image(self):
"""Return image response."""
self.check_imagelist()
if not self._image:
_LOGGER.debug("No image to display")
return
_LOGGER.debug("Trying to open %s", self._image)
with open(self._image, "rb") as file:
return file.read()
def check_imagelist(self):
"""Check the contents of the image list."""
hub.update_smartcam_imageseries()
image_ids = hub.get_image_info(
"$.imageSeries[?(@.deviceLabel=='%s')].image[0].imageId", self._device_label
)
if not image_ids:
return
new_image_id = image_ids[0]
if new_image_id in ("-1", self._image_id):
_LOGGER.debug("The image is the same, or loading image_id")
return
_LOGGER.debug("Download new image %s", new_image_id)
new_image_path = os.path.join(
self._directory_path, "{}{}".format(new_image_id, ".jpg")
)
hub.session.download_image(self._device_label, new_image_id, new_image_path)
_LOGGER.debug("Old image_id=%s", self._image_id)
self.delete_image(self)
self._image_id = new_image_id
self._image = new_image_path
def delete_image(self, event):
"""Delete an old image."""
remove_image = os.path.join(
self._directory_path, "{}{}".format(self._image_id, ".jpg")
)
try:
os.remove(remove_image)
_LOGGER.debug("Deleting old image %s", remove_image)
except OSError as error:
if error.errno != errno.ENOENT:
raise
@property
def name(self):
"""Return the name of this camera."""
return hub.get_first(
"$.customerImageCameras[?(@.deviceLabel=='%s')].area", self._device_label
)
|
from homeassistant.components.lock import LockEntity
from . import DOMAIN
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the KEBA charging station platform."""
if discovery_info is None:
return
keba = hass.data[DOMAIN]
sensors = [KebaLock(keba, "Authentication", "authentication")]
async_add_entities(sensors)
class KebaLock(LockEntity):
"""The entity class for KEBA charging stations switch."""
def __init__(self, keba, name, entity_type):
"""Initialize the KEBA switch."""
self._keba = keba
self._name = name
self._entity_type = entity_type
self._state = True
@property
def should_poll(self):
"""Deactivate polling. Data updated by KebaHandler."""
return False
@property
def unique_id(self):
"""Return the unique ID of the lock."""
return f"{self._keba.device_id}_{self._entity_type}"
@property
def name(self):
"""Return the name of the device."""
return f"{self._keba.device_name} {self._name}"
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state
async def async_lock(self, **kwargs):
"""Lock wallbox."""
await self._keba.async_stop()
async def async_unlock(self, **kwargs):
"""Unlock wallbox."""
await self._keba.async_start()
async def async_update(self):
"""Attempt to retrieve on off state from the switch."""
self._state = self._keba.get_value("Authreq") == 1
def update_callback(self):
"""Schedule a state update."""
self.async_schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add update callback after being added to hass."""
self._keba.add_update_listener(self.update_callback)
|
from __future__ import print_function
import os
import subprocess
import sys
ROS_CACHE_TIMEOUT_ENV_NAME = 'ROS_CACHE_TIMEOUT'
ROS_LOCATIONS_ENV_NAME = 'ROS_LOCATIONS'
ROS_LOCATION_SEP = ';'
ERROR_PREFIX = 'Error: '
def ros_location_find(package_name):
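    """Resolve a ROS package, stack or special name to a filesystem location.
    Returns a (return_code, path) tuple: return_code is 0 on success, and
    path is the empty string when the name could not be resolved.
    """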
# process ROS_LOCATION and return if found
ros_location = os.environ.get(ROS_LOCATIONS_ENV_NAME)
if ros_location is not None:
locations = ros_location.split(ROS_LOCATION_SEP)
for loc in locations:
index = loc.find('=')
if index != -1:
if package_name == loc[:index]:
return 0, loc[index + 1:]
if package_name == 'log':
p = subprocess.Popen('roslaunch-logs', stdout=subprocess.PIPE)
result_location = p.communicate()[0].decode().strip()
result_code = p.returncode
return result_code, result_location if result_code == 0 else ''
if package_name == 'test_results':
p = subprocess.Popen('rosrun.bat rosunit test_results_dir.py', stdout=subprocess.PIPE)
result_location = p.communicate()[0].decode().strip()
result_code = p.returncode
return result_code, result_location if result_code == 0 else ''
# process package_name and return
env = os.environ
env[ROS_CACHE_TIMEOUT_ENV_NAME] = '-1.0'
p = subprocess.Popen(['rospack', 'find', package_name], stdout=subprocess.PIPE)
result_location = p.communicate()[0].decode().strip()
result_code = p.returncode
if result_code == 0:
return result_code, result_location
p = subprocess.Popen(['rosstack', 'find', package_name], stdout=subprocess.PIPE)
result_location = p.communicate()[0].decode().strip()
result_code = p.returncode
if result_code == 0:
return result_code, result_location
# package <package_name> not found
return result_code, ''
# Takes either a package-relative path ("pkg/subdir") or just a package name
# (a bare name additionally requires 'forceeval' as the second argument).
# Returns 0 if the package (and optional relative path) could be resolved,
# non-zero otherwise; prints the resolved path on success or an
# "Error: ..." message to stderr on failure.
def findpathmain(argv):
reldir = ''
parameters = os.path.normpath(argv[0]).split(os.path.sep)
package_name = parameters[0]
if len(parameters) > 1:
reldir = os.path.sep.join(parameters[1:])
else:
if len(argv) < 2 or argv[1] != 'forceeval':
print(ERROR_PREFIX + '[' + package_name + '] is not a valid argument!', file=sys.stderr)
return 1
error_code, package_dir = ros_location_find(package_name)
if error_code != 0:
print(ERROR_PREFIX + '[' + package_name + '] not found!', file=sys.stderr)
return error_code
else:
rosdir = os.path.normpath(os.path.sep.join([package_dir, reldir]))
print(rosdir)
return 0
if __name__ == '__main__':
if len(sys.argv) < 2:
sys.exit(1)
sys.exit(findpathmain(sys.argv[1:]))
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from mock import call
from diamond.collector import Collector
from rabbitmq import RabbitMQCollector
##########################################################################
class TestRabbitMQCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('RabbitMQCollector', {
'host': 'localhost:55672',
'user': 'guest',
'password': 'password',
'queues_ignored': '^ignored',
'cluster': True,
})
self.collector = RabbitMQCollector(config, None)
def test_import(self):
self.assertTrue(RabbitMQCollector)
@patch('rabbitmq.RabbitMQClient')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys(self, publish_mock, client_mock):
client = Mock()
queue_data = [{
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
'name': 'test_queue'
}, {
'name': 'ignored',
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
}]
overview_data = {
'node': 'rabbit@localhost',
'more_keys': {'nested_key': 3},
'key': 4,
'string': 'string',
}
node_health = {
'fd_used': 1,
'fd_total': 2,
'mem_used': 2,
'mem_limit': 4,
'sockets_used': 1,
'sockets_total': 2,
'disk_free_limit': 1,
'disk_free': 1,
'proc_used': 1,
'proc_total': 1,
'partitions': [],
}
client_mock.return_value = client
client.get_queues.return_value = queue_data
client.get_overview.return_value = overview_data
client.get_nodes.return_value = [1, 2, 3]
client.get_node.return_value = node_health
client.get_vhost_names.return_value = ['/']
self.collector.collect()
client.get_queues.assert_called_once_with('/')
client.get_queue.assert_not_called()
client.get_nodes.assert_called_once_with()
client.get_node.assert_called_once_with('rabbit@localhost')
client.get_vhost_names.assert_called_once_with()
metrics = {
'queues.test_queue.more_keys.nested_key': 1,
'queues.test_queue.key': 2,
'more_keys.nested_key': 3,
'key': 4,
'health.fd_used': 1,
'health.fd_total': 2,
'health.mem_used': 2,
'health.mem_limit': 4,
'health.sockets_used': 1,
'health.sockets_total': 2,
'health.disk_free_limit': 1,
'health.disk_free': 1,
'health.proc_used': 1,
'health.proc_total': 1,
'cluster.partitions': 0,
'cluster.nodes': 3
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch('rabbitmq.RabbitMQClient')
@patch.object(Collector, 'publish')
def test_opt_should_replace_dots(self, publish_mock, client_mock):
self.collector.config['replace_dot'] = '_'
client = Mock()
queue_data = [{
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
'name': 'test.queue'
}, {
'name': 'ignored',
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
}]
overview_data = {
'node': 'rabbit@localhost',
'more_keys': {'nested_key': 3},
'key': 4,
'string': 'string',
}
node_health = {
'fd_used': 1,
'fd_total': 2,
'mem_used': 2,
'mem_limit': 4,
'sockets_used': 1,
'sockets_total': 2,
'disk_free_limit': 1,
'disk_free': 1,
'proc_used': 1,
'proc_total': 1,
'partitions': [],
}
client_mock.return_value = client
client.get_queues.return_value = queue_data
client.get_overview.return_value = overview_data
client.get_nodes.return_value = [1, 2, 3]
client.get_node.return_value = node_health
client.get_vhost_names.return_value = ['/']
self.collector.collect()
metrics = {
'queues.test_queue.more_keys.nested_key': 1,
'queues.test_queue.key': 2,
'more_keys.nested_key': 3,
'key': 4,
'health.fd_used': 1,
'health.fd_total': 2,
'health.mem_used': 2,
'health.mem_limit': 4,
'health.sockets_used': 1,
'health.sockets_total': 2,
'health.disk_free_limit': 1,
'health.disk_free': 1,
'health.proc_used': 1,
'health.proc_total': 1,
'cluster.partitions': 0,
'cluster.nodes': 3
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
self.collector.config['replace_dot'] = False
@patch('rabbitmq.RabbitMQClient')
@patch.object(Collector, 'publish')
def test_opt_should_replace_slashes(self, publish_mock, client_mock):
self.collector.config['replace_slash'] = '_'
client = Mock()
queue_data = [{
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
'name': 'test/queue'
}, {
'name': 'ignored',
'more_keys': {'nested_key': 1},
'key': 2,
'string': 'str',
}]
overview_data = {
'node': 'rabbit@localhost',
'more_keys': {'nested_key': 3},
'key': 4,
'string': 'string',
}
node_health = {
'fd_used': 1,
'fd_total': 2,
'mem_used': 2,
'mem_limit': 4,
'sockets_used': 1,
'sockets_total': 2,
'disk_free_limit': 1,
'disk_free': 1,
'proc_used': 1,
'proc_total': 1,
'partitions': [],
}
client_mock.return_value = client
client.get_queues.return_value = queue_data
client.get_overview.return_value = overview_data
client.get_nodes.return_value = [1, 2, 3]
client.get_node.return_value = node_health
client.get_vhost_names.return_value = ['/']
self.collector.collect()
metrics = {
'queues.test_queue.more_keys.nested_key': 1,
'queues.test_queue.key': 2,
'more_keys.nested_key': 3,
'key': 4,
'health.fd_used': 1,
'health.fd_total': 2,
'health.mem_used': 2,
'health.mem_limit': 4,
'health.sockets_used': 1,
'health.sockets_total': 2,
'health.disk_free_limit': 1,
'health.disk_free': 1,
'health.proc_used': 1,
'health.proc_total': 1,
'cluster.partitions': 0,
'cluster.nodes': 3
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
self.collector.config['replace_slash'] = False
@patch('rabbitmq.RabbitMQClient')
@patch.object(Collector, 'publish')
def test_opt_individual_queues(self, publish_mock, client_mock):
self.collector.config['query_individual_queues'] = True
self.collector.config['queues'] = 'queue1 queue2 queue3 queue4'
client = Mock()
queue_data = {
'vhost1': {
'queue1': {
'name': 'queue1',
'key': 1,
'string': 'str',
},
'queue2': {
'name': 'queue2',
'key': 2,
'string': 'str',
},
'ignored': {
'name': 'ignored',
'key': 3,
'string': 'str',
},
},
'vhost2': {
'queue3': {
'name': 'queue3',
'key': 4,
'string': 'str',
},
'queue4': {
'name': 'queue4',
'key': 5,
'string': 'str',
},
'ignored': {
'name': 'ignored',
'key': 6,
'string': 'str',
}
}
}
overview_data = {
'node': 'rabbit@localhost',
}
node_health = {
'fd_used': 1,
'fd_total': 2,
'mem_used': 2,
'mem_limit': 4,
'sockets_used': 1,
'sockets_total': 2,
'disk_free_limit': 1,
'disk_free': 1,
'proc_used': 1,
'proc_total': 1,
'partitions': [],
}
client_mock.return_value = client
client.get_queue.side_effect = lambda v, q: queue_data.get(v).get(q)
client.get_overview.return_value = overview_data
client.get_nodes.return_value = [1, 2, 3]
client.get_node.return_value = node_health
client.get_vhost_names.return_value = ['vhost1', 'vhost2']
self.collector.collect()
client.get_queues.assert_not_called()
client.get_nodes.assert_called_once_with()
client.get_node.assert_called_once_with('rabbit@localhost')
client.get_vhost_names.assert_called_once_with()
client.get_queue.assert_has_calls([
call('vhost1', 'queue1'),
call('vhost1', 'queue2'),
call('vhost2', 'queue3'),
call('vhost2', 'queue4'),
], any_order=True)
metrics = {
'queues.queue1.key': 1,
'queues.queue2.key': 2,
'queues.queue3.key': 4,
'queues.queue4.key': 5,
'health.fd_used': 1,
'health.fd_total': 2,
'health.mem_used': 2,
'health.mem_limit': 4,
'health.sockets_used': 1,
'health.sockets_total': 2,
'health.disk_free_limit': 1,
'health.disk_free': 1,
'health.proc_used': 1,
'health.proc_total': 1,
'cluster.partitions': 0,
'cluster.nodes': 3
}
self.assertPublishedMany(publish_mock, metrics)
self.collector.config['query_individual_queues'] = False
self.collector.config['queues'] = ''
@patch('rabbitmq.RabbitMQClient')
@patch.object(Collector, 'publish')
def test_opt_vhost_individual_queues(self, publish_mock, client_mock):
self.collector.config['query_individual_queues'] = True
self.collector.config['vhosts'] = {
'vhost1': 'queue1 queue2',
'vhost2': 'queue3 queue4'
}
client = Mock()
queue_data = {
'vhost1': {
'queue1': {
'name': 'queue1',
'key': 1,
'string': 'str',
},
'queue2': {
'name': 'queue2',
'key': 2,
'string': 'str',
},
'ignored': {
'name': 'ignored',
'key': 3,
'string': 'str',
},
},
'vhost2': {
'queue3': {
'name': 'queue3',
'key': 4,
'string': 'str',
},
'queue4': {
'name': 'queue4',
'key': 5,
'string': 'str',
},
'ignored': {
'name': 'ignored',
'key': 6,
'string': 'str',
}
}
}
overview_data = {
'node': 'rabbit@localhost',
}
node_health = {
'fd_used': 1,
'fd_total': 2,
'mem_used': 2,
'mem_limit': 4,
'sockets_used': 1,
'sockets_total': 2,
'disk_free_limit': 1,
'disk_free': 1,
'proc_used': 1,
'proc_total': 1,
'partitions': [],
}
client_mock.return_value = client
client.get_queue.side_effect = lambda v, q: queue_data.get(v).get(q)
client.get_overview.return_value = overview_data
client.get_nodes.return_value = [1, 2, 3]
client.get_node.return_value = node_health
client.get_vhost_names.return_value = ['vhost1', 'vhost2']
self.collector.collect()
client.get_queues.assert_not_called()
client.get_nodes.assert_called_once_with()
client.get_node.assert_called_once_with('rabbit@localhost')
client.get_vhost_names.assert_called_once_with()
client.get_queue.assert_has_calls([
call('vhost1', 'queue1'),
call('vhost1', 'queue2'),
call('vhost2', 'queue3'),
call('vhost2', 'queue4'),
], any_order=True)
metrics = {
'vhosts.vhost1.queues.queue1.key': 1,
'vhosts.vhost1.queues.queue2.key': 2,
'vhosts.vhost2.queues.queue3.key': 4,
'vhosts.vhost2.queues.queue4.key': 5,
'health.fd_used': 1,
'health.fd_total': 2,
'health.mem_used': 2,
'health.mem_limit': 4,
'health.sockets_used': 1,
'health.sockets_total': 2,
'health.disk_free_limit': 1,
'health.disk_free': 1,
'health.proc_used': 1,
'health.proc_total': 1,
'cluster.partitions': 0,
'cluster.nodes': 3
}
self.assertPublishedMany(publish_mock, metrics)
self.collector.config['query_individual_queues'] = False
del self.collector.config['vhosts']
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from homeassistant.helpers import debounce
from tests.async_mock import AsyncMock
async def test_immediate_works(hass):
"""Test immediate works."""
calls = []
debouncer = debounce.Debouncer(
hass,
None,
cooldown=0.01,
immediate=True,
function=AsyncMock(side_effect=lambda: calls.append(None)),
)
# Call when nothing happening
await debouncer.async_call()
assert len(calls) == 1
assert debouncer._timer_task is not None
assert debouncer._execute_at_end_of_timer is False
assert debouncer._job.target == debouncer.function
# Call when cooldown active setting execute at end to True
await debouncer.async_call()
assert len(calls) == 1
assert debouncer._timer_task is not None
assert debouncer._execute_at_end_of_timer is True
assert debouncer._job.target == debouncer.function
# Canceling debounce in cooldown
debouncer.async_cancel()
assert debouncer._timer_task is None
assert debouncer._execute_at_end_of_timer is False
assert debouncer._job.target == debouncer.function
before_job = debouncer._job
# Call and let timer run out
await debouncer.async_call()
assert len(calls) == 2
await debouncer._handle_timer_finish()
assert len(calls) == 2
assert debouncer._timer_task is None
assert debouncer._execute_at_end_of_timer is False
assert debouncer._job.target == debouncer.function
assert debouncer._job == before_job
# Test calling doesn't execute/cooldown if currently executing.
await debouncer._execute_lock.acquire()
await debouncer.async_call()
assert len(calls) == 2
assert debouncer._timer_task is None
assert debouncer._execute_at_end_of_timer is False
debouncer._execute_lock.release()
assert debouncer._job.target == debouncer.function
async def test_not_immediate_works(hass):
"""Test immediate works."""
calls = []
debouncer = debounce.Debouncer(
hass,
None,
cooldown=0.01,
immediate=False,
function=AsyncMock(side_effect=lambda: calls.append(None)),
)
# Call when nothing happening
await debouncer.async_call()
assert len(calls) == 0
assert debouncer._timer_task is not None
assert debouncer._execute_at_end_of_timer is True
# Call while still on cooldown
await debouncer.async_call()
assert len(calls) == 0
assert debouncer._timer_task is not None
assert debouncer._execute_at_end_of_timer is True
# Canceling while on cooldown
debouncer.async_cancel()
assert debouncer._timer_task is None
assert debouncer._execute_at_end_of_timer is False
# Call and let timer run out
await debouncer.async_call()
assert len(calls) == 0
await debouncer._handle_timer_finish()
assert len(calls) == 1
assert debouncer._timer_task is not None
assert debouncer._execute_at_end_of_timer is False
assert debouncer._job.target == debouncer.function
# Reset debouncer
debouncer.async_cancel()
# Test calling doesn't schedule if currently executing.
await debouncer._execute_lock.acquire()
await debouncer.async_call()
assert len(calls) == 1
assert debouncer._timer_task is None
assert debouncer._execute_at_end_of_timer is False
debouncer._execute_lock.release()
assert debouncer._job.target == debouncer.function
async def test_immediate_works_with_function_swapped(hass):
"""Test immediate works and we can change out the function."""
calls = []
one_function = AsyncMock(side_effect=lambda: calls.append(1))
two_function = AsyncMock(side_effect=lambda: calls.append(2))
debouncer = debounce.Debouncer(
hass,
None,
cooldown=0.01,
immediate=True,
function=one_function,
)
# Call when nothing happening
await debouncer.async_call()
assert len(calls) == 1
assert debouncer._timer_task is not None
assert debouncer._execute_at_end_of_timer is False
assert debouncer._job.target == debouncer.function
# Call when cooldown active setting execute at end to True
await debouncer.async_call()
assert len(calls) == 1
assert debouncer._timer_task is not None
assert debouncer._execute_at_end_of_timer is True
assert debouncer._job.target == debouncer.function
# Canceling debounce in cooldown
debouncer.async_cancel()
assert debouncer._timer_task is None
assert debouncer._execute_at_end_of_timer is False
assert debouncer._job.target == debouncer.function
before_job = debouncer._job
debouncer.function = two_function
# Call and let timer run out
await debouncer.async_call()
assert len(calls) == 2
assert calls == [1, 2]
await debouncer._handle_timer_finish()
assert len(calls) == 2
assert calls == [1, 2]
assert debouncer._timer_task is None
assert debouncer._execute_at_end_of_timer is False
assert debouncer._job.target == debouncer.function
assert debouncer._job != before_job
# Test calling doesn't execute/cooldown if currently executing.
await debouncer._execute_lock.acquire()
await debouncer.async_call()
assert len(calls) == 2
assert calls == [1, 2]
assert debouncer._timer_task is None
assert debouncer._execute_at_end_of_timer is False
debouncer._execute_lock.release()
assert debouncer._job.target == debouncer.function
|
import asyncio
import importlib
import logging
from typing import Iterable
from aiohttp.client_exceptions import ClientConnectionError, ClientResponseError
from pysmartapp.event import EVENT_TYPE_DEVICE
from pysmartthings import Attribute, Capability, SmartThings
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
HTTP_FORBIDDEN,
HTTP_UNAUTHORIZED,
)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.dispatcher import (
async_dispatcher_connect,
async_dispatcher_send,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .config_flow import SmartThingsFlowHandler # noqa: F401
from .const import (
CONF_APP_ID,
CONF_INSTALLED_APP_ID,
CONF_LOCATION_ID,
CONF_REFRESH_TOKEN,
DATA_BROKERS,
DATA_MANAGER,
DOMAIN,
EVENT_BUTTON,
SIGNAL_SMARTTHINGS_UPDATE,
SUPPORTED_PLATFORMS,
TOKEN_REFRESH_INTERVAL,
)
from .smartapp import (
format_unique_id,
setup_smartapp,
setup_smartapp_endpoint,
smartapp_sync_subscriptions,
unload_smartapp_endpoint,
validate_installed_app,
validate_webhook_requirements,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Initialize the SmartThings platform."""
await setup_smartapp_endpoint(hass)
return True
async def async_migrate_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Handle migration of a previous version config entry.
A config entry created under a previous version must go through the
integration setup again so we can properly retrieve the needed data
elements. Force this by removing the entry and triggering a new flow.
"""
# Remove the entry which will invoke the callback to delete the app.
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
# only create new flow if there isn't a pending one for SmartThings.
flows = hass.config_entries.flow.async_progress()
if not [flow for flow in flows if flow["handler"] == DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(DOMAIN, context={"source": "import"})
)
# Return False because it could not be migrated.
return False
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Initialize config entry which represents an installed SmartApp."""
# For backwards compat
if entry.unique_id is None:
hass.config_entries.async_update_entry(
entry,
unique_id=format_unique_id(
entry.data[CONF_APP_ID], entry.data[CONF_LOCATION_ID]
),
)
if not validate_webhook_requirements(hass):
_LOGGER.warning(
"The 'base_url' of the 'http' integration must be configured and start with 'https://'"
)
return False
api = SmartThings(async_get_clientsession(hass), entry.data[CONF_ACCESS_TOKEN])
remove_entry = False
try:
        # See if the app is already set up. This occurs when there are
# installs in multiple SmartThings locations (valid use-case)
manager = hass.data[DOMAIN][DATA_MANAGER]
smart_app = manager.smartapps.get(entry.data[CONF_APP_ID])
if not smart_app:
# Validate and setup the app.
app = await api.app(entry.data[CONF_APP_ID])
smart_app = setup_smartapp(hass, app)
# Validate and retrieve the installed app.
installed_app = await validate_installed_app(
api, entry.data[CONF_INSTALLED_APP_ID]
)
# Get scenes
scenes = await async_get_entry_scenes(entry, api)
# Get SmartApp token to sync subscriptions
token = await api.generate_tokens(
entry.data[CONF_CLIENT_ID],
entry.data[CONF_CLIENT_SECRET],
entry.data[CONF_REFRESH_TOKEN],
)
hass.config_entries.async_update_entry(
entry, data={**entry.data, CONF_REFRESH_TOKEN: token.refresh_token}
)
# Get devices and their current status
devices = await api.devices(location_ids=[installed_app.location_id])
async def retrieve_device_status(device):
try:
await device.status.refresh()
except ClientResponseError:
_LOGGER.debug(
"Unable to update status for device: %s (%s), the device will be excluded",
device.label,
device.device_id,
exc_info=True,
)
devices.remove(device)
await asyncio.gather(*(retrieve_device_status(d) for d in devices.copy()))
# Sync device subscriptions
await smartapp_sync_subscriptions(
hass,
token.access_token,
installed_app.location_id,
installed_app.installed_app_id,
devices,
)
# Setup device broker
broker = DeviceBroker(hass, entry, token, smart_app, devices, scenes)
broker.connect()
hass.data[DOMAIN][DATA_BROKERS][entry.entry_id] = broker
except ClientResponseError as ex:
if ex.status in (HTTP_UNAUTHORIZED, HTTP_FORBIDDEN):
_LOGGER.exception(
"Unable to setup configuration entry '%s' - please reconfigure the integration",
entry.title,
)
remove_entry = True
else:
_LOGGER.debug(ex, exc_info=True)
raise ConfigEntryNotReady from ex
except (ClientConnectionError, RuntimeWarning) as ex:
_LOGGER.debug(ex, exc_info=True)
raise ConfigEntryNotReady from ex
if remove_entry:
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
# only create new flow if there isn't a pending one for SmartThings.
flows = hass.config_entries.flow.async_progress()
if not [flow for flow in flows if flow["handler"] == DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": "import"}
)
)
return False
for component in SUPPORTED_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_get_entry_scenes(entry: ConfigEntry, api):
"""Get the scenes within an integration."""
try:
return await api.scenes(location_id=entry.data[CONF_LOCATION_ID])
except ClientResponseError as ex:
if ex.status == HTTP_FORBIDDEN:
_LOGGER.exception(
"Unable to load scenes for configuration entry '%s' because the access token does not have the required access",
entry.title,
)
else:
raise
return []
async def async_unload_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Unload a config entry."""
broker = hass.data[DOMAIN][DATA_BROKERS].pop(entry.entry_id, None)
if broker:
broker.disconnect()
tasks = [
hass.config_entries.async_forward_entry_unload(entry, component)
for component in SUPPORTED_PLATFORMS
]
return all(await asyncio.gather(*tasks))
async def async_remove_entry(hass: HomeAssistantType, entry: ConfigEntry) -> None:
"""Perform clean-up when entry is being removed."""
api = SmartThings(async_get_clientsession(hass), entry.data[CONF_ACCESS_TOKEN])
# Remove the installed_app, which if already removed raises a HTTP_FORBIDDEN error.
installed_app_id = entry.data[CONF_INSTALLED_APP_ID]
try:
await api.delete_installed_app(installed_app_id)
except ClientResponseError as ex:
if ex.status == HTTP_FORBIDDEN:
_LOGGER.debug(
"Installed app %s has already been removed",
installed_app_id,
exc_info=True,
)
else:
raise
_LOGGER.debug("Removed installed app %s", installed_app_id)
# Remove the app if not referenced by other entries, which if already
# removed raises a HTTP_FORBIDDEN error.
all_entries = hass.config_entries.async_entries(DOMAIN)
app_id = entry.data[CONF_APP_ID]
app_count = sum(1 for entry in all_entries if entry.data[CONF_APP_ID] == app_id)
if app_count > 1:
_LOGGER.debug(
"App %s was not removed because it is in use by other configuration entries",
app_id,
)
return
# Remove the app
try:
await api.delete_app(app_id)
except ClientResponseError as ex:
if ex.status == HTTP_FORBIDDEN:
_LOGGER.debug("App %s has already been removed", app_id, exc_info=True)
else:
raise
_LOGGER.debug("Removed app %s", app_id)
if len(all_entries) == 1:
await unload_smartapp_endpoint(hass)
class DeviceBroker:
"""Manages an individual SmartThings config entry."""
def __init__(
self,
hass: HomeAssistantType,
entry: ConfigEntry,
token,
smart_app,
devices: Iterable,
scenes: Iterable,
):
"""Create a new instance of the DeviceBroker."""
self._hass = hass
self._entry = entry
self._installed_app_id = entry.data[CONF_INSTALLED_APP_ID]
self._smart_app = smart_app
self._token = token
self._event_disconnect = None
self._regenerate_token_remove = None
self._assignments = self._assign_capabilities(devices)
self.devices = {device.device_id: device for device in devices}
self.scenes = {scene.scene_id: scene for scene in scenes}
def _assign_capabilities(self, devices: Iterable):
"""Assign platforms to capabilities."""
assignments = {}
for device in devices:
capabilities = device.capabilities.copy()
slots = {}
for platform_name in SUPPORTED_PLATFORMS:
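                # Import the platform module (e.g. ".light") relative to this
                # integration's package.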
platform = importlib.import_module(f".{platform_name}", self.__module__)
if not hasattr(platform, "get_capabilities"):
continue
assigned = platform.get_capabilities(capabilities)
if not assigned:
continue
# Draw-down capabilities and set slot assignment
for capability in assigned:
if capability not in capabilities:
continue
capabilities.remove(capability)
slots[capability] = platform_name
assignments[device.device_id] = slots
return assignments
def connect(self):
"""Connect handlers/listeners for device/lifecycle events."""
# Setup interval to regenerate the refresh token on a periodic basis.
# Tokens expire in 30 days and once expired, cannot be recovered.
async def regenerate_refresh_token(now):
"""Generate a new refresh token and update the config entry."""
await self._token.refresh(
self._entry.data[CONF_CLIENT_ID],
self._entry.data[CONF_CLIENT_SECRET],
)
self._hass.config_entries.async_update_entry(
self._entry,
data={
**self._entry.data,
CONF_REFRESH_TOKEN: self._token.refresh_token,
},
)
_LOGGER.debug(
"Regenerated refresh token for installed app: %s",
self._installed_app_id,
)
self._regenerate_token_remove = async_track_time_interval(
self._hass, regenerate_refresh_token, TOKEN_REFRESH_INTERVAL
)
# Connect handler to incoming device events
self._event_disconnect = self._smart_app.connect_event(self._event_handler)
def disconnect(self):
"""Disconnects handlers/listeners for device/lifecycle events."""
if self._regenerate_token_remove:
self._regenerate_token_remove()
if self._event_disconnect:
self._event_disconnect()
def get_assigned(self, device_id: str, platform: str):
"""Get the capabilities assigned to the platform."""
slots = self._assignments.get(device_id, {})
return [key for key, value in slots.items() if value == platform]
def any_assigned(self, device_id: str, platform: str):
"""Return True if the platform has any assigned capabilities."""
slots = self._assignments.get(device_id, {})
return any(value for value in slots.values() if value == platform)
async def _event_handler(self, req, resp, app):
"""Broker for incoming events."""
# Do not process events received from a different installed app
# under the same parent SmartApp (valid use-scenario)
if req.installed_app_id != self._installed_app_id:
return
updated_devices = set()
for evt in req.events:
if evt.event_type != EVENT_TYPE_DEVICE:
continue
device = self.devices.get(evt.device_id)
if not device:
continue
device.status.apply_attribute_update(
evt.component_id,
evt.capability,
evt.attribute,
evt.value,
data=evt.data,
)
# Fire events for buttons
if (
evt.capability == Capability.button
and evt.attribute == Attribute.button
):
data = {
"component_id": evt.component_id,
"device_id": evt.device_id,
"location_id": evt.location_id,
"value": evt.value,
"name": device.label,
"data": evt.data,
}
self._hass.bus.async_fire(EVENT_BUTTON, data)
_LOGGER.debug("Fired button event: %s", data)
else:
data = {
"location_id": evt.location_id,
"device_id": evt.device_id,
"component_id": evt.component_id,
"capability": evt.capability,
"attribute": evt.attribute,
"value": evt.value,
"data": evt.data,
}
_LOGGER.debug("Push update received: %s", data)
updated_devices.add(device.device_id)
async_dispatcher_send(self._hass, SIGNAL_SMARTTHINGS_UPDATE, updated_devices)
class SmartThingsEntity(Entity):
"""Defines a SmartThings entity."""
def __init__(self, device):
"""Initialize the instance."""
self._device = device
self._dispatcher_remove = None
async def async_added_to_hass(self):
"""Device added to hass."""
async def async_update_state(devices):
"""Update device state."""
if self._device.device_id in devices:
await self.async_update_ha_state(True)
self._dispatcher_remove = async_dispatcher_connect(
self.hass, SIGNAL_SMARTTHINGS_UPDATE, async_update_state
)
async def async_will_remove_from_hass(self) -> None:
"""Disconnect the device when removed."""
if self._dispatcher_remove:
self._dispatcher_remove()
@property
def device_info(self):
"""Get attributes about the device."""
return {
"identifiers": {(DOMAIN, self._device.device_id)},
"name": self._device.label,
"model": self._device.device_type_name,
"manufacturer": "Unavailable",
}
@property
def name(self) -> str:
"""Return the name of the device."""
return self._device.label
@property
def should_poll(self) -> bool:
"""No polling needed for this device."""
return False
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._device.device_id
|
import re
import socket
import diamond.collector
class SquidCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
self.host_pattern = re.compile("(([^@]+)@)?([^:]+)(:([0-9]+))?")
        self.stat_pattern = re.compile(r"^([^ ]+) = ([0-9\.]+)$")
super(SquidCollector, self).__init__(*args, **kwargs)
def process_config(self):
super(SquidCollector, self).process_config()
self.squid_hosts = {}
for host in self.config['hosts']:
matches = self.host_pattern.match(host)
if matches.group(5):
port = matches.group(5)
else:
port = 3128
if matches.group(2):
nick = matches.group(2)
else:
nick = port
self.squid_hosts[nick] = {
'host': matches.group(3),
'port': int(port)
}
def get_default_config_help(self):
config_help = super(SquidCollector, self).get_default_config_help()
config_help.update({
'hosts': 'List of hosts to collect from. Format is ' +
'[nickname@]host[:port], [nickname@]host[:port], etc',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(SquidCollector, self).get_default_config()
config.update({
'hosts': ['localhost:3128'],
'path': 'squid',
})
return config
def _getData(self, host, port):
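        """Fetch the raw counters page via squid's cache manager protocol.
        Returns the full response as a string, or None if the request fails.
        """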
try:
squid_sock = socket.socket()
squid_sock.connect((host, int(port)))
squid_sock.settimeout(0.25)
squid_sock.sendall(
"GET cache_object://localhost/counters HTTP/1.0\r\n" +
"Host: localhost\r\n" +
"Accept: */*\r\n" +
"Connection: close\r\n\r\n")
fulldata = ''
while True:
data = squid_sock.recv(1024)
if not data:
break
fulldata = fulldata + data
except Exception as e:
            self.log.error('Could not connect to squid: %s', e)
return None
squid_sock.close()
return fulldata
def collect(self):
for nickname in self.squid_hosts.keys():
squid_host = self.squid_hosts[nickname]
fulldata = self._getData(squid_host['host'],
squid_host['port'])
if fulldata is not None:
fulldata = fulldata.splitlines()
for data in fulldata:
matches = self.stat_pattern.match(data)
if matches:
self.publish_counter("%s.%s" % (nickname,
matches.group(1)),
float(matches.group(2)))
|
from aiohomekit.model.characteristics import CharacteristicsTypes
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.core import callback
from . import KNOWN_DEVICES, HomeKitEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Homekit lightbulb."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_service(aid, service):
if service["stype"] != "lightbulb":
return False
info = {"aid": aid, "iid": service["iid"]}
async_add_entities([HomeKitLight(conn, info)], True)
return True
conn.add_listener(async_add_service)
class HomeKitLight(HomeKitEntity, LightEntity):
"""Representation of a Homekit light."""
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
return [
CharacteristicsTypes.ON,
CharacteristicsTypes.BRIGHTNESS,
CharacteristicsTypes.COLOR_TEMPERATURE,
CharacteristicsTypes.HUE,
CharacteristicsTypes.SATURATION,
]
@property
def is_on(self):
"""Return true if device is on."""
return self.service.value(CharacteristicsTypes.ON)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self.service.value(CharacteristicsTypes.BRIGHTNESS) * 255 / 100
@property
def hs_color(self):
"""Return the color property."""
return (
self.service.value(CharacteristicsTypes.HUE),
self.service.value(CharacteristicsTypes.SATURATION),
)
@property
def color_temp(self):
"""Return the color temperature."""
return self.service.value(CharacteristicsTypes.COLOR_TEMPERATURE)
@property
def supported_features(self):
"""Flag supported features."""
features = 0
if self.service.has(CharacteristicsTypes.BRIGHTNESS):
features |= SUPPORT_BRIGHTNESS
if self.service.has(CharacteristicsTypes.COLOR_TEMPERATURE):
features |= SUPPORT_COLOR_TEMP
if self.service.has(CharacteristicsTypes.HUE):
features |= SUPPORT_COLOR
if self.service.has(CharacteristicsTypes.SATURATION):
features |= SUPPORT_COLOR
return features
async def async_turn_on(self, **kwargs):
"""Turn the specified light on."""
hs_color = kwargs.get(ATTR_HS_COLOR)
temperature = kwargs.get(ATTR_COLOR_TEMP)
brightness = kwargs.get(ATTR_BRIGHTNESS)
characteristics = {}
if hs_color is not None:
characteristics.update(
{
CharacteristicsTypes.HUE: hs_color[0],
CharacteristicsTypes.SATURATION: hs_color[1],
}
)
if brightness is not None:
characteristics[CharacteristicsTypes.BRIGHTNESS] = int(
brightness * 100 / 255
)
if temperature is not None:
characteristics[CharacteristicsTypes.COLOR_TEMPERATURE] = int(temperature)
characteristics[CharacteristicsTypes.ON] = True
await self.async_put_characteristics(characteristics)
async def async_turn_off(self, **kwargs):
"""Turn the specified light off."""
await self.async_put_characteristics({CharacteristicsTypes.ON: False})
|
from axis.event_stream import CLASS_OUTPUT
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .axis_base import AxisEventBase
from .const import DOMAIN as AXIS_DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a Axis switch."""
device = hass.data[AXIS_DOMAIN][config_entry.unique_id]
@callback
def async_add_switch(event_id):
"""Add switch from Axis device."""
event = device.api.event[event_id]
if event.CLASS == CLASS_OUTPUT:
async_add_entities([AxisSwitch(event, device)])
device.listeners.append(
async_dispatcher_connect(hass, device.signal_new_event, async_add_switch)
)
class AxisSwitch(AxisEventBase, SwitchEntity):
"""Representation of a Axis switch."""
@property
def is_on(self):
"""Return true if event is active."""
return self.event.is_tripped
async def async_turn_on(self, **kwargs):
"""Turn on switch."""
await self.device.api.vapix.ports[self.event.id].close()
async def async_turn_off(self, **kwargs):
"""Turn off switch."""
await self.device.api.vapix.ports[self.event.id].open()
@property
def name(self):
"""Return the name of the event."""
if self.event.id and self.device.api.vapix.ports[self.event.id].name:
return (
f"{self.device.name} {self.device.api.vapix.ports[self.event.id].name}"
)
return super().name
|
from time import time
from flexx import flx
class Circle(flx.Label):
CSS = """
.flx-Circle {
background: #f00;
border-radius: 10px;
width: 10px;
height: 10px;
}
"""
class Circles(flx.Widget):
def init(self):
with flx.PinboardLayout():
self._circles = [Circle() for i in range(32)]
self.tick()
def tick(self):
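        # Move every circle along a circular path; Math and window are the
        # browser globals provided by PScript when this runs client-side.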
global Math, window
t = time()
for i, circle in enumerate(self._circles):
x = Math.sin(i*0.2 + t) * 30 + 50
y = Math.cos(i*0.2 + t) * 30 + 50
circle.apply_style(dict(left=x + '%', top=y + '%'))
window.setTimeout(self.tick, 30)
if __name__ == '__main__':
m = flx.App(Circles).launch('app')
flx.run()
|
import types
import inspect
def get_attrs(obj):
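    """Split an object's public attributes into (properties, methods, fields).
    Properties are reported together with their getter return type and setter
    parameter type where available; all three lists are returned sorted.
    """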
all_obj_attrs = [x for x in dir(obj) if not x.startswith('_')]
props = []
funcs = []
fields = []
for obj_attr in all_obj_attrs:
objs_to_check = list(obj.__class__.__mro__)
objs_to_check.insert(0, obj)
for obj_class in objs_to_check:
try:
attr = getattr(obj_class, obj_attr)
if isinstance(attr, property):
get_sig = str(inspect.signature(attr.fget))
if '->' in get_sig:
get_sig = ':' + get_sig.split('-> ')[1]
else:
get_sig = ''
if attr.fset is not None:
set_sig = inspect.signature(attr.fset)
if len(set_sig.parameters.keys()) > 0:
set_type = str(set_sig.parameters[list(set_sig.parameters.keys())[-1]])
else:
set_type = ''
if ':' in set_type:
set_type = ':' + set_type.split(':')[1]
else:
set_type = ''
props.append(obj_attr + ' - get{0}, set{1}'.format(get_sig, set_type))
else:
props.append(obj_attr + ' - get{0}'.format(get_sig))
break
elif isinstance(attr, types.FunctionType):
funcs.append(obj_attr + str(inspect.signature(attr)))
break
except Exception as err:
pass
else:
fields.append(obj_attr)
return sorted(props), sorted(funcs), sorted(fields)
def print_attrs(obj, recurse_to=None, indent=0):
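    """Pretty-print the properties, methods and fields of ``obj`` as a tree.
    Attribute names listed in ``recurse_to`` are expanded one level deeper;
    each name is removed from the set before recursing to avoid cycles.
    """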
if recurse_to is None:
recurse_to = set()
else:
recurse_to = set(recurse_to)
props, funcs, fields = get_attrs(obj)
def print2(x): return print((" " * indent) + x)
print(obj.__class__.__name__)
if len(props) > 0:
if len(funcs) + len(fields) == 0:
print2(" └ properties:")
pipe = ' '
else:
print2(" ├ properties:")
pipe = '│'
for index, prop in enumerate(props, start=1):
if prop in recurse_to:
if len(props) != index:
print((" " * indent) + ' {0} ├- {1} - '.format(pipe, prop), end='')
                    print_attrs(getattr(obj, prop), recurse_to=(recurse_to - {prop}), indent=indent + 7)
else:
print((" " * indent) + ' {0} └- {1} - '.format(pipe, prop), end='')
                    print_attrs(getattr(obj, prop), recurse_to=(recurse_to - {prop}), indent=indent + 7)
else:
if len(props) != index:
print2(' {0} ├- {1}'.format(pipe, prop))
else:
print2(' {0} └- {1}'.format(pipe, prop))
if len(funcs) > 0:
if len(fields) == 0:
print2(" └ methods:")
pipe = ' '
else:
print2(" ├ methods:")
pipe = '│'
for index, func in enumerate(funcs, start=1):
if len(funcs) != index:
print2(' {0} ├- {1}'.format(pipe, func))
else:
print2(' {0} └- {1}'.format(pipe, func))
if len(fields) > 0:
print2(" └ fields:")
for index, field in enumerate(fields, start=1):
if field in recurse_to:
if len(fields) != index:
print((" " * indent) + ' ├- {0} - '.format(field), end='')
                    print_attrs(getattr(obj, field), recurse_to=(recurse_to - {field}), indent=indent + 6)
else:
print((" " * indent) + ' └- {0} - '.format(field), end='')
                    print_attrs(getattr(obj, field), recurse_to=(recurse_to - {field}), indent=indent + 6)
else:
if len(fields) != index:
print2(' ├- {0}'.format(field))
else:
print2(' └- {0}'.format(field))
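# Hypothetical usage sketch (not part of the original module): inspect a small
# demo class with a property, a method and a nested field, expanding "child".
if __name__ == '__main__':
    class _Child:
        colour = 'red'
    class _Demo:
        def __init__(self):
            self.child = _Child()
        @property
        def name(self) -> str:
            return 'demo'
        def greet(self, who):
            return 'hi ' + who
    print_attrs(_Demo(), recurse_to={'child'})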
|
import voluptuous as vol
from homeassistant.components.pilight import DOMAIN, EVENT, SERVICE_NAME
from homeassistant.const import (
CONF_ID,
CONF_NAME,
CONF_PROTOCOL,
CONF_STATE,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
CONF_ECHO,
CONF_OFF,
CONF_OFF_CODE,
CONF_OFF_CODE_RECEIVE,
CONF_ON,
CONF_ON_CODE,
CONF_ON_CODE_RECEIVE,
CONF_SYSTEMCODE,
CONF_UNIT,
CONF_UNITCODE,
)
COMMAND_SCHEMA = vol.Schema(
{
vol.Optional(CONF_PROTOCOL): cv.string,
vol.Optional(CONF_ON): cv.positive_int,
vol.Optional(CONF_OFF): cv.positive_int,
vol.Optional(CONF_UNIT): cv.positive_int,
vol.Optional(CONF_UNITCODE): cv.positive_int,
vol.Optional(CONF_ID): vol.Any(cv.positive_int, cv.string),
vol.Optional(CONF_STATE): vol.Any(STATE_ON, STATE_OFF),
vol.Optional(CONF_SYSTEMCODE): cv.positive_int,
},
extra=vol.ALLOW_EXTRA,
)
RECEIVE_SCHEMA = COMMAND_SCHEMA.extend({vol.Optional(CONF_ECHO): cv.boolean})
SWITCHES_SCHEMA = vol.Schema(
{
vol.Required(CONF_ON_CODE): COMMAND_SCHEMA,
vol.Required(CONF_OFF_CODE): COMMAND_SCHEMA,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_OFF_CODE_RECEIVE): vol.All(cv.ensure_list, [COMMAND_SCHEMA]),
vol.Optional(CONF_ON_CODE_RECEIVE): vol.All(cv.ensure_list, [COMMAND_SCHEMA]),
}
)
class PilightBaseDevice(RestoreEntity):
"""Base class for pilight switches and lights."""
def __init__(self, hass, name, config):
"""Initialize a device."""
self._hass = hass
self._name = config.get(CONF_NAME, name)
self._is_on = False
self._code_on = config.get(CONF_ON_CODE)
self._code_off = config.get(CONF_OFF_CODE)
code_on_receive = config.get(CONF_ON_CODE_RECEIVE, [])
code_off_receive = config.get(CONF_OFF_CODE_RECEIVE, [])
self._code_on_receive = []
self._code_off_receive = []
for code_list, conf in (
(self._code_on_receive, code_on_receive),
(self._code_off_receive, code_off_receive),
):
for code in conf:
echo = code.pop(CONF_ECHO, True)
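                # "echo" decides whether a matched receive code also re-sends
                # this switch's own code (it becomes send_code in run()).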
code_list.append(_ReceiveHandle(code, echo))
if any(self._code_on_receive) or any(self._code_off_receive):
hass.bus.listen(EVENT, self._handle_code)
self._brightness = 255
async def async_added_to_hass(self):
"""Call when entity about to be added to hass."""
await super().async_added_to_hass()
state = await self.async_get_last_state()
if state:
self._is_on = state.state == STATE_ON
self._brightness = state.attributes.get("brightness")
@property
def name(self):
"""Get the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed, state set when correct code is received."""
return False
@property
def assumed_state(self):
"""Return True if unable to access real state of the entity."""
return True
@property
def is_on(self):
"""Return true if switch is on."""
return self._is_on
def _handle_code(self, call):
"""Check if received code by the pilight-daemon.
If the code matches the receive on/off codes of this switch the switch
state is changed accordingly.
"""
# - True if off_code/on_code is contained in received code dict, not
# all items have to match.
# - Call turn on/off only once, even if more than one code is received
if any(self._code_on_receive):
for on_code in self._code_on_receive:
if on_code.match(call.data):
on_code.run(switch=self, turn_on=True)
break
if any(self._code_off_receive):
for off_code in self._code_off_receive:
if off_code.match(call.data):
off_code.run(switch=self, turn_on=False)
break
def set_state(self, turn_on, send_code=True, dimlevel=None):
"""Set the state of the switch.
This sets the state of the switch. If send_code is set to True, then
it will call the pilight.send service to actually send the codes
to the pilight daemon.
"""
if send_code:
if turn_on:
code = self._code_on
if dimlevel is not None:
code.update({"dimlevel": dimlevel})
self._hass.services.call(DOMAIN, SERVICE_NAME, code, blocking=True)
else:
self._hass.services.call(
DOMAIN, SERVICE_NAME, self._code_off, blocking=True
)
self._is_on = turn_on
self.schedule_update_ha_state()
def turn_on(self, **kwargs):
"""Turn the switch on by calling pilight.send service with on code."""
self.set_state(turn_on=True)
def turn_off(self, **kwargs):
"""Turn the switch on by calling pilight.send service with off code."""
self.set_state(turn_on=False)
class _ReceiveHandle:
def __init__(self, config, echo):
"""Initialize the handle."""
self.config_items = config.items()
self.echo = echo
def match(self, code):
"""Test if the received code matches the configured values.
        The configured values have to be a subset of the received code.
"""
return self.config_items <= code.items()
def run(self, switch, turn_on):
"""Change the state of the switch."""
switch.set_state(turn_on=turn_on, send_code=self.echo)
|
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.views.decorators.cache import never_cache
from weblate.trans.forms import ContributorAgreementForm
from weblate.trans.models import ContributorAgreement
from weblate.trans.util import redirect_next
from weblate.utils.views import get_component
@never_cache
@login_required
def agreement_confirm(request, project, component):
component = get_component(request, project, component)
has_agreed = ContributorAgreement.objects.has_agreed(request.user, component)
if request.method == "POST":
form = ContributorAgreementForm(request.POST)
if form.is_valid() and not has_agreed:
ContributorAgreement.objects.create(user=request.user, component=component)
return redirect_next(request.GET.get("next"), component.get_absolute_url())
else:
form = ContributorAgreementForm(
initial={"next": request.GET.get("next"), "confirm": has_agreed}
)
return render(
request,
"contributor-agreement.html",
{"form": form, "object": component, "has_agreed": has_agreed},
)
|
from django.urls import reverse
from weblate.trans.tests.test_views import ViewTestCase
class ChangesTest(ViewTestCase):
def test_basic(self):
response = self.client.get(reverse("changes"))
self.assertContains(response, "Resource update")
def test_basic_csv_denied(self):
response = self.client.get(reverse("changes-csv"))
self.assertEqual(response.status_code, 403)
def test_basic_csv(self):
self.make_manager()
response = self.client.get(reverse("changes-csv"))
self.assertContains(response, "timestamp,")
def test_filter(self):
response = self.client.get(reverse("changes"), {"project": "test"})
self.assertContains(response, "Resource update")
self.assertNotContains(response, "Failed to find matching project!")
response = self.client.get(
reverse("changes"), {"project": "test", "component": "test"}
)
self.assertContains(response, "Resource update")
self.assertNotContains(response, "Failed to find matching project!")
response = self.client.get(
reverse("changes"), {"project": "test", "component": "test", "lang": "cs"}
)
self.assertContains(response, "Resource update")
self.assertNotContains(response, "Failed to find matching project!")
response = self.client.get(reverse("changes"), {"lang": "cs"})
self.assertContains(response, "Resource update")
self.assertNotContains(response, "Failed to find matching language!")
response = self.client.get(
reverse("changes"), {"project": "testx", "component": "test", "lang": "cs"}
)
self.assertContains(response, "Resource update")
self.assertContains(response, "Failed to find matching project!")
response = self.client.get(
reverse("changes"),
{"project": "test\000x", "component": "test", "lang": "cs"},
)
self.assertContains(response, "Resource update")
self.assertContains(response, "Null characters are not allowed")
def test_user(self):
self.edit_unit("Hello, world!\n", "Nazdar svete!\n")
response = self.client.get(reverse("changes"), {"user": self.user.username})
self.assertContains(response, "New translation")
self.assertNotContains(response, "Invalid search string!")
|
import errno
import fnmatch
import marshal
import os
import pickle
import stat
import sys
import tempfile
from hashlib import sha1
from io import BytesIO
from .utils import open_if_exists
bc_version = 5
# Magic bytes to identify Jinja bytecode cache files. Contains the
# Python major and minor version to avoid loading incompatible bytecode
# if a project upgrades its Python version.
bc_magic = (
b"j2"
+ pickle.dumps(bc_version, 2)
+ pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1], 2)
)
class Bucket:
"""Buckets are used to store the bytecode for one template. It's created
and initialized by the bytecode cache and passed to the loading functions.
The buckets get an internal checksum from the cache assigned and use this
to automatically reject outdated cache material. Individual bytecode
cache subclasses don't have to care about cache invalidation.
"""
def __init__(self, environment, key, checksum):
self.environment = environment
self.key = key
self.checksum = checksum
self.reset()
def reset(self):
"""Resets the bucket (unloads the bytecode)."""
self.code = None
def load_bytecode(self, f):
"""Loads bytecode from a file or file like object."""
# make sure the magic header is correct
magic = f.read(len(bc_magic))
if magic != bc_magic:
self.reset()
return
# the source code of the file changed, we need to reload
checksum = pickle.load(f)
if self.checksum != checksum:
self.reset()
return
# if marshal_load fails then we need to reload
try:
self.code = marshal.load(f)
except (EOFError, ValueError, TypeError):
self.reset()
return
def write_bytecode(self, f):
"""Dump the bytecode into the file or file like object passed."""
if self.code is None:
raise TypeError("can't write empty bucket")
f.write(bc_magic)
pickle.dump(self.checksum, f, 2)
marshal.dump(self.code, f)
def bytecode_from_string(self, string):
"""Load bytecode from a string."""
self.load_bytecode(BytesIO(string))
def bytecode_to_string(self):
"""Return the bytecode as string."""
out = BytesIO()
self.write_bytecode(out)
return out.getvalue()
class BytecodeCache:
"""To implement your own bytecode cache you have to subclass this class
and override :meth:`load_bytecode` and :meth:`dump_bytecode`. Both of
these methods are passed a :class:`~jinja2.bccache.Bucket`.
A very basic bytecode cache that saves the bytecode on the file system::
from os import path
class MyCache(BytecodeCache):
def __init__(self, directory):
self.directory = directory
def load_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
if path.exists(filename):
with open(filename, 'rb') as f:
bucket.load_bytecode(f)
def dump_bytecode(self, bucket):
filename = path.join(self.directory, bucket.key)
with open(filename, 'wb') as f:
bucket.write_bytecode(f)
A more advanced version of a filesystem based bytecode cache is part of
Jinja.
"""
def load_bytecode(self, bucket):
"""Subclasses have to override this method to load bytecode into a
        bucket. If they are not able to find code in the cache for the
        bucket, they must not do anything.
"""
raise NotImplementedError()
def dump_bytecode(self, bucket):
"""Subclasses have to override this method to write the bytecode
from a bucket back to the cache. If it unable to do so it must not
fail silently but raise an exception.
"""
raise NotImplementedError()
def clear(self):
"""Clears the cache. This method is not used by Jinja but should be
implemented to allow applications to clear the bytecode cache used
by a particular environment.
"""
def get_cache_key(self, name, filename=None):
"""Returns the unique hash key for this template name."""
hash = sha1(name.encode("utf-8"))
if filename is not None:
filename = "|" + filename
if isinstance(filename, str):
filename = filename.encode("utf-8")
hash.update(filename)
return hash.hexdigest()
def get_source_checksum(self, source):
"""Returns a checksum for the source."""
return sha1(source.encode("utf-8")).hexdigest()
def get_bucket(self, environment, name, filename, source):
"""Return a cache bucket for the given template. All arguments are
mandatory but filename may be `None`.
"""
key = self.get_cache_key(name, filename)
checksum = self.get_source_checksum(source)
bucket = Bucket(environment, key, checksum)
self.load_bytecode(bucket)
return bucket
def set_bucket(self, bucket):
"""Put the bucket into the cache."""
self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
"""A bytecode cache that stores bytecode on the filesystem. It accepts
two arguments: The directory where the cache items are stored and a
pattern string that is used to build the filename.
If no directory is specified a default cache directory is selected. On
Windows the user's temp directory is used, on UNIX systems a directory
is created for the user in the system temp directory.
The pattern can be used to have multiple separate caches operate on the
same directory. The default pattern is ``'__jinja2_%s.cache'``. ``%s``
is replaced with the cache key.
>>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')
This bytecode cache supports clearing of the cache using the clear method.
"""
def __init__(self, directory=None, pattern="__jinja2_%s.cache"):
if directory is None:
directory = self._get_default_cache_dir()
self.directory = directory
self.pattern = pattern
def _get_default_cache_dir(self):
def _unsafe_dir():
raise RuntimeError(
"Cannot determine safe temp directory. You "
"need to explicitly provide one."
)
tmpdir = tempfile.gettempdir()
        # On Windows the temporary directory is already user-specific unless
        # explicitly forced otherwise, so we can just use it.
if os.name == "nt":
return tmpdir
if not hasattr(os, "getuid"):
_unsafe_dir()
dirname = f"_jinja2-cache-{os.getuid()}"
actual_dir = os.path.join(tmpdir, dirname)
try:
os.mkdir(actual_dir, stat.S_IRWXU)
except OSError as e:
if e.errno != errno.EEXIST:
raise
try:
os.chmod(actual_dir, stat.S_IRWXU)
actual_dir_stat = os.lstat(actual_dir)
if (
actual_dir_stat.st_uid != os.getuid()
or not stat.S_ISDIR(actual_dir_stat.st_mode)
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
):
_unsafe_dir()
except OSError as e:
if e.errno != errno.EEXIST:
raise
        # Re-check ownership and permissions outside the try block so that a
        # swallowed OSError above cannot hide an unsafe directory.
        actual_dir_stat = os.lstat(actual_dir)
if (
actual_dir_stat.st_uid != os.getuid()
or not stat.S_ISDIR(actual_dir_stat.st_mode)
or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU
):
_unsafe_dir()
return actual_dir
def _get_cache_filename(self, bucket):
return os.path.join(self.directory, self.pattern % (bucket.key,))
def load_bytecode(self, bucket):
f = open_if_exists(self._get_cache_filename(bucket), "rb")
if f is not None:
try:
bucket.load_bytecode(f)
finally:
f.close()
def dump_bytecode(self, bucket):
f = open(self._get_cache_filename(bucket), "wb")
try:
bucket.write_bytecode(f)
finally:
f.close()
def clear(self):
# imported lazily here because google app-engine doesn't support
# write access on the file system and the function does not exist
# normally.
from os import remove
files = fnmatch.filter(os.listdir(self.directory), self.pattern % ("*",))
for filename in files:
try:
remove(os.path.join(self.directory, filename))
except OSError:
pass
class MemcachedBytecodeCache(BytecodeCache):
"""This class implements a bytecode cache that uses a memcache cache for
storing the information. It does not enforce a specific memcache library
(tummy's memcache or cmemcache) but will accept any class that provides
the minimal interface required.
Libraries compatible with this class:
- `cachelib <https://github.com/pallets/cachelib>`_
- `python-memcached <https://pypi.org/project/python-memcached/>`_
(Unfortunately the django cache interface is not compatible because it
does not support storing binary data, only text. You can however pass
the underlying cache client to the bytecode cache which is available
as `django.core.cache.cache._client`.)
The minimal interface for the client passed to the constructor is this:
.. class:: MinimalClientInterface
.. method:: set(key, value[, timeout])
            Stores the bytecode in the cache. `value` is a string and
            `timeout` is the timeout of the key. If `timeout` is not
            provided, a default timeout or no timeout should be assumed; if
            it is provided, it is an integer giving the number of seconds
            the cache item should exist.
.. method:: get(key)
Returns the value for the cache key. If the item does not
exist in the cache the return value must be `None`.
The other arguments to the constructor are the prefix for all keys that
is added before the actual cache key and the timeout for the bytecode in
the cache system. We recommend a high (or no) timeout.
This bytecode cache does not support clearing of used items in the cache.
The clear method is a no-operation function.
.. versionadded:: 2.7
Added support for ignoring memcache errors through the
`ignore_memcache_errors` parameter.
"""
def __init__(
self,
client,
prefix="jinja2/bytecode/",
timeout=None,
ignore_memcache_errors=True,
):
self.client = client
self.prefix = prefix
self.timeout = timeout
self.ignore_memcache_errors = ignore_memcache_errors
def load_bytecode(self, bucket):
try:
code = self.client.get(self.prefix + bucket.key)
except Exception:
if not self.ignore_memcache_errors:
raise
code = None
if code is not None:
bucket.bytecode_from_string(code)
def dump_bytecode(self, bucket):
args = (self.prefix + bucket.key, bucket.bytecode_to_string())
if self.timeout is not None:
args += (self.timeout,)
try:
self.client.set(*args)
except Exception:
if not self.ignore_memcache_errors:
raise
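# Illustrative sketch (not part of Jinja): a minimal in-memory client that
# satisfies the get/set interface documented by MemcachedBytecodeCache, plus a
# bytecode round-trip through the public get_bucket/set_bucket API. The class
# and variable names below are made up for this example.
if __name__ == "__main__":
    class DictClient:
        """Toy cache client backed by a dict; ignores timeouts."""
        def __init__(self):
            self._store = {}
        def get(self, key):
            return self._store.get(key)
        def set(self, key, value, timeout=None):
            self._store[key] = value
    caches = [
        MemcachedBytecodeCache(DictClient(), prefix="demo/"),
        FileSystemBytecodeCache(),
    ]
    for cache in caches:
        # First lookup may miss, in which case we compile and store bytecode.
        bucket = cache.get_bucket(None, "demo", None, "x = 40 + 2")
        if bucket.code is None:
            bucket.code = compile("x = 40 + 2", "<demo>", "exec")
            cache.set_bucket(bucket)
        # A later lookup with the same name and source hits the cache.
        bucket = cache.get_bucket(None, "demo", None, "x = 40 + 2")
        namespace = {}
        exec(bucket.code, namespace)
        print(type(cache).__name__, namespace["x"])  # prints ... 42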
|
import logging
from mficlient.client import FailedToLogin, MFiClient
import requests
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSL = True
DEFAULT_VERIFY_SSL = True
SWITCH_MODELS = ["Outlet", "Output 5v", "Output 12v", "Output 24v", "Dimmer Switch"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up mFi sensors."""
host = config.get(CONF_HOST)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
use_tls = config[CONF_SSL]
verify_tls = config.get(CONF_VERIFY_SSL)
default_port = 6443 if use_tls else 6080
port = int(config.get(CONF_PORT, default_port))
try:
client = MFiClient(
host, username, password, port=port, use_tls=use_tls, verify=verify_tls
)
except (FailedToLogin, requests.exceptions.ConnectionError) as ex:
_LOGGER.error("Unable to connect to mFi: %s", str(ex))
return False
add_entities(
MfiSwitch(port)
for device in client.get_devices()
for port in device.ports.values()
if port.model in SWITCH_MODELS
)
class MfiSwitch(SwitchEntity):
"""Representation of an mFi switch-able device."""
def __init__(self, port):
"""Initialize the mFi device."""
self._port = port
self._target_state = None
@property
def unique_id(self):
"""Return the unique ID of the device."""
return self._port.ident
@property
def name(self):
"""Return the name of the device."""
return self._port.label
@property
def is_on(self):
"""Return true if the device is on."""
return self._port.output
def update(self):
"""Get the latest state and update the state."""
self._port.refresh()
if self._target_state is not None:
self._port.data["output"] = float(self._target_state)
self._target_state = None
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._port.control(True)
self._target_state = True
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._port.control(False)
self._target_state = False
@property
def current_power_w(self):
"""Return the current power usage in W."""
return int(self._port.data.get("active_pwr", 0))
@property
def device_state_attributes(self):
"""Return the state attributes for the device."""
return {
"volts": round(self._port.data.get("v_rms", 0), 1),
"amps": round(self._port.data.get("i_rms", 0), 1),
}
|
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
from . import (
ATTR_ALLOWED_BANDWIDTH,
ATTR_AUTO_BACKUPS,
ATTR_COST_PER_MONTH,
ATTR_CREATED_AT,
ATTR_DISK,
ATTR_IPV4_ADDRESS,
ATTR_IPV6_ADDRESS,
ATTR_MEMORY,
ATTR_OS,
ATTR_REGION,
ATTR_SUBSCRIPTION_ID,
ATTR_SUBSCRIPTION_NAME,
ATTR_VCPUS,
CONF_SUBSCRIPTION,
DATA_VULTR,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Vultr {}"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SUBSCRIPTION): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Vultr subscription switch."""
vultr = hass.data[DATA_VULTR]
subscription = config.get(CONF_SUBSCRIPTION)
name = config.get(CONF_NAME)
if subscription not in vultr.data:
_LOGGER.error("Subscription %s not found", subscription)
return False
add_entities([VultrSwitch(vultr, subscription, name)], True)
class VultrSwitch(SwitchEntity):
"""Representation of a Vultr subscription switch."""
def __init__(self, vultr, subscription, name):
"""Initialize a new Vultr switch."""
self._vultr = vultr
self._name = name
self.subscription = subscription
self.data = None
@property
def name(self):
"""Return the name of the switch."""
try:
return self._name.format(self.data["label"])
except (TypeError, KeyError):
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self.data["power_status"] == "running"
@property
def icon(self):
"""Return the icon of this server."""
return "mdi:server" if self.is_on else "mdi:server-off"
@property
def device_state_attributes(self):
"""Return the state attributes of the Vultr subscription."""
return {
ATTR_ALLOWED_BANDWIDTH: self.data.get("allowed_bandwidth_gb"),
ATTR_AUTO_BACKUPS: self.data.get("auto_backups"),
ATTR_COST_PER_MONTH: self.data.get("cost_per_month"),
ATTR_CREATED_AT: self.data.get("date_created"),
ATTR_DISK: self.data.get("disk"),
ATTR_IPV4_ADDRESS: self.data.get("main_ip"),
ATTR_IPV6_ADDRESS: self.data.get("v6_main_ip"),
ATTR_MEMORY: self.data.get("ram"),
ATTR_OS: self.data.get("os"),
ATTR_REGION: self.data.get("location"),
ATTR_SUBSCRIPTION_ID: self.data.get("SUBID"),
ATTR_SUBSCRIPTION_NAME: self.data.get("label"),
ATTR_VCPUS: self.data.get("vcpu_count"),
}
def turn_on(self, **kwargs):
"""Boot-up the subscription."""
if self.data["power_status"] != "running":
self._vultr.start(self.subscription)
def turn_off(self, **kwargs):
"""Halt the subscription."""
if self.data["power_status"] == "running":
self._vultr.halt(self.subscription)
def update(self):
"""Get the latest data from the device and update the data."""
self._vultr.update()
self.data = self._vultr.data[self.subscription]
|
import filelock
import os
import six
from chainer.dataset import download
from chainercv.datasets.voc.voc_utils \
import voc_instance_segmentation_label_names
from chainercv import utils
root = 'pfnet/chainercv/sbd'
url = 'http://www.eecs.berkeley.edu/Research/Projects/CS/vision/grouping/semantic_contours/benchmark.tgz' # NOQA
train_voc2012_url = 'http://home.bharathh.info/pubs/codes/SBD/train_noval.txt'
def _generate_voc2012_txt(base_path):
with open(os.path.join(base_path, 'train.txt'), 'r') as f:
train_ids = f.read().split('\n')[:-1]
with open(os.path.join(base_path, 'val.txt'), 'r') as f:
val_ids = f.read().split('\n')[:-1]
with open(os.path.join(base_path, 'train_voc2012.txt'), 'r') as f:
train_voc2012_ids = f.read().split('\n')[:-1]
all_ids = list(set(train_ids + val_ids))
val_voc2012_ids = [i for i in all_ids if i not in train_voc2012_ids]
with open(os.path.join(base_path, 'val_voc2012.txt'), 'w') as f:
f.write('\n'.join(sorted(val_voc2012_ids)))
with open(os.path.join(base_path, 'trainval_voc2012.txt'), 'w') as f:
f.write('\n'.join(sorted(all_ids)))
def get_sbd():
# To support ChainerMN, the target directory should be locked.
with filelock.FileLock(os.path.join(download.get_dataset_directory(
'pfnet/chainercv/.lock'), 'sbd.lock')):
data_root = download.get_dataset_directory(root)
base_path = os.path.join(data_root, 'benchmark_RELEASE/dataset')
train_voc2012_file = os.path.join(base_path, 'train_voc2012.txt')
if os.path.exists(train_voc2012_file):
# skip downloading
return base_path
download_file_path = utils.cached_download(url)
ext = os.path.splitext(url)[1]
utils.extractall(download_file_path, data_root, ext)
six.moves.urllib.request.urlretrieve(
train_voc2012_url, train_voc2012_file)
_generate_voc2012_txt(base_path)
return base_path
sbd_instance_segmentation_label_names = voc_instance_segmentation_label_names
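# Illustrative sketch (not part of chainercv): a toy run of
# _generate_voc2012_txt on a temporary directory to show the split logic. Ids
# listed in train.txt or val.txt but not in train_voc2012.txt end up in
# val_voc2012.txt, and the union of train.txt and val.txt becomes
# trainval_voc2012.txt. The file contents below are made up for this example.
if __name__ == '__main__':
    import tempfile
    with tempfile.TemporaryDirectory() as tmp_dir:
        for name, ids in [('train.txt', ['a', 'b']),
                          ('val.txt', ['c']),
                          ('train_voc2012.txt', ['a'])]:
            with open(os.path.join(tmp_dir, name), 'w') as f:
                f.write('\n'.join(ids) + '\n')
        _generate_voc2012_txt(tmp_dir)
        with open(os.path.join(tmp_dir, 'val_voc2012.txt')) as f:
            print(f.read().splitlines())  # ['b', 'c']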
|
from typing import Any, Dict, List, Optional, Union
from homematicip.aio.device import AsyncHeatingThermostat, AsyncHeatingThermostatCompact
from homematicip.aio.group import AsyncHeatingGroup
from homematicip.base.enums import AbsenceType
from homematicip.device import Switch
from homematicip.functionalHomes import IndoorClimateHome
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_ECO,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericEntity
from .hap import HomematicipHAP
HEATING_PROFILES = {"PROFILE_1": 0, "PROFILE_2": 1, "PROFILE_3": 2}
COOLING_PROFILES = {"PROFILE_4": 3, "PROFILE_5": 4, "PROFILE_6": 5}
ATTR_PRESET_END_TIME = "preset_end_time"
PERMANENT_END_TIME = "permanent"
HMIP_AUTOMATIC_CM = "AUTOMATIC"
HMIP_MANUAL_CM = "MANUAL"
HMIP_ECO_CM = "ECO"
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP climate from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
entities = []
for device in hap.home.groups:
if isinstance(device, AsyncHeatingGroup):
entities.append(HomematicipHeatingGroup(hap, device))
if entities:
async_add_entities(entities)
class HomematicipHeatingGroup(HomematicipGenericEntity, ClimateEntity):
"""Representation of the HomematicIP heating group.
    Heat mode is supported for all heating devices, including their defined profiles.
    Boost is available for radiator thermostats only.
    Cool mode is only available for floor heating systems, and only if it has been enabled in the HmIP app.
"""
def __init__(self, hap: HomematicipHAP, device: AsyncHeatingGroup) -> None:
"""Initialize heating group."""
device.modelType = "HmIP-Heating-Group"
super().__init__(hap, device)
self._simple_heating = None
if device.actualTemperature is None:
self._simple_heating = self._first_radiator_thermostat
@property
def device_info(self) -> Dict[str, Any]:
"""Return device specific attributes."""
return {
"identifiers": {(HMIPC_DOMAIN, self._device.id)},
"name": self._device.label,
"manufacturer": "eQ-3",
"model": self._device.modelType,
"via_device": (HMIPC_DOMAIN, self._device.homeId),
}
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_PRESET_MODE | SUPPORT_TARGET_TEMPERATURE
@property
def target_temperature(self) -> float:
"""Return the temperature we try to reach."""
return self._device.setPointTemperature
@property
def current_temperature(self) -> float:
"""Return the current temperature."""
if self._simple_heating:
return self._simple_heating.valveActualTemperature
return self._device.actualTemperature
@property
def current_humidity(self) -> int:
"""Return the current humidity."""
return self._device.humidity
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie."""
if self._disabled_by_cooling_mode and not self._has_switch:
return HVAC_MODE_OFF
if self._device.boostMode:
return HVAC_MODE_HEAT
if self._device.controlMode == HMIP_MANUAL_CM:
return HVAC_MODE_HEAT if self._heat_mode_enabled else HVAC_MODE_COOL
return HVAC_MODE_AUTO
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
if self._disabled_by_cooling_mode and not self._has_switch:
return [HVAC_MODE_OFF]
return (
[HVAC_MODE_AUTO, HVAC_MODE_HEAT]
if self._heat_mode_enabled
else [HVAC_MODE_AUTO, HVAC_MODE_COOL]
)
@property
def hvac_action(self) -> Optional[str]:
"""
Return the current hvac_action.
This is only relevant for radiator thermostats.
"""
if (
self._device.floorHeatingMode == "RADIATOR"
and self._has_radiator_thermostat
and self._heat_mode_enabled
):
return (
CURRENT_HVAC_HEAT if self._device.valvePosition else CURRENT_HVAC_IDLE
)
return None
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode."""
if self._device.boostMode:
return PRESET_BOOST
if self.hvac_mode in (HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF):
return PRESET_NONE
if self._device.controlMode == HMIP_ECO_CM:
if self._indoor_climate.absenceType == AbsenceType.VACATION:
return PRESET_AWAY
if self._indoor_climate.absenceType in [
AbsenceType.PARTY,
AbsenceType.PERIOD,
AbsenceType.PERMANENT,
]:
return PRESET_ECO
return (
self._device.activeProfile.name
if self._device.activeProfile.name in self._device_profile_names
else None
)
@property
def preset_modes(self) -> List[str]:
"""Return a list of available preset modes incl. hmip profiles."""
# Boost is only available if a radiator thermostat is in the room,
# and heat mode is enabled.
profile_names = self._device_profile_names
presets = []
if (
self._heat_mode_enabled and self._has_radiator_thermostat
) or self._has_switch:
if not profile_names:
presets.append(PRESET_NONE)
presets.append(PRESET_BOOST)
presets.extend(profile_names)
return presets
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return self._device.minTemperature
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return self._device.maxTemperature
async def async_set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
if self.min_temp <= temperature <= self.max_temp:
await self._device.set_point_temperature(temperature)
async def async_set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
if hvac_mode not in self.hvac_modes:
return
if hvac_mode == HVAC_MODE_AUTO:
await self._device.set_control_mode(HMIP_AUTOMATIC_CM)
else:
await self._device.set_control_mode(HMIP_MANUAL_CM)
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if preset_mode not in self.preset_modes:
return
if self._device.boostMode and preset_mode != PRESET_BOOST:
await self._device.set_boost(False)
if preset_mode == PRESET_BOOST:
await self._device.set_boost()
if preset_mode in self._device_profile_names:
profile_idx = self._get_profile_idx_by_name(preset_mode)
if self._device.controlMode != HMIP_AUTOMATIC_CM:
await self.async_set_hvac_mode(HVAC_MODE_AUTO)
await self._device.set_active_profile(profile_idx)
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the access point."""
state_attr = super().device_state_attributes
if self._device.controlMode == HMIP_ECO_CM:
if self._indoor_climate.absenceType in [
AbsenceType.PARTY,
AbsenceType.PERIOD,
AbsenceType.VACATION,
]:
state_attr[ATTR_PRESET_END_TIME] = self._indoor_climate.absenceEndTime
elif self._indoor_climate.absenceType == AbsenceType.PERMANENT:
state_attr[ATTR_PRESET_END_TIME] = PERMANENT_END_TIME
return state_attr
@property
def _indoor_climate(self) -> IndoorClimateHome:
"""Return the hmip indoor climate functional home of this group."""
return self._home.get_functionalHome(IndoorClimateHome)
@property
    def _device_profiles(self) -> List[Any]:
"""Return the relevant profiles."""
return [
profile
for profile in self._device.profiles
if profile.visible
and profile.name != ""
and profile.index in self._relevant_profile_group
]
@property
def _device_profile_names(self) -> List[str]:
"""Return a collection of profile names."""
return [profile.name for profile in self._device_profiles]
def _get_profile_idx_by_name(self, profile_name: str) -> int:
"""Return a profile index by name."""
relevant_index = self._relevant_profile_group
index_name = [
profile.index
for profile in self._device_profiles
if profile.name == profile_name
]
return relevant_index[index_name[0]]
@property
def _heat_mode_enabled(self) -> bool:
"""Return, if heating mode is enabled."""
return not self._device.cooling
@property
def _disabled_by_cooling_mode(self) -> bool:
"""Return, if group is disabled by the cooling mode."""
return self._device.cooling and (
self._device.coolingIgnored or not self._device.coolingAllowed
)
@property
    def _relevant_profile_group(self) -> Dict[str, int]:
        """Return the relevant profile group (heating or cooling)."""
        if self._disabled_by_cooling_mode:
            return {}
return HEATING_PROFILES if self._heat_mode_enabled else COOLING_PROFILES
@property
def _has_switch(self) -> bool:
"""Return, if a switch is in the hmip heating group."""
for device in self._device.devices:
if isinstance(device, Switch):
return True
return False
@property
def _has_radiator_thermostat(self) -> bool:
"""Return, if a radiator thermostat is in the hmip heating group."""
return bool(self._first_radiator_thermostat)
@property
def _first_radiator_thermostat(
self,
) -> Optional[Union[AsyncHeatingThermostat, AsyncHeatingThermostatCompact]]:
"""Return the first radiator thermostat from the hmip heating group."""
for device in self._device.devices:
if isinstance(
device, (AsyncHeatingThermostat, AsyncHeatingThermostatCompact)
):
return device
return None
|
import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
sys.path = [os.path.abspath(os.path.dirname(os.path.dirname(__file__)))] + sys.path
os.environ['is_test_suite'] = 'True'
from auto_ml import Predictor
import dill
from nose.tools import assert_equal, assert_not_equal, with_setup
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
import utils_testing as utils
def test_predict_uncertainty_true():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, predict_intervals=True)
intervals = ml_predictor.predict_intervals(df_boston_test)
assert isinstance(intervals, pd.DataFrame)
assert intervals.shape[0] == df_boston_test.shape[0]
result_list = ml_predictor.predict_intervals(df_boston_test, return_type='list')
assert isinstance(result_list, list)
assert len(result_list) == df_boston_test.shape[0]
for idx, row in enumerate(result_list):
assert isinstance(row, list)
assert len(row) == 3
singles = df_boston_test.head().to_dict('records')
for row in singles:
result = ml_predictor.predict_intervals(row)
assert isinstance(result, dict)
assert 'prediction' in result
assert 'interval_0.05' in result
assert 'interval_0.95' in result
for row in singles:
result = ml_predictor.predict_intervals(row, return_type='list')
assert isinstance(result, list)
assert len(result) == 3
df_intervals = ml_predictor.predict_intervals(df_boston_test, return_type='df')
assert isinstance(df_intervals, pd.DataFrame)
try:
ml_predictor.predict_intervals(df_boston_test, return_type='this will not work')
assert False
except ValueError:
assert True
def test_prediction_intervals_actually_work():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, predict_intervals=[0.05, 0.95])
df_boston_test = df_boston_test.reset_index(drop=True)
intervals = ml_predictor.predict_intervals(df_boston_test)
actuals = df_boston_test.MEDV
count_under = 0
count_over = 0
# print(intervals)
for idx, row in intervals.iterrows():
actual = actuals.iloc[idx]
if actual < row['interval_0.05']:
count_under += 1
if actual > row['interval_0.95']:
count_over += 1
len_intervals = len(intervals)
pct_under = count_under * 1.0 / len_intervals
pct_over = count_over * 1.0 / len_intervals
# There's a decent bit of noise since this is such a small dataset
assert pct_under < 0.15
assert pct_over < 0.1
def test_prediction_intervals_lets_the_user_specify_number_of_intervals():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, predict_intervals=True, prediction_intervals=[.2])
intervals = ml_predictor.predict_intervals(df_boston_test, return_type='list')
assert len(intervals[0]) == 2
def test_predict_intervals_should_fail_if_not_trained():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train)
try:
intervals = ml_predictor.predict_intervals(df_boston_test)
assert False
except ValueError:
assert True
def test_predict_intervals_takes_in_custom_intervals():
np.random.seed(0)
df_boston_train, df_boston_test = utils.get_boston_regression_dataset()
column_descriptions = {
'MEDV': 'output'
, 'CHAS': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
# df_boston_train = pd.concat([df_boston_train, df_boston_train, df_boston_train])
ml_predictor.train(df_boston_train, predict_intervals=[0.4, 0.6])
custom_intervals = ml_predictor.predict_intervals(df_boston_test, return_type='list')
assert isinstance(custom_intervals, list)
singles = df_boston_test.head().to_dict('records')
acceptable_keys = set(['prediction', 'interval_0.4', 'interval_0.6'])
for row in singles:
result = ml_predictor.predict_intervals(row)
assert isinstance(result, dict)
assert 'prediction' in result
assert 'interval_0.4' in result
assert 'interval_0.6' in result
for key in result.keys():
assert key in acceptable_keys
for row in singles:
result = ml_predictor.predict_intervals(row, return_type='list')
assert isinstance(result, list)
assert len(result) == 3
df_intervals = ml_predictor.predict_intervals(df_boston_test, return_type='df')
assert df_intervals.shape[0] == df_boston_test.shape[0]
assert isinstance(df_intervals, pd.DataFrame)
# Now make sure that the interval values are actually different
ml_predictor = Predictor(type_of_estimator='regressor', column_descriptions=column_descriptions)
ml_predictor.train(df_boston_train, predict_intervals=True)
default_intervals = ml_predictor.predict_intervals(df_boston_test, return_type='list')
    # This is a flaky test because the dataset is so small and we're trying to estimate distributions from it
len_intervals = len(custom_intervals)
num_failures = 0
for idx, custom_row in enumerate(custom_intervals):
default_row = default_intervals[idx]
if int(custom_row[1]) <= int(default_row[1]):
num_failures += 1
print('{} should be higher than {}'.format(custom_row[1], default_row[1]))
if int(custom_row[2]) >= int(default_row[2]):
            print('{} should be lower than {}'.format(custom_row[2], default_row[2]))
num_failures += 1
assert num_failures < 0.18 * len_intervals
|
import logging.handlers
import pathlib
import re
import sys
from typing import List, Tuple, Optional, Union
from logging import LogRecord
from datetime import datetime # This clearly never leads to confusion...
from os import isatty
from rich._log_render import LogRender
from rich.containers import Renderables
from rich.logging import RichHandler
from rich.table import Table
from rich.text import Text
from rich.traceback import Traceback
MAX_OLD_LOGS = 8
class RotatingFileHandler(logging.handlers.RotatingFileHandler):
"""Custom rotating file handler.
This file handler rotates a bit differently to the one in stdlib.
For a start, this works off of a "stem" and a "directory". The stem
is the base name of the log file, without the extension. The
directory is where all log files (including backups) will be placed.
Secondly, this logger rotates files downwards, and new logs are
*started* with the backup number incremented. The stdlib handler
rotates files upwards, and this leaves the logs in reverse order.
Thirdly, naming conventions are not customisable with this class.
Logs will initially be named in the format "{stem}.log", and after
rotating, the first log file will be renamed "{stem}-part1.log",
and a new file "{stem}-part2.log" will be created for logging to
continue.
A few things can't be modified in this handler: it must use append
mode, it doesn't support use of the `delay` arg, and it will ignore
custom namers and rotators.
When this handler is instantiated, it will search through the
directory for logs from previous runtimes, and will open the file
with the highest backup number to append to.
"""
def __init__(
self,
stem: str,
directory: pathlib.Path,
maxBytes: int = 0,
backupCount: int = 0,
encoding: Optional[str] = None,
) -> None:
self.baseStem = stem
self.directory = directory.resolve()
# Scan for existing files in directory, append to last part of existing log
log_part_re = re.compile(rf"{stem}-part(?P<partnum>\d+).log")
highest_part = 0
for path in directory.iterdir():
match = log_part_re.match(path.name)
if match and int(match["partnum"]) > highest_part:
highest_part = int(match["partnum"])
if highest_part:
filename = directory / f"{stem}-part{highest_part}.log"
else:
filename = directory / f"{stem}.log"
super().__init__(
filename,
mode="a",
maxBytes=maxBytes,
backupCount=backupCount,
encoding=encoding,
delay=False,
)
def doRollover(self):
if self.stream:
self.stream.close()
self.stream = None
initial_path = self.directory / f"{self.baseStem}.log"
if self.backupCount > 0 and initial_path.exists():
initial_path.replace(self.directory / f"{self.baseStem}-part1.log")
match = re.match(
rf"{self.baseStem}(?:-part(?P<part>\d+)?)?.log", pathlib.Path(self.baseFilename).name
)
latest_part_num = int(match.groupdict(default="1").get("part", "1"))
if self.backupCount < 1:
# No backups, just delete the existing log and start again
pathlib.Path(self.baseFilename).unlink()
elif latest_part_num > self.backupCount:
# Rotate files down one
# red-part2.log becomes red-part1.log etc, a new log is added at the end.
for i in range(1, self.backupCount):
next_log = self.directory / f"{self.baseStem}-part{i + 1}.log"
if next_log.exists():
prev_log = self.directory / f"{self.baseStem}-part{i}.log"
next_log.replace(prev_log)
else:
# Simply start a new file
self.baseFilename = str(
self.directory / f"{self.baseStem}-part{latest_part_num + 1}.log"
)
self.stream = self._open()
class RedLogRender(LogRender):
def __call__(
self,
console,
renderables,
log_time=None,
time_format=None,
level="",
path=None,
line_no=None,
link_path=None,
logger_name=None,
):
output = Table.grid(padding=(0, 1))
output.expand = True
if self.show_time:
output.add_column(style="log.time")
if self.show_level:
output.add_column(style="log.level", width=self.level_width)
output.add_column(ratio=1, style="log.message", overflow="fold")
if self.show_path and path:
output.add_column(style="log.path")
if logger_name:
output.add_column()
row = []
if self.show_time:
log_time = log_time or console.get_datetime()
log_time_display = log_time.strftime(time_format or self.time_format)
if log_time_display == self._last_time:
row.append(Text(" " * len(log_time_display)))
else:
row.append(Text(log_time_display))
self._last_time = log_time_display
if self.show_level:
row.append(level)
row.append(Renderables(renderables))
if self.show_path and path:
path_text = Text()
path_text.append(path, style=f"link file://{link_path}" if link_path else "")
if line_no:
path_text.append(f":{line_no}")
row.append(path_text)
if logger_name:
logger_name_text = Text()
logger_name_text.append(f"[{logger_name}]")
row.append(logger_name_text)
output.add_row(*row)
return output
class RedRichHandler(RichHandler):
"""Adaptation of Rich's RichHandler to manually adjust the path to a logger name"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._log_render = RedLogRender(
show_time=self._log_render.show_time,
show_level=self._log_render.show_level,
show_path=self._log_render.show_path,
level_width=self._log_render.level_width,
)
def emit(self, record: LogRecord) -> None:
"""Invoked by logging."""
path = pathlib.Path(record.pathname).name
level = self.get_level_text(record)
message = self.format(record)
time_format = None if self.formatter is None else self.formatter.datefmt
log_time = datetime.fromtimestamp(record.created)
traceback = None
if self.rich_tracebacks and record.exc_info and record.exc_info != (None, None, None):
exc_type, exc_value, exc_traceback = record.exc_info
assert exc_type is not None
assert exc_value is not None
traceback = Traceback.from_exception(
exc_type,
exc_value,
exc_traceback,
width=self.tracebacks_width,
extra_lines=self.tracebacks_extra_lines,
theme=self.tracebacks_theme,
word_wrap=self.tracebacks_word_wrap,
show_locals=self.tracebacks_show_locals,
locals_max_length=self.locals_max_length,
locals_max_string=self.locals_max_string,
)
message = record.getMessage()
        use_markup = getattr(record, "markup", self.markup)
if use_markup:
message_text = Text.from_markup(message)
else:
message_text = Text(message)
if self.highlighter:
message_text = self.highlighter(message_text)
if self.KEYWORDS:
message_text.highlight_words(self.KEYWORDS, "logging.keyword")
self.console.print(
self._log_render(
self.console,
[message_text] if not traceback else [message_text, traceback],
log_time=log_time,
time_format=time_format,
level=level,
path=path,
line_no=record.lineno,
link_path=record.pathname if self.enable_link_path else None,
logger_name=record.name,
)
)
def init_logging(
level: int, location: pathlib.Path, force_rich_logging: Union[bool, None]
) -> None:
root_logger = logging.getLogger()
base_logger = logging.getLogger("red")
base_logger.setLevel(level)
dpy_logger = logging.getLogger("discord")
dpy_logger.setLevel(logging.WARNING)
warnings_logger = logging.getLogger("py.warnings")
warnings_logger.setLevel(logging.WARNING)
enable_rich_logging = False
if isatty(0) and force_rich_logging is None:
        # Check if the bot thinks it has an active terminal.
enable_rich_logging = True
elif force_rich_logging is True:
enable_rich_logging = True
file_formatter = logging.Formatter(
"[{asctime}] [{levelname}] {name}: {message}", datefmt="%Y-%m-%d %H:%M:%S", style="{"
)
if enable_rich_logging is True:
rich_formatter = logging.Formatter("{message}", datefmt="[%X]", style="{")
stdout_handler = RedRichHandler(rich_tracebacks=True, show_path=False)
stdout_handler.setFormatter(rich_formatter)
else:
stdout_handler = logging.StreamHandler(sys.stdout)
stdout_handler.setFormatter(file_formatter)
root_logger.addHandler(stdout_handler)
logging.captureWarnings(True)
if not location.exists():
location.mkdir(parents=True, exist_ok=True)
# Rotate latest logs to previous logs
previous_logs: List[pathlib.Path] = []
latest_logs: List[Tuple[pathlib.Path, str]] = []
for path in location.iterdir():
match = re.match(r"latest(?P<part>-part\d+)?\.log", path.name)
if match:
part = match.groupdict(default="")["part"]
latest_logs.append((path, part))
match = re.match(r"previous(?:-part\d+)?.log", path.name)
if match:
previous_logs.append(path)
# Delete all previous.log files
for path in previous_logs:
path.unlink()
# Rename latest.log files to previous.log
for path, part in latest_logs:
path.replace(location / f"previous{part}.log")
latest_fhandler = RotatingFileHandler(
stem="latest",
directory=location,
maxBytes=1_000_000, # About 1MB per logfile
backupCount=MAX_OLD_LOGS,
encoding="utf-8",
)
all_fhandler = RotatingFileHandler(
stem="red",
directory=location,
maxBytes=1_000_000,
backupCount=MAX_OLD_LOGS,
encoding="utf-8",
)
for fhandler in (latest_fhandler, all_fhandler):
fhandler.setFormatter(file_formatter)
root_logger.addHandler(fhandler)
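# Illustrative sketch (not part of Red): a self-contained run of the
# RotatingFileHandler defined above, using a temporary directory and a tiny
# maxBytes so the naming scheme from its docstring can be observed
# ("demo.log" is renamed to "demo-part1.log" and logging continues in
# "demo-part2.log", and so on). The stem "demo" and the sizes are made up
# for this example.
if __name__ == "__main__":
    import tempfile
    with tempfile.TemporaryDirectory() as tmp:
        log_dir = pathlib.Path(tmp)
        handler = RotatingFileHandler(
            stem="demo", directory=log_dir, maxBytes=200, backupCount=3, encoding="utf-8"
        )
        demo_log = logging.getLogger("rotation-demo")
        demo_log.setLevel(logging.INFO)
        demo_log.addHandler(handler)
        for i in range(40):
            demo_log.info("message %d", i)
        handler.close()
        # Typically prints ['demo-part1.log', 'demo-part2.log', 'demo-part3.log']
        print(sorted(p.name for p in log_dir.iterdir()))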
|
from aiohttp import ContentTypeError
from requests.exceptions import HTTPError
from homeassistant.components.plum_lightpad.const import DOMAIN
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
async def test_async_setup_no_domain_config(hass: HomeAssistant):
"""Test setup without configuration is noop."""
result = await async_setup_component(hass, DOMAIN, {})
assert result is True
assert DOMAIN not in hass.data
async def test_async_setup_imports_from_config(hass: HomeAssistant):
"""Test that specifying config will setup an entry."""
with patch(
"homeassistant.components.plum_lightpad.utils.Plum.loadCloudData"
) as mock_loadCloudData, patch(
"homeassistant.components.plum_lightpad.async_setup_entry",
return_value=True,
) as mock_async_setup_entry:
result = await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
"username": "test-plum-username",
"password": "test-plum-password",
}
},
)
await hass.async_block_till_done()
assert result is True
assert len(mock_loadCloudData.mock_calls) == 1
assert len(mock_async_setup_entry.mock_calls) == 1
async def test_async_setup_entry_sets_up_light(hass: HomeAssistant):
"""Test that configuring entry sets up light domain."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"username": "test-plum-username", "password": "test-plum-password"},
)
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.plum_lightpad.utils.Plum.loadCloudData"
) as mock_loadCloudData, patch(
"homeassistant.components.plum_lightpad.light.async_setup_entry"
) as mock_light_async_setup_entry:
result = await hass.config_entries.async_setup(config_entry.entry_id)
assert result is True
await hass.async_block_till_done()
assert len(mock_loadCloudData.mock_calls) == 1
assert len(mock_light_async_setup_entry.mock_calls) == 1
async def test_async_setup_entry_handles_auth_error(hass: HomeAssistant):
"""Test that configuring entry handles Plum Cloud authentication error."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"username": "test-plum-username", "password": "test-plum-password"},
)
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.plum_lightpad.utils.Plum.loadCloudData",
side_effect=ContentTypeError(Mock(), None),
), patch(
"homeassistant.components.plum_lightpad.light.async_setup_entry"
) as mock_light_async_setup_entry:
result = await hass.config_entries.async_setup(config_entry.entry_id)
assert result is False
assert len(mock_light_async_setup_entry.mock_calls) == 0
async def test_async_setup_entry_handles_http_error(hass: HomeAssistant):
"""Test that configuring entry handles HTTP error."""
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"username": "test-plum-username", "password": "test-plum-password"},
)
config_entry.add_to_hass(hass)
with patch(
"homeassistant.components.plum_lightpad.utils.Plum.loadCloudData",
side_effect=HTTPError,
), patch(
"homeassistant.components.plum_lightpad.light.async_setup_entry"
) as mock_light_async_setup_entry:
result = await hass.config_entries.async_setup(config_entry.entry_id)
assert result is False
assert len(mock_light_async_setup_entry.mock_calls) == 0
|
from collections import OrderedDict
import os
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
os.environ['is_test_suite'] = 'True'
import classifiers as classifier_tests
import regressors as regressor_tests
training_parameters = {
'model_names': ['DeepLearning', 'GradientBoosting', 'XGB', 'LGBM', 'CatBoost']
}
# Make this an OrderedDict so that we run the tests in a consistent order
test_names = OrderedDict([
('getting_single_predictions_multilabel_classification', classifier_tests.getting_single_predictions_multilabel_classification),
# ('getting_single_predictions_classification', classifier_tests.getting_single_predictions_classification),
('optimize_final_model_classification', classifier_tests.optimize_final_model_classification)
# ('feature_learning_getting_single_predictions_classification', classifier_tests.feature_learning_getting_single_predictions_classification),
# ('categorical_ensembling_classification', classifier_tests.categorical_ensembling_classification),
# ('feature_learning_categorical_ensembling_getting_single_predictions_classification', classifier_tests.feature_learning_categorical_ensembling_getting_single_predictions_classification)
])
def test_generator():
for model_name in training_parameters['model_names']:
for test_name, test in test_names.items():
test_model_name = model_name + 'Classifier'
# test_model_name = model_name
test.description = str(test_model_name) + '_' + test_name
yield test, test_model_name
|
import asyncio
from typing import Optional
import discord
from redbot.core import commands
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import humanize_list, inline
_ = Translator("Announcer", __file__)
class Announcer:
def __init__(self, ctx: commands.Context, message: str, config=None):
"""
:param ctx:
:param message:
:param config: Used to determine channel overrides
"""
self.ctx = ctx
self.message = message
self.config = config
self.active = None
def start(self):
"""
Starts an announcement.
:return:
"""
if self.active is None:
self.active = True
self.ctx.bot.loop.create_task(self.announcer())
def cancel(self):
"""
Cancels a running announcement.
:return:
"""
self.active = False
async def _get_announce_channel(self, guild: discord.Guild) -> Optional[discord.TextChannel]:
if await self.ctx.bot.cog_disabled_in_guild_raw("Admin", guild.id):
return
channel_id = await self.config.guild(guild).announce_channel()
return guild.get_channel(channel_id)
async def announcer(self):
guild_list = self.ctx.bot.guilds
failed = []
async for g in AsyncIter(guild_list, delay=0.5):
if not self.active:
return
channel = await self._get_announce_channel(g)
if channel:
if channel.permissions_for(g.me).send_messages:
try:
await channel.send(self.message)
except discord.Forbidden:
failed.append(str(g.id))
else:
failed.append(str(g.id))
if failed:
msg = (
_("I could not announce to the following server: ")
if len(failed) == 1
else _("I could not announce to the following servers: ")
)
msg += humanize_list(tuple(map(inline, failed)))
await self.ctx.bot.send_to_owners(msg)
self.active = False
|
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _data_path_doc,
_get_version, _version_doc)
has_sample_data = partial(has_dataset, name='sample')
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='sample',
download=download)
data_path.__doc__ = _data_path_doc.format(name='sample',
conf='MNE_DATASETS_SAMPLE_PATH')
def get_version(): # noqa: D103
return _get_version('sample')
get_version.__doc__ = _version_doc.format(name='sample')
|
from datetime import timedelta
import logging
import transmissionrpc
from transmissionrpc.error import TransmissionError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_HOST,
CONF_ID,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_USERNAME,
)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from .const import (
ATTR_DELETE_DATA,
ATTR_TORRENT,
CONF_LIMIT,
CONF_ORDER,
DATA_UPDATED,
DEFAULT_DELETE_DATA,
DEFAULT_LIMIT,
DEFAULT_NAME,
DEFAULT_ORDER,
DEFAULT_PORT,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
EVENT_DOWNLOADED_TORRENT,
EVENT_REMOVED_TORRENT,
EVENT_STARTED_TORRENT,
SERVICE_ADD_TORRENT,
SERVICE_REMOVE_TORRENT,
)
from .errors import AuthenticationError, CannotConnect, UnknownError
_LOGGER = logging.getLogger(__name__)
SERVICE_ADD_TORRENT_SCHEMA = vol.Schema(
{vol.Required(ATTR_TORRENT): cv.string, vol.Required(CONF_NAME): cv.string}
)
SERVICE_REMOVE_TORRENT_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ID): cv.positive_int,
vol.Optional(ATTR_DELETE_DATA, default=DEFAULT_DELETE_DATA): cv.boolean,
}
)
TRANS_SCHEMA = vol.All(
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
}
)
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.ensure_list, [TRANS_SCHEMA])}, extra=vol.ALLOW_EXTRA
)
PLATFORMS = ["sensor", "switch"]
async def async_setup(hass, config):
"""Import the Transmission Component from config."""
if DOMAIN in config:
for entry in config[DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=entry
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up the Transmission Component."""
client = TransmissionClient(hass, config_entry)
hass.data.setdefault(DOMAIN, {})[config_entry.entry_id] = client
if not await client.async_setup():
return False
return True
async def async_unload_entry(hass, config_entry):
"""Unload Transmission Entry from config_entry."""
client = hass.data[DOMAIN].pop(config_entry.entry_id)
if client.unsub_timer:
client.unsub_timer()
for platform in PLATFORMS:
await hass.config_entries.async_forward_entry_unload(config_entry, platform)
if not hass.data[DOMAIN]:
hass.services.async_remove(DOMAIN, SERVICE_ADD_TORRENT)
hass.services.async_remove(DOMAIN, SERVICE_REMOVE_TORRENT)
return True
async def get_api(hass, entry):
"""Get Transmission client."""
host = entry[CONF_HOST]
port = entry[CONF_PORT]
username = entry.get(CONF_USERNAME)
password = entry.get(CONF_PASSWORD)
try:
api = await hass.async_add_executor_job(
transmissionrpc.Client, host, port, username, password
)
_LOGGER.debug("Successfully connected to %s", host)
return api
except TransmissionError as error:
if "401: Unauthorized" in str(error):
_LOGGER.error("Credentials for Transmission client are not valid")
raise AuthenticationError from error
if "111: Connection refused" in str(error):
_LOGGER.error("Connecting to the Transmission client %s failed", host)
raise CannotConnect from error
_LOGGER.error(error)
raise UnknownError from error
class TransmissionClient:
"""Transmission Client Object."""
def __init__(self, hass, config_entry):
"""Initialize the Transmission RPC API."""
self.hass = hass
self.config_entry = config_entry
self.tm_api = None
self._tm_data = None
self.unsub_timer = None
@property
def api(self):
"""Return the tm_data object."""
return self._tm_data
async def async_setup(self):
"""Set up the Transmission client."""
try:
self.tm_api = await get_api(self.hass, self.config_entry.data)
except CannotConnect as error:
raise ConfigEntryNotReady from error
except (AuthenticationError, UnknownError):
return False
self._tm_data = TransmissionData(self.hass, self.config_entry, self.tm_api)
await self.hass.async_add_executor_job(self._tm_data.init_torrent_list)
await self.hass.async_add_executor_job(self._tm_data.update)
self.add_options()
self.set_scan_interval(self.config_entry.options[CONF_SCAN_INTERVAL])
for platform in PLATFORMS:
self.hass.async_create_task(
self.hass.config_entries.async_forward_entry_setup(
self.config_entry, platform
)
)
def add_torrent(service):
"""Add new torrent to download."""
tm_client = None
for entry in self.hass.config_entries.async_entries(DOMAIN):
if entry.data[CONF_NAME] == service.data[CONF_NAME]:
tm_client = self.hass.data[DOMAIN][entry.entry_id]
break
if tm_client is None:
_LOGGER.error("Transmission instance is not found")
return
torrent = service.data[ATTR_TORRENT]
if torrent.startswith(
("http", "ftp:", "magnet:")
) or self.hass.config.is_allowed_path(torrent):
tm_client.tm_api.add_torrent(torrent)
tm_client.api.update()
else:
_LOGGER.warning(
"Could not add torrent: unsupported type or no permission"
)
def remove_torrent(service):
"""Remove torrent."""
tm_client = None
for entry in self.hass.config_entries.async_entries(DOMAIN):
if entry.data[CONF_NAME] == service.data[CONF_NAME]:
tm_client = self.hass.data[DOMAIN][entry.entry_id]
break
if tm_client is None:
_LOGGER.error("Transmission instance is not found")
return
torrent_id = service.data[CONF_ID]
delete_data = service.data[ATTR_DELETE_DATA]
tm_client.tm_api.remove_torrent(torrent_id, delete_data=delete_data)
tm_client.api.update()
self.hass.services.async_register(
DOMAIN, SERVICE_ADD_TORRENT, add_torrent, schema=SERVICE_ADD_TORRENT_SCHEMA
)
self.hass.services.async_register(
DOMAIN,
SERVICE_REMOVE_TORRENT,
remove_torrent,
schema=SERVICE_REMOVE_TORRENT_SCHEMA,
)
self.config_entry.add_update_listener(self.async_options_updated)
return True
def add_options(self):
"""Add options for entry."""
if not self.config_entry.options:
scan_interval = self.config_entry.data.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
)
limit = self.config_entry.data.get(CONF_LIMIT, DEFAULT_LIMIT)
order = self.config_entry.data.get(CONF_ORDER, DEFAULT_ORDER)
options = {
CONF_SCAN_INTERVAL: scan_interval,
CONF_LIMIT: limit,
CONF_ORDER: order,
}
self.hass.config_entries.async_update_entry(
self.config_entry, options=options
)
def set_scan_interval(self, scan_interval):
"""Update scan interval."""
def refresh(event_time):
"""Get the latest data from Transmission."""
self._tm_data.update()
if self.unsub_timer is not None:
self.unsub_timer()
self.unsub_timer = async_track_time_interval(
self.hass, refresh, timedelta(seconds=scan_interval)
)
@staticmethod
async def async_options_updated(hass, entry):
"""Triggered by config entry options updates."""
tm_client = hass.data[DOMAIN][entry.entry_id]
tm_client.set_scan_interval(entry.options[CONF_SCAN_INTERVAL])
await hass.async_add_executor_job(tm_client.api.update)
class TransmissionData:
"""Get the latest data and update the states."""
def __init__(self, hass, config, api):
"""Initialize the Transmission RPC API."""
self.hass = hass
self.config = config
self.data = None
self.torrents = []
self.session = None
self.available = True
self._api = api
self.completed_torrents = []
self.started_torrents = []
self.all_torrents = []
@property
def host(self):
"""Return the host name."""
return self.config.data[CONF_HOST]
@property
def signal_update(self):
"""Update signal per transmission entry."""
return f"{DATA_UPDATED}-{self.host}"
def update(self):
"""Get the latest data from Transmission instance."""
try:
self.data = self._api.session_stats()
self.torrents = self._api.get_torrents()
self.session = self._api.get_session()
self.check_completed_torrent()
self.check_started_torrent()
self.check_removed_torrent()
_LOGGER.debug("Torrent Data for %s Updated", self.host)
self.available = True
except TransmissionError:
self.available = False
_LOGGER.error("Unable to connect to Transmission client %s", self.host)
dispatcher_send(self.hass, self.signal_update)
def init_torrent_list(self):
"""Initialize torrent lists."""
self.torrents = self._api.get_torrents()
self.completed_torrents = [
x.name for x in self.torrents if x.status == "seeding"
]
self.started_torrents = [
x.name for x in self.torrents if x.status == "downloading"
]
def check_completed_torrent(self):
"""Get completed torrent functionality."""
actual_torrents = self.torrents
actual_completed_torrents = [
var.name for var in actual_torrents if var.status == "seeding"
]
tmp_completed_torrents = list(
set(actual_completed_torrents).difference(self.completed_torrents)
)
for var in tmp_completed_torrents:
self.hass.bus.fire(EVENT_DOWNLOADED_TORRENT, {"name": var})
self.completed_torrents = actual_completed_torrents
def check_started_torrent(self):
"""Get started torrent functionality."""
actual_torrents = self.torrents
actual_started_torrents = [
var.name for var in actual_torrents if var.status == "downloading"
]
tmp_started_torrents = list(
set(actual_started_torrents).difference(self.started_torrents)
)
for var in tmp_started_torrents:
self.hass.bus.fire(EVENT_STARTED_TORRENT, {"name": var})
self.started_torrents = actual_started_torrents
def check_removed_torrent(self):
"""Get removed torrent functionality."""
actual_torrents = self.torrents
actual_all_torrents = [var.name for var in actual_torrents]
removed_torrents = list(set(self.all_torrents).difference(actual_all_torrents))
for var in removed_torrents:
self.hass.bus.fire(EVENT_REMOVED_TORRENT, {"name": var})
self.all_torrents = actual_all_torrents
def start_torrents(self):
"""Start all torrents."""
if len(self.torrents) <= 0:
return
self._api.start_all()
def stop_torrents(self):
"""Stop all active torrents."""
torrent_ids = [torrent.id for torrent in self.torrents]
self._api.stop_torrent(torrent_ids)
def set_alt_speed_enabled(self, is_enabled):
"""Set the alternative speed flag."""
self._api.set_session(alt_speed_enabled=is_enabled)
def get_alt_speed_enabled(self):
"""Get the alternative speed flag."""
if self.session is None:
return None
return self.session.alt_speed_enabled
|
from typing import List
import voluptuous as vol
from homeassistant.components.automation import AutomationActionType
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.homeassistant.triggers import state as state_trigger
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_PLATFORM,
CONF_TYPE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import CALLBACK_TYPE, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN
TRIGGER_TYPES = {"turned_on", "turned_off"}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TRIGGER_TYPES),
}
)
async def async_get_triggers(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device triggers for Fan devices."""
registry = await entity_registry.async_get_registry(hass)
triggers = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add triggers for each entity that belongs to this integration
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turned_on",
}
)
triggers.append(
{
CONF_PLATFORM: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "turned_off",
}
)
return triggers
async def async_attach_trigger(
hass: HomeAssistant,
config: ConfigType,
action: AutomationActionType,
automation_info: dict,
) -> CALLBACK_TYPE:
"""Attach a trigger."""
config = TRIGGER_SCHEMA(config)
if config[CONF_TYPE] == "turned_on":
from_state = STATE_OFF
to_state = STATE_ON
else:
from_state = STATE_ON
to_state = STATE_OFF
state_config = {
state_trigger.CONF_PLATFORM: "state",
CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state_trigger.CONF_FROM: from_state,
state_trigger.CONF_TO: to_state,
}
state_config = state_trigger.TRIGGER_SCHEMA(state_config)
return await state_trigger.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
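# Illustrative example (hypothetical values) of a trigger config accepted by
# TRIGGER_SCHEMA above; async_attach_trigger turns it into a state trigger that
# fires when the fan entity changes from STATE_OFF to STATE_ON:
#
#     {
#         "platform": "device",
#         "domain": DOMAIN,
#         "device_id": "abc123",
#         "entity_id": "fan.living_room",
#         "type": "turned_on",
#     }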
|
import asyncio
import socket
import unittest
from unittest import mock
import pytest
import requests
from uvcclient import camera, nvr
from homeassistant.components.camera import SUPPORT_STREAM
from homeassistant.components.uvc import camera as uvc
from homeassistant.exceptions import PlatformNotReady
from homeassistant.setup import setup_component
from tests.common import get_test_home_assistant
class TestUVCSetup(unittest.TestCase):
"""Test the UVC camera platform."""
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.addCleanup(self.hass.stop)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_full_config(self, mock_uvc, mock_remote):
"""Test the setup with full configuration."""
config = {
"platform": "uvc",
"nvr": "foo",
"password": "bar",
"port": 123,
"key": "secret",
}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
{"uuid": "three", "name": "Old AirCam", "id": "id3"},
]
def mock_get_camera(uuid):
"""Create a mock camera."""
if uuid == "id3":
return {"model": "airCam"}
return {"model": "UVC"}
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.side_effect = mock_get_camera
mock_remote.return_value.server_version = (3, 2, 0)
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 123, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "id1", "Front", "bar"),
mock.call(mock_remote.return_value, "id2", "Back", "bar"),
]
)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_partial_config(self, mock_uvc, mock_remote):
"""Test the setup with partial configuration."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
]
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.return_value = {"model": "UVC"}
mock_remote.return_value.server_version = (3, 2, 0)
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 7080, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "id1", "Front", "ubnt"),
mock.call(mock_remote.return_value, "id2", "Back", "ubnt"),
]
)
@mock.patch("uvcclient.nvr.UVCRemote")
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_partial_config_v31x(self, mock_uvc, mock_remote):
"""Test the setup with a v3.1.x server."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_cameras = [
{"uuid": "one", "name": "Front", "id": "id1"},
{"uuid": "two", "name": "Back", "id": "id2"},
]
mock_remote.return_value.index.return_value = mock_cameras
mock_remote.return_value.get_camera.return_value = {"model": "UVC"}
mock_remote.return_value.server_version = (3, 1, 3)
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert mock_remote.call_count == 1
assert mock_remote.call_args == mock.call("foo", 7080, "secret", ssl=False)
mock_uvc.assert_has_calls(
[
mock.call(mock_remote.return_value, "one", "Front", "ubnt"),
mock.call(mock_remote.return_value, "two", "Back", "ubnt"),
]
)
@mock.patch.object(uvc, "UnifiVideoCamera")
def test_setup_incomplete_config(self, mock_uvc):
"""Test the setup with incomplete configuration."""
assert setup_component(self.hass, "camera", {"platform": "uvc", "nvr": "foo"})
self.hass.block_till_done()
assert not mock_uvc.called
assert setup_component(
self.hass, "camera", {"platform": "uvc", "key": "secret"}
)
self.hass.block_till_done()
assert not mock_uvc.called
assert setup_component(
self.hass, "camera", {"platform": "uvc", "port": "invalid"}
)
self.hass.block_till_done()
assert not mock_uvc.called
@mock.patch.object(uvc, "UnifiVideoCamera")
@mock.patch("uvcclient.nvr.UVCRemote")
def setup_nvr_errors_during_indexing(self, error, mock_remote, mock_uvc):
"""Set up test for NVR errors during indexing."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_remote.return_value.index.side_effect = error
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert not mock_uvc.called
def test_setup_nvr_error_during_indexing_notauthorized(self):
"""Test for error: nvr.NotAuthorized."""
self.setup_nvr_errors_during_indexing(nvr.NotAuthorized)
def test_setup_nvr_error_during_indexing_nvrerror(self):
"""Test for error: nvr.NvrError."""
self.setup_nvr_errors_during_indexing(nvr.NvrError)
pytest.raises(PlatformNotReady)
def test_setup_nvr_error_during_indexing_connectionerror(self):
"""Test for error: requests.exceptions.ConnectionError."""
self.setup_nvr_errors_during_indexing(requests.exceptions.ConnectionError)
pytest.raises(PlatformNotReady)
@mock.patch.object(uvc, "UnifiVideoCamera")
@mock.patch("uvcclient.nvr.UVCRemote.__init__")
def setup_nvr_errors_during_initialization(self, error, mock_remote, mock_uvc):
"""Set up test for NVR errors during initialization."""
config = {"platform": "uvc", "nvr": "foo", "key": "secret"}
mock_remote.return_value = None
mock_remote.side_effect = error
assert setup_component(self.hass, "camera", {"camera": config})
self.hass.block_till_done()
assert not mock_remote.index.called
assert not mock_uvc.called
def test_setup_nvr_error_during_initialization_notauthorized(self):
"""Test for error: nvr.NotAuthorized."""
self.setup_nvr_errors_during_initialization(nvr.NotAuthorized)
def test_setup_nvr_error_during_initialization_nvrerror(self):
"""Test for error: nvr.NvrError."""
self.setup_nvr_errors_during_initialization(nvr.NvrError)
pytest.raises(PlatformNotReady)
def test_setup_nvr_error_during_initialization_connectionerror(self):
"""Test for error: requests.exceptions.ConnectionError."""
self.setup_nvr_errors_during_initialization(requests.exceptions.ConnectionError)
pytest.raises(PlatformNotReady)
class TestUVC(unittest.TestCase):
"""Test class for UVC."""
def setup_method(self, method):
"""Set up the mock camera."""
self.nvr = mock.MagicMock()
self.uuid = "uuid"
self.name = "name"
self.password = "seekret"
self.uvc = uvc.UnifiVideoCamera(self.nvr, self.uuid, self.name, self.password)
self.nvr.get_camera.return_value = {
"model": "UVC Fake",
"recordingSettings": {"fullTimeRecordEnabled": True},
"host": "host-a",
"internalHost": "host-b",
"username": "admin",
"channels": [
{
"id": "0",
"width": 1920,
"height": 1080,
"fps": 25,
"bitrate": 6000000,
"isRtspEnabled": True,
"rtspUris": [
"rtsp://host-a:7447/uuid_rtspchannel_0",
"rtsp://foo:7447/uuid_rtspchannel_0",
],
},
{
"id": "1",
"width": 1024,
"height": 576,
"fps": 15,
"bitrate": 1200000,
"isRtspEnabled": False,
"rtspUris": [
"rtsp://host-a:7447/uuid_rtspchannel_1",
"rtsp://foo:7447/uuid_rtspchannel_1",
],
},
],
}
self.nvr.server_version = (3, 2, 0)
self.uvc.update()
def test_properties(self):
"""Test the properties."""
assert self.name == self.uvc.name
assert self.uvc.is_recording
assert "Ubiquiti" == self.uvc.brand
assert "UVC Fake" == self.uvc.model
assert SUPPORT_STREAM == self.uvc.supported_features
def test_stream(self):
"""Test the RTSP stream URI."""
        # stream_source is a coroutine; "yield from" in a plain test method would
        # turn the test into a generator that never runs, so drive it explicitly.
        stream_source = asyncio.run(self.uvc.stream_source())
assert stream_source == "rtsp://foo:7447/uuid_rtspchannel_0"
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login(self, mock_camera, mock_store):
"""Test the login."""
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-a", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClient")
def test_login_v31x(self, mock_camera, mock_store):
"""Test login with v3.1.x server."""
self.nvr.server_version = (3, 1, 3)
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-a", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login_tries_both_addrs_and_caches(self, mock_camera, mock_store):
"""Test the login tries."""
responses = [0]
def mock_login(*a):
"""Mock login."""
try:
responses.pop(0)
raise OSError
except IndexError:
pass
mock_store.return_value.get_camera_password.return_value = None
mock_camera.return_value.login.side_effect = mock_login
self.uvc._login()
assert 2 == mock_camera.call_count
assert "host-b" == self.uvc._connect_addr
mock_camera.reset_mock()
self.uvc._login()
assert mock_camera.call_count == 1
assert mock_camera.call_args == mock.call("host-b", "admin", "seekret")
assert mock_camera.return_value.login.call_count == 1
assert mock_camera.return_value.login.call_args == mock.call()
@mock.patch("uvcclient.store.get_info_store")
@mock.patch("uvcclient.camera.UVCCameraClientV320")
def test_login_fails_both_properly(self, mock_camera, mock_store):
"""Test if login fails properly."""
mock_camera.return_value.login.side_effect = socket.error
assert self.uvc._login() is None
assert self.uvc._connect_addr is None
def test_camera_image_tries_login_bails_on_failure(self):
"""Test retrieving failure."""
with mock.patch.object(self.uvc, "_login") as mock_login:
mock_login.return_value = False
assert self.uvc.camera_image() is None
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
def test_camera_image_logged_in(self):
"""Test the login state."""
self.uvc._camera = mock.MagicMock()
assert self.uvc._camera.get_snapshot.return_value == self.uvc.camera_image()
def test_camera_image_error(self):
"""Test the camera image error."""
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = camera.CameraConnectError
assert self.uvc.camera_image() is None
def test_camera_image_reauths(self):
"""Test the re-authentication."""
responses = [0]
def mock_snapshot():
"""Mock snapshot."""
try:
responses.pop()
raise camera.CameraAuthError()
except IndexError:
pass
return "image"
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = mock_snapshot
with mock.patch.object(self.uvc, "_login") as mock_login:
assert "image" == self.uvc.camera_image()
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
assert [] == responses
def test_camera_image_reauths_only_once(self):
"""Test if the re-authentication only happens once."""
self.uvc._camera = mock.MagicMock()
self.uvc._camera.get_snapshot.side_effect = camera.CameraAuthError
with mock.patch.object(self.uvc, "_login") as mock_login:
with pytest.raises(camera.CameraAuthError):
self.uvc.camera_image()
assert mock_login.call_count == 1
assert mock_login.call_args == mock.call()
|
from sklearn.base import BaseEstimator, TransformerMixin
from auto_ml import utils
booleans = set([True, False, 'true', 'false', 'True', 'False', 'TRUE', 'FALSE'])
# Used in CustomSparseScaler
def calculate_scaling_ranges(X, col, min_percentile=0.05, max_percentile=0.95):
    """Compute the percentile-based scaling range for a column; return a summary dict, or 'ignore'/'pass_on_col' when the column should be skipped or passed through unscaled."""
series_vals = X[col]
good_vals_indexes = series_vals.notnull()
series_vals = list(series_vals[good_vals_indexes])
series_vals = sorted(series_vals)
max_val_idx = int(max_percentile * len(series_vals)) - 1
min_val_idx = int(min_percentile * len(series_vals))
if len(series_vals) > 0:
max_val = series_vals[max_val_idx]
min_val = series_vals[min_val_idx]
else:
return 'ignore'
if max_val in booleans or min_val in booleans:
return 'pass_on_col'
inner_range = max_val - min_val
if inner_range == 0:
# Used to do recursion here, which is prettier and uses less code, but since we've already got the filtered and sorted series_vals, it makes sense to use those to avoid duplicate computation
# Grab the absolute largest max and min vals, and see if there is any difference in them, since our 95th and 5th percentile vals had no difference between them
max_val = series_vals[len(series_vals) - 1]
min_val = series_vals[0]
inner_range = max_val - min_val
if inner_range == 0:
# If this is a binary field, keep all the values in it, just make sure they're scaled to 1 or 0.
if max_val == 1:
min_val = 0
inner_range = 1
else:
# If this is just a column that holds all the same values for everything though, delete the column to save some space
return 'ignore'
    col_summary = {
        'max_val': max_val,
        'min_val': min_val,
        'inner_range': inner_range,
    }
return col_summary
# Scale sparse data to the 95th and 5th percentile
# Only do so for values that actually exist (do absolutely nothing with rows that do not have this data point)
class CustomSparseScaler(BaseEstimator, TransformerMixin):
def __init__(self, column_descriptions, truncate_large_values=False, perform_feature_scaling=True, min_percentile=0.05, max_percentile=0.95):
self.column_descriptions = column_descriptions
self.numeric_col_descs = set([None, 'continuous', 'numerical', 'numeric', 'float', 'int'])
# Everything in column_descriptions (except numeric_col_descs) is a non-numeric column, and thus, cannot be scaled
self.cols_to_avoid = set([k for k, v in column_descriptions.items() if v not in self.numeric_col_descs])
# Setting these here so that they can be grid searchable
# Truncating large values is an interesting strategy. It forces all values to fit inside the 5th - 95th percentiles.
# Essentially, it turns any really large (or small) values into reasonably large (or small) values.
self.truncate_large_values = truncate_large_values
self.perform_feature_scaling = perform_feature_scaling
self.min_percentile = min_percentile
self.max_percentile = max_percentile
def get(self, prop_name, default=None):
try:
return getattr(self, prop_name)
except AttributeError:
return default
def fit(self, X, y=None):
print('Performing feature scaling')
self.column_ranges = {}
self.cols_to_ignore = []
if self.perform_feature_scaling:
for col in X.columns:
if col not in self.cols_to_avoid:
col_summary = calculate_scaling_ranges(X, col, min_percentile=self.min_percentile, max_percentile=self.max_percentile)
if col_summary == 'ignore':
self.cols_to_ignore.append(col)
elif col_summary == 'pass_on_col':
pass
else:
self.column_ranges[col] = col_summary
return self
    # Perform basic min/max scaling, with the minor caveat that our min and max values are taken at the configured percentiles (5th and 95th by default), to avoid outliers.
def transform(self, X, y=None):
if isinstance(X, dict):
for col, col_dict in self.column_ranges.items():
if col in X:
X[col] = scale_val(val=X[col], min_val=col_dict['min_val'], total_range=col_dict['inner_range'], truncate_large_values=self.truncate_large_values)
else:
if len(self.cols_to_ignore) > 0:
X = utils.safely_drop_columns(X, self.cols_to_ignore)
for col, col_dict in self.column_ranges.items():
if col in X.columns:
min_val = col_dict['min_val']
inner_range = col_dict['inner_range']
X[col] = X[col].apply(lambda x: scale_val(x, min_val, inner_range, self.truncate_large_values))
return X
def scale_val(val, min_val, total_range, truncate_large_values=False):
scaled_value = (val - min_val) / total_range
if truncate_large_values:
if scaled_value < 0:
scaled_value = 0
elif scaled_value > 1:
scaled_value = 1
return scaled_value
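# Quick usage sketch of the scaling math above (illustrative values only):
# min_val maps to 0.0, min_val + total_range maps to 1.0, and with
# truncate_large_values=True anything outside that band is clipped into [0, 1].
if __name__ == '__main__':
    assert scale_val(50, min_val=0, total_range=100) == 0.5
    assert scale_val(150, min_val=0, total_range=100, truncate_large_values=True) == 1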
|
from datetime import timedelta
from typing import Any, Callable, Dict, List, Optional
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DEVICE_CLASS_TIMESTAMP, PERCENTAGE
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import utcnow
from . import IPPDataUpdateCoordinator, IPPEntity
from .const import (
ATTR_COMMAND_SET,
ATTR_INFO,
ATTR_LOCATION,
ATTR_MARKER_HIGH_LEVEL,
ATTR_MARKER_LOW_LEVEL,
ATTR_MARKER_TYPE,
ATTR_SERIAL,
ATTR_STATE_MESSAGE,
ATTR_STATE_REASON,
ATTR_URI_SUPPORTED,
DOMAIN,
)
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up IPP sensor based on a config entry."""
coordinator: IPPDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
    # The config flow sets the unique ID to either the UUID, the serial number or None
unique_id = entry.unique_id
if unique_id is None:
unique_id = entry.entry_id
sensors = []
sensors.append(IPPPrinterSensor(entry.entry_id, unique_id, coordinator))
sensors.append(IPPUptimeSensor(entry.entry_id, unique_id, coordinator))
for marker_index in range(len(coordinator.data.markers)):
sensors.append(
IPPMarkerSensor(entry.entry_id, unique_id, coordinator, marker_index)
)
async_add_entities(sensors, True)
class IPPSensor(IPPEntity):
"""Defines an IPP sensor."""
def __init__(
self,
*,
coordinator: IPPDataUpdateCoordinator,
enabled_default: bool = True,
entry_id: str,
unique_id: str,
icon: str,
key: str,
name: str,
unit_of_measurement: Optional[str] = None,
) -> None:
"""Initialize IPP sensor."""
self._unit_of_measurement = unit_of_measurement
self._key = key
self._unique_id = None
if unique_id is not None:
self._unique_id = f"{unique_id}_{key}"
super().__init__(
entry_id=entry_id,
device_id=unique_id,
coordinator=coordinator,
name=name,
icon=icon,
enabled_default=enabled_default,
)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return self._unique_id
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
class IPPMarkerSensor(IPPSensor):
"""Defines an IPP marker sensor."""
def __init__(
self,
entry_id: str,
unique_id: str,
coordinator: IPPDataUpdateCoordinator,
marker_index: int,
) -> None:
"""Initialize IPP marker sensor."""
self.marker_index = marker_index
super().__init__(
coordinator=coordinator,
entry_id=entry_id,
unique_id=unique_id,
icon="mdi:water",
key=f"marker_{marker_index}",
name=f"{coordinator.data.info.name} {coordinator.data.markers[marker_index].name}",
unit_of_measurement=PERCENTAGE,
)
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the entity."""
return {
ATTR_MARKER_HIGH_LEVEL: self.coordinator.data.markers[
self.marker_index
].high_level,
ATTR_MARKER_LOW_LEVEL: self.coordinator.data.markers[
self.marker_index
].low_level,
ATTR_MARKER_TYPE: self.coordinator.data.markers[
self.marker_index
].marker_type,
}
@property
def state(self) -> Optional[int]:
"""Return the state of the sensor."""
level = self.coordinator.data.markers[self.marker_index].level
if level >= 0:
return level
return None
class IPPPrinterSensor(IPPSensor):
"""Defines an IPP printer sensor."""
def __init__(
self, entry_id: str, unique_id: str, coordinator: IPPDataUpdateCoordinator
) -> None:
"""Initialize IPP printer sensor."""
super().__init__(
coordinator=coordinator,
entry_id=entry_id,
unique_id=unique_id,
icon="mdi:printer",
key="printer",
name=coordinator.data.info.name,
unit_of_measurement=None,
)
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the entity."""
return {
ATTR_INFO: self.coordinator.data.info.printer_info,
ATTR_SERIAL: self.coordinator.data.info.serial,
ATTR_LOCATION: self.coordinator.data.info.location,
ATTR_STATE_MESSAGE: self.coordinator.data.state.message,
ATTR_STATE_REASON: self.coordinator.data.state.reasons,
ATTR_COMMAND_SET: self.coordinator.data.info.command_set,
ATTR_URI_SUPPORTED: self.coordinator.data.info.printer_uri_supported,
}
@property
def state(self) -> str:
"""Return the state of the sensor."""
return self.coordinator.data.state.printer_state
class IPPUptimeSensor(IPPSensor):
"""Defines a IPP uptime sensor."""
def __init__(
self, entry_id: str, unique_id: str, coordinator: IPPDataUpdateCoordinator
) -> None:
"""Initialize IPP uptime sensor."""
super().__init__(
coordinator=coordinator,
enabled_default=False,
entry_id=entry_id,
unique_id=unique_id,
icon="mdi:clock-outline",
key="uptime",
name=f"{coordinator.data.info.name} Uptime",
)
@property
def state(self) -> str:
"""Return the state of the sensor."""
uptime = utcnow() - timedelta(seconds=self.coordinator.data.info.uptime)
return uptime.replace(microsecond=0).isoformat()
@property
def device_class(self) -> Optional[str]:
"""Return the class of this sensor."""
return DEVICE_CLASS_TIMESTAMP
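# Illustrative note: the uptime sensor above reports a timestamp computed as
# "now minus the reported uptime in seconds", so a reported uptime of 3600
# yields a boot time one hour in the past (microseconds are stripped before
# isoformat() so the state stays stable between refreshes).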
|
import json
from homeassistant.bootstrap import async_setup_component
from homeassistant.components import config
from tests.async_mock import AsyncMock, patch
VIEW_NAME = "api:config:group:config"
async def test_get_device_config(hass, hass_client):
"""Test getting device config."""
with patch.object(config, "SECTIONS", ["group"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
def mock_read(path):
"""Mock reading data."""
return {"hello.beer": {"free": "beer"}, "other.entity": {"do": "something"}}
with patch("homeassistant.components.config._read", mock_read):
resp = await client.get("/api/config/group/config/hello.beer")
assert resp.status == 200
result = await resp.json()
assert result == {"free": "beer"}
async def test_update_device_config(hass, hass_client):
"""Test updating device config."""
with patch.object(config, "SECTIONS", ["group"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
orig_data = {
"hello.beer": {"ignored": True},
"other.entity": {"polling_intensity": 2},
}
def mock_read(path):
"""Mock reading data."""
return orig_data
written = []
def mock_write(path, data):
"""Mock writing data."""
written.append(data)
mock_call = AsyncMock()
with patch("homeassistant.components.config._read", mock_read), patch(
"homeassistant.components.config._write", mock_write
), patch.object(hass.services, "async_call", mock_call):
resp = await client.post(
"/api/config/group/config/hello_beer",
data=json.dumps(
{"name": "Beer", "entities": ["light.top", "light.bottom"]}
),
)
await hass.async_block_till_done()
assert resp.status == 200
result = await resp.json()
assert result == {"result": "ok"}
orig_data["hello_beer"]["name"] = "Beer"
orig_data["hello_beer"]["entities"] = ["light.top", "light.bottom"]
assert written[0] == orig_data
mock_call.assert_called_once_with("group", "reload")
async def test_update_device_config_invalid_key(hass, hass_client):
"""Test updating device config."""
with patch.object(config, "SECTIONS", ["group"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
resp = await client.post(
"/api/config/group/config/not a slug", data=json.dumps({"name": "YO"})
)
assert resp.status == 400
async def test_update_device_config_invalid_data(hass, hass_client):
"""Test updating device config."""
with patch.object(config, "SECTIONS", ["group"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
resp = await client.post(
"/api/config/group/config/hello_beer", data=json.dumps({"invalid_option": 2})
)
assert resp.status == 400
async def test_update_device_config_invalid_json(hass, hass_client):
"""Test updating device config."""
with patch.object(config, "SECTIONS", ["group"]):
await async_setup_component(hass, "config", {})
client = await hass_client()
resp = await client.post("/api/config/group/config/hello_beer", data="not json")
assert resp.status == 400
|
import re
import html
from PyQt5.QtWidgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate
from PyQt5.QtCore import QRectF, QRegularExpression, QSize, Qt
from PyQt5.QtGui import (QIcon, QPalette, QTextDocument, QTextOption,
QAbstractTextDocumentLayout, QSyntaxHighlighter,
QTextCharFormat)
from qutebrowser.config import config
from qutebrowser.utils import qtutils
class _Highlighter(QSyntaxHighlighter):
    """Highlight the completion filter pattern inside item text."""
def __init__(self, doc, pattern, color):
super().__init__(doc)
self._format = QTextCharFormat()
self._format.setForeground(color)
words = pattern.split()
words.sort(key=len, reverse=True)
pat = "|".join(re.escape(word) for word in words)
self._expression = QRegularExpression(
pat, QRegularExpression.CaseInsensitiveOption
)
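    # Example (illustrative): the filter pattern "foo ba" becomes the
    # case-insensitive expression "foo|ba"; escaping each word and sorting
    # longest-first means longer words win when alternatives overlap at the
    # same position.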
def highlightBlock(self, text):
"""Override highlightBlock for custom highlighting."""
match_iterator = self._expression.globalMatch(text)
while match_iterator.hasNext():
match = match_iterator.next()
self.setFormat(
match.capturedStart(),
match.capturedLength(),
self._format
)
class CompletionItemDelegate(QStyledItemDelegate):
"""Delegate used by CompletionView to draw individual items.
Mainly a cleaned up port of Qt's way to draw a TreeView item, except it
uses a QTextDocument to draw the text and add marking.
Original implementation:
qt/src/gui/styles/qcommonstyle.cpp:drawControl:2153
Attributes:
_opt: The QStyleOptionViewItem which is used.
_style: The style to be used.
_painter: The QPainter to be used.
_doc: The QTextDocument to be used.
"""
# FIXME this is horribly slow when resizing.
# We should probably cache something in _get_textdoc or so, but as soon as
# we implement eliding that cache probably isn't worth much anymore...
# https://github.com/qutebrowser/qutebrowser/issues/121
def __init__(self, parent=None):
self._painter = None
self._opt = None
self._doc = None
self._style = None
super().__init__(parent)
def _draw_background(self):
"""Draw the background of an ItemViewItem."""
assert self._opt is not None
assert self._style is not None
self._style.drawPrimitive(self._style.PE_PanelItemViewItem, self._opt,
self._painter, self._opt.widget)
def _draw_icon(self):
"""Draw the icon of an ItemViewItem."""
assert self._opt is not None
assert self._style is not None
icon_rect = self._style.subElementRect(
self._style.SE_ItemViewItemDecoration, self._opt, self._opt.widget)
if not icon_rect.isValid():
# The rect seems to be wrong in all kind of ways if no icon should
# be displayed.
return
mode = QIcon.Normal
if not self._opt.state & QStyle.State_Enabled:
mode = QIcon.Disabled
elif self._opt.state & QStyle.State_Selected:
mode = QIcon.Selected
state = QIcon.On if self._opt.state & QStyle.State_Open else QIcon.Off
self._opt.icon.paint(self._painter, icon_rect,
self._opt.decorationAlignment, mode, state)
def _draw_text(self, index):
"""Draw the text of an ItemViewItem.
This is the main part where we differ from the original implementation
in Qt: We use a QTextDocument to draw text.
Args:
index: The QModelIndex of the item to draw.
"""
assert self._opt is not None
assert self._painter is not None
assert self._style is not None
if not self._opt.text:
return
text_rect_ = self._style.subElementRect(
self._style.SE_ItemViewItemText, self._opt, self._opt.widget)
qtutils.ensure_valid(text_rect_)
margin = self._style.pixelMetric(QStyle.PM_FocusFrameHMargin,
self._opt, self._opt.widget) + 1
# remove width padding
text_rect = text_rect_.adjusted(margin, 0, -margin, 0)
qtutils.ensure_valid(text_rect)
# move text upwards a bit
if index.parent().isValid():
text_rect.adjust(0, -1, 0, -1)
else:
text_rect.adjust(0, -2, 0, -2)
self._painter.save()
state = self._opt.state
if state & QStyle.State_Enabled and state & QStyle.State_Active:
cg = QPalette.Normal
elif state & QStyle.State_Enabled:
cg = QPalette.Inactive
else:
cg = QPalette.Disabled
if state & QStyle.State_Selected:
self._painter.setPen(self._opt.palette.color(
cg, QPalette.HighlightedText))
# This is a dirty fix for the text jumping by one pixel for
# whatever reason.
text_rect.adjust(0, -1, 0, 0)
else:
self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))
if state & QStyle.State_Editing:
self._painter.setPen(self._opt.palette.color(cg, QPalette.Text))
self._painter.drawRect(text_rect_.adjusted(0, 0, -1, -1))
self._painter.translate(text_rect.left(), text_rect.top())
self._get_textdoc(index)
self._draw_textdoc(text_rect, index.column())
self._painter.restore()
def _draw_textdoc(self, rect, col):
"""Draw the QTextDocument of an item.
Args:
            rect: The QRect to clip the drawing to.
            col: The index of the column being drawn, used to pick the text color.
        """
assert self._painter is not None
assert self._doc is not None
assert self._opt is not None
# We can't use drawContents because then the color would be ignored.
clip = QRectF(0, 0, rect.width(), rect.height())
self._painter.save()
if self._opt.state & QStyle.State_Selected:
color = config.cache['colors.completion.item.selected.fg']
elif not self._opt.state & QStyle.State_Enabled:
color = config.cache['colors.completion.category.fg']
else:
colors = config.cache['colors.completion.fg']
# if multiple colors are set, use different colors per column
color = colors[col % len(colors)]
self._painter.setPen(color)
ctx = QAbstractTextDocumentLayout.PaintContext()
ctx.palette.setColor(QPalette.Text, self._painter.pen().color())
if clip.isValid():
self._painter.setClipRect(clip)
ctx.clip = clip
self._doc.documentLayout().draw(self._painter, ctx)
self._painter.restore()
def _get_textdoc(self, index):
"""Create the QTextDocument of an item.
Args:
index: The QModelIndex of the item to draw.
"""
assert self._opt is not None
# FIXME we probably should do eliding here. See
# qcommonstyle.cpp:viewItemDrawText
# https://github.com/qutebrowser/qutebrowser/issues/118
text_option = QTextOption()
if self._opt.features & QStyleOptionViewItem.WrapText:
text_option.setWrapMode(QTextOption.WordWrap)
else:
text_option.setWrapMode(QTextOption.ManualWrap)
text_option.setTextDirection(self._opt.direction)
text_option.setAlignment(QStyle.visualAlignment(
self._opt.direction, self._opt.displayAlignment))
if self._doc is not None:
self._doc.deleteLater()
self._doc = QTextDocument(self)
self._doc.setDefaultFont(self._opt.font)
self._doc.setDefaultTextOption(text_option)
self._doc.setDocumentMargin(2)
if index.parent().isValid():
view = self.parent()
pattern = view.pattern
columns_to_filter = index.model().columns_to_filter(index)
if index.column() in columns_to_filter and pattern:
if self._opt.state & QStyle.State_Selected:
color = config.val.colors.completion.item.selected.match.fg
else:
color = config.val.colors.completion.match.fg
_Highlighter(self._doc, pattern, color)
self._doc.setPlainText(self._opt.text)
else:
self._doc.setHtml(
'<span style="font: {};">{}</span>'.format(
html.escape(config.val.fonts.completion.category),
html.escape(self._opt.text)))
def _draw_focus_rect(self):
"""Draw the focus rectangle of an ItemViewItem."""
assert self._opt is not None
assert self._style is not None
state = self._opt.state
if not state & QStyle.State_HasFocus:
return
o = self._opt
o.rect = self._style.subElementRect(
self._style.SE_ItemViewItemFocusRect, self._opt, self._opt.widget)
o.state |= int(QStyle.State_KeyboardFocusChange | QStyle.State_Item)
qtutils.ensure_valid(o.rect)
if state & QStyle.State_Enabled:
cg = QPalette.Normal
else:
cg = QPalette.Disabled
if state & QStyle.State_Selected:
role = QPalette.Highlight
else:
role = QPalette.Window
o.backgroundColor = self._opt.palette.color(cg, role)
self._style.drawPrimitive(QStyle.PE_FrameFocusRect, o, self._painter,
self._opt.widget)
def sizeHint(self, option, index):
"""Override sizeHint of QStyledItemDelegate.
Return the cell size based on the QTextDocument size, but might not
work correctly yet.
Args:
option: const QStyleOptionViewItem & option
index: const QModelIndex & index
Return:
A QSize with the recommended size.
"""
value = index.data(Qt.SizeHintRole)
if value is not None:
return value
self._opt = QStyleOptionViewItem(option)
self.initStyleOption(self._opt, index)
self._style = self._opt.widget.style()
self._get_textdoc(index)
assert self._doc is not None
docsize = self._doc.size().toSize()
size = self._style.sizeFromContents(QStyle.CT_ItemViewItem, self._opt,
docsize, self._opt.widget)
qtutils.ensure_valid(size)
return size + QSize(10, 3) # type: ignore[operator]
def paint(self, painter, option, index):
"""Override the QStyledItemDelegate paint function.
Args:
painter: QPainter * painter
option: const QStyleOptionViewItem & option
index: const QModelIndex & index
"""
self._painter = painter
self._painter.save()
self._opt = QStyleOptionViewItem(option)
self.initStyleOption(self._opt, index)
self._style = self._opt.widget.style()
self._draw_background()
self._draw_icon()
self._draw_text(index)
self._draw_focus_rect()
self._painter.restore()
|
from tests.async_mock import mock_open, patch
KEY_PYTHON = "python"
KEY_SERVER = "server"
ADB_DEVICE_TCP_ASYNC_FAKE = "AdbDeviceTcpAsyncFake"
DEVICE_ASYNC_FAKE = "DeviceAsyncFake"
class AdbDeviceTcpAsyncFake:
"""A fake of the `adb_shell.adb_device_async.AdbDeviceTcpAsync` class."""
def __init__(self, *args, **kwargs):
"""Initialize a fake `adb_shell.adb_device_async.AdbDeviceTcpAsync` instance."""
self.available = False
async def close(self):
"""Close the socket connection."""
self.available = False
async def connect(self, *args, **kwargs):
"""Try to connect to a device."""
raise NotImplementedError
async def shell(self, cmd, *args, **kwargs):
"""Send an ADB shell command."""
return None
class ClientAsyncFakeSuccess:
"""A fake of the `ClientAsync` class when the connection and shell commands succeed."""
def __init__(self, host="127.0.0.1", port=5037):
"""Initialize a `ClientAsyncFakeSuccess` instance."""
self._devices = []
async def device(self, serial):
"""Mock the `ClientAsync.device` method when the device is connected via ADB."""
device = DeviceAsyncFake(serial)
self._devices.append(device)
return device
class ClientAsyncFakeFail:
"""A fake of the `ClientAsync` class when the connection and shell commands fail."""
def __init__(self, host="127.0.0.1", port=5037):
"""Initialize a `ClientAsyncFakeFail` instance."""
self._devices = []
async def device(self, serial):
"""Mock the `ClientAsync.device` method when the device is not connected via ADB."""
self._devices = []
return None
class DeviceAsyncFake:
"""A fake of the `DeviceAsync` class."""
def __init__(self, host):
"""Initialize a `DeviceAsyncFake` instance."""
self.host = host
async def shell(self, cmd):
"""Send an ADB shell command."""
raise NotImplementedError
def patch_connect(success):
"""Mock the `adb_shell.adb_device_async.AdbDeviceTcpAsync` and `ClientAsync` classes."""
async def connect_success_python(self, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.connect` method when it succeeds."""
self.available = True
async def connect_fail_python(self, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.connect` method when it fails."""
raise OSError
if success:
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.connect",
connect_success_python,
),
KEY_SERVER: patch(
"androidtv.adb_manager.adb_manager_async.ClientAsync",
ClientAsyncFakeSuccess,
),
}
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.connect", connect_fail_python
),
KEY_SERVER: patch(
"androidtv.adb_manager.adb_manager_async.ClientAsync", ClientAsyncFakeFail
),
}
def patch_shell(response=None, error=False):
"""Mock the `AdbDeviceTcpAsyncFake.shell` and `DeviceAsyncFake.shell` methods."""
async def shell_success(self, cmd, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.shell` and `DeviceAsyncFake.shell` methods when they are successful."""
self.shell_cmd = cmd
return response
async def shell_fail_python(self, cmd, *args, **kwargs):
"""Mock the `AdbDeviceTcpAsyncFake.shell` method when it fails."""
self.shell_cmd = cmd
raise ValueError
async def shell_fail_server(self, cmd):
"""Mock the `DeviceAsyncFake.shell` method when it fails."""
self.shell_cmd = cmd
raise ConnectionResetError
if not error:
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.shell", shell_success
),
KEY_SERVER: patch(f"{__name__}.{DEVICE_ASYNC_FAKE}.shell", shell_success),
}
return {
KEY_PYTHON: patch(
f"{__name__}.{ADB_DEVICE_TCP_ASYNC_FAKE}.shell", shell_fail_python
),
KEY_SERVER: patch(f"{__name__}.{DEVICE_ASYNC_FAKE}.shell", shell_fail_server),
}
PATCH_ADB_DEVICE_TCP = patch(
"androidtv.adb_manager.adb_manager_async.AdbDeviceTcpAsync", AdbDeviceTcpAsyncFake
)
PATCH_ANDROIDTV_OPEN = patch(
"homeassistant.components.androidtv.media_player.open", mock_open()
)
PATCH_KEYGEN = patch("homeassistant.components.androidtv.media_player.keygen")
PATCH_SIGNER = patch(
"homeassistant.components.androidtv.media_player.ADBPythonSync.load_adbkey",
return_value="signer for testing",
)
def isfile(filepath):
"""Mock `os.path.isfile`."""
return filepath.endswith("adbkey")
PATCH_ISFILE = patch("os.path.isfile", isfile)
PATCH_ACCESS = patch("os.access", return_value=True)
def patch_firetv_update(state, current_app, running_apps):
"""Patch the `FireTV.update()` method."""
return patch(
"androidtv.firetv.firetv_async.FireTVAsync.update",
return_value=(state, current_app, running_apps),
)
def patch_androidtv_update(
state, current_app, running_apps, device, is_volume_muted, volume_level
):
"""Patch the `AndroidTV.update()` method."""
return patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.update",
return_value=(
state,
current_app,
running_apps,
device,
is_volume_muted,
volume_level,
),
)
PATCH_LAUNCH_APP = patch("androidtv.basetv.basetv_async.BaseTVAsync.launch_app")
PATCH_STOP_APP = patch("androidtv.basetv.basetv_async.BaseTVAsync.stop_app")
# Cause the update to raise an unexpected type of exception
PATCH_ANDROIDTV_UPDATE_EXCEPTION = patch(
"androidtv.androidtv.androidtv_async.AndroidTVAsync.update",
side_effect=ZeroDivisionError,
)
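# Illustrative usage sketch (hypothetical test body): each helper above returns a
# dict of patchers keyed by KEY_PYTHON / KEY_SERVER, so a test picks the flavour
# it needs and enters the patchers as context managers, e.g.
#
#     with patch_connect(True)[KEY_PYTHON], patch_shell("1")[KEY_PYTHON]:
#         ...  # exercise the integration against the fake ADB device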
|
import asyncio
from datetime import timedelta
import logging
from random import randrange
import aiohttp
from homeassistant.components.sensor import DEVICE_CLASS_POWER
from homeassistant.const import POWER_WATT
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle, dt as dt_util
from .const import DOMAIN as TIBBER_DOMAIN, MANUFACTURER
_LOGGER = logging.getLogger(__name__)
ICON = "mdi:currency-usd"
SCAN_INTERVAL = timedelta(minutes=1)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
PARALLEL_UPDATES = 0
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the Tibber sensor."""
tibber_connection = hass.data.get(TIBBER_DOMAIN)
dev = []
for home in tibber_connection.get_homes(only_active=False):
try:
await home.update_info()
except asyncio.TimeoutError as err:
_LOGGER.error("Timeout connecting to Tibber home: %s ", err)
raise PlatformNotReady() from err
except aiohttp.ClientError as err:
_LOGGER.error("Error connecting to Tibber home: %s ", err)
raise PlatformNotReady() from err
if home.has_active_subscription:
dev.append(TibberSensorElPrice(home))
if home.has_real_time_consumption:
dev.append(TibberSensorRT(home))
async_add_entities(dev, True)
class TibberSensor(Entity):
"""Representation of a generic Tibber sensor."""
def __init__(self, tibber_home):
"""Initialize the sensor."""
self._tibber_home = tibber_home
self._last_updated = None
self._state = None
self._is_available = False
self._device_state_attributes = {}
self._name = tibber_home.info["viewer"]["home"]["appNickname"]
if self._name is None:
self._name = tibber_home.info["viewer"]["home"]["address"].get(
"address1", ""
)
self._spread_load_constant = randrange(3600)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._device_state_attributes
@property
def model(self):
"""Return the model of the sensor."""
return None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def device_id(self):
"""Return the ID of the physical device this sensor is part of."""
home = self._tibber_home.info["viewer"]["home"]
return home["meteringPointData"]["consumptionEan"]
@property
def device_info(self):
"""Return the device_info of the device."""
device_info = {
"identifiers": {(TIBBER_DOMAIN, self.device_id)},
"name": self.name,
"manufacturer": MANUFACTURER,
}
if self.model is not None:
device_info["model"] = self.model
return device_info
class TibberSensorElPrice(TibberSensor):
"""Representation of a Tibber sensor for el price."""
async def async_update(self):
"""Get the latest data and updates the states."""
now = dt_util.now()
if (
self._tibber_home.current_price_total
and self._last_updated
and self._last_updated.hour == now.hour
and self._tibber_home.last_data_timestamp
):
return
if (
not self._tibber_home.last_data_timestamp
or (self._tibber_home.last_data_timestamp - now).total_seconds()
< 5 * 3600 + self._spread_load_constant
or not self._is_available
):
_LOGGER.debug("Asking for new data")
await self._fetch_data()
res = self._tibber_home.current_price_data()
self._state, price_level, self._last_updated = res
self._device_state_attributes["price_level"] = price_level
attrs = self._tibber_home.current_attributes()
self._device_state_attributes.update(attrs)
self._is_available = self._state is not None
@property
def available(self):
"""Return True if entity is available."""
return self._is_available
@property
def name(self):
"""Return the name of the sensor."""
return f"Electricity price {self._name}"
@property
def model(self):
"""Return the model of the sensor."""
return "Price Sensor"
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return self._tibber_home.price_unit
@property
def unique_id(self):
"""Return a unique ID."""
return self.device_id
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def _fetch_data(self):
_LOGGER.debug("Fetching data")
try:
await self._tibber_home.update_info_and_price_info()
except (asyncio.TimeoutError, aiohttp.ClientError):
return
data = self._tibber_home.info["viewer"]["home"]
self._device_state_attributes["app_nickname"] = data["appNickname"]
self._device_state_attributes["grid_company"] = data["meteringPointData"][
"gridCompany"
]
self._device_state_attributes["estimated_annual_consumption"] = data[
"meteringPointData"
]["estimatedAnnualConsumption"]
class TibberSensorRT(TibberSensor):
"""Representation of a Tibber sensor for real time consumption."""
async def async_added_to_hass(self):
"""Start listen for real time data."""
await self._tibber_home.rt_subscribe(self.hass.loop, self._async_callback)
async def _async_callback(self, payload):
"""Handle received data."""
errors = payload.get("errors")
if errors:
_LOGGER.error(errors[0])
return
data = payload.get("data")
if data is None:
return
live_measurement = data.get("liveMeasurement")
if live_measurement is None:
return
self._state = live_measurement.pop("power", None)
for key, value in live_measurement.items():
if value is None:
continue
self._device_state_attributes[key] = value
self.async_write_ha_state()
@property
def available(self):
"""Return True if entity is available."""
return self._tibber_home.rt_subscription_running
@property
def model(self):
"""Return the model of the sensor."""
return "Tibber Pulse"
@property
def name(self):
"""Return the name of the sensor."""
return f"Real time consumption {self._name}"
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return POWER_WATT
@property
def unique_id(self):
"""Return a unique ID."""
return f"{self.device_id}_rt_consumption"
@property
def device_class(self):
"""Return the device class of the sensor."""
return DEVICE_CLASS_POWER
|
import datetime
import json
import logging
import re
import growattServer
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
CURRENCY_EURO,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_CURRENT,
DEVICE_CLASS_ENERGY,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
DEVICE_CLASS_VOLTAGE,
ELECTRICAL_CURRENT_AMPERE,
ENERGY_KILO_WATT_HOUR,
FREQUENCY_HERTZ,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
VOLT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_PLANT_ID = "plant_id"
DEFAULT_PLANT_ID = "0"
DEFAULT_NAME = "Growatt"
SCAN_INTERVAL = datetime.timedelta(minutes=5)
# Sensor type order is: Sensor name, Unit of measurement, api data name, additional options
TOTAL_SENSOR_TYPES = {
"total_money_today": ("Total money today", CURRENCY_EURO, "plantMoneyText", {}),
"total_money_total": ("Money lifetime", CURRENCY_EURO, "totalMoneyText", {}),
"total_energy_today": (
"Energy Today",
ENERGY_KILO_WATT_HOUR,
"todayEnergy",
{"device_class": DEVICE_CLASS_ENERGY},
),
"total_output_power": (
"Output Power",
POWER_WATT,
"invTodayPpv",
{"device_class": DEVICE_CLASS_POWER},
),
"total_energy_output": (
"Lifetime energy output",
ENERGY_KILO_WATT_HOUR,
"totalEnergy",
{"device_class": DEVICE_CLASS_ENERGY},
),
"total_maximum_output": (
"Maximum power",
POWER_WATT,
"nominalPower",
{"device_class": DEVICE_CLASS_POWER},
),
}
INVERTER_SENSOR_TYPES = {
"inverter_energy_today": (
"Energy today",
ENERGY_KILO_WATT_HOUR,
"powerToday",
{"round": 1, "device_class": DEVICE_CLASS_ENERGY},
),
"inverter_energy_total": (
"Lifetime energy output",
ENERGY_KILO_WATT_HOUR,
"powerTotal",
{"round": 1, "device_class": DEVICE_CLASS_ENERGY},
),
"inverter_voltage_input_1": (
"Input 1 voltage",
VOLT,
"vpv1",
{"round": 2, "device_class": DEVICE_CLASS_VOLTAGE},
),
"inverter_amperage_input_1": (
"Input 1 Amperage",
ELECTRICAL_CURRENT_AMPERE,
"ipv1",
{"round": 1, "device_class": DEVICE_CLASS_CURRENT},
),
"inverter_wattage_input_1": (
"Input 1 Wattage",
POWER_WATT,
"ppv1",
{"device_class": DEVICE_CLASS_POWER, "round": 1},
),
"inverter_voltage_input_2": (
"Input 2 voltage",
VOLT,
"vpv2",
{"round": 1, "device_class": DEVICE_CLASS_VOLTAGE},
),
"inverter_amperage_input_2": (
"Input 2 Amperage",
ELECTRICAL_CURRENT_AMPERE,
"ipv2",
{"round": 1, "device_class": DEVICE_CLASS_CURRENT},
),
"inverter_wattage_input_2": (
"Input 2 Wattage",
POWER_WATT,
"ppv2",
{"device_class": DEVICE_CLASS_POWER, "round": 1},
),
"inverter_voltage_input_3": (
"Input 3 voltage",
VOLT,
"vpv3",
{"round": 1, "device_class": DEVICE_CLASS_VOLTAGE},
),
"inverter_amperage_input_3": (
"Input 3 Amperage",
ELECTRICAL_CURRENT_AMPERE,
"ipv3",
{"round": 1, "device_class": DEVICE_CLASS_CURRENT},
),
"inverter_wattage_input_3": (
"Input 3 Wattage",
POWER_WATT,
"ppv3",
{"device_class": DEVICE_CLASS_POWER, "round": 1},
),
"inverter_internal_wattage": (
"Internal wattage",
POWER_WATT,
"ppv",
{"device_class": DEVICE_CLASS_POWER, "round": 1},
),
"inverter_reactive_voltage": (
"Reactive voltage",
VOLT,
"vacr",
{"round": 1, "device_class": DEVICE_CLASS_VOLTAGE},
),
"inverter_inverter_reactive_amperage": (
"Reactive amperage",
ELECTRICAL_CURRENT_AMPERE,
"iacr",
{"round": 1, "device_class": DEVICE_CLASS_CURRENT},
),
"inverter_frequency": ("AC frequency", FREQUENCY_HERTZ, "fac", {"round": 1}),
"inverter_current_wattage": (
"Output power",
POWER_WATT,
"pac",
{"device_class": DEVICE_CLASS_POWER, "round": 1},
),
"inverter_current_reactive_wattage": (
"Reactive wattage",
POWER_WATT,
"pacr",
{"device_class": DEVICE_CLASS_POWER, "round": 1},
),
"inverter_ipm_temperature": (
"Intelligent Power Management temperature",
TEMP_CELSIUS,
"ipmTemperature",
{"device_class": DEVICE_CLASS_TEMPERATURE, "round": 1},
),
"inverter_temperature": (
"Temperature",
TEMP_CELSIUS,
"temperature",
{"device_class": DEVICE_CLASS_TEMPERATURE, "round": 1},
),
}
STORAGE_SENSOR_TYPES = {
"storage_storage_production_today": (
"Storage production today",
ENERGY_KILO_WATT_HOUR,
"eBatDisChargeToday",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_storage_production_lifetime": (
"Lifetime Storage production",
ENERGY_KILO_WATT_HOUR,
"eBatDisChargeTotal",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_grid_discharge_today": (
"Grid discharged today",
ENERGY_KILO_WATT_HOUR,
"eacDisChargeToday",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_load_consumption_today": (
"Load consumption today",
ENERGY_KILO_WATT_HOUR,
"eopDischrToday",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_load_consumption_lifetime": (
"Lifetime load consumption",
ENERGY_KILO_WATT_HOUR,
"eopDischrTotal",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_grid_charged_today": (
"Grid charged today",
ENERGY_KILO_WATT_HOUR,
"eacChargeToday",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_charge_storage_lifetime": (
"Lifetime storaged charged",
ENERGY_KILO_WATT_HOUR,
"eChargeTotal",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_solar_production": (
"Solar power production",
POWER_WATT,
"ppv",
{"device_class": DEVICE_CLASS_POWER},
),
"storage_battery_percentage": (
"Battery percentage",
PERCENTAGE,
"capacity",
{"device_class": DEVICE_CLASS_BATTERY},
),
"storage_power_flow": (
"Storage charging/ discharging(-ve)",
POWER_WATT,
"pCharge",
{"device_class": DEVICE_CLASS_POWER},
),
"storage_load_consumption_solar_storage": (
"Load consumption(Solar + Storage)",
"VA",
"rateVA",
{},
),
"storage_charge_today": (
"Charge today",
ENERGY_KILO_WATT_HOUR,
"eChargeToday",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_import_from_grid": (
"Import from grid",
POWER_WATT,
"pAcInPut",
{"device_class": DEVICE_CLASS_POWER},
),
"storage_import_from_grid_today": (
"Import from grid today",
ENERGY_KILO_WATT_HOUR,
"eToUserToday",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_import_from_grid_total": (
"Import from grid total",
ENERGY_KILO_WATT_HOUR,
"eToUserTotal",
{"device_class": DEVICE_CLASS_ENERGY},
),
"storage_load_consumption": (
"Load consumption",
POWER_WATT,
"outPutPower",
{"device_class": DEVICE_CLASS_POWER},
),
"storage_grid_voltage": (
"AC input voltage",
VOLT,
"vGrid",
{"round": 2, "device_class": DEVICE_CLASS_VOLTAGE},
),
"storage_pv_charging_voltage": (
"PV charging voltage",
VOLT,
"vpv",
{"round": 2, "device_class": DEVICE_CLASS_VOLTAGE},
),
"storage_ac_input_frequency_out": (
"AC input frequency",
FREQUENCY_HERTZ,
"freqOutPut",
{"round": 2},
),
"storage_output_voltage": (
"Output voltage",
VOLT,
"outPutVolt",
{"round": 2, "device_class": DEVICE_CLASS_VOLTAGE},
),
"storage_ac_output_frequency": (
"Ac output frequency",
FREQUENCY_HERTZ,
"freqGrid",
{"round": 2},
),
"storage_current_PV": (
"Solar charge current",
ELECTRICAL_CURRENT_AMPERE,
"iAcCharge",
{"round": 2, "device_class": DEVICE_CLASS_CURRENT},
),
"storage_current_1": (
"Solar current to storage",
ELECTRICAL_CURRENT_AMPERE,
"iChargePV1",
{"round": 2, "device_class": DEVICE_CLASS_CURRENT},
),
"storage_grid_amperage_input": (
"Grid charge current",
ELECTRICAL_CURRENT_AMPERE,
"chgCurr",
{"round": 2, "device_class": DEVICE_CLASS_CURRENT},
),
"storage_grid_out_current": (
"Grid out current",
ELECTRICAL_CURRENT_AMPERE,
"outPutCurrent",
{"round": 2, "device_class": DEVICE_CLASS_CURRENT},
),
"storage_battery_voltage": (
"Battery voltage",
VOLT,
"vBat",
{"round": 2, "device_class": DEVICE_CLASS_VOLTAGE},
),
"storage_load_percentage": (
"Load percentage",
PERCENTAGE,
"loadPercent",
{"device_class": DEVICE_CLASS_BATTERY, "round": 2},
),
}
SENSOR_TYPES = {**TOTAL_SENSOR_TYPES, **INVERTER_SENSOR_TYPES, **STORAGE_SENSOR_TYPES}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PLANT_ID, default=DEFAULT_PLANT_ID): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Growatt sensor."""
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
plant_id = config[CONF_PLANT_ID]
name = config[CONF_NAME]
api = growattServer.GrowattApi()
# Log in to api and fetch first plant if no plant id is defined.
login_response = api.login(username, password)
if not login_response["success"] and login_response["errCode"] == "102":
_LOGGER.error("Username or Password may be incorrect!")
return
user_id = login_response["userId"]
if plant_id == DEFAULT_PLANT_ID:
plant_info = api.plant_list(user_id)
plant_id = plant_info["data"][0]["plantId"]
# Get a list of devices for specified plant to add sensors for.
devices = api.device_list(plant_id)
entities = []
probe = GrowattData(api, username, password, plant_id, "total")
for sensor in TOTAL_SENSOR_TYPES:
entities.append(
GrowattInverter(probe, f"{name} Total", sensor, f"{plant_id}-{sensor}")
)
# Add sensors for each device in the specified plant.
for device in devices:
probe = GrowattData(
api, username, password, device["deviceSn"], device["deviceType"]
)
sensors = []
if device["deviceType"] == "inverter":
sensors = INVERTER_SENSOR_TYPES
elif device["deviceType"] == "storage":
probe.plant_id = plant_id
sensors = STORAGE_SENSOR_TYPES
else:
_LOGGER.debug(
"Device type %s was found but is not supported right now.",
device["deviceType"],
)
for sensor in sensors:
entities.append(
GrowattInverter(
probe,
f"{device['deviceAilas']}",
sensor,
f"{device['deviceSn']}-{sensor}",
)
)
add_entities(entities, True)
class GrowattInverter(Entity):
"""Representation of a Growatt Sensor."""
def __init__(self, probe, name, sensor, unique_id):
"""Initialize a PVOutput sensor."""
self.sensor = sensor
self.probe = probe
self._name = name
self._state = None
self._unique_id = unique_id
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name} {SENSOR_TYPES[self.sensor][0]}"
@property
def unique_id(self):
"""Return the unique id of the sensor."""
return self._unique_id
@property
def icon(self):
"""Return the icon of the sensor."""
return "mdi:solar-power"
@property
def state(self):
"""Return the state of the sensor."""
result = self.probe.get_data(SENSOR_TYPES[self.sensor][2])
round_to = SENSOR_TYPES[self.sensor][3].get("round")
if round_to is not None:
result = round(result, round_to)
return result
@property
def device_class(self):
"""Return the device class of the sensor."""
return SENSOR_TYPES[self.sensor][3].get("device_class")
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return SENSOR_TYPES[self.sensor][1]
def update(self):
"""Get the latest data from the Growat API and updates the state."""
self.probe.update()
class GrowattData:
"""The class for handling data retrieval."""
def __init__(self, api, username, password, device_id, growatt_type):
"""Initialize the probe."""
self.growatt_type = growatt_type
self.api = api
self.device_id = device_id
self.plant_id = None
self.data = {}
self.username = username
self.password = password
@Throttle(SCAN_INTERVAL)
def update(self):
"""Update probe data."""
self.api.login(self.username, self.password)
_LOGGER.debug("Updating data for %s", self.device_id)
try:
if self.growatt_type == "total":
total_info = self.api.plant_info(self.device_id)
del total_info["deviceList"]
# PlantMoneyText comes in as "3.1/€" remove anything that isn't part of the number
total_info["plantMoneyText"] = re.sub(
r"[^\d.,]", "", total_info["plantMoneyText"]
)
self.data = total_info
elif self.growatt_type == "inverter":
inverter_info = self.api.inverter_detail(self.device_id)
self.data = inverter_info
elif self.growatt_type == "storage":
storage_info_detail = self.api.storage_params(self.device_id)[
"storageDetailBean"
]
storage_energy_overview = self.api.storage_energy_overview(
self.plant_id, self.device_id
)
self.data = {**storage_info_detail, **storage_energy_overview}
except json.decoder.JSONDecodeError:
_LOGGER.error("Unable to fetch data from Growatt server")
def get_data(self, variable):
"""Get the data."""
return self.data.get(variable)
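# Illustrative example of how a SENSOR_TYPES entry is consumed by GrowattInverter
# (see the comment above TOTAL_SENSOR_TYPES for the tuple layout):
#
#     name, unit, api_key, options = SENSOR_TYPES["inverter_temperature"]
#     # name -> "Temperature", unit -> TEMP_CELSIUS, api_key -> "temperature",
#     # options -> {"device_class": DEVICE_CLASS_TEMPERATURE, "round": 1}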
|
from datetime import date, datetime, timedelta
import logging
_LOGGER = logging.getLogger(__name__)
DAY_TO_NUMBER = {
"Mo": 1,
"M": 1,
"Tu": 2,
"We": 3,
"W": 3,
"Th": 4,
"Fr": 5,
"F": 5,
"Sa": 6,
"Su": 7,
}
def calculate_next_active_alarms(alarms):
"""
Calculate garmin next active alarms from settings.
Alarms are sorted by time
"""
active_alarms = []
_LOGGER.debug(alarms)
for alarm_setting in alarms:
if alarm_setting["alarmMode"] != "ON":
continue
for day in alarm_setting["alarmDays"]:
alarm_time = alarm_setting["alarmTime"]
if day == "ONCE":
midnight = datetime.combine(date.today(), datetime.min.time())
alarm = midnight + timedelta(minutes=alarm_time)
if alarm < datetime.now():
alarm += timedelta(days=1)
else:
start_of_week = datetime.combine(
date.today() - timedelta(days=datetime.today().isoweekday() % 7),
datetime.min.time(),
)
days_to_add = DAY_TO_NUMBER[day] % 7
alarm = start_of_week + timedelta(minutes=alarm_time, days=days_to_add)
if alarm < datetime.now():
alarm += timedelta(days=7)
active_alarms.append(alarm.isoformat())
return sorted(active_alarms) if active_alarms else None
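if __name__ == "__main__":
    # Illustrative usage only: a weekly alarm at 06:30 (390 minutes past
    # midnight) on Mondays and Fridays, currently switched on.
    example_settings = [
        {"alarmMode": "ON", "alarmTime": 390, "alarmDays": ["Mo", "Fr"]}
    ]
    print(calculate_next_active_alarms(example_settings))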
|
from datetime import timedelta
import logging
from pyarlo import PyArlo
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.const import CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import track_time_interval
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by arlo.netgear.com"
DATA_ARLO = "data_arlo"
DEFAULT_BRAND = "Netgear Arlo"
DOMAIN = "arlo"
NOTIFICATION_ID = "arlo_notification"
NOTIFICATION_TITLE = "Arlo Component Setup"
SCAN_INTERVAL = timedelta(seconds=60)
SIGNAL_UPDATE_ARLO = "arlo_update"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): cv.time_period,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up an Arlo component."""
conf = config[DOMAIN]
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
scan_interval = conf[CONF_SCAN_INTERVAL]
try:
arlo = PyArlo(username, password, preload=False)
if not arlo.is_connected:
return False
# assign refresh period to base station thread
arlo_base_station = next((station for station in arlo.base_stations), None)
if arlo_base_station is not None:
arlo_base_station.refresh_rate = scan_interval.total_seconds()
elif not arlo.cameras:
_LOGGER.error("No Arlo camera or base station available")
return False
hass.data[DATA_ARLO] = arlo
except (ConnectTimeout, HTTPError) as ex:
_LOGGER.error("Unable to connect to Netgear Arlo: %s", str(ex))
hass.components.persistent_notification.create(
f"Error: {ex}<br />You will need to restart hass after fixing.",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
return False
def hub_refresh(event_time):
"""Call ArloHub to refresh information."""
_LOGGER.debug("Updating Arlo Hub component")
hass.data[DATA_ARLO].update(update_cameras=True, update_base_station=True)
dispatcher_send(hass, SIGNAL_UPDATE_ARLO)
# register service
hass.services.register(DOMAIN, "update", hub_refresh)
# register scan interval for ArloHub
track_time_interval(hass, hub_refresh, scan_interval)
return True
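# Example configuration.yaml entry for this component, matching CONFIG_SCHEMA
# above (the values are placeholders):
#
#   arlo:
#     username: your_arlo_account@example.org
#     password: !secret arlo_password
#     scan_interval: 90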
|
import multiprocessing
import os
import signal
import tempfile
import time
import unittest
import shutil
import openrazer.client
import openrazer_daemon.daemon
import openrazer._fake_driver as fake_driver
def run_daemon(daemon_dir, driver_dir):
# TODO console_log false
daemon = openrazer_daemon.daemon.RazerDaemon(verbose=True, console_log=False, test_dir=driver_dir)
daemon.run()
class DeviceManagerTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls._daemon_dir = tempfile.mkdtemp(prefix='tmp_', suffix='_daemondata')
cls._tmp_dir = tempfile.mkdtemp(prefix='tmp_', suffix='_daemontest')
cls._bw_serial = 'IO0000000000001'
cls._bw_chroma = fake_driver.FakeDevice('razerblackwidowchroma', serial=cls._bw_serial, tmp_dir=cls._tmp_dir)
print("Created BlackWidow Chroma endpoints")
cls._daemon_proc = multiprocessing.Process(target=run_daemon, args=(cls._daemon_dir, cls._tmp_dir))
cls._daemon_proc.start()
print("Started daemon")
time.sleep(5)
@classmethod
def tearDownClass(cls):
print("Stopping daemon")
os.kill(cls._daemon_proc.pid, signal.SIGINT)
time.sleep(3)
if cls._daemon_proc.is_alive():
print("Daemon still alive...")
time.sleep(8)
if cls._daemon_proc.is_alive():
cls._daemon_proc.terminate()
if cls._daemon_proc.is_alive():
print("Failed to kill daemon")
cls._bw_chroma.close()
shutil.rmtree(cls._tmp_dir)
shutil.rmtree(cls._daemon_dir)
time.sleep(5)
def setUp(self):
self._bw_chroma.create_endpoints()
self.device_manager = openrazer.client.DeviceManager()
def test_device_list(self):
self.assertEqual(len(self.device_manager.devices), 1)
def test_serial(self):
device = self.device_manager.devices[0]
self.assertEqual(device.serial, self._bw_chroma.get('device_serial'))
def test_name(self):
device = self.device_manager.devices[0]
self.assertEqual(device.name, self._bw_chroma.get('device_type'))
def test_type(self):
device = self.device_manager.devices[0]
self.assertEqual(device.type, 'keyboard')
def test_fw_version(self):
device = self.device_manager.devices[0]
self.assertEqual(device.firmware_version, self._bw_chroma.get('firmware_version'))
def test_brightness(self):
device = self.device_manager.devices[0]
# Test 100%
device.brightness = 100.0
self.assertEqual('255', self._bw_chroma.get('matrix_brightness'))
self.assertEqual(100.0, device.brightness)
device.brightness = 50.0
self.assertEqual('127', self._bw_chroma.get('matrix_brightness'))
self.assertAlmostEqual(50.0, device.brightness, delta=0.4)
device.brightness = 0.0
self.assertEqual('0', self._bw_chroma.get('matrix_brightness'))
self.assertEqual(0, device.brightness)
def test_capabilities(self):
device = self.device_manager.devices[0]
self.assertEqual(device.capabilities, device._capabilities)
def test_device_keyboard_game_mode(self):
device = self.device_manager.devices[0]
self._bw_chroma.set('mode_game', '1')
self.assertTrue(device.game_mode_led)
device.game_mode_led = False
self.assertEqual(self._bw_chroma.get('mode_game'), '0')
device.game_mode_led = True
self.assertEqual(self._bw_chroma.get('mode_game'), '1')
def test_device_keyboard_macro_mode(self):
device = self.device_manager.devices[0]
self._bw_chroma.set('mode_macro', '1')
self.assertTrue(device.macro_mode_led)
device.macro_mode_led = False
self.assertEqual(self._bw_chroma.get('mode_macro'), '0')
device.macro_mode_led = True
self.assertEqual(self._bw_chroma.get('mode_macro'), '1')
self._bw_chroma.set('mode_macro_effect', '0')
self.assertEqual(device.macro_mode_led_effect, openrazer.client.constants.MACRO_LED_STATIC)
device.macro_mode_led_effect = openrazer.client.constants.MACRO_LED_BLINK
        self.assertEqual(self._bw_chroma.get('mode_macro_effect'), str(openrazer.client.constants.MACRO_LED_BLINK))
def test_device_keyboard_effect_none(self):
device = self.device_manager.devices[0]
device.fx.none()
self.assertEqual(self._bw_chroma.get('matrix_effect_none'), '1')
def test_device_keyboard_effect_spectrum(self):
device = self.device_manager.devices[0]
device.fx.spectrum()
self.assertEqual(self._bw_chroma.get('matrix_effect_spectrum'), '1')
def test_device_keyboard_effect_wave(self):
device = self.device_manager.devices[0]
device.fx.wave(openrazer.client.constants.WAVE_LEFT)
self.assertEqual(self._bw_chroma.get('matrix_effect_wave'), str(openrazer.client.constants.WAVE_LEFT))
device.fx.wave(openrazer.client.constants.WAVE_RIGHT)
self.assertEqual(self._bw_chroma.get('matrix_effect_wave'), str(openrazer.client.constants.WAVE_RIGHT))
with self.assertRaises(ValueError):
device.fx.wave('lalala')
def test_device_keyboard_effect_static(self):
device = self.device_manager.devices[0]
device.fx.static(255, 0, 255)
self.assertEqual(b'\xFF\x00\xFF', self._bw_chroma.get('matrix_effect_static', binary=True))
for red, green, blue in ((256.0, 0, 0), (0, 256.0, 0), (0, 0, 256.0)):
with self.assertRaises(ValueError):
device.fx.static(red, green, blue)
device.fx.static(256, 0, 700)
self.assertEqual(b'\xFF\x00\xFF', self._bw_chroma.get('matrix_effect_static', binary=True))
def test_device_keyboard_effect_reactive(self):
device = self.device_manager.devices[0]
        # Use a distinct name so the imported time module isn't shadowed
        reactive_time = openrazer.client.constants.REACTIVE_500MS
        device.fx.reactive(255, 0, 255, reactive_time)
        self.assertEqual(b'\x01\xFF\x00\xFF', self._bw_chroma.get('matrix_effect_reactive', binary=True))
        for red, green, blue in ((256.0, 0, 0), (0, 256.0, 0), (0, 0, 256.0)):
            with self.assertRaises(ValueError):
                device.fx.reactive(red, green, blue, reactive_time)
        device.fx.reactive(256, 0, 700, reactive_time)
        self.assertEqual(b'\x01\xFF\x00\xFF', self._bw_chroma.get('matrix_effect_reactive', binary=True))
        with self.assertRaises(ValueError):
            device.fx.reactive(255, 0, 255, 'lalala')
def test_device_keyboard_effect_breath_single(self):
device = self.device_manager.devices[0]
device.fx.breath_single(255, 0, 255)
self.assertEqual(b'\xFF\x00\xFF', self._bw_chroma.get('matrix_effect_breath', binary=True))
for red, green, blue in ((256.0, 0, 0), (0, 256.0, 0), (0, 0, 256.0)):
with self.assertRaises(ValueError):
device.fx.breath_single(red, green, blue)
device.fx.breath_single(256, 0, 700)
self.assertEqual(b'\xFF\x00\xFF', self._bw_chroma.get('matrix_effect_breath', binary=True))
def test_device_keyboard_effect_breath_dual(self):
device = self.device_manager.devices[0]
device.fx.breath_dual(255, 0, 255, 255, 0, 0)
self.assertEqual(b'\xFF\x00\xFF\xFF\x00\x00', self._bw_chroma.get('matrix_effect_breath', binary=True))
for r1, g1, b1, r2, g2, b2 in ((256.0, 0, 0, 0, 0, 0), (0, 256.0, 0, 0, 0, 0), (0, 0, 256.0, 0, 0, 0),
(0, 0, 0, 256.0, 0, 0), (0, 0, 0, 0, 256.0, 0), (0, 0, 0, 0, 0, 256.0)):
with self.assertRaises(ValueError):
device.fx.breath_dual(r1, g1, b1, r2, g2, b2)
device.fx.breath_dual(256, 0, 700, 255, 0, 0)
self.assertEqual(b'\xFF\x00\xFF\xFF\x00\x00', self._bw_chroma.get('matrix_effect_breath', binary=True))
def test_device_keyboard_effect_breath_random(self):
device = self.device_manager.devices[0]
device.fx.breath_random()
self.assertEqual(self._bw_chroma.get('matrix_effect_breath'), '1')
def test_device_keyboard_effect_ripple(self):
device = self.device_manager.devices[0]
refresh_rate = 0.01
device.fx.ripple(255, 0, 255, refresh_rate)
time.sleep(0.1)
custom_effect_payload = self._bw_chroma.get('matrix_custom_frame', binary=True)
self.assertGreater(len(custom_effect_payload), 1)
self.assertEqual(self._bw_chroma.get('matrix_effect_custom'), '1')
for red, green, blue in ((256.0, 0, 0), (0, 256.0, 0), (0, 0, 256.0)):
with self.assertRaises(ValueError):
device.fx.reactive(red, green, blue, refresh_rate)
with self.assertRaises(ValueError):
device.fx.reactive(255, 0, 255, 'lalala')
device.fx.none()
def test_device_keyboard_effect_random_ripple(self):
device = self.device_manager.devices[0]
refresh_rate = 0.01
device.fx.ripple_random(refresh_rate)
time.sleep(0.1)
custom_effect_payload = self._bw_chroma.get('matrix_custom_frame', binary=True)
self.assertGreater(len(custom_effect_payload), 1)
self.assertEqual(self._bw_chroma.get('matrix_effect_custom'), '1')
with self.assertRaises(ValueError):
device.fx.ripple_random('lalala')
device.fx.none()
def test_device_keyboard_effect_framebuffer(self):
device = self.device_manager.devices[0]
device.fx.advanced.matrix.set(0, 0, (255, 0, 255))
self.assertEqual(device.fx.advanced.matrix.get(0, 0), (255, 0, 255))
device.fx.advanced.draw()
custom_effect_payload = self._bw_chroma.get('matrix_custom_frame', binary=True)
self.assertEqual(custom_effect_payload[:4], b'\x00\xFF\x00\xFF')
device.fx.advanced.matrix.to_framebuffer() # Save 255, 0, 255
device.fx.advanced.matrix.reset() # Clear FB
device.fx.advanced.matrix.set(0, 0, (0, 255, 0))
device.fx.advanced.draw_fb_or() # Draw FB or'd with Matrix
custom_effect_payload = self._bw_chroma.get('matrix_custom_frame', binary=True)
self.assertEqual(custom_effect_payload[:4], b'\x00\xFF\xFF\xFF')
# Append that to FB
device.fx.advanced.matrix.to_framebuffer_or()
device.fx.advanced.draw()
custom_effect_payload = self._bw_chroma.get('matrix_custom_frame', binary=True)
binary = device.fx.advanced.matrix.to_binary()
self.assertEqual(binary, custom_effect_payload)
def test_device_keyboard_macro_enable(self):
device = self.device_manager.devices[0]
device.macro.enable_macros()
self.assertEqual(self._bw_chroma.get('macro_keys'), '1')
def test_device_keyboard_macro_add(self):
device = self.device_manager.devices[0]
url_macro = device.macro.create_url_macro_item('http://example.org')
device.macro.add_macro('M1', [url_macro])
macros = device.macro.get_macros()
self.assertIn('M1', macros)
with self.assertRaises(ValueError):
device.macro.add_macro('M6', url_macro) # Unknown key
with self.assertRaises(ValueError):
            device.macro.add_macro('M1', 'lalala')  # Not a sequence
with self.assertRaises(ValueError):
device.macro.add_macro('M1', ['lalala']) # Bad element in sequence
def test_device_keyboard_macro_del(self):
device = self.device_manager.devices[0]
url_macro = device.macro.create_url_macro_item('http://example.org')
device.macro.add_macro('M2', [url_macro])
macros = device.macro.get_macros()
self.assertIn('M2', macros)
device.macro.del_macro('M2')
macros = device.macro.get_macros()
self.assertNotIn('M2', macros)
with self.assertRaises(ValueError):
device.macro.del_macro('M6') # Unknown key
if __name__ == "__main__":
unittest.main()
|
import collections
import difflib
def find_common_prefix(a, b):
if not a or not b:
return 0
if a[0] == b[0]:
pointermax = min(len(a), len(b))
pointermid = pointermax
pointermin = 0
while pointermin < pointermid:
if a[pointermin:pointermid] == b[pointermin:pointermid]:
pointermin = pointermid
else:
pointermax = pointermid
pointermid = int((pointermax - pointermin) // 2 + pointermin)
return pointermid
return 0
def find_common_suffix(a, b):
if not a or not b:
return 0
if a[-1] == b[-1]:
pointermax = min(len(a), len(b))
pointermid = pointermax
pointermin = 0
while pointermin < pointermid:
a_tail = a[-pointermid:len(a) - pointermin]
b_tail = b[-pointermid:len(b) - pointermin]
if a_tail == b_tail:
pointermin = pointermid
else:
pointermax = pointermid
pointermid = int((pointermax - pointermin) // 2 + pointermin)
return pointermid
return 0
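# Quick illustrative check of the two helpers above: they binary-search for
# the longest common prefix/suffix of any sliceable sequences (strings here,
# though the matchers below apply them to lists of lines).
if __name__ == "__main__":
    assert find_common_prefix("abcXdef", "abcYdef") == 3
    assert find_common_suffix("abcXdef", "abcYdef") == 3
    assert find_common_prefix(["a", "b"], ["a", "c"]) == 1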
class DiffChunk(collections.namedtuple(
'DiffChunk', 'tag, start_a, end_a, start_b, end_b')):
__slots__ = ()
class MyersSequenceMatcher(difflib.SequenceMatcher):
def __init__(self, isjunk=None, a="", b=""):
if isjunk is not None:
raise NotImplementedError('isjunk is not supported yet')
# The sequences we're comparing must be considered immutable;
# calling e.g., GtkTextBuffer methods to retrieve these line-by-line
# isn't really a thing we can or should do.
self.a = a[:]
self.b = b[:]
self.matching_blocks = self.opcodes = None
self.aindex = []
self.bindex = []
self.common_prefix = self.common_suffix = 0
self.lines_discarded = False
def get_matching_blocks(self):
if self.matching_blocks is None:
for i in self.initialise():
pass
return self.matching_blocks
def get_opcodes(self):
opcodes = super().get_opcodes()
return [DiffChunk._make(chunk) for chunk in opcodes]
def get_difference_opcodes(self):
return [chunk for chunk in self.get_opcodes() if chunk.tag != "equal"]
def preprocess_remove_prefix_suffix(self, a, b):
# remove common prefix and common suffix
self.common_prefix = self.common_suffix = 0
self.common_prefix = find_common_prefix(a, b)
if self.common_prefix > 0:
a = a[self.common_prefix:]
b = b[self.common_prefix:]
if len(a) > 0 and len(b) > 0:
self.common_suffix = find_common_suffix(a, b)
if self.common_suffix > 0:
a = a[:len(a) - self.common_suffix]
b = b[:len(b) - self.common_suffix]
return (a, b)
def preprocess_discard_nonmatching_lines(self, a, b):
# discard lines that do not match any line from the other file
if len(a) == 0 or len(b) == 0:
self.aindex = []
self.bindex = []
return (a, b)
def index_matching(a, b):
aset = frozenset(a)
matches, index = [], []
for i, line in enumerate(b):
if line in aset:
matches.append(line)
index.append(i)
return matches, index
indexed_b, self.bindex = index_matching(a, b)
indexed_a, self.aindex = index_matching(b, a)
# We only use the optimised result if it's worthwhile. The constant
# represents a heuristic of how many lines constitute 'worthwhile'.
self.lines_discarded = (len(b) - len(indexed_b) > 10 or
len(a) - len(indexed_a) > 10)
if self.lines_discarded:
a = indexed_a
b = indexed_b
return (a, b)
def preprocess(self):
"""
Pre-processing optimizations:
1) remove common prefix and common suffix
2) remove lines that do not match
"""
a, b = self.preprocess_remove_prefix_suffix(self.a, self.b)
return self.preprocess_discard_nonmatching_lines(a, b)
def postprocess(self):
"""
Perform some post-processing cleanup to reduce 'chaff' and make
the result more human-readable. Since Myers diff is a greedy
algorithm backward scanning of matching chunks might reveal
some smaller chunks that can be combined together.
"""
mb = [self.matching_blocks[-1]]
i = len(self.matching_blocks) - 2
while i >= 0:
cur_a, cur_b, cur_len = self.matching_blocks[i]
i -= 1
while i >= 0:
prev_a, prev_b, prev_len = self.matching_blocks[i]
if prev_b + prev_len == cur_b or prev_a + prev_len == cur_a:
prev_slice_a = self.a[cur_a - prev_len:cur_a]
prev_slice_b = self.b[cur_b - prev_len:cur_b]
if prev_slice_a == prev_slice_b:
cur_b -= prev_len
cur_a -= prev_len
cur_len += prev_len
i -= 1
continue
break
mb.append((cur_a, cur_b, cur_len))
mb.reverse()
self.matching_blocks = mb
def build_matching_blocks(self, lastsnake):
"""Build list of matching blocks based on snakes
The resulting blocks take into consideration multiple preprocessing
optimizations:
* add separate blocks for common prefix and suffix
* shift positions and split blocks based on the list of discarded
non-matching lines
"""
self.matching_blocks = matching_blocks = []
common_prefix = self.common_prefix
common_suffix = self.common_suffix
aindex = self.aindex
bindex = self.bindex
while lastsnake is not None:
lastsnake, x, y, snake = lastsnake
if self.lines_discarded:
# split snakes if needed because of discarded lines
x += snake - 1
y += snake - 1
xprev = aindex[x] + common_prefix
yprev = bindex[y] + common_prefix
if snake > 1:
newsnake = 1
for i in range(1, snake):
x -= 1
y -= 1
xnext = aindex[x] + common_prefix
ynext = bindex[y] + common_prefix
if (xprev - xnext != 1) or (yprev - ynext != 1):
matching_blocks.insert(0, (xprev, yprev, newsnake))
newsnake = 0
xprev = xnext
yprev = ynext
newsnake += 1
matching_blocks.insert(0, (xprev, yprev, newsnake))
else:
matching_blocks.insert(0, (xprev, yprev, snake))
else:
matching_blocks.insert(0, (x + common_prefix,
y + common_prefix, snake))
if common_prefix:
matching_blocks.insert(0, (0, 0, common_prefix))
if common_suffix:
matching_blocks.append((len(self.a) - common_suffix,
len(self.b) - common_suffix,
common_suffix))
matching_blocks.append((len(self.a), len(self.b), 0))
# clean-up to free memory
self.aindex = self.bindex = None
def initialise(self):
"""
Optimized implementation of the O(NP) algorithm described by Sun Wu,
Udi Manber, Gene Myers, Webb Miller
("An O(NP) Sequence Comparison Algorithm", 1989)
http://research.janelia.org/myers/Papers/np_diff.pdf
"""
a, b = self.preprocess()
m = len(a)
n = len(b)
middle = m + 1
lastsnake = None
delta = n - m + middle
dmin = min(middle, delta)
dmax = max(middle, delta)
if n > 0 and m > 0:
size = n + m + 2
fp = [(-1, None)] * size
p = -1
while True:
p += 1
if not p % 100:
yield None
# move along vertical edge
yv = -1
node = None
for km in range(dmin - p, delta, 1):
t = fp[km + 1]
if yv < t[0]:
yv, node = t
else:
yv += 1
x = yv - km + middle
if x < m and yv < n and a[x] == b[yv]:
snake = x
x += 1
yv += 1
while x < m and yv < n and a[x] == b[yv]:
x += 1
yv += 1
snake = x - snake
node = (node, x - snake, yv - snake, snake)
fp[km] = (yv, node)
# move along horizontal edge
yh = -1
node = None
for km in range(dmax + p, delta, -1):
t = fp[km - 1]
if yh <= t[0]:
yh, node = t
yh += 1
x = yh - km + middle
if x < m and yh < n and a[x] == b[yh]:
snake = x
x += 1
yh += 1
while x < m and yh < n and a[x] == b[yh]:
x += 1
yh += 1
snake = x - snake
node = (node, x - snake, yh - snake, snake)
fp[km] = (yh, node)
# point on the diagonal that leads to the sink
if yv < yh:
y, node = fp[delta + 1]
else:
y, node = fp[delta - 1]
y += 1
x = y - delta + middle
if x < m and y < n and a[x] == b[y]:
snake = x
x += 1
y += 1
while x < m and y < n and a[x] == b[y]:
x += 1
y += 1
snake = x - snake
node = (node, x - snake, y - snake, snake)
fp[delta] = (y, node)
if y >= n:
lastsnake = node
break
self.build_matching_blocks(lastsnake)
self.postprocess()
yield 1
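# Minimal usage sketch: the matcher mirrors difflib.SequenceMatcher, but
# get_opcodes() returns DiffChunk namedtuples and initialise() is a generator,
# so a caller (e.g. a UI idle loop) can interleave other work between steps.
if __name__ == "__main__":
    old_lines = ["one", "two", "three", "four"]
    new_lines = ["one", "2", "three", "four", "five"]
    matcher = MyersSequenceMatcher(None, old_lines, new_lines)
    for _ in matcher.initialise():
        pass  # a GUI would yield back to its main loop here instead
    for chunk in matcher.get_difference_opcodes():
        print(chunk.tag, chunk.start_a, chunk.end_a, chunk.start_b, chunk.end_b)
    # Expected, roughly: a 'replace' covering "two" -> "2" and an
    # 'insert' adding "five".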
class InlineMyersSequenceMatcher(MyersSequenceMatcher):
def preprocess_discard_nonmatching_lines(self, a, b):
if len(a) <= 2 and len(b) <= 2:
self.aindex = []
self.bindex = []
return (a, b)
def index_matching_kmers(a, b):
aset = set([a[i:i + 3] for i in range(len(a) - 2)])
matches, index = [], []
next_poss_match = 0
# Start from where we can get a valid triple
for i in range(2, len(b)):
if b[i - 2:i + 1] not in aset:
continue
# Make sure we don't re-record matches from overlapping kmers
for j in range(max(next_poss_match, i - 2), i + 1):
matches.append(b[j])
index.append(j)
next_poss_match = i + 1
return matches, index
indexed_b, self.bindex = index_matching_kmers(a, b)
indexed_a, self.aindex = index_matching_kmers(b, a)
# We only use the optimised result if it's worthwhile. The constant
# represents a heuristic of how many lines constitute 'worthwhile'.
self.lines_discarded = (len(b) - len(indexed_b) > 10 or
len(a) - len(indexed_a) > 10)
if self.lines_discarded:
a = indexed_a
b = indexed_b
return (a, b)
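# The inline variant above indexes overlapping 3-character k-mers, which makes
# it suited to character-level (within a single line) comparison rather than
# lists of lines. A hedged sketch:
if __name__ == "__main__":
    inline = InlineMyersSequenceMatcher(None, "the quick brown fox", "the quick red fox")
    for _ in inline.initialise():
        pass
    print(inline.get_difference_opcodes())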
class SyncPointMyersSequenceMatcher(MyersSequenceMatcher):
def __init__(self, isjunk=None, a="", b="", syncpoints=None):
super().__init__(isjunk, a, b)
self.isjunk = isjunk
self.syncpoints = syncpoints
def initialise(self):
if self.syncpoints is None or len(self.syncpoints) == 0:
for i in super().initialise():
yield i
else:
chunks = []
ai = 0
bi = 0
for aj, bj in self.syncpoints:
chunks.append((ai, bi, self.a[ai:aj], self.b[bi:bj]))
ai = aj
bi = bj
if ai < len(self.a) or bi < len(self.b):
chunks.append((ai, bi, self.a[ai:], self.b[bi:]))
self.split_matching_blocks = []
self.matching_blocks = []
for ai, bi, a, b in chunks:
matching_blocks = []
matcher = MyersSequenceMatcher(self.isjunk, a, b)
for i in matcher.initialise():
yield None
blocks = matcher.get_matching_blocks()
mb_len = len(matching_blocks) - 1
if mb_len >= 0 and len(blocks) > 1:
aj = matching_blocks[mb_len][0]
bj = matching_blocks[mb_len][1]
bl = matching_blocks[mb_len][2]
if (aj + bl == ai and bj + bl == bi and
blocks[0][0] == 0 and blocks[0][1] == 0):
block = blocks.pop(0)
matching_blocks[mb_len] = (aj, bj, bl + block[2])
for x, y, l in blocks[:-1]:
matching_blocks.append((ai + x, bi + y, l))
self.matching_blocks.extend(matching_blocks)
                # Each split matching block needs to be terminated so that our
                # split chunks are created correctly
self.split_matching_blocks.append(
matching_blocks + [(ai + len(a), bi + len(b), 0)])
self.matching_blocks.append((len(self.a), len(self.b), 0))
yield 1
def get_opcodes(self):
# This is just difflib.SequenceMatcher.get_opcodes in which we instead
# iterate over our internal set of split matching blocks.
if self.opcodes is not None:
return self.opcodes
i = j = 0
self.opcodes = opcodes = []
self.get_matching_blocks()
for matching_blocks in self.split_matching_blocks:
for ai, bj, size in matching_blocks:
tag = ''
if i < ai and j < bj:
tag = 'replace'
elif i < ai:
tag = 'delete'
elif j < bj:
tag = 'insert'
if tag:
opcodes.append((tag, i, ai, j, bj))
i, j = ai + size, bj + size
# the list of matching blocks is terminated by a
# sentinel with size 0
if size:
opcodes.append(('equal', ai, i, bj, j))
return [DiffChunk._make(chunk) for chunk in opcodes]
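# Hedged sketch of the sync-point variant: syncpoints are (index_a, index_b)
# pairs that force the two sequences to be treated as aligned at those
# offsets, so each region between consecutive sync points is diffed
# independently and the resulting opcodes are stitched back together.
if __name__ == "__main__":
    left = ["head", "x", "y", "tail"]
    right = ["head", "p", "q", "r", "tail"]
    sync = SyncPointMyersSequenceMatcher(None, left, right, syncpoints=[(1, 1), (3, 4)])
    for _ in sync.initialise():
        pass
    for chunk in sync.get_opcodes():
        print(chunk)
    # Expected, roughly: 'equal' for the shared "head"/"tail" regions and a
    # 'replace' for everything between the two sync points.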
|