import typing
import numbers
import hyperopt
import hyperopt.pyll.base
class HyperoptProxy(object):
"""
Hyperopt proxy class.
See `hyperopt`'s documentation for more details:
https://github.com/hyperopt/hyperopt/wiki/FMin
Reason for these wrappers:
A hyper space in `hyperopt` requires a `label` to instantiate. This
`label` is later used as a reference back to the original hyper space
that was sampled. In `matchzoo`, hyper spaces are used in
:class:`matchzoo.engine.Param`. Only if a hyper space's label
matches its parent :class:`matchzoo.engine.Param`'s name can `matchzoo`
correctly back-reference the sampled parameter. This could be
enforced by asking the user to always use the same name for a parameter
and its hyper space, but typos can occur. As a result, these wrappers
are created to hide hyper spaces' `label` and always bind it
correctly to its parameter's name.
Examples::
>>> import matchzoo as mz
>>> from hyperopt.pyll.stochastic import sample
Basic Usage:
>>> model = mz.models.DenseBaseline()
>>> sample(model.params.hyper_space) # doctest: +SKIP
{'mlp_num_layers': 1.0, 'mlp_num_units': 274.0}
Arithmetic Operations:
>>> new_space = 2 ** mz.hyper_spaces.quniform(2, 6)
>>> model.params.get('mlp_num_layers').hyper_space = new_space
>>> sample(model.params.hyper_space) # doctest: +SKIP
{'mlp_num_layers': 8.0, 'mlp_num_units': 292.0}
"""
def __init__(
self,
hyperopt_func: typing.Callable[..., hyperopt.pyll.Apply],
**kwargs
):
"""
:class:`HyperoptProxy` constructor.
:param hyperopt_func: Target `hyperopt.hp` function to proxy.
:param kwargs: Keyword arguments of the proxy function, must pass all
parameters in `hyperopt_func`.
"""
self._func = hyperopt_func
self._kwargs = kwargs
def convert(self, name: str) -> hyperopt.pyll.Apply:
"""
Attach `name` as `hyperopt.hp`'s `label`.
:param name: the name to attach as the hyper space's `label`, typically the parameter's name
:return: a `hyperopt` ready search space
"""
return self._func(name, **self._kwargs)
def __add__(self, other):
"""__add__."""
return _wrap_as_composite_func(self, other, lambda x, y: x + y)
def __radd__(self, other):
"""__radd__."""
return _wrap_as_composite_func(self, other, lambda x, y: x + y)
def __sub__(self, other):
"""__sub__."""
return _wrap_as_composite_func(self, other, lambda x, y: x - y)
def __rsub__(self, other):
"""__rsub__."""
return _wrap_as_composite_func(self, other, lambda x, y: y - x)
def __mul__(self, other):
"""__mul__."""
return _wrap_as_composite_func(self, other, lambda x, y: x * y)
def __rmul__(self, other):
"""__rmul__."""
return _wrap_as_composite_func(self, other, lambda x, y: x * y)
def __truediv__(self, other):
"""__truediv__."""
return _wrap_as_composite_func(self, other, lambda x, y: x / y)
def __rtruediv__(self, other):
"""__rtruediv__."""
return _wrap_as_composite_func(self, other, lambda x, y: y / x)
def __floordiv__(self, other):
"""__floordiv__."""
return _wrap_as_composite_func(self, other, lambda x, y: x // y)
def __rfloordiv__(self, other):
"""__rfloordiv__."""
return _wrap_as_composite_func(self, other, lambda x, y: y // x)
def __pow__(self, other):
"""__pow__."""
return _wrap_as_composite_func(self, other, lambda x, y: x ** y)
def __rpow__(self, other):
"""__rpow__."""
return _wrap_as_composite_func(self, other, lambda x, y: y ** x)
def __neg__(self):
"""__neg__."""
return _wrap_as_composite_func(self, None, lambda x, _: -x)
def _wrap_as_composite_func(self, other, func):
def _wrapper(name, **kwargs):
return func(self._func(name, **kwargs), other)
return HyperoptProxy(_wrapper, **self._kwargs)
class choice(HyperoptProxy):
""":func:`hyperopt.hp.choice` proxy."""
def __init__(self, options: list):
"""
:func:`hyperopt.hp.choice` proxy.
:param options: options to search from
"""
super().__init__(hyperopt_func=hyperopt.hp.choice, options=options)
self._options = options
def __str__(self):
""":return: `str` representation of the hyper space."""
return f'choice in {self._options}'
class quniform(HyperoptProxy):
""":func:`hyperopt.hp.quniform` proxy."""
def __init__(
self,
low: numbers.Number,
high: numbers.Number,
q: numbers.Number = 1
):
"""
:func:`hyperopt.hp.quniform` proxy.
If using with integer values, then `high` is exclusive.
:param low: lower bound of the space
:param high: upper bound of the space
:param q: similar to the `step` in the python built-in `range`
"""
super().__init__(hyperopt_func=hyperopt.hp.quniform,
low=low,
high=high, q=q)
self._low = low
self._high = high
self._q = q
def __str__(self):
""":return: `str` representation of the hyper space."""
return f'quantitative uniform distribution in ' \
f'[{self._low}, {self._high}), with a step size of {self._q}'
class uniform(HyperoptProxy):
""":func:`hyperopt.hp.uniform` proxy."""
def __init__(
self,
low: numbers.Number,
high: numbers.Number
):
"""
:func:`hyperopt.hp.uniform` proxy.
:param low: lower bound of the space
:param high: upper bound of the space
"""
super().__init__(hyperopt_func=hyperopt.hp.uniform, low=low, high=high)
self._low = low
self._high = high
def __str__(self):
""":return: `str` representation of the hyper space."""
return f'uniform distribution in [{self._low}, {self._high})'
def sample(space):
"""
Take a sample in the hyper space.
This function is stateless, so the distribution of the samples is different
from that of a `tune` call. It only gives a general idea of what a sample
from the `space` looks like.
Example:
>>> import matchzoo as mz
>>> space = mz.models.Naive.get_default_params().hyper_space
>>> mz.hyper_spaces.sample(space) # doctest: +ELLIPSIS
{'optimizer': ...}
"""
return hyperopt.pyll.stochastic.sample(space)
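# A minimal sketch of how `convert` binds a parameter name to a hyperopt label,
# using the proxies defined above; the sampled value is illustrative only:
#
#   >>> import hyperopt.pyll.stochastic
#   >>> space = quniform(1, 4).convert('mlp_num_layers')
#   >>> isinstance(space, hyperopt.pyll.base.Apply)
#   True
#   >>> hyperopt.pyll.stochastic.sample(space)  # doctest: +SKIP
#   2.0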
|
from datetime import timedelta
from typing import Any, Callable, Dict, List, Optional
from homeassistant.components.sensor import DEVICE_CLASS_CURRENT
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DATA_BYTES,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TIMESTAMP,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util.dt import utcnow
from . import WLEDDataUpdateCoordinator, WLEDDeviceEntity
from .const import ATTR_LED_COUNT, ATTR_MAX_POWER, CURRENT_MA, DOMAIN
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up WLED sensor based on a config entry."""
coordinator: WLEDDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
sensors = [
WLEDEstimatedCurrentSensor(entry.entry_id, coordinator),
WLEDUptimeSensor(entry.entry_id, coordinator),
WLEDFreeHeapSensor(entry.entry_id, coordinator),
WLEDWifiBSSIDSensor(entry.entry_id, coordinator),
WLEDWifiChannelSensor(entry.entry_id, coordinator),
WLEDWifiRSSISensor(entry.entry_id, coordinator),
WLEDWifiSignalSensor(entry.entry_id, coordinator),
]
async_add_entities(sensors, True)
class WLEDSensor(WLEDDeviceEntity):
"""Defines a WLED sensor."""
def __init__(
self,
*,
coordinator: WLEDDataUpdateCoordinator,
enabled_default: bool = True,
entry_id: str,
icon: str,
key: str,
name: str,
unit_of_measurement: Optional[str] = None,
) -> None:
"""Initialize WLED sensor."""
self._unit_of_measurement = unit_of_measurement
self._key = key
super().__init__(
entry_id=entry_id,
coordinator=coordinator,
name=name,
icon=icon,
enabled_default=enabled_default,
)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return f"{self.coordinator.data.info.mac_address}_{self._key}"
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
class WLEDEstimatedCurrentSensor(WLEDSensor):
"""Defines a WLED estimated current sensor."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize WLED estimated current sensor."""
super().__init__(
coordinator=coordinator,
entry_id=entry_id,
icon="mdi:power",
key="estimated_current",
name=f"{coordinator.data.info.name} Estimated Current",
unit_of_measurement=CURRENT_MA,
)
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the entity."""
return {
ATTR_LED_COUNT: self.coordinator.data.info.leds.count,
ATTR_MAX_POWER: self.coordinator.data.info.leds.max_power,
}
@property
def state(self) -> int:
"""Return the state of the sensor."""
return self.coordinator.data.info.leds.power
@property
def device_class(self) -> Optional[str]:
"""Return the class of this sensor."""
return DEVICE_CLASS_CURRENT
class WLEDUptimeSensor(WLEDSensor):
"""Defines a WLED uptime sensor."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize WLED uptime sensor."""
super().__init__(
coordinator=coordinator,
enabled_default=False,
entry_id=entry_id,
icon="mdi:clock-outline",
key="uptime",
name=f"{coordinator.data.info.name} Uptime",
)
@property
def state(self) -> str:
"""Return the state of the sensor."""
uptime = utcnow() - timedelta(seconds=self.coordinator.data.info.uptime)
return uptime.replace(microsecond=0).isoformat()
@property
def device_class(self) -> Optional[str]:
"""Return the class of this sensor."""
return DEVICE_CLASS_TIMESTAMP
class WLEDFreeHeapSensor(WLEDSensor):
"""Defines a WLED free heap sensor."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize WLED free heap sensor."""
super().__init__(
coordinator=coordinator,
enabled_default=False,
entry_id=entry_id,
icon="mdi:memory",
key="free_heap",
name=f"{coordinator.data.info.name} Free Memory",
unit_of_measurement=DATA_BYTES,
)
@property
def state(self) -> int:
"""Return the state of the sensor."""
return self.coordinator.data.info.free_heap
class WLEDWifiSignalSensor(WLEDSensor):
"""Defines a WLED Wi-Fi signal sensor."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize WLED Wi-Fi signal sensor."""
super().__init__(
coordinator=coordinator,
enabled_default=False,
entry_id=entry_id,
icon="mdi:wifi",
key="wifi_signal",
name=f"{coordinator.data.info.name} Wi-Fi Signal",
unit_of_measurement=PERCENTAGE,
)
@property
def state(self) -> int:
"""Return the state of the sensor."""
return self.coordinator.data.info.wifi.signal
class WLEDWifiRSSISensor(WLEDSensor):
"""Defines a WLED Wi-Fi RSSI sensor."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize WLED Wi-Fi RSSI sensor."""
super().__init__(
coordinator=coordinator,
enabled_default=False,
entry_id=entry_id,
icon="mdi:wifi",
key="wifi_rssi",
name=f"{coordinator.data.info.name} Wi-Fi RSSI",
unit_of_measurement=SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
)
@property
def state(self) -> int:
"""Return the state of the sensor."""
return self.coordinator.data.info.wifi.rssi
@property
def device_class(self) -> Optional[str]:
"""Return the class of this sensor."""
return DEVICE_CLASS_SIGNAL_STRENGTH
class WLEDWifiChannelSensor(WLEDSensor):
"""Defines a WLED Wi-Fi Channel sensor."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize WLED Wi-Fi Channel sensor."""
super().__init__(
coordinator=coordinator,
enabled_default=False,
entry_id=entry_id,
icon="mdi:wifi",
key="wifi_channel",
name=f"{coordinator.data.info.name} Wi-Fi Channel",
)
@property
def state(self) -> int:
"""Return the state of the sensor."""
return self.coordinator.data.info.wifi.channel
class WLEDWifiBSSIDSensor(WLEDSensor):
"""Defines a WLED Wi-Fi BSSID sensor."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize WLED Wi-Fi BSSID sensor."""
super().__init__(
coordinator=coordinator,
enabled_default=False,
entry_id=entry_id,
icon="mdi:wifi",
key="wifi_bssid",
name=f"{coordinator.data.info.name} Wi-Fi BSSID",
)
@property
def state(self) -> str:
"""Return the state of the sensor."""
return self.coordinator.data.info.wifi.bssid
|
import asyncio
import io
import logging
from PIL import UnidentifiedImageError
import aiohttp
import async_timeout
from colorthief import ColorThief
import voluptuous as vol
from homeassistant.components.color_extractor.const import (
ATTR_PATH,
ATTR_URL,
DOMAIN,
SERVICE_TURN_ON,
)
from homeassistant.components.light import (
ATTR_RGB_COLOR,
DOMAIN as LIGHT_DOMAIN,
LIGHT_TURN_ON_SCHEMA,
SERVICE_TURN_ON as LIGHT_SERVICE_TURN_ON,
)
from homeassistant.helpers import aiohttp_client
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Extend the existing light.turn_on service schema
SERVICE_SCHEMA = vol.All(
cv.has_at_least_one_key(ATTR_URL, ATTR_PATH),
cv.make_entity_service_schema(
{
**LIGHT_TURN_ON_SCHEMA,
vol.Exclusive(ATTR_PATH, "color_extractor"): cv.isfile,
vol.Exclusive(ATTR_URL, "color_extractor"): cv.url,
}
),
)
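# A hedged illustration of service data accepted by SERVICE_SCHEMA; the URL and
# entity_id are placeholders, ATTR_URL/ATTR_PATH are the keys imported from
# .const above, and regular light.turn_on fields (e.g. brightness) pass through:
#
#   {ATTR_URL: "https://example.com/poster.png", "entity_id": "light.living_room"}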
def _get_file(file_path):
"""Get a PIL acceptable input file reference.
Allows us to mock patch during testing to make BytesIO stream.
"""
return file_path
def _get_color(file_handler) -> tuple:
"""Given an image file, extract the predominant color from it."""
color_thief = ColorThief(file_handler)
# get_color returns a SINGLE RGB value for the given image
color = color_thief.get_color(quality=1)
_LOGGER.debug("Extracted RGB color %s from image", color)
return color
async def async_setup(hass, hass_config):
"""Set up services for color_extractor integration."""
async def async_handle_service(service_call):
"""Decide which color_extractor method to call based on service."""
service_data = dict(service_call.data)
try:
if ATTR_URL in service_data:
image_type = "URL"
image_reference = service_data.pop(ATTR_URL)
color = await async_extract_color_from_url(image_reference)
elif ATTR_PATH in service_data:
image_type = "file path"
image_reference = service_data.pop(ATTR_PATH)
color = await hass.async_add_executor_job(
extract_color_from_path, image_reference
)
except UnidentifiedImageError as ex:
_LOGGER.error(
"Bad image from %s '%s' provided, are you sure it's an image? %s",
image_type,
image_reference,
ex,
)
return
if color:
service_data[ATTR_RGB_COLOR] = color
await hass.services.async_call(
LIGHT_DOMAIN, LIGHT_SERVICE_TURN_ON, service_data, blocking=True
)
hass.services.async_register(
DOMAIN,
SERVICE_TURN_ON,
async_handle_service,
schema=SERVICE_SCHEMA,
)
async def async_extract_color_from_url(url):
"""Handle call for URL based image."""
if not hass.config.is_allowed_external_url(url):
_LOGGER.error(
"External URL '%s' is not allowed, please add to 'allowlist_external_urls'",
url,
)
return None
_LOGGER.debug("Getting predominant RGB from image URL '%s'", url)
# Download the image into a buffer for ColorThief to check against
try:
session = aiohttp_client.async_get_clientsession(hass)
with async_timeout.timeout(10):
response = await session.get(url)
except (asyncio.TimeoutError, aiohttp.ClientError) as err:
_LOGGER.error("Failed to get ColorThief image due to HTTPError: %s", err)
return None
content = await response.content.read()
with io.BytesIO(content) as _file:
_file.name = "color_extractor.jpg"
_file.seek(0)
return _get_color(_file)
def extract_color_from_path(file_path):
"""Handle call for local file based image."""
if not hass.config.is_allowed_path(file_path):
_LOGGER.error(
"File path '%s' is not allowed, please add to 'allowlist_external_dirs'",
file_path,
)
return None
_LOGGER.debug("Getting predominant RGB from file path '%s'", file_path)
_file = _get_file(file_path)
return _get_color(_file)
return True
|
import logging
import gammu # pylint: disable=import-error, no-member
import voluptuous as vol
from homeassistant.components.notify import PLATFORM_SCHEMA, BaseNotificationService
from homeassistant.const import CONF_NAME, CONF_RECIPIENT
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN, SMS_GATEWAY
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_RECIPIENT): cv.string, vol.Optional(CONF_NAME): cv.string}
)
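# A hedged configuration.yaml sketch for this notify platform (the recipient
# number is a placeholder; the name entry is optional per the schema above):
#
#   notify:
#     - platform: sms
#       name: sms_gateway
#       recipient: "+15551234567"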
def get_service(hass, config, discovery_info=None):
"""Get the SMS notification service."""
if SMS_GATEWAY not in hass.data[DOMAIN]:
_LOGGER.error("SMS gateway not found, cannot initialize service")
return
gateway = hass.data[DOMAIN][SMS_GATEWAY]
if discovery_info is None:
number = config[CONF_RECIPIENT]
else:
number = discovery_info[CONF_RECIPIENT]
return SMSNotificationService(gateway, number)
class SMSNotificationService(BaseNotificationService):
"""Implement the notification service for SMS."""
def __init__(self, gateway, number):
"""Initialize the service."""
self.gateway = gateway
self.number = number
async def async_send_message(self, message="", **kwargs):
"""Send SMS message."""
smsinfo = {
"Class": -1,
"Unicode": False,
"Entries": [{"ID": "ConcatenatedTextLong", "Buffer": message}],
}
try:
# Encode messages
encoded = gammu.EncodeSMS(smsinfo) # pylint: disable=no-member
except gammu.GSMError as exc: # pylint: disable=no-member
_LOGGER.error("Encoding message %s failed: %s", message, exc)
return
# Send messages
for encoded_message in encoded:
# Fill in numbers
encoded_message["SMSC"] = {"Location": 1}
encoded_message["Number"] = self.number
try:
# Actually send the message
await self.gateway.send_sms_async(encoded_message)
except gammu.GSMError as exc: # pylint: disable=no-member
_LOGGER.error("Sending to %s failed: %s", self.number, exc)
|
import pytest
from homeassistant.components.geonetnz_volcano import DOMAIN
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
CONF_UNIT_SYSTEM,
)
from tests.common import MockConfigEntry
@pytest.fixture
def config_entry():
"""Create a mock GeoNet NZ Volcano config entry."""
return MockConfigEntry(
domain=DOMAIN,
data={
CONF_LATITUDE: -41.2,
CONF_LONGITUDE: 174.7,
CONF_RADIUS: 25,
CONF_UNIT_SYSTEM: "metric",
CONF_SCAN_INTERVAL: 300.0,
},
title="-41.2, 174.7",
)
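# A hedged sketch of how this fixture is typically consumed in a test; the setup
# call is an assumption about the surrounding test suite, not taken from the source:
#
#   async def test_setup_entry(hass, config_entry):
#       config_entry.add_to_hass(hass)
#       assert await hass.config_entries.async_setup(config_entry.entry_id)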
|
import asyncio
import logging
import os
from pathlib import Path
from subprocess import PIPE, Popen
import sys
from typing import Optional
from urllib.parse import urlparse
import pkg_resources
if sys.version_info[:2] >= (3, 8):
from importlib.metadata import ( # pylint: disable=no-name-in-module,import-error
PackageNotFoundError,
version,
)
else:
from importlib_metadata import ( # pylint: disable=import-error
PackageNotFoundError,
version,
)
_LOGGER = logging.getLogger(__name__)
def is_virtual_env() -> bool:
"""Return if we run in a virtual environment."""
# Check supports venv && virtualenv
return getattr(sys, "base_prefix", sys.prefix) != sys.prefix or hasattr(
sys, "real_prefix"
)
def is_docker_env() -> bool:
"""Return True if we run in a docker env."""
return Path("/.dockerenv").exists()
def is_installed(package: str) -> bool:
"""Check if a package is installed and will be loaded when we import it.
Returns True when the requirement is met.
Returns False when the package is not installed or doesn't meet req.
"""
try:
req = pkg_resources.Requirement.parse(package)
except ValueError:
# This is a zip file. We no longer use this in Home Assistant,
# leaving it in for custom components.
req = pkg_resources.Requirement.parse(urlparse(package).fragment)
try:
return version(req.project_name) in req
except PackageNotFoundError:
return False
def install_package(
package: str,
upgrade: bool = True,
target: Optional[str] = None,
constraints: Optional[str] = None,
find_links: Optional[str] = None,
no_cache_dir: Optional[bool] = False,
) -> bool:
"""Install a package on PyPi. Accepts pip compatible package strings.
Return boolean if install successful.
"""
# Not using 'import pip; pip.main([])' because it breaks the logger
_LOGGER.info("Attempting install of %s", package)
env = os.environ.copy()
args = [sys.executable, "-m", "pip", "install", "--quiet", package]
if no_cache_dir:
args.append("--no-cache-dir")
if upgrade:
args.append("--upgrade")
if constraints is not None:
args += ["--constraint", constraints]
if find_links is not None:
args += ["--find-links", find_links, "--prefer-binary"]
if target:
assert not is_virtual_env()
# This only works if not running in venv
args += ["--user"]
env["PYTHONUSERBASE"] = os.path.abspath(target)
if sys.platform != "win32":
# Workaround for incompatible prefix setting
# See http://stackoverflow.com/a/4495175
args += ["--prefix="]
process = Popen(args, stdin=PIPE, stdout=PIPE, stderr=PIPE, env=env)
_, stderr = process.communicate()
if process.returncode != 0:
_LOGGER.error(
"Unable to install package %s: %s",
package,
stderr.decode("utf-8").lstrip().strip(),
)
return False
return True
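# A minimal usage sketch for the helpers above (the requirement string and
# constraints path are placeholders, not taken from the source):
#
#   if not is_installed("aiohttp>=3.7"):
#       install_package("aiohttp>=3.7", constraints="/tmp/constraints.txt")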
async def async_get_user_site(deps_dir: str) -> str:
"""Return user local library path.
This function is a coroutine.
"""
env = os.environ.copy()
env["PYTHONUSERBASE"] = os.path.abspath(deps_dir)
args = [sys.executable, "-m", "site", "--user-site"]
process = await asyncio.create_subprocess_exec(
*args,
stdin=asyncio.subprocess.PIPE,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.DEVNULL,
env=env,
)
stdout, _ = await process.communicate()
lib_dir = stdout.decode().strip()
return lib_dir
|
import logging
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_PROBLEM,
BinarySensorEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN, SIGNAL_UPDATE_SMARTY
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Smarty Binary Sensor Platform."""
smarty = hass.data[DOMAIN]["api"]
name = hass.data[DOMAIN]["name"]
sensors = [
AlarmSensor(name, smarty),
WarningSensor(name, smarty),
BoostSensor(name, smarty),
]
async_add_entities(sensors, True)
class SmartyBinarySensor(BinarySensorEntity):
"""Representation of a Smarty Binary Sensor."""
def __init__(self, name, device_class, smarty):
"""Initialize the entity."""
self._name = name
self._state = None
self._sensor_type = device_class
self._smarty = smarty
@property
def device_class(self):
"""Return the class of the sensor."""
return self._sensor_type
@property
def should_poll(self) -> bool:
"""Do not poll."""
return False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._state
async def async_added_to_hass(self):
"""Call to update."""
async_dispatcher_connect(self.hass, SIGNAL_UPDATE_SMARTY, self._update_callback)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
class BoostSensor(SmartyBinarySensor):
"""Boost State Binary Sensor."""
def __init__(self, name, smarty):
"""Alarm Sensor Init."""
super().__init__(name=f"{name} Boost State", device_class=None, smarty=smarty)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.boost
class AlarmSensor(SmartyBinarySensor):
"""Alarm Binary Sensor."""
def __init__(self, name, smarty):
"""Alarm Sensor Init."""
super().__init__(
name=f"{name} Alarm", device_class=DEVICE_CLASS_PROBLEM, smarty=smarty
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.alarm
class WarningSensor(SmartyBinarySensor):
"""Warning Sensor."""
def __init__(self, name, smarty):
"""Warning Sensor Init."""
super().__init__(
name=f"{name} Warning", device_class=DEVICE_CLASS_PROBLEM, smarty=smarty
)
def update(self) -> None:
"""Update state."""
_LOGGER.debug("Updating sensor %s", self._name)
self._state = self._smarty.warning
|
import io
import logging
import urllib.parse
try:
import requests
except ImportError:
MISSING_DEPS = True
from smart_open import utils, constants
import http.client as httplib
logger = logging.getLogger(__name__)
SCHEME = 'webhdfs'
URI_EXAMPLES = (
'webhdfs://host:port/path/file',
)
MIN_PART_SIZE = 50 * 1024**2 # minimum part size for HDFS multipart uploads
def parse_uri(uri_as_str):
return dict(scheme=SCHEME, uri=uri_as_str)
def open_uri(uri, mode, transport_params):
kwargs = utils.check_kwargs(open, transport_params)
return open(uri, mode, **kwargs)
def open(http_uri, mode, min_part_size=MIN_PART_SIZE):
"""
Parameters
----------
http_uri: str
webhdfs url converted to http REST url
min_part_size: int, optional
For writing only.
"""
if http_uri.startswith(SCHEME):
http_uri = _convert_to_http_uri(http_uri)
if mode == constants.READ_BINARY:
fobj = BufferedInputBase(http_uri)
elif mode == constants.WRITE_BINARY:
fobj = BufferedOutputBase(http_uri, min_part_size=min_part_size)
else:
raise NotImplementedError("webhdfs support for mode %r not implemented" % mode)
fobj.name = http_uri.split('/')[-1]
return fobj
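# A hedged usage sketch (host, port and path are placeholders); a webhdfs:// URI
# is converted to the HTTP REST form by _convert_to_http_uri below:
#
#   fin = open('webhdfs://namenode:50070/tmp/file.txt', constants.READ_BINARY)
#   data = fin.read()
#   fin.close()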
def _convert_to_http_uri(webhdfs_url):
"""
Convert webhdfs uri to http url and return it as text
Parameters
----------
webhdfs_url: str
A URL starting with webhdfs://
"""
split_uri = urllib.parse.urlsplit(webhdfs_url)
netloc = split_uri.hostname
if split_uri.port:
netloc += ":{}".format(split_uri.port)
query = split_uri.query
if split_uri.username:
query += (
("&" if query else "") + "user.name=" + urllib.parse.quote(split_uri.username)
)
return urllib.parse.urlunsplit(
("http", netloc, "/webhdfs/v1" + split_uri.path, query, "")
)
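# For example (derived from the rewriting rules above):
#
#   >>> _convert_to_http_uri('webhdfs://alice@namenode:50070/tmp/file.txt')
#   'http://namenode:50070/webhdfs/v1/tmp/file.txt?user.name=alice'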
#
# For old unit tests.
#
def convert_to_http_uri(parsed_uri):
return _convert_to_http_uri(parsed_uri.uri)
class BufferedInputBase(io.BufferedIOBase):
def __init__(self, uri):
self._uri = uri
payload = {"op": "OPEN", "offset": 0}
self._response = requests.get(self._uri, params=payload, stream=True)
if self._response.status_code != httplib.OK:
raise WebHdfsException.from_response(self._response)
self._buf = b''
#
# Override some methods from io.IOBase.
#
def close(self):
"""Flush and close this stream."""
logger.debug("close: called")
def readable(self):
"""Return True if the stream can be read from."""
return True
def seekable(self):
"""If False, seek(), tell() and truncate() will raise IOError.
We offer only seek support, and no truncate support."""
return False
#
# io.BufferedIOBase methods.
#
def detach(self):
"""Unsupported."""
raise io.UnsupportedOperation
def read(self, size=None):
if size is None:
self._buf, retval = b'', self._buf + self._response.raw.read()
return retval
elif size < len(self._buf):
self._buf, retval = self._buf[size:], self._buf[:size]
return retval
try:
while len(self._buf) < size:
self._buf += self._response.raw.read(io.DEFAULT_BUFFER_SIZE)
except StopIteration:
pass
self._buf, retval = self._buf[size:], self._buf[:size]
return retval
def read1(self, size=-1):
"""This is the same as read()."""
return self.read(size=size)
def readinto(self, b):
"""Read up to len(b) bytes into b, and return the number of bytes
read."""
data = self.read(len(b))
if not data:
return 0
b[:len(data)] = data
return len(data)
def readline(self):
self._buf, retval = b'', self._buf + self._response.raw.readline()
return retval
class BufferedOutputBase(io.BufferedIOBase):
def __init__(self, uri, min_part_size=MIN_PART_SIZE):
"""
Parameters
----------
min_part_size: int, optional
For writing only.
"""
self._uri = uri
self._closed = False
self.min_part_size = min_part_size
# creating empty file first
payload = {"op": "CREATE", "overwrite": True}
init_response = requests.put(self._uri, params=payload, allow_redirects=False)
if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
raise WebHdfsException.from_response(init_response)
uri = init_response.headers['location']
response = requests.put(uri, data="", headers={'content-type': 'application/octet-stream'})
if not response.status_code == httplib.CREATED:
raise WebHdfsException.from_response(response)
self.lines = []
self.parts = 0
self.chunk_bytes = 0
self.total_size = 0
#
# This member is part of the io.BufferedIOBase interface.
#
self.raw = None
#
# Override some methods from io.IOBase.
#
def writable(self):
"""Return True if the stream supports writing."""
return True
#
# io.BufferedIOBase methods.
#
def detach(self):
raise io.UnsupportedOperation("detach() not supported")
def _upload(self, data):
payload = {"op": "APPEND"}
init_response = requests.post(self._uri, params=payload, allow_redirects=False)
if not init_response.status_code == httplib.TEMPORARY_REDIRECT:
raise WebHdfsException.from_response(init_response)
uri = init_response.headers['location']
response = requests.post(uri, data=data,
headers={'content-type': 'application/octet-stream'})
if not response.status_code == httplib.OK:
raise WebHdfsException.from_response(response)
def write(self, b):
"""
Write the given bytes (binary string) into the WebHDFS file from constructor.
"""
if self._closed:
raise ValueError("I/O operation on closed file")
if not isinstance(b, bytes):
raise TypeError("input must be a binary string")
self.lines.append(b)
self.chunk_bytes += len(b)
self.total_size += len(b)
if self.chunk_bytes >= self.min_part_size:
buff = b"".join(self.lines)
logger.info(
"uploading part #%i, %i bytes (total %.3fGB)",
self.parts, len(buff), self.total_size / 1024.0 ** 3
)
self._upload(buff)
logger.debug("upload of part #%i finished", self.parts)
self.parts += 1
self.lines, self.chunk_bytes = [], 0
def close(self):
buff = b"".join(self.lines)
if buff:
logger.info(
"uploading last part #%i, %i bytes (total %.3fGB)",
self.parts, len(buff), self.total_size / 1024.0 ** 3
)
self._upload(buff)
logger.debug("upload of last part #%i finished", self.parts)
self._closed = True
@property
def closed(self):
return self._closed
class WebHdfsException(Exception):
def __init__(self, msg="", status_code=None):
self.msg = msg
self.status_code = status_code
super(WebHdfsException, self).__init__(repr(self))
def __repr__(self):
return "{}(status_code={}, msg={!r})".format(
self.__class__.__name__, self.status_code, self.msg
)
@classmethod
def from_response(cls, response):
return cls(msg=response.text, status_code=response.status_code)
|
from xml.etree import ElementTree as etree
import os.path
import getpass
import logging
import tempfile
import pkg_resources
from ..Telegraf.decoder import decoder
from yandextank.common.util import read_resource
import configparser
logger = logging.getLogger(__name__)
class ConfigManager(object):
"""
Config reader and parser helper.
XML support
"""
@staticmethod
def parse_xml(config):
if os.path.exists(config):
return etree.parse(config)
else:
return etree.fromstring(config)
def getconfig(self, filename, target_hint):
"""Prepare config data."""
try:
config = read_resource(filename)
tree = self.parse_xml(config)
except IOError as exc:
logger.error("Error loading config: %s", exc)
raise RuntimeError("Can't read monitoring config %s" % filename)
hosts = tree.findall('Host')
config = []
for host in hosts:
host_config = self.get_host_config(host, target_hint)
config.append(host_config)
return config
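# A hedged sketch of the XML this reader expects; the root element name and the
# attribute values are illustrative (the parser only looks for Host children),
# and the section/attribute names follow the defaults in get_host_config below:
#
#   <Monitoring>
#       <Host address="[target]" interval="1" username="tank">
#           <CPU percpu="true"/>
#           <Custom diff="1" label="curl_time">curl -s -o /dev/null localhost</Custom>
#       </Host>
#   </Monitoring>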
def get_host_config(self, host, target_hint):
defaults = {
"CPU": {
"name": '[inputs.cpu]',
"percpu": 'false',
"fielddrop": '["time_*", "usage_guest_nice"]'
},
"Memory": {
"name": '[inputs.mem]',
"fielddrop":
'["active", "inactive", "total", "used_per*", "avail*"]',
},
"Disk": {
"name": '[inputs.diskio]',
"devices": '[{devices}]'.format(
devices=",".join(
['"vda%s","sda%s"' % (num, num) for num in range(6)])),
},
"Net": {
"name": '[inputs.net]',
"interfaces": '[{interfaces}]'.format(
interfaces=",".join(
['"eth%s"' % (num) for num in range(6)])),
"fielddrop":
'["icmp*", "ip*", "udplite*", "tcp*", "udp*", "drop*", "err*"]',
},
"Nstat": {
"name": '[inputs.nstat]',
"fieldpass": '["TcpRetransSegs"]',
},
"Netstat": {
"name": '[inputs.netstat]',
},
"NetResponse": {
"name": '[inputs.net_response]',
"protocol": '"tcp"',
"address": '":80"',
"timeout": '"1s"'
},
"System": {
"name": '[inputs.system]',
"fielddrop": '["n_users", "n_cpus", "uptime*"]',
},
"Kernel": {
"name": '[inputs.kernel]',
"fielddrop": '["boot_time"]',
},
"KernelVmstat": {
"name": '[inputs.kernel_vmstat]',
"fieldpass": '["pgfault", "pgmajfault"]',
}
}
defaults_enabled = ['CPU', 'Memory', 'Disk', 'Net', 'System', 'Kernel']
defaults_boolean = [
'percpu', 'round_interval', 'fielddrop', 'fieldpass', 'interfaces',
'devices'
]
hostname = host.get('address').lower()
if hostname == '[target]':
if not target_hint:
raise ValueError(
"Can't use `[target]` keyword with no target parameter specified"
)
logger.debug("Using target hint: %s", target_hint)
hostname = target_hint.lower()
custom = []
startups = []
shutdowns = []
sources = []
telegrafraw = []
# agent defaults
host_config = {}
for metric in host:
if str(metric.tag) in defaults:
for key in tuple(defaults[metric.tag].keys()):
if key != 'name' and key not in defaults_boolean:
value = metric.get(key, None)
if value:
defaults[metric.tag][key] = "'{value}'".format(
value=value)
elif key in defaults_boolean:
value = metric.get(key, None)
if value:
defaults[metric.tag][key] = "{value}".format(
value=value)
host_config[metric.tag] = defaults[metric.tag]
# custom metrics
if (str(metric.tag)).lower() == 'custom':
isdiff = metric.get('diff', 0)
cmd = {
'cmd': metric.text,
'label': metric.get('label'),
'diff': isdiff
}
custom.append(cmd)
elif (str(metric.tag)).lower() == 'startup':
startups.append(metric.text)
elif (str(metric.tag)).lower() == 'shutdown':
shutdowns.append(metric.text)
elif (str(metric.tag)).lower() == 'source':
sources.append(metric.text)
elif (str(metric.tag)).lower() == 'telegrafraw':
telegrafraw.append(metric.text)
if len(host_config) == 0:
logger.info('Empty host config, using defaults')
for section in defaults_enabled:
host_config[section] = defaults[section]
result = {
'host_config': host_config,
'port': int(host.get('port', 22)),
'python': host.get('python', '/usr/bin/env python3'),
'interval': host.get('interval', 1),
'username': host.get('username', getpass.getuser()),
'telegraf': host.get('telegraf', '/usr/bin/telegraf'),
'comment': host.get('comment', ''),
'custom': custom,
'host': hostname,
'startup': startups,
'shutdown': shutdowns,
'source': sources,
'telegrafraw': telegrafraw
}
logger.info("Telegraf Result config %s", result)
return result
class AgentConfig(object):
""" Agent config generator helper """
def __init__(self, config, old_style_configs):
self.host = config['host']
self.custom = config['custom']
self.startups = config['startup']
self.shutdowns = config['shutdown']
self.sources = config['source']
self.interval = config['interval']
self.comment = config['comment']
self.telegrafraw = config['telegrafraw']
self.host_config = config['host_config']
self.old_style_configs = old_style_configs
def create_startup_config(self):
""" Startup and shutdown commands config
Used by agent.py on the target
"""
cfg_path = "agent_startup_{}.cfg".format(self.host)
if os.path.isfile(cfg_path):
logger.info(
'Found agent startup config file in working directory with the same name as created for host %s.\n'
'Creating new one via tempfile. This will affect predictable filenames for agent artefacts',
self.host)
handle, cfg_path = tempfile.mkstemp('.cfg', 'agent_')
os.close(handle)
try:
config = configparser.RawConfigParser(strict=False)
# FIXME incinerate such a string formatting inside a method call
# T_T
config.add_section('startup')
[
config.set('startup', "cmd%s" % idx, cmd)
for idx, cmd in enumerate(self.startups)
]
config.add_section('shutdown')
[
config.set('shutdown', "cmd%s" % idx, cmd)
for idx, cmd in enumerate(self.shutdowns)
]
config.add_section('source')
[
config.set('source', "file%s" % idx, path)
for idx, path in enumerate(self.sources)
]
with open(cfg_path, 'w') as fds:
config.write(fds)
except Exception as exc:
logger.error(
'Error trying to create monitoring startups config. Malformed? %s',
exc,
exc_info=True)
return cfg_path
def create_custom_exec_script(self):
""" bash script w/ custom commands inside
inspired by half a night trying to avoid escaping bash special characters
"""
cfg_path = "agent_customs_{}.cfg".format(self.host)
if os.path.isfile(cfg_path):
logger.info(
'Found agent custom execs config file in working directory with the same name as created for host %s.\n'
'Creating new one via tempfile. This will affect predictable filenames for agent artefacts',
self.host)
handle, cfg_path = tempfile.mkstemp('.sh', 'agent_customs_')
os.close(handle)
cmds = ""
for idx, cmd in enumerate(self.custom):
cmds += "-{idx}) {cmd};;\n".format(idx=idx, cmd=cmd['cmd'])
customs_script = """
#!/bin/sh
while :
do
case "$1" in
{cmds}
*) break;;
esac
shift
done
""".format(cmds=cmds)
with open(cfg_path, 'w') as fds:
fds.write(customs_script)
return cfg_path
def create_collector_config(self, workdir):
""" Telegraf collector config,
toml format
"""
cfg_path = "agent_collector_{}.cfg".format(self.host)
if os.path.isfile(cfg_path):
logger.info(
'Found agent config file in working directory with the same name as created for host %s.\n'
'Creating new one via tempfile. This will affect predictable filenames for agent artefacts',
self.host)
handle, cfg_path = tempfile.mkstemp('.cfg', 'agent_collector_')
os.close(handle)
self.monitoring_data_output = "{remote_folder}/monitoring.rawdata".format(
remote_folder=workdir)
defaults_old_enabled = ['CPU', 'Memory', 'Disk', 'Net', 'System']
try:
config = configparser.RawConfigParser(strict=False)
config.add_section("global_tags")
config.add_section("agent")
config.set(
"agent",
"interval",
"'{interval}s'".format(interval=self.interval))
config.set("agent", "round_interval", "true")
config.set("agent", "flush_interval", "'1s'")
config.set("agent", "collection_jitter", "'0s'")
config.set("agent", "flush_jitter", "'1s'")
for section in self.host_config.keys():
# telegraf-style config
if not self.old_style_configs:
config.add_section(
"{section_name}".format(
section_name=self.host_config[section]['name']))
for key, value in self.host_config[section].items():
if key != 'name':
config.set(
"{section_name}".format(
section_name=self.host_config[section][
'name']),
"{key}".format(key=key),
"{value}".format(value=value))
# monitoring-style config
else:
if section in defaults_old_enabled:
config.add_section(
"{section_name}".format(
section_name=self.host_config[section]['name']))
for key, value in self.host_config[section].items():
if key in [
'fielddrop', 'fieldpass', 'percpu',
'devices', 'interfaces'
]:
config.set(
"{section_name}".format(
section_name=self.host_config[section][
'name']),
"{key}".format(key=key),
"{value}".format(value=value))
# outputs
config.add_section("[outputs.file]")
config.set(
"[outputs.file]",
"files",
"['{config}']".format(config=self.monitoring_data_output))
config.set("[outputs.file]", "data_format", "'json'")
with open(cfg_path, 'w') as fds:
config.write(fds)
# dirty hack, this allow to avoid bash escape quoting, we're pushing shell script w/ arguments
# index of argument is index of custom metric in our config
inputs = ""
for idx, cmd in enumerate(self.custom):
inputs += "[[inputs.exec]]\n"
inputs += "commands = ['/bin/sh {workdir}/agent_customs.sh -{idx}']\n".format(
workdir=workdir, idx=idx)
inputs += "data_format = 'value'\n"
inputs += "data_type = 'float'\n"
inputs += "name_prefix = '{}_'\n\n".format(cmd.get('label'))
if cmd['diff']:
decoder.diff_metrics['custom'].append(
decoder.find_common_names(cmd.get('label')))
with open(cfg_path, 'a') as fds:
fds.write(inputs)
# telegraf raw configuration into xml
telegraf_raw = ""
for element in self.telegrafraw:
telegraf_raw += element
with open(cfg_path, 'a') as fds:
fds.write(telegraf_raw)
except Exception as exc:
logger.error(
'Error trying to create monitoring config. Malformed? %s',
exc,
exc_info=True)
return cfg_path
def create_agent_py(agent_filename):
if not os.path.isfile(agent_filename):
with open(agent_filename, 'w') as f:
f.write(read_resource(pkg_resources.resource_filename('yandextank.plugins.Telegraf', 'agent/agent.py')))
os.chmod(agent_filename, 0o775)
return os.path.abspath(agent_filename)
|
from django.conf import settings
from django.contrib.auth.hashers import make_password
from weblate.auth.data import (
GLOBAL_PERMISSIONS,
GROUPS,
PERMISSIONS,
ROLES,
SELECTION_ALL,
)
def migrate_permissions_list(model, permissions):
ids = set()
# Update/create permissions
for code, name in permissions:
instance, created = model.objects.get_or_create(
codename=code, defaults={"name": name}
)
ids.add(instance.pk)
if not created and instance.name != name:
instance.name = name
instance.save(update_fields=["name"])
return ids
def migrate_permissions(model):
"""Create permissions as defined in the data."""
ids = set()
# Per object permissions
ids.update(migrate_permissions_list(model, PERMISSIONS))
# Global permissions
ids.update(migrate_permissions_list(model, GLOBAL_PERMISSIONS))
# Delete stale permissions
model.objects.exclude(id__in=ids).delete()
def migrate_roles(model, perm_model):
"""Create roles as defined in the data."""
result = False
for role, permissions in ROLES:
instance, created = model.objects.get_or_create(name=role)
result |= created
instance.permissions.set(
perm_model.objects.filter(codename__in=permissions), clear=True
)
return result
def migrate_groups(model, role_model, update=False):
"""Create groups as defined in the data."""
for group, roles, selection in GROUPS:
defaults = {
"internal": True,
"project_selection": selection,
"language_selection": SELECTION_ALL,
}
instance, created = model.objects.get_or_create(name=group, defaults=defaults)
if created or update:
instance.roles.set(role_model.objects.filter(name__in=roles), clear=True)
if update:
for key, value in defaults.items():
setattr(instance, key, value)
instance.save()
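# A hedged sketch of how these helpers are typically wired into a Django data
# migration; the app label and model names are assumptions, not taken from the
# source:
#
#   def forwards(apps, schema_editor):
#       perm_model = apps.get_model("weblate_auth", "Permission")
#       migrate_permissions(perm_model)
#       migrate_roles(apps.get_model("weblate_auth", "Role"), perm_model)
#       migrate_groups(
#           apps.get_model("weblate_auth", "Group"),
#           apps.get_model("weblate_auth", "Role"),
#       )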
def create_anonymous(model, group_model, update=True):
user, created = model.objects.get_or_create(
username=settings.ANONYMOUS_USER_NAME,
defaults={
"full_name": "Anonymous",
"email": "[email protected]",
"is_active": False,
"password": make_password(None),
},
)
if user.is_active:
raise ValueError(
f"Anonymous user ({settings.ANONYMOUS_USER_NAME}) already exists and is "
"active, please change the ANONYMOUS_USER_NAME setting or mark the user "
"as not active in the admin interface."
)
if created or update:
user.set_unusable_password()
user.save()
user.groups.set(
group_model.objects.filter(name__in=("Guests", "Viewers")), clear=True
)
|
from collections import namedtuple
import iptc
import mock
import pytest
from paasta_tools import iptables
EMPTY_RULE = iptables.Rule(
protocol="ip",
src="0.0.0.0/0.0.0.0",
dst="0.0.0.0/0.0.0.0",
target=None,
matches=(),
target_parameters=(),
)
@pytest.yield_fixture
def mock_Table():
with mock.patch.object(iptc, "Table", autospec=True) as m:
m.return_value.autocommit = True
yield m
@pytest.yield_fixture
def mock_Chain():
with mock.patch.object(iptc, "Chain", autospec=True) as m:
yield m
def test_rule_from_iptc_simple():
rule = iptc.Rule()
rule.create_target("DROP")
rule.src = "169.229.226.0/255.255.255.0"
assert iptables.Rule.from_iptc(rule) == EMPTY_RULE._replace(
src="169.229.226.0/255.255.255.0", target="DROP"
)
def test_rule_from_iptc_mac_match():
rule = iptc.Rule()
rule.create_target("DROP")
rule.create_match("mac")
rule.matches[0].mac_source = "20:C9:D0:2B:6F:F3"
assert iptables.Rule.from_iptc(rule) == EMPTY_RULE._replace(
target="DROP", matches=(("mac", (("mac-source", ("20:C9:D0:2B:6F:F3",)),)),)
)
def test_rule_from_iptc_target_parameters():
rule = iptc.Rule()
target = rule.create_target("LOG")
target.set_parameter("log-prefix", "my-prefix ")
assert iptables.Rule.from_iptc(rule) == EMPTY_RULE._replace(
target="LOG", target_parameters=(("log-prefix", ("my-prefix ",)),)
)
def test_rule_tcp_to_iptc():
rule = EMPTY_RULE._replace(
protocol="tcp", target="ACCEPT", matches=(("tcp", (("dport", ("443",)),)),)
).to_iptc()
assert rule.protocol == "tcp"
assert rule.target.name == "ACCEPT"
assert len(rule.matches) == 1
assert rule.matches[0].name == "tcp"
assert rule.matches[0].parameters["dport"] == "443"
def test_mac_src_to_iptc():
rule = EMPTY_RULE._replace(
target="ACCEPT", matches=(("mac", (("mac-source", ("20:C9:D0:2B:6F:F3",)),)),)
).to_iptc()
assert rule.protocol == "ip"
assert rule.target.name == "ACCEPT"
assert len(rule.matches) == 1
assert rule.matches[0].name == "mac"
assert rule.matches[0].parameters["mac_source"] == "20:C9:D0:2B:6F:F3"
def test_iptables_txn_normal():
table = mock.Mock(autocommit=True)
with iptables.iptables_txn(table):
assert table.autocommit is False
assert table.commit.called is False
assert table.refresh.called is False
assert table.commit.called is True
assert table.refresh.called is True
assert table.autocommit is True
def test_iptables_txn_with_exception():
table = mock.Mock(autocommit=True)
with pytest.raises(ValueError):
with iptables.iptables_txn(table):
raise ValueError("just testing lol")
assert table.commit.called is False
assert table.refresh.called is True
assert table.autocommit is True
def test_all_chains(mock_Table):
chain1 = mock.Mock()
chain1.name = "INPUT"
chain2 = mock.Mock()
chain2.name = "OUTPUT"
mock_Table.return_value = mock.Mock(chains=[chain1, chain2])
assert iptables.all_chains() == {"INPUT", "OUTPUT"}
def test_ensure_chain():
with mock.patch.object(
iptables,
"list_chain",
autospec=True,
return_value={
EMPTY_RULE._replace(target="DROP"),
EMPTY_RULE._replace(target="ACCEPT", src="1.0.0.0/255.255.255.0"),
},
), mock.patch.object(
iptables, "insert_rule", autospec=True
) as mock_insert_rule, mock.patch.object(
iptables, "delete_rules", autospec=True
) as mock_delete_rules:
iptables.ensure_chain(
"PAASTA.service",
(
EMPTY_RULE._replace(target="DROP"),
EMPTY_RULE._replace(target="ACCEPT", src="2.0.0.0/255.255.255.0"),
),
)
# It should add the missing rule
assert mock_insert_rule.mock_calls == [
mock.call(
"PAASTA.service",
EMPTY_RULE._replace(target="ACCEPT", src="2.0.0.0/255.255.255.0"),
)
]
# It should delete the extra rule
assert mock_delete_rules.mock_calls == [
mock.call(
"PAASTA.service",
{EMPTY_RULE._replace(target="ACCEPT", src="1.0.0.0/255.255.255.0")},
)
]
def test_ensure_chain_creates_chain_if_doesnt_exist():
with mock.patch.object(
iptables, "list_chain", side_effect=iptables.ChainDoesNotExist("PAASTA.service")
), mock.patch.object(iptables, "create_chain", autospec=True) as mock_create_chain:
iptables.ensure_chain("PAASTA.service", ())
assert mock_create_chain.mock_calls == [mock.call("PAASTA.service")]
def test_ensure_rule_does_not_exist():
with mock.patch.object(
iptables,
"list_chain",
return_value=(
EMPTY_RULE._replace(target="ACCEPT"),
EMPTY_RULE._replace(src="10.0.0.0/255.255.255.0"),
),
), mock.patch.object(iptables, "insert_rule", autospec=True) as mock_insert_rule:
iptables.ensure_rule("PAASTA.service", EMPTY_RULE._replace(target="DROP"))
assert mock_insert_rule.mock_calls == [
mock.call("PAASTA.service", EMPTY_RULE._replace(target="DROP"))
]
def test_ensure_rule_already_exists():
with mock.patch.object(
iptables,
"list_chain",
return_value=(
EMPTY_RULE._replace(target="DROP"),
EMPTY_RULE._replace(src="10.0.0.0/255.255.255.0"),
),
), mock.patch.object(iptables, "insert_rule", autospec=True) as mock_insert_rule:
iptables.ensure_rule("PAASTA.service", EMPTY_RULE._replace(target="DROP"))
assert mock_insert_rule.called is False
def test_insert_rule(mock_Table, mock_Chain):
iptables.insert_rule("PAASTA.service", EMPTY_RULE._replace(target="DROP"))
(call,) = mock_Chain("filter", "PAASTA.service").insert_rule.call_args_list
args, kwargs = call
(rule,) = args
assert iptables.Rule.from_iptc(rule) == EMPTY_RULE._replace(target="DROP")
def test_delete_rules(mock_Table, mock_Chain):
mock_Chain.return_value.rules = (
EMPTY_RULE._replace(target="DROP").to_iptc(),
EMPTY_RULE._replace(target="ACCEPT").to_iptc(),
EMPTY_RULE._replace(
target="REJECT",
target_parameters=(("reject-with", ("icmp-port-unreachable",)),),
).to_iptc(),
)
iptables.delete_rules(
"PAASTA.service",
(
EMPTY_RULE._replace(target="ACCEPT"),
EMPTY_RULE._replace(
target="REJECT",
target_parameters=(("reject-with", ("icmp-port-unreachable",)),),
),
),
)
assert mock_Chain("filter", "PAASTA.service").delete_rule.mock_calls == [
mock.call(mock_Chain.return_value.rules[1]),
mock.call(mock_Chain.return_value.rules[2]),
]
def test_create_chain(mock_Table):
iptables.create_chain("PAASTA.service")
mock_Table("filter").create_chain.assert_called_once_with("PAASTA.service")
def test_delete_chain(mock_Table, mock_Chain):
iptables.delete_chain("PAASTA.service")
chain = mock_Chain("filter", "PAASTA.service")
assert chain.flush.called is True
assert chain.delete.called is True
def test_list_chain_simple(mock_Table, mock_Chain):
chain = mock_Chain("PAASTA.internet", mock_Table.return_value)
rule = iptc.Rule()
rule.create_target("DROP")
chain.rules = [rule]
mock_Table.return_value.chains = [chain]
assert iptables.list_chain("PAASTA.internet") == (
EMPTY_RULE._replace(target="DROP"),
)
def test_list_chain_does_not_exist(mock_Table, mock_Chain):
mock_Table.return_value.chains = []
with pytest.raises(iptables.ChainDoesNotExist):
iptables.list_chain("PAASTA.internet")
class TestReorderChain:
class FakeRule(namedtuple("FakeRule", ("target", "id"))):
def to_iptc(self):
return self
@pytest.yield_fixture(autouse=True)
def chain_mock(self):
with mock.patch.object(
iptables, "iptables_txn", autospec=True
), mock.patch.object(iptables.iptc, "Table", autospec=True), mock.patch.object(
iptables.iptc, "Chain", autospec=True
) as chain_mock, mock.patch.object(
iptables, "list_chain", autospec=True
) as list_chain_mock:
self.chain_mock = chain_mock
self.list_chain_mock = list_chain_mock
yield
def test_reorder_chain_flip(self):
self.list_chain_mock.return_value = [
self.FakeRule("REJECT", "a"),
self.FakeRule("LOG", "b"),
self.FakeRule("ACCEPT", "c"),
self.FakeRule("ACCEPT", "d"),
]
iptables.reorder_chain("")
assert self.chain_mock.return_value.replace_rule.mock_calls == [
mock.call(self.FakeRule("ACCEPT", "c"), 0),
mock.call(self.FakeRule("ACCEPT", "d"), 1),
mock.call(self.FakeRule("LOG", "b"), 2),
mock.call(self.FakeRule("REJECT", "a"), 3),
]
def test_reorder_chain_log_first(self):
self.list_chain_mock.return_value = [
self.FakeRule("LOG", "b"),
self.FakeRule("ACCEPT", "c"),
self.FakeRule("ACCEPT", "d"),
self.FakeRule("REJECT", "a"),
]
iptables.reorder_chain("")
assert self.chain_mock.return_value.replace_rule.mock_calls == [
mock.call(self.FakeRule("ACCEPT", "c"), 0),
mock.call(self.FakeRule("ACCEPT", "d"), 1),
mock.call(self.FakeRule("LOG", "b"), 2),
]
def test_reorder_chain_empty(self):
self.list_chain_mock.return_value = []
iptables.reorder_chain("")
assert self.chain_mock.return_value.replace_rule.mock_calls == []
def test_reorder_chain_already_in_order(self):
self.chain_mock.return_value.rules = [
self.FakeRule("ACCEPT", "c"),
self.FakeRule("ACCEPT", "d"),
self.FakeRule("LOG", "b"),
self.FakeRule("REJECT", "a"),
]
iptables.reorder_chain("")
assert self.chain_mock.return_value.replace_rule.mock_calls == []
def test_reorder_chain_log_at_bottom(self):
self.list_chain_mock.return_value = [
self.FakeRule("ACCEPT", "c"),
self.FakeRule("ACCEPT", "d"),
self.FakeRule("REJECT", "a"),
self.FakeRule("LOG", "b"),
]
iptables.reorder_chain("")
assert self.chain_mock.return_value.replace_rule.mock_calls == [
mock.call(self.FakeRule("LOG", "b"), 2),
mock.call(self.FakeRule("REJECT", "a"), 3),
]
def test_reorder_chain_reject_in_middle(self):
self.list_chain_mock.return_value = [
self.FakeRule("ACCEPT", "c"),
self.FakeRule("REJECT", "a"),
self.FakeRule("ACCEPT", "d"),
]
iptables.reorder_chain("")
assert self.chain_mock.return_value.replace_rule.mock_calls == [
mock.call(self.FakeRule("ACCEPT", "d"), 1),
mock.call(self.FakeRule("REJECT", "a"), 2),
]
def test_reorder_chain_other_target_names(self):
self.list_chain_mock.return_value = [
self.FakeRule("HELLOWORLD", "c"),
self.FakeRule("REJECT", "a"),
self.FakeRule("FOOBAR", "d"),
]
iptables.reorder_chain("")
assert self.chain_mock.return_value.replace_rule.mock_calls == [
mock.call(self.FakeRule("FOOBAR", "d"), 1),
mock.call(self.FakeRule("REJECT", "a"), 2),
]
|
import asyncio
from ipaddress import ip_address
import logging
import os
from typing import Dict, Union
import aiohttp
from aiohttp import hdrs, web
from aiohttp.web_exceptions import HTTPBadGateway
from multidict import CIMultiDict
from homeassistant.components.http import HomeAssistantView
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from .const import X_HASSIO, X_INGRESS_PATH
_LOGGER = logging.getLogger(__name__)
@callback
def async_setup_ingress_view(hass: HomeAssistantType, host: str):
"""Auth setup."""
websession = hass.helpers.aiohttp_client.async_get_clientsession()
hassio_ingress = HassIOIngress(host, websession)
hass.http.register_view(hassio_ingress)
class HassIOIngress(HomeAssistantView):
"""Hass.io view to handle base part."""
name = "api:hassio:ingress"
url = "/api/hassio_ingress/{token}/{path:.*}"
requires_auth = False
def __init__(self, host: str, websession: aiohttp.ClientSession):
"""Initialize a Hass.io ingress view."""
self._host = host
self._websession = websession
def _create_url(self, token: str, path: str) -> str:
"""Create URL to service."""
return f"http://{self._host}/ingress/{token}/{path}"
async def _handle(
self, request: web.Request, token: str, path: str
) -> Union[web.Response, web.StreamResponse, web.WebSocketResponse]:
"""Route data to Hass.io ingress service."""
try:
# Websocket
if _is_websocket(request):
return await self._handle_websocket(request, token, path)
# Request
return await self._handle_request(request, token, path)
except aiohttp.ClientError as err:
_LOGGER.debug("Ingress error with %s / %s: %s", token, path, err)
raise HTTPBadGateway() from None
get = _handle
post = _handle
put = _handle
delete = _handle
patch = _handle
options = _handle
async def _handle_websocket(
self, request: web.Request, token: str, path: str
) -> web.WebSocketResponse:
"""Ingress route for websocket."""
if hdrs.SEC_WEBSOCKET_PROTOCOL in request.headers:
req_protocols = [
str(proto.strip())
for proto in request.headers[hdrs.SEC_WEBSOCKET_PROTOCOL].split(",")
]
else:
req_protocols = ()
ws_server = web.WebSocketResponse(
protocols=req_protocols, autoclose=False, autoping=False
)
await ws_server.prepare(request)
# Preparing
url = self._create_url(token, path)
source_header = _init_header(request, token)
# Support GET query
if request.query_string:
url = f"{url}?{request.query_string}"
# Start proxy
async with self._websession.ws_connect(
url,
headers=source_header,
protocols=req_protocols,
autoclose=False,
autoping=False,
) as ws_client:
# Proxy requests
await asyncio.wait(
[
_websocket_forward(ws_server, ws_client),
_websocket_forward(ws_client, ws_server),
],
return_when=asyncio.FIRST_COMPLETED,
)
return ws_server
async def _handle_request(
self, request: web.Request, token: str, path: str
) -> Union[web.Response, web.StreamResponse]:
"""Ingress route for request."""
url = self._create_url(token, path)
data = await request.read()
source_header = _init_header(request, token)
async with self._websession.request(
request.method,
url,
headers=source_header,
params=request.query,
allow_redirects=False,
data=data,
) as result:
headers = _response_header(result)
# Simple request
if (
hdrs.CONTENT_LENGTH in result.headers
and int(result.headers.get(hdrs.CONTENT_LENGTH, 0)) < 4194000
):
# Return Response
body = await result.read()
return web.Response(
headers=headers,
status=result.status,
content_type=result.content_type,
body=body,
)
# Stream response
response = web.StreamResponse(status=result.status, headers=headers)
response.content_type = result.content_type
try:
await response.prepare(request)
async for data in result.content.iter_chunked(4096):
await response.write(data)
except (aiohttp.ClientError, aiohttp.ClientPayloadError) as err:
_LOGGER.debug("Stream error %s / %s: %s", token, path, err)
return response
def _init_header(
request: web.Request, token: str
) -> Union[CIMultiDict, Dict[str, str]]:
"""Create initial header."""
headers = {}
# filter flags
for name, value in request.headers.items():
if name in (
hdrs.CONTENT_LENGTH,
hdrs.CONTENT_ENCODING,
hdrs.SEC_WEBSOCKET_EXTENSIONS,
hdrs.SEC_WEBSOCKET_PROTOCOL,
hdrs.SEC_WEBSOCKET_VERSION,
hdrs.SEC_WEBSOCKET_KEY,
):
continue
headers[name] = value
# Inject token / cleanup later on Supervisor
headers[X_HASSIO] = os.environ.get("HASSIO_TOKEN", "")
# Ingress information
headers[X_INGRESS_PATH] = f"/api/hassio_ingress/{token}"
# Set X-Forwarded-For
forward_for = request.headers.get(hdrs.X_FORWARDED_FOR)
connected_ip = ip_address(request.transport.get_extra_info("peername")[0])
if forward_for:
forward_for = f"{forward_for}, {connected_ip!s}"
else:
forward_for = f"{connected_ip!s}"
headers[hdrs.X_FORWARDED_FOR] = forward_for
# Set X-Forwarded-Host
forward_host = request.headers.get(hdrs.X_FORWARDED_HOST)
if not forward_host:
forward_host = request.host
headers[hdrs.X_FORWARDED_HOST] = forward_host
# Set X-Forwarded-Proto
forward_proto = request.headers.get(hdrs.X_FORWARDED_PROTO)
if not forward_proto:
forward_proto = request.url.scheme
headers[hdrs.X_FORWARDED_PROTO] = forward_proto
return headers
def _response_header(response: aiohttp.ClientResponse) -> Dict[str, str]:
"""Create response header."""
headers = {}
for name, value in response.headers.items():
if name in (
hdrs.TRANSFER_ENCODING,
hdrs.CONTENT_LENGTH,
hdrs.CONTENT_TYPE,
hdrs.CONTENT_ENCODING,
):
continue
headers[name] = value
return headers
def _is_websocket(request: web.Request) -> bool:
"""Return True if request is a websocket."""
headers = request.headers
if (
"upgrade" in headers.get(hdrs.CONNECTION, "").lower()
and headers.get(hdrs.UPGRADE, "").lower() == "websocket"
):
return True
return False
async def _websocket_forward(ws_from, ws_to):
"""Handle websocket message directly."""
try:
async for msg in ws_from:
if msg.type == aiohttp.WSMsgType.TEXT:
await ws_to.send_str(msg.data)
elif msg.type == aiohttp.WSMsgType.BINARY:
await ws_to.send_bytes(msg.data)
elif msg.type == aiohttp.WSMsgType.PING:
await ws_to.ping()
elif msg.type == aiohttp.WSMsgType.PONG:
await ws_to.pong()
elif ws_to.closed:
await ws_to.close(code=ws_to.close_code, message=msg.extra)
except RuntimeError:
_LOGGER.debug("Ingress Websocket runtime error")
|
import pytest
from homeassistant.components import zeroconf
from tests.async_mock import patch
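# Replace zeroconf's multiple-instance catcher with a no-op during tests,
# keeping a reference to the original implementation.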
zeroconf.orig_install_multiple_zeroconf_catcher = (
zeroconf.install_multiple_zeroconf_catcher
)
zeroconf.install_multiple_zeroconf_catcher = lambda zc: None
@pytest.fixture(autouse=True)
def prevent_io():
"""Fixture to prevent certain I/O from happening."""
with patch(
"homeassistant.components.http.ban.async_load_ip_bans_config",
return_value=[],
):
yield
|
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
async def async_setup(hass, config):
"""Component setup, do nothing."""
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Set up a config entry for solarlog."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.config_entries.async_forward_entry_unload(entry, "sensor")
|
import json
import logging
from xml.parsers.expat import ExpatError
import httpx
from jsonpath import jsonpath
import voluptuous as vol
import xmltodict
from homeassistant.components.sensor import DEVICE_CLASSES_SCHEMA, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_DEVICE_CLASS,
CONF_FORCE_UPDATE,
CONF_HEADERS,
CONF_METHOD,
CONF_NAME,
CONF_PASSWORD,
CONF_PAYLOAD,
CONF_RESOURCE,
CONF_RESOURCE_TEMPLATE,
CONF_TIMEOUT,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.reload import async_setup_reload_service
from . import DOMAIN, PLATFORMS
from .data import DEFAULT_TIMEOUT, RestData
_LOGGER = logging.getLogger(__name__)
DEFAULT_METHOD = "GET"
DEFAULT_NAME = "REST Sensor"
DEFAULT_VERIFY_SSL = True
DEFAULT_FORCE_UPDATE = False
CONF_JSON_ATTRS = "json_attributes"
CONF_JSON_ATTRS_PATH = "json_attributes_path"
METHODS = ["POST", "GET"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Exclusive(CONF_RESOURCE, CONF_RESOURCE): cv.url,
vol.Exclusive(CONF_RESOURCE_TEMPLATE, CONF_RESOURCE): cv.template,
vol.Optional(CONF_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_HEADERS): vol.Schema({cv.string: cv.string}),
vol.Optional(CONF_JSON_ATTRS, default=[]): cv.ensure_list_csv,
vol.Optional(CONF_METHOD, default=DEFAULT_METHOD): vol.In(METHODS),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PAYLOAD): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_JSON_ATTRS_PATH): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
}
)
PLATFORM_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_RESOURCE, CONF_RESOURCE_TEMPLATE), PLATFORM_SCHEMA
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the RESTful sensor."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config.get(CONF_NAME)
resource = config.get(CONF_RESOURCE)
resource_template = config.get(CONF_RESOURCE_TEMPLATE)
method = config.get(CONF_METHOD)
payload = config.get(CONF_PAYLOAD)
verify_ssl = config.get(CONF_VERIFY_SSL)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
headers = config.get(CONF_HEADERS)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
device_class = config.get(CONF_DEVICE_CLASS)
value_template = config.get(CONF_VALUE_TEMPLATE)
json_attrs = config.get(CONF_JSON_ATTRS)
json_attrs_path = config.get(CONF_JSON_ATTRS_PATH)
force_update = config.get(CONF_FORCE_UPDATE)
timeout = config.get(CONF_TIMEOUT)
if value_template is not None:
value_template.hass = hass
if resource_template is not None:
resource_template.hass = hass
resource = resource_template.render(parse_result=False)
if username and password:
if config.get(CONF_AUTHENTICATION) == HTTP_DIGEST_AUTHENTICATION:
auth = httpx.DigestAuth(username, password)
else:
auth = (username, password)
else:
auth = None
rest = RestData(method, resource, auth, headers, payload, verify_ssl, timeout)
await rest.async_update()
if rest.data is None:
raise PlatformNotReady
    # Must update the sensor now (including fetching the REST resource) to
    # ensure the sensor has an initial state.
async_add_entities(
[
RestSensor(
hass,
rest,
name,
unit,
device_class,
value_template,
json_attrs,
force_update,
resource_template,
json_attrs_path,
)
],
True,
)
class RestSensor(Entity):
"""Implementation of a REST sensor."""
def __init__(
self,
hass,
rest,
name,
unit_of_measurement,
device_class,
value_template,
json_attrs,
force_update,
resource_template,
json_attrs_path,
):
"""Initialize the REST sensor."""
self._hass = hass
self.rest = rest
self._name = name
self._state = None
self._unit_of_measurement = unit_of_measurement
self._device_class = device_class
self._value_template = value_template
self._json_attrs = json_attrs
self._attributes = None
self._force_update = force_update
self._resource_template = resource_template
self._json_attrs_path = json_attrs_path
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
@property
def available(self):
"""Return if the sensor data are available."""
return self.rest.data is not None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def force_update(self):
"""Force update."""
return self._force_update
async def async_update(self):
"""Get the latest data from REST API and update the state."""
if self._resource_template is not None:
self.rest.set_url(self._resource_template.render(parse_result=False))
await self.rest.async_update()
value = self.rest.data
_LOGGER.debug("Data fetched from resource: %s", value)
if self.rest.headers is not None:
# If the http request failed, headers will be None
content_type = self.rest.headers.get("content-type")
if content_type and (
content_type.startswith("text/xml")
or content_type.startswith("application/xml")
):
try:
value = json.dumps(xmltodict.parse(value))
_LOGGER.debug("JSON converted from XML: %s", value)
except ExpatError:
_LOGGER.warning(
"REST xml result could not be parsed and converted to JSON"
)
_LOGGER.debug("Erroneous XML: %s", value)
if self._json_attrs:
self._attributes = {}
if value:
try:
json_dict = json.loads(value)
if self._json_attrs_path is not None:
json_dict = jsonpath(json_dict, self._json_attrs_path)
                    # jsonpath returns a list of matches (or False when there
                    # is no match), so the result we want is json_dict[0]; the
                    # isinstance check below unwraps it (sketch after this class).
if isinstance(json_dict, list):
json_dict = json_dict[0]
if isinstance(json_dict, dict):
attrs = {
k: json_dict[k] for k in self._json_attrs if k in json_dict
}
self._attributes = attrs
else:
_LOGGER.warning(
"JSON result was not a dictionary"
" or list with 0th element a dictionary"
)
except ValueError:
_LOGGER.warning("REST result could not be parsed as JSON")
_LOGGER.debug("Erroneous JSON: %s", value)
else:
_LOGGER.warning("Empty reply found when expecting JSON data")
if value is not None and self._value_template is not None:
value = self._value_template.async_render_with_possible_json_value(
value, None
)
self._state = value
async def async_will_remove_from_hass(self):
"""Shutdown the session."""
await self.rest.async_remove()
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
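# Hedged illustration (not part of the original platform): the jsonpath
# behaviour RestSensor.async_update relies on; the document and the
# "$.battery" expression below are hypothetical.
def _example_json_attrs_path_lookup():
    """Sketch of how a json_attributes_path lookup resolves."""
    matches = jsonpath({"battery": {"level": 90}}, "$.battery")
    # jsonpath returns a list of matches (or False when nothing matches),
    # so the interesting value is matches[0], here {"level": 90}.
    return matches[0] if isinstance(matches, list) else None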
|
from pydispatch import dispatcher
from tests.async_mock import MagicMock
def value_changed(value):
"""Fire a value changed."""
dispatcher.send(
MockNetwork.SIGNAL_VALUE_CHANGED,
value=value,
node=value.node,
network=value.node._network,
)
def node_changed(node):
"""Fire a node changed."""
dispatcher.send(MockNetwork.SIGNAL_NODE, node=node, network=node._network)
def notification(node_id, network=None):
"""Fire a notification."""
dispatcher.send(
MockNetwork.SIGNAL_NOTIFICATION, args={"nodeId": node_id}, network=network
)
class MockOption(MagicMock):
"""Mock Z-Wave options."""
def __init__(self, device=None, config_path=None, user_path=None, cmd_line=None):
"""Initialize a Z-Wave mock options."""
super().__init__()
self.device = device
self.config_path = config_path
self.user_path = user_path
self.cmd_line = cmd_line
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
class MockNetwork(MagicMock):
"""Mock Z-Wave network."""
SIGNAL_NETWORK_FAILED = "mock_NetworkFailed"
SIGNAL_NETWORK_STARTED = "mock_NetworkStarted"
SIGNAL_NETWORK_READY = "mock_NetworkReady"
SIGNAL_NETWORK_STOPPED = "mock_NetworkStopped"
SIGNAL_NETWORK_RESETTED = "mock_DriverResetted"
SIGNAL_NETWORK_AWAKED = "mock_DriverAwaked"
SIGNAL_DRIVER_FAILED = "mock_DriverFailed"
SIGNAL_DRIVER_READY = "mock_DriverReady"
SIGNAL_DRIVER_RESET = "mock_DriverReset"
SIGNAL_DRIVER_REMOVED = "mock_DriverRemoved"
SIGNAL_GROUP = "mock_Group"
SIGNAL_NODE = "mock_Node"
SIGNAL_NODE_ADDED = "mock_NodeAdded"
SIGNAL_NODE_EVENT = "mock_NodeEvent"
SIGNAL_NODE_NAMING = "mock_NodeNaming"
SIGNAL_NODE_NEW = "mock_NodeNew"
SIGNAL_NODE_PROTOCOL_INFO = "mock_NodeProtocolInfo"
SIGNAL_NODE_READY = "mock_NodeReady"
SIGNAL_NODE_REMOVED = "mock_NodeRemoved"
SIGNAL_SCENE_EVENT = "mock_SceneEvent"
SIGNAL_VALUE = "mock_Value"
SIGNAL_VALUE_ADDED = "mock_ValueAdded"
SIGNAL_VALUE_CHANGED = "mock_ValueChanged"
SIGNAL_VALUE_REFRESHED = "mock_ValueRefreshed"
SIGNAL_VALUE_REMOVED = "mock_ValueRemoved"
SIGNAL_POLLING_ENABLED = "mock_PollingEnabled"
SIGNAL_POLLING_DISABLED = "mock_PollingDisabled"
SIGNAL_CREATE_BUTTON = "mock_CreateButton"
SIGNAL_DELETE_BUTTON = "mock_DeleteButton"
SIGNAL_BUTTON_ON = "mock_ButtonOn"
SIGNAL_BUTTON_OFF = "mock_ButtonOff"
SIGNAL_ESSENTIAL_NODE_QUERIES_COMPLETE = "mock_EssentialNodeQueriesComplete"
SIGNAL_NODE_QUERIES_COMPLETE = "mock_NodeQueriesComplete"
SIGNAL_AWAKE_NODES_QUERIED = "mock_AwakeNodesQueried"
SIGNAL_ALL_NODES_QUERIED = "mock_AllNodesQueried"
SIGNAL_ALL_NODES_QUERIED_SOME_DEAD = "mock_AllNodesQueriedSomeDead"
SIGNAL_MSG_COMPLETE = "mock_MsgComplete"
SIGNAL_NOTIFICATION = "mock_Notification"
SIGNAL_CONTROLLER_COMMAND = "mock_ControllerCommand"
SIGNAL_CONTROLLER_WAITING = "mock_ControllerWaiting"
STATE_STOPPED = 0
STATE_FAILED = 1
STATE_RESETTED = 3
STATE_STARTED = 5
STATE_AWAKED = 7
STATE_READY = 10
def __init__(self, options=None, *args, **kwargs):
"""Initialize a Z-Wave mock network."""
super().__init__()
self.options = options
self.state = MockNetwork.STATE_STOPPED
class MockNode(MagicMock):
"""Mock Z-Wave node."""
def __init__(
self,
*,
node_id=567,
name="Mock Node",
manufacturer_id="ABCD",
product_id="123",
product_type="678",
command_classes=None,
can_wake_up_value=True,
manufacturer_name="Test Manufacturer",
product_name="Test Product",
network=None,
**kwargs,
):
"""Initialize a Z-Wave mock node."""
super().__init__()
self.node_id = node_id
self.name = name
self.manufacturer_id = manufacturer_id
self.product_id = product_id
self.product_type = product_type
self.manufacturer_name = manufacturer_name
self.product_name = product_name
self.can_wake_up_value = can_wake_up_value
self._command_classes = command_classes or []
if network is not None:
self._network = network
for attr_name in kwargs:
setattr(self, attr_name, kwargs[attr_name])
def has_command_class(self, command_class):
"""Test if mock has a command class."""
return command_class in self._command_classes
def get_battery_level(self):
"""Return mock battery level."""
return 42
def can_wake_up(self):
"""Return whether the node can wake up."""
return self.can_wake_up_value
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
class MockValue(MagicMock):
"""Mock Z-Wave value."""
_mock_value_id = 1234
def __init__(
self,
*,
label="Mock Value",
node=None,
instance=0,
index=0,
value_id=None,
**kwargs,
):
"""Initialize a Z-Wave mock value."""
super().__init__()
self.label = label
self.node = node
self.instance = instance
self.index = index
if value_id is None:
MockValue._mock_value_id += 1
value_id = MockValue._mock_value_id
self.value_id = value_id
self.object_id = value_id
for attr_name in kwargs:
setattr(self, attr_name, kwargs[attr_name])
def _get_child_mock(self, **kw):
"""Create child mocks with right MagicMock class."""
return MagicMock(**kw)
def refresh(self):
"""Mock refresh of node value."""
value_changed(self)
class MockEntityValues:
"""Mock Z-Wave entity values."""
def __init__(self, **kwargs):
"""Initialize the mock zwave values."""
self.primary = None
self.wakeup = None
self.battery = None
self.power = None
for name in kwargs:
setattr(self, name, kwargs[name])
def __iter__(self):
"""Allow iteration over all values."""
return iter(self.__dict__.values())
|
from __future__ import division
try:
import MySQLdb
from MySQLdb import MySQLError
except ImportError:
MySQLdb = None
import diamond.collector
import time
import re
class MySQLPerfCollector(diamond.collector.Collector):
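    """Collect MySQL performance_schema wait metrics for replication threads."""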
def process_config(self):
super(MySQLPerfCollector, self).process_config()
self.db = None
self.last_wait_count = {}
self.last_wait_sum = {}
self.last_timestamp = {}
self.last_data = {}
self.monitors = {
'slave_sql': {
'wait/synch/cond/sql/MYSQL_RELAY_LOG::update_cond':
'wait_for_update',
'wait/io/file/innodb/innodb_data_file':
'innodb_data_file',
'wait/io/file/innodb/innodb_log_file':
'innodb_log_file',
'wait/io/file/myisam/dfile':
'myisam_dfile',
'wait/io/file/myisam/kfile':
'myisam_kfile',
'wait/io/file/sql/binlog':
'binlog',
'wait/io/file/sql/relay_log_info':
'relaylog_info',
'wait/io/file/sql/relaylog':
'relaylog',
'wait/synch/mutex/innodb':
'innodb_mutex',
'wait/synch/mutex':
'other_mutex',
'wait/synch/rwlock':
'rwlocks',
'wait/io':
'other_io',
},
'slave_io': {
'wait/io/file/sql/relaylog_index':
'relaylog_index',
'wait/synch/mutex/sql/MYSQL_RELAY_LOG::LOCK_index':
'relaylog_index_lock',
'wait/synch/mutex/sql/Master_info::data_lock':
'master_info_lock',
'wait/synch/mutex/mysys/IO_CACHE::append_buffer_lock':
'append_buffer_lock',
'wait/synch/mutex/sql/LOG::LOCK_log':
'log_lock',
'wait/io/file/sql/master_info':
'master_info',
'wait/io/file/sql/relaylog':
'relaylog',
'wait/synch/mutex':
'other_mutex',
'wait/synch/rwlock':
'rwlocks',
'wait/io':
'other_io',
}
}
        if not isinstance(self.config['hosts'], list):
self.config['hosts'] = [self.config['hosts']]
# Move legacy config format to new format
if 'host' in self.config:
hoststr = "%s:%s@%s:%s/%s" % (
self.config['user'],
self.config['passwd'],
self.config['host'],
self.config['port'],
self.config['db'],
)
self.config['hosts'].append(hoststr)
def get_default_config_help(self):
config_help = super(MySQLPerfCollector, self).get_default_config_help()
config_help.update({
'hosts': 'List of hosts to collect from. Format is ' +
'yourusername:yourpassword@host:' +
'port/performance_schema[/nickname]',
'slave': 'Collect Slave Replication Metrics',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MySQLPerfCollector, self).get_default_config()
config.update({
'path': 'mysql',
# Connection settings
'hosts': [],
'slave': 'False',
})
return config
def connect(self, params):
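        """Connect to the MySQL server described by *params*."""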
if MySQLdb is None:
self.log.error('Unable to import MySQLdb')
return
try:
self.db = MySQLdb.connect(**params)
except MySQLError as e:
            self.log.error(
                "MySQLPerfCollector couldn't connect to database: %s", e)
return {}
self.log.debug('MySQLPerfCollector: Connected to database.')
def query_list(self, query, params):
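        """Run *query* with *params* and return all rows as a list."""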
cursor = self.db.cursor()
cursor.execute(query, params)
return list(cursor.fetchall())
def slave_load(self, nickname, thread):
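        """Publish wait-event metrics for the given replication thread."""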
data = self.query_list("""
SELECT
his.event_name,
his.sum_timer_wait,
his.count_star,
cur.event_name,
UNIX_TIMESTAMP(SYSDATE())
FROM
events_waits_summary_by_thread_by_event_name his
JOIN threads thr USING (thread_id)
JOIN events_waits_current cur USING (thread_id)
WHERE
name = %s
ORDER BY
his.event_name
""", (thread,))
wait_sum = sum([x[1] for x in data])
wait_count = sum([x[2] for x in data])
timestamp = int(time.time())
if 0 in data and len(data[0]) > 5:
cur_event_name, timestamp = data[0][3:]
if thread not in self.last_wait_sum:
# Avoid bogus data
self.last_wait_sum[thread] = wait_sum
self.last_wait_count[thread] = wait_count
self.last_timestamp[thread] = timestamp
self.last_data[thread] = data
return
wait_delta = wait_sum - self.last_wait_sum[thread]
time_delta = (timestamp - self.last_timestamp[thread]) * 1000000000000
if time_delta == 0:
return
# Summarize a few things
thread_name = thread[thread.rfind('/') + 1:]
data.append(
['wait/synch/mutex/innodb',
sum([x[1] for x in data
if x[0].startswith('wait/synch/mutex/innodb')])])
data.append(
['wait/synch/mutex',
sum([x[1] for x in data
if (x[0].startswith('wait/synch/mutex') and
x[0] not in self.monitors[thread_name])]) - data[-1][1]])
data.append(
['wait/synch/rwlock',
sum([x[1] for x in data
if x[0].startswith('wait/synch/rwlock')])])
data.append(
['wait/io',
sum([x[1] for x in data
if (x[0].startswith('wait/io') and
x[0] not in self.monitors[thread_name])])])
for d in zip(self.last_data[thread], data):
if d[0][0] in self.monitors[thread_name]:
self.publish(nickname + thread_name + '.' +
self.monitors[thread_name][d[0][0]],
(d[1][1] - d[0][1]) / time_delta * 100)
# Also log what's unaccounted for. This is where Actual Work gets done
self.publish(nickname + thread_name + '.other_work',
float(time_delta - wait_delta) / time_delta * 100)
self.last_wait_sum[thread] = wait_sum
self.last_wait_count[thread] = wait_count
self.last_timestamp[thread] = timestamp
self.last_data[thread] = data
def collect(self):
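        """Collect metrics from every configured host."""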
for host in self.config['hosts']:
matches = re.search(
'^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)$', host)
if not matches:
continue
params = {'host': matches.group(3)}
try:
params['port'] = int(matches.group(4))
except ValueError:
params['port'] = 3306
params['db'] = matches.group(5)
params['user'] = matches.group(1)
params['passwd'] = matches.group(2)
nickname = matches.group(6)
if len(nickname):
nickname += '.'
self.connect(params=params)
if self.config['slave']:
self.slave_load(nickname, 'thread/sql/slave_io')
self.slave_load(nickname, 'thread/sql/slave_sql')
self.db.close()
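# Hedged illustration (not part of the original collector): how collect()
# splits a configured host string into connection parameters. The credentials,
# host and nickname below are hypothetical.
def _example_parse_host(host="user:secret@db1.example.com:3306/performance_schema/primary"):
    """Return the connection params collect() would derive from *host*."""
    matches = re.search('^([^:]*):([^@]*)@([^:]*):?([^/]*)/([^/]*)/?(.*)$', host)
    return {
        'user': matches.group(1),
        'passwd': matches.group(2),
        'host': matches.group(3),
        'port': int(matches.group(4) or 3306),
        'db': matches.group(5),
        'nickname': matches.group(6),
    }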
|
from matchzoo.engine.base_task import BaseTask
class Classification(BaseTask):
"""Classification task.
Examples:
>>> classification_task = Classification(num_classes=2)
>>> classification_task.metrics = ['precision']
>>> classification_task.num_classes
2
>>> classification_task.output_shape
(2,)
>>> classification_task.output_dtype
<class 'int'>
>>> print(classification_task)
Classification Task with 2 classes
"""
def __init__(self, num_classes: int = 2, **kwargs):
"""Classification task."""
super().__init__(**kwargs)
if not isinstance(num_classes, int):
raise TypeError("Number of classes must be an integer.")
if num_classes < 2:
raise ValueError("Number of classes can't be smaller than 2")
self._num_classes = num_classes
@property
def num_classes(self) -> int:
""":return: number of classes to classify."""
return self._num_classes
@classmethod
def list_available_losses(cls) -> list:
""":return: a list of available losses."""
return ['categorical_crossentropy']
@classmethod
def list_available_metrics(cls) -> list:
""":return: a list of available metrics."""
return ['acc']
@property
def output_shape(self) -> tuple:
""":return: output shape of a single sample of the task."""
        return (self._num_classes,)
@property
def output_dtype(self):
""":return: target data type, expect `int` as output."""
return int
def __str__(self):
""":return: Task name as string."""
return f'Classification Task with {self._num_classes} classes'
|
from homeassistant.const import PERCENTAGE, STATE_UNKNOWN
from homeassistant.helpers import device_registry
async def test_sensor(hass, create_registrations, webhook_client):
"""Test that sensors can be registered and updated."""
webhook_id = create_registrations[1]["webhook_id"]
webhook_url = f"/api/webhook/{webhook_id}"
reg_resp = await webhook_client.post(
webhook_url,
json={
"type": "register_sensor",
"data": {
"attributes": {"foo": "bar"},
"device_class": "battery",
"icon": "mdi:battery",
"name": "Battery State",
"state": 100,
"type": "sensor",
"unique_id": "battery_state",
"unit_of_measurement": PERCENTAGE,
},
},
)
assert reg_resp.status == 201
json = await reg_resp.json()
assert json == {"success": True}
await hass.async_block_till_done()
entity = hass.states.get("sensor.test_1_battery_state")
assert entity is not None
assert entity.attributes["device_class"] == "battery"
assert entity.attributes["icon"] == "mdi:battery"
assert entity.attributes["unit_of_measurement"] == PERCENTAGE
assert entity.attributes["foo"] == "bar"
assert entity.domain == "sensor"
assert entity.name == "Test 1 Battery State"
assert entity.state == "100"
update_resp = await webhook_client.post(
webhook_url,
json={
"type": "update_sensor_states",
"data": [
{
"icon": "mdi:battery-unknown",
"state": 123,
"type": "sensor",
"unique_id": "battery_state",
},
                # This invalid data should not invalidate the whole request
{"type": "sensor", "unique_id": "invalid_state", "invalid": "data"},
],
},
)
assert update_resp.status == 200
json = await update_resp.json()
assert json["invalid_state"]["success"] is False
updated_entity = hass.states.get("sensor.test_1_battery_state")
assert updated_entity.state == "123"
dev_reg = await device_registry.async_get_registry(hass)
assert len(dev_reg.devices) == len(create_registrations)
async def test_sensor_must_register(hass, create_registrations, webhook_client):
"""Test that sensors must be registered before updating."""
webhook_id = create_registrations[1]["webhook_id"]
webhook_url = f"/api/webhook/{webhook_id}"
resp = await webhook_client.post(
webhook_url,
json={
"type": "update_sensor_states",
"data": [{"state": 123, "type": "sensor", "unique_id": "battery_state"}],
},
)
assert resp.status == 200
json = await resp.json()
assert json["battery_state"]["success"] is False
assert json["battery_state"]["error"]["code"] == "not_registered"
async def test_sensor_id_no_dupes(hass, create_registrations, webhook_client, caplog):
"""Test that a duplicate unique ID in registration updates the sensor."""
webhook_id = create_registrations[1]["webhook_id"]
webhook_url = f"/api/webhook/{webhook_id}"
payload = {
"type": "register_sensor",
"data": {
"attributes": {"foo": "bar"},
"device_class": "battery",
"icon": "mdi:battery",
"name": "Battery State",
"state": 100,
"type": "sensor",
"unique_id": "battery_state",
"unit_of_measurement": PERCENTAGE,
},
}
reg_resp = await webhook_client.post(webhook_url, json=payload)
assert reg_resp.status == 201
reg_json = await reg_resp.json()
assert reg_json == {"success": True}
await hass.async_block_till_done()
assert "Re-register" not in caplog.text
entity = hass.states.get("sensor.test_1_battery_state")
assert entity is not None
assert entity.attributes["device_class"] == "battery"
assert entity.attributes["icon"] == "mdi:battery"
assert entity.attributes["unit_of_measurement"] == PERCENTAGE
assert entity.attributes["foo"] == "bar"
assert entity.domain == "sensor"
assert entity.name == "Test 1 Battery State"
assert entity.state == "100"
payload["data"]["state"] = 99
dupe_resp = await webhook_client.post(webhook_url, json=payload)
assert dupe_resp.status == 201
dupe_reg_json = await dupe_resp.json()
assert dupe_reg_json == {"success": True}
await hass.async_block_till_done()
assert "Re-register" in caplog.text
entity = hass.states.get("sensor.test_1_battery_state")
assert entity is not None
assert entity.attributes["device_class"] == "battery"
assert entity.attributes["icon"] == "mdi:battery"
assert entity.attributes["unit_of_measurement"] == PERCENTAGE
assert entity.attributes["foo"] == "bar"
assert entity.domain == "sensor"
assert entity.name == "Test 1 Battery State"
assert entity.state == "99"
async def test_register_sensor_no_state(hass, create_registrations, webhook_client):
"""Test that sensors can be registered, when there is no (unknown) state."""
webhook_id = create_registrations[1]["webhook_id"]
webhook_url = f"/api/webhook/{webhook_id}"
reg_resp = await webhook_client.post(
webhook_url,
json={
"type": "register_sensor",
"data": {
"name": "Battery State",
"state": None,
"type": "sensor",
"unique_id": "battery_state",
},
},
)
assert reg_resp.status == 201
json = await reg_resp.json()
assert json == {"success": True}
await hass.async_block_till_done()
entity = hass.states.get("sensor.test_1_battery_state")
assert entity is not None
assert entity.domain == "sensor"
assert entity.name == "Test 1 Battery State"
assert entity.state == STATE_UNKNOWN
reg_resp = await webhook_client.post(
webhook_url,
json={
"type": "register_sensor",
"data": {
"name": "Backup Battery State",
"type": "sensor",
"unique_id": "backup_battery_state",
},
},
)
assert reg_resp.status == 201
json = await reg_resp.json()
assert json == {"success": True}
await hass.async_block_till_done()
entity = hass.states.get("sensor.test_1_backup_battery_state")
assert entity
assert entity.domain == "sensor"
assert entity.name == "Test 1 Backup Battery State"
assert entity.state == STATE_UNKNOWN
async def test_update_sensor_no_state(hass, create_registrations, webhook_client):
"""Test that sensors can be updated, when there is no (unknown) state."""
webhook_id = create_registrations[1]["webhook_id"]
webhook_url = f"/api/webhook/{webhook_id}"
reg_resp = await webhook_client.post(
webhook_url,
json={
"type": "register_sensor",
"data": {
"name": "Battery State",
"state": 100,
"type": "sensor",
"unique_id": "battery_state",
},
},
)
assert reg_resp.status == 201
json = await reg_resp.json()
assert json == {"success": True}
await hass.async_block_till_done()
entity = hass.states.get("sensor.test_1_battery_state")
assert entity is not None
assert entity.state == "100"
update_resp = await webhook_client.post(
webhook_url,
json={
"type": "update_sensor_states",
"data": [{"state": None, "type": "sensor", "unique_id": "battery_state"}],
},
)
assert update_resp.status == 200
json = await update_resp.json()
assert json == {"battery_state": {"success": True}}
updated_entity = hass.states.get("sensor.test_1_battery_state")
assert updated_entity.state == STATE_UNKNOWN
|
from typing import List
import pytest
import voluptuous as vol
from homeassistant.components.climate import (
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SET_TEMPERATURE_SCHEMA,
ClimateDevice,
ClimateEntity,
)
from tests.async_mock import MagicMock
from tests.common import async_mock_service
async def test_set_temp_schema_no_req(hass, caplog):
"""Test the set temperature schema with missing required data."""
domain = "climate"
service = "test_set_temperature"
schema = SET_TEMPERATURE_SCHEMA
calls = async_mock_service(hass, domain, service, schema)
data = {"hvac_mode": "off", "entity_id": ["climate.test_id"]}
with pytest.raises(vol.Invalid):
await hass.services.async_call(domain, service, data)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_set_temp_schema(hass, caplog):
"""Test the set temperature schema with ok required data."""
domain = "climate"
service = "test_set_temperature"
schema = SET_TEMPERATURE_SCHEMA
calls = async_mock_service(hass, domain, service, schema)
data = {"temperature": 20.0, "hvac_mode": "heat", "entity_id": ["climate.test_id"]}
await hass.services.async_call(domain, service, data)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[-1].data == data
class MockClimateEntity(ClimateEntity):
"""Mock Climate device to use in tests."""
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
return HVAC_MODE_HEAT
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return [HVAC_MODE_OFF, HVAC_MODE_HEAT]
def turn_on(self) -> None:
"""Turn on."""
def turn_off(self) -> None:
"""Turn off."""
async def test_sync_turn_on(hass):
"""Test if async turn_on calls sync turn_on."""
climate = MockClimateEntity()
climate.hass = hass
climate.turn_on = MagicMock()
await climate.async_turn_on()
assert climate.turn_on.called
async def test_sync_turn_off(hass):
"""Test if async turn_off calls sync turn_off."""
climate = MockClimateEntity()
climate.hass = hass
climate.turn_off = MagicMock()
await climate.async_turn_off()
assert climate.turn_off.called
def test_deprecated_base_class(caplog):
"""Test deprecated base class."""
class CustomClimate(ClimateDevice):
"""Custom climate entity class."""
@property
def hvac_mode(self):
pass
@property
def hvac_modes(self):
pass
CustomClimate()
assert "ClimateDevice is deprecated, modify CustomClimate" in caplog.text
|
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _data_path_doc,
_get_version, _version_doc)
has_opm_data = partial(has_dataset, name='opm')
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='opm',
download=download)
data_path.__doc__ = _data_path_doc.format(name='opm',
                                           conf='MNE_DATASETS_OPM_PATH')
def get_version(): # noqa: D103
return _get_version('opm')
get_version.__doc__ = _version_doc.format(name='opm')
|
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.ssd import Multibox
from chainercv.links.model.ssd import SSD
from chainercv.utils import assert_is_detection_link
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
class DummyExtractor(chainer.Link):
insize = 32
grids = (10, 4, 1)
def forward(self, x):
n_sample = x.shape[0]
n_dims = (32, 16, 8)
return [
chainer.Variable(
_random_array(self.xp, (n_sample, n_dim, grid, grid)))
for grid, n_dim in zip(self.grids, n_dims)]
class DummySSD(SSD):
def __init__(self, n_fg_class):
super(DummySSD, self).__init__(
extractor=DummyExtractor(),
multibox=Multibox(
n_class=n_fg_class + 1,
aspect_ratios=((2,), (2, 3), (2,))),
steps=(0.1, 0.25, 1),
sizes=(0.1, 0.25, 1, 1.2),
mean=np.array((0, 1, 2)).reshape((-1, 1, 1)))
@testing.parameterize(
{'n_fg_class': 1},
{'n_fg_class': 5},
{'n_fg_class': 20},
)
class TestSSD(unittest.TestCase):
def setUp(self):
self.link = DummySSD(n_fg_class=self.n_fg_class)
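        # DummyExtractor's grids are 10x10, 4x4 and 1x1; with aspect ratios of
        # (2,), (2, 3) and (2,) the multibox places 2 + 2 * len(aspect_ratios)
        # default boxes per cell, i.e. 4, 6 and 4 boxes respectively.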
self.n_bbox = 10 * 10 * 4 + 4 * 4 * 6 + 1 * 1 * 4
def _check_call(self):
x = _random_array(self.link.xp, (1, 3, 32, 32))
mb_locs, mb_confs = self.link(x)
self.assertIsInstance(mb_locs, chainer.Variable)
self.assertIsInstance(mb_locs.array, self.link.xp.ndarray)
self.assertEqual(mb_locs.shape, (1, self.n_bbox, 4))
self.assertIsInstance(mb_confs, chainer.Variable)
self.assertIsInstance(mb_confs.array, self.link.xp.ndarray)
self.assertEqual(mb_confs.shape, (1, self.n_bbox, self.n_fg_class + 1))
def test_call_cpu(self):
self._check_call()
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
def test_prepare(self):
img = np.random.randint(0, 255, size=(3, 480, 640))
img = self.link._prepare(img)
self.assertEqual(img.shape, (3, self.link.insize, self.link.insize))
def test_use_preset(self):
self.link.nms_thresh = 0
self.link.score_thresh = 0
self.link.use_preset('visualize')
self.assertEqual(self.link.nms_thresh, 0.45)
self.assertEqual(self.link.score_thresh, 0.6)
self.link.nms_thresh = 0
self.link.score_thresh = 0
self.link.use_preset('evaluate')
self.assertEqual(self.link.nms_thresh, 0.45)
self.assertEqual(self.link.score_thresh, 0.01)
with self.assertRaises(ValueError):
self.link.use_preset('unknown')
def test_predict_cpu(self):
assert_is_detection_link(self.link, self.n_fg_class)
@attr.gpu
def test_predict_gpu(self):
self.link.to_gpu()
assert_is_detection_link(self.link, self.n_fg_class)
testing.run_module(__name__, __file__)
|
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import cast
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_creating_entry_sets_up_media_player(hass):
"""Test setting up Cast loads the media player."""
with patch(
"homeassistant.components.cast.media_player.async_setup_entry",
return_value=True,
) as mock_setup, patch(
"pychromecast.discovery.discover_chromecasts", return_value=(True, None)
), patch(
"pychromecast.discovery.stop_discovery"
):
result = await hass.config_entries.flow.async_init(
cast.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
# Confirmation form
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
async def test_configuring_cast_creates_entry(hass):
"""Test that specifying config will create an entry."""
with patch(
"homeassistant.components.cast.async_setup_entry", return_value=True
) as mock_setup:
await async_setup_component(
hass, cast.DOMAIN, {"cast": {"some_config": "to_trigger_import"}}
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
async def test_not_configuring_cast_not_creates_entry(hass):
"""Test that no config will not create an entry."""
with patch(
"homeassistant.components.cast.async_setup_entry", return_value=True
) as mock_setup:
await async_setup_component(hass, cast.DOMAIN, {})
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 0
|
import base64
import datetime
import io
import logging
import posixpath
import pprint
import random
import sys
import time
import xml.etree.ElementTree as ET
import zlib
from http import client
import pkg_resources
from radicale import (auth, httputils, log, pathutils, rights, storage, web,
xmlutils)
from radicale.app.delete import ApplicationDeleteMixin
from radicale.app.get import ApplicationGetMixin
from radicale.app.head import ApplicationHeadMixin
from radicale.app.mkcalendar import ApplicationMkcalendarMixin
from radicale.app.mkcol import ApplicationMkcolMixin
from radicale.app.move import ApplicationMoveMixin
from radicale.app.options import ApplicationOptionsMixin
from radicale.app.post import ApplicationPostMixin
from radicale.app.propfind import ApplicationPropfindMixin
from radicale.app.proppatch import ApplicationProppatchMixin
from radicale.app.put import ApplicationPutMixin
from radicale.app.report import ApplicationReportMixin
from radicale.log import logger
# WORKAROUND: https://github.com/tiran/defusedxml/issues/54
import defusedxml.ElementTree as DefusedET # isort: skip
sys.modules["xml.etree"].ElementTree = ET
VERSION = pkg_resources.get_distribution("radicale").version
class Application(
ApplicationDeleteMixin, ApplicationGetMixin, ApplicationHeadMixin,
ApplicationMkcalendarMixin, ApplicationMkcolMixin,
ApplicationMoveMixin, ApplicationOptionsMixin,
ApplicationPropfindMixin, ApplicationProppatchMixin,
ApplicationPostMixin, ApplicationPutMixin,
ApplicationReportMixin):
"""WSGI application."""
def __init__(self, configuration):
"""Initialize Application.
``configuration`` see ``radicale.config`` module.
The ``configuration`` must not change during the lifetime of
this object, it is kept as an internal reference.
"""
super().__init__()
self.configuration = configuration
self._auth = auth.load(configuration)
self._storage = storage.load(configuration)
self._rights = rights.load(configuration)
self._web = web.load(configuration)
self._encoding = configuration.get("encoding", "request")
def _headers_log(self, environ):
"""Sanitize headers for logging."""
request_environ = dict(environ)
# Mask passwords
mask_passwords = self.configuration.get("logging", "mask_passwords")
authorization = request_environ.get("HTTP_AUTHORIZATION", "")
if mask_passwords and authorization.startswith("Basic"):
request_environ["HTTP_AUTHORIZATION"] = "Basic **masked**"
if request_environ.get("HTTP_COOKIE"):
request_environ["HTTP_COOKIE"] = "**masked**"
return request_environ
def __call__(self, environ, start_response):
with log.register_stream(environ["wsgi.errors"]):
try:
status, headers, answers = self._handle_request(environ)
except Exception as e:
try:
method = str(environ["REQUEST_METHOD"])
except Exception:
method = "unknown"
try:
path = str(environ.get("PATH_INFO", ""))
except Exception:
path = ""
logger.error("An exception occurred during %s request on %r: "
"%s", method, path, e, exc_info=True)
status, headers, answer = httputils.INTERNAL_SERVER_ERROR
answer = answer.encode("ascii")
status = "%d %s" % (
status.value, client.responses.get(status, "Unknown"))
headers = [
("Content-Length", str(len(answer)))] + list(headers)
answers = [answer]
start_response(status, headers)
return answers
def _handle_request(self, environ):
"""Manage a request."""
def response(status, headers=(), answer=None):
headers = dict(headers)
# Set content length
if answer:
if hasattr(answer, "encode"):
logger.debug("Response content:\n%s", answer)
headers["Content-Type"] += "; charset=%s" % self._encoding
answer = answer.encode(self._encoding)
accept_encoding = [
encoding.strip() for encoding in
environ.get("HTTP_ACCEPT_ENCODING", "").split(",")
if encoding.strip()]
if "gzip" in accept_encoding:
zcomp = zlib.compressobj(wbits=16 + zlib.MAX_WBITS)
answer = zcomp.compress(answer) + zcomp.flush()
headers["Content-Encoding"] = "gzip"
headers["Content-Length"] = str(len(answer))
# Add extra headers set in configuration
for key in self.configuration.options("headers"):
headers[key] = self.configuration.get("headers", key)
# Start response
time_end = datetime.datetime.now()
status = "%d %s" % (
status, client.responses.get(status, "Unknown"))
logger.info(
"%s response status for %r%s in %.3f seconds: %s",
environ["REQUEST_METHOD"], environ.get("PATH_INFO", ""),
depthinfo, (time_end - time_begin).total_seconds(), status)
# Return response content
return status, list(headers.items()), [answer] if answer else []
remote_host = "unknown"
if environ.get("REMOTE_HOST"):
remote_host = repr(environ["REMOTE_HOST"])
elif environ.get("REMOTE_ADDR"):
remote_host = environ["REMOTE_ADDR"]
if environ.get("HTTP_X_FORWARDED_FOR"):
remote_host = "%s (forwarded for %r)" % (
remote_host, environ["HTTP_X_FORWARDED_FOR"])
remote_useragent = ""
if environ.get("HTTP_USER_AGENT"):
remote_useragent = " using %r" % environ["HTTP_USER_AGENT"]
depthinfo = ""
if environ.get("HTTP_DEPTH"):
depthinfo = " with depth %r" % environ["HTTP_DEPTH"]
time_begin = datetime.datetime.now()
logger.info(
"%s request for %r%s received from %s%s",
environ["REQUEST_METHOD"], environ.get("PATH_INFO", ""), depthinfo,
remote_host, remote_useragent)
headers = pprint.pformat(self._headers_log(environ))
logger.debug("Request headers:\n%s", headers)
# Let reverse proxies overwrite SCRIPT_NAME
if "HTTP_X_SCRIPT_NAME" in environ:
# script_name must be removed from PATH_INFO by the client.
unsafe_base_prefix = environ["HTTP_X_SCRIPT_NAME"]
logger.debug("Script name overwritten by client: %r",
unsafe_base_prefix)
else:
# SCRIPT_NAME is already removed from PATH_INFO, according to the
# WSGI specification.
unsafe_base_prefix = environ.get("SCRIPT_NAME", "")
# Sanitize base prefix
base_prefix = pathutils.sanitize_path(unsafe_base_prefix).rstrip("/")
logger.debug("Sanitized script name: %r", base_prefix)
        # Sanitize request URI (an empty path from the WSGI server means the
        # URL targets the application root without a trailing slash)
path = pathutils.sanitize_path(environ.get("PATH_INFO", ""))
logger.debug("Sanitized path: %r", path)
# Get function corresponding to method
function = getattr(
self, "do_%s" % environ["REQUEST_METHOD"].upper(), None)
if not function:
return response(*httputils.METHOD_NOT_ALLOWED)
# If "/.well-known" is not available, clients query "/"
if path == "/.well-known" or path.startswith("/.well-known/"):
return response(*httputils.NOT_FOUND)
# Ask authentication backend to check rights
login = password = ""
external_login = self._auth.get_external_login(environ)
authorization = environ.get("HTTP_AUTHORIZATION", "")
if external_login:
login, password = external_login
login, password = login or "", password or ""
elif authorization.startswith("Basic"):
authorization = authorization[len("Basic"):].strip()
login, password = httputils.decode_request(
self.configuration, environ, base64.b64decode(
authorization.encode("ascii"))).split(":", 1)
user = self._auth.login(login, password) or "" if login else ""
if user and login == user:
logger.info("Successful login: %r", user)
elif user:
logger.info("Successful login: %r -> %r", login, user)
elif login:
logger.warning("Failed login attempt from %s: %r",
remote_host, login)
# Random delay to avoid timing oracles and bruteforce attacks
delay = self.configuration.get("auth", "delay")
if delay > 0:
random_delay = delay * (0.5 + random.random())
logger.debug("Sleeping %.3f seconds", random_delay)
time.sleep(random_delay)
if user and not pathutils.is_safe_path_component(user):
# Prevent usernames like "user/calendar.ics"
logger.info("Refused unsafe username: %r", user)
user = ""
# Create principal collection
if user:
principal_path = "/%s/" % user
with self._storage.acquire_lock("r", user):
principal = next(self._storage.discover(
principal_path, depth="1"), None)
if not principal:
if "W" in self._rights.authorization(user, principal_path):
with self._storage.acquire_lock("w", user):
try:
self._storage.create_collection(principal_path)
except ValueError as e:
logger.warning("Failed to create principal "
"collection %r: %s", user, e)
user = ""
else:
logger.warning("Access to principal path %r denied by "
"rights backend", principal_path)
if self.configuration.get("server", "_internal_server"):
# Verify content length
content_length = int(environ.get("CONTENT_LENGTH") or 0)
if content_length:
max_content_length = self.configuration.get(
"server", "max_content_length")
if max_content_length and content_length > max_content_length:
logger.info("Request body too large: %d", content_length)
return response(*httputils.REQUEST_ENTITY_TOO_LARGE)
if not login or user:
status, headers, answer = function(
environ, base_prefix, path, user)
if (status, headers, answer) == httputils.NOT_ALLOWED:
logger.info("Access to %r denied for %s", path,
repr(user) if user else "anonymous user")
else:
status, headers, answer = httputils.NOT_ALLOWED
if ((status, headers, answer) == httputils.NOT_ALLOWED and not user and
not external_login):
# Unknown or unauthorized user
logger.debug("Asking client for authentication")
status = client.UNAUTHORIZED
realm = self.configuration.get("auth", "realm")
headers = dict(headers)
headers.update({
"WWW-Authenticate":
"Basic realm=\"%s\"" % realm})
return response(status, headers, answer)
def _read_xml_request_body(self, environ):
content = httputils.decode_request(
self.configuration, environ,
httputils.read_raw_request_body(self.configuration, environ))
if not content:
return None
try:
xml_content = DefusedET.fromstring(content)
except ET.ParseError as e:
logger.debug("Request content (Invalid XML):\n%s", content)
raise RuntimeError("Failed to parse XML: %s" % e) from e
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Request content:\n%s",
xmlutils.pretty_xml(xml_content))
return xml_content
def _xml_response(self, xml_content):
if logger.isEnabledFor(logging.DEBUG):
logger.debug("Response content:\n%s",
xmlutils.pretty_xml(xml_content))
f = io.BytesIO()
ET.ElementTree(xml_content).write(f, encoding=self._encoding,
xml_declaration=True)
return f.getvalue()
def _webdav_error_response(self, status, human_tag):
"""Generate XML error response."""
headers = {"Content-Type": "text/xml; charset=%s" % self._encoding}
content = self._xml_response(xmlutils.webdav_error(human_tag))
return status, headers, content
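# Hedged illustration (not part of Radicale): the gzip encoding applied by the
# response() helper inside Application._handle_request above. wbits=16+MAX_WBITS
# asks zlib for a gzip (rather than raw deflate) stream; the payload below is
# hypothetical.
def _example_gzip_body(body=b"BEGIN:VCALENDAR\r\nEND:VCALENDAR\r\n"):
    """Return *body* compressed the same way the response() helper does."""
    zcomp = zlib.compressobj(wbits=16 + zlib.MAX_WBITS)
    return zcomp.compress(body) + zcomp.flush()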
class Access:
"""Helper class to check access rights of an item"""
def __init__(self, rights, user, path):
self._rights = rights
self.user = user
self.path = path
self.parent_path = pathutils.unstrip_path(
posixpath.dirname(pathutils.strip_path(path)), True)
self.permissions = self._rights.authorization(self.user, self.path)
self._parent_permissions = None
@property
def parent_permissions(self):
if self.path == self.parent_path:
return self.permissions
if self._parent_permissions is None:
self._parent_permissions = self._rights.authorization(
self.user, self.parent_path)
return self._parent_permissions
def check(self, permission, item=None):
if permission not in "rw":
raise ValueError("Invalid permission argument: %r" % permission)
if not item:
permissions = permission + permission.upper()
parent_permissions = permission
elif isinstance(item, storage.BaseCollection):
if item.get_meta("tag"):
permissions = permission
else:
permissions = permission.upper()
parent_permissions = ""
else:
permissions = ""
parent_permissions = permission
return bool(rights.intersect(self.permissions, permissions) or (
self.path != self.parent_path and
rights.intersect(self.parent_permissions, parent_permissions)))
|
import sys
from marathon.exceptions import InternalServerError
from marathon.exceptions import MarathonError
from paasta_tools import marathon_tools
from paasta_tools.metrics.metastatus_lib import assert_marathon_apps
def check_marathon_apps():
clients = marathon_tools.get_list_of_marathon_clients()
if not clients:
print("UNKNOWN: Failed to load marathon clients.")
sys.exit(3)
try:
result = assert_marathon_apps(clients)
except (MarathonError, InternalServerError, ValueError) as e:
print("CRITICAL: Unable to connect to Marathon cluster: %s" % e)
sys.exit(2)
if result.healthy:
print("OK: " + result.message)
sys.exit(0)
else:
print(result.message)
sys.exit(2)
if __name__ == "__main__":
check_marathon_apps()
|
import logging
from pyruckus import Ruckus
from pyruckus.exceptions import AuthenticationError
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from .const import ( # pylint:disable=unused-import
API_SERIAL,
API_SYSTEM_OVERVIEW,
DOMAIN,
)
_LOGGER = logging.getLogger(__package__)
DATA_SCHEMA = vol.Schema({"host": str, "username": str, "password": str})
def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
try:
ruckus = Ruckus(data[CONF_HOST], data[CONF_USERNAME], data[CONF_PASSWORD])
except AuthenticationError as error:
raise InvalidAuth from error
except ConnectionError as error:
raise CannotConnect from error
mesh_name = ruckus.mesh_name()
system_info = ruckus.system_info()
try:
host_serial = system_info[API_SYSTEM_OVERVIEW][API_SERIAL]
except KeyError as error:
raise CannotConnect from error
return {
"title": mesh_name,
"serial": host_serial,
}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Ruckus Unleashed."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await self.hass.async_add_executor_job(
validate_input, self.hass, user_input
)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
else:
await self.async_set_unique_id(info["serial"])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
from weblate.checks.models import CHECKS
def highlight_string(source, unit):
"""Return highlights for a string."""
if unit is None:
return []
highlights = []
for check in CHECKS:
if not CHECKS[check].target:
continue
highlights += CHECKS[check].check_highlight(source, unit)
# Remove empty strings
highlights = [highlight for highlight in highlights if highlight[2]]
# Sort by order in string
highlights.sort(key=lambda x: x[0])
# Remove overlapping ones
for hl_idx in range(0, len(highlights)):
if hl_idx >= len(highlights):
break
elref = highlights[hl_idx]
for hl_idx_next in range(hl_idx + 1, len(highlights)):
if hl_idx_next >= len(highlights):
break
eltest = highlights[hl_idx_next]
if eltest[0] >= elref[0] and eltest[0] < elref[1]:
# Elements overlap, remove inner one
highlights.pop(hl_idx_next)
elif eltest[0] > elref[1]:
# This is not an overlapping element
break
return highlights
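# Hedged illustration (not part of Weblate): the overlap rule applied above.
# Highlights are (start, end, text) tuples; the values below are hypothetical.
def _example_overlap_removal(highlights=((0, 2, "%s"), (1, 3, "s "), (10, 12, "%d"))):
    """Keep highlights in start order, dropping any that begin inside the
    previously kept one, mirroring the loop in highlight_string()."""
    kept = []
    for start, end, text in sorted(highlights, key=lambda item: item[0]):
        if kept and start < kept[-1][1]:
            continue  # starts inside the previous highlight, drop it
        kept.append((start, end, text))
    return kept  # -> [(0, 2, "%s"), (10, 12, "%d")]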
|
from flask import request, make_response
import flask
from flask.views import MethodView
from sqlalchemy import asc, desc
# Application imports
from sandman2.exception import NotFoundException, BadRequestException
from sandman2.model import db
from sandman2.decorators import etag, validate_fields
def add_link_headers(response, links):
"""Return *response* with the proper link headers set, based on the contents
of *links*.
:param response: :class:`flask.Response` response object for links to be
added
:param dict links: Dictionary of links to be added
    :rtype: :class:`flask.Response`
"""
link_string = '<{}>; rel=self'.format(links['self'])
for link in links.values():
link_string += ', <{}>; rel=related'.format(link)
response.headers['Link'] = link_string
return response
def jsonify(resource):
"""Return a Flask ``Response`` object containing a
JSON representation of *resource*.
:param resource: The resource to act as the basis of the response
"""
response = flask.jsonify(resource.to_dict())
response = add_link_headers(response, resource.links())
return response
def is_valid_method(model, resource=None):
"""Return the error message to be sent to the client if the current
    request fails any user-defined validation."""
validation_function_name = 'is_valid_{}'.format(
request.method.lower())
if hasattr(model, validation_function_name):
return getattr(model, validation_function_name)(request, resource)
class Service(MethodView):
"""The *Service* class is a generic extension of Flask's *MethodView*,
providing default RESTful functionality for a given ORM resource.
Each service has an associated *__model__* attribute which represents the
ORM resource it exposes. Services are JSON-only. HTML-based representation
is available through the admin interface.
"""
#: The sandman2.model.Model-derived class to expose
__model__ = None
#: The string used to describe the elements when a collection is
#: returned.
__json_collection_name__ = 'resources'
def delete(self, resource_id):
"""Return an HTTP response object resulting from a HTTP DELETE call.
:param resource_id: The value of the resource's primary key
"""
resource = self._resource(resource_id)
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
db.session().delete(resource)
db.session().commit()
return self._no_content_response()
@etag
def get(self, resource_id=None):
"""Return an HTTP response object resulting from an HTTP GET call.
If *resource_id* is provided, return just the single resource.
Otherwise, return the full collection.
:param resource_id: The value of the resource's primary key
"""
if request.path.endswith('meta'):
return self._meta()
if resource_id is None:
error_message = is_valid_method(self.__model__)
if error_message:
raise BadRequestException(error_message)
if 'export' in request.args:
return self._export(self._all_resources())
return flask.jsonify({
self.__json_collection_name__: self._all_resources()
})
else:
resource = self._resource(resource_id)
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
return jsonify(resource)
def patch(self, resource_id):
"""Return an HTTP response object resulting from an HTTP PATCH call.
:returns: ``HTTP 200`` if the resource already exists
:returns: ``HTTP 400`` if the request is malformed
:returns: ``HTTP 404`` if the resource is not found
:param resource_id: The value of the resource's primary key
"""
resource = self._resource(resource_id)
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
if not request.json:
raise BadRequestException('No JSON data received')
resource.update(request.json)
db.session().merge(resource)
db.session().commit()
return jsonify(resource)
@validate_fields
def post(self):
"""Return the JSON representation of a new resource created through
an HTTP POST call.
:returns: ``HTTP 201`` if a resource is properly created
:returns: ``HTTP 204`` if the resource already exists
:returns: ``HTTP 400`` if the request is malformed or missing data
"""
resource = self.__model__.query.filter_by(**request.json).first()
if resource:
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
return self._no_content_response()
resource = self.__model__(**request.json) # pylint: disable=not-callable
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
db.session().add(resource)
db.session().commit()
return self._created_response(resource)
def put(self, resource_id):
"""Return the JSON representation of a new resource created or updated
through an HTTP PUT call.
If resource_id is not provided, it is assumed the primary key field is
included and a totally new resource is created. Otherwise, the existing
resource referred to by *resource_id* is updated with the provided JSON
data. This method is idempotent.
:returns: ``HTTP 201`` if a new resource is created
:returns: ``HTTP 200`` if a resource is updated
:returns: ``HTTP 400`` if the request is malformed or missing data
"""
resource = self.__model__.query.get(resource_id)
if resource:
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
resource.update(request.json)
db.session().merge(resource)
db.session().commit()
return jsonify(resource)
resource = self.__model__(**request.json) # pylint: disable=not-callable
error_message = is_valid_method(self.__model__, resource)
if error_message:
raise BadRequestException(error_message)
db.session().add(resource)
db.session().commit()
return self._created_response(resource)
def _meta(self):
"""Return a description of this resource as reported by the
database."""
return flask.jsonify(self.__model__.description())
def _resource(self, resource_id):
"""Return the ``sandman2.model.Model`` instance with the given
*resource_id*.
:rtype: :class:`sandman2.model.Model`
"""
resource = self.__model__.query.get(resource_id)
if not resource:
raise NotFoundException()
return resource
def _all_resources(self):
"""Return the complete collection of resources as a list of
dictionaries.
:rtype: :class:`sandman2.model.Model`
"""
queryset = self.__model__.query
args = {k: v for (k, v) in request.args.items() if k not in ('page', 'export')}
limit = None
if args:
filters = []
order = []
for key, value in args.items():
if value.startswith('%'):
filters.append(getattr(self.__model__, key).like(str(value), escape='/'))
elif key == 'sort':
direction = desc if value.startswith('-') else asc
order.append(direction(getattr(self.__model__, value.lstrip('-'))))
elif key == 'limit':
limit = int(value)
elif hasattr(self.__model__, key):
filters.append(getattr(self.__model__, key) == value)
else:
raise BadRequestException('Invalid field [{}]'.format(key))
queryset = queryset.filter(*filters).order_by(*order)
if 'page' in request.args:
resources = queryset.paginate(page=int(request.args['page']), per_page=limit).items
else:
queryset = queryset.limit(limit)
resources = queryset.all()
return [r.to_dict() for r in resources]
def _export(self, collection):
"""Return a CSV of the resources in *collection*.
:param list collection: A list of resources represented by dicts
"""
fieldnames = collection[0].keys()
faux_csv = ','.join(fieldnames) + '\r\n'
for resource in collection:
faux_csv += ','.join((str(x) for x in resource.values())) + '\r\n'
response = make_response(faux_csv)
response.mimetype = 'text/csv'
return response
@staticmethod
def _no_content_response():
"""Return an HTTP 204 "No Content" response.
:returns: HTTP Response
"""
response = make_response()
response.status_code = 204
return response
@staticmethod
def _created_response(resource):
"""Return an HTTP 201 "Created" response.
:returns: HTTP Response
"""
response = jsonify(resource)
response.status_code = 201
return response
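# Illustrative usage sketch (not part of sandman2 itself): how the
# query-string options handled by _all_resources() above map onto requests
# against a running service. The host, port, and "user" resource name are
# assumptions made purely for illustration.
def _example_collection_queries():
    """Fetch filtered, sorted, limited, and paginated collections."""
    import requests  # assumed available in the client environment
    base = "http://localhost:5000/user"
    # A value starting with '%' becomes a SQL LIKE filter on that column.
    like_match = requests.get(base, params={"name": "%smith%"}).json()
    # 'sort' orders the collection; a leading '-' means descending.
    # 'limit' caps the number of rows returned.
    newest = requests.get(base, params={"sort": "-id", "limit": 10}).json()
    # 'page' switches to paginated results, using 'limit' as the page size.
    page_two = requests.get(base, params={"sort": "-id", "limit": 10, "page": 2}).json()
    return like_match, newest, page_two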
|
import asyncio
from homeassistant.components.binary_sensor import DOMAIN as DOMAIN_BINARY_SENSOR
from homeassistant.components.sensor import DOMAIN as DOMAIN_SENSOR
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers import aiohttp_client
from .const import DOMAIN
from .hub import GTIHub
PLATFORMS = [DOMAIN_SENSOR, DOMAIN_BINARY_SENSOR]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the HVV component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up HVV from a config entry."""
hub = GTIHub(
entry.data[CONF_HOST],
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
aiohttp_client.async_get_clientsession(hass),
)
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = hub
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
return unload_ok
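# Hedged sketch: a platform forwarded to by async_forward_entry_setup above
# retrieves the shared GTIHub through hass.data. Entity creation is omitted;
# only the lookup pattern is shown.
async def _example_platform_setup_entry(hass, entry, async_add_entities):
    """Illustrative platform setup using the hub stored by async_setup_entry."""
    hub = hass.data[DOMAIN][entry.entry_id]
    # Build sensor/binary_sensor entities around `hub` and register them:
    # async_add_entities(entities)
    return hub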
|
import os
import threading
import pkg_resources
from radicale import config, log
from radicale.app import Application
from radicale.log import logger
VERSION = pkg_resources.get_distribution("radicale").version
_application = None
_application_config_path = None
_application_lock = threading.Lock()
def _init_application(config_path, wsgi_errors):
global _application, _application_config_path
with _application_lock:
if _application is not None:
return
log.setup()
with log.register_stream(wsgi_errors):
_application_config_path = config_path
configuration = config.load(config.parse_compound_paths(
config.DEFAULT_CONFIG_PATH,
config_path))
log.set_level(configuration.get("logging", "level"))
# Log configuration after logger is configured
for source, miss in configuration.sources():
logger.info("%s %s", "Skipped missing" if miss else "Loaded",
source)
_application = Application(configuration)
def application(environ, start_response):
"""Entry point for external WSGI servers."""
config_path = environ.get("RADICALE_CONFIG",
os.environ.get("RADICALE_CONFIG"))
if _application is None:
_init_application(config_path, environ["wsgi.errors"])
if _application_config_path != config_path:
raise ValueError("RADICALE_CONFIG must not change: %s != %s" %
(repr(config_path), repr(_application_config_path)))
return _application(environ, start_response)
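# Minimal sketch (not part of radicale): serving the WSGI entry point above
# with the standard-library wsgiref server. The configuration path below is a
# hypothetical example.
def _example_serve_with_wsgiref():
    """Run the Radicale WSGI application on localhost:5232 for local testing."""
    from wsgiref.simple_server import make_server
    os.environ["RADICALE_CONFIG"] = "/etc/radicale/config"  # hypothetical path
    with make_server("127.0.0.1", 5232, application) as httpd:
        httpd.serve_forever()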
|
import asyncio
from aiohttp import ClientError
import pytest
from smart_meter_texas.exceptions import (
SmartMeterTexasAPIError,
SmartMeterTexasAuthError,
)
from homeassistant import config_entries, setup
from homeassistant.components.smart_meter_texas.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
TEST_LOGIN = {CONF_USERNAME: "test-username", CONF_PASSWORD: "test-password"}
async def test_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch("smart_meter_texas.Client.authenticate", return_value=True), patch(
"homeassistant.components.smart_meter_texas.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.smart_meter_texas.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_LOGIN
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == TEST_LOGIN[CONF_USERNAME]
assert result2["data"] == TEST_LOGIN
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"smart_meter_texas.Client.authenticate",
side_effect=SmartMeterTexasAuthError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_LOGIN,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
@pytest.mark.parametrize(
"side_effect", [asyncio.TimeoutError, ClientError, SmartMeterTexasAPIError]
)
async def test_form_cannot_connect(hass, side_effect):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"smart_meter_texas.Client.authenticate",
side_effect=side_effect,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], TEST_LOGIN
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_exception(hass):
"""Test base exception is handled."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"smart_meter_texas.Client.authenticate",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
TEST_LOGIN,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_duplicate_account(hass):
"""Test that a duplicate account cannot be configured."""
MockConfigEntry(
domain=DOMAIN,
unique_id="user123",
data={"username": "user123", "password": "password123"},
).add_to_hass(hass)
with patch(
"smart_meter_texas.Client.authenticate",
return_value=True,
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
data={"username": "user123", "password": "password123"},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
|
import os
import pytest
from molecule import config
from molecule.driver import lxc
@pytest.fixture
def _instance(config_instance):
return lxc.LXC(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_testinfra_options_property(_instance):
assert {
'connection': 'ansible',
'ansible-inventory': _instance._config.provisioner.inventory_file
} == _instance.testinfra_options
def test_name_property(_instance):
assert 'lxc' == _instance.name
def test_options_property(_instance):
x = {'managed': True}
assert x == _instance.options
def test_login_cmd_template_property(_instance):
assert 'sudo lxc-attach -n {instance}' == _instance.login_cmd_template
def test_safe_files_property(_instance):
assert [] == _instance.safe_files
def test_default_safe_files_property(_instance):
assert [] == _instance.default_safe_files
def test_delegated_property(_instance):
assert not _instance.delegated
def test_managed_property(_instance):
assert _instance.managed
def test_default_ssh_connection_options_property(_instance):
assert [] == _instance.default_ssh_connection_options
def test_login_options(_instance):
assert {'instance': 'foo'} == _instance.login_options('foo')
def test_ansible_connection_options(_instance):
x = {'ansible_connection': 'lxc'}
assert x == _instance.ansible_connection_options('foo')
def test_instance_config_property(_instance):
x = os.path.join(_instance._config.scenario.ephemeral_directory,
'instance_config.yml')
assert x == _instance.instance_config
def test_ssh_connection_options_property(_instance):
assert [] == _instance.ssh_connection_options
def test_status(mocker, _instance):
result = _instance.status()
assert 2 == len(result)
assert result[0].instance_name == 'instance-1'
assert result[0].driver_name == 'lxc'
assert result[0].provisioner_name == 'ansible'
assert result[0].scenario_name == 'default'
assert result[0].created == 'false'
assert result[0].converged == 'false'
assert result[1].instance_name == 'instance-2'
assert result[1].driver_name == 'lxc'
assert result[1].provisioner_name == 'ansible'
assert result[1].scenario_name == 'default'
assert result[1].created == 'false'
assert result[1].converged == 'false'
def test_created_property(_instance):
assert 'false' == _instance._created()
def test_converged_property(_instance):
assert 'false' == _instance._converged()
|
from stash.tests.stashtest import StashTestCase
class TotdTests(StashTestCase):
"""
Tests for the 'totd' command.
"""
def test_help(self):
"""
Test 'totd --help'.
"""
output = self.run_command("totd --help", exitcode=0)
self.assertIn("totd", output)
self.assertIn("-h", output)
self.assertIn("--help", output)
self.assertIn("-n", output)
self.assertIn("--count", output)
def test_count(self):
"""
Test 'totd --count'.
"""
output = self.run_command("totd --count", exitcode=0).replace("\n", "")
# ensure that the string is correct
self.assertTrue(output.startswith("Total available tips: "))
# ensure that number of tips is not zero
self.assertFalse(output.endswith(" "))
def test_simple(self):
"""
Test a simple 'totd' execution.
Ensure that different totds are returned.
"""
known = []
n_unique = 0
for i in range(100):
output = self.run_command("totd", exitcode=0).replace("\n", "")
if output not in known:
known.append(output)
n_unique += 1
self.assertGreater(n_unique, 3)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl import logging
from compare_gan import datasets
from compare_gan import eval_utils
from compare_gan import utils
import gin
import numpy as np
from six.moves import range
import tensorflow as tf
import tensorflow_hub as hub
FLAGS = flags.FLAGS
# Special value returned when a fake image generated by a GAN has NaNs.
NAN_DETECTED = 31337.0
@gin.configurable("eval_z", blacklist=["shape", "name"])
def z_generator(shape, distribution_fn=tf.random.uniform,
minval=-1.0, maxval=1.0, stddev=1.0, name=None):
"""Random noise distributions as TF op.
Args:
shape: A 1-D integer Tensor or Python array.
distribution_fn: Function that creates a Tensor. If the function has any
of the arguments 'minval', 'maxval' or 'stddev' these are passed to it.
minval: The lower bound on the range of random values to generate.
maxval: The upper bound on the range of random values to generate.
stddev: The standard deviation of a normal distribution.
name: A name for the operation.
Returns:
Tensor with the given shape and dtype tf.float32.
"""
return utils.call_with_accepted_args(
distribution_fn, shape=shape, minval=minval, maxval=maxval,
stddev=stddev, name=name)
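# Hedged usage sketch: z_generator is registered as the gin configurable
# "eval_z", so the noise distribution can be swapped without editing this
# module. gin.bind_parameter attaches Python values directly and therefore
# does not require tf.random.normal to be registered as a gin external.
def _example_configure_eval_z():
    """Switch eval_z to a normal distribution via gin bindings (illustrative)."""
    gin.bind_parameter("eval_z.distribution_fn", tf.random.normal)
    gin.bind_parameter("eval_z.stddev", 0.5)
    # call_with_accepted_args drops minval/maxval because tf.random.normal
    # does not accept them; only shape, stddev and name are forwarded.
    return z_generator(shape=[64, 128])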
def _update_bn_accumulators(sess, generated, num_accu_examples):
"""Returns True if the accumlators for batch norm were updated.
Args:
sess: `tf.Session` object. Checkpoint should already be loaded.
generated: Output tensor of the generator.
num_accu_examples: How many examples should be used to update accumulators.
Returns:
True if there were accumulators.
"""
# Create update ops for batch statistic updates for each batch normalization
# with accumulators.
update_accu_switches = [v for v in tf.global_variables()
if "accu/update_accus" in v.name]
logging.info("update_accu_switches: %s", update_accu_switches)
if not update_accu_switches:
return False
sess.run([tf.assign(v, 1) for v in update_accu_switches])
batch_size = generated.shape[0].value
num_batches = num_accu_examples // batch_size
for i in range(num_batches):
if i % 500 == 0:
logging.info("Updating BN accumulators %d/%d steps.", i, num_batches)
sess.run(generated)
sess.run([tf.assign(v, 0) for v in update_accu_switches])
logging.info("Done updating BN accumulators.")
return True
def evaluate_tfhub_module(module_spec, eval_tasks, use_tpu,
num_averaging_runs):
"""Evaluate model at given checkpoint_path.
Args:
module_spec: string, path to a TF hub module.
eval_tasks: List of objects that inherit from EvalTask.
use_tpu: Whether to use TPUs.
num_averaging_runs: Determines how many times each metric is computed.
Returns:
Dict[Text, float] with all the computed results.
Raises:
NanFoundError: If generator output has any NaNs.
"""
# Make sure that the same latent variables are used for each evaluation.
np.random.seed(42)
dataset = datasets.get_dataset()
num_test_examples = dataset.eval_test_samples
batch_size = 64
num_batches = int(np.ceil(num_test_examples / batch_size))
# Load and update the generator.
result_dict = {}
fake_dsets = []
with tf.Graph().as_default():
tf.set_random_seed(42)
with tf.Session() as sess:
if use_tpu:
sess.run(tf.contrib.tpu.initialize_system())
def sample_from_generator():
"""Create graph for sampling images."""
generator = hub.Module(
module_spec,
name="gen_module",
tags={"gen", "bs{}".format(batch_size)})
logging.info("Generator inputs: %s", generator.get_input_info_dict())
z_dim = generator.get_input_info_dict()["z"].get_shape()[1].value
z = z_generator(shape=[batch_size, z_dim])
if "labels" in generator.get_input_info_dict():
# Conditional GAN.
assert dataset.num_classes
labels = tf.random.uniform(
[batch_size], maxval=dataset.num_classes, dtype=tf.int32)
inputs = dict(z=z, labels=labels)
else:
# Unconditional GAN.
assert "labels" not in generator.get_input_info_dict()
inputs = dict(z=z)
return generator(inputs=inputs, as_dict=True)["generated"]
if use_tpu:
generated = tf.contrib.tpu.rewrite(sample_from_generator)
else:
generated = sample_from_generator()
tf.global_variables_initializer().run()
if _update_bn_accumulators(sess, generated, num_accu_examples=204800):
saver = tf.train.Saver()
save_path = os.path.join(module_spec, "model-with-accu.ckpt")
checkpoint_path = saver.save(
sess,
save_path=save_path)
logging.info("Exported generator with accumulated batch stats to "
"%s.", checkpoint_path)
if not eval_tasks:
logging.error("Task list is empty, returning.")
return
for i in range(num_averaging_runs):
logging.info("Generating fake data set %d/%d.", i+1, num_averaging_runs)
fake_dset = eval_utils.EvalDataSample(
eval_utils.sample_fake_dataset(sess, generated, num_batches))
fake_dsets.append(fake_dset)
logging.info("Computing inception features for generated data %d/%d.",
i+1, num_averaging_runs)
activations, logits = eval_utils.inception_transform_np(
fake_dset.images, batch_size)
fake_dset.set_inception_features(
activations=activations, logits=logits)
fake_dset.set_num_examples(num_test_examples)
if i != 0:
# Free up some memory by releasing additional fake data samples.
# For ImageNet128 50k images are ~9 GiB. This will blow up metrics
# (such as fractal dimension) if num_averaging_runs > 1.
fake_dset.discard_images()
real_dset = eval_utils.EvalDataSample(
eval_utils.get_real_images(
dataset=dataset, num_examples=num_test_examples))
logging.info("Getting Inception features for real images.")
real_dset.activations, _ = eval_utils.inception_transform_np(
real_dset.images, batch_size)
real_dset.set_num_examples(num_test_examples)
# Run all the tasks and update the result dictionary with the task statistics.
result_dict = {}
for task in eval_tasks:
task_results_dicts = [
task.run_after_session(fake_dset, real_dset)
for fake_dset in fake_dsets
]
# Average the score for each key.
result_statistics = {}
for key in task_results_dicts[0].keys():
scores_for_key = np.array([d[key] for d in task_results_dicts])
mean, std = np.mean(scores_for_key), np.std(scores_for_key)
scores_as_string = "_".join([str(x) for x in scores_for_key])
result_statistics[key + "_mean"] = mean
result_statistics[key + "_std"] = std
result_statistics[key + "_list"] = scores_as_string
logging.info("Computed results for task %s: %s", task, result_statistics)
result_dict.update(result_statistics)
return result_dict
|
import argparse
import os
import platform
import subprocess
import sys
import threading
from typing import List
from homeassistant.const import REQUIRED_PYTHON_VER, RESTART_EXIT_CODE, __version__
def validate_python() -> None:
"""Validate that the right Python version is running."""
if sys.version_info[:3] < REQUIRED_PYTHON_VER:
print(
"Home Assistant requires at least Python "
f"{REQUIRED_PYTHON_VER[0]}.{REQUIRED_PYTHON_VER[1]}.{REQUIRED_PYTHON_VER[2]}"
)
sys.exit(1)
def ensure_config_path(config_dir: str) -> None:
"""Validate the configuration directory."""
# pylint: disable=import-outside-toplevel
import homeassistant.config as config_util
lib_dir = os.path.join(config_dir, "deps")
# Test if configuration directory exists
if not os.path.isdir(config_dir):
if config_dir != config_util.get_default_config_dir():
print(
f"Fatal Error: Specified configuration directory {config_dir} "
"does not exist"
)
sys.exit(1)
try:
os.mkdir(config_dir)
except OSError:
print(
"Fatal Error: Unable to create default configuration "
f"directory {config_dir}"
)
sys.exit(1)
# Test if library directory exists
if not os.path.isdir(lib_dir):
try:
os.mkdir(lib_dir)
except OSError:
print(f"Fatal Error: Unable to create library directory {lib_dir}")
sys.exit(1)
def get_arguments() -> argparse.Namespace:
"""Get parsed passed in arguments."""
# pylint: disable=import-outside-toplevel
import homeassistant.config as config_util
parser = argparse.ArgumentParser(
description="Home Assistant: Observe, Control, Automate."
)
parser.add_argument("--version", action="version", version=__version__)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument(
"--safe-mode", action="store_true", help="Start Home Assistant in safe mode"
)
parser.add_argument(
"--debug", action="store_true", help="Start Home Assistant in debug mode"
)
parser.add_argument(
"--open-ui", action="store_true", help="Open the webinterface in a browser"
)
parser.add_argument(
"--skip-pip",
action="store_true",
help="Skips pip install of required packages on startup",
)
parser.add_argument(
"-v", "--verbose", action="store_true", help="Enable verbose logging to file."
)
parser.add_argument(
"--pid-file",
metavar="path_to_pid_file",
default=None,
help="Path to PID file useful for running as daemon",
)
parser.add_argument(
"--log-rotate-days",
type=int,
default=None,
help="Enables daily log rotation and keeps up to the specified days",
)
parser.add_argument(
"--log-file",
type=str,
default=None,
help="Log file to write to. If not set, CONFIG/home-assistant.log is used",
)
parser.add_argument(
"--log-no-color", action="store_true", help="Disable color logs"
)
parser.add_argument(
"--runner",
action="store_true",
help=f"On restart exit with code {RESTART_EXIT_CODE}",
)
parser.add_argument(
"--script", nargs=argparse.REMAINDER, help="Run one of the embedded scripts"
)
if os.name == "posix":
parser.add_argument(
"--daemon", action="store_true", help="Run Home Assistant as daemon"
)
arguments = parser.parse_args()
if os.name != "posix" or arguments.debug or arguments.runner:
setattr(arguments, "daemon", False)
return arguments
def daemonize() -> None:
"""Move current process to daemon process."""
# Create first fork
pid = os.fork()
if pid > 0:
sys.exit(0)
# Decouple fork
os.setsid()
# Create second fork
pid = os.fork()
if pid > 0:
sys.exit(0)
# redirect standard file descriptors to devnull
infd = open(os.devnull)
outfd = open(os.devnull, "a+")
sys.stdout.flush()
sys.stderr.flush()
os.dup2(infd.fileno(), sys.stdin.fileno())
os.dup2(outfd.fileno(), sys.stdout.fileno())
os.dup2(outfd.fileno(), sys.stderr.fileno())
def check_pid(pid_file: str) -> None:
"""Check that Home Assistant is not already running."""
# Check pid file
try:
with open(pid_file) as file:
pid = int(file.readline())
except OSError:
# PID File does not exist
return
# If we just restarted, we just found our own pidfile.
if pid == os.getpid():
return
try:
os.kill(pid, 0)
except OSError:
# PID does not exist
return
print("Fatal Error: Home Assistant is already running.")
sys.exit(1)
def write_pid(pid_file: str) -> None:
"""Create a PID File."""
pid = os.getpid()
try:
with open(pid_file, "w") as file:
file.write(str(pid))
except OSError:
print(f"Fatal Error: Unable to write pid file {pid_file}")
sys.exit(1)
def closefds_osx(min_fd: int, max_fd: int) -> None:
"""Make sure file descriptors get closed when we restart.
We cannot call close on guarded fds, and we cannot easily test which fds
are guarded. But we can set the close-on-exec flag on everything we want to
get rid of.
"""
# pylint: disable=import-outside-toplevel
from fcntl import F_GETFD, F_SETFD, FD_CLOEXEC, fcntl
for _fd in range(min_fd, max_fd):
try:
val = fcntl(_fd, F_GETFD)
if not val & FD_CLOEXEC:
fcntl(_fd, F_SETFD, val | FD_CLOEXEC)
except OSError:
pass
def cmdline() -> List[str]:
"""Collect path and arguments to re-execute the current hass instance."""
if os.path.basename(sys.argv[0]) == "__main__.py":
modulepath = os.path.dirname(sys.argv[0])
os.environ["PYTHONPATH"] = os.path.dirname(modulepath)
return [sys.executable] + [arg for arg in sys.argv if arg != "--daemon"]
return [arg for arg in sys.argv if arg != "--daemon"]
def try_to_restart() -> None:
"""Attempt to clean up state and start a new Home Assistant instance."""
# Things should be mostly shut down already at this point, now just try
# to clean up things that may have been left behind.
sys.stderr.write("Home Assistant attempting to restart.\n")
# Count remaining threads, ideally there should only be one non-daemonized
# thread left (which is us). Nothing we really do with it, but it might be
# useful when debugging shutdown/restart issues.
try:
nthreads = sum(
thread.is_alive() and not thread.daemon for thread in threading.enumerate()
)
if nthreads > 1:
sys.stderr.write(f"Found {nthreads} non-daemonic threads.\n")
# Somehow we sometimes seem to trigger an assertion in the python threading
# module. It seems we find threads that have no associated OS level thread
# which are not marked as stopped at the python level.
except AssertionError:
sys.stderr.write("Failed to count non-daemonic threads.\n")
# Try to not leave behind open filedescriptors with the emphasis on try.
try:
max_fd = os.sysconf("SC_OPEN_MAX")
except ValueError:
max_fd = 256
if platform.system() == "Darwin":
closefds_osx(3, max_fd)
else:
os.closerange(3, max_fd)
# Now launch into a new instance of Home Assistant. If this fails we
# fall through and exit with error 100 (RESTART_EXIT_CODE) in which case
# systemd will restart us when RestartForceExitStatus=100 is set in the
# systemd.service file.
sys.stderr.write("Restarting Home Assistant\n")
args = cmdline()
os.execv(args[0], args)
def main() -> int:
"""Start Home Assistant."""
validate_python()
# Run a simple daemon runner process on Windows to handle restarts
if os.name == "nt" and "--runner" not in sys.argv:
nt_args = cmdline() + ["--runner"]
while True:
try:
subprocess.check_call(nt_args)
sys.exit(0)
except KeyboardInterrupt:
sys.exit(0)
except subprocess.CalledProcessError as exc:
if exc.returncode != RESTART_EXIT_CODE:
sys.exit(exc.returncode)
args = get_arguments()
if args.script is not None:
# pylint: disable=import-outside-toplevel
from homeassistant import scripts
return scripts.run(args.script)
config_dir = os.path.abspath(os.path.join(os.getcwd(), args.config))
ensure_config_path(config_dir)
# Daemon functions
if args.pid_file:
check_pid(args.pid_file)
if args.daemon:
daemonize()
if args.pid_file:
write_pid(args.pid_file)
# pylint: disable=import-outside-toplevel
from homeassistant import runner
runtime_conf = runner.RuntimeConfig(
config_dir=config_dir,
verbose=args.verbose,
log_rotate_days=args.log_rotate_days,
log_file=args.log_file,
log_no_color=args.log_no_color,
skip_pip=args.skip_pip,
safe_mode=args.safe_mode,
debug=args.debug,
open_ui=args.open_ui,
)
exit_code = runner.run(runtime_conf)
if exit_code == RESTART_EXIT_CODE and not args.runner:
try_to_restart()
return exit_code
if __name__ == "__main__":
sys.exit(main())
|
import asyncio
from aiohttp.client_exceptions import ClientError
import pytest
from yarl import URL
from homeassistant.components.qwikswitch import DOMAIN as QWIKSWITCH
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock
from tests.test_util.aiohttp import AiohttpClientMockResponse, MockLongPollSideEffect
@pytest.fixture
def qs_devices():
"""Return a set of devices as a response."""
return [
{
"id": "@a00001",
"name": "Switch 1",
"type": "rel",
"val": "OFF",
"time": "1522777506",
"rssi": "51%",
},
{
"id": "@a00002",
"name": "Light 2",
"type": "rel",
"val": "ON",
"time": "1522777507",
"rssi": "45%",
},
{
"id": "@a00003",
"name": "Dim 3",
"type": "dim",
"val": "280c00",
"time": "1522777544",
"rssi": "62%",
},
]
EMPTY_PACKET = {"cmd": ""}
async def test_binary_sensor_device(hass, aioclient_mock, qs_devices):
"""Test a binary sensor device."""
config = {
"qwikswitch": {
"sensors": {"name": "s1", "id": "@a00001", "channel": 1, "type": "imod"}
}
}
aioclient_mock.get("http://127.0.0.1:2020/&device", json=qs_devices)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
# verify initial state is off per the 'val' in qs_devices
state_obj = hass.states.get("binary_sensor.s1")
assert state_obj.state == "off"
# receive turn on command from network
listen_mock.queue_response(
json={"id": "@a00001", "cmd": "STATUS.ACK", "data": "4e0e1601", "rssi": "61%"}
)
await asyncio.sleep(0.01)
await hass.async_block_till_done()
state_obj = hass.states.get("binary_sensor.s1")
assert state_obj.state == "on"
# receive turn off command from network
listen_mock.queue_response(
json={"id": "@a00001", "cmd": "STATUS.ACK", "data": "4e0e1701", "rssi": "61%"},
)
await asyncio.sleep(0.01)
await hass.async_block_till_done()
state_obj = hass.states.get("binary_sensor.s1")
assert state_obj.state == "off"
listen_mock.stop()
async def test_sensor_device(hass, aioclient_mock, qs_devices):
"""Test a sensor device."""
config = {
"qwikswitch": {
"sensors": {
"name": "ss1",
"id": "@a00001",
"channel": 1,
"type": "qwikcord",
}
}
}
aioclient_mock.get("http://127.0.0.1:2020/&device", json=qs_devices)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
state_obj = hass.states.get("sensor.ss1")
assert state_obj.state == "None"
# receive command that sets the sensor value
listen_mock.queue_response(
json={"id": "@a00001", "name": "ss1", "type": "rel", "val": "4733800001a00000"},
)
await asyncio.sleep(0.01)
await hass.async_block_till_done()
state_obj = hass.states.get("sensor.ss1")
assert state_obj.state == "416"
listen_mock.stop()
async def test_switch_device(hass, aioclient_mock, qs_devices):
"""Test a switch device."""
async def get_devices_json(method, url, data):
return AiohttpClientMockResponse(method=method, url=url, json=qs_devices)
config = {"qwikswitch": {"switches": ["@a00001"]}}
aioclient_mock.get("http://127.0.0.1:2020/&device", side_effect=get_devices_json)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
# verify initial state is off per the 'val' in qs_devices
state_obj = hass.states.get("switch.switch_1")
assert state_obj.state == "off"
# ask hass to turn on and verify command is sent to device
aioclient_mock.mock_calls.clear()
aioclient_mock.get("http://127.0.0.1:2020/@a00001=100", json={"data": "OK"})
await hass.services.async_call(
"switch", "turn_on", {"entity_id": "switch.switch_1"}, blocking=True
)
await asyncio.sleep(0.01)
assert (
"GET",
URL("http://127.0.0.1:2020/@a00001=100"),
None,
None,
) in aioclient_mock.mock_calls
# verify state is on
state_obj = hass.states.get("switch.switch_1")
assert state_obj.state == "on"
# ask hass to turn off and verify command is sent to device
aioclient_mock.mock_calls.clear()
aioclient_mock.get("http://127.0.0.1:2020/@a00001=0", json={"data": "OK"})
await hass.services.async_call(
"switch", "turn_off", {"entity_id": "switch.switch_1"}, blocking=True
)
assert (
"GET",
URL("http://127.0.0.1:2020/@a00001=0"),
None,
None,
) in aioclient_mock.mock_calls
# verify state is off
state_obj = hass.states.get("switch.switch_1")
assert state_obj.state == "off"
# check if setting the value in the network show in hass
qs_devices[0]["val"] = "ON"
listen_mock.queue_response(json=EMPTY_PACKET)
await hass.async_block_till_done()
state_obj = hass.states.get("switch.switch_1")
assert state_obj.state == "on"
listen_mock.stop()
async def test_light_device(hass, aioclient_mock, qs_devices):
"""Test a light device."""
async def get_devices_json(method, url, data):
return AiohttpClientMockResponse(method=method, url=url, json=qs_devices)
config = {"qwikswitch": {}}
aioclient_mock.get("http://127.0.0.1:2020/&device", side_effect=get_devices_json)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
# verify initial state is on per the 'val' in qs_devices
state_obj = hass.states.get("light.dim_3")
assert state_obj.state == "on"
assert state_obj.attributes["brightness"] == 255
# ask hass to turn off and verify command is sent to device
aioclient_mock.mock_calls.clear()
aioclient_mock.get("http://127.0.0.1:2020/@a00003=0", json={"data": "OK"})
await hass.services.async_call(
"light", "turn_off", {"entity_id": "light.dim_3"}, blocking=True
)
await asyncio.sleep(0.01)
assert (
"GET",
URL("http://127.0.0.1:2020/@a00003=0"),
None,
None,
) in aioclient_mock.mock_calls
state_obj = hass.states.get("light.dim_3")
assert state_obj.state == "off"
# change brightness in network and check that hass updates
qs_devices[2]["val"] = "280c55" # half dimmed
listen_mock.queue_response(json=EMPTY_PACKET)
await asyncio.sleep(0.01)
await hass.async_block_till_done()
state_obj = hass.states.get("light.dim_3")
assert state_obj.state == "on"
assert 16 < state_obj.attributes["brightness"] < 240
# turn off in the network and see that it is off in hass as well
qs_devices[2]["val"] = "280c78" # off
listen_mock.queue_response(json=EMPTY_PACKET)
await asyncio.sleep(0.01)
await hass.async_block_till_done()
state_obj = hass.states.get("light.dim_3")
assert state_obj.state == "off"
# ask hass to turn on and verify command is sent to device
aioclient_mock.mock_calls.clear()
aioclient_mock.get("http://127.0.0.1:2020/@a00003=100", json={"data": "OK"})
await hass.services.async_call(
"light", "turn_on", {"entity_id": "light.dim_3"}, blocking=True
)
assert (
"GET",
URL("http://127.0.0.1:2020/@a00003=100"),
None,
None,
) in aioclient_mock.mock_calls
await hass.async_block_till_done()
state_obj = hass.states.get("light.dim_3")
assert state_obj.state == "on"
listen_mock.stop()
async def test_button(hass, aioclient_mock, qs_devices):
"""Test that buttons fire an event."""
async def get_devices_json(method, url, data):
return AiohttpClientMockResponse(method=method, url=url, json=qs_devices)
config = {"qwikswitch": {"button_events": "TOGGLE"}}
aioclient_mock.get("http://127.0.0.1:2020/&device", side_effect=get_devices_json)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
button_pressed = Mock()
hass.bus.async_listen_once("qwikswitch.button.@a00002", button_pressed)
listen_mock.queue_response(
json={"id": "@a00002", "cmd": "TOGGLE"},
)
await asyncio.sleep(0.01)
await hass.async_block_till_done()
button_pressed.assert_called_once()
listen_mock.stop()
async def test_failed_update_devices(hass, aioclient_mock):
"""Test that code behaves correctly when unable to get the devices."""
config = {"qwikswitch": {}}
aioclient_mock.get("http://127.0.0.1:2020/&device", exc=ClientError())
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert not await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
listen_mock.stop()
async def test_single_invalid_sensor(hass, aioclient_mock, qs_devices):
"""Test that a single misconfigured sensor doesn't block the others."""
config = {
"qwikswitch": {
"sensors": [
{"name": "ss1", "id": "@a00001", "channel": 1, "type": "qwikcord"},
{"name": "ss2", "id": "@a00002", "channel": 1, "type": "ERROR_TYPE"},
{"name": "ss3", "id": "@a00003", "channel": 1, "type": "qwikcord"},
]
}
}
aioclient_mock.get("http://127.0.0.1:2020/&device", json=qs_devices)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
await asyncio.sleep(0.01)
assert hass.states.get("sensor.ss1")
assert not hass.states.get("sensor.ss2")
assert hass.states.get("sensor.ss3")
listen_mock.stop()
async def test_non_binary_sensor_with_binary_args(
hass, aioclient_mock, qs_devices, caplog
):
"""Test that the system logs a warning when a non-binary device has binary specific args."""
config = {
"qwikswitch": {
"sensors": [
{
"name": "ss1",
"id": "@a00001",
"channel": 1,
"type": "qwikcord",
"invert": True,
},
]
}
}
aioclient_mock.get("http://127.0.0.1:2020/&device", json=qs_devices)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
await asyncio.sleep(0.01)
await hass.async_block_till_done()
assert hass.states.get("sensor.ss1")
assert "invert should only be used for binary_sensors" in caplog.text
listen_mock.stop()
async def test_non_relay_switch(hass, aioclient_mock, qs_devices, caplog):
"""Test that the system logs a warning when a switch is configured for a device that is not a relay."""
config = {"qwikswitch": {"switches": ["@a00003"]}}
aioclient_mock.get("http://127.0.0.1:2020/&device", json=qs_devices)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
await asyncio.sleep(0.01)
await hass.async_block_till_done()
assert not hass.states.get("switch.dim_3")
assert "You specified a switch that is not a relay @a00003" in caplog.text
listen_mock.stop()
async def test_unknown_device(hass, aioclient_mock, qs_devices, caplog):
"""Test that the system logs a warning when a network device has unknown type."""
config = {"qwikswitch": {}}
qs_devices[1]["type"] = "ERROR_TYPE"
aioclient_mock.get("http://127.0.0.1:2020/&device", json=qs_devices)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, QWIKSWITCH, config)
await hass.async_start()
await hass.async_block_till_done()
await asyncio.sleep(0.01)
await hass.async_block_till_done()
assert hass.states.get("light.switch_1")
assert not hass.states.get("light.light_2")
assert hass.states.get("light.dim_3")
assert "Ignored unknown QSUSB device" in caplog.text
listen_mock.stop()
async def test_no_discover_info(hass, hass_storage, aioclient_mock, caplog):
"""Test that discovery with no discovery_info does not result in errors."""
config = {
"qwikswitch": {},
"light": {"platform": "qwikswitch"},
"switch": {"platform": "qwikswitch"},
"sensor": {"platform": "qwikswitch"},
"binary_sensor": {"platform": "qwikswitch"},
}
aioclient_mock.get(
"http://127.0.0.1:2020/&device",
json=[
{
"id": "@a00001",
"name": "Switch 1",
"type": "ERROR_TYPE",
"val": "OFF",
"time": "1522777506",
"rssi": "51%",
},
],
)
listen_mock = MockLongPollSideEffect()
aioclient_mock.get("http://127.0.0.1:2020/&listen", side_effect=listen_mock)
assert await async_setup_component(hass, "light", config)
assert await async_setup_component(hass, "switch", config)
assert await async_setup_component(hass, "sensor", config)
assert await async_setup_component(hass, "binary_sensor", config)
await hass.async_start()
await hass.async_block_till_done()
assert "Error while setting up qwikswitch platform" not in caplog.text
listen_mock.stop()
|
import logging
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
ENTITY_ID_FORMAT,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import (
CONF_ENTITY_ID,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_FRIENDLY_NAME,
CONF_ICON_TEMPLATE,
CONF_LIGHTS,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.script import Script
from .const import CONF_AVAILABILITY_TEMPLATE, DOMAIN, PLATFORMS
from .template_entity import TemplateEntity
_LOGGER = logging.getLogger(__name__)
_VALID_STATES = [STATE_ON, STATE_OFF, "true", "false"]
CONF_ON_ACTION = "turn_on"
CONF_OFF_ACTION = "turn_off"
CONF_LEVEL_ACTION = "set_level"
CONF_LEVEL_TEMPLATE = "level_template"
CONF_TEMPERATURE_TEMPLATE = "temperature_template"
CONF_TEMPERATURE_ACTION = "set_temperature"
CONF_COLOR_TEMPLATE = "color_template"
CONF_COLOR_ACTION = "set_color"
CONF_WHITE_VALUE_TEMPLATE = "white_value_template"
CONF_WHITE_VALUE_ACTION = "set_white_value"
LIGHT_SCHEMA = vol.All(
cv.deprecated(CONF_ENTITY_ID),
vol.Schema(
{
vol.Required(CONF_ON_ACTION): cv.SCRIPT_SCHEMA,
vol.Required(CONF_OFF_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_LEVEL_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_LEVEL_TEMPLATE): cv.template,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
vol.Optional(CONF_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_TEMPERATURE_TEMPLATE): cv.template,
vol.Optional(CONF_TEMPERATURE_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_COLOR_TEMPLATE): cv.template,
vol.Optional(CONF_COLOR_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_WHITE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_WHITE_VALUE_ACTION): cv.SCRIPT_SCHEMA,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
),
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_LIGHTS): cv.schema_with_slug_keys(LIGHT_SCHEMA)}
)
async def _async_create_entities(hass, config):
"""Create the Template Lights."""
lights = []
for device, device_config in config[CONF_LIGHTS].items():
friendly_name = device_config.get(CONF_FRIENDLY_NAME, device)
state_template = device_config.get(CONF_VALUE_TEMPLATE)
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
unique_id = device_config.get(CONF_UNIQUE_ID)
on_action = device_config[CONF_ON_ACTION]
off_action = device_config[CONF_OFF_ACTION]
level_action = device_config.get(CONF_LEVEL_ACTION)
level_template = device_config.get(CONF_LEVEL_TEMPLATE)
temperature_action = device_config.get(CONF_TEMPERATURE_ACTION)
temperature_template = device_config.get(CONF_TEMPERATURE_TEMPLATE)
color_action = device_config.get(CONF_COLOR_ACTION)
color_template = device_config.get(CONF_COLOR_TEMPLATE)
white_value_action = device_config.get(CONF_WHITE_VALUE_ACTION)
white_value_template = device_config.get(CONF_WHITE_VALUE_TEMPLATE)
lights.append(
LightTemplate(
hass,
device,
friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
level_action,
level_template,
temperature_action,
temperature_template,
color_action,
color_template,
white_value_action,
white_value_template,
unique_id,
)
)
return lights
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template lights."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities(await _async_create_entities(hass, config))
class LightTemplate(TemplateEntity, LightEntity):
"""Representation of a templated Light, including dimmable."""
def __init__(
self,
hass,
device_id,
friendly_name,
state_template,
icon_template,
entity_picture_template,
availability_template,
on_action,
off_action,
level_action,
level_template,
temperature_action,
temperature_template,
color_action,
color_template,
white_value_action,
white_value_template,
unique_id,
):
"""Initialize the light."""
super().__init__(
availability_template=availability_template,
icon_template=icon_template,
entity_picture_template=entity_picture_template,
)
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._name = friendly_name
self._template = state_template
domain = __name__.split(".")[-2]
self._on_script = Script(hass, on_action, friendly_name, domain)
self._off_script = Script(hass, off_action, friendly_name, domain)
self._level_script = None
if level_action is not None:
self._level_script = Script(hass, level_action, friendly_name, domain)
self._level_template = level_template
self._temperature_script = None
if temperature_action is not None:
self._temperature_script = Script(
hass, temperature_action, friendly_name, domain
)
self._temperature_template = temperature_template
self._color_script = None
if color_action is not None:
self._color_script = Script(hass, color_action, friendly_name, domain)
self._color_template = color_template
self._white_value_script = None
if white_value_action is not None:
self._white_value_script = Script(
hass, white_value_action, friendly_name, domain
)
self._white_value_template = white_value_template
self._state = False
self._brightness = None
self._temperature = None
self._color = None
self._white_value = None
self._unique_id = unique_id
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def color_temp(self):
"""Return the CT color value in mireds."""
return self._temperature
@property
def white_value(self):
"""Return the white value."""
return self._white_value
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return self._color
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this light."""
return self._unique_id
@property
def supported_features(self):
"""Flag supported features."""
supported_features = 0
if self._level_script is not None:
supported_features |= SUPPORT_BRIGHTNESS
if self._temperature_script is not None:
supported_features |= SUPPORT_COLOR_TEMP
if self._color_script is not None:
supported_features |= SUPPORT_COLOR
if self._white_value_script is not None:
supported_features |= SUPPORT_WHITE_VALUE
return supported_features
@property
def is_on(self):
"""Return true if device is on."""
return self._state
async def async_added_to_hass(self):
"""Register callbacks."""
if self._template:
self.add_template_attribute(
"_state", self._template, None, self._update_state
)
if self._level_template:
self.add_template_attribute(
"_brightness",
self._level_template,
None,
self._update_brightness,
none_on_template_error=True,
)
if self._temperature_template:
self.add_template_attribute(
"_temperature",
self._temperature_template,
None,
self._update_temperature,
none_on_template_error=True,
)
if self._color_template:
self.add_template_attribute(
"_color",
self._color_template,
None,
self._update_color,
none_on_template_error=True,
)
if self._white_value_template:
self.add_template_attribute(
"_white_value",
self._white_value_template,
None,
self._update_white_value,
none_on_template_error=True,
)
await super().async_added_to_hass()
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
optimistic_set = False
# set optimistic states
if self._template is None:
self._state = True
optimistic_set = True
if self._level_template is None and ATTR_BRIGHTNESS in kwargs:
_LOGGER.info(
"Optimistically setting brightness to %s", kwargs[ATTR_BRIGHTNESS]
)
self._brightness = kwargs[ATTR_BRIGHTNESS]
optimistic_set = True
if self._white_value_template is None and ATTR_WHITE_VALUE in kwargs:
_LOGGER.info(
"Optimistically setting white value to %s", kwargs[ATTR_WHITE_VALUE]
)
self._white_value = kwargs[ATTR_WHITE_VALUE]
optimistic_set = True
if self._temperature_template is None and ATTR_COLOR_TEMP in kwargs:
_LOGGER.info(
"Optimistically setting color temperature to %s",
kwargs[ATTR_COLOR_TEMP],
)
self._temperature = kwargs[ATTR_COLOR_TEMP]
optimistic_set = True
if ATTR_BRIGHTNESS in kwargs and self._level_script:
await self._level_script.async_run(
{"brightness": kwargs[ATTR_BRIGHTNESS]}, context=self._context
)
elif ATTR_COLOR_TEMP in kwargs and self._temperature_script:
await self._temperature_script.async_run(
{"color_temp": kwargs[ATTR_COLOR_TEMP]}, context=self._context
)
elif ATTR_WHITE_VALUE in kwargs and self._white_value_script:
await self._white_value_script.async_run(
{"white_value": kwargs[ATTR_WHITE_VALUE]}, context=self._context
)
elif ATTR_HS_COLOR in kwargs and self._color_script:
hs_value = kwargs[ATTR_HS_COLOR]
await self._color_script.async_run(
{"hs": hs_value, "h": int(hs_value[0]), "s": int(hs_value[1])},
context=self._context,
)
else:
await self._on_script.async_run(context=self._context)
if optimistic_set:
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._off_script.async_run(context=self._context)
if self._template is None:
self._state = False
self.async_write_ha_state()
@callback
def _update_brightness(self, brightness):
"""Update the brightness from the template."""
try:
if brightness in ("None", ""):
self._brightness = None
return
if 0 <= int(brightness) <= 255:
self._brightness = int(brightness)
else:
_LOGGER.error(
"Received invalid brightness : %s. Expected: 0-255", brightness
)
self._brightness = None
except ValueError:
_LOGGER.error(
"Template must supply an integer brightness from 0-255, or 'None'",
exc_info=True,
)
self._brightness = None
@callback
def _update_white_value(self, white_value):
"""Update the white value from the template."""
try:
if white_value in ("None", ""):
self._white_value = None
return
if 0 <= int(white_value) <= 255:
self._white_value = int(white_value)
else:
_LOGGER.error(
"Received invalid white value: %s. Expected: 0-255", white_value
)
self._white_value = None
except ValueError:
_LOGGER.error(
"Template must supply an integer white_value from 0-255, or 'None'",
exc_info=True,
)
self._white_value = None
@callback
def _update_state(self, result):
"""Update the state from the template."""
if isinstance(result, TemplateError):
# This behavior is legacy
self._state = False
if not self._availability_template:
self._available = True
return
if isinstance(result, bool):
self._state = result
return
state = str(result).lower()
if state in _VALID_STATES:
self._state = state in ("true", STATE_ON)
return
_LOGGER.error(
"Received invalid light is_on state: %s. Expected: %s",
state,
", ".join(_VALID_STATES),
)
self._state = None
@callback
def _update_temperature(self, render):
"""Update the temperature from the template."""
try:
if render in ("None", ""):
self._temperature = None
return
temperature = int(render)
if self.min_mireds <= temperature <= self.max_mireds:
self._temperature = temperature
else:
_LOGGER.error(
"Received invalid color temperature : %s. Expected: 0-%s",
temperature,
self.max_mireds,
)
self._temperature = None
except ValueError:
_LOGGER.error(
"Template must supply an integer temperature within the range for this light, or 'None'",
exc_info=True,
)
self._temperature = None
@callback
def _update_color(self, render):
"""Update the hs_color from the template."""
h_str = s_str = None
if isinstance(render, str):
if render in ("None", ""):
self._color = None
return
h_str, s_str = map(
float, render.replace("(", "").replace(")", "").split(",", 1)
)
elif isinstance(render, (list, tuple)) and len(render) == 2:
h_str, s_str = render
if (
h_str is not None
and s_str is not None
and 0 <= h_str <= 360
and 0 <= s_str <= 100
):
self._color = (h_str, s_str)
elif h_str is not None and s_str is not None:
_LOGGER.error(
"Received invalid hs_color : (%s, %s). Expected: (0-360, 0-100)",
h_str,
s_str,
)
self._color = None
else:
_LOGGER.error("Received invalid hs_color : (%s)", render)
self._color = None
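# Hedged configuration sketch: the structure validated by LIGHT_SCHEMA above,
# written as the Python dict the platform receives (users would express this
# as YAML under the template light platform). Entity ids and script names are
# assumptions for illustration only.
_EXAMPLE_LIGHT_CONFIG = {
    "lights": {
        "theater_light": {
            "friendly_name": "Theater light",
            "value_template": "{{ states('sensor.theater_brightness') | float > 0 }}",
            "level_template": "{{ (states('sensor.theater_brightness') | float * 2.55) | int }}",
            "turn_on": {"service": "script.theater_light_on"},
            "turn_off": {"service": "script.theater_light_off"},
            "set_level": {
                "service": "script.theater_light_level",
                "data_template": {"brightness": "{{ brightness }}"},
            },
        }
    }
}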
|
import diamond.collector
from subprocess import Popen, PIPE
import re
from diamond.collector import str_to_bool
class NetfilterAccountingCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = (
super(NetfilterAccountingCollector, self).get_default_config_help())
config_help.update({
'bin': 'The path to the nfacct binary',
'reset': 'Reset counters after collecting',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns default configuration options.
"""
config = super(NetfilterAccountingCollector, self).get_default_config()
config.update({
'path': 'nfacct',
'bin': 'nfacct',
'use_sudo': False,
'reset': True,
'sudo_cmd': '/usr/bin/sudo',
'method': 'Threaded'
})
return config
def collect(self):
"""
Collect and publish netfilter counters
"""
cmd = [self.config['bin'], "list"]
if str_to_bool(self.config['reset']):
cmd.append("reset")
if str_to_bool(self.config['use_sudo']):
cmd.insert(0, self.config['sudo_cmd'])
# We avoid use of the XML format to maintain compatibility with older
# versions of nfacct and also to avoid the bug where pkts and bytes were
# flipped
# Each line is of the format:
# { pkts = 00000000000001121700, bytes = 00000000000587037355 } = ipv4;
matcher = re.compile("{ pkts = (.*), bytes = (.*) } = (.*);")
lines = Popen(cmd, stdout=PIPE).communicate()[0].strip().splitlines()
for line in lines:
matches = re.match(matcher, line)
if matches:
num_packets = int(matches.group(1))
num_bytes = int(matches.group(2))
name = matches.group(3)
self.publish(name + ".pkts", num_packets)
self.publish(name + ".bytes", num_bytes)
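# Hedged sketch of the parsing done in collect() on a sample `nfacct list`
# line; the counter name "ipv4" comes from the format comment above and is
# used here for illustration only.
def _example_parse_nfacct_line():
    """Show how the regex extracts the counter name, packet and byte counts."""
    sample = "{ pkts = 00000000000001121700, bytes = 00000000000587037355 } = ipv4;"
    match = re.match("{ pkts = (.*), bytes = (.*) } = (.*);", sample)
    # Returns ('ipv4', 1121700, 587037355)
    return match.group(3), int(match.group(1)), int(match.group(2))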
|
from homeassistant.components.humidifier import (
ATTR_AVAILABLE_MODES,
ATTR_HUMIDITY,
ATTR_MODE,
DOMAIN,
SERVICE_SET_HUMIDITY,
SERVICE_SET_MODE,
intent,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.intent import IntentHandleError
from tests.common import async_mock_service
async def test_intent_set_humidity(hass):
"""Test the set humidity intent."""
hass.states.async_set(
"humidifier.bedroom_humidifier", STATE_ON, {ATTR_HUMIDITY: 40}
)
humidity_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)
turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await intent.async_setup_intents(hass)
result = await hass.helpers.intent.async_handle(
"test",
intent.INTENT_HUMIDITY,
{"name": {"value": "Bedroom humidifier"}, "humidity": {"value": "50"}},
)
await hass.async_block_till_done()
assert result.speech["plain"]["speech"] == "The bedroom humidifier is set to 50%"
assert len(turn_on_calls) == 0
assert len(humidity_calls) == 1
call = humidity_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_SET_HUMIDITY
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert call.data.get(ATTR_HUMIDITY) == 50
async def test_intent_set_humidity_and_turn_on(hass):
"""Test the set humidity intent for turned off humidifier."""
hass.states.async_set(
"humidifier.bedroom_humidifier", STATE_OFF, {ATTR_HUMIDITY: 40}
)
humidity_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_HUMIDITY)
turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await intent.async_setup_intents(hass)
result = await hass.helpers.intent.async_handle(
"test",
intent.INTENT_HUMIDITY,
{"name": {"value": "Bedroom humidifier"}, "humidity": {"value": "50"}},
)
await hass.async_block_till_done()
assert (
result.speech["plain"]["speech"]
== "Turned bedroom humidifier on and set humidity to 50%"
)
assert len(turn_on_calls) == 1
call = turn_on_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_TURN_ON
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert len(humidity_calls) == 1
call = humidity_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_SET_HUMIDITY
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert call.data.get(ATTR_HUMIDITY) == 50
async def test_intent_set_mode(hass):
"""Test the set mode intent."""
hass.states.async_set(
"humidifier.bedroom_humidifier",
STATE_ON,
{
ATTR_HUMIDITY: 40,
ATTR_SUPPORTED_FEATURES: 1,
ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_MODE: "home",
},
)
mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE)
turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await intent.async_setup_intents(hass)
result = await hass.helpers.intent.async_handle(
"test",
intent.INTENT_MODE,
{"name": {"value": "Bedroom humidifier"}, "mode": {"value": "away"}},
)
await hass.async_block_till_done()
assert (
result.speech["plain"]["speech"]
== "The mode for bedroom humidifier is set to away"
)
assert len(turn_on_calls) == 0
assert len(mode_calls) == 1
call = mode_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_SET_MODE
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert call.data.get(ATTR_MODE) == "away"
async def test_intent_set_mode_and_turn_on(hass):
"""Test the set mode intent."""
hass.states.async_set(
"humidifier.bedroom_humidifier",
STATE_OFF,
{
ATTR_HUMIDITY: 40,
ATTR_SUPPORTED_FEATURES: 1,
ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_MODE: "home",
},
)
mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE)
turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await intent.async_setup_intents(hass)
result = await hass.helpers.intent.async_handle(
"test",
intent.INTENT_MODE,
{"name": {"value": "Bedroom humidifier"}, "mode": {"value": "away"}},
)
await hass.async_block_till_done()
assert (
result.speech["plain"]["speech"]
== "Turned bedroom humidifier on and set away mode"
)
assert len(turn_on_calls) == 1
call = turn_on_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_TURN_ON
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert len(mode_calls) == 1
call = mode_calls[0]
assert call.domain == DOMAIN
assert call.service == SERVICE_SET_MODE
assert call.data.get(ATTR_ENTITY_ID) == "humidifier.bedroom_humidifier"
assert call.data.get(ATTR_MODE) == "away"
async def test_intent_set_mode_tests_feature(hass):
"""Test the set mode intent where modes are not supported."""
hass.states.async_set(
"humidifier.bedroom_humidifier", STATE_ON, {ATTR_HUMIDITY: 40}
)
mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE)
await intent.async_setup_intents(hass)
try:
await hass.helpers.intent.async_handle(
"test",
intent.INTENT_MODE,
{"name": {"value": "Bedroom humidifier"}, "mode": {"value": "away"}},
)
assert False, "handling intent should have raised"
except IntentHandleError as err:
assert str(err) == "Entity bedroom humidifier does not support modes"
assert len(mode_calls) == 0
async def test_intent_set_unknown_mode(hass):
"""Test the set mode intent for unsupported mode."""
hass.states.async_set(
"humidifier.bedroom_humidifier",
STATE_ON,
{
ATTR_HUMIDITY: 40,
ATTR_SUPPORTED_FEATURES: 1,
ATTR_AVAILABLE_MODES: ["home", "away"],
ATTR_MODE: "home",
},
)
mode_calls = async_mock_service(hass, DOMAIN, SERVICE_SET_MODE)
await intent.async_setup_intents(hass)
try:
await hass.helpers.intent.async_handle(
"test",
intent.INTENT_MODE,
{"name": {"value": "Bedroom humidifier"}, "mode": {"value": "eco"}},
)
assert False, "handling intent should have raised"
except IntentHandleError as err:
assert str(err) == "Entity bedroom humidifier does not support eco mode"
assert len(mode_calls) == 0
|
from functools import partial
import struct
import numpy as np
from scipy import sparse
from .constants import (FIFF, _dig_kind_named, _dig_cardinal_named,
_ch_kind_named, _ch_coil_type_named, _ch_unit_named,
_ch_unit_mul_named)
from ..utils.numerics import _julian_to_cal
##############################################################################
# HELPERS
class Tag(object):
"""Tag in FIF tree structure.
Parameters
----------
kind : int
Kind of Tag.
type_ : int
Type of Tag.
size : int
Size in bytes.
next : int
Position of next Tag.
pos : int
Position of Tag in the original file.
"""
def __init__(self, kind, type_, size, next, pos=None): # noqa: D102
self.kind = int(kind)
self.type = int(type_)
self.size = int(size)
self.next = int(next)
self.pos = pos if pos is not None else next
self.pos = int(self.pos)
self.data = None
def __repr__(self): # noqa: D105
out = ("<Tag | kind %s - type %s - size %s - next %s - pos %s"
% (self.kind, self.type, self.size, self.next, self.pos))
if hasattr(self, 'data'):
out += " - data %s" % self.data
out += ">"
return out
def __eq__(self, tag): # noqa: D105
return int(self.kind == tag.kind and
self.type == tag.type and
self.size == tag.size and
self.next == tag.next and
self.pos == tag.pos and
self.data == tag.data)
def read_tag_info(fid):
"""Read Tag info (or header)."""
tag = _read_tag_header(fid)
if tag is None:
return None
if tag.next == 0:
fid.seek(tag.size, 1)
elif tag.next > 0:
fid.seek(tag.next, 0)
return tag
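# Usage sketch (illustrative only; ``some_file.fif`` is an assumed path, and the
# first tag is assumed to chain forward, i.e. ``tag.next >= 0``). ``read_tag_info``
# reads one 16-byte header and leaves the file pointer at the following tag:
#
#     with open('some_file.fif', 'rb') as fid:
#         first = read_tag_info(fid)    # header of the first tag (data skipped)
#         second = read_tag_info(fid)   # header of the next tag, or None at EOF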
def _frombuffer_rows(fid, tag_size, dtype=None, shape=None, rlims=None):
"""Get a range of rows from a large tag."""
if shape is not None:
item_size = np.dtype(dtype).itemsize
if not len(shape) == 2:
raise ValueError('Only implemented for 2D matrices')
want_shape = np.prod(shape)
have_shape = tag_size // item_size
if want_shape != have_shape:
raise ValueError('Wrong shape specified, requested %s have %s'
% (want_shape, have_shape))
if not len(rlims) == 2:
raise ValueError('rlims must have two elements')
n_row_out = rlims[1] - rlims[0]
if n_row_out <= 0:
raise ValueError('rlims must yield at least one output')
row_size = item_size * shape[1]
# # of bytes to skip at the beginning, # to read, where to end
start_skip = int(rlims[0] * row_size)
read_size = int(n_row_out * row_size)
end_pos = int(fid.tell() + tag_size)
# Move the pointer ahead to the read point
fid.seek(start_skip, 1)
# Do the reading
out = np.frombuffer(fid.read(read_size), dtype=dtype)
# Move the pointer ahead to the end of the tag
fid.seek(end_pos)
else:
out = np.frombuffer(fid.read(tag_size), dtype=dtype)
return out
def _loc_to_coil_trans(loc):
"""Convert loc vector to coil_trans."""
assert loc.shape[-1] == 12
coil_trans = np.zeros(loc.shape[:-1] + (4, 4))
coil_trans[..., :3, 3] = loc[..., :3]
coil_trans[..., :3, :3] = np.reshape(
loc[..., 3:], loc.shape[:-1] + (3, 3)).swapaxes(-1, -2)
coil_trans[..., -1, -1] = 1.
return coil_trans
def _coil_trans_to_loc(coil_trans):
"""Convert coil_trans to loc."""
coil_trans = coil_trans.astype(np.float64)
return np.roll(coil_trans.T[:, :3], 1, 0).flatten()
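# Layout note (derived from the two helpers above): the 12-element ``loc`` packs
# the coil origin followed by the three rotation-matrix columns,
# ``loc = [r0 (3), ex (3), ey (3), ez (3)]``, so the conversion round-trips:
#
#     loc = np.arange(12, dtype=np.float64)      # dummy values, for illustration
#     trans = _loc_to_coil_trans(loc)            # 4 x 4 homogeneous transform
#     assert np.allclose(_coil_trans_to_loc(trans), loc)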
def _loc_to_eeg_loc(loc):
"""Convert a loc to an EEG loc."""
if not np.isfinite(loc[:3]).all():
raise RuntimeError('Missing EEG channel location')
if np.isfinite(loc[3:6]).all() and (loc[3:6]).any():
return np.array([loc[0:3], loc[3:6]]).T
else:
return loc[0:3][:, np.newaxis].copy()
##############################################################################
# READING FUNCTIONS
# None of these functions have docstrings because it's more compact that way,
# and hopefully it's clear what they do by their names and variable values.
# See ``read_tag`` for variable descriptions. Return values are implied
# by the function names.
_is_matrix = 4294901760 # ffff0000
_matrix_coding_dense = 16384 # 4000
_matrix_coding_CCS = 16400 # 4010
_matrix_coding_RCS = 16416 # 4020
_data_type = 65535 # ffff
def _read_tag_header(fid):
"""Read only the header of a Tag."""
s = fid.read(4 * 4)
if len(s) == 0:
return None
# struct.unpack faster than np.frombuffer, saves ~10% of time some places
return Tag(*struct.unpack('>iIii', s))
_matrix_bit_dtype = {
FIFF.FIFFT_INT: (4, '>i4'),
FIFF.FIFFT_JULIAN: (4, '>i4'),
FIFF.FIFFT_FLOAT: (4, '>f4'),
FIFF.FIFFT_DOUBLE: (8, '>f8'),
FIFF.FIFFT_COMPLEX_FLOAT: (8, '>f4'),
FIFF.FIFFT_COMPLEX_DOUBLE: (16, '>f8'),
}
def _read_matrix(fid, tag, shape, rlims, matrix_coding):
"""Read a matrix (dense or sparse) tag."""
matrix_coding = matrix_coding >> 16
# This should be easy to implement (see _frombuffer_rows)
# if we need it, but for now, it's not...
if shape is not None:
raise ValueError('Row reading not implemented for matrices '
'yet')
# Matrices
if matrix_coding == _matrix_coding_dense:
# Find dimensions and return to the beginning of tag data
pos = fid.tell()
fid.seek(tag.size - 4, 1)
ndim = int(np.frombuffer(fid.read(4), dtype='>i4'))
fid.seek(-(ndim + 1) * 4, 1)
dims = np.frombuffer(fid.read(4 * ndim), dtype='>i4')[::-1]
#
# Back to where the data start
#
fid.seek(pos, 0)
if ndim > 3:
raise Exception('Only 2 or 3-dimensional matrices are '
'supported at this time')
matrix_type = _data_type & tag.type
try:
bit, dtype = _matrix_bit_dtype[matrix_type]
except KeyError:
raise RuntimeError('Cannot handle matrix of type %d yet'
% matrix_type)
data = fid.read(int(bit * dims.prod()))
data = np.frombuffer(data, dtype=dtype)
# Note: we need the non-conjugate transpose here
if matrix_type == FIFF.FIFFT_COMPLEX_FLOAT:
data = data.view('>c8')
elif matrix_type == FIFF.FIFFT_COMPLEX_DOUBLE:
data = data.view('>c16')
data.shape = dims
elif matrix_coding in (_matrix_coding_CCS, _matrix_coding_RCS):
# Find dimensions and return to the beginning of tag data
pos = fid.tell()
fid.seek(tag.size - 4, 1)
ndim = int(np.frombuffer(fid.read(4), dtype='>i4'))
fid.seek(-(ndim + 2) * 4, 1)
dims = np.frombuffer(fid.read(4 * (ndim + 1)), dtype='>i4')
if ndim != 2:
raise Exception('Only two-dimensional matrices are '
'supported at this time')
# Back to where the data start
fid.seek(pos, 0)
nnz = int(dims[0])
nrow = int(dims[1])
ncol = int(dims[2])
data = np.frombuffer(fid.read(4 * nnz), dtype='>f4')
shape = (dims[1], dims[2])
if matrix_coding == _matrix_coding_CCS:
# CCS
tmp_indices = fid.read(4 * nnz)
indices = np.frombuffer(tmp_indices, dtype='>i4')
tmp_ptr = fid.read(4 * (ncol + 1))
indptr = np.frombuffer(tmp_ptr, dtype='>i4')
if indptr[-1] > len(indices) or np.any(indptr < 0):
# There was a bug in MNE-C that caused some data to be
# stored without byte swapping
indices = np.concatenate(
(np.frombuffer(tmp_indices[:4 * (nrow + 1)], dtype='>i4'),
np.frombuffer(tmp_indices[4 * (nrow + 1):], dtype='<i4')))
indptr = np.frombuffer(tmp_ptr, dtype='<i4')
data = sparse.csc_matrix((data, indices, indptr), shape=shape)
else:
# RCS
tmp_indices = fid.read(4 * nnz)
indices = np.frombuffer(tmp_indices, dtype='>i4')
tmp_ptr = fid.read(4 * (nrow + 1))
indptr = np.frombuffer(tmp_ptr, dtype='>i4')
if indptr[-1] > len(indices) or np.any(indptr < 0):
# There was a bug in MNE-C that caused some data to be
# stored without byte swapping
indices = np.concatenate(
(np.frombuffer(tmp_indices[:4 * (ncol + 1)], dtype='>i4'),
np.frombuffer(tmp_indices[4 * (ncol + 1):], dtype='<i4')))
indptr = np.frombuffer(tmp_ptr, dtype='<i4')
data = sparse.csr_matrix((data, indices,
indptr), shape=shape)
else:
raise Exception('Cannot handle other than dense or sparse '
'matrices yet')
return data
def _read_simple(fid, tag, shape, rlims, dtype):
"""Read simple datatypes from tag (typically used with partial)."""
return _frombuffer_rows(fid, tag.size, dtype=dtype, shape=shape,
rlims=rlims)
def _read_string(fid, tag, shape, rlims):
"""Read a string tag."""
# Always decode to ISO 8859-1 / latin1 (FIFF standard).
d = _frombuffer_rows(fid, tag.size, dtype='>c', shape=shape, rlims=rlims)
return str(d.tobytes().decode('latin1', 'ignore'))
def _read_complex_float(fid, tag, shape, rlims):
"""Read complex float tag."""
# data gets stored twice as large
if shape is not None:
shape = (shape[0], shape[1] * 2)
d = _frombuffer_rows(fid, tag.size, dtype=">f4", shape=shape, rlims=rlims)
d = d.view(">c8")
return d
def _read_complex_double(fid, tag, shape, rlims):
"""Read complex double tag."""
# data gets stored twice as large
if shape is not None:
shape = (shape[0], shape[1] * 2)
d = _frombuffer_rows(fid, tag.size, dtype=">f8", shape=shape, rlims=rlims)
d = d.view(">c16")
return d
def _read_id_struct(fid, tag, shape, rlims):
"""Read ID struct tag."""
return dict(
version=int(np.frombuffer(fid.read(4), dtype=">i4")),
machid=np.frombuffer(fid.read(8), dtype=">i4"),
secs=int(np.frombuffer(fid.read(4), dtype=">i4")),
usecs=int(np.frombuffer(fid.read(4), dtype=">i4")))
def _read_dig_point_struct(fid, tag, shape, rlims):
"""Read dig point struct tag."""
kind = int(np.frombuffer(fid.read(4), dtype=">i4"))
kind = _dig_kind_named.get(kind, kind)
ident = int(np.frombuffer(fid.read(4), dtype=">i4"))
if kind == FIFF.FIFFV_POINT_CARDINAL:
ident = _dig_cardinal_named.get(ident, ident)
return dict(
kind=kind, ident=ident,
r=np.frombuffer(fid.read(12), dtype=">f4"),
coord_frame=FIFF.FIFFV_COORD_UNKNOWN)
def _read_coord_trans_struct(fid, tag, shape, rlims):
"""Read coord trans struct tag."""
from ..transforms import Transform
fro = int(np.frombuffer(fid.read(4), dtype=">i4"))
to = int(np.frombuffer(fid.read(4), dtype=">i4"))
rot = np.frombuffer(fid.read(36), dtype=">f4").reshape(3, 3)
move = np.frombuffer(fid.read(12), dtype=">f4")
trans = np.r_[np.c_[rot, move],
np.array([[0], [0], [0], [1]]).T]
data = Transform(fro, to, trans)
fid.seek(48, 1) # Skip over the inverse transformation
return data
_ch_coord_dict = {
FIFF.FIFFV_MEG_CH: FIFF.FIFFV_COORD_DEVICE,
FIFF.FIFFV_REF_MEG_CH: FIFF.FIFFV_COORD_DEVICE,
FIFF.FIFFV_EEG_CH: FIFF.FIFFV_COORD_HEAD,
}
def _read_ch_info_struct(fid, tag, shape, rlims):
"""Read channel info struct tag."""
d = dict(
scanno=int(np.frombuffer(fid.read(4), dtype=">i4")),
logno=int(np.frombuffer(fid.read(4), dtype=">i4")),
kind=int(np.frombuffer(fid.read(4), dtype=">i4")),
range=float(np.frombuffer(fid.read(4), dtype=">f4")),
cal=float(np.frombuffer(fid.read(4), dtype=">f4")),
coil_type=int(np.frombuffer(fid.read(4), dtype=">i4")),
# deal with really old OSX Anaconda bug by casting to float64
loc=np.frombuffer(fid.read(48), dtype=">f4").astype(np.float64),
# unit and exponent
unit=int(np.frombuffer(fid.read(4), dtype=">i4")),
unit_mul=int(np.frombuffer(fid.read(4), dtype=">i4")),
)
# channel name
ch_name = np.frombuffer(fid.read(16), dtype=">c")
ch_name = ch_name[:np.argmax(ch_name == b'')].tobytes()
d['ch_name'] = ch_name.decode()
# coil coordinate system definition
d['coord_frame'] = _ch_coord_dict.get(d['kind'], FIFF.FIFFV_COORD_UNKNOWN)
d['kind'] = _ch_kind_named.get(d['kind'], d['kind'])
d['coil_type'] = _ch_coil_type_named.get(d['coil_type'], d['coil_type'])
d['unit'] = _ch_unit_named.get(d['unit'], d['unit'])
d['unit_mul'] = _ch_unit_mul_named.get(d['unit_mul'], d['unit_mul'])
return d
def _read_old_pack(fid, tag, shape, rlims):
"""Read old pack tag."""
offset = float(np.frombuffer(fid.read(4), dtype=">f4"))
scale = float(np.frombuffer(fid.read(4), dtype=">f4"))
data = np.frombuffer(fid.read(tag.size - 8), dtype=">i2")
data = data * scale # to float64
data += offset
return data
def _read_dir_entry_struct(fid, tag, shape, rlims):
"""Read dir entry struct tag."""
return [_read_tag_header(fid) for _ in range(tag.size // 16 - 1)]
def _read_julian(fid, tag, shape, rlims):
"""Read julian tag."""
return _julian_to_cal(int(np.frombuffer(fid.read(4), dtype=">i4")))
# Read types call dict
_call_dict = {
FIFF.FIFFT_STRING: _read_string,
FIFF.FIFFT_COMPLEX_FLOAT: _read_complex_float,
FIFF.FIFFT_COMPLEX_DOUBLE: _read_complex_double,
FIFF.FIFFT_ID_STRUCT: _read_id_struct,
FIFF.FIFFT_DIG_POINT_STRUCT: _read_dig_point_struct,
FIFF.FIFFT_COORD_TRANS_STRUCT: _read_coord_trans_struct,
FIFF.FIFFT_CH_INFO_STRUCT: _read_ch_info_struct,
FIFF.FIFFT_OLD_PACK: _read_old_pack,
FIFF.FIFFT_DIR_ENTRY_STRUCT: _read_dir_entry_struct,
FIFF.FIFFT_JULIAN: _read_julian,
}
_call_dict_names = {
FIFF.FIFFT_STRING: 'str',
FIFF.FIFFT_COMPLEX_FLOAT: 'c8',
FIFF.FIFFT_COMPLEX_DOUBLE: 'c16',
FIFF.FIFFT_ID_STRUCT: 'ids',
FIFF.FIFFT_DIG_POINT_STRUCT: 'dps',
FIFF.FIFFT_COORD_TRANS_STRUCT: 'cts',
FIFF.FIFFT_CH_INFO_STRUCT: 'cis',
FIFF.FIFFT_OLD_PACK: 'op_',
FIFF.FIFFT_DIR_ENTRY_STRUCT: 'dir',
FIFF.FIFFT_JULIAN: 'jul',
FIFF.FIFFT_VOID: 'nul', # 0
}
# Append the simple types
_simple_dict = {
FIFF.FIFFT_BYTE: '>B',
FIFF.FIFFT_SHORT: '>i2',
FIFF.FIFFT_INT: '>i4',
FIFF.FIFFT_USHORT: '>u2',
FIFF.FIFFT_UINT: '>u4',
FIFF.FIFFT_FLOAT: '>f4',
FIFF.FIFFT_DOUBLE: '>f8',
FIFF.FIFFT_DAU_PACK16: '>i2',
}
for key, dtype in _simple_dict.items():
_call_dict[key] = partial(_read_simple, dtype=dtype)
_call_dict_names[key] = dtype
def read_tag(fid, pos=None, shape=None, rlims=None):
"""Read a Tag from a file at a given position.
Parameters
----------
fid : file
The open FIF file descriptor.
pos : int
The position of the Tag in the file.
shape : tuple | None
If tuple, the shape of the stored matrix. Only to be used with
data stored as a vector (not implemented for matrices yet).
rlims : tuple | None
If tuple, the first (inclusive) and last (exclusive) rows to retrieve.
Note that data are assumed to be stored row-major in the file. Only to
be used with data stored as a vector (not implemented for matrices
yet).
Returns
-------
tag : Tag
The Tag read.
"""
if pos is not None:
fid.seek(pos, 0)
tag = _read_tag_header(fid)
if tag.size > 0:
matrix_coding = _is_matrix & tag.type
if matrix_coding != 0:
tag.data = _read_matrix(fid, tag, shape, rlims, matrix_coding)
else:
# All other data types
try:
fun = _call_dict[tag.type]
except KeyError:
raise Exception('Unimplemented tag data type %s' % tag.type)
tag.data = fun(fid, tag, shape, rlims)
if tag.next != FIFF.FIFFV_NEXT_SEQ:
# f.seek(tag.next,0)
fid.seek(tag.next, 1) # XXX : fix? pb when tag.next < 0
return tag
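# Usage sketch (illustrative only; ``fid`` is an open FIF file and ``entry`` an
# assumed directory entry with a ``pos`` attribute, e.g. from
# ``_read_dir_entry_struct``; ``n_rows``/``n_cols`` are hypothetical dimensions):
#
#     tag = read_tag(fid, pos=entry.pos)             # full tag, data decoded
#     first_rows = read_tag(fid, pos=entry.pos, shape=(n_rows, n_cols),
#                           rlims=(0, 10)).data      # only rows 0..9 of a 2D tag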
def find_tag(fid, node, findkind):
"""Find Tag in an open FIF file descriptor.
Parameters
----------
fid : file-like
Open file.
node : dict
Node to search.
findkind : int
Tag kind to find.
Returns
-------
tag : instance of Tag
The first tag found.
"""
if node['directory'] is not None:
for subnode in node['directory']:
if subnode.kind == findkind:
return read_tag(fid, subnode.pos)
return None
def has_tag(node, kind):
"""Check if the node contains a Tag of a given kind."""
for d in node['directory']:
if d.kind == kind:
return True
return False
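# Usage sketch (illustrative only; ``tree`` is an assumed node dict with a
# 'directory' list of directory entries, and ``some_kind`` a FIFF tag kind):
#
#     if has_tag(tree, some_kind):
#         tag = find_tag(fid, tree, some_kind)   # first matching tag
#         value = tag.data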
|
import os
import sys
import shutil
import tempfile
import subprocess
from flexx import flx
from flexx.util.testing import run_tests_if_main, raises, skip
def setup_module():
flx.manager._clear_old_pending_sessions(1)
flx.assets.__init__()
flx.assets.associate_asset(__name__, 'foo.js', 'xx')
flx.assets.associate_asset(__name__,
'https://cdnjs.cloudflare.com/ajax/libs/codemirror/5.21.0/codemirror.min.js')
def teardown_module():
flx.manager._clear_old_pending_sessions(1)
flx.assets.__init__()
class MyExportTestApp(flx.JsComponent):
pass
def test_dump():
# Standalone apps
app = flx.App(MyExportTestApp)
d = app.dump(None, 0)
assert len(d) == 1 and 'myexporttestapp.html' in d.keys()
app = flx.App(MyExportTestApp)
app.serve('')
d = app.dump(None, 0)
assert len(d) == 1 and 'index.html' in d.keys()
with raises(ValueError):
d = app.dump('', 0)
d = app.dump('index.htm', 0)
assert len(d) == 1 and 'index.htm' in d.keys()
# Multiple files
d = app.dump('index.html', 2)
fnames = list(d.keys())
assert len(fnames) == 6 and 'index.html' in fnames
assert 'flexx/assets/shared/foo.js' in d
assert 'flexx/assets/shared/flexx-core.js' in d
assert 'flexx/assets/shared/codemirror.min.js' in d
d = app.dump('index.html', 3)
fnames = list(d.keys())
assert len(fnames) == 5 and 'index.html' in fnames
assert 'flexx/assets/shared/foo.js' in d
assert 'flexx/assets/shared/flexx-core.js' in d
assert 'flexx/assets/shared/codemirror.min.js' not in d
def test_export():
dir = os.path.join(tempfile.gettempdir(), 'flexx_export')
if os.path.isdir(dir):
shutil.rmtree(dir)
# os.mkdir(dir) -> No, export can create this dir!
# Create app and export
app = flx.App(MyExportTestApp)
app.export(dir, 0) # standalone
assert len(os.listdir(dir)) == 1
assert os.path.isfile(os.path.join(dir, 'myexporttestapp.html'))
# Export again, now with external assets
app.export(dir, 3)
assert len(os.listdir(dir)) == 2
assert os.path.isfile(os.path.join(dir, 'flexx', 'assets', 'shared', 'reset.css'))
assert os.path.isfile(os.path.join(dir, 'flexx', 'assets', 'shared', 'flexx-core.js'))
assert os.path.isfile(os.path.join(dir, 'flexx', 'assets', 'shared', 'foo.js'))
# Export under specific name
app.export(os.path.join(dir, 'foo.html'))
assert len(os.listdir(dir)) == 3
assert os.path.isfile(os.path.join(dir, 'foo.html'))
def test_dump_consistency():
# This is why we have ``session._id = name`` in _app.py
app1 = flx.App(MyExportTestApp)
d1 = app1.dump()
app2 = flx.App(MyExportTestApp)
d2 = app2.dump()
assert d1 == d2
def test_assetstore_data():
store = flx.assets.__class__() # new AssetStore
store.add_shared_data('foo.png', b'xx')
d = store._dump_data()
assert len(d) == 1 and 'flexx/data/shared/foo.png' in d.keys()
CODE = """
import sys
from flexx import flx
class Foo(flx.Widget):
pass
app = flx.App(Foo)
d = app.dump()
for fname in ['foo.html', 'flexx/assets/shared/flexx.ui._widget.js']:
assert fname in d
assert not flx.manager.get_app_names(), 'manager.get_app_names not empty'
assert not flx.manager._appinfo, 'manager._appinfo not empty'
assert 'tornado' not in sys.modules, 'tornado unexpectedly imported'
"""
def test_dump_side_effects():
# In subprocess to get a real clean sheet
p = subprocess.Popen([sys.executable, '-c', CODE], env=os.environ,
stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
out = p.communicate()[0]
if p.returncode:
raise RuntimeError(out.decode())
run_tests_if_main()
|
import os
from socket import gethostname
import time
from uuid import getnode as getmac
from urllib.parse import unquote
import httplib2 # included with oauth2client
from oauth2client.client import TokenRevokeError
import gmusicapi
from gmusicapi.clients.shared import _OAuthClient
from gmusicapi.appdirs import my_appdirs
from gmusicapi.exceptions import CallFailure, NotLoggedIn
from gmusicapi.protocol import musicmanager, upload_pb2, locker_pb2
from gmusicapi.utils import utils
from gmusicapi import session
class Musicmanager(_OAuthClient):
"""Allows uploading by posing as Google's Music Manager.
Musicmanager uses OAuth, so a plaintext email and password are not required
when logging in.
For most authors and users of gmusicapi scripts,
:func:`perform_oauth` should be run once per machine to
store credentials to disk.
Future calls to :func:`login` can use the stored credentials by default.
Some authors may want more control over the OAuth flow.
In this case, credentials can be directly provided to :func:`login`.
"""
OAUTH_FILEPATH = os.path.join(my_appdirs.user_data_dir, 'oauth.cred')
_session_class = session.Musicmanager
def __init__(self, debug_logging=True, validate=True, verify_ssl=True):
super().__init__(self.__class__.__name__,
debug_logging,
validate,
verify_ssl)
def login(self, oauth_credentials=OAUTH_FILEPATH,
uploader_id=None, uploader_name=None):
"""Authenticates the Music Manager using OAuth.
Returns ``True`` on success, ``False`` on failure.
Unlike the :class:`Webclient`, OAuth allows authentication without
providing plaintext credentials to the application.
In most cases, the default parameters should be acceptable. Users on
virtual machines will want to provide `uploader_id`.
:param oauth_credentials: ``oauth2client.client.OAuth2Credentials`` or the path to a
``oauth2client.file.Storage`` file. By default, the same default path used by
:func:`perform_oauth` is used.
Endusers will likely call :func:`perform_oauth` once to write
credentials to disk and then ignore this parameter.
This param
is mostly intended to allow flexibility for developers of a
3rd party service who intend to perform their own OAuth flow
(eg on their website).
:param uploader_id: a unique id as a MAC address, eg ``'00:11:22:33:AA:BB'``.
This should only be provided in cases where the default
(host MAC address incremented by 1) will not work.
Upload behavior is undefined if a Music Manager uses the same id, especially when
reporting bad matches.
``ValueError`` will be raised if this is provided but not in the proper form.
``OSError`` will be raised if this is not provided and a real MAC could not be
determined (most common when running on a VPS).
If provided, use the same id on all future runs for this machine,
because of the upload device limit explained below.
:param uploader_name: human-readable non-unique id; default is
``"<hostname> (gmusicapi-{version})"``.
This doesn't appear to be a part of authentication at all.
Registering with (id, name = X, Y) and logging in with
(id, name = X, Z) works, and does not change the server-stored
uploader_name.
There are hard limits on how many upload devices can be registered; refer to `Google's
docs <http://support.google.com/googleplay/bin/answer.py?hl=en&answer=1230356>`__. There
have been limits on deauthorizing devices in the past, so it's smart not to register
more devices than necessary.
"""
return (self._oauth_login(oauth_credentials) and
self._perform_upauth(uploader_id, uploader_name))
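    # Typical flow (sketch only, not executed here; see the docstring above):
    # ``perform_oauth`` is run once per machine to store credentials at
    # OAUTH_FILEPATH, after which ``login`` needs no arguments:
    #
    #     mm = Musicmanager()
    #     # Musicmanager.perform_oauth()   # one-time, interactive OAuth step
    #     mm.login()                       # reads the stored credentials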
def _perform_upauth(self, uploader_id, uploader_name):
"""Auth or register ourselves as an upload client.
Return True on success; see :py:func:`login` for params.
"""
if uploader_id is None:
mac_int = getmac()
if (mac_int >> 40) % 2:
self.session.logout()
raise OSError('a valid MAC could not be determined.'
' Provide uploader_id (and be'
' sure to provide the same one on future runs).')
else:
# distinguish us from a Music Manager on this machine
mac_int = (mac_int + 1) % (1 << 48)
uploader_id = utils.create_mac_string(mac_int)
if not utils.is_valid_mac(uploader_id):
self.session.logout()
raise ValueError('uploader_id is not in a valid form.'
'\nProvide 6 pairs of hex digits'
' with capital letters'
' (eg "00:11:22:33:AA:BB")')
if uploader_name is None:
uploader_name = gethostname() + " (gmusicapi-%s)" % gmusicapi.__version__
try:
# this is a MM-specific step that might register a new device.
self._make_call(musicmanager.AuthenticateUploader,
uploader_id,
uploader_name)
self.logger.info("successful upauth")
self.uploader_id = uploader_id
self.uploader_name = uploader_name
except CallFailure:
self.logger.exception("upauth failure")
self.session.logout()
return False
return True
def logout(self, revoke_oauth=False):
"""Forgets local authentication in this Client instance.
:param revoke_oauth: if True, oauth credentials will be permanently
revoked. If credentials came from a file, it will be deleted.
Returns ``True`` on success."""
# TODO the login/logout stuff is all over the place
success = True
if revoke_oauth:
try:
# this automatically deletes a Storage file, if present
self.session._oauth_creds.revoke(httplib2.Http())
except TokenRevokeError:
self.logger.exception("could not revoke oauth credentials")
success = False
self.uploader_id = None
self.uploader_name = None
return success and super().logout()
# mostly copy-paste from Webclient.get_all_songs.
# not worried about overlap in this case; the logic of either could change.
def get_uploaded_songs(self, incremental=False):
"""Returns a list of dictionaries, each with the following keys:
``('id', 'title', 'album', 'album_artist', 'artist', 'track_number',
'track_size', 'disc_number', 'total_disc_count')``.
All Access tracks that were added to the library will not be included,
only tracks uploaded/matched by the user.
:param incremental: if True, return a generator that yields lists
of at most 1000 dictionaries
as they are retrieved from the server. This can be useful for
presenting a loading bar to a user.
"""
to_return = self._get_all_songs()
if not incremental:
to_return = [song for chunk in to_return for song in chunk]
return to_return
# mostly copy-paste from Webclient.get_all_songs.
# not worried about overlap in this case; the logic of either could change.
def get_purchased_songs(self, incremental=False):
"""Returns a list of dictionaries, each with the following keys:
``('id', 'title', 'album', 'album_artist', 'artist', 'track_number',
'track_size', 'disc_number', 'total_disc_count')``.
:param incremental: if True, return a generator that yields lists
of at most 1000 dictionaries
as they are retrieved from the server. This can be useful for
presenting a loading bar to a user.
"""
to_return = self._get_all_songs(export_type=2)
if not incremental:
to_return = [song for chunk in to_return for song in chunk]
return to_return
@staticmethod
def _track_info_to_dict(track_info):
"""Given a download_pb2.DownloadTrackInfo, return a dictionary."""
# figure it's better to hardcode keys here than use introspection
# and risk returning a new field all of a sudden.
return {field: getattr(track_info, field) for field in
('id', 'title', 'album', 'album_artist', 'artist',
'track_number', 'track_size', 'disc_number',
'total_disc_count')}
def _get_all_songs(self, export_type=1):
"""Return a generator of song chunks."""
get_next_chunk = True
# need to spoof .continuation_token access, and
# can't add attrs to object(). Can with functions.
lib_chunk = lambda: 0 # noqa
lib_chunk.continuation_token = None
while get_next_chunk:
lib_chunk = self._make_call(musicmanager.ListTracks,
self.uploader_id,
lib_chunk.continuation_token,
export_type)
yield [self._track_info_to_dict(info)
for info in lib_chunk.download_track_info]
get_next_chunk = lib_chunk.HasField('continuation_token')
@utils.enforce_id_param
def download_song(self, song_id):
"""Download an uploaded or purchased song from your library.
Subscription tracks can't be downloaded with this method.
Returns a tuple ``(u'suggested_filename', 'audio_bytestring')``.
The filename
will be what the Music Manager would save the file as,
presented as a unicode string with the proper file extension.
You don't have to use it if you don't want to.
:param song_id: a single uploaded or purchased song id.
To write the song to disk, use something like::
filename, audio = mm.download_song(an_id)
# if open() throws a UnicodeEncodeError, either use
# filename.encode('utf-8')
# or change your default encoding to something sane =)
with open(filename, 'wb') as f:
f.write(audio)
Unlike with :py:func:`Webclient.get_song_download_info
<gmusicapi.clients.Webclient.get_song_download_info>`,
there is no download limit when using this interface.
Also unlike the Webclient, downloading a track requires authentication.
Returning a url does not suffice, since retrieving a track without auth
will produce an http 500.
"""
url = self._make_call(musicmanager.GetDownloadLink,
song_id,
self.uploader_id)['url']
response = self._make_call(musicmanager.DownloadTrack, url)
cd_header = response.headers['content-disposition']
filename = unquote(cd_header.split("filename*=UTF-8''")[-1])
return (filename, response.content)
def get_quota(self):
"""Returns a tuple of (number of uploaded tracks, allowed number of uploaded tracks)."""
if self.uploader_id is None:
raise NotLoggedIn("Not authenticated as an upload device;"
" run Musicmanager.login(...perform_upload_auth=True...)"
" first.")
client_state = self._make_call(
musicmanager.GetClientState, self.uploader_id).clientstate_response
return (client_state.total_track_count, client_state.locker_track_limit)
@utils.accept_singleton(str)
@utils.empty_arg_shortcircuit(return_code='{}')
def upload(self, filepaths, enable_matching=False,
enable_transcoding=True, transcode_quality='320k'):
"""Uploads the given filepaths.
All non-mp3 files will be transcoded before being uploaded.
This is a limitation of Google's backend.
An available installation of ffmpeg or avconv is required in most cases:
see `the installation page
<https://unofficial-google-music-api.readthedocs.io/en
/latest/usage.html?#installation>`__ for details.
Returns a 3-tuple ``(uploaded, matched, not_uploaded)`` of dictionaries, eg::
(
{'<filepath>': '<new server id>'}, # uploaded
{'<filepath>': '<new server id>'}, # matched
{'<filepath>': '<reason, eg ALREADY_EXISTS>'} # not uploaded
)
:param filepaths: a list of filepaths, or a single filepath.
:param enable_matching: if ``True``, attempt to use `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__
to avoid uploading every song.
This requires ffmpeg or avconv.
**WARNING**: currently, mismatched songs can *not* be fixed with the 'Fix Incorrect Match'
button nor :py:func:`report_incorrect_match
<gmusicapi.clients.Webclient.report_incorrect_match>`.
They would have to be deleted and reuploaded with matching disabled
(or with the Music Manager).
Fixing matches from gmusicapi may be supported in a future release; see issue `#89
<https://github.com/simon-weber/gmusicapi/issues/89>`__.
:param enable_transcoding:
if ``False``, non-MP3 files that aren't matched using `scan and match
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=2920799&topic=2450455>`__
will not be uploaded.
:param transcode_quality: if int, pass to ffmpeg/avconv ``-q:a`` for libmp3lame
(`lower-better int,
<http://trac.ffmpeg.org/wiki/Encoding%20VBR%20(Variable%20Bit%20Rate)%20mp3%20audio>`__).
If string, pass to ffmpeg/avconv ``-b:a`` (eg ``'128k'`` for an average bitrate of 128k).
The default is 320kbps cbr (the highest possible quality).
All Google-supported filetypes are supported; see `Google's documentation
<http://support.google.com/googleplay/bin/answer.py?hl=en&answer=1100462>`__.
If ``PERMANENT_ERROR`` is given as a not_uploaded reason, attempts to reupload will never
succeed. The file will need to be changed before the server will reconsider it; the easiest
way is to change metadata tags (it's not important that the tag be uploaded, just that the
contents of the file change somehow).
"""
if self.uploader_id is None or self.uploader_name is None:
raise NotLoggedIn("Not authenticated as an upload device;"
" run Api.login(...perform_upload_auth=True...)"
" first.")
# TODO there is way too much code in this function.
# To return.
uploaded = {}
matched = {}
not_uploaded = {}
# Gather local information on the files.
local_info = {} # {clientid: (path, Track)}
for path in filepaths:
try:
track = musicmanager.UploadMetadata.fill_track_info(path)
except BaseException as e:
self.logger.exception("problem gathering local info of '%r'", path)
user_err_msg = str(e)
if 'Non-ASCII strings must be converted to unicode' in str(e):
# This is a protobuf-specific error; they require either ascii or unicode.
# To keep behavior consistent, make no effort to guess - require users
# to decode first.
user_err_msg = ("nonascii bytestrings must be decoded to unicode"
" (error: '%s')" % user_err_msg)
not_uploaded[path] = user_err_msg
else:
local_info[track.client_id] = (path, track)
if not local_info:
return uploaded, matched, not_uploaded
# TODO allow metadata faking
# Upload metadata; the server tells us what to do next.
res = self._make_call(musicmanager.UploadMetadata,
[t for (path, t) in local_info.values()],
self.uploader_id)
# TODO checking for proper contents should be handled in verification
md_res = res.metadata_response
responses = [r for r in md_res.track_sample_response]
sample_requests = [req for req in md_res.signed_challenge_info]
# Send scan and match samples if requested.
for sample_request in sample_requests:
path, track = local_info[sample_request.challenge_info.client_track_id]
bogus_sample = None
if not enable_matching:
bogus_sample = b'' # just send empty bytes
try:
res = self._make_call(musicmanager.ProvideSample,
path, sample_request, track,
self.uploader_id, bogus_sample)
except (OSError, ValueError) as e:
self.logger.warning("couldn't create scan and match sample for '%r': %s",
path, str(e))
not_uploaded[path] = str(e)
else:
responses.extend(res.sample_response.track_sample_response)
# Read sample responses and prep upload requests.
to_upload = {} # {serverid: (path, Track, do_not_rematch?)}
for sample_res in responses:
path, track = local_info[sample_res.client_track_id]
if sample_res.response_code == upload_pb2.TrackSampleResponse.MATCHED:
self.logger.info("matched '%r' to sid %s", path, sample_res.server_track_id)
matched[path] = sample_res.server_track_id
if not enable_matching:
self.logger.error("'%r' was matched without matching enabled", path)
elif sample_res.response_code == upload_pb2.TrackSampleResponse.UPLOAD_REQUESTED:
to_upload[sample_res.server_track_id] = (path, track, False)
else:
# there was a problem
# report the symbolic name of the response code enum for debugging
enum_desc = upload_pb2._TRACKSAMPLERESPONSE.enum_types[0]
res_name = enum_desc.values_by_number[sample_res.response_code].name
err_msg = "TrackSampleResponse code %s: %s" % (sample_res.response_code, res_name)
if res_name == 'ALREADY_EXISTS':
# include the sid, too
# this shouldn't be relied on externally, but I use it in
# tests - being surrounded by parens is how it's matched
err_msg += "(%s)" % sample_res.server_track_id
self.logger.warning("upload of '%r' rejected: %s", path, err_msg)
not_uploaded[path] = err_msg
# Send upload requests.
if to_upload:
# TODO reordering requests could avoid wasting time waiting for reup sync
self._make_call(musicmanager.UpdateUploadState, 'start', self.uploader_id)
for server_id, (path, track, do_not_rematch) in to_upload.items():
# It can take a few tries to get a session.
should_retry = True
attempts = 0
while should_retry and attempts < 10:
session = self._make_call(musicmanager.GetUploadSession,
self.uploader_id, len(uploaded),
track, path, server_id, do_not_rematch)
attempts += 1
got_session, error_details = \
musicmanager.GetUploadSession.process_session(session)
if got_session:
self.logger.info("got an upload session for '%r'", path)
break
should_retry, reason, error_code = error_details
self.logger.debug("problem getting upload session: %s\ncode=%s retrying=%s",
reason, error_code, should_retry)
if error_code == 200 and do_not_rematch:
# reupload requests need to wait on a server sync
# 200 == already uploaded, so force a retry in this case
should_retry = True
time.sleep(6) # wait before retrying
else:
err_msg = "GetUploadSession error %s: %s" % (error_code, reason)
self.logger.warning("giving up on upload session for '%r': %s", path, err_msg)
not_uploaded[path] = err_msg
continue # to next upload
# got a session, do the upload
# this terribly inconsistent naming isn't my fault: Google--
session = session['sessionStatus']
external = session['externalFieldTransfers'][0]
session_url = external['putInfo']['url']
content_type = external.get('content_type', 'audio/mpeg')
if track.original_content_type != locker_pb2.Track.MP3:
if enable_transcoding:
try:
self.logger.info("transcoding '%r' to mp3", path)
contents = utils.transcode_to_mp3(path, quality=transcode_quality)
except (OSError, ValueError) as e:
self.logger.warning("error transcoding %r: %s", path, e)
not_uploaded[path] = "transcoding error: %s" % e
continue
else:
not_uploaded[path] = "transcoding disabled"
continue
else:
with open(path, 'rb') as f:
contents = f.read()
upload_response = self._make_call(musicmanager.UploadFile,
session_url, content_type, contents)
success = upload_response.get('sessionStatus', {}).get('state')
if success:
uploaded[path] = server_id
else:
# 404 == already uploaded? serverside check on clientid?
self.logger.debug("could not finalize upload of '%r'. response: %s",
path, upload_response)
not_uploaded[path] = 'could not finalize upload; details in log'
self._make_call(musicmanager.UpdateUploadState, 'stopped', self.uploader_id)
return uploaded, matched, not_uploaded
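    # Result handling sketch (illustrative only; ``mm`` is a logged-in
    # Musicmanager and ``paths`` an assumed list of local audio files):
    #
    #     uploaded, matched, not_uploaded = mm.upload(paths, enable_matching=False)
    #     for path, server_id in uploaded.items():
    #         print('uploaded', path, '->', server_id)
    #     for path, reason in not_uploaded.items():
    #         print('skipped', path, ':', reason)    # eg 'ALREADY_EXISTS(<sid>)'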
|
import logging
from pprint import pformat
from pynetgear import Netgear
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import (
CONF_DEVICES,
CONF_EXCLUDE,
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_APS = "accesspoints"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=""): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_USERNAME, default=""): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT): cv.port,
vol.Optional(CONF_DEVICES, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_EXCLUDE, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_APS, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
)
def get_scanner(hass, config):
"""Validate the configuration and returns a Netgear scanner."""
info = config[DOMAIN]
host = info[CONF_HOST]
ssl = info[CONF_SSL]
username = info[CONF_USERNAME]
password = info[CONF_PASSWORD]
port = info.get(CONF_PORT)
devices = info[CONF_DEVICES]
excluded_devices = info[CONF_EXCLUDE]
accesspoints = info[CONF_APS]
api = Netgear(password, host, username, port, ssl)
scanner = NetgearDeviceScanner(api, devices, excluded_devices, accesspoints)
_LOGGER.debug("Logging in")
results = scanner.get_attached_devices()
if results is not None:
scanner.last_results = results
else:
_LOGGER.error("Failed to Login")
return None
return scanner
class NetgearDeviceScanner(DeviceScanner):
"""Queries a Netgear wireless router using the SOAP-API."""
def __init__(
self,
api,
devices,
excluded_devices,
accesspoints,
):
"""Initialize the scanner."""
self.tracked_devices = devices
self.excluded_devices = excluded_devices
self.tracked_accesspoints = accesspoints
self.last_results = []
self._api = api
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
devices = []
for dev in self.last_results:
tracked = (
not self.tracked_devices
or dev.mac in self.tracked_devices
or dev.name in self.tracked_devices
)
tracked = tracked and (
not self.excluded_devices
or not (
dev.mac in self.excluded_devices
or dev.name in self.excluded_devices
)
)
if tracked:
devices.append(dev.mac)
if (
self.tracked_accesspoints
and dev.conn_ap_mac in self.tracked_accesspoints
):
devices.append(f"{dev.mac}_{dev.conn_ap_mac}")
return devices
def get_device_name(self, device):
"""Return the name of the given device or the MAC if we don't know."""
parts = device.split("_")
mac = parts[0]
ap_mac = None
if len(parts) > 1:
ap_mac = parts[1]
name = None
for dev in self.last_results:
if dev.mac == mac:
name = dev.name
break
if not name or name == "--":
name = mac
if ap_mac:
ap_name = "Router"
for dev in self.last_results:
if dev.mac == ap_mac:
ap_name = dev.name
break
return f"{name} on {ap_name}"
return name
def _update_info(self):
"""Retrieve latest information from the Netgear router.
Returns boolean if scanning successful.
"""
_LOGGER.debug("Scanning")
results = self.get_attached_devices()
if _LOGGER.isEnabledFor(logging.DEBUG):
_LOGGER.debug("Scan result: \n%s", pformat(results))
if results is None:
_LOGGER.warning("Error scanning devices")
self.last_results = results or []
def get_attached_devices(self):
"""List attached devices with pynetgear.
The v2 method takes more time and is heavier on the router,
so we only use it if we need connected AP info.
"""
if self.tracked_accesspoints:
return self._api.get_attached_devices_2()
return self._api.get_attached_devices()
|
import logging
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from . import (
CONF_SENSORS,
DATA_EIGHT,
NAME_MAP,
EightSleepHeatEntity,
EightSleepUserEntity,
)
ATTR_ROOM_TEMP = "Room Temperature"
ATTR_AVG_ROOM_TEMP = "Average Room Temperature"
ATTR_BED_TEMP = "Bed Temperature"
ATTR_AVG_BED_TEMP = "Average Bed Temperature"
ATTR_RESP_RATE = "Respiratory Rate"
ATTR_AVG_RESP_RATE = "Average Respiratory Rate"
ATTR_HEART_RATE = "Heart Rate"
ATTR_AVG_HEART_RATE = "Average Heart Rate"
ATTR_SLEEP_DUR = "Time Slept"
ATTR_LIGHT_PERC = f"Light Sleep {PERCENTAGE}"
ATTR_DEEP_PERC = f"Deep Sleep {PERCENTAGE}"
ATTR_REM_PERC = f"REM Sleep {PERCENTAGE}"
ATTR_TNT = "Tosses & Turns"
ATTR_SLEEP_STAGE = "Sleep Stage"
ATTR_TARGET_HEAT = "Target Heating Level"
ATTR_ACTIVE_HEAT = "Heating Active"
ATTR_DURATION_HEAT = "Heating Time Remaining"
ATTR_PROCESSING = "Processing"
ATTR_SESSION_START = "Session Start"
ATTR_FIT_DATE = "Fitness Date"
ATTR_FIT_DURATION_SCORE = "Fitness Duration Score"
ATTR_FIT_ASLEEP_SCORE = "Fitness Asleep Score"
ATTR_FIT_OUT_SCORE = "Fitness Out-of-Bed Score"
ATTR_FIT_WAKEUP_SCORE = "Fitness Wakeup Score"
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the eight sleep sensors."""
if discovery_info is None:
return
name = "Eight"
sensors = discovery_info[CONF_SENSORS]
eight = hass.data[DATA_EIGHT]
if hass.config.units.is_metric:
units = "si"
else:
units = "us"
all_sensors = []
for sensor in sensors:
if "bed_state" in sensor:
all_sensors.append(EightHeatSensor(name, eight, sensor))
elif "room_temp" in sensor:
all_sensors.append(EightRoomSensor(name, eight, sensor, units))
else:
all_sensors.append(EightUserSensor(name, eight, sensor, units))
async_add_entities(all_sensors, True)
class EightHeatSensor(EightSleepHeatEntity):
"""Representation of an eight sleep heat-based sensor."""
def __init__(self, name, eight, sensor):
"""Initialize the sensor."""
super().__init__(eight)
self._sensor = sensor
self._mapped_name = NAME_MAP.get(self._sensor, self._sensor)
self._name = f"{name} {self._mapped_name}"
self._state = None
self._side = self._sensor.split("_")[0]
self._userid = self._eight.fetch_userid(self._side)
self._usrobj = self._eight.users[self._userid]
_LOGGER.debug(
"Heat Sensor: %s, Side: %s, User: %s",
self._sensor,
self._side,
self._userid,
)
@property
def name(self):
"""Return the name of the sensor, if any."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return PERCENTAGE
async def async_update(self):
"""Retrieve latest state."""
_LOGGER.debug("Updating Heat sensor: %s", self._sensor)
self._state = self._usrobj.heating_level
@property
def device_state_attributes(self):
"""Return device state attributes."""
return {
ATTR_TARGET_HEAT: self._usrobj.target_heating_level,
ATTR_ACTIVE_HEAT: self._usrobj.now_heating,
ATTR_DURATION_HEAT: self._usrobj.heating_remaining,
}
class EightUserSensor(EightSleepUserEntity):
"""Representation of an eight sleep user-based sensor."""
def __init__(self, name, eight, sensor, units):
"""Initialize the sensor."""
super().__init__(eight)
self._sensor = sensor
self._sensor_root = self._sensor.split("_", 1)[1]
self._mapped_name = NAME_MAP.get(self._sensor, self._sensor)
self._name = f"{name} {self._mapped_name}"
self._state = None
self._attr = None
self._units = units
self._side = self._sensor.split("_", 1)[0]
self._userid = self._eight.fetch_userid(self._side)
self._usrobj = self._eight.users[self._userid]
_LOGGER.debug(
"User Sensor: %s, Side: %s, User: %s",
self._sensor,
self._side,
self._userid,
)
@property
def name(self):
"""Return the name of the sensor, if any."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if (
"current_sleep" in self._sensor
or "last_sleep" in self._sensor
or "current_sleep_fitness" in self._sensor
):
return "Score"
if "bed_temp" in self._sensor:
if self._units == "si":
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
return None
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if "bed_temp" in self._sensor:
return "mdi:thermometer"
async def async_update(self):
"""Retrieve latest state."""
_LOGGER.debug("Updating User sensor: %s", self._sensor)
if "current" in self._sensor:
if "fitness" in self._sensor:
self._state = self._usrobj.current_sleep_fitness_score
self._attr = self._usrobj.current_fitness_values
else:
self._state = self._usrobj.current_sleep_score
self._attr = self._usrobj.current_values
elif "last" in self._sensor:
self._state = self._usrobj.last_sleep_score
self._attr = self._usrobj.last_values
elif "bed_temp" in self._sensor:
temp = self._usrobj.current_values["bed_temp"]
try:
if self._units == "si":
self._state = round(temp, 2)
else:
self._state = round((temp * 1.8) + 32, 2)
except TypeError:
self._state = None
elif "sleep_stage" in self._sensor:
self._state = self._usrobj.current_values["stage"]
@property
def device_state_attributes(self):
"""Return device state attributes."""
if self._attr is None:
# Skip attributes if the sensor type doesn't support them
return None
if "fitness" in self._sensor_root:
state_attr = {
ATTR_FIT_DATE: self._attr["date"],
ATTR_FIT_DURATION_SCORE: self._attr["duration"],
ATTR_FIT_ASLEEP_SCORE: self._attr["asleep"],
ATTR_FIT_OUT_SCORE: self._attr["out"],
ATTR_FIT_WAKEUP_SCORE: self._attr["wakeup"],
}
return state_attr
state_attr = {ATTR_SESSION_START: self._attr["date"]}
state_attr[ATTR_TNT] = self._attr["tnt"]
state_attr[ATTR_PROCESSING] = self._attr["processing"]
sleep_time = (
sum(self._attr["breakdown"].values()) - self._attr["breakdown"]["awake"]
)
state_attr[ATTR_SLEEP_DUR] = sleep_time
try:
state_attr[ATTR_LIGHT_PERC] = round(
(self._attr["breakdown"]["light"] / sleep_time) * 100, 2
)
except ZeroDivisionError:
state_attr[ATTR_LIGHT_PERC] = 0
try:
state_attr[ATTR_DEEP_PERC] = round(
(self._attr["breakdown"]["deep"] / sleep_time) * 100, 2
)
except ZeroDivisionError:
state_attr[ATTR_DEEP_PERC] = 0
try:
state_attr[ATTR_REM_PERC] = round(
(self._attr["breakdown"]["rem"] / sleep_time) * 100, 2
)
except ZeroDivisionError:
state_attr[ATTR_REM_PERC] = 0
try:
if self._units == "si":
room_temp = round(self._attr["room_temp"], 2)
else:
room_temp = round((self._attr["room_temp"] * 1.8) + 32, 2)
except TypeError:
room_temp = None
try:
if self._units == "si":
bed_temp = round(self._attr["bed_temp"], 2)
else:
bed_temp = round((self._attr["bed_temp"] * 1.8) + 32, 2)
except TypeError:
bed_temp = None
if "current" in self._sensor_root:
try:
state_attr[ATTR_RESP_RATE] = round(self._attr["resp_rate"], 2)
except TypeError:
state_attr[ATTR_RESP_RATE] = None
try:
state_attr[ATTR_HEART_RATE] = round(self._attr["heart_rate"], 2)
except TypeError:
state_attr[ATTR_HEART_RATE] = None
state_attr[ATTR_SLEEP_STAGE] = self._attr["stage"]
state_attr[ATTR_ROOM_TEMP] = room_temp
state_attr[ATTR_BED_TEMP] = bed_temp
elif "last" in self._sensor_root:
try:
state_attr[ATTR_AVG_RESP_RATE] = round(self._attr["resp_rate"], 2)
except TypeError:
state_attr[ATTR_AVG_RESP_RATE] = None
try:
state_attr[ATTR_AVG_HEART_RATE] = round(self._attr["heart_rate"], 2)
except TypeError:
state_attr[ATTR_AVG_HEART_RATE] = None
state_attr[ATTR_AVG_ROOM_TEMP] = room_temp
state_attr[ATTR_AVG_BED_TEMP] = bed_temp
return state_attr
class EightRoomSensor(EightSleepUserEntity):
"""Representation of an eight sleep room sensor."""
def __init__(self, name, eight, sensor, units):
"""Initialize the sensor."""
super().__init__(eight)
self._sensor = sensor
self._mapped_name = NAME_MAP.get(self._sensor, self._sensor)
self._name = f"{name} {self._mapped_name}"
self._state = None
self._attr = None
self._units = units
@property
def name(self):
"""Return the name of the sensor, if any."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
async def async_update(self):
"""Retrieve latest state."""
_LOGGER.debug("Updating Room sensor: %s", self._sensor)
temp = self._eight.room_temperature()
try:
if self._units == "si":
self._state = round(temp, 2)
else:
self._state = round((temp * 1.8) + 32, 2)
except TypeError:
self._state = None
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
if self._units == "si":
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return "mdi:thermometer"
|
from ipaddress import IPv4Address, IPv6Address, ip_address, ip_network
from typing import Union
import yarl
# RFC6890 - IP addresses of loopback interfaces
LOOPBACK_NETWORKS = (
ip_network("127.0.0.0/8"),
ip_network("::1/128"),
ip_network("::ffff:127.0.0.0/104"),
)
# RFC6890 - Address allocation for Private Internets
PRIVATE_NETWORKS = (
ip_network("fd00::/8"),
ip_network("10.0.0.0/8"),
ip_network("172.16.0.0/12"),
ip_network("192.168.0.0/16"),
)
# RFC6890 - Link local ranges
LINK_LOCAL_NETWORK = ip_network("169.254.0.0/16")
def is_loopback(address: Union[IPv4Address, IPv6Address]) -> bool:
"""Check if an address is a loopback address."""
return any(address in network for network in LOOPBACK_NETWORKS)
def is_private(address: Union[IPv4Address, IPv6Address]) -> bool:
"""Check if an address is a private address."""
return any(address in network for network in PRIVATE_NETWORKS)
def is_link_local(address: Union[IPv4Address, IPv6Address]) -> bool:
"""Check if an address is link local."""
return address in LINK_LOCAL_NETWORK
def is_local(address: Union[IPv4Address, IPv6Address]) -> bool:
"""Check if an address is loopback or private."""
return is_loopback(address) or is_private(address)
def is_ip_address(address: str) -> bool:
"""Check if a given string is an IP address."""
try:
ip_address(address)
except ValueError:
return False
return True
def normalize_url(address: str) -> str:
"""Normalize a given URL."""
url = yarl.URL(address.rstrip("/"))
if url.is_default_port():
return str(url.with_port(None))
return str(url)
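# Usage sketch (values chosen purely for illustration):
#
#     is_local(ip_address("192.168.1.5"))         # True, private range
#     is_local(ip_address("8.8.8.8"))             # False
#     is_ip_address("not-an-ip")                  # False
#     normalize_url("http://example.local:80/")   # "http://example.local"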
|
from datetime import datetime
from django.db.models import Q
from django.test import SimpleTestCase, TestCase
from pytz import utc
from weblate.trans.models import Change, Unit
from weblate.trans.util import PLURAL_SEPARATOR
from weblate.utils.search import Comparer, parse_query
from weblate.utils.state import (
STATE_APPROVED,
STATE_EMPTY,
STATE_FUZZY,
STATE_READONLY,
STATE_TRANSLATED,
)
class ComparerTest(SimpleTestCase):
def test_different(self):
self.assertLessEqual(Comparer().similarity("a", "b"), 50)
def test_same(self):
self.assertEqual(Comparer().similarity("a", "a"), 100)
def test_unicode(self):
# Test fallback to Python implementation in jellyfish
# for unicode strings
self.assertEqual(Comparer().similarity("NICHOLASŸ", "NICHOLAS"), 88)
def test_long(self):
# This is expected to raise MemoryError inside jellyfish
self.assertLessEqual(Comparer().similarity("a" * 200000, "b" * 200000), 50)
class QueryParserTest(TestCase):
def assert_query(self, string, expected):
result = parse_query(string)
self.assertEqual(result, expected)
self.assertFalse(Unit.objects.filter(result).exists())
def test_simple(self):
self.assert_query(
"hello world",
(
Q(source__substring="hello")
| Q(target__substring="hello")
| Q(context__substring="hello")
)
& (
Q(source__substring="world")
| Q(target__substring="world")
| Q(context__substring="world")
),
)
def test_quote(self):
expected = (
Q(source__substring="hello world")
| Q(target__substring="hello world")
| Q(context__substring="hello world")
)
self.assert_query('"hello world"', expected)
self.assert_query("'hello world'", expected)
def test_context(self):
expected = Q(context__substring="hello world")
self.assert_query('key:"hello world"', expected)
self.assert_query("context:'hello world'", expected)
def test_text(self):
self.assert_query("note:TEXT", Q(note__substring="TEXT"))
self.assert_query("location:TEXT", Q(location__substring="TEXT"))
def test_comment(self):
self.assert_query("comment:TEXT", Q(comment__comment__substring="TEXT"))
self.assert_query(
"comment_author:nijel", Q(comment__user__username__iexact="nijel")
)
def test_field(self):
self.assert_query(
"source:hello target:world",
Q(source__substring="hello") & Q(target__substring="world"),
)
self.assert_query("location:hello.c", Q(location__substring="hello.c"))
def test_exact(self):
self.assert_query("source:='hello'", Q(source__iexact="hello"))
self.assert_query('source:="hello world"', Q(source__iexact="hello world"))
self.assert_query("source:='hello world'", Q(source__iexact="hello world"))
self.assert_query("source:=hello", Q(source__iexact="hello"))
def test_regex(self):
self.assert_query('source:r"^hello"', Q(source__regex="^hello"))
with self.assertRaises(ValueError):
self.assert_query('source:r"^(hello"', Q(source__regex="^(hello"))
def test_logic(self):
self.assert_query(
"source:hello AND NOT target:world",
Q(source__substring="hello") & ~Q(target__substring="world"),
)
self.assert_query(
"source:hello OR target:world",
Q(source__substring="hello") | Q(target__substring="world"),
)
def test_empty(self):
self.assert_query("", Q())
def test_invalid(self):
with self.assertRaises(ValueError):
self.assert_query(
"changed:inval AND target:world", Q(target__substring="world")
)
def test_year(self):
self.assert_query(
"changed:2018",
Q(change__timestamp__gte=datetime(2018, 1, 1, 0, 0, tzinfo=utc))
& Q(
change__timestamp__lte=datetime(
2018, 12, 31, 23, 59, 59, 999999, tzinfo=utc
)
)
& Q(change__action__in=Change.ACTIONS_CONTENT),
)
def test_change_action(self):
expected = (
Q(change__timestamp__gte=datetime(2018, 1, 1, 0, 0, tzinfo=utc))
& Q(
change__timestamp__lte=datetime(
2018, 12, 31, 23, 59, 59, 999999, tzinfo=utc
)
)
& Q(change__action=Change.ACTION_MARKED_EDIT)
)
self.assert_query(
"change_time:2018 AND change_action:marked-for-edit", expected
)
self.assert_query(
"change_time:2018 AND change_action:'Marked for edit'", expected
)
def test_dates(self):
action_change = Q(change__action__in=Change.ACTIONS_CONTENT)
self.assert_query(
"changed:>20190301",
Q(change__timestamp__gte=datetime(2019, 3, 1, 0, 0, tzinfo=utc))
& action_change,
)
self.assert_query(
"changed:>2019-03-01",
Q(change__timestamp__gte=datetime(2019, 3, 1, 0, 0, tzinfo=utc))
& action_change,
)
self.assert_query(
"changed:2019-03-01",
Q(change__timestamp__gte=datetime(2019, 3, 1, 0, 0, tzinfo=utc))
& Q(
change__timestamp__lte=datetime(
2019, 3, 1, 23, 59, 59, 999999, tzinfo=utc
)
)
& action_change,
)
with self.assertRaises(ValueError):
self.assert_query("changed:>=2010-01-", Q())
def test_date_range(self):
self.assert_query(
"changed:[2019-03-01 to 2019-04-01]",
Q(change__timestamp__gte=datetime(2019, 3, 1, 0, 0, tzinfo=utc))
& Q(
change__timestamp__lte=datetime(
2019, 4, 1, 23, 59, 59, 999999, tzinfo=utc
)
)
& Q(change__action__in=Change.ACTIONS_CONTENT),
)
def test_date_added(self):
self.assert_query(
"added:>2019-03-01",
Q(timestamp__gte=datetime(2019, 3, 1, 0, 0, tzinfo=utc)),
)
def test_bool(self):
self.assert_query("pending:true", Q(pending=True))
def test_nonexisting(self):
with self.assertRaises(ValueError):
self.assert_query("nonexisting:true", Q())
def test_state(self):
self.assert_query("state:>=empty", Q(state__gte=STATE_EMPTY))
self.assert_query("state:>=translated", Q(state__gte=STATE_TRANSLATED))
self.assert_query("state:<translated", Q(state__lt=STATE_TRANSLATED))
self.assert_query("state:translated", Q(state=STATE_TRANSLATED))
self.assert_query("state:needs-editing", Q(state=STATE_FUZZY))
def test_invalid_state(self):
with self.assertRaises(ValueError):
self.assert_query("state:invalid", Q())
def test_parenthesis(self):
self.assert_query(
"state:translated AND ( source:hello OR source:bar )",
Q(state=STATE_TRANSLATED)
& (Q(source__substring="hello") | Q(source__substring="bar")),
)
self.assert_query(
"state:translated AND (source:hello OR source:bar)",
Q(state=STATE_TRANSLATED)
& (Q(source__substring="hello") | Q(source__substring="bar")),
)
def test_language(self):
self.assert_query("language:cs", Q(translation__language__code__iexact="cs"))
self.assert_query('language:r".*"', Q(translation__language__code__regex=".*"))
def test_component(self):
self.assert_query(
"component:hello", Q(translation__component__slug__iexact="hello")
)
def test_project(self):
self.assert_query(
"project:hello", Q(translation__component__project__slug__iexact="hello")
)
def test_html(self):
self.assert_query(
"<b>bold</b>",
Q(source__substring="<b>bold</b>")
| Q(target__substring="<b>bold</b>")
| Q(context__substring="<b>bold</b>"),
)
def test_has(self):
self.assert_query("has:plural", Q(source__contains=PLURAL_SEPARATOR))
self.assert_query("has:suggestion", Q(suggestion__isnull=False))
self.assert_query("has:check", Q(check__dismissed=False))
self.assert_query("has:comment", Q(comment__resolved=False))
self.assert_query("has:resolved-comment", Q(comment__resolved=True))
self.assert_query("has:dismissed-check", Q(check__dismissed=True))
self.assert_query("has:translation", Q(state__gte=STATE_TRANSLATED))
self.assert_query("has:variant", Q(variant__isnull=False))
self.assert_query("has:label", Q(source_unit__labels__isnull=False))
self.assert_query("has:context", ~Q(context=""))
self.assert_query(
"has:screenshot",
Q(screenshots__isnull=False) | Q(source_unit__screenshots__isnull=False),
)
self.assert_query("has:flags", ~Q(source_unit__extra_flags=""))
def test_is(self):
self.assert_query("is:pending", Q(pending=True))
self.assert_query("is:translated", Q(state__gte=STATE_TRANSLATED))
self.assert_query("is:untranslated", Q(state__lt=STATE_TRANSLATED))
self.assert_query("is:approved", Q(state=STATE_APPROVED))
self.assert_query("is:read-only", Q(state=STATE_READONLY))
self.assert_query("is:fuzzy", Q(state=STATE_FUZZY))
def test_changed_by(self):
self.assert_query(
"changed_by:nijel",
Q(change__author__username__iexact="nijel")
& Q(change__action__in=Change.ACTIONS_CONTENT),
)
def test_suggestions(self):
self.assert_query("suggestion:text", Q(suggestion__target__substring="text"))
self.assert_query(
"suggestion_author:nijel", Q(suggestion__user__username__iexact="nijel")
)
def test_checks(self):
self.assert_query(
"check:ellipsis",
Q(check__check__iexact="ellipsis") & Q(check__dismissed=False),
)
self.assert_query(
"dismissed_check:ellipsis",
Q(check__check__iexact="ellipsis") & Q(check__dismissed=True),
)
def test_labels(self):
self.assert_query(
"label:'test label'",
Q(source_unit__labels__name__iexact="test label"),
)
def test_priority(self):
self.assert_query("priority:10", Q(priority=10))
self.assert_query("priority:>=10", Q(priority__gte=10))
def test_text_html(self):
self.assert_query("target:<name>", Q(target__substring="<name>"))
def test_text_long(self):
self.assert_query(
"[one to other]",
(
Q(source__substring="[one")
| Q(target__substring="[one")
| Q(context__substring="[one")
)
& (
Q(source__substring="to")
| Q(target__substring="to")
| Q(context__substring="to")
)
& (
Q(source__substring="other]")
| Q(target__substring="other]")
| Q(context__substring="other]")
),
)
def test_lowercase_or(self):
self.assert_query(
"state:<translated or state:empty",
Q(state__lt=STATE_TRANSLATED) | Q(state=STATE_EMPTY),
)
def test_timestamp_format(self):
self.assert_query(
"changed:>=01/20/2020",
Q(change__timestamp__gte=datetime(2020, 1, 20, 0, 0, tzinfo=utc))
& Q(change__action__in=Change.ACTIONS_CONTENT),
)
def test_timestamp_interval(self):
self.assert_query(
"changed:2020-03-27",
Q(change__timestamp__gte=datetime(2020, 3, 27, 0, 0, tzinfo=utc))
& Q(
change__timestamp__lte=datetime(
2020, 3, 27, 23, 59, 59, 999999, tzinfo=utc
)
)
& Q(change__action__in=Change.ACTIONS_CONTENT),
)
def test_non_quoted_strings(self):
self.assert_query(
"%(count)s word",
parse_query("'%(count)s' 'word'"),
)
self.assert_query("{actor}", parse_query("'{actor}'"))
def test_specialchars(self):
self.assert_query(
"to %{_topdir}",
(
Q(source__substring="to")
| Q(target__substring="to")
| Q(context__substring="to")
)
& (
Q(source__substring="%{_topdir}")
| Q(target__substring="%{_topdir}")
| Q(context__substring="%{_topdir}")
),
)
def test_url(self):
self.assert_query("https://weblate.org/", parse_query("'https://weblate.org/'"))
def test_quotes(self):
self.assert_query("'", parse_query('''"'"'''))
self.assert_query('"', parse_query("""'"'"""))
self.assert_query("source:'", parse_query('''source:"'"'''))
self.assert_query('source:"', parse_query("""source:'"'"""))
|
import unittest
from uiautomator import param_to_property
class TestParamToProperty(unittest.TestCase):
def test_props(self):
args_value = None
kwargs_value = None
@param_to_property("one", "two", "three")
def func(*args, **kwargs):
self.assertEqual(args, args_value)
self.assertEqual(kwargs, kwargs_value)
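        # Attribute accesses on the decorated callable are collected into
        # positional arguments, so ``func.one.two.three(test=1)`` is expected
        # to call the wrapped function with ("one", "two", "three") and
        # {"test": 1}.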
args_value = ("one", "two", "three")
kwargs_value = {"test": 1}
func.one.two.three(test=1)
args_value = ("one", "three")
kwargs_value = {"another_test": 100}
func.one("three", another_test=100)
args_value = ("one", "two", "three")
kwargs_value = {}
func("one", "two", "three")
args_value = ("three", "one", "two")
kwargs_value = {}
func.three("one", "two")
def test_kwprops(self):
args_value = None
kwargs_value = None
@param_to_property(key=["one", "two", "three"])
def func(*args, **kwargs):
self.assertEqual(args, args_value)
self.assertEqual(kwargs, kwargs_value)
args_value = (1,)
kwargs_value = {"key": "one"}
func.one(1)
args_value = (2, 3)
kwargs_value = {"key": "two"}
func.two(2, 3)
args_value = ()
kwargs_value = {}
func()
def test_error(self):
@param_to_property(key=["one", "two", "three"])
def func(*args, **kwargs):
pass
with self.assertRaises(AttributeError):
func.one.one
with self.assertRaises(SyntaxError):
@param_to_property("a", "b", key=["one", "two", "three"])
def func(*args, **kwargs):
pass
|
import logging
from toonapi import ToonConnectionError, ToonError
_LOGGER = logging.getLogger(__name__)
def toon_exception_handler(func):
"""Decorate Toon calls to handle Toon exceptions.
    A decorator that wraps the passed-in function, catches Toon errors,
and handles the availability of the device in the data coordinator.
"""
async def handler(self, *args, **kwargs):
try:
await func(self, *args, **kwargs)
self.coordinator.update_listeners()
except ToonConnectionError as error:
_LOGGER.error("Error communicating with API: %s", error)
self.coordinator.last_update_success = False
self.coordinator.update_listeners()
except ToonError as error:
_LOGGER.error("Invalid response from API: %s", error)
return handler
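# A minimal usage sketch (hypothetical entity class and API call, not part of
# this module): the decorator wraps async service calls on coordinator-based
# entities so connection errors mark the coordinator as failed instead of
# propagating to the caller.
#
#     class ExampleToonEntity:
#         def __init__(self, coordinator):
#             self.coordinator = coordinator
#
#         @toon_exception_handler
#         async def async_set_example(self, value):
#             # Any awaitable toonapi call would go here (name is hypothetical).
#             await self.coordinator.toon.example_call(value)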
|
import logging
import voluptuous as vol
from homeassistant.components.notify import PLATFORM_SCHEMA, BaseNotificationService
from homeassistant.const import CONF_NAME
from homeassistant.core import split_entity_id
import homeassistant.helpers.config_validation as cv
from . import ATTR_ENTITY_ID, ATTR_LANGUAGE, ATTR_MESSAGE, DOMAIN
CONF_MEDIA_PLAYER = "media_player"
CONF_TTS_SERVICE = "tts_service"
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_TTS_SERVICE): cv.entity_id,
vol.Required(CONF_MEDIA_PLAYER): cv.entity_id,
vol.Optional(ATTR_LANGUAGE): cv.string,
}
)
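# A hedged configuration sketch matching the schema above (entity ids and
# service names are illustrative assumptions, not taken from a real setup):
#
#     notify:
#       - platform: tts
#         name: announce
#         tts_service: tts.demo_say
#         media_player: media_player.living_room
#         language: en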
async def async_get_service(hass, config, discovery_info=None):
"""Return the notify service."""
return TTSNotificationService(config)
class TTSNotificationService(BaseNotificationService):
"""The TTS Notification Service."""
def __init__(self, config):
"""Initialize the service."""
_, self._tts_service = split_entity_id(config[CONF_TTS_SERVICE])
self._media_player = config[CONF_MEDIA_PLAYER]
self._language = config.get(ATTR_LANGUAGE)
async def async_send_message(self, message="", **kwargs):
"""Call TTS service to speak the notification."""
_LOGGER.debug("%s '%s' on %s", self._tts_service, message, self._media_player)
data = {
ATTR_MESSAGE: message,
ATTR_ENTITY_ID: self._media_player,
}
if self._language:
data[ATTR_LANGUAGE] = self._language
await self.hass.services.async_call(
DOMAIN,
self._tts_service,
data,
)
|
import pytest
import contextlib
import os
asyncio = pytest.importorskip("asyncio")
httpx = pytest.importorskip("httpx")
import vcr # noqa: E402
class BaseDoRequest:
_client_class = None
def __init__(self, *args, **kwargs):
self._client = self._client_class(*args, **kwargs)
class DoSyncRequest(BaseDoRequest):
_client_class = httpx.Client
def __call__(self, *args, **kwargs):
return self._client.request(*args, timeout=60, **kwargs)
class DoAsyncRequest(BaseDoRequest):
_client_class = httpx.AsyncClient
@staticmethod
def run_in_loop(coroutine):
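        # Run the coroutine on a dedicated, freshly created event loop so each
        # request is isolated from any loop the caller may already be running;
        # the loop is closed again as soon as the task completes.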
with contextlib.closing(asyncio.new_event_loop()) as loop:
asyncio.set_event_loop(loop)
task = loop.create_task(coroutine)
return loop.run_until_complete(task)
def __call__(self, *args, **kwargs):
async def _request():
async with self._client as c:
return await c.request(*args, **kwargs)
return DoAsyncRequest.run_in_loop(_request())
def pytest_generate_tests(metafunc):
if "do_request" in metafunc.fixturenames:
metafunc.parametrize("do_request", [DoAsyncRequest, DoSyncRequest])
if "scheme" in metafunc.fixturenames:
metafunc.parametrize("scheme", ["http", "https"])
@pytest.fixture
def yml(tmpdir, request):
return str(tmpdir.join(request.function.__name__ + ".yaml"))
def test_status(tmpdir, scheme, do_request):
url = scheme + "://mockbin.org/request"
with vcr.use_cassette(str(tmpdir.join("status.yaml"))):
response = do_request()("GET", url)
with vcr.use_cassette(str(tmpdir.join("status.yaml"))) as cassette:
cassette_response = do_request()("GET", url)
assert cassette_response.status_code == response.status_code
assert cassette.play_count == 1
def test_case_insensitive_headers(tmpdir, scheme, do_request):
url = scheme + "://mockbin.org/request"
with vcr.use_cassette(str(tmpdir.join("whatever.yaml"))):
do_request()("GET", url)
with vcr.use_cassette(str(tmpdir.join("whatever.yaml"))) as cassette:
cassette_response = do_request()("GET", url)
assert "Content-Type" in cassette_response.headers
assert "content-type" in cassette_response.headers
assert cassette.play_count == 1
def test_content(tmpdir, scheme, do_request):
url = scheme + "://httpbin.org"
with vcr.use_cassette(str(tmpdir.join("cointent.yaml"))):
response = do_request()("GET", url)
with vcr.use_cassette(str(tmpdir.join("cointent.yaml"))) as cassette:
cassette_response = do_request()("GET", url)
assert cassette_response.content == response.content
assert cassette.play_count == 1
def test_json(tmpdir, scheme, do_request):
url = scheme + "://httpbin.org/get"
headers = {"Content-Type": "application/json"}
with vcr.use_cassette(str(tmpdir.join("json.yaml"))):
response = do_request(headers=headers)("GET", url)
with vcr.use_cassette(str(tmpdir.join("json.yaml"))) as cassette:
cassette_response = do_request(headers=headers)("GET", url)
assert cassette_response.json() == response.json()
assert cassette.play_count == 1
def test_params_same_url_distinct_params(tmpdir, scheme, do_request):
url = scheme + "://httpbin.org/get"
headers = {"Content-Type": "application/json"}
params = {"a": 1, "b": False, "c": "c"}
with vcr.use_cassette(str(tmpdir.join("get.yaml"))) as cassette:
response = do_request()("GET", url, params=params, headers=headers)
with vcr.use_cassette(str(tmpdir.join("get.yaml"))) as cassette:
cassette_response = do_request()("GET", url, params=params, headers=headers)
assert cassette_response.request.url == response.request.url
assert cassette_response.json() == response.json()
assert cassette.play_count == 1
params = {"other": "params"}
with vcr.use_cassette(str(tmpdir.join("get.yaml"))) as cassette:
with pytest.raises(vcr.errors.CannotOverwriteExistingCassetteException):
do_request()("GET", url, params=params, headers=headers)
def test_redirect(tmpdir, do_request, yml):
url = "https://mockbin.org/redirect/303/2"
response = do_request()("GET", url)
with vcr.use_cassette(yml):
response = do_request()("GET", url)
with vcr.use_cassette(yml) as cassette:
cassette_response = do_request()("GET", url)
assert cassette_response.status_code == response.status_code
assert len(cassette_response.history) == len(response.history)
assert len(cassette) == 3
assert cassette.play_count == 3
    # Assert that the real response and the cassette response have
    # similar-looking request info.
assert cassette_response.request.url == response.request.url
assert cassette_response.request.method == response.request.method
assert {k: v for k, v in cassette_response.request.headers.items()} == {
k: v for k, v in response.request.headers.items()
}
def test_work_with_gzipped_data(tmpdir, do_request, yml):
with vcr.use_cassette(yml):
do_request()("GET", "https://httpbin.org/gzip")
with vcr.use_cassette(yml) as cassette:
cassette_response = do_request()("GET", "https://httpbin.org/gzip")
assert "gzip" in cassette_response.json()["headers"]["Accept-Encoding"]
assert cassette_response.read()
assert cassette.play_count == 1
@pytest.mark.parametrize("url", ["https://github.com/kevin1024/vcrpy/issues/" + str(i) for i in range(3, 6)])
def test_simple_fetching(tmpdir, do_request, yml, url):
with vcr.use_cassette(yml):
do_request()("GET", url)
with vcr.use_cassette(yml) as cassette:
cassette_response = do_request()("GET", url)
assert str(cassette_response.request.url) == url
assert cassette.play_count == 1
def test_behind_proxy(do_request):
    # The cassette is pre-recorded because the test would otherwise need a live proxy.
yml = (
os.path.dirname(os.path.realpath(__file__)) + "/cassettes/" + "test_httpx_test_test_behind_proxy.yml"
)
url = "https://httpbin.org/headers"
proxy = "http://localhost:8080"
proxies = {"http": proxy, "https": proxy}
with vcr.use_cassette(yml):
response = do_request(proxies=proxies, verify=False)("GET", url)
with vcr.use_cassette(yml) as cassette:
cassette_response = do_request(proxies=proxies, verify=False)("GET", url)
assert str(cassette_response.request.url) == url
assert cassette.play_count == 1
assert cassette_response.headers["Via"] == "my_own_proxy", str(cassette_response.headers)
assert cassette_response.request.url == response.request.url
def test_cookies(tmpdir, scheme, do_request):
def client_cookies(client):
return [c for c in client._client.cookies]
def response_cookies(response):
return [c for c in response.cookies]
client = do_request()
assert client_cookies(client) == []
url = scheme + "://httpbin.org"
testfile = str(tmpdir.join("cookies.yml"))
with vcr.use_cassette(testfile):
r1 = client("GET", url + "/cookies/set?k1=v1&k2=v2")
assert response_cookies(r1.history[0]) == ["k1", "k2"]
assert response_cookies(r1) == []
r2 = client("GET", url + "/cookies")
assert len(r2.json()["cookies"]) == 2
assert client_cookies(client) == ["k1", "k2"]
new_client = do_request()
assert client_cookies(new_client) == []
with vcr.use_cassette(testfile) as cassette:
cassette_response = new_client("GET", url + "/cookies/set?k1=v1&k2=v2")
assert response_cookies(cassette_response.history[0]) == ["k1", "k2"]
assert response_cookies(cassette_response) == []
assert cassette.play_count == 2
assert client_cookies(new_client) == ["k1", "k2"]
def test_relative_redirects(tmpdir, scheme, do_request):
url = scheme + "://mockbin.com/redirect/301?to=/redirect/301?to=/request"
testfile = str(tmpdir.join("relative_redirects.yml"))
with vcr.use_cassette(testfile):
response = do_request()("GET", url)
assert len(response.history) == 2, response
assert response.json()["url"].endswith("request")
with vcr.use_cassette(testfile) as cassette:
response = do_request()("GET", url)
assert len(response.history) == 2
assert response.json()["url"].endswith("request")
assert cassette.play_count == 3
def test_redirect_wo_allow_redirects(do_request, yml):
url = "https://mockbin.org/redirect/308/5"
with vcr.use_cassette(yml):
response = do_request()("GET", url, allow_redirects=False)
assert str(response.url).endswith("308/5")
assert response.status_code == 308
with vcr.use_cassette(yml) as cassette:
response = do_request()("GET", url, allow_redirects=False)
assert str(response.url).endswith("308/5")
assert response.status_code == 308
assert cassette.play_count == 1
|
import enum
import logging
import warnings
from nikola import DEBUG
__all__ = (
"get_logger",
"LOGGER",
)
# Handlers/formatters
class ApplicationWarning(Exception):
"""An application warning, raised in strict mode."""
pass
class StrictModeExceptionHandler(logging.StreamHandler):
"""A logging handler that raises an exception on warnings."""
def emit(self, record: logging.LogRecord) -> None:
"""Emit a logging record."""
if record.levelno >= logging.WARNING:
raise ApplicationWarning(self.format(record))
class ColorfulFormatter(logging.Formatter):
"""Stream handler with colors."""
_colorful = False
def format(self, record: logging.LogRecord) -> str:
"""Format a message and add colors to it."""
message = super().format(record)
return self.wrap_in_color(record).format(message)
def wrap_in_color(self, record: logging.LogRecord) -> str:
"""Return the colorized string for this record."""
if not self._colorful:
return "{}"
if record.levelno >= logging.ERROR:
return "\033[1;31m{}\033[0m"
elif record.levelno >= logging.WARNING:
return "\033[1;33m{}\033[0m"
elif record.levelno >= logging.INFO:
return "\033[1m{}\033[0m"
return "\033[37m{}\033[0m"
# Initial configuration
class LoggingMode(enum.Enum):
"""Logging mode options."""
NORMAL = 0
STRICT = 1
QUIET = 2
def configure_logging(logging_mode: LoggingMode = LoggingMode.NORMAL) -> None:
"""Configure logging for Nikola.
    This function can be called multiple times; any previous configuration is overridden.
"""
if DEBUG:
logging.root.level = logging.DEBUG
else:
logging.root.level = logging.INFO
if logging_mode == LoggingMode.QUIET:
logging.root.handlers = []
return
handler = logging.StreamHandler()
handler.setFormatter(
ColorfulFormatter(
fmt="[%(asctime)s] %(levelname)s: %(name)s: %(message)s",
datefmt="%Y-%m-%d %H:%M:%S",
)
)
handlers = [handler]
if logging_mode == LoggingMode.STRICT:
handlers.append(StrictModeExceptionHandler())
logging.root.handlers = handlers
configure_logging()
# For compatibility with old code written with Logbook in mind
# TODO remove in v9
def patch_notice_level(logger: logging.Logger) -> logging.Logger:
"""Patch logger to issue WARNINGs with logger.notice."""
logger.notice = logger.warning
return logger
# User-facing loggers
def get_logger(name: str, handlers=None) -> logging.Logger:
"""Get a logger with handlers attached."""
logger = logging.getLogger(name)
if handlers is not None:
for h in handlers:
logger.addHandler(h)
return patch_notice_level(logger)
LOGGER = get_logger("Nikola")
# Push warnings to logging
def showwarning(message, category, filename, lineno, file=None, line=None):
"""Show a warning (from the warnings module) to the user."""
try:
n = category.__name__
except AttributeError:
n = str(category)
get_logger(n).warning("{0}:{1}: {2}".format(filename, lineno, message))
warnings.showwarning = showwarning
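# A minimal usage sketch (assumed import path and caller code, not part of
# this module): strict mode turns warnings into ApplicationWarning exceptions.
#
#     from nikola.log import LoggingMode, configure_logging, get_logger
#
#     configure_logging(LoggingMode.STRICT)
#     logger = get_logger("my_plugin")
#     logger.info("rendering started")
#     logger.warning("this raises ApplicationWarning in strict mode")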
|
import errno
import os
import select
import socket
import socketserver
import ssl
import sys
import wsgiref.simple_server
from urllib.parse import unquote
from radicale import Application, config
from radicale.log import logger
if hasattr(socket, "EAI_ADDRFAMILY"):
COMPAT_EAI_ADDRFAMILY = socket.EAI_ADDRFAMILY
elif hasattr(socket, "EAI_NONAME"):
# Windows and BSD don't have a special error code for this
COMPAT_EAI_ADDRFAMILY = socket.EAI_NONAME
if hasattr(socket, "EAI_NODATA"):
COMPAT_EAI_NODATA = socket.EAI_NODATA
elif hasattr(socket, "EAI_NONAME"):
# Windows and BSD don't have a special error code for this
COMPAT_EAI_NODATA = socket.EAI_NONAME
if hasattr(socket, "IPPROTO_IPV6"):
COMPAT_IPPROTO_IPV6 = socket.IPPROTO_IPV6
elif os.name == "nt":
# Workaround: https://bugs.python.org/issue29515
COMPAT_IPPROTO_IPV6 = 41
def format_address(address):
return "[%s]:%d" % address[:2]
class ParallelHTTPServer(socketserver.ThreadingMixIn,
wsgiref.simple_server.WSGIServer):
    # We wait for child threads ourselves
block_on_close = False
daemon_threads = True
def __init__(self, configuration, family, address, RequestHandlerClass):
self.configuration = configuration
self.address_family = family
super().__init__(address, RequestHandlerClass)
self.client_sockets = set()
def server_bind(self):
if self.address_family == socket.AF_INET6:
# Only allow IPv6 connections to the IPv6 socket
self.socket.setsockopt(COMPAT_IPPROTO_IPV6, socket.IPV6_V6ONLY, 1)
super().server_bind()
def get_request(self):
# Set timeout for client
request, client_address = super().get_request()
timeout = self.configuration.get("server", "timeout")
if timeout:
request.settimeout(timeout)
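        # Hand one end of a socketpair to the request thread (appended to the
        # client address) and keep the other end in ``client_sockets``; when
        # ``finish_request`` closes its end, ``select()`` in ``serve`` sees
        # the kept end become readable and knows the request has finished.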
client_socket, client_socket_out = socket.socketpair()
self.client_sockets.add(client_socket_out)
return request, (*client_address, client_socket)
def finish_request_locked(self, request, client_address):
return super().finish_request(request, client_address)
def finish_request(self, request, client_address):
*client_address, client_socket = client_address
client_address = tuple(client_address)
try:
return self.finish_request_locked(request, client_address)
finally:
client_socket.close()
def handle_error(self, request, client_address):
if issubclass(sys.exc_info()[0], socket.timeout):
logger.info("Client timed out", exc_info=True)
else:
logger.error("An exception occurred during request: %s",
sys.exc_info()[1], exc_info=True)
class ParallelHTTPSServer(ParallelHTTPServer):
def server_bind(self):
super().server_bind()
# Wrap the TCP socket in an SSL socket
certfile = self.configuration.get("server", "certificate")
keyfile = self.configuration.get("server", "key")
cafile = self.configuration.get("server", "certificate_authority")
# Test if the files can be read
for name, filename in [("certificate", certfile), ("key", keyfile),
("certificate_authority", cafile)]:
type_name = config.DEFAULT_CONFIG_SCHEMA["server"][name][
"type"].__name__
source = self.configuration.get_source("server", name)
if name == "certificate_authority" and not filename:
continue
try:
open(filename, "r").close()
except OSError as e:
raise RuntimeError(
"Invalid %s value for option %r in section %r in %s: %r "
"(%s)" % (type_name, name, "server", source, filename,
e)) from e
context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain(certfile=certfile, keyfile=keyfile)
if cafile:
context.load_verify_locations(cafile=cafile)
context.verify_mode = ssl.CERT_REQUIRED
self.socket = context.wrap_socket(
self.socket, server_side=True, do_handshake_on_connect=False)
def finish_request_locked(self, request, client_address):
try:
try:
request.do_handshake()
except socket.timeout:
raise
except Exception as e:
raise RuntimeError("SSL handshake failed: %s" % e) from e
except Exception:
try:
self.handle_error(request, client_address)
finally:
self.shutdown_request(request)
return
return super().finish_request_locked(request, client_address)
class ServerHandler(wsgiref.simple_server.ServerHandler):
# Don't pollute WSGI environ with OS environment
os_environ = {}
def log_exception(self, exc_info):
logger.error("An exception occurred during request: %s",
exc_info[1], exc_info=exc_info)
class RequestHandler(wsgiref.simple_server.WSGIRequestHandler):
"""HTTP requests handler."""
def log_request(self, code="-", size="-"):
pass # Disable request logging.
def log_error(self, format_, *args):
logger.error("An error occurred during request: %s", format_ % args)
def get_environ(self):
env = super().get_environ()
if hasattr(self.connection, "getpeercert"):
# The certificate can be evaluated by the auth module
env["REMOTE_CERTIFICATE"] = self.connection.getpeercert()
# Parent class only tries latin1 encoding
env["PATH_INFO"] = unquote(self.path.split("?", 1)[0])
return env
def handle(self):
"""Copy of WSGIRequestHandler.handle with different ServerHandler"""
self.raw_requestline = self.rfile.readline(65537)
if len(self.raw_requestline) > 65536:
self.requestline = ""
self.request_version = ""
self.command = ""
self.send_error(414)
return
if not self.parse_request():
return
handler = ServerHandler(
self.rfile, self.wfile, self.get_stderr(), self.get_environ()
)
handler.request_handler = self
handler.run(self.server.get_app())
def serve(configuration, shutdown_socket=None):
"""Serve radicale from configuration.
`shutdown_socket` can be used to gracefully shutdown the server.
The socket can be created with `socket.socketpair()`, when the other socket
gets closed the server stops accepting new requests by clients and the
function returns after all active requests are finished.
"""
logger.info("Starting Radicale")
# Copy configuration before modifying
configuration = configuration.copy()
configuration.update({"server": {"_internal_server": "True"}}, "server",
privileged=True)
use_ssl = configuration.get("server", "ssl")
server_class = ParallelHTTPSServer if use_ssl else ParallelHTTPServer
application = Application(configuration)
servers = {}
try:
for address in configuration.get("server", "hosts"):
# Try to bind sockets for IPv4 and IPv6
possible_families = (socket.AF_INET, socket.AF_INET6)
bind_ok = False
for i, family in enumerate(possible_families):
is_last = i == len(possible_families) - 1
try:
server = server_class(configuration, family, address,
RequestHandler)
except OSError as e:
# Ignore unsupported families (only one must work)
if ((bind_ok or not is_last) and (
isinstance(e, socket.gaierror) and (
# Hostname does not exist or doesn't have
# address for address family
# macOS: IPv6 address for INET address family
e.errno == socket.EAI_NONAME or
# Address not for address family
e.errno == COMPAT_EAI_ADDRFAMILY or
e.errno == COMPAT_EAI_NODATA) or
# Workaround for PyPy
str(e) == "address family mismatched" or
# Address family not available (e.g. IPv6 disabled)
# macOS: IPv4 address for INET6 address family with
# IPV6_V6ONLY set
e.errno == errno.EADDRNOTAVAIL or
# Address family not supported
e.errno == errno.EAFNOSUPPORT or
# Protocol not supported
e.errno == errno.EPROTONOSUPPORT)):
continue
raise RuntimeError("Failed to start server %r: %s" % (
format_address(address), e)) from e
servers[server.socket] = server
bind_ok = True
server.set_app(application)
logger.info("Listening on %r%s",
format_address(server.server_address),
" with SSL" if use_ssl else "")
if not servers:
raise RuntimeError("No servers started")
# Mainloop
select_timeout = None
if os.name == "nt":
# Fallback to busy waiting. (select(...) blocks SIGINT on Windows.)
select_timeout = 1.0
max_connections = configuration.get("server", "max_connections")
logger.info("Radicale server ready")
while True:
rlist = []
# Wait for finished clients
for server in servers.values():
rlist.extend(server.client_sockets)
# Accept new connections if max_connections is not reached
if max_connections <= 0 or len(rlist) < max_connections:
rlist.extend(servers)
# Use socket to get notified of program shutdown
if shutdown_socket is not None:
rlist.append(shutdown_socket)
rlist, _, _ = select.select(rlist, [], [], select_timeout)
rlist = set(rlist)
if shutdown_socket in rlist:
logger.info("Stopping Radicale")
break
for server in servers.values():
finished_sockets = server.client_sockets.intersection(rlist)
for s in finished_sockets:
s.close()
server.client_sockets.remove(s)
rlist.remove(s)
if finished_sockets:
server.service_actions()
if rlist:
server = servers.get(rlist.pop())
if server:
server.handle_request()
finally:
# Wait for clients to finish and close servers
for server in servers.values():
for s in server.client_sockets:
s.recv(1)
s.close()
server.server_close()
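# A minimal shutdown sketch (assumed caller code, not part of this module):
# ``serve`` watches the socket it is given; closing the other end of the
# socketpair, e.g. from a signal handler thread, makes the main loop exit.
#
#     shutdown_socket, shutdown_socket_out = socket.socketpair()
#     # ... run serve(configuration, shutdown_socket) in a worker thread ...
#     shutdown_socket_out.close()  # ask the server to stop gracefully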
|
import pytest
import yarl
from homeassistant.components.demo.tts import DemoProvider
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
DOMAIN as DOMAIN_MP,
MEDIA_TYPE_MUSIC,
SERVICE_PLAY_MEDIA,
)
import homeassistant.components.tts as tts
from homeassistant.components.tts import _get_cache_files
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import HTTP_NOT_FOUND
from homeassistant.setup import async_setup_component
from tests.async_mock import PropertyMock, patch
from tests.common import assert_setup_component, async_mock_service
def relative_url(url):
"""Convert an absolute url to a relative one."""
return str(yarl.URL(url).relative())
@pytest.fixture
def demo_provider():
"""Demo TTS provider."""
return DemoProvider("en")
@pytest.fixture(autouse=True)
def mock_get_cache_files():
"""Mock the list TTS cache function."""
with patch(
"homeassistant.components.tts._get_cache_files", return_value={}
) as mock_cache_files:
yield mock_cache_files
@pytest.fixture(autouse=True)
def mock_init_cache_dir():
"""Mock the TTS cache dir in memory."""
with patch(
"homeassistant.components.tts._init_tts_cache_dir",
side_effect=lambda hass, cache_dir: hass.config.path(cache_dir),
) as mock_cache_dir:
yield mock_cache_dir
@pytest.fixture
def empty_cache_dir(tmp_path, mock_init_cache_dir, mock_get_cache_files, request):
"""Mock the TTS cache dir with empty dir."""
mock_init_cache_dir.side_effect = None
mock_init_cache_dir.return_value = str(tmp_path)
    # Restore the original cache-file listing behavior; we're working with a real dir.
mock_get_cache_files.side_effect = _get_cache_files
yield tmp_path
if request.node.rep_call.passed:
return
# Print contents of dir if failed
print("Content of dir for", request.node.nodeid)
for fil in tmp_path.iterdir():
print(fil.relative_to(tmp_path))
# To show the log.
assert False
@pytest.fixture()
def mutagen_mock():
"""Mock writing tags."""
with patch(
"homeassistant.components.tts.SpeechManager.write_tags",
side_effect=lambda *args: args[1],
):
yield
@pytest.fixture(autouse=True)
async def internal_url_mock(hass):
"""Mock internal URL of the instance."""
await async_process_ha_core_config(
hass,
{"internal_url": "http://example.local:8123"},
)
async def test_setup_component_demo(hass):
"""Set up the demo platform with defaults."""
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
assert hass.services.has_service(tts.DOMAIN, "demo_say")
assert hass.services.has_service(tts.DOMAIN, "clear_cache")
async def test_setup_component_demo_no_access_cache_folder(hass, mock_init_cache_dir):
"""Set up the demo platform with defaults."""
config = {tts.DOMAIN: {"platform": "demo"}}
mock_init_cache_dir.side_effect = OSError(2, "No access")
assert not await async_setup_component(hass, tts.DOMAIN, config)
assert not hass.services.has_service(tts.DOMAIN, "demo_say")
assert not hass.services.has_service(tts.DOMAIN, "clear_cache")
async def test_setup_component_and_test_service(hass, empty_cache_dir):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_config_language(
hass, empty_cache_dir
):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "language": "de"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_config_language_special(
hass, empty_cache_dir
):
"""Set up the demo platform and call service with extend language."""
import homeassistant.components.demo.tts as demo_tts
demo_tts.SUPPORT_LANGUAGES.append("en_US")
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "language": "en_US"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en-us_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en-us_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_wrong_conf_language(hass):
"""Set up the demo platform and call service with wrong config."""
config = {tts.DOMAIN: {"platform": "demo", "language": "ru"}}
with assert_setup_component(0, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
async def test_setup_component_and_test_service_with_service_language(
hass, empty_cache_dir
):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3"
).is_file()
async def test_setup_component_test_service_with_wrong_service_language(
hass, empty_cache_dir
):
"""Set up the demo platform and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "lang",
},
blocking=True,
)
assert len(calls) == 0
await hass.async_block_till_done()
assert not (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_lang_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_service_options(
hass, empty_cache_dir
):
"""Set up the demo platform and call service with options."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
tts.ATTR_OPTIONS: {"voice": "alex", "age": 5},
},
blocking=True,
)
opt_hash = tts._hash_options({"voice": "alex", "age": 5})
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== f"http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir
/ f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
).is_file()
async def test_setup_component_and_test_with_service_options_def(hass, empty_cache_dir):
"""Set up the demo platform and call service with default options."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN), patch(
"homeassistant.components.demo.tts.DemoProvider.default_options",
new_callable=PropertyMock(return_value={"voice": "alex"}),
):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
},
blocking=True,
)
opt_hash = tts._hash_options({"voice": "alex"})
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== f"http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
)
await hass.async_block_till_done()
assert (
empty_cache_dir
/ f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_service_options_wrong(
hass, empty_cache_dir
):
"""Set up the demo platform and call service with wrong options."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_LANGUAGE: "de",
tts.ATTR_OPTIONS: {"speed": 1},
},
blocking=True,
)
opt_hash = tts._hash_options({"speed": 1})
assert len(calls) == 0
await hass.async_block_till_done()
assert not (
empty_cache_dir
/ f"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_{opt_hash}_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_base_url_set(hass):
"""Set up the demo platform with ``base_url`` set and call service."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "base_url": "http://fnord"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert calls[0].data[ATTR_MEDIA_CONTENT_TYPE] == MEDIA_TYPE_MUSIC
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID] == "http://fnord"
"/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491"
"_en_-_demo.mp3"
)
async def test_setup_component_and_test_service_clear_cache(hass, empty_cache_dir):
"""Set up the demo platform and call service clear cache."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
# To make sure the file is persisted
await hass.async_block_till_done()
assert len(calls) == 1
await hass.async_block_till_done()
assert (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
await hass.services.async_call(
tts.DOMAIN, tts.SERVICE_CLEAR_CACHE, {}, blocking=True
)
await hass.async_block_till_done()
assert not (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
async def test_setup_component_and_test_service_with_receive_voice(
hass, demo_provider, hass_client
):
"""Set up the demo platform and call service and receive voice."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
req = await client.get(relative_url(calls[0].data[ATTR_MEDIA_CONTENT_ID]))
_, demo_data = demo_provider.get_tts_audio("bla", "en")
demo_data = tts.SpeechManager.write_tags(
"42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3",
demo_data,
demo_provider,
"There is someone at the door.",
"en",
None,
)
assert req.status == 200
assert await req.read() == demo_data
async def test_setup_component_and_test_service_with_receive_voice_german(
hass, demo_provider, hass_client
):
"""Set up the demo platform and call service and receive voice."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "language": "de"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
req = await client.get(relative_url(calls[0].data[ATTR_MEDIA_CONTENT_ID]))
_, demo_data = demo_provider.get_tts_audio("bla", "de")
demo_data = tts.SpeechManager.write_tags(
"42f18378fd4393d18c8dd11d03fa9563c1e54491_de_-_demo.mp3",
demo_data,
demo_provider,
"There is someone at the door.",
"de",
None,
)
assert req.status == 200
assert await req.read() == demo_data
async def test_setup_component_and_web_view_wrong_file(hass, hass_client):
"""Set up the demo platform and receive wrong file from web."""
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
req = await client.get(url)
assert req.status == HTTP_NOT_FOUND
async def test_setup_component_and_web_view_wrong_filename(hass, hass_client):
"""Set up the demo platform and receive wrong filename from web."""
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_proxy/265944dsk32c1b2a621be5930510bb2cd_en_-_demo.mp3"
req = await client.get(url)
assert req.status == HTTP_NOT_FOUND
async def test_setup_component_test_without_cache(hass, empty_cache_dir):
"""Set up demo platform without cache."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "cache": False}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
await hass.async_block_till_done()
assert not (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
async def test_setup_component_test_with_cache_call_service_without_cache(
hass, empty_cache_dir
):
"""Set up demo platform with cache and call service without cache."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
config = {tts.DOMAIN: {"platform": "demo", "cache": True}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
tts.ATTR_CACHE: False,
},
blocking=True,
)
assert len(calls) == 1
await hass.async_block_till_done()
assert not (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
).is_file()
async def test_setup_component_test_with_cache_dir(
hass, empty_cache_dir, demo_provider
):
"""Set up demo platform with cache and call service without cache."""
calls = async_mock_service(hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
_, demo_data = demo_provider.get_tts_audio("bla", "en")
cache_file = (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
with open(cache_file, "wb") as voice_file:
voice_file.write(demo_data)
config = {tts.DOMAIN: {"platform": "demo", "cache": True}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
with patch(
"homeassistant.components.demo.tts.DemoProvider.get_tts_audio",
return_value=(None, None),
):
await hass.services.async_call(
tts.DOMAIN,
"demo_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "There is someone at the door.",
},
blocking=True,
)
assert len(calls) == 1
assert (
calls[0].data[ATTR_MEDIA_CONTENT_ID]
== "http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
async def test_setup_component_test_with_error_on_get_tts(hass):
"""Set up demo platform with wrong get_tts_audio."""
config = {tts.DOMAIN: {"platform": "demo"}}
with assert_setup_component(1, tts.DOMAIN), patch(
"homeassistant.components.demo.tts.DemoProvider.get_tts_audio",
return_value=(None, None),
):
assert await async_setup_component(hass, tts.DOMAIN, config)
async def test_setup_component_load_cache_retrieve_without_mem_cache(
hass, demo_provider, empty_cache_dir, hass_client
):
"""Set up component and load cache and get without mem cache."""
_, demo_data = demo_provider.get_tts_audio("bla", "en")
cache_file = (
empty_cache_dir / "42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
with open(cache_file, "wb") as voice_file:
voice_file.write(demo_data)
config = {tts.DOMAIN: {"platform": "demo", "cache": True}}
with assert_setup_component(1, tts.DOMAIN):
assert await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
req = await client.get(url)
assert req.status == 200
assert await req.read() == demo_data
async def test_setup_component_and_web_get_url(hass, hass_client):
"""Set up the demo platform and receive file from web."""
config = {tts.DOMAIN: {"platform": "demo"}}
await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_get_url"
data = {"platform": "demo", "message": "There is someone at the door."}
req = await client.post(url, json=data)
assert req.status == 200
response = await req.json()
assert response.get("url") == (
"http://example.local:8123/api/tts_proxy/42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.mp3"
)
async def test_setup_component_and_web_get_url_bad_config(hass, hass_client):
"""Set up the demo platform and receive wrong file from web."""
config = {tts.DOMAIN: {"platform": "demo"}}
await async_setup_component(hass, tts.DOMAIN, config)
client = await hass_client()
url = "/api/tts_get_url"
data = {"message": "There is someone at the door."}
req = await client.post(url, json=data)
assert req.status == 400
async def test_tags_with_wave(hass, demo_provider):
"""Set up the demo platform and call service and receive voice."""
    # The data below is an empty WAV file: a RIFF/WAVE header, an "fmt " chunk
    # and a zero-length "data" chunk.
demo_data = bytes.fromhex(
"52 49 46 46 24 00 00 00 57 41 56 45 66 6d 74 20 10 00 00 00 01 00 02 00"
+ "22 56 00 00 88 58 01 00 04 00 10 00 64 61 74 61 00 00 00 00"
)
tagged_data = tts.SpeechManager.write_tags(
"42f18378fd4393d18c8dd11d03fa9563c1e54491_en_-_demo.wav",
demo_data,
demo_provider,
"AI person is in front of your door.",
"en",
None,
)
assert tagged_data != demo_data
|
from django.contrib.sites.models import Site
from django.test import TestCase
from tagging.models import Tag
from zinnia.managers import PUBLISHED
from zinnia.managers import entries_published
from zinnia.managers import tags_published
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.models.entry import Entry
from zinnia.signals import disconnect_entry_signals
from zinnia.tests.utils import datetime
from zinnia.tests.utils import skip_if_custom_user
@skip_if_custom_user
class ManagersTestCase(TestCase):
def setUp(self):
disconnect_entry_signals()
self.sites = [
Site.objects.get_current(),
Site.objects.create(domain='http://domain.com',
name='Domain.com')]
self.authors = [
Author.objects.create_user(username='webmaster',
email='[email protected]'),
Author.objects.create_user(username='contributor',
email='[email protected]')]
self.categories = [
Category.objects.create(title='Category 1',
slug='category-1'),
Category.objects.create(title='Category 2',
slug='category-2')]
params = {'title': 'My entry 1', 'content': 'My content 1',
'tags': 'zinnia, test', 'slug': 'my-entry-1',
'status': PUBLISHED}
self.entry_1 = Entry.objects.create(**params)
self.entry_1.authors.add(self.authors[0])
self.entry_1.categories.add(*self.categories)
self.entry_1.sites.add(*self.sites)
params = {'title': 'My entry 2', 'content': 'My content 2',
'tags': 'zinnia, test', 'slug': 'my-entry-2'}
self.entry_2 = Entry.objects.create(**params)
self.entry_2.authors.add(*self.authors)
self.entry_2.categories.add(self.categories[0])
self.entry_2.sites.add(self.sites[0])
def test_tags_published(self):
self.assertEqual(tags_published().count(), Tag.objects.count())
Tag.objects.create(name='out')
self.assertNotEqual(tags_published().count(), Tag.objects.count())
def test_author_published_manager_get_query_set(self):
self.assertEqual(Author.published.count(), 1)
self.entry_2.status = PUBLISHED
self.entry_2.save()
self.assertEqual(Author.published.count(), 2)
self.entry_2.sites.remove(self.sites[0])
self.entry_2.sites.add(self.sites[1])
self.assertEqual(Author.published.count(), 1)
def test_category_published_manager_get_query_set(self):
category = Category.objects.create(
title='Third Category', slug='third-category')
self.assertEqual(Category.published.count(), 2)
self.entry_2.categories.add(category)
self.entry_2.status = PUBLISHED
self.entry_2.save()
self.assertEqual(Category.published.count(), 3)
def test_entries_published(self):
self.assertEqual(entries_published(Entry.objects.all()).count(), 1)
self.entry_2.status = PUBLISHED
self.entry_2.save()
self.assertEqual(entries_published(Entry.objects.all()).count(), 2)
self.entry_1.sites.clear()
self.assertEqual(entries_published(Entry.objects.all()).count(), 1)
self.entry_1.sites.add(*self.sites)
self.entry_1.start_publication = datetime(2030, 1, 1)
self.entry_1.save()
self.assertEqual(entries_published(Entry.objects.all()).count(), 1)
self.entry_1.start_publication = datetime(2000, 1, 1)
self.entry_1.save()
self.assertEqual(entries_published(Entry.objects.all()).count(), 2)
self.entry_1.end_publication = datetime(2000, 1, 1)
self.entry_1.save()
self.assertEqual(entries_published(Entry.objects.all()).count(), 1)
self.entry_1.end_publication = datetime(2030, 1, 1)
self.entry_1.save()
self.assertEqual(entries_published(Entry.objects.all()).count(), 2)
def test_entry_published_manager_get_query_set(self):
self.assertEqual(Entry.published.count(), 1)
self.entry_2.status = PUBLISHED
self.entry_2.save()
self.assertEqual(Entry.published.count(), 2)
self.entry_1.sites.clear()
self.assertEqual(Entry.published.count(), 1)
self.entry_1.sites.add(*self.sites)
self.entry_1.start_publication = datetime(2030, 1, 1)
self.entry_1.save()
self.assertEqual(Entry.published.count(), 1)
self.entry_1.start_publication = datetime(2000, 1, 1)
self.entry_1.save()
self.assertEqual(Entry.published.count(), 2)
self.entry_1.end_publication = datetime(2000, 1, 1)
self.entry_1.save()
self.assertEqual(Entry.published.count(), 1)
self.entry_1.end_publication = datetime(2030, 1, 1)
self.entry_1.save()
self.assertEqual(Entry.published.count(), 2)
def test_entry_published_manager_on_site(self):
self.assertEqual(Entry.published.on_site().count(), 2)
self.entry_2.sites.clear()
self.entry_2.sites.add(self.sites[1])
self.assertEqual(Entry.published.on_site().count(), 1)
self.entry_1.sites.clear()
self.assertEqual(Entry.published.on_site().count(), 0)
def test_entry_published_manager_basic_search(self):
self.assertEqual(Entry.published.basic_search('My ').count(), 1)
self.entry_2.status = PUBLISHED
self.entry_2.save()
self.assertEqual(Entry.published.basic_search('My ').count(), 2)
self.assertEqual(Entry.published.basic_search('1').count(), 1)
self.assertEqual(Entry.published.basic_search('content 1').count(), 2)
def test_entry_published_manager_advanced_search(self):
category = Category.objects.create(
title='SimpleCategory', slug='simple')
self.entry_2.categories.add(category)
self.entry_2.tags = self.entry_2.tags + ', custom'
self.entry_2.status = PUBLISHED
self.entry_2.save()
self.assertEqual(
Entry.published.advanced_search('content').count(), 2)
search = Entry.published.advanced_search('content 1')
self.assertEqual(search.count(), 1)
self.assertEqual(search.all()[0], self.entry_1)
self.assertEqual(
Entry.published.advanced_search('content 1 or 2').count(), 2)
self.assertEqual(
Entry.published.advanced_search('content 1 and 2').count(), 0)
self.assertEqual(
Entry.published.advanced_search('content 1 2').count(), 0)
self.assertEqual(
Entry.published.advanced_search('"My content" 1 or 2').count(), 2)
self.assertEqual(
Entry.published.advanced_search('-"My content" 2').count(), 0)
search = Entry.published.advanced_search('content -1')
self.assertEqual(search.count(), 1)
self.assertEqual(search.all()[0], self.entry_2)
self.assertEqual(Entry.published.advanced_search(
'content category:SimpleCategory').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'content category:simple').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'content category:"Category 1"').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'content category:"category-1"').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'content category:"category-2"').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'content tag:zinnia').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'content tag:custom').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'content author:webmaster').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'content author:contributor').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'content author:webmaster tag:zinnia').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'content author:webmaster tag:custom').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'content 1 or 2 author:webmaster').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'content 1 or 2 author:webmaster').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'(author:webmaster content) my').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'(author:webmaster) or (author:contributor)').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'(author:webmaster) (author:contributor)').count(), 0)
self.assertEqual(Entry.published.advanced_search(
'(author:webmaster content) 1').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'(author:webmaster content) or 2').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'(author:contributor content) or 1').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'(author:contributor content) or 2').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'(author:webmaster or ("hello world")) and 2').count(), 1)
# Complex queries
self.assertEqual(Entry.published.advanced_search(
'(author:admin and "content 1") or author:webmaster').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'author:admin and ("content 1" or author:webmaster)').count(), 0)
self.assertEqual(Entry.published.advanced_search(
'author:admin and "content 1" or author:webmaster').count(), 0)
self.assertEqual(Entry.published.advanced_search(
'-(author:webmaster and "content 1")').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'-(-author:webmaster and "content 1")').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'category:"category -1" or author:"web master"').count(), 0)
self.assertEqual(Entry.published.advanced_search(
'category:"category-1" or author:"webmaster"').count(), 2)
# Wildcards
self.assertEqual(Entry.published.advanced_search(
'author:webm*').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'author:*bmas*').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'author:*master').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'author:*master category:*ory-2').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'author:*master or category:cate*').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'category:*ate*').count(), 2)
self.assertEqual(Entry.published.advanced_search(
'author:"webmast*"').count(), 0)
self.assertEqual(Entry.published.advanced_search(
'tag:"zinnia*"').count(), 0)
self.assertEqual(Entry.published.advanced_search(
'tag:*inni*').count(), 2)
def test_entry_published_manager_advanced_search_with_punctuation(self):
self.entry_2.content = 'How are you today ? Fine thank you ! OK.'
self.entry_2.status = PUBLISHED
self.entry_2.save()
self.assertEqual(Entry.published.advanced_search(
'today ?').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'today or ! or .').count(), 1)
self.assertEqual(Entry.published.advanced_search(
'"you today ?"').count(), 1)
def test_entry_published_manager_search(self):
self.entry_2.content = self.entry_2.content + ' * '
self.entry_2.status = PUBLISHED
self.entry_2.save()
# Be sure that basic_search does not return
# the same results of advanced_search
self.assertNotEqual(
Entry.published.basic_search('content 1').count(),
Entry.published.advanced_search('content 1').count())
# Now check the fallback with the '*' pattern
# which causes advanced_search to fail
self.assertEqual(Entry.published.search('*').count(), 1)
|
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
class FPN(chainer.Chain):
"""An extractor class of Feature Pyramid Networks.
This class wraps a feature extractor and provides
multi-scale features.
Args:
base (Link): A base feature extractor.
It should have :meth:`forward` and :obj:`mean`.
:meth:`forward` should take a batch of images and return
feature maps of them. The size of the :math:`k+1`-th feature map
should be half that of the :math:`k`-th feature map.
n_base_output (int): The number of feature maps
that :obj:`base` returns.
scales (tuple of floats): The scales of feature maps.
"""
def __init__(self, base, n_base_output, scales):
super(FPN, self).__init__()
with self.init_scope():
self.base = base
self.inner = chainer.ChainList()
self.outer = chainer.ChainList()
init = {'initialW': initializers.GlorotNormal()}
for _ in range(n_base_output):
self.inner.append(L.Convolution2D(256, 1, **init))
self.outer.append(L.Convolution2D(256, 3, pad=1, **init))
self.scales = scales
@property
def mean(self):
return self.base.mean
def forward(self, x):
hs = list(self.base(x))
for i in reversed(range(len(hs))):
hs[i] = self.inner[i](hs[i])
if i + 1 < len(hs):
hs[i] += F.unpooling_2d(hs[i + 1], 2, cover_all=False)
for i in range(len(hs)):
hs[i] = self.outer[i](hs[i])
while len(hs) < len(self.scales):
hs.append(F.max_pooling_2d(hs[-1], 1, stride=2, cover_all=False))
return hs
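# Rough usage sketch (added; not part of the original module, ``my_backbone`` is a
# hypothetical link that returns n_base_output feature maps of halving sizes and
# exposes a ``mean`` attribute):
#
#     extractor = FPN(base=my_backbone, n_base_output=4,
#                     scales=(1 / 4, 1 / 8, 1 / 16, 1 / 32, 1 / 64))
#     hs = extractor(imgs)  # len(hs) == len(scales); extra levels come from stride-2 max pooling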
|
import logging
from mastodon import Mastodon
from mastodon.Mastodon import MastodonAPIError, MastodonUnauthorizedError
import voluptuous as vol
from homeassistant.components.notify import PLATFORM_SCHEMA, BaseNotificationService
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_CLIENT_ID, CONF_CLIENT_SECRET
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_BASE_URL = "base_url"
DEFAULT_URL = "https://mastodon.social"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
vol.Optional(CONF_BASE_URL, default=DEFAULT_URL): cv.string,
}
)
def get_service(hass, config, discovery_info=None):
"""Get the Mastodon notification service."""
client_id = config.get(CONF_CLIENT_ID)
client_secret = config.get(CONF_CLIENT_SECRET)
access_token = config.get(CONF_ACCESS_TOKEN)
base_url = config.get(CONF_BASE_URL)
try:
mastodon = Mastodon(
client_id=client_id,
client_secret=client_secret,
access_token=access_token,
api_base_url=base_url,
)
mastodon.account_verify_credentials()
except MastodonUnauthorizedError:
_LOGGER.warning("Authentication failed")
return None
return MastodonNotificationService(mastodon)
class MastodonNotificationService(BaseNotificationService):
"""Implement the notification service for Mastodon."""
def __init__(self, api):
"""Initialize the service."""
self._api = api
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
try:
self._api.toot(message)
except MastodonAPIError:
_LOGGER.error("Unable to send message")
|
from sqlalchemy import Column, Integer, String, Boolean, Index
from lemur.database import db
class Domain(db.Model):
__tablename__ = "domains"
__table_args__ = (
Index(
"ix_domains_name_gin",
"name",
postgresql_ops={"name": "gin_trgm_ops"},
postgresql_using="gin",
),
)
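# Added note: the GIN index with gin_trgm_ops accelerates substring/ILIKE lookups
# on "name"; it relies on the PostgreSQL pg_trgm extension being installed
# (CREATE EXTENSION IF NOT EXISTS pg_trgm).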
id = Column(Integer, primary_key=True)
name = Column(String(256), index=True)
sensitive = Column(Boolean, default=False)
def __repr__(self):
return "Domain(name={name})".format(name=self.name)
|
import asyncio
import json
import pytest
from homeassistant.components.minio import (
CONF_ACCESS_KEY,
CONF_HOST,
CONF_LISTEN,
CONF_LISTEN_BUCKET,
CONF_PORT,
CONF_SECRET_KEY,
CONF_SECURE,
DOMAIN,
QueueListener,
)
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
from tests.async_mock import MagicMock, call, patch
from tests.components.minio.common import TEST_EVENT
@pytest.fixture(name="minio_client")
def minio_client_fixture():
"""Patch Minio client."""
with patch("homeassistant.components.minio.minio_helper.Minio") as minio_mock:
minio_client_mock = minio_mock.return_value
yield minio_client_mock
@pytest.fixture(name="minio_client_event")
def minio_client_event_fixture():
"""Patch helper function for minio notification stream."""
with patch("homeassistant.components.minio.minio_helper.Minio") as minio_mock:
minio_client_mock = minio_mock.return_value
response_mock = MagicMock()
stream_mock = MagicMock()
stream_mock.__next__.side_effect = [
"",
"",
bytearray(json.dumps(TEST_EVENT), "utf-8"),
]
response_mock.stream.return_value = stream_mock
minio_client_mock._url_open.return_value = response_mock
yield minio_client_mock
async def test_minio_services(hass, caplog, minio_client):
"""Test Minio services."""
hass.config.allowlist_external_dirs = {"/test"}
await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "localhost",
CONF_PORT: "9000",
CONF_ACCESS_KEY: "abcdef",
CONF_SECRET_KEY: "0123456789",
CONF_SECURE: "true",
}
},
)
await hass.async_start()
await hass.async_block_till_done()
assert "Setup of domain minio took" in caplog.text
# Call services
await hass.services.async_call(
DOMAIN,
"put",
{"file_path": "/test/some_file", "key": "some_key", "bucket": "some_bucket"},
blocking=True,
)
assert minio_client.fput_object.call_args == call(
"some_bucket", "some_key", "/test/some_file"
)
minio_client.reset_mock()
await hass.services.async_call(
DOMAIN,
"get",
{"file_path": "/test/some_file", "key": "some_key", "bucket": "some_bucket"},
blocking=True,
)
assert minio_client.fget_object.call_args == call(
"some_bucket", "some_key", "/test/some_file"
)
minio_client.reset_mock()
await hass.services.async_call(
DOMAIN, "remove", {"key": "some_key", "bucket": "some_bucket"}, blocking=True
)
assert minio_client.remove_object.call_args == call("some_bucket", "some_key")
minio_client.reset_mock()
async def test_minio_listen(hass, caplog, minio_client_event):
"""Test minio listen on notifications."""
minio_client_event.presigned_get_object.return_value = "http://url"
events = []
@callback
def event_callback(event):
"""Handle event callbback."""
events.append(event)
hass.bus.async_listen("minio", event_callback)
await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "localhost",
CONF_PORT: "9000",
CONF_ACCESS_KEY: "abcdef",
CONF_SECRET_KEY: "0123456789",
CONF_SECURE: "true",
CONF_LISTEN: [{CONF_LISTEN_BUCKET: "test"}],
}
},
)
await hass.async_start()
await hass.async_block_till_done()
assert "Setup of domain minio took" in caplog.text
while not events:
await asyncio.sleep(0)
assert 1 == len(events)
event = events[0]
assert DOMAIN == event.event_type
assert "s3:ObjectCreated:Put" == event.data["event_name"]
assert "5jJkTAo.jpg" == event.data["file_name"]
assert "test" == event.data["bucket"]
assert "5jJkTAo.jpg" == event.data["key"]
assert "http://url" == event.data["presigned_url"]
assert 0 == len(event.data["metadata"])
async def test_queue_listener():
"""Tests QueueListener firing events on Home Assistant event bus."""
hass = MagicMock()
queue_listener = QueueListener(hass)
queue_listener.start()
queue_entry = {
"event_name": "s3:ObjectCreated:Put",
"bucket": "some_bucket",
"key": "some_dir/some_file.jpg",
"presigned_url": "http://host/url?signature=secret",
"metadata": {},
}
queue_listener.queue.put(queue_entry)
queue_listener.stop()
call_domain, call_event = hass.bus.fire.call_args[0]
expected_event = {
"event_name": "s3:ObjectCreated:Put",
"file_name": "some_file.jpg",
"bucket": "some_bucket",
"key": "some_dir/some_file.jpg",
"presigned_url": "http://host/url?signature=secret",
"metadata": {},
}
assert DOMAIN == call_domain
assert json.dumps(expected_event, sort_keys=True) == json.dumps(
call_event, sort_keys=True
)
|
import os
import shutil
import tempfile
import json
import re
import zipfile
import platform
from io import open
import six
from six.moves import configparser
try:
from stashutils.extensions import create_command
from libversion import VersionSpecifier
except ImportError:
create_command = None
VersionSpecifier = None
class WheelError(Exception):
"""Error related to a wheel."""
pass
def parse_wheel_name(filename):
"""
Parse the filename of a wheel and return the information as dict.
"""
if not filename.endswith(".whl"):
raise WheelError("PEP427 violation: wheels need to end with '.whl'")
else:
filename = filename[:-4]
splitted = filename.split("-")
distribution = splitted[0]
version = splitted[1]
if len(splitted) == 6:
build_tag = splitted[2]
python_tag = splitted[3]
abi_tag = splitted[4]
platform_tag = splitted[5]
elif len(splitted) == 5:
build_tag = None
python_tag = splitted[2]
abi_tag = splitted[3]
platform_tag = splitted[4]
else:
raise WheelError("PEP427 violation: invalid naming schema")
return {
"distribution": distribution,
"version": version,
"build_tag": build_tag,
"python_tag": python_tag,
"abi_tag": abi_tag,
"platform_tag": platform_tag,
}
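# Example (illustrative only): a standard PEP 427 filename splits into its tags.
#
#     parse_wheel_name("six-1.11.0-py2.py3-none-any.whl")
#     # -> {'distribution': 'six', 'version': '1.11.0', 'build_tag': None,
#     #     'python_tag': 'py2.py3', 'abi_tag': 'none', 'platform_tag': 'any'}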
def escape_filename_component(fragment):
"""
Escape a component of the filename as specified in PEP 427.
"""
return re.sub(r"[^\w\d.]+", "_", fragment, flags=re.UNICODE)
def generate_filename(
distribution,
version,
build_tag=None,
python_tag=None,
abi_tag=None,
platform_tag=None,
):
"""
Generate a filename for the wheel and return it.
"""
if python_tag is None:
if six.PY3:
python_tag = "py3"
else:
python_tag = "py2"
if abi_tag is None:
abi_tag = "none"
if platform_tag is None:
platform_tag = "any"
return "{d}-{v}{b}-{py}-{a}-{p}.whl".format(
d=escape_filename_component(distribution),
v=escape_filename_component(version),
b=("-" + escape_filename_component(build_tag) if build_tag is not None else ""),
py=escape_filename_component(python_tag),
a=escape_filename_component(abi_tag),
p=escape_filename_component(platform_tag),
)
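# Example (illustrative only): with the defaults filled in, the generated name
# round-trips through parse_wheel_name().
#
#     generate_filename("my_pkg", "1.0.0")
#     # -> 'my_pkg-1.0.0-py3-none-any.whl' on Python 3 ('py2' instead on Python 2)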
def wheel_is_compatible(filename):
"""
Return True if the wheel is compatible, False otherwise.
"""
data = parse_wheel_name(filename)
if ("py2.py3" in data["python_tag"]) or ("py3.py2" in data["python_tag"]):
# universal wheel (py2.py3): compatible either way, nothing more to check here
pass
elif six.PY3:
if not data["python_tag"].startswith("py3"):
return False
else:
if not data["python_tag"].startswith("py2"):
return False
if data["abi_tag"].lower() != "none":
return False
if data["platform_tag"].lower() != "any":
return False
return True
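# Example (illustrative only): only pure-python wheels are accepted.
#
#     wheel_is_compatible("six-1.11.0-py2.py3-none-any.whl")       # True
#     wheel_is_compatible("numpy-1.19.0-cp38-cp38-win_amd64.whl")  # False (abi/platform specific)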
class BaseHandler(object):
"""
Base class for installation handlers.
"""
name = "<name not set>"
def __init__(self, wheel, verbose=False):
self.wheel = wheel
self.verbose = verbose
def copytree(self, packagepath, src, dest, remove=False):
"""
Copies a package directory tree.
:param packagepath: relative path of the (sub-)package, e.g. 'package/subpackage/'
:type packagepath: str
:param src: path to the actual source of the root package
:type src: str
:param dest: path to copy to
:type dest: str
:param remove: whether to remove an existing file or directory at the destination first
:type remove: bool
:return: the path to which the directories have been copied.
:rtype: str
"""
if self.verbose:
print("Copying {s} -> {d}".format(s=src, d=dest))
if os.path.isfile(src):
if os.path.isdir(dest):
dest = os.path.join(dest, os.path.basename(src))
if os.path.exists(dest) and remove:
os.remove(dest)
shutil.copy(src, dest)
return dest
else:
target = os.path.join(
dest,
# os.path.basename(os.path.normpath(src)),
packagepath,
)
if os.path.exists(target) and remove:
shutil.rmtree(target)
shutil.copytree(src, target)
return target
@property
def distinfo_name(self):
"""the name of the *.dist-info directory."""
data = parse_wheel_name(self.wheel.filename)
return "{pkg}-{v}.dist-info".format(
pkg=data["distribution"],
v=data["version"],
)
class TopLevelHandler(BaseHandler):
"""Handler for 'top_level.txt'"""
name = "top_level.txt installer"
def handle_install(self, src, dest):
tltxtp = os.path.join(src, self.distinfo_name, "top_level.txt")
files_installed = []
if not os.path.exists(tltxtp):
files = os.listdir(src)
fin = [file_name for file_name in files if file_name != self.distinfo_name]
print('No top_level.txt found, guessing the packages to install:', fin)
else:
with open(tltxtp, "r") as f:
fin = f.readlines()
for pkg_name in fin:
pure = pkg_name.replace("\r", "").replace("\n", "")
sp = os.path.join(src, pure)
if os.path.exists(sp):
p = self.copytree(pure, sp, dest, remove=True)
elif os.path.exists(sp + ".py"):
dp = os.path.join(dest, pure + ".py")
p = self.copytree(pure, sp + ".py", dp, remove=True)
else:
raise WheelError("top_level.txt entry '{e}' not found in toplevel directory!".format(e=pure))
files_installed.append(p)
return files_installed
class ConsoleScriptsHandler(BaseHandler):
"""Handler for 'console_scripts'."""
name = "console_scripts installer"
def handle_install(self, src, dest):
eptxtp = os.path.join(src, self.distinfo_name, "entry_points.txt")
if not os.path.exists(eptxtp):
if self.verbose:
print("No entry_points.txt found, skipping.")
return
parser = configparser.ConfigParser()
try:
parser.read(eptxtp)
except configparser.MissingSectionHeaderError:
# print message and return
if self.verbose:
print("No section headers found in entry_points.txt, passing.")
return
if not parser.has_section("console_scripts"):
if self.verbose:
print("No console_scripts definition found, skipping.")
return
if create_command is None:
if self.verbose:
print("Warning: could not import create_command(); skipping.")
return
files_installed = []
mdp = os.path.join(src, self.distinfo_name, "metadata.json")
if os.path.exists(mdp):
with open(mdp, "r") as fin:
desc = json.load(fin).get("summary", "???")
else:
desc = "???"
for command, definition in parser.items(section="console_scripts"):
# name, loc = definition.replace(" ", "").split("=")
modname, funcname = definition.split(":")
if not command.endswith(".py"):
command += ".py"
path = create_command(
command,
(
u"""'''%s'''
from %s import %s
if __name__ == "__main__":
%s()
""" % (desc,
modname,
funcname,
funcname)
).encode("utf-8")
)
files_installed.append(path)
return files_installed
class WheelInfoHandler(BaseHandler):
"""Handler for wheel informations."""
name = "WHEEL information checker"
supported_major_versions = [1]
supported_versions = ["1.0"]
def handle_install(self, src, dest):
wtxtp = os.path.join(src, self.distinfo_name, "WHEEL")
with open(wtxtp, "r") as fin:
for line in fin:
line = line.replace("\r", "").replace("\n", "")
ki = line.find(":")
key = line[:ki]
value = line[ki + 2:]
if key.lower() == "wheel-version":
major, minor = value.split(".")
major, minor = int(major), int(minor)
if major not in self.supported_major_versions:
raise WheelError("Wheel major version is incompatible!")
if value not in self.supported_versions:
print("WARNING: unsupported minor version: " + str(value))
self.wheel.version = (major, minor)
elif key.lower() == "generator":
if self.verbose:
print("Wheel generated by: " + value)
return []
class DependencyHandler(BaseHandler):
"""
Handler for the dependencies.
"""
name = "dependency handler"
def handle_install(self, src, dest):
metajsonp = os.path.join(src, self.distinfo_name, "metadata.json")
metadatap = os.path.join(src, self.distinfo_name, "METADATA")
if not os.path.exists(metajsonp):
if os.path.exists(metadatap):
if self.verbose:
print("Reading 'METADATA' file...")
dependencies = self.read_dependencies_from_METADATA(metadatap)
else:
if self.verbose:
print("Warning: could find neither 'metadata.json' nor `METADATA`, can not detect dependencies!")
return
else:
if self.verbose:
print("Reading 'metadata.json' file...")
with open(metajsonp, "r") as fin:
content = json.load(fin)
dependencies = []
for ds in content.get("run_requires", []):
ex = ds.get("extra", None)
dep = ds.get("requires", [])
if ex is not None:
if ex not in self.wheel.extras:
# extra not wanted
continue
else:
if self.verbose:
print("Adding dependencies for extra '{e}'...".format(e=ex))
dependencies += dep
else:
dependencies += dep
self.wheel.dependencies += dependencies
def read_dependencies_from_METADATA(self, p):
"""read dependencies from distinfo/METADATA"""
dependencies = []
with open(p, "r", encoding='utf-8') as fin:
for line in fin:
line = line.replace("\n", "")
if line.startswith("Requires-Dist: "):
t = line[len("Requires-Dist: "):]
if ";" in t:
es = t[t.find(";") + 1:].replace('"', "").replace("'", "")
t = t[:t.find(";")].strip()
if VersionSpecifier is None:
# libversion not found
print(
"Warning: could not import libversion.VersionSpecifier! Ignoring version and extra dependencies."
)
rq, v, extras = "<libversion not found>", "???", []
else:
rq, v, extras = VersionSpecifier.parse_requirement(es)
if rq == "python_version":
# handle python version dependencies
if not v.match(platform.python_version()):
# dependency NOT required
continue
elif rq == "extra":
# handle extra dependencies
matched = any([v.match(e) for e in self.wheel.extras])
if not matched:
# dependency NOT required
continue
else:
if self.verbose:
print("Adding dependencies for extras...")
else:
# unknown requirement for dependency
# warn user and register the dependency
print("Warning: unknown dependency requirement: '{}'".format(rq))
print("Warning: Adding dependency '{}', ignoring requirements for dependency.".format(t))
# Do nothing here: as long as we don't hit 'continue' or 'break', the dependency will be added.
dependencies.append(t)
return dependencies
# list of default handlers
DEFAULT_HANDLERS = [
WheelInfoHandler,
DependencyHandler,
TopLevelHandler,
ConsoleScriptsHandler,
]
class Wheel(object):
"""class for installing python wheels."""
def __init__(self, path, handlers=DEFAULT_HANDLERS, extras=[], verbose=False):
self.path = path
self.extras = extras
self.verbose = verbose
self.filename = os.path.basename(self.path)
self.handlers = [handler(self, self.verbose) for handler in handlers]
self.version = None # to be set by handler
self.dependencies = [] # to be set by handler
if not wheel_is_compatible(self.filename):
raise WheelError("Incompatible wheel: {p}!".format(p=self.filename))
def install(self, targetdir):
"""
Install the wheel into the target directory.
Return (files_installed, dependencies)
"""
if self.verbose:
print("Extracting wheel..")
tp = self.extract_into_temppath()
if self.verbose:
print("Extraction finished, running handlers...")
try:
files_installed = []
for handler in self.handlers:
if hasattr(handler, "handle_install"):
if self.verbose:
print("Running handler '{h}'...".format(h=getattr(handler, "name", "<unknown>")))
tfi = handler.handle_install(tp, targetdir)
if tfi is not None:
files_installed += tfi
finally:
if self.verbose:
print("Cleaning up...")
if os.path.exists(tp):
shutil.rmtree(tp)
return (files_installed, self.dependencies)
def extract_into_temppath(self):
"""
Extract the wheel into a temporary directory.
Return the path of the temporary directory.
"""
p = os.path.join(tempfile.gettempdir(), "wheel_tmp", self.filename)
if not os.path.exists(p):
os.makedirs(p)
with zipfile.ZipFile(self.path, mode="r") as zf:
zf.extractall(p)
return p
if __name__ == "__main__":
# test script
import argparse
import sys
parser = argparse.ArgumentParser(description="Wheel debug installer")
parser.add_argument("path", help="path to .whl", action="store")
parser.add_argument("-q", help="be less verbose", action="store_false", dest="verbose")
parser.add_argument("extras", action="store", nargs="*", help="extras to install")
ns = parser.parse_args()
print("Installing {} with extras {}...".format(ns.path, ns.extras))
fi, dep = Wheel(ns.path, verbose=ns.verbose, extras=ns.extras).install(os.path.expanduser("~/Documents/site-packages/"))
print("files installed: ")
print(fi)
print("dependencies:")
print(dep)
if len(dep) > 0:
print("WARNING: Dependencies were not installed.")
|
import logging
import unittest
from homeassistant.components import litejet
from tests.common import get_test_home_assistant
_LOGGER = logging.getLogger(__name__)
class TestLiteJet(unittest.TestCase):
"""Test the litejet component."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.hass.start()
self.hass.block_till_done()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_is_ignored_unspecified(self):
"""Ensure it is ignored when unspecified."""
self.hass.data["litejet_config"] = {}
assert not litejet.is_ignored(self.hass, "Test")
def test_is_ignored_empty(self):
"""Ensure it is ignored when empty."""
self.hass.data["litejet_config"] = {litejet.CONF_EXCLUDE_NAMES: []}
assert not litejet.is_ignored(self.hass, "Test")
def test_is_ignored_normal(self):
"""Test if usually ignored."""
self.hass.data["litejet_config"] = {
litejet.CONF_EXCLUDE_NAMES: ["Test", "Other One"]
}
assert litejet.is_ignored(self.hass, "Test")
assert not litejet.is_ignored(self.hass, "Other one")
assert not litejet.is_ignored(self.hass, "Other 0ne")
assert litejet.is_ignored(self.hass, "Other One There")
assert litejet.is_ignored(self.hass, "Other One")
|
import mock
import pytest
import paasta_tools.instance.kubernetes as pik
from paasta_tools import utils
def test_instance_types_integrity():
for it in pik.INSTANCE_TYPES:
assert it in utils.INSTANCE_TYPES
for it in pik.INSTANCE_TYPES_WITH_SET_STATE:
assert it in utils.INSTANCE_TYPES
def instance_status_kwargs():
return dict(
service="",
instance="",
instance_type="",
verbose=0,
include_smartstack=False,
include_envoy=False,
settings=mock.Mock(),
)
@mock.patch("paasta_tools.instance.kubernetes.cr_status", autospec=True)
@mock.patch("paasta_tools.instance.kubernetes.kubernetes_status", autospec=True)
def test_instance_status_invalid_instance_type(mock_kubernetes_status, mock_cr_status):
kwargs = instance_status_kwargs()
with pytest.raises(RuntimeError) as excinfo:
pik.instance_status(**kwargs)
assert "Unknown instance type" in str(excinfo.value)
assert len(mock_cr_status.mock_calls) == 0
assert len(mock_kubernetes_status.mock_calls) == 0
@mock.patch("paasta_tools.instance.kubernetes.cr_status", autospec=True)
@mock.patch("paasta_tools.instance.kubernetes.kubernetes_status", autospec=True)
def test_instance_status_kubernetes_only(mock_kubernetes_status, mock_cr_status):
kwargs = instance_status_kwargs()
kwargs.update(instance_type="kubernetes")
pik.instance_status(**kwargs)
assert len(mock_cr_status.mock_calls) == 0
assert len(mock_kubernetes_status.mock_calls) == 1
@mock.patch("paasta_tools.instance.kubernetes.cr_status", autospec=True)
@mock.patch("paasta_tools.instance.kubernetes.kubernetes_status", autospec=True)
def test_instance_status_cr_only(mock_kubernetes_status, mock_cr_status):
kwargs = instance_status_kwargs()
kwargs.update(instance_type="flink")
pik.instance_status(**kwargs)
assert len(mock_cr_status.mock_calls) == 1
assert len(mock_kubernetes_status.mock_calls) == 0
@mock.patch("paasta_tools.instance.kubernetes.cr_status", autospec=True)
@mock.patch("paasta_tools.instance.kubernetes.kubernetes_status", autospec=True)
def test_instance_status_cr_and_kubernetes(mock_kubernetes_status, mock_cr_status):
kwargs = instance_status_kwargs()
kwargs.update(instance_type="cassandracluster")
pik.instance_status(**kwargs)
assert len(mock_cr_status.mock_calls) == 1
assert len(mock_kubernetes_status.mock_calls) == 1
@mock.patch("paasta_tools.instance.kubernetes.job_status", autospec=True)
@mock.patch(
"paasta_tools.kubernetes_tools.replicasets_for_service_instance", autospec=True
)
@mock.patch("paasta_tools.kubernetes_tools.pods_for_service_instance", autospec=True)
@mock.patch("paasta_tools.kubernetes_tools.get_kubernetes_app_by_name", autospec=True)
@mock.patch(
"paasta_tools.instance.kubernetes.LONG_RUNNING_INSTANCE_TYPE_HANDLERS",
autospec=True,
)
def test_kubernetes_status(
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS,
mock_get_kubernetes_app_by_name,
mock_pods_for_service_instance,
mock_replicasets_for_service_instance,
mock_job_status,
):
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS["flink"] = mock.Mock()
mock_pods_for_service_instance.return_value = []
mock_replicasets_for_service_instance.return_value = []
status = pik.kubernetes_status(
service="",
instance="",
verbose=0,
include_smartstack=False,
include_envoy=False,
instance_type="flink",
settings=mock.Mock(),
)
assert "app_count" in status
assert "evicted_count" in status
assert "bounce_method" in status
assert "desired_state" in status
@mock.patch("paasta_tools.instance.kubernetes.job_status", autospec=True)
@mock.patch(
"paasta_tools.kubernetes_tools.load_service_namespace_config", autospec=True
)
@mock.patch("paasta_tools.instance.kubernetes.mesh_status", autospec=True)
@mock.patch(
"paasta_tools.kubernetes_tools.replicasets_for_service_instance", autospec=True
)
@mock.patch("paasta_tools.kubernetes_tools.pods_for_service_instance", autospec=True)
@mock.patch("paasta_tools.kubernetes_tools.get_kubernetes_app_by_name", autospec=True)
@mock.patch(
"paasta_tools.instance.kubernetes.LONG_RUNNING_INSTANCE_TYPE_HANDLERS",
autospec=True,
)
def test_kubernetes_status_include_smartstack(
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS,
mock_get_kubernetes_app_by_name,
mock_pods_for_service_instance,
mock_replicasets_for_service_instance,
mock_mesh_status,
mock_load_service_namespace_config,
mock_job_status,
):
mock_load_service_namespace_config.return_value = {"proxy_port": 1234}
mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS["flink"] = mock.Mock()
mock_pods_for_service_instance.return_value = []
mock_replicasets_for_service_instance.return_value = []
mock_service = mock.Mock()
status = pik.kubernetes_status(
service=mock_service,
instance="",
verbose=0,
include_smartstack=True,
include_envoy=False,
instance_type="flink",
settings=mock.Mock(),
)
assert (
mock_load_service_namespace_config.mock_calls[0][2]["service"] is mock_service
)
assert mock_mesh_status.mock_calls[0][2]["service"] is mock_service
assert "app_count" in status
assert "evicted_count" in status
assert "bounce_method" in status
assert "desired_state" in status
def test_cr_status_bad_instance_type():
with pytest.raises(RuntimeError) as excinfo:
pik.cr_status(
service="",
instance="",
verbose=0,
instance_type="marathon",
kube_client=mock.Mock(),
)
assert "Unknown instance type" in str(excinfo.value)
@mock.patch("paasta_tools.kubernetes_tools.get_cr", autospec=True)
def test_cr_status_happy_path(mock_get_cr):
mock_status = mock.Mock()
mock_metadata = mock.Mock()
mock_return = dict(status=mock_status, metadata=mock_metadata)
mock_get_cr.return_value = mock_return
status = pik.cr_status(
service="",
instance="",
verbose=0,
instance_type="flink",
kube_client=mock.Mock(),
)
assert status == mock_return
def test_set_cr_desired_state_invalid_instance_type():
with pytest.raises(RuntimeError) as excinfo:
pik.set_cr_desired_state(
kube_client=mock.Mock(),
service=mock.Mock(),
instance=mock.Mock(),
instance_type="marathon",
desired_state=mock.Mock(),
)
assert "Unknown instance type" in str(excinfo.value)
@mock.patch("paasta_tools.kubernetes_tools.set_cr_desired_state", autospec=True)
def test_set_cr_desired_state_calls_k8s_tools(mock_set_cr_desired_state):
pik.set_cr_desired_state(
kube_client=mock.Mock(),
service=mock.Mock(),
instance=mock.Mock(),
instance_type="flink",
desired_state=mock.Mock(),
)
assert len(mock_set_cr_desired_state.mock_calls) == 1
def test_can_set_state():
for it in pik.INSTANCE_TYPES_WITH_SET_STATE:
assert pik.can_set_state(it)
assert not pik.can_set_state("marathon")
def test_can_handle():
for it in pik.INSTANCE_TYPES:
assert pik.can_handle(it)
assert not pik.can_handle("marathon")
|
import logging
import time
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_OCTOPRINT
from homeassistant.const import (
CONF_API_KEY,
CONF_BINARY_SENSORS,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PATH,
CONF_PORT,
CONF_SENSORS,
CONF_SSL,
CONTENT_TYPE_JSON,
PERCENTAGE,
TEMP_CELSIUS,
TIME_SECONDS,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.util import slugify as util_slugify
_LOGGER = logging.getLogger(__name__)
CONF_BED = "bed"
CONF_NUMBER_OF_TOOLS = "number_of_tools"
DEFAULT_NAME = "OctoPrint"
DOMAIN = "octoprint"
def has_all_unique_names(value):
"""Validate that printers have an unique name."""
names = [util_slugify(printer["name"]) for printer in value]
vol.Schema(vol.Unique())(names)
return value
def ensure_valid_path(value):
"""Validate the path, ensuring it starts and ends with a /."""
vol.Schema(cv.string)(value)
if value[0] != "/":
value = f"/{value}"
if value[-1] != "/":
value += "/"
return value
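# Example (illustrative only): "octoprint" and "/octoprint" both normalise to "/octoprint/".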
BINARY_SENSOR_TYPES = {
# API Endpoint, Group, Key, unit
"Printing": ["printer", "state", "printing", None],
"Printing Error": ["printer", "state", "error", None],
}
BINARY_SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(
CONF_MONITORED_CONDITIONS, default=list(BINARY_SENSOR_TYPES)
): vol.All(cv.ensure_list, [vol.In(BINARY_SENSOR_TYPES)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
SENSOR_TYPES = {
# API Endpoint, Group, Key, unit, icon
"Temperatures": ["printer", "temperature", "*", TEMP_CELSIUS],
"Current State": ["printer", "state", "text", None, "mdi:printer-3d"],
"Job Percentage": [
"job",
"progress",
"completion",
PERCENTAGE,
"mdi:file-percent",
],
"Time Remaining": [
"job",
"progress",
"printTimeLeft",
TIME_SECONDS,
"mdi:clock-end",
],
"Time Elapsed": ["job", "progress", "printTime", TIME_SECONDS, "mdi:clock-start"],
}
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_PORT, default=80): cv.port,
vol.Optional(CONF_PATH, default="/"): ensure_valid_path,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_NUMBER_OF_TOOLS, default=0): cv.positive_int,
vol.Optional(CONF_BED, default=False): cv.boolean,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
vol.Optional(
CONF_BINARY_SENSORS, default={}
): BINARY_SENSOR_SCHEMA,
}
)
],
has_all_unique_names,
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the OctoPrint component."""
printers = hass.data[DOMAIN] = {}
success = False
def device_discovered(service, info):
"""Get called when an Octoprint server has been discovered."""
_LOGGER.debug("Found an Octoprint server: %s", info)
discovery.listen(hass, SERVICE_OCTOPRINT, device_discovered)
if DOMAIN not in config:
# Skip the setup if there is no configuration present
return True
for printer in config[DOMAIN]:
name = printer[CONF_NAME]
protocol = "https" if printer[CONF_SSL] else "http"
base_url = (
f"{protocol}://{printer[CONF_HOST]}:{printer[CONF_PORT]}"
f"{printer[CONF_PATH]}api/"
)
api_key = printer[CONF_API_KEY]
number_of_tools = printer[CONF_NUMBER_OF_TOOLS]
bed = printer[CONF_BED]
try:
octoprint_api = OctoPrintAPI(base_url, api_key, bed, number_of_tools)
printers[base_url] = octoprint_api
octoprint_api.get("printer")
octoprint_api.get("job")
except requests.exceptions.RequestException as conn_err:
_LOGGER.error("Error setting up OctoPrint API: %r", conn_err)
continue
sensors = printer[CONF_SENSORS][CONF_MONITORED_CONDITIONS]
load_platform(
hass,
"sensor",
DOMAIN,
{"name": name, "base_url": base_url, "sensors": sensors},
config,
)
b_sensors = printer[CONF_BINARY_SENSORS][CONF_MONITORED_CONDITIONS]
load_platform(
hass,
"binary_sensor",
DOMAIN,
{"name": name, "base_url": base_url, "sensors": b_sensors},
config,
)
success = True
return success
class OctoPrintAPI:
"""Simple JSON wrapper for OctoPrint's API."""
def __init__(self, api_url, key, bed, number_of_tools):
"""Initialize OctoPrint API and set headers needed later."""
self.api_url = api_url
self.headers = {CONTENT_TYPE: CONTENT_TYPE_JSON, "X-Api-Key": key}
self.printer_last_reading = [{}, None]
self.job_last_reading = [{}, None]
self.job_available = False
self.printer_available = False
self.available = False
self.printer_error_logged = False
self.job_error_logged = False
self.bed = bed
self.number_of_tools = number_of_tools
def get_tools(self):
"""Get the list of tools that temperature is monitored on."""
tools = []
if self.number_of_tools > 0:
for tool_number in range(0, self.number_of_tools):
tools.append(f"tool{tool_number!s}")
if self.bed:
tools.append("bed")
if not self.bed and self.number_of_tools == 0:
temps = self.printer_last_reading[0].get("temperature")
if temps is not None:
tools = temps.keys()
return tools
def get(self, endpoint):
"""Send a get request, and return the response as a dict."""
# Only query the API at most every 30 seconds
now = time.time()
if endpoint == "job":
last_time = self.job_last_reading[1]
if last_time is not None:
if now - last_time < 30.0:
return self.job_last_reading[0]
elif endpoint == "printer":
last_time = self.printer_last_reading[1]
if last_time is not None:
if now - last_time < 30.0:
return self.printer_last_reading[0]
url = self.api_url + endpoint
try:
response = requests.get(url, headers=self.headers, timeout=9)
response.raise_for_status()
if endpoint == "job":
self.job_last_reading[0] = response.json()
self.job_last_reading[1] = time.time()
self.job_available = True
elif endpoint == "printer":
self.printer_last_reading[0] = response.json()
self.printer_last_reading[1] = time.time()
self.printer_available = True
self.available = self.printer_available and self.job_available
if self.available:
self.job_error_logged = False
self.printer_error_logged = False
return response.json()
except Exception as conn_exc: # pylint: disable=broad-except
log_string = "Failed to update OctoPrint status. Error: %s" % conn_exc
# Only log the first failure
if endpoint == "job":
log_string = f"Endpoint: job {log_string}"
if not self.job_error_logged:
_LOGGER.error(log_string)
self.job_error_logged = True
self.job_available = False
elif endpoint == "printer":
log_string = f"Endpoint: printer {log_string}"
if not self.printer_error_logged:
_LOGGER.error(log_string)
self.printer_error_logged = True
self.printer_available = False
self.available = False
return None
def update(self, sensor_type, end_point, group, tool=None):
"""Return the value for sensor_type from the provided endpoint."""
response = self.get(end_point)
if response is not None:
return get_value_from_json(response, sensor_type, group, tool)
return response
def get_value_from_json(json_dict, sensor_type, group, tool):
"""Return the value for sensor_type from the JSON."""
if group not in json_dict:
return None
if sensor_type in json_dict[group]:
if sensor_type == "target" and json_dict[sensor_type] is None:
return 0
return json_dict[group][sensor_type]
if tool is not None:
if sensor_type in json_dict[group][tool]:
return json_dict[group][tool][sensor_type]
return None
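# Example (illustrative only; structure modelled on OctoPrint's /api/printer payload):
#
#     sample = {"temperature": {"tool0": {"actual": 210.0, "target": 215.0}}}
#     get_value_from_json(sample, "actual", "temperature", "tool0")  # -> 210.0
#     get_value_from_json(sample, "text", "state", None)             # -> None (group missing)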
|
import keras
import tensorflow as tf
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.param import Param
from matchzoo.engine import hyper_spaces
class KNRM(BaseModel):
"""
KNRM model.
Examples:
>>> model = KNRM()
>>> model.params['embedding_input_dim'] = 10000
>>> model.params['embedding_output_dim'] = 10
>>> model.params['embedding_trainable'] = True
>>> model.params['kernel_num'] = 11
>>> model.params['sigma'] = 0.1
>>> model.params['exact_sigma'] = 0.001
>>> model.guess_and_fill_missing_params(verbose=0)
>>> model.build()
"""
@classmethod
def get_default_params(cls):
"""Get default parameters."""
params = super().get_default_params(with_embedding=True)
params.add(Param(
name='kernel_num',
value=11,
hyper_space=hyper_spaces.quniform(low=5, high=20),
desc="The number of RBF kernels."
))
params.add(Param(
name='sigma',
value=0.1,
hyper_space=hyper_spaces.quniform(
low=0.01, high=0.2, q=0.01),
desc="The `sigma` defines the kernel width."
))
params.add(Param(
name='exact_sigma', value=0.001,
desc="The `exact_sigma` denotes the `sigma` "
"for exact match."
))
return params
def build(self):
"""Build model."""
query, doc = self._make_inputs()
embedding = self._make_embedding_layer()
q_embed = embedding(query)
d_embed = embedding(doc)
mm = keras.layers.Dot(axes=[2, 2], normalize=True)([q_embed, d_embed])
KM = []
for i in range(self._params['kernel_num']):
mu = 1. / (self._params['kernel_num'] - 1) + (2. * i) / (
self._params['kernel_num'] - 1) - 1.0
sigma = self._params['sigma']
if mu > 1.0:
sigma = self._params['exact_sigma']
mu = 1.0
mm_exp = self._kernel_layer(mu, sigma)(mm)
mm_doc_sum = keras.layers.Lambda(
lambda x: tf.reduce_sum(x, 2))(mm_exp)
mm_log = keras.layers.Activation(tf.math.log1p)(mm_doc_sum)
mm_sum = keras.layers.Lambda(
lambda x: tf.reduce_sum(x, 1))(mm_log)
KM.append(mm_sum)
phi = keras.layers.Lambda(lambda x: tf.stack(x, 1))(KM)
out = self._make_output_layer()(phi)
self._backend = keras.Model(inputs=[query, doc], outputs=[out])
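# Added note: with the default kernel_num=11 the formula above spaces the kernel
# means mu at -0.9, -0.7, ..., 0.9, 1.1; the last value is clamped to 1.0 and paired
# with exact_sigma, so that kernel effectively counts exact matches (cosine == 1).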
@classmethod
def _kernel_layer(cls, mu: float, sigma: float) -> keras.layers.Layer:
"""
Gaussian kernel layer in KNRM.
:param mu: Float, mean of the kernel.
:param sigma: Float, sigma of the kernel.
:return: `keras.layers.Layer`.
"""
def kernel(x):
return tf.math.exp(-0.5 * (x - mu) * (x - mu) / sigma / sigma)
return keras.layers.Activation(kernel)
|
from django.contrib.sites.models import Site
from django.test import TestCase
from django.utils import timezone
import django_comments as comments
from zinnia.managers import PUBLISHED
from zinnia.models.author import Author
from zinnia.models.entry import Entry
from zinnia.signals import disconnect_discussion_signals
from zinnia.signals import disconnect_entry_signals
from zinnia.spam_checker.backends.long_enough import backend
from zinnia.tests.utils import skip_if_custom_user
@skip_if_custom_user
class LongEnoughTestCase(TestCase):
"""Test cases for zinnia.spam_checker.long_enough"""
def setUp(self):
disconnect_entry_signals()
disconnect_discussion_signals()
self.site = Site.objects.get_current()
self.author = Author.objects.create(username='admin',
email='[email protected]')
params = {'title': 'My test entry',
'content': 'My test entry',
'slug': 'my-test-entry',
'status': PUBLISHED}
self.entry = Entry.objects.create(**params)
self.entry.sites.add(self.site)
self.entry.authors.add(self.author)
def test_long_enough(self):
comment = comments.get_model().objects.create(
comment='My Comment', user=self.author, is_public=True,
content_object=self.entry, site=self.site,
submit_date=timezone.now())
self.assertEqual(backend(comment, self.entry, {}), True)
comment.comment = 'Hello I just wanted to thank for great article'
comment.save()
self.assertEqual(backend(comment, self.entry, {}), False)
|
from homeassistant.components.lock import SUPPORT_OPEN, LockEntity
from .const import ATTR_DISCOVER_DEVICES
from .entity import HMDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Homematic lock platform."""
if discovery_info is None:
return
devices = []
for conf in discovery_info[ATTR_DISCOVER_DEVICES]:
devices.append(HMLock(conf))
add_entities(devices, True)
class HMLock(HMDevice, LockEntity):
"""Representation of a Homematic lock aka KeyMatic."""
@property
def is_locked(self):
"""Return true if the lock is locked."""
return not bool(self._hm_get_state())
def lock(self, **kwargs):
"""Lock the lock."""
self._hmdevice.lock()
def unlock(self, **kwargs):
"""Unlock the lock."""
self._hmdevice.unlock()
def open(self, **kwargs):
"""Open the door latch."""
self._hmdevice.open()
def _init_data_struct(self):
"""Generate the data dictionary (self._data) from metadata."""
self._state = "STATE"
self._data.update({self._state: None})
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN
|
import re
import sys
import cgi
import os
import os.path
import urllib.parse
import cherrypy
localFile = os.path.join(os.path.dirname(__file__), 'coverage.cache')
the_coverage = None
try:
from coverage import coverage
the_coverage = coverage(data_file=localFile)
def start():
the_coverage.start()
except ImportError:
# Setting the_coverage to None will raise errors
# that need to be trapped downstream.
the_coverage = None
import warnings
warnings.warn(
'No code coverage will be performed; '
'coverage.py could not be imported.')
def start():
pass
start.priority = 20
TEMPLATE_MENU = """<html>
<head>
<title>CherryPy Coverage Menu</title>
<style>
body {font: 9pt Arial, serif;}
#tree {
font-size: 8pt;
font-family: Andale Mono, monospace;
white-space: pre;
}
#tree a:active, a:focus {
background-color: black;
padding: 1px;
color: white;
border: 0px solid #9999FF;
-moz-outline-style: none;
}
.fail { color: red;}
.pass { color: #888;}
#pct { text-align: right;}
h3 {
font-size: small;
font-weight: bold;
font-style: italic;
margin-top: 5px;
}
input { border: 1px solid #ccc; padding: 2px; }
.directory {
color: #933;
font-style: italic;
font-weight: bold;
font-size: 10pt;
}
.file {
color: #400;
}
a { text-decoration: none; }
#crumbs {
color: white;
font-size: 8pt;
font-family: Andale Mono, monospace;
width: 100%;
background-color: black;
}
#crumbs a {
color: #f88;
}
#options {
line-height: 2.3em;
border: 1px solid black;
background-color: #eee;
padding: 4px;
}
#exclude {
width: 100%;
margin-bottom: 3px;
border: 1px solid #999;
}
#submit {
background-color: black;
color: white;
border: 0;
margin-bottom: -9px;
}
</style>
</head>
<body>
<h2>CherryPy Coverage</h2>"""
TEMPLATE_FORM = """
<div id="options">
<form action='menu' method=GET>
<input type='hidden' name='base' value='%(base)s' />
Show percentages
<input type='checkbox' %(showpct)s name='showpct' value='checked' /><br />
Hide files over
<input type='text' id='pct' name='pct' value='%(pct)s' size='3' />%%<br />
Exclude files matching<br />
<input type='text' id='exclude' name='exclude'
value='%(exclude)s' size='20' />
<br />
<input type='submit' value='Change view' id="submit"/>
</form>
</div>"""
TEMPLATE_FRAMESET = """<html>
<head><title>CherryPy coverage data</title></head>
<frameset cols='250, 1*'>
<frame src='menu?base=%s' />
<frame name='main' src='' />
</frameset>
</html>
"""
TEMPLATE_COVERAGE = """<html>
<head>
<title>Coverage for %(name)s</title>
<style>
h2 { margin-bottom: .25em; }
p { margin: .25em; }
.covered { color: #000; background-color: #fff; }
.notcovered { color: #fee; background-color: #500; }
.excluded { color: #00f; background-color: #fff; }
table .covered, table .notcovered, table .excluded
{ font-family: Andale Mono, monospace;
font-size: 10pt; white-space: pre; }
.lineno { background-color: #eee;}
.notcovered .lineno { background-color: #000;}
table { border-collapse: collapse; }
</style>
</head>
<body>
<h2>%(name)s</h2>
<p>%(fullpath)s</p>
<p>Coverage: %(pc)s%%</p>"""
TEMPLATE_LOC_COVERED = """<tr class="covered">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_LOC_NOT_COVERED = """<tr class="notcovered">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_LOC_EXCLUDED = """<tr class="excluded">
<td class="lineno">%s </td>
<td>%s</td>
</tr>\n"""
TEMPLATE_ITEM = (
"%s%s<a class='file' href='report?name=%s' target='main'>%s</a>\n"
)
def _percent(statements, missing):
s = len(statements)
e = s - len(missing)
if s > 0:
return int(round(100.0 * e / s))
return 0
def _show_branch(root, base, path, pct=0, showpct=False, exclude='',
coverage=the_coverage):
# Show the directory name and any of our children
dirs = [k for k, v in root.items() if v]
dirs.sort()
for name in dirs:
newpath = os.path.join(path, name)
if newpath.lower().startswith(base):
relpath = newpath[len(base):]
yield '| ' * relpath.count(os.sep)
yield (
"<a class='directory' "
"href='menu?base=%s&exclude=%s'>%s</a>\n" %
(newpath, urllib.parse.quote_plus(exclude), name)
)
for chunk in _show_branch(
root[name], base, newpath, pct, showpct,
exclude, coverage=coverage
):
yield chunk
# Now list the files
if path.lower().startswith(base):
relpath = path[len(base):]
files = [k for k, v in root.items() if not v]
files.sort()
for name in files:
newpath = os.path.join(path, name)
pc_str = ''
if showpct:
try:
_, statements, _, missing, _ = coverage.analysis2(newpath)
except Exception:
# Yes, we really want to pass on all errors.
pass
else:
pc = _percent(statements, missing)
pc_str = ('%3d%% ' % pc).replace(' ', '&nbsp;')
if pc < float(pct) or pc == -1:
pc_str = "<span class='fail'>%s</span>" % pc_str
else:
pc_str = "<span class='pass'>%s</span>" % pc_str
yield TEMPLATE_ITEM % ('| ' * (relpath.count(os.sep) + 1),
pc_str, newpath, name)
def _skip_file(path, exclude):
if exclude:
return bool(re.search(exclude, path))
def _graft(path, tree):
d = tree
p = path
atoms = []
while True:
p, tail = os.path.split(p)
if not tail:
break
atoms.append(tail)
atoms.append(p)
if p != '/':
atoms.append('/')
atoms.reverse()
for node in atoms:
if node:
d = d.setdefault(node, {})
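# Example (illustrative only): _graft mutates the passed-in tree, nesting one dict
# per path atom, so grafting '/usr/lib/python/os.py' onto {} yields
# {'/': {'usr': {'lib': {'python': {'os.py': {}}}}}}; leaf files map to {} and are
# told apart from directories by _show_branch via their empty (falsy) value.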
def get_tree(base, exclude, coverage=the_coverage):
"""Return covered module names as a nested dict."""
tree = {}
runs = coverage.data.executed_files()
for path in runs:
if not _skip_file(path, exclude) and not os.path.isdir(path):
_graft(path, tree)
return tree
class CoverStats(object):
def __init__(self, coverage, root=None):
self.coverage = coverage
if root is None:
# Guess initial depth. Files outside this path will not be
# reachable from the web interface.
root = os.path.dirname(cherrypy.__file__)
self.root = root
@cherrypy.expose
def index(self):
return TEMPLATE_FRAMESET % self.root.lower()
@cherrypy.expose
def menu(self, base='/', pct='50', showpct='',
exclude=r'python\d\.\d|test|tut\d|tutorial'):
# The coverage module uses all-lower-case names.
base = base.lower().rstrip(os.sep)
yield TEMPLATE_MENU
yield TEMPLATE_FORM % locals()
# Start by showing links for parent paths
yield "<div id='crumbs'>"
path = ''
atoms = base.split(os.sep)
atoms.pop()
for atom in atoms:
path += atom + os.sep
yield ("<a href='menu?base=%s&exclude=%s'>%s</a> %s"
% (path, urllib.parse.quote_plus(exclude), atom, os.sep))
yield '</div>'
yield "<div id='tree'>"
# Then display the tree
tree = get_tree(base, exclude, self.coverage)
if not tree:
yield '<p>No modules covered.</p>'
else:
for chunk in _show_branch(tree, base, '/', pct,
showpct == 'checked', exclude,
coverage=self.coverage):
yield chunk
yield '</div>'
yield '</body></html>'
def annotated_file(self, filename, statements, excluded, missing):
source = open(filename, 'r')
buffer = []
for lineno, line in enumerate(source.readlines()):
lineno += 1
line = line.strip('\n\r')
empty_the_buffer = True
if lineno in excluded:
template = TEMPLATE_LOC_EXCLUDED
elif lineno in missing:
template = TEMPLATE_LOC_NOT_COVERED
elif lineno in statements:
template = TEMPLATE_LOC_COVERED
else:
empty_the_buffer = False
buffer.append((lineno, line))
if empty_the_buffer:
for lno, pastline in buffer:
yield template % (lno, cgi.escape(pastline))
buffer = []
yield template % (lineno, cgi.escape(line))
@cherrypy.expose
def report(self, name):
filename, statements, excluded, missing, _ = self.coverage.analysis2(
name)
pc = _percent(statements, missing)
yield TEMPLATE_COVERAGE % dict(name=os.path.basename(name),
fullpath=name,
pc=pc)
yield '<table>\n'
for line in self.annotated_file(filename, statements, excluded,
missing):
yield line
yield '</table>'
yield '</body>'
yield '</html>'
def serve(path=localFile, port=8080, root=None):
if coverage is None:
raise ImportError('The coverage module could not be imported.')
from coverage import coverage
cov = coverage(data_file=path)
cov.load()
cherrypy.config.update({'server.socket_port': int(port),
'server.thread_pool': 10,
'environment': 'production',
})
cherrypy.quickstart(CoverStats(cov, root))
if __name__ == '__main__':
serve(*tuple(sys.argv[1:]))
|
import unittest
from urwid import graphics
from urwid.compat import B
import urwid
class LineBoxTest(unittest.TestCase):
def border(self, tl, t, tr, l, r, bl, b, br):
return [bytes().join([tl, t, tr]),
bytes().join([l, B(" "), r]),
bytes().join([bl, b, br]),]
def test_linebox_pack(self):
# Bug #346 'pack' Padding does not run with LineBox
urwid.set_encoding("utf-8")
t = urwid.Text("AAA\nCCC\nDDD")
size = t.pack()
l = urwid.LineBox(t)
self.assertEqual(l.pack()[0], size[0] + 2)
self.assertEqual(l.pack()[1], size[1] + 2)
def test_linebox_border(self):
urwid.set_encoding("utf-8")
t = urwid.Text("")
l = urwid.LineBox(t).render((3,)).text
# default
self.assertEqual(l,
self.border(B("\xe2\x94\x8c"), B("\xe2\x94\x80"),
B("\xe2\x94\x90"), B("\xe2\x94\x82"), B("\xe2\x94\x82"),
B("\xe2\x94\x94"), B("\xe2\x94\x80"), B("\xe2\x94\x98")))
nums = [B(str(n)) for n in range(8)]
b = dict(zip(["tlcorner", "tline", "trcorner", "lline", "rline",
"blcorner", "bline", "brcorner"], nums))
l = urwid.LineBox(t, **b).render((3,)).text
self.assertEqual(l, self.border(*nums))
class BarGraphTest(unittest.TestCase):
def bgtest(self, desc, data, top, widths, maxrow, exp ):
rval = graphics.calculate_bargraph_display(data,top,widths,maxrow)
assert rval == exp, "%s expected %r, got %r"%(desc,exp,rval)
def test1(self):
self.bgtest('simplest',[[0]],5,[1],1,
[(1,[(0,1)])] )
self.bgtest('simpler',[[0],[0]],5,[1,2],5,
[(5,[(0,3)])] )
self.bgtest('simple',[[5]],5,[1],1,
[(1,[(1,1)])] )
self.bgtest('2col-1',[[2],[0]],5,[1,2],5,
[(3,[(0,3)]), (2,[(1,1),(0,2)]) ] )
self.bgtest('2col-2',[[0],[2]],5,[1,2],5,
[(3,[(0,3)]), (2,[(0,1),(1,2)]) ] )
self.bgtest('2col-3',[[2],[3]],5,[1,2],5,
[(2,[(0,3)]), (1,[(0,1),(1,2)]), (2,[(1,3)]) ] )
self.bgtest('3col-1',[[5],[3],[0]],5,[2,1,1],5,
[(2,[(1,2),(0,2)]), (3,[(1,3),(0,1)]) ] )
self.bgtest('3col-2',[[4],[4],[4]],5,[2,1,1],5,
[(1,[(0,4)]), (4,[(1,4)]) ] )
self.bgtest('3col-3',[[1],[2],[3]],5,[2,1,1],5,
[(2,[(0,4)]), (1,[(0,3),(1,1)]), (1,[(0,2),(1,2)]),
(1,[(1,4)]) ] )
self.bgtest('3col-4',[[4],[2],[4]],5,[1,2,1],5,
[(1,[(0,4)]), (2,[(1,1),(0,2),(1,1)]), (2,[(1,4)]) ] )
def test2(self):
self.bgtest('simple1a',[[2,0],[2,1]],2,[1,1],2,
[(1,[(1,2)]),(1,[(1,1),(2,1)]) ] )
self.bgtest('simple1b',[[2,1],[2,0]],2,[1,1],2,
[(1,[(1,2)]),(1,[(2,1),(1,1)]) ] )
self.bgtest('cross1a',[[2,2],[1,2]],2,[1,1],2,
[(2,[(2,2)]) ] )
self.bgtest('cross1b',[[1,2],[2,2]],2,[1,1],2,
[(2,[(2,2)]) ] )
self.bgtest('mix1a',[[3,2,1],[2,2,2],[1,2,3]],3,[1,1,1],3,
[(1,[(1,1),(0,1),(3,1)]),(1,[(2,1),(3,2)]),
(1,[(3,3)]) ] )
self.bgtest('mix1b',[[1,2,3],[2,2,2],[3,2,1]],3,[1,1,1],3,
[(1,[(3,1),(0,1),(1,1)]),(1,[(3,2),(2,1)]),
(1,[(3,3)]) ] )
class SmoothBarGraphTest(unittest.TestCase):
def sbgtest(self, desc, data, top, exp ):
urwid.set_encoding('utf-8')
g = urwid.BarGraph( ['black','red','blue'],
None, {(1,0):'red/black', (2,1):'blue/red'})
g.set_data( data, top )
rval = g.calculate_display((5,3))
assert rval == exp, "%s expected %r, got %r"%(desc,exp,rval)
def test1(self):
self.sbgtest('simple', [[3]], 5,
[(1, [(0, 5)]), (1, [((1, 0, 6), 5)]), (1, [(1, 5)])] )
self.sbgtest('boring', [[4,2]], 6,
[(1, [(0, 5)]), (1, [(1, 5)]), (1, [(2,5)]) ] )
self.sbgtest('two', [[4],[2]], 6,
[(1, [(0, 5)]), (1, [(1, 3), (0, 2)]), (1, [(1, 5)]) ] )
self.sbgtest('twos', [[3],[4]], 6,
[(1, [(0, 5)]), (1, [((1,0,4), 3), (1, 2)]), (1, [(1,5)]) ] )
self.sbgtest('twof', [[4],[3]], 6,
[(1, [(0, 5)]), (1, [(1,3), ((1,0,4), 2)]), (1, [(1,5)]) ] )
|
from datetime import timedelta
import logging
from omnilogic import OmniLogicException
from homeassistant.const import ATTR_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
ALL_ITEM_KINDS,
ATTR_IDENTIFIERS,
ATTR_MANUFACTURER,
ATTR_MODEL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class OmniLogicUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching update data from single endpoint."""
def __init__(
self,
hass: HomeAssistant,
api: str,
name: str,
polling_interval: int,
):
"""Initialize the global Omnilogic data updater."""
self.api = api
super().__init__(
hass=hass,
logger=_LOGGER,
name=name,
update_interval=timedelta(seconds=polling_interval),
)
async def _async_update_data(self):
"""Fetch data from OmniLogic."""
try:
data = await self.api.get_telemetry_data()
except OmniLogicException as error:
raise UpdateFailed(f"Error updating from OmniLogic: {error}") from error
parsed_data = {}
def get_item_data(item, item_kind, current_id, data):
"""Get data per kind of Omnilogic API item."""
if isinstance(item, list):
for single_item in item:
data = get_item_data(single_item, item_kind, current_id, data)
if "systemId" in item:
system_id = item["systemId"]
current_id = current_id + (item_kind, system_id)
data[current_id] = item
for kind in ALL_ITEM_KINDS:
if kind in item:
data = get_item_data(item[kind], kind, current_id, data)
return data
parsed_data = get_item_data(data, "Backyard", (), parsed_data)
return parsed_data
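# Added note: parsed_data is keyed by tuples alternating item kind and systemId
# (the backyard, optionally a body of water, then the item itself), so a length-6
# key identifies an item nested in a body of water; OmniLogicEntity below slices
# these keys (item_id[:2], item_id[:4]) to look up its backyard and body-of-water parents.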
class OmniLogicEntity(CoordinatorEntity):
"""Defines the base OmniLogic entity."""
def __init__(
self,
coordinator: OmniLogicUpdateCoordinator,
kind: str,
name: str,
item_id: tuple,
icon: str,
):
"""Initialize the OmniLogic Entity."""
super().__init__(coordinator)
bow_id = None
entity_data = coordinator.data[item_id]
backyard_id = item_id[:2]
if len(item_id) == 6:
bow_id = item_id[:4]
msp_system_id = coordinator.data[backyard_id]["systemId"]
entity_friendly_name = f"{coordinator.data[backyard_id]['BackyardName']} "
unique_id = f"{msp_system_id}"
if bow_id is not None:
unique_id = f"{unique_id}_{coordinator.data[bow_id]['systemId']}"
entity_friendly_name = (
f"{entity_friendly_name}{coordinator.data[bow_id]['Name']} "
)
unique_id = f"{unique_id}_{coordinator.data[item_id]['systemId']}_{kind}"
if entity_data.get("Name") is not None:
entity_friendly_name = f"{entity_friendly_name} {entity_data['Name']}"
entity_friendly_name = f"{entity_friendly_name} {name}"
unique_id = unique_id.replace(" ", "_")
self._kind = kind
self._name = entity_friendly_name
self._unique_id = unique_id
self._item_id = item_id
self._icon = icon
self._attrs = {}
self._msp_system_id = msp_system_id
self._backyard_name = coordinator.data[backyard_id]["BackyardName"]
@property
def unique_id(self) -> str:
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self._unique_id
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self):
"""Return the icon for the entity."""
return self._icon
@property
def device_state_attributes(self):
"""Return the attributes."""
return self._attrs
@property
def device_info(self):
"""Define the device as back yard/MSP System."""
return {
ATTR_IDENTIFIERS: {(DOMAIN, self._msp_system_id)},
ATTR_NAME: self._backyard_name,
ATTR_MANUFACTURER: "Hayward",
ATTR_MODEL: "OmniLogic",
}
|
from __future__ import unicode_literals
import os
import sys
import traceback
js_cache = None
def execjs_encode(item):
"""execute js function and need modify code"""
global js_cache
# 1. Set the JS function name to call, and point to a local JS file or paste the JS code string below.
js_call_function = 'hex_md5'
js_file_local_path = r"js_md5.js"
js_code_for_string = """ """
if js_cache:
resource = js_cache
elif os.path.isfile(js_file_local_path):
with open(js_file_local_path, 'r') as f:
resource = f.read()
js_cache = resource  # cache the file content so later calls skip the read
else:
resource = js_code_for_string
if "execjs" not in sys.modules:
try:
import execjs
except ImportError:
exit("[-] Please: pip install PyExecJS")
exec_compile = __import__("execjs").compile(resource.encode('utf8').decode('utf8'))
try:
# 2. If extra parameters are needed, pass them after item, e.g.:
# return exec_compile.call(js_call_function, item, "param1", "param2")
return exec_compile.call(js_call_function, item,)
except Exception:
print("[-] execjs error:\n")
exit(traceback.print_exc())
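# Rough usage sketch (added; not part of the original module): assuming js_md5.js
# defines a hex_md5(text) function, execjs_encode simply proxies the call:
#
#     execjs_encode("123456")  # -> the hex MD5 digest computed by the JS function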
|
from unittest import TestCase
import numpy as np
from scattertext.Scalers import percentile_alphabetical
class TestPercentile_lexicographic(TestCase):
def test_percentile_lexicographic(self):
scores = [1, 1, 5, 18, 1, 3]
text = ['c', 'a', 'five', 'eighteen', 'b', 'three']
ranking = percentile_alphabetical(scores, text)
np.testing.assert_array_almost_equal(ranking, np.array([0.4, 0, 0.8, 1., 0.2, 0.6]))
|
from typing import Any
from homeassistant.components.scene import Scene
from homeassistant.helpers.event import async_call_later
from .const import ATTR_DESCRIPTION, DOMAIN, NEXIA_DEVICE, UPDATE_COORDINATOR
from .entity import NexiaEntity
SCENE_ACTIVATION_TIME = 5
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up automations for a Nexia device."""
nexia_data = hass.data[DOMAIN][config_entry.entry_id]
nexia_home = nexia_data[NEXIA_DEVICE]
coordinator = nexia_data[UPDATE_COORDINATOR]
entities = []
# Automation switches
for automation_id in nexia_home.get_automation_ids():
automation = nexia_home.get_automation_by_id(automation_id)
entities.append(NexiaAutomationScene(coordinator, automation))
async_add_entities(entities, True)
class NexiaAutomationScene(NexiaEntity, Scene):
"""Provides Nexia automation support."""
def __init__(self, coordinator, automation):
"""Initialize the automation scene."""
super().__init__(
coordinator,
name=automation.name,
unique_id=automation.automation_id,
)
self._automation = automation
@property
def device_state_attributes(self):
"""Return the scene specific state attributes."""
data = super().device_state_attributes
data[ATTR_DESCRIPTION] = self._automation.description
return data
@property
def icon(self):
"""Return the icon of the automation scene."""
return "mdi:script-text-outline"
async def async_activate(self, **kwargs: Any) -> None:
"""Activate an automation scene."""
await self.hass.async_add_executor_job(self._automation.activate)
async def refresh_callback(_):
await self.coordinator.async_refresh()
async_call_later(self.hass, SCENE_ACTIVATION_TIME, refresh_callback)
|
from appconf import AppConf
from weblate.utils.classloader import ClassLoader
class VCSConf(AppConf):
VCS_BACKENDS = (
"weblate.vcs.git.GitRepository",
"weblate.vcs.git.GitWithGerritRepository",
"weblate.vcs.git.SubversionRepository",
"weblate.vcs.git.GithubRepository",
"weblate.vcs.git.GitLabRepository",
"weblate.vcs.git.PagureRepository",
"weblate.vcs.git.LocalRepository",
"weblate.vcs.git.GitForcePushRepository",
"weblate.vcs.mercurial.HgRepository",
)
VCS_CLONE_DEPTH = 1
# GitHub username for sending pull requests
GITHUB_USERNAME = None
GITHUB_TOKEN = None
GITHUB_CREDENTIALS = {}
# GitLab username for sending merge requests
GITLAB_USERNAME = None
GITLAB_TOKEN = None
GITLAB_CREDENTIALS = {}
    # Pagure username for sending merge requests
PAGURE_USERNAME = None
PAGURE_TOKEN = None
PAGURE_CREDENTIALS = {}
class Meta:
prefix = ""
class VcsClassLoader(ClassLoader):
def __init__(self):
super().__init__("VCS_BACKENDS", False)
self.errors = {}
def load_data(self):
result = super().load_data()
for key, vcs in list(result.items()):
try:
version = vcs.get_version()
except Exception as error:
supported = False
self.errors[vcs.name] = str(error)
else:
supported = vcs.is_supported()
if not supported:
self.errors[vcs.name] = f"Outdated version: {version}"
if not supported or not vcs.is_configured():
result.pop(key)
return result
# Initialize VCS list
VCS_REGISTRY = VcsClassLoader()
|
import asyncio
import logging
import os
from aiohttp import ClientError, ClientResponseError
from august.api_async import ApiAsync
from august.authenticator_async import AuthenticationState, AuthenticatorAsync
from homeassistant.const import (
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers import aiohttp_client
from .const import (
CONF_ACCESS_TOKEN_CACHE_FILE,
CONF_INSTALL_ID,
CONF_LOGIN_METHOD,
DEFAULT_AUGUST_CONFIG_FILE,
VERIFICATION_CODE_KEY,
)
from .exceptions import CannotConnect, InvalidAuth, RequireValidation
_LOGGER = logging.getLogger(__name__)
class AugustGateway:
"""Handle the connection to August."""
def __init__(self, hass):
"""Init the connection."""
self._aiohttp_session = aiohttp_client.async_get_clientsession(hass)
self._token_refresh_lock = asyncio.Lock()
self._access_token_cache_file = None
self._hass = hass
self._config = None
self.api = None
self.authenticator = None
self.authentication = None
@property
def access_token(self):
"""Access token for the api."""
return self.authentication.access_token
def config_entry(self):
"""Config entry."""
return {
CONF_LOGIN_METHOD: self._config[CONF_LOGIN_METHOD],
CONF_USERNAME: self._config[CONF_USERNAME],
CONF_PASSWORD: self._config[CONF_PASSWORD],
CONF_INSTALL_ID: self._config.get(CONF_INSTALL_ID),
CONF_TIMEOUT: self._config.get(CONF_TIMEOUT),
CONF_ACCESS_TOKEN_CACHE_FILE: self._access_token_cache_file,
}
async def async_setup(self, conf):
"""Create the api and authenticator objects."""
if conf.get(VERIFICATION_CODE_KEY):
return
self._access_token_cache_file = conf.get(
CONF_ACCESS_TOKEN_CACHE_FILE,
f".{conf[CONF_USERNAME]}{DEFAULT_AUGUST_CONFIG_FILE}",
)
self._config = conf
self.api = ApiAsync(
self._aiohttp_session, timeout=self._config.get(CONF_TIMEOUT)
)
self.authenticator = AuthenticatorAsync(
self.api,
self._config[CONF_LOGIN_METHOD],
self._config[CONF_USERNAME],
self._config[CONF_PASSWORD],
install_id=self._config.get(CONF_INSTALL_ID),
access_token_cache_file=self._hass.config.path(
self._access_token_cache_file
),
)
await self.authenticator.async_setup_authentication()
async def async_authenticate(self):
"""Authenticate with the details provided to setup."""
self.authentication = None
try:
self.authentication = await self.authenticator.async_authenticate()
if self.authentication.state == AuthenticationState.AUTHENTICATED:
# Call the locks api to verify we are actually
                # authenticated, because we can be authenticated
                # but have no access
await self.api.async_get_operable_locks(self.access_token)
except ClientResponseError as ex:
if ex.status == HTTP_UNAUTHORIZED:
raise InvalidAuth from ex
raise CannotConnect from ex
except ClientError as ex:
_LOGGER.error("Unable to connect to August service: %s", str(ex))
raise CannotConnect from ex
if self.authentication.state == AuthenticationState.BAD_PASSWORD:
raise InvalidAuth
if self.authentication.state == AuthenticationState.REQUIRES_VALIDATION:
raise RequireValidation
if self.authentication.state != AuthenticationState.AUTHENTICATED:
_LOGGER.error("Unknown authentication state: %s", self.authentication.state)
raise InvalidAuth
return self.authentication
async def async_reset_authentication(self):
"""Remove the cache file."""
await self._hass.async_add_executor_job(self._reset_authentication)
def _reset_authentication(self):
"""Remove the cache file."""
if os.path.exists(self._access_token_cache_file):
os.unlink(self._access_token_cache_file)
async def async_refresh_access_token_if_needed(self):
"""Refresh the august access token if needed."""
if self.authenticator.should_refresh():
async with self._token_refresh_lock:
refreshed_authentication = (
await self.authenticator.async_refresh_access_token(force=False)
)
_LOGGER.info(
"Refreshed august access token. The old token expired at %s, and the new token expires at %s",
self.authentication.access_token_expires,
refreshed_authentication.access_token_expires,
)
self.authentication = refreshed_authentication
|
import asyncio
import logging
from typing import Sequence, TypeVar, Union
from pyatv import AppleTVDevice, connect_to_apple_tv, scan_for_apple_tvs
from pyatv.exceptions import DeviceAuthenticationError
import voluptuous as vol
from homeassistant.components.discovery import SERVICE_APPLE_TV
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_NAME
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = "apple_tv"
SERVICE_SCAN = "apple_tv_scan"
SERVICE_AUTHENTICATE = "apple_tv_authenticate"
ATTR_ATV = "atv"
ATTR_POWER = "power"
CONF_LOGIN_ID = "login_id"
CONF_START_OFF = "start_off"
CONF_CREDENTIALS = "credentials"
DEFAULT_NAME = "Apple TV"
DATA_APPLE_TV = "data_apple_tv"
DATA_ENTITIES = "data_apple_tv_entities"
KEY_CONFIG = "apple_tv_configuring"
NOTIFICATION_AUTH_ID = "apple_tv_auth_notification"
NOTIFICATION_AUTH_TITLE = "Apple TV Authentication"
NOTIFICATION_SCAN_ID = "apple_tv_scan_notification"
NOTIFICATION_SCAN_TITLE = "Apple TV Scan"
T = TypeVar("T")
# This version of ensure_list interprets an empty dict as no value
def ensure_list(value: Union[T, Sequence[T]]) -> Sequence[T]:
"""Wrap value in list if it is not one."""
if value is None or (isinstance(value, dict) and not value):
return []
return value if isinstance(value, list) else [value]
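# A few worked examples of the behaviour described above (values are
# illustrative only):
#   ensure_list(None)          -> []
#   ensure_list({})            -> []   (an empty dict means "no value")
#   ensure_list({"host": "x"}) -> [{"host": "x"}]
#   ensure_list("Apple TV")    -> ["Apple TV"]
#   ensure_list([1, 2])        -> [1, 2]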
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
ensure_list,
[
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_LOGIN_ID): cv.string,
vol.Optional(CONF_CREDENTIALS): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_START_OFF, default=False): cv.boolean,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
# Currently no attributes but it might change later
APPLE_TV_SCAN_SCHEMA = vol.Schema({})
APPLE_TV_AUTHENTICATE_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
def request_configuration(hass, config, atv, credentials):
"""Request configuration steps from the user."""
configurator = hass.components.configurator
async def configuration_callback(callback_data):
"""Handle the submitted configuration."""
pin = callback_data.get("pin")
try:
await atv.airplay.finish_authentication(pin)
hass.components.persistent_notification.async_create(
f"Authentication succeeded!<br /><br />"
f"Add the following to credentials: "
f"in your apple_tv configuration:<br /><br />{credentials}",
title=NOTIFICATION_AUTH_TITLE,
notification_id=NOTIFICATION_AUTH_ID,
)
except DeviceAuthenticationError as ex:
hass.components.persistent_notification.async_create(
f"Authentication failed! Did you enter correct PIN?<br /><br />Details: {ex}",
title=NOTIFICATION_AUTH_TITLE,
notification_id=NOTIFICATION_AUTH_ID,
)
hass.async_add_job(configurator.request_done, instance)
instance = configurator.request_config(
"Apple TV Authentication",
configuration_callback,
description="Please enter PIN code shown on screen.",
submit_caption="Confirm",
fields=[{"id": "pin", "name": "PIN Code", "type": "password"}],
)
async def scan_apple_tvs(hass):
"""Scan for devices and present a notification of the ones found."""
atvs = await scan_for_apple_tvs(hass.loop, timeout=3)
devices = []
for atv in atvs:
login_id = atv.login_id
if login_id is None:
login_id = "Home Sharing disabled"
devices.append(
f"Name: {atv.name}<br />Host: {atv.address}<br />Login ID: {login_id}"
)
if not devices:
devices = ["No device(s) found"]
found_devices = "<br /><br />".join(devices)
hass.components.persistent_notification.async_create(
f"The following devices were found:<br /><br />{found_devices}",
title=NOTIFICATION_SCAN_TITLE,
notification_id=NOTIFICATION_SCAN_ID,
)
async def async_setup(hass, config):
"""Set up the Apple TV component."""
if DATA_APPLE_TV not in hass.data:
hass.data[DATA_APPLE_TV] = {}
async def async_service_handler(service):
"""Handle service calls."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
if service.service == SERVICE_SCAN:
hass.async_add_job(scan_apple_tvs, hass)
return
if entity_ids:
devices = [
device
for device in hass.data[DATA_ENTITIES]
if device.entity_id in entity_ids
]
else:
devices = hass.data[DATA_ENTITIES]
for device in devices:
if service.service != SERVICE_AUTHENTICATE:
continue
atv = device.atv
credentials = await atv.airplay.generate_credentials()
await atv.airplay.load_credentials(credentials)
_LOGGER.debug("Generated new credentials: %s", credentials)
await atv.airplay.start_authentication()
hass.async_add_job(request_configuration, hass, config, atv, credentials)
async def atv_discovered(service, info):
"""Set up an Apple TV that was auto discovered."""
await _setup_atv(
hass,
config,
{
CONF_NAME: info["name"],
CONF_HOST: info["host"],
CONF_LOGIN_ID: info["properties"]["hG"],
CONF_START_OFF: False,
},
)
discovery.async_listen(hass, SERVICE_APPLE_TV, atv_discovered)
tasks = [_setup_atv(hass, config, conf) for conf in config.get(DOMAIN, [])]
if tasks:
await asyncio.wait(tasks)
hass.services.async_register(
DOMAIN, SERVICE_SCAN, async_service_handler, schema=APPLE_TV_SCAN_SCHEMA
)
hass.services.async_register(
DOMAIN,
SERVICE_AUTHENTICATE,
async_service_handler,
schema=APPLE_TV_AUTHENTICATE_SCHEMA,
)
return True
async def _setup_atv(hass, hass_config, atv_config):
"""Set up an Apple TV."""
name = atv_config.get(CONF_NAME)
host = atv_config.get(CONF_HOST)
login_id = atv_config.get(CONF_LOGIN_ID)
start_off = atv_config.get(CONF_START_OFF)
credentials = atv_config.get(CONF_CREDENTIALS)
if host in hass.data[DATA_APPLE_TV]:
return
details = AppleTVDevice(name, host, login_id)
session = async_get_clientsession(hass)
atv = connect_to_apple_tv(details, hass.loop, session=session)
if credentials:
await atv.airplay.load_credentials(credentials)
power = AppleTVPowerManager(hass, atv, start_off)
hass.data[DATA_APPLE_TV][host] = {ATTR_ATV: atv, ATTR_POWER: power}
hass.async_create_task(
discovery.async_load_platform(
hass, "media_player", DOMAIN, atv_config, hass_config
)
)
hass.async_create_task(
discovery.async_load_platform(hass, "remote", DOMAIN, atv_config, hass_config)
)
class AppleTVPowerManager:
"""Manager for global power management of an Apple TV.
An instance is used per device to share the same power state between
several platforms.
"""
def __init__(self, hass, atv, is_off):
"""Initialize power manager."""
self.hass = hass
self.atv = atv
self.listeners = []
self._is_on = not is_off
def init(self):
"""Initialize power management."""
if self._is_on:
self.atv.push_updater.start()
@property
def turned_on(self):
"""Return true if device is on or off."""
return self._is_on
def set_power_on(self, value):
"""Change if a device is on or off."""
if value != self._is_on:
self._is_on = value
if not self._is_on:
self.atv.push_updater.stop()
else:
self.atv.push_updater.start()
for listener in self.listeners:
self.hass.async_create_task(listener.async_update_ha_state())
|
import pytest
import socket
from kombu import Connection, Exchange, Queue, Consumer, Producer
class test_MemoryTransport:
def setup(self):
self.c = Connection(transport='memory')
self.e = Exchange('test_transport_memory')
self.q = Queue('test_transport_memory',
exchange=self.e,
routing_key='test_transport_memory')
self.q2 = Queue('test_transport_memory2',
exchange=self.e,
routing_key='test_transport_memory2')
self.fanout = Exchange('test_transport_memory_fanout', type='fanout')
self.q3 = Queue('test_transport_memory_fanout1',
exchange=self.fanout)
self.q4 = Queue('test_transport_memory_fanout2',
exchange=self.fanout)
def test_driver_version(self):
assert self.c.transport.driver_version()
def test_produce_consume_noack(self):
channel = self.c.channel()
producer = Producer(channel, self.e)
consumer = Consumer(channel, self.q, no_ack=True)
for i in range(10):
producer.publish({'foo': i}, routing_key='test_transport_memory')
_received = []
def callback(message_data, message):
_received.append(message)
consumer.register_callback(callback)
consumer.consume()
while 1:
if len(_received) == 10:
break
self.c.drain_events()
assert len(_received) == 10
def test_produce_consume_fanout(self):
producer = self.c.Producer()
consumer = self.c.Consumer([self.q3, self.q4])
producer.publish(
{'hello': 'world'},
declare=consumer.queues,
exchange=self.fanout,
)
assert self.q3(self.c).get().payload == {'hello': 'world'}
assert self.q4(self.c).get().payload == {'hello': 'world'}
assert self.q3(self.c).get() is None
assert self.q4(self.c).get() is None
def test_produce_consume(self):
channel = self.c.channel()
producer = Producer(channel, self.e)
consumer1 = Consumer(channel, self.q)
consumer2 = Consumer(channel, self.q2)
self.q2(channel).declare()
for i in range(10):
producer.publish({'foo': i}, routing_key='test_transport_memory')
for i in range(10):
producer.publish({'foo': i}, routing_key='test_transport_memory2')
_received1 = []
_received2 = []
def callback1(message_data, message):
_received1.append(message)
message.ack()
def callback2(message_data, message):
_received2.append(message)
message.ack()
consumer1.register_callback(callback1)
consumer2.register_callback(callback2)
consumer1.consume()
consumer2.consume()
while 1:
if len(_received1) + len(_received2) == 20:
break
self.c.drain_events()
assert len(_received1) + len(_received2) == 20
# compression
producer.publish({'compressed': True},
routing_key='test_transport_memory',
compression='zlib')
m = self.q(channel).get()
assert m.payload == {'compressed': True}
# queue.delete
for i in range(10):
producer.publish({'foo': i}, routing_key='test_transport_memory')
assert self.q(channel).get()
self.q(channel).delete()
self.q(channel).declare()
assert self.q(channel).get() is None
# queue.purge
for i in range(10):
producer.publish({'foo': i}, routing_key='test_transport_memory2')
assert self.q2(channel).get()
self.q2(channel).purge()
assert self.q2(channel).get() is None
def test_drain_events(self):
with pytest.raises(socket.timeout):
self.c.drain_events(timeout=0.1)
c1 = self.c.channel()
c2 = self.c.channel()
with pytest.raises(socket.timeout):
self.c.drain_events(timeout=0.1)
        del c1  # so pyflakes doesn't complain.
        del c2
def test_drain_events_unregistered_queue(self):
c1 = self.c.channel()
producer = self.c.Producer()
consumer = self.c.Consumer([self.q2])
producer.publish(
{'hello': 'world'},
declare=consumer.queues,
routing_key=self.q2.routing_key,
exchange=self.q2.exchange,
)
message = consumer.queues[0].get()._raw
class Cycle:
def get(self, callback, timeout=None):
return (message, 'foo'), c1
self.c.transport.cycle = Cycle()
self.c.drain_events()
def test_queue_for(self):
chan = self.c.channel()
chan.queues.clear()
x = chan._queue_for('foo')
assert x
assert chan._queue_for('foo') is x
# see the issue
# https://github.com/celery/kombu/issues/1050
def test_producer_on_return(self):
def on_return(_exception, _exchange, _routing_key, _message):
pass
channel = self.c.channel()
producer = Producer(channel, on_return=on_return)
consumer = self.c.Consumer([self.q3])
producer.publish(
{'hello': 'on return'},
declare=consumer.queues,
exchange=self.fanout,
)
assert self.q3(self.c).get().payload == {'hello': 'on return'}
assert self.q3(self.c).get() is None
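# A minimal, self-contained sketch of the produce/consume round trip the tests
# above exercise. It relies only on kombu's in-memory transport (no broker
# needed); the entity names are arbitrary.
if __name__ == '__main__':
    conn = Connection(transport='memory')
    demo_exchange = Exchange('demo')
    demo_queue = Queue('demo', exchange=demo_exchange, routing_key='demo')
    channel = conn.channel()
    producer = Producer(channel, demo_exchange)
    producer.publish({'hello': 'world'}, routing_key='demo', declare=[demo_queue])
    message = demo_queue(channel).get()
    assert message is not None and message.payload == {'hello': 'world'}
    print('memory transport round trip ok:', message.payload)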
|
import numpy as np
from scattertext.Scalers import percentile_min
from scattertext.termranking import AbsoluteFrequencyRanker
class ScatterChartData(object):
def __init__(self,
minimum_term_frequency=3,
minimum_not_category_term_frequency=0,
jitter=None,
seed=0,
pmi_threshold_coefficient=3,
max_terms=None,
filter_unigrams=False,
term_ranker=AbsoluteFrequencyRanker,
use_non_text_features=False,
term_significance=None,
terms_to_include=None,
score_transform=percentile_min):
'''
Parameters
----------
minimum_term_frequency : int, optional
Minimum times an ngram has to be seen to be included. Default is 3.
minimum_not_category_term_frequency : int, optional
            If an n-gram does not occur in the category, the minimum number of
            times it must be seen to be included. Default is 0.
jitter : float, optional
Maximum amount of noise to be added to points, 0.2 is a lot. Default is None to disable jitter.
seed : float, optional
Random seed. Default 0
pmi_threshold_coefficient : int
Filter out bigrams with a PMI of < 2 * pmi_threshold_coefficient. Default is 3
max_terms : int, optional
Maximum number of terms to include in visualization
filter_unigrams : bool, optional
If True, remove unigrams that are part of bigrams. Default is False.
term_ranker : TermRanker, optional
TermRanker class for determining term frequency ranks.
use_non_text_features : bool, default = False
Use non-BoW features (e.g., Empath) instead of text features
term_significance : TermSignificance instance or None
Way of getting significance scores. If None, p values will not be added.
terms_to_include : set or None
Only annotate these terms in chart
score_transform : function
Transforms original scores into value between 0 and 1. Default is percentile_min
'''
self.jitter = jitter
self.minimum_term_frequency = minimum_term_frequency
self.minimum_not_category_term_frequency = minimum_not_category_term_frequency
self.seed = seed
self.pmi_threshold_coefficient = pmi_threshold_coefficient
self.filter_unigrams = filter_unigrams
self.term_ranker = term_ranker
self.max_terms = max_terms
self.use_non_text_features = use_non_text_features
self.term_significance = term_significance
self.terms_to_include = terms_to_include
self.score_transform = score_transform
np.random.seed(seed)
|
import os
from molecule import logger
from molecule import util
LOG = logger.get_logger(__name__)
VALID_KEYS = [
'created',
'converged',
'driver',
'prepared',
'sanity_checked',
]
class InvalidState(Exception):
"""
Exception class raised when an error occurs in :class:`.State`.
"""
pass
class State(object):
"""
A class which manages the state file. Intended to be used as a singleton
throughout a given Molecule config. The initial state is serialized to
disk if the file does not exist, otherwise is deserialized from the
existing state file. Changes made to the object are immediately
serialized.
    State is not a top level option in Molecule's config. Its purpose is for
bookkeeping, and each :class:`.Config` object has a reference to a State_
object.
.. note::
        Currently, its use is significantly smaller than it was in v1 of
Molecule.
"""
def __init__(self, config):
"""
        Initialize a new state class and return None.
:param config: An instance of a Molecule config.
:returns: None
"""
self._config = config
self._state_file = self._get_state_file()
self._data = self._get_data()
self._write_state_file()
def marshal(func):
def wrapper(self, *args, **kwargs):
func(self, *args, **kwargs)
self._write_state_file()
return wrapper
@property
def state_file(self):
return self._state_file
@property
def converged(self):
return self._data.get('converged')
@property
def created(self):
return self._data.get('created')
@property
def driver(self):
return self._data.get('driver')
@property
def prepared(self):
return self._data.get('prepared')
@property
def sanity_checked(self):
return self._data.get('sanity_checked')
@marshal
def reset(self):
self._data = self._default_data()
@marshal
def change_state(self, key, value):
"""
Changes the state of the instance data with the given
``key`` and the provided ``value``.
Wrapping with a decorator is probably not necessary.
:param key: A ``str`` containing the key to update
:param value: A value to change the ``key`` to
:return: None
"""
if key not in VALID_KEYS:
raise InvalidState
self._data[key] = value
def _get_data(self):
if os.path.isfile(self.state_file):
return self._load_file()
return self._default_data()
def _default_data(self):
return {
'converged': False,
'created': False,
'driver': None,
'prepared': None,
'sanity_checked': False,
}
def _load_file(self):
return util.safe_load_file(self.state_file)
def _write_state_file(self):
util.write_file(self.state_file, util.safe_dump(self._data))
def _get_state_file(self):
return os.path.join(self._config.scenario.ephemeral_directory,
'state.yml')
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from preprocessing import preprocessing_factory
from configs.kitti_config import config
from nets.mobilenetdet import scale_bboxes
from datasets import dataset_factory
from tensorflow.contrib import slim
dataset = dataset_factory.get_dataset(
'kitti', 'train', '/home/zehao/Dataset/KITII/tfrecord')
# def conver_box(bboxes, img_h, img_w):
# [ymin, xmin, ymax, xmax] = tf.unstack(bboxes, axis=1)
# img_h = tf.cast(img_h, tf.float32)
# img_w = tf.cast(img_w, tf.float32)
# ymin = tf.truediv(ymin, img_h)
# xmin = tf.truediv(xmin, img_w)
# ymax = tf.truediv(ymax, img_h)
# xmax = tf.truediv(xmax, img_w)
# return tf.expand_dims(tf.stack([ymin,xmin,ymax,xmax], axis=1), axis=0)
with tf.Graph().as_default() as graph:
with tf.device('/cpu:0'):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=1,
common_queue_capacity=20 * 1,
common_queue_min=10 * 1)
[image, shape, bbox, label] = provider.get(['image', 'shape', 'object/bbox', 'object/label'])
bbox = scale_bboxes(bbox, shape)
bbox = tf.expand_dims(bbox, axis=0)
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
'mobilenetdet',
is_training=True)
image, gt_labels, gt_bboxes = image_preprocessing_fn(image,
config.IMG_HEIGHT,
config.IMG_WIDTH,
labels=label,
bboxes=bbox,
)
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(coord=coord)
# [image, bbox, label] = sess.run([image, gt_bboxes, gt_labels])
summary_writer = tf.summary.FileWriter("/home/zehao/PycharmProjects/MobileNet/summary", sess.graph)
merge = tf.summary.merge_all()
merge = sess.run(merge)
summary_writer.add_summary(merge)
summary_writer.close()
|
from homeassistant.components.light import SUPPORT_BRIGHTNESS, LightEntity
from . import DOMAIN as QWIKSWITCH, QSToggleEntity
async def async_setup_platform(hass, _, add_entities, discovery_info=None):
"""Add lights from the main Qwikswitch component."""
if discovery_info is None:
return
qsusb = hass.data[QWIKSWITCH]
devs = [QSLight(qsid, qsusb) for qsid in discovery_info[QWIKSWITCH]]
add_entities(devs)
class QSLight(QSToggleEntity, LightEntity):
"""Light based on a Qwikswitch relay/dimmer module."""
@property
def brightness(self):
"""Return the brightness of this light (0-255)."""
return self.device.value if self.device.is_dimmer else None
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS if self.device.is_dimmer else 0
|
from homeassistant import setup
import homeassistant.components.sleepiq as sleepiq
from tests.async_mock import MagicMock, patch
from tests.common import load_fixture
CONFIG = {"sleepiq": {"username": "foo", "password": "bar"}}
def mock_responses(mock, single=False):
"""Mock responses for SleepIQ."""
base_url = "https://prod-api.sleepiq.sleepnumber.com/rest/"
if single:
suffix = "-single"
else:
suffix = ""
mock.put(base_url + "login", text=load_fixture("sleepiq-login.json"))
mock.get(base_url + "bed?_k=0987", text=load_fixture(f"sleepiq-bed{suffix}.json"))
mock.get(base_url + "sleeper?_k=0987", text=load_fixture("sleepiq-sleeper.json"))
mock.get(
base_url + "bed/familyStatus?_k=0987",
text=load_fixture(f"sleepiq-familystatus{suffix}.json"),
)
async def test_setup(hass, requests_mock):
"""Test the setup."""
mock_responses(requests_mock)
# We're mocking the load_platform discoveries or else the platforms
    # will be set up during teardown when blocking till done, but the mocks
# are no longer active.
with patch("homeassistant.helpers.discovery.load_platform", MagicMock()):
assert sleepiq.setup(hass, CONFIG)
async def test_setup_login_failed(hass, requests_mock):
"""Test the setup if a bad username or password is given."""
mock_responses(requests_mock)
requests_mock.put(
"https://prod-api.sleepiq.sleepnumber.com/rest/login",
status_code=401,
json=load_fixture("sleepiq-login-failed.json"),
)
response = sleepiq.setup(hass, CONFIG)
assert not response
async def test_setup_component_no_login(hass):
"""Test the setup when no login is configured."""
conf = CONFIG.copy()
del conf["sleepiq"]["username"]
assert not await setup.async_setup_component(hass, sleepiq.DOMAIN, conf)
async def test_setup_component_no_password(hass):
"""Test the setup when no password is configured."""
conf = CONFIG.copy()
del conf["sleepiq"]["password"]
assert not await setup.async_setup_component(hass, sleepiq.DOMAIN, conf)
|
from flexx import flx
class SineExample(flx.Widget):
def init(self):
time = [i/100 for i in range(100)]
with flx.VBox():
with flx.HBox():
flx.Label(text='Frequency:')
self.slider1 = flx.Slider(min=1, max=10, value=5, flex=1)
flx.Label(text='Phase:')
self.slider2 = flx.Slider(min=0, max=6, value=0, flex=1)
self.plot = flx.PlotWidget(flex=1, xdata=time, xlabel='time',
ylabel='amplitude', title='a sinusoid')
@flx.reaction
def __update_amplitude(self, *events):
global Math
freq, phase = self.slider1.value, self.slider2.value
ydata = []
for x in self.plot.xdata:
ydata.append(Math.sin(freq*x*2*Math.PI+phase))
self.plot.set_data(self.plot.xdata, ydata)
if __name__ == '__main__':
m = flx.launch(SineExample)
flx.run()
|
import temescal
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import STATE_ON
SUPPORT_LG = (
SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_SELECT_SOURCE
| SUPPORT_SELECT_SOUND_MODE
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the LG platform."""
if discovery_info is not None:
add_entities([LGDevice(discovery_info)])
class LGDevice(MediaPlayerEntity):
"""Representation of an LG soundbar device."""
def __init__(self, discovery_info):
"""Initialize the LG speakers."""
self._host = discovery_info.get("host")
self._port = discovery_info.get("port")
properties = discovery_info.get("properties")
self._uuid = properties.get("UUID")
self._name = ""
self._volume = 0
self._volume_min = 0
self._volume_max = 0
self._function = -1
self._functions = []
self._equaliser = -1
self._equalisers = []
self._mute = 0
self._rear_volume = 0
self._rear_volume_min = 0
self._rear_volume_max = 0
self._woofer_volume = 0
self._woofer_volume_min = 0
self._woofer_volume_max = 0
self._bass = 0
self._treble = 0
self._device = None
async def async_added_to_hass(self):
"""Register the callback after hass is ready for it."""
await self.hass.async_add_executor_job(self._connect)
def _connect(self):
"""Perform the actual devices setup."""
self._device = temescal.temescal(
self._host, port=self._port, callback=self.handle_event
)
self.update()
def handle_event(self, response):
"""Handle responses from the speakers."""
data = response["data"]
if response["msg"] == "EQ_VIEW_INFO":
if "i_bass" in data:
self._bass = data["i_bass"]
if "i_treble" in data:
self._treble = data["i_treble"]
if "ai_eq_list" in data:
self._equalisers = data["ai_eq_list"]
if "i_curr_eq" in data:
self._equaliser = data["i_curr_eq"]
elif response["msg"] == "SPK_LIST_VIEW_INFO":
if "i_vol" in data:
self._volume = data["i_vol"]
if "s_user_name" in data:
self._name = data["s_user_name"]
if "i_vol_min" in data:
self._volume_min = data["i_vol_min"]
if "i_vol_max" in data:
self._volume_max = data["i_vol_max"]
if "b_mute" in data:
self._mute = data["b_mute"]
if "i_curr_func" in data:
self._function = data["i_curr_func"]
elif response["msg"] == "FUNC_VIEW_INFO":
if "i_curr_func" in data:
self._function = data["i_curr_func"]
if "ai_func_list" in data:
self._functions = data["ai_func_list"]
elif response["msg"] == "SETTING_VIEW_INFO":
if "i_rear_min" in data:
self._rear_volume_min = data["i_rear_min"]
if "i_rear_max" in data:
self._rear_volume_max = data["i_rear_max"]
if "i_rear_level" in data:
self._rear_volume = data["i_rear_level"]
if "i_woofer_min" in data:
self._woofer_volume_min = data["i_woofer_min"]
if "i_woofer_max" in data:
self._woofer_volume_max = data["i_woofer_max"]
if "i_woofer_level" in data:
self._woofer_volume = data["i_woofer_level"]
if "i_curr_eq" in data:
self._equaliser = data["i_curr_eq"]
if "s_user_name" in data:
self._name = data["s_user_name"]
self.schedule_update_ha_state()
def update(self):
"""Trigger updates from the device."""
self._device.get_eq()
self._device.get_info()
self._device.get_func()
self._device.get_settings()
self._device.get_product_info()
# Temporary fix until handling of unknown equaliser settings is integrated in the temescal library
for equaliser in self._equalisers:
if equaliser >= len(temescal.equalisers):
temescal.equalisers.append("unknown " + str(equaliser))
@property
def unique_id(self):
"""Return the device's unique ID."""
return self._uuid
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
if self._volume_max != 0:
return self._volume / self._volume_max
return 0
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._mute
@property
def state(self):
"""Return the state of the device."""
return STATE_ON
@property
def sound_mode(self):
"""Return the current sound mode."""
if self._equaliser == -1 or self._equaliser >= len(temescal.equalisers):
return None
return temescal.equalisers[self._equaliser]
@property
def sound_mode_list(self):
"""Return the available sound modes."""
modes = []
for equaliser in self._equalisers:
modes.append(temescal.equalisers[equaliser])
return sorted(modes)
@property
def source(self):
"""Return the current input source."""
if self._function == -1:
return None
return temescal.functions[self._function]
@property
def source_list(self):
"""List of available input sources."""
sources = []
for function in self._functions:
sources.append(temescal.functions[function])
return sorted(sources)
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_LG
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
volume = volume * self._volume_max
self._device.set_volume(int(volume))
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._device.set_mute(mute)
def select_source(self, source):
"""Select input source."""
self._device.set_func(temescal.functions.index(source))
def select_sound_mode(self, sound_mode):
"""Set Sound Mode for Receiver.."""
self._device.set_eq(temescal.equalisers.index(sound_mode))
|
import botocore
from retrying import retry
from lemur.extensions import metrics, sentry
from lemur.plugins.lemur_aws.sts import sts_client
def retry_throttled(exception):
"""
Determines if this exception is due to throttling
:param exception:
:return:
"""
if isinstance(exception, botocore.exceptions.ClientError):
if exception.response["Error"]["Code"] == "NoSuchEntity":
return False
# No need to retry deletion requests if there is a DeleteConflict error.
# This error indicates that the certificate is still attached to an entity
# and cannot be deleted.
if exception.response["Error"]["Code"] == "DeleteConflict":
return False
metrics.send("iam_retry", "counter", 1, metric_tags={"exception": str(exception)})
return True
def get_name_from_arn(arn):
"""
Extract the certificate name from an arn.
:param arn: IAM SSL arn
:return: name of the certificate as uploaded to AWS
"""
return arn.split("/", 1)[1]
def create_arn_from_cert(account_number, region, certificate_name):
"""
Create an ARN from a certificate.
:param account_number:
:param region:
:param certificate_name:
:return:
"""
return "arn:aws:iam::{account_number}:server-certificate/{certificate_name}".format(
account_number=account_number, certificate_name=certificate_name
)
@sts_client("iam")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=25)
def upload_cert(name, body, private_key, path, cert_chain=None, **kwargs):
"""
Upload a certificate to AWS
:param name:
:param body:
:param private_key:
:param cert_chain:
:param path:
:return:
"""
assert isinstance(private_key, str)
client = kwargs.pop("client")
if not path or path == "/":
path = "/"
else:
name = name + "-" + path.strip("/")
metrics.send("upload_cert", "counter", 1, metric_tags={"name": name, "path": path})
try:
if cert_chain:
return client.upload_server_certificate(
Path=path,
ServerCertificateName=name,
CertificateBody=str(body),
PrivateKey=str(private_key),
CertificateChain=str(cert_chain),
)
else:
return client.upload_server_certificate(
Path=path,
ServerCertificateName=name,
CertificateBody=str(body),
PrivateKey=str(private_key),
)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] != "EntityAlreadyExists":
raise e
@sts_client("iam")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=25)
def delete_cert(cert_name, **kwargs):
"""
Delete a certificate from AWS
:param cert_name:
:return:
"""
client = kwargs.pop("client")
metrics.send("delete_cert", "counter", 1, metric_tags={"cert_name": cert_name})
try:
client.delete_server_certificate(ServerCertificateName=cert_name)
except botocore.exceptions.ClientError as e:
if e.response["Error"]["Code"] != "NoSuchEntity":
raise e
@sts_client("iam")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=25)
def get_certificate(name, **kwargs):
"""
Retrieves an SSL certificate.
:return:
"""
client = kwargs.pop("client")
metrics.send("get_certificate", "counter", 1, metric_tags={"name": name})
try:
return client.get_server_certificate(ServerCertificateName=name)["ServerCertificate"]
except client.exceptions.NoSuchEntityException:
sentry.captureException()
return None
@sts_client("iam")
@retry(retry_on_exception=retry_throttled, wait_fixed=2000, stop_max_attempt_number=25)
def get_certificates(**kwargs):
"""
Fetches one page of certificate objects for a given account.
:param kwargs:
:return:
"""
client = kwargs.pop("client")
metrics.send("get_certificates", "counter", 1)
return client.list_server_certificates(**kwargs)
def get_all_certificates(**kwargs):
"""
Use STS to fetch all of the SSL certificates from a given account
"""
certificates = []
account_number = kwargs.get("account_number")
metrics.send(
"get_all_certificates",
"counter",
1,
metric_tags={"account_number": account_number},
)
while True:
response = get_certificates(**kwargs)
metadata = response["ServerCertificateMetadataList"]
for m in metadata:
certificates.append(
get_certificate(
m["ServerCertificateName"], account_number=account_number
)
)
if not response.get("Marker"):
return certificates
else:
kwargs.update(dict(Marker=response["Marker"]))
|
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import (
CONCENTRATION_PARTS_PER_MILLION,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
LIGHT_LUX,
PERCENTAGE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.entity import Entity
from . import FIBARO_DEVICES, FibaroDevice
SENSOR_TYPES = {
"com.fibaro.temperatureSensor": [
"Temperature",
None,
None,
DEVICE_CLASS_TEMPERATURE,
],
"com.fibaro.smokeSensor": [
"Smoke",
CONCENTRATION_PARTS_PER_MILLION,
"mdi:fire",
None,
],
"CO2": ["CO2", CONCENTRATION_PARTS_PER_MILLION, "mdi:cloud", None],
"com.fibaro.humiditySensor": [
"Humidity",
PERCENTAGE,
None,
DEVICE_CLASS_HUMIDITY,
],
"com.fibaro.lightSensor": ["Light", LIGHT_LUX, None, DEVICE_CLASS_ILLUMINANCE],
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Fibaro controller devices."""
if discovery_info is None:
return
add_entities(
[FibaroSensor(device) for device in hass.data[FIBARO_DEVICES]["sensor"]], True
)
class FibaroSensor(FibaroDevice, Entity):
"""Representation of a Fibaro Sensor."""
def __init__(self, fibaro_device):
"""Initialize the sensor."""
self.current_value = None
self.last_changed_time = None
super().__init__(fibaro_device)
self.entity_id = f"{DOMAIN}.{self.ha_id}"
if fibaro_device.type in SENSOR_TYPES:
self._unit = SENSOR_TYPES[fibaro_device.type][1]
self._icon = SENSOR_TYPES[fibaro_device.type][2]
self._device_class = SENSOR_TYPES[fibaro_device.type][3]
else:
self._unit = None
self._icon = None
self._device_class = None
try:
if not self._unit:
if self.fibaro_device.properties.unit == "lux":
self._unit = LIGHT_LUX
elif self.fibaro_device.properties.unit == "C":
self._unit = TEMP_CELSIUS
elif self.fibaro_device.properties.unit == "F":
self._unit = TEMP_FAHRENHEIT
else:
self._unit = self.fibaro_device.properties.unit
except (KeyError, ValueError):
pass
@property
def state(self):
"""Return the state of the sensor."""
return self.current_value
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self._icon
@property
def device_class(self):
"""Return the device class of the sensor."""
return self._device_class
def update(self):
"""Update the state."""
try:
self.current_value = float(self.fibaro_device.properties.value)
except (KeyError, ValueError):
pass
|
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
    # Make sure that rounding down does not reduce the value by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
def expand_input_by_factor(n, divisible_by=8):
return lambda num_inputs: _make_divisible(num_inputs * n, divisible_by)
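# A quick, runnable illustration of the rounding rule above (the literals are
# examples only): values are rounded to the nearest multiple of `divisor`, and
# bumped up one step whenever that rounding would lose more than 10%.
if __name__ == '__main__':
    assert _make_divisible(32, 8) == 32   # already a multiple of 8
    assert _make_divisible(37, 8) == 40   # rounded to the nearest multiple of 8
    assert _make_divisible(11, 8) == 16   # rounding to 8 would lose >10%, so bump up
    assert expand_input_by_factor(6)(24) == 144
    print('all examples hold')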
|
import hashlib
import os as _os
from urllib.request import urlretrieve
import numpy as np
from .backends.api import open_dataset as _open_dataset
from .core.dataarray import DataArray
from .core.dataset import Dataset
_default_cache_dir = _os.sep.join(("~", ".xarray_tutorial_data"))
def file_md5_checksum(fname):
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
hash_md5.update(f.read())
return hash_md5.hexdigest()
# idea borrowed from Seaborn
def open_dataset(
name,
cache=True,
cache_dir=_default_cache_dir,
github_url="https://github.com/pydata/xarray-data",
branch="master",
**kws,
):
"""
Open a dataset from the online repository (requires internet).
If a local copy is found then always use that to avoid network traffic.
Parameters
----------
name : str
Name of the file containing the dataset. If no suffix is given, assumed
to be netCDF ('.nc' is appended)
e.g. 'air_temperature'
cache_dir : str, optional
The directory in which to search for and write cached data.
cache : bool, optional
If True, then cache data locally for use on subsequent calls
github_url : str
Github repository where the data is stored
branch : str
The git branch to download from
kws : dict, optional
Passed to xarray.open_dataset
See Also
--------
xarray.open_dataset
"""
root, ext = _os.path.splitext(name)
if not ext:
ext = ".nc"
fullname = root + ext
longdir = _os.path.expanduser(cache_dir)
localfile = _os.sep.join((longdir, fullname))
md5name = fullname + ".md5"
md5file = _os.sep.join((longdir, md5name))
if not _os.path.exists(localfile):
# This will always leave this directory on disk.
# May want to add an option to remove it.
if not _os.path.isdir(longdir):
_os.mkdir(longdir)
url = "/".join((github_url, "raw", branch, fullname))
urlretrieve(url, localfile)
url = "/".join((github_url, "raw", branch, md5name))
urlretrieve(url, md5file)
localmd5 = file_md5_checksum(localfile)
with open(md5file) as f:
remotemd5 = f.read()
if localmd5 != remotemd5:
_os.remove(localfile)
msg = """
MD5 checksum does not match, try downloading dataset again.
"""
raise OSError(msg)
ds = _open_dataset(localfile, **kws)
if not cache:
ds = ds.load()
_os.remove(localfile)
return ds
def load_dataset(*args, **kwargs):
"""
Open, load into memory, and close a dataset from the online repository
(requires internet).
See Also
--------
open_dataset
"""
with open_dataset(*args, **kwargs) as ds:
return ds.load()
def scatter_example_dataset():
A = DataArray(
np.zeros([3, 11, 4, 4]),
dims=["x", "y", "z", "w"],
coords=[
np.arange(3),
np.linspace(0, 1, 11),
np.arange(4),
0.1 * np.random.randn(4),
],
)
B = 0.1 * A.x ** 2 + A.y ** 2.5 + 0.1 * A.z * A.w
A = -0.1 * A.x + A.y / (5 + A.z) + A.w
ds = Dataset({"A": A, "B": B})
ds["w"] = ["one", "two", "three", "five"]
ds.x.attrs["units"] = "xunits"
ds.y.attrs["units"] = "yunits"
ds.z.attrs["units"] = "zunits"
ds.w.attrs["units"] = "wunits"
ds.A.attrs["units"] = "Aunits"
ds.B.attrs["units"] = "Bunits"
return ds
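# A small runnable demonstration of the synthetic dataset helper above; unlike
# open_dataset/load_dataset it needs no network access.
if __name__ == "__main__":
    example = scatter_example_dataset()
    print(example.A.dims)            # ('x', 'y', 'z', 'w')
    print(example.B.attrs["units"])  # 'Bunits'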
|
import logging
import threading
import pychromecast
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import HomeAssistant
from homeassistant.helpers.dispatcher import dispatcher_send
from .const import (
INTERNAL_DISCOVERY_RUNNING_KEY,
KNOWN_CHROMECAST_INFO_KEY,
SIGNAL_CAST_DISCOVERED,
SIGNAL_CAST_REMOVED,
)
from .helpers import ChromecastInfo, ChromeCastZeroconf
_LOGGER = logging.getLogger(__name__)
def discover_chromecast(hass: HomeAssistant, info: ChromecastInfo):
"""Discover a Chromecast."""
if info.uuid is None:
_LOGGER.error("Discovered chromecast without uuid %s", info)
return
if info.uuid in hass.data[KNOWN_CHROMECAST_INFO_KEY]:
_LOGGER.debug("Discovered update for known chromecast %s", info)
else:
_LOGGER.debug("Discovered chromecast %s", info)
hass.data[KNOWN_CHROMECAST_INFO_KEY][info.uuid] = info
dispatcher_send(hass, SIGNAL_CAST_DISCOVERED, info)
def _remove_chromecast(hass: HomeAssistant, info: ChromecastInfo):
# Removed chromecast
_LOGGER.debug("Removed chromecast %s", info)
dispatcher_send(hass, SIGNAL_CAST_REMOVED, info)
def setup_internal_discovery(hass: HomeAssistant) -> None:
"""Set up the pychromecast internal discovery."""
if INTERNAL_DISCOVERY_RUNNING_KEY not in hass.data:
hass.data[INTERNAL_DISCOVERY_RUNNING_KEY] = threading.Lock()
if not hass.data[INTERNAL_DISCOVERY_RUNNING_KEY].acquire(blocking=False):
# Internal discovery is already running
return
def internal_add_update_callback(uuid, service_name):
"""Handle zeroconf discovery of a new or updated chromecast."""
service = listener.services[uuid]
# For support of deprecated IP based white listing
zconf = ChromeCastZeroconf.get_zeroconf()
service_info = None
tries = 0
while service_info is None and tries < 4:
try:
service_info = zconf.get_service_info(
"_googlecast._tcp.local.", service_name
)
except OSError:
# If the zeroconf fails to receive the necessary data we abort
# adding the service
break
tries += 1
if not service_info:
_LOGGER.warning(
"setup_internal_discovery failed to get info for %s, %s",
uuid,
service_name,
)
return
addresses = service_info.parsed_addresses()
host = addresses[0] if addresses else service_info.server
discover_chromecast(
hass,
ChromecastInfo(
services=service[0],
uuid=service[1],
model_name=service[2],
friendly_name=service[3],
host=host,
port=service_info.port,
),
)
def internal_remove_callback(uuid, service_name, service):
"""Handle zeroconf discovery of a removed chromecast."""
_remove_chromecast(
hass,
ChromecastInfo(
services=service[0],
uuid=service[1],
model_name=service[2],
friendly_name=service[3],
),
)
_LOGGER.debug("Starting internal pychromecast discovery")
listener = pychromecast.CastListener(
internal_add_update_callback,
internal_remove_callback,
internal_add_update_callback,
)
browser = pychromecast.start_discovery(listener, ChromeCastZeroconf.get_zeroconf())
def stop_discovery(event):
"""Stop discovery of new chromecasts."""
_LOGGER.debug("Stopping internal pychromecast discovery")
pychromecast.discovery.stop_discovery(browser)
hass.data[INTERNAL_DISCOVERY_RUNNING_KEY].release()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_discovery)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import ctypes
import errno
import os
import unittest
from absl import command_name
from absl.testing import absltest
import mock
def _get_kernel_process_name():
"""Returns the Kernel's name for our process or an empty string."""
try:
with open('/proc/self/status', 'rt') as status_file:
for line in status_file:
if line.startswith('Name:'):
return line.split(':', 2)[1].strip().encode('ascii', 'replace')
return b''
except IOError:
return b''
def _is_prctl_syscall_available():
try:
libc = ctypes.CDLL('libc.so.6', use_errno=True)
except OSError:
return False
zero = ctypes.c_ulong(0)
try:
status = libc.prctl(zero, zero, zero, zero, zero)
except AttributeError:
return False
if status < 0 and errno.ENOSYS == ctypes.get_errno():
return False
return True
@unittest.skipIf(not _get_kernel_process_name(),
'_get_kernel_process_name() fails.')
class CommandNameTest(absltest.TestCase):
def assertProcessNameSimilarTo(self, new_name):
if not isinstance(new_name, bytes):
new_name = new_name.encode('ascii', 'replace')
actual_name = _get_kernel_process_name()
self.assertTrue(actual_name)
self.assertTrue(new_name.startswith(actual_name),
msg='set {!r} vs found {!r}'.format(new_name, actual_name))
@unittest.skipIf(not os.access('/proc/self/comm', os.W_OK),
'/proc/self/comm is not writeable.')
def test_set_kernel_process_name(self):
new_name = u'ProcessNam0123456789abcdefghijklmnöp'
command_name.set_kernel_process_name(new_name)
self.assertProcessNameSimilarTo(new_name)
@unittest.skipIf(not _is_prctl_syscall_available(),
'prctl() system call missing from libc.so.6.')
def test_set_kernel_process_name_no_proc_file(self):
new_name = b'NoProcFile0123456789abcdefghijklmnop'
mock_open = mock.mock_open()
with mock.patch.object(command_name, 'open', mock_open, create=True):
mock_open.side_effect = IOError('mock open that raises.')
command_name.set_kernel_process_name(new_name)
mock_open.assert_called_with('/proc/self/comm', mock.ANY)
self.assertProcessNameSimilarTo(new_name)
def test_set_kernel_process_name_failure(self):
starting_name = _get_kernel_process_name()
new_name = b'NameTest'
mock_open = mock.mock_open()
mock_ctypes_cdll = mock.patch('ctypes.CDLL')
with mock.patch.object(command_name, 'open', mock_open, create=True):
with mock.patch('ctypes.CDLL') as mock_ctypes_cdll:
mock_open.side_effect = IOError('mock open that raises.')
mock_libc = mock.Mock(['prctl'])
mock_ctypes_cdll.return_value = mock_libc
command_name.set_kernel_process_name(new_name)
mock_open.assert_called_with('/proc/self/comm', mock.ANY)
self.assertEqual(1, mock_libc.prctl.call_count)
self.assertEqual(starting_name, _get_kernel_process_name()) # No change.
def test_make_process_name_useful(self):
test_name = 'hello.from.test'
with mock.patch('sys.argv', [test_name]):
command_name.make_process_name_useful()
self.assertProcessNameSimilarTo(test_name)
if __name__ == '__main__':
absltest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl import flags
import mock
from perfkitbenchmarker.providers.azure import azure_disk
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
class AzureDiskGetDevicePathTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(AzureDiskGetDevicePathTest, self).setUp()
# Patch the __init__ method for simplicity.
with mock.patch.object(azure_disk.AzureDisk, '__init__', lambda self: None):
self.disk = azure_disk.AzureDisk()
self.disk.disk_type = 'NOT_LOCAL'
self.disk.machine_type = 'fake'
def test_get_device_path_starts_at_c(self):
self.disk.lun = 0
self.assertEqual('/dev/sdc', self.disk.GetDevicePath())
def test_get_device_path_eq_z(self):
self.disk.lun = 23
self.assertEqual('/dev/sdz', self.disk.GetDevicePath())
def test_get_device_path_eq_aa(self):
self.disk.lun = 24
self.assertEqual('/dev/sdaa', self.disk.GetDevicePath())
def test_get_device_path_eq_ba(self):
self.disk.lun = 50
self.assertEqual('/dev/sdba', self.disk.GetDevicePath())
def test_get_device_path_greatest_allowable_index(self):
self.disk.lun = 699
self.assertEqual('/dev/sdzz', self.disk.GetDevicePath())
def test_get_device_path_index_too_large(self):
self.disk.lun = 700
with self.assertRaises(azure_disk.TooManyAzureDisksError):
self.disk.GetDevicePath()
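# A plausible reconstruction of the lun -> device path mapping that the tests
# above encode; this is a hypothetical sketch, not the provider's actual
# implementation: luns 0-23 map to /dev/sdc../dev/sdz, and larger luns get a
# two-letter suffix up to /dev/sdzz (lun 699).
def _lun_to_device_path_sketch(lun):
    """Hypothetical helper mirroring the expectations asserted above."""
    if lun < 24:
        return '/dev/sd' + chr(ord('c') + lun)
    index = lun - 24
    if index // 26 > 25:
        raise ValueError('lun too large for a two-letter device suffix')
    return '/dev/sd' + chr(ord('a') + index // 26) + chr(ord('a') + index % 26)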
if __name__ == '__main__':
unittest.main()
|
from tqdm import tqdm
import matchzoo as mz
from .units import StatefulUnit
def build_unit_from_data_pack(
unit: StatefulUnit,
data_pack: mz.DataPack, mode: str = 'both',
flatten: bool = True, verbose: int = 1
) -> StatefulUnit:
"""
Build a :class:`StatefulUnit` from a :class:`DataPack` object.
:param unit: :class:`StatefulUnit` object to be built.
:param data_pack: The input :class:`DataPack` object.
:param mode: One of 'left', 'right', and 'both', to determine the source
data for building the :class:`VocabularyUnit`.
:param flatten: Flatten the datapack or not. `True` to organize the
:class:`DataPack` text as a list, and `False` to organize
:class:`DataPack` text as a list of list.
:param verbose: Verbosity.
:return: A built :class:`StatefulUnit` object.
"""
corpus = []
if flatten:
data_pack.apply_on_text(corpus.extend, mode=mode, verbose=verbose)
else:
data_pack.apply_on_text(corpus.append, mode=mode, verbose=verbose)
if verbose:
description = 'Building ' + unit.__class__.__name__ + \
' from a datapack.'
corpus = tqdm(corpus, desc=description)
unit.fit(corpus)
return unit
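# A minimal usage sketch (the toy data loader and the Vocabulary unit are
# assumed to exist under these names in this matchzoo install; adjust to your
# version):
#
#   import matchzoo as mz
#   data_pack = mz.datasets.toy.load_data()
#   vocab = build_unit_from_data_pack(mz.preprocessors.units.Vocabulary(),
#                                     data_pack, mode='both', verbose=0)
#   # once fitted, the unit's state holds the vocabulary it built from the text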
|
import collections
from collections import Counter
import six
import yaml
from jinja2 import Template
from kalliope.core.NeuronParameterLoader import NeuronParameterLoader
from kalliope.core.Models.MatchedSynapse import MatchedSynapse
from kalliope.core.Utils.Utils import Utils
from kalliope.core.ConfigurationManager import SettingLoader
import logging
logging.basicConfig()
logger = logging.getLogger("kalliope")
class OrderAnalyser:
"""
    This class is used to get a list of synapses that match a given spoken order.
"""
brain = None
settings = None
@classmethod
def __init__(cls):
cls.settings = SettingLoader().settings
@classmethod
def get_matching_synapse(cls, order, brain=None):
"""
Return the list of matching synapses from the given order
:param order: The user order
:param brain: The loaded brain
:return: The List of synapses matching the given order
"""
cls.brain = brain
logger.debug("[OrderAnalyser] Received order: %s" % order)
if isinstance(order, six.binary_type):
order = order.decode('utf-8')
# We use a namedtuple to associate the synapse and the signal of the synapse
synapse_order_tuple = collections.namedtuple('tuple_synapse_matchingOrder',
['synapse', 'matched_order', 'user_order'])
# if the received order is None we can stop the process immediately
if order is None:
return list()
# test each synapse from the brain
list_match_synapse = cls.get_list_match_synapse(order, synapse_order_tuple)
# create a list of MatchedSynapse from the tuple list
list_synapse_to_process = cls.get_list_synapses_to_process(list_match_synapse)
return list_synapse_to_process
@classmethod
def get_list_synapses_to_process(cls, list_match_synapse):
list_synapse_to_process = list()
for tuple_el in list_match_synapse:
new_matching_synapse = MatchedSynapse(matched_synapse=tuple_el.synapse,
matched_order=tuple_el.matched_order,
user_order=tuple_el.user_order)
list_synapse_to_process.append(new_matching_synapse)
return list_synapse_to_process
@classmethod
def get_list_match_synapse(cls, order, synapse_order_tuple):
list_match_synapse = list()
order = order.lower() # Needed for STT Correction
for synapse in cls.brain.synapses:
if synapse.enabled:
for signal in synapse.signals:
logger.debug("[OrderAnalyser] Testing Synapse name %s" % synapse.name)
                    # we are only concerned with synapses that use an "order" type of signal
if signal.name == "order":
# Check if signal is matching the order and parameters.
if cls.is_order_matching_signal(user_order=order,
signal=signal):
order = cls.order_correction(order=order,
signal=signal)
logger.debug("Order found! Run synapse name: %s" % synapse.name)
Utils.print_success("Order matched in the brain. Running synapse \"%s\"" % synapse.name)
list_match_synapse.append(synapse_order_tuple(synapse=synapse,
matched_order=cls.get_signal_order(signal),
user_order=order))
# we matched this synapse with an order, don't need to check another
break
return list_match_synapse
@classmethod
def order_correction(cls, order, signal):
stt_correction_list = list()
stt_correction_file_path = cls.get_stt_correction_file_path(signal)
stt_correction = cls.get_stt_correction(signal)
if stt_correction_file_path is not None:
stt_correction_list = cls.load_stt_correction_file(stt_correction_file_path)
if stt_correction is not None:
stt_correction_list = cls.override_stt_correction_list(stt_correction_list, stt_correction)
if stt_correction_list:
order = cls.override_order_with_correction(order, stt_correction_list)
return order
@staticmethod
def get_stt_correction(signal):
stt_correction = None
try:
stt_correction = signal.parameters["stt-correction"]
logger.debug("[OrderAnalyser] stt-correction provided by user")
except KeyError:
logger.debug("[OrderAnalyser] No stt-correction provided")
finally:
return stt_correction
@staticmethod
def get_not_containing_words(signal):
not_containing_words = None
try:
not_containing_words = signal.parameters['excluded-words']
if isinstance(not_containing_words, str):
logger.debug("[OrderAnalyser] not contain words should be a list not a string.")
not_containing_words = None
raise KeyError
logger.debug("[OrderAnalyser] not-contain provided by user : %s" % not_containing_words)
except KeyError:
logger.debug("[OrderAnalyser] No excluded-words provided, change expected_matching_type to normal")
return not_containing_words
@staticmethod
def get_stt_correction_file_path(signal):
stt_correction_file_path = None
try:
stt_correction_file_path = signal.parameters["stt-correction-file"]
logger.debug("[OrderAnalyser] stt-correction-file provided by user")
except KeyError:
logger.debug("[OrderAnalyser] No stt-correction-file provided")
finally:
return stt_correction_file_path
@staticmethod
def get_matching_type(signal):
expected_matching_type = "normal"
try:
expected_matching_type = signal.parameters["matching-type"]
except KeyError:
logger.debug("[OrderAnalyser] Warning, missing parameter 'matching-type' in order. "
"Fallback to 'normal'")
finally:
return expected_matching_type
@staticmethod
def _get_split_order_without_bracket(order):
"""
Get an order containing brackets, like "hello my name is {{ name }}",
and return a list of words without the bracketed part, like ["hello", "my", "name", "is"]
:param order: sentence to split
:return: list of string without bracket
"""
matches = Utils.find_all_matching_brackets(order)
for match in matches:
order = order.replace(match, "")
# then split
split_order = order.split()
return split_order
@classmethod
def is_not_contain_matching(cls, user_order, signal_order, not_containing_words):
"""
True if :
- all words in the signal_order are present in the user_order
- none of the not_containing_words appear in the user_order
:param user_order: order from the user
:param signal_order: order in the signal
:param not_containing_words: list of words that must not appear in the user order
:return: Boolean
"""
logger.debug(
"[OrderAnalyser] is_not_contain_matching called with user_order: %s, signal_order: %s and should not contains %s"
% (user_order, signal_order, not_containing_words))
# Check that normal matching is valid otherwise returns False
if not cls.is_normal_matching(user_order, signal_order):
return False
for m in not_containing_words:
if m in user_order.split():
return False
return True
@classmethod
def is_normal_matching(cls, user_order, signal_order):
"""
True if :
- all words in the signal_order are present in the user_order
:param user_order: order from the user
:param signal_order: order in the signal
:return: Boolean
"""
logger.debug("[OrderAnalyser] is_normal_matching called with user_order: %s, signal_order: %s" % (user_order,
signal_order))
if not signal_order: return False
split_user_order = user_order.split()
split_signal_order_without_brackets = cls._get_split_order_without_bracket(signal_order)
c1, c2 = Counter(split_signal_order_without_brackets), Counter(split_user_order)
for k, n in c1.items():
if n > c2[k]:
return False
return True
@classmethod
def is_strict_matching(cls, user_order, signal_order):
"""
True if :
- all words in the signal_order are present in the user_order
- the user order contains no additional words
:param user_order: order from the user
:param signal_order: order in the signal
:return: Boolean
"""
logger.debug("[OrderAnalyser] is_strict_matching called with user_order: %s, signal_order: %s" % (user_order,
signal_order))
if cls.is_normal_matching(user_order=user_order, signal_order=signal_order):
# if the signal order contains bracket, we need to instantiate it with loaded parameters from the user order
if Utils.is_containing_bracket(signal_order):
signal_order = cls._get_instantiated_order_signal_from_user_order(signal_order, user_order)
split_user_order = user_order.split()
split_instantiated_signal = signal_order.split()
if len(split_user_order) == len(split_instantiated_signal):
return True
return False
@classmethod
def is_ordered_strict_matching(cls, user_order, signal_order):
"""
True if :
- all words in the signal_order are present in the user_order
- the user order contains no additional words
- the words appear in the same order as in the signal_order
:param user_order: order from the user
:param signal_order: order in the signal
:return: Boolean
"""
logger.debug(
"[OrderAnalyser] ordered_strict_matching called with user_order: %s, signal_order: %s" % (user_order,
signal_order))
if cls.is_normal_matching(user_order=user_order, signal_order=signal_order) and \
cls.is_strict_matching(user_order=user_order, signal_order=signal_order):
# if the signal order contains bracket, we need to instantiate it with loaded parameters from the user order
if Utils.is_containing_bracket(signal_order):
signal_order = cls._get_instantiated_order_signal_from_user_order(signal_order, user_order)
split_user_order = user_order.split()
split_signal_order = signal_order.split()
return split_user_order == split_signal_order
return False
@classmethod
def is_order_matching_signal(cls, user_order, signal):
"""
return True if the user_order matches the provided signal. False otherwise
Note: it applies `stt-correction` and handles `not-contain` matching type.
:param user_order: order from the user
:param signal: The signal to compare with the user_order
:return: True if the order match
"""
matching_type_function = {
"normal": cls.is_normal_matching,
"strict": cls.is_strict_matching,
"ordered-strict": cls.is_ordered_strict_matching,
"not-contain": cls.is_not_contain_matching
}
expected_order_type = cls.get_matching_type(signal)
signal_order = cls.get_signal_order(signal)
user_order = cls.order_correction(user_order, signal)
# Lowercase all incoming
user_order = user_order.lower()
signal_order = signal_order.lower()
if expected_order_type in matching_type_function:
if expected_order_type == "not-contain":
not_containing_words = cls.get_not_containing_words(signal)
return cls.is_not_contain_matching(user_order, signal_order, not_containing_words)
else:
return matching_type_function[expected_order_type](user_order, signal_order)
else:
return cls.is_normal_matching(user_order, signal_order)
@classmethod
def get_signal_order(cls, signal):
signal_order = ""
if isinstance(signal.parameters, str) or isinstance(signal.parameters, six.text_type):
signal_order = signal.parameters
if isinstance(signal.parameters, dict):
try:
signal_order = signal.parameters["text"]
except KeyError:
logger.debug("[OrderAnalyser] Warning, missing parameter 'text' in order. "
"Signal Order will be skipped")
return signal_order
@classmethod
def _get_instantiated_order_signal_from_user_order(cls, signal_order, user_order):
"""
return instantiated signal order with parameters loaded from the user order
E.g:
signal_order = "this is an {{ variable }}
user_order = "this is an order"
returned value is: "this is an order"
:param user_order: the order from the user
:param signal_order: the order with brackets from the synapse
:return: jinja instantiated order from the signal
"""
# get parameters
parameters_from_user_order = NeuronParameterLoader.get_parameters(synapse_order=signal_order,
user_order=user_order)
# we load variables into the expected order from the signal
t = Template(signal_order)
signal_order = t.render(**parameters_from_user_order)
return signal_order
@classmethod
def override_order_with_correction(cls, order, stt_correction):
logger.debug("[OrderAnalyser] override_order_this_correction: stt_correction list: %s" % stt_correction)
logger.debug("[OrderAnalyser] override_order_this_correction: order before correction: %s" % order)
for correction in stt_correction:
try:
input_val = str(correction["input"]).lower()
output_val = str(correction["output"]).lower()
except KeyError as e:
logger.debug("[OrderAnalyser] override_order_this_correction: "
"Missing key %s. Order will not be modified" % e)
return order
if input_val in order:  # no split here: a correction may span more than one word
logger.debug("[OrderAnalyser] STT override '%s' by '%s'" % (input_val, output_val))
order = order.replace(input_val, output_val)
return order
@classmethod
def load_stt_correction_file(cls, stt_correction_file):
stt_correction_file_path = Utils.get_real_file_path(stt_correction_file)
with open(stt_correction_file_path, "r") as correction_file:
stt_correction = yaml.full_load(correction_file)
return stt_correction
@classmethod
def override_stt_correction_list(cls, stt_correction_list_to_override, correction_list):
# add non existing dict
for correction_to_check in correction_list:
if correction_to_check["input"] not in (x["input"] for x in stt_correction_list_to_override):
stt_correction_list_to_override.append(correction_to_check)
# override dict with same input
for correction_to_override in stt_correction_list_to_override:
for correction_to_check in correction_list:
if correction_to_check["input"] == correction_to_override["input"]:
correction_to_override["output"] = correction_to_check["output"]
break
return stt_correction_list_to_override
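# ---------------------------------------------------------------------------
# Illustrative sketch (not part of kalliope): a standalone check of the matching
# semantics documented above. It assumes Utils.find_all_matching_brackets
# recognises the "{{ ... }}" syntax used in signal orders, so bracketed
# parameters are ignored by the normal matching check.
if __name__ == "__main__":
    signal = "what is the weather in {{ location }}"
    # Every word of the signal (brackets excluded) is present in the user order.
    print(OrderAnalyser.is_normal_matching("what is the weather in paris please", signal))  # True
    # "not-contain" matching additionally rejects orders containing excluded words.
    print(OrderAnalyser.is_not_contain_matching(
        "what is the weather in paris", signal, not_containing_words=["tomorrow"]))  # True
    print(OrderAnalyser.is_not_contain_matching(
        "what is the weather in paris tomorrow", signal, not_containing_words=["tomorrow"]))  # False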
|
from datetime import timedelta
import logging
from life360 import Life360Error
import voluptuous as vol
from homeassistant.components.device_tracker import (
CONF_SCAN_INTERVAL,
DOMAIN as DEVICE_TRACKER_DOMAIN,
)
from homeassistant.components.zone import async_active_zone
from homeassistant.const import (
ATTR_BATTERY_CHARGING,
ATTR_ENTITY_ID,
CONF_PREFIX,
LENGTH_FEET,
LENGTH_KILOMETERS,
LENGTH_METERS,
LENGTH_MILES,
STATE_UNKNOWN,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_time_interval
from homeassistant.util.async_ import run_callback_threadsafe
from homeassistant.util.distance import convert
import homeassistant.util.dt as dt_util
from .const import (
CONF_CIRCLES,
CONF_DRIVING_SPEED,
CONF_ERROR_THRESHOLD,
CONF_MAX_GPS_ACCURACY,
CONF_MAX_UPDATE_WAIT,
CONF_MEMBERS,
CONF_SHOW_AS_STATE,
CONF_WARNING_THRESHOLD,
DOMAIN,
SHOW_DRIVING,
SHOW_MOVING,
)
_LOGGER = logging.getLogger(__name__)
SPEED_FACTOR_MPH = 2.25
EVENT_DELAY = timedelta(seconds=30)
ATTR_ADDRESS = "address"
ATTR_AT_LOC_SINCE = "at_loc_since"
ATTR_DRIVING = "driving"
ATTR_LAST_SEEN = "last_seen"
ATTR_MOVING = "moving"
ATTR_PLACE = "place"
ATTR_RAW_SPEED = "raw_speed"
ATTR_SPEED = "speed"
ATTR_WAIT = "wait"
ATTR_WIFI_ON = "wifi_on"
EVENT_UPDATE_OVERDUE = "life360_update_overdue"
EVENT_UPDATE_RESTORED = "life360_update_restored"
def _include_name(filter_dict, name):
if not name:
return False
if not filter_dict:
return True
name = name.lower()
if filter_dict["include"]:
return name in filter_dict["list"]
return name not in filter_dict["list"]
def _exc_msg(exc):
return f"{exc.__class__.__name__}: {exc}"
def _dump_filter(filter_dict, desc, func=lambda x: x):
if not filter_dict:
return
_LOGGER.debug(
"%scluding %s: %s",
"In" if filter_dict["include"] else "Ex",
desc,
", ".join([func(name) for name in filter_dict["list"]]),
)
def setup_scanner(hass, config, see, discovery_info=None):
"""Set up device scanner."""
config = hass.data[DOMAIN]["config"]
apis = hass.data[DOMAIN]["apis"]
Life360Scanner(hass, config, see, apis)
return True
def _utc_from_ts(val):
try:
return dt_util.utc_from_timestamp(float(val))
except (TypeError, ValueError):
return None
def _dt_attr_from_ts(timestamp):
utc = _utc_from_ts(timestamp)
if utc:
return utc
return STATE_UNKNOWN
def _bool_attr_from_int(val):
try:
return bool(int(val))
except (TypeError, ValueError):
return STATE_UNKNOWN
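# Quick standalone illustration (not part of Home Assistant) of the filter dicts
# consumed by _include_name() above: {"include": bool, "list": [lower-case names]}.
# With no filter configured everything is included; an empty name never is.
assert _include_name({"include": True, "list": ["family"]}, "Family") is True
assert _include_name({"include": False, "list": ["family"]}, "Family") is False
assert _include_name(None, "Family") is True
assert _include_name({"include": True, "list": ["family"]}, "") is False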
class Life360Scanner:
"""Life360 device scanner."""
def __init__(self, hass, config, see, apis):
"""Initialize Life360Scanner."""
self._hass = hass
self._see = see
self._max_gps_accuracy = config.get(CONF_MAX_GPS_ACCURACY)
self._max_update_wait = config.get(CONF_MAX_UPDATE_WAIT)
self._prefix = config[CONF_PREFIX]
self._circles_filter = config.get(CONF_CIRCLES)
self._members_filter = config.get(CONF_MEMBERS)
self._driving_speed = config.get(CONF_DRIVING_SPEED)
self._show_as_state = config[CONF_SHOW_AS_STATE]
self._apis = apis
self._errs = {}
self._error_threshold = config[CONF_ERROR_THRESHOLD]
self._warning_threshold = config[CONF_WARNING_THRESHOLD]
self._max_errs = self._error_threshold + 1
self._dev_data = {}
self._circles_logged = set()
self._members_logged = set()
_dump_filter(self._circles_filter, "Circles")
_dump_filter(self._members_filter, "device IDs", self._dev_id)
self._started = dt_util.utcnow()
self._update_life360()
track_time_interval(
self._hass, self._update_life360, config[CONF_SCAN_INTERVAL]
)
def _dev_id(self, name):
return self._prefix + name
def _ok(self, key):
if self._errs.get(key, 0) >= self._max_errs:
_LOGGER.error("%s: OK again", key)
self._errs[key] = 0
def _err(self, key, err_msg):
_errs = self._errs.get(key, 0)
if _errs < self._max_errs:
self._errs[key] = _errs = _errs + 1
msg = f"{key}: {err_msg}"
if _errs >= self._error_threshold:
if _errs == self._max_errs:
msg = f"Suppressing further errors until OK: {msg}"
_LOGGER.error(msg)
elif _errs >= self._warning_threshold:
_LOGGER.warning(msg)
def _exc(self, key, exc):
self._err(key, _exc_msg(exc))
def _prev_seen(self, dev_id, last_seen):
prev_seen, reported = self._dev_data.get(dev_id, (None, False))
if self._max_update_wait:
now = dt_util.utcnow()
most_recent_update = last_seen or prev_seen or self._started
overdue = now - most_recent_update > self._max_update_wait
if overdue and not reported and now - self._started > EVENT_DELAY:
self._hass.bus.fire(
EVENT_UPDATE_OVERDUE,
{ATTR_ENTITY_ID: f"{DEVICE_TRACKER_DOMAIN}.{dev_id}"},
)
reported = True
elif not overdue and reported:
self._hass.bus.fire(
EVENT_UPDATE_RESTORED,
{
ATTR_ENTITY_ID: f"{DEVICE_TRACKER_DOMAIN}.{dev_id}",
ATTR_WAIT: str(last_seen - (prev_seen or self._started)).split(
"."
)[0],
},
)
reported = False
# Don't remember last_seen unless it's really an update.
if not last_seen or prev_seen and last_seen <= prev_seen:
last_seen = prev_seen
self._dev_data[dev_id] = last_seen, reported
return prev_seen
def _update_member(self, member, dev_id):
loc = member.get("location")
try:
last_seen = _utc_from_ts(loc.get("timestamp"))
except AttributeError:
last_seen = None
prev_seen = self._prev_seen(dev_id, last_seen)
if not loc:
err_msg = member["issues"]["title"]
if err_msg:
if member["issues"]["dialog"]:
err_msg += f": {member['issues']['dialog']}"
else:
err_msg = "Location information missing"
self._err(dev_id, err_msg)
return
# Only update when we truly have an update.
if not last_seen:
_LOGGER.warning("%s: Ignoring update because timestamp is missing", dev_id)
return
if prev_seen and last_seen < prev_seen:
_LOGGER.warning(
"%s: Ignoring update because timestamp is older than last timestamp",
dev_id,
)
_LOGGER.debug("%s < %s", last_seen, prev_seen)
return
if last_seen == prev_seen:
return
lat = loc.get("latitude")
lon = loc.get("longitude")
gps_accuracy = loc.get("accuracy")
try:
lat = float(lat)
lon = float(lon)
# Life360 reports accuracy in feet, but Device Tracker expects
# gps_accuracy in meters.
gps_accuracy = round(
convert(float(gps_accuracy), LENGTH_FEET, LENGTH_METERS)
)
except (TypeError, ValueError):
self._err(dev_id, f"GPS data invalid: {lat}, {lon}, {gps_accuracy}")
return
self._ok(dev_id)
msg = f"Updating {dev_id}"
if prev_seen:
msg += f"; Time since last update: {last_seen - prev_seen}"
_LOGGER.debug(msg)
if self._max_gps_accuracy is not None and gps_accuracy > self._max_gps_accuracy:
_LOGGER.warning(
"%s: Ignoring update because expected GPS "
"accuracy (%.0f) is not met: %.0f",
dev_id,
self._max_gps_accuracy,
gps_accuracy,
)
return
# Get raw attribute data, converting empty strings to None.
place = loc.get("name") or None
address1 = loc.get("address1") or None
address2 = loc.get("address2") or None
if address1 and address2:
address = ", ".join([address1, address2])
else:
address = address1 or address2
raw_speed = loc.get("speed") or None
driving = _bool_attr_from_int(loc.get("isDriving"))
moving = _bool_attr_from_int(loc.get("inTransit"))
try:
battery = int(float(loc.get("battery")))
except (TypeError, ValueError):
battery = None
# Try to convert raw speed into real speed.
try:
speed = float(raw_speed) * SPEED_FACTOR_MPH
if self._hass.config.units.is_metric:
speed = convert(speed, LENGTH_MILES, LENGTH_KILOMETERS)
speed = max(0, round(speed))
except (TypeError, ValueError):
speed = STATE_UNKNOWN
# Make driving attribute True if it isn't and we can derive that it
# should be True from other data.
if (
driving in (STATE_UNKNOWN, False)
and self._driving_speed is not None
and speed != STATE_UNKNOWN
):
driving = speed >= self._driving_speed
attrs = {
ATTR_ADDRESS: address,
ATTR_AT_LOC_SINCE: _dt_attr_from_ts(loc.get("since")),
ATTR_BATTERY_CHARGING: _bool_attr_from_int(loc.get("charge")),
ATTR_DRIVING: driving,
ATTR_LAST_SEEN: last_seen,
ATTR_MOVING: moving,
ATTR_PLACE: place,
ATTR_RAW_SPEED: raw_speed,
ATTR_SPEED: speed,
ATTR_WIFI_ON: _bool_attr_from_int(loc.get("wifiState")),
}
# If user wants driving or moving to be shown as state, and current
# location is not in a HA zone, then set location name accordingly.
loc_name = None
active_zone = run_callback_threadsafe(
self._hass.loop, async_active_zone, self._hass, lat, lon, gps_accuracy
).result()
if not active_zone:
if SHOW_DRIVING in self._show_as_state and driving is True:
loc_name = SHOW_DRIVING
elif SHOW_MOVING in self._show_as_state and moving is True:
loc_name = SHOW_MOVING
self._see(
dev_id=dev_id,
location_name=loc_name,
gps=(lat, lon),
gps_accuracy=gps_accuracy,
battery=battery,
attributes=attrs,
picture=member.get("avatar"),
)
def _update_members(self, members, members_updated):
for member in members:
member_id = member["id"]
if member_id in members_updated:
continue
err_key = "Member data"
try:
first = member.get("firstName")
last = member.get("lastName")
if first and last:
full_name = " ".join([first, last])
else:
full_name = first or last
slug_name = cv.slugify(full_name)
include_member = _include_name(self._members_filter, slug_name)
dev_id = self._dev_id(slug_name)
if member_id not in self._members_logged:
self._members_logged.add(member_id)
_LOGGER.debug(
"%s -> %s: will%s be tracked, id=%s",
full_name,
dev_id,
"" if include_member else " NOT",
member_id,
)
sharing = bool(int(member["features"]["shareLocation"]))
except (KeyError, TypeError, ValueError, vol.Invalid):
self._err(err_key, member)
continue
self._ok(err_key)
if include_member and sharing:
members_updated.append(member_id)
self._update_member(member, dev_id)
def _update_life360(self, now=None):
circles_updated = []
members_updated = []
for api in self._apis.values():
err_key = "get_circles"
try:
circles = api.get_circles()
except Life360Error as exc:
self._exc(err_key, exc)
continue
self._ok(err_key)
for circle in circles:
circle_id = circle["id"]
if circle_id in circles_updated:
continue
circles_updated.append(circle_id)
circle_name = circle["name"]
incl_circle = _include_name(self._circles_filter, circle_name)
if circle_id not in self._circles_logged:
self._circles_logged.add(circle_id)
_LOGGER.debug(
"%s Circle: will%s be included, id=%s",
circle_name,
"" if incl_circle else " NOT",
circle_id,
)
try:
places = api.get_circle_places(circle_id)
place_data = "Circle's Places:"
for place in places:
place_data += f"\n- name: {place['name']}"
place_data += f"\n latitude: {place['latitude']}"
place_data += f"\n longitude: {place['longitude']}"
place_data += f"\n radius: {place['radius']}"
if not places:
place_data += " None"
_LOGGER.debug(place_data)
except (Life360Error, KeyError):
pass
if incl_circle:
err_key = f'get_circle_members "{circle_name}"'
try:
members = api.get_circle_members(circle_id)
except Life360Error as exc:
self._exc(err_key, exc)
continue
self._ok(err_key)
self._update_members(members, members_updated)
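# ---------------------------------------------------------------------------
# Standalone sketch (not part of Home Assistant) of the unit handling described
# in _update_member() above: Life360 reports GPS accuracy in feet and a unitless
# raw speed; the scanner converts them to meters and to mph (or km/h when the
# Home Assistant instance uses metric units). Constants here are approximations.
FEET_PER_METER = 3.28084
MILES_PER_KM = 0.621371


def _accuracy_in_meters(accuracy_feet: float) -> int:
    """Rough equivalent of the convert(LENGTH_FEET, LENGTH_METERS) call above."""
    return round(accuracy_feet / FEET_PER_METER)


def _display_speed(raw_speed: float, metric: bool) -> int:
    """Raw speed scaled by SPEED_FACTOR_MPH, optionally converted to km/h."""
    speed_mph = raw_speed * SPEED_FACTOR_MPH
    speed = speed_mph / MILES_PER_KM if metric else speed_mph
    return max(0, round(speed))


# _accuracy_in_meters(165) -> 50, _display_speed(10, metric=True) -> 36 (km/h)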
|
import pygogo as gogo
from itertools import chain
from html.parser import HTMLParser
from meza.compat import decode
from riko.utils import fetch
from riko.bado import coroutine, return_value, microdom
from riko.bado.io import async_url_open
TIMEOUT = 10
logger = gogo.Gogo(__name__, monolog=True).logger
class LinkParser(HTMLParser):
def reset(self):
HTMLParser.reset(self)
self.entry = iter([])
def handle_starttag(self, tag, attrs):
entry = dict(attrs)
alternate = entry.get('rel') == 'alternate'
rss = 'rss' in entry.get('type', '')
if (alternate or rss) and 'href' in entry:
entry['link'] = entry['href']
entry['tag'] = tag
self.entry = chain(self.entry, [entry])
def file2entries(f, parser):
for line in f:
parser.feed(decode(line))
for entry in parser.entry:
yield entry
def doc2entries(document):
for node in document.childNodes:
if hasattr(node, 'attributes') and node.attributes:
entry = node.attributes
alternate = entry.get('rel') == 'alternate'
rss = 'rss' in entry.get('type', '')
else:
alternate = rss = None
if (alternate or rss) and 'href' in entry:
entry['link'] = entry['href']
entry['tag'] = node.nodeName
yield entry
for node in document.childNodes:
for entry in doc2entries(node):
yield entry
@coroutine
def async_get_rss(url, convert_charrefs=False):
try:
f = yield async_url_open(url, timeout=TIMEOUT)
except ValueError:
f = filter(None, url.splitlines())
document = microdom.parse(f, lenient=True)
return_value(doc2entries(document))
def get_rss(url, convert_charrefs=False):
try:
parser = LinkParser(convert_charrefs=convert_charrefs)
except TypeError:
parser = LinkParser()
try:
f = fetch(url, timeout=TIMEOUT)
except ValueError:
f = filter(None, url.splitlines())
return file2entries(f, parser)
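# ---------------------------------------------------------------------------
# Illustrative sketch (not part of riko): feeding LinkParser a small HTML
# snippet shows how alternate/RSS <link> tags are collected into `entry`.
if __name__ == "__main__":
    parser = LinkParser(convert_charrefs=False)
    parser.feed(
        '<link rel="alternate" type="application/rss+xml" '
        'href="http://example.com/feed.xml">')
    for entry in parser.entry:
        print(entry['tag'], entry['link'])  # -> link http://example.com/feed.xml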
|
from .precision import Precision
from .average_precision import AveragePrecision
from .discounted_cumulative_gain import DiscountedCumulativeGain
from .mean_reciprocal_rank import MeanReciprocalRank
from .mean_average_precision import MeanAveragePrecision
from .normalized_discounted_cumulative_gain import \
NormalizedDiscountedCumulativeGain
def list_available() -> list:
from matchzoo.engine.base_metric import BaseMetric
from matchzoo.utils import list_recursive_concrete_subclasses
return list_recursive_concrete_subclasses(BaseMetric)
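if __name__ == "__main__":
    # Illustrative usage sketch (assumption: run inside a matchzoo checkout).
    # list_available() returns the concrete subclasses of BaseMetric, which
    # include the classes imported above (Precision, MeanAveragePrecision, ...).
    print(sorted(cls.__name__ for cls in list_available()))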
|
import copy
import json
import logging
import re
import time
from yandextank.common.util import expand_to_seconds, expand_to_milliseconds
from ...common.interfaces import AbstractCriterion
logger = logging.getLogger(__name__)
class AvgTimeCriterion(AbstractCriterion):
""" average response time criterion """
@staticmethod
def get_type_string():
return 'time'
def __init__(self, autostop, param_str):
AbstractCriterion.__init__(self)
self.seconds_count = 0
params = param_str.split(',')
self.rt_limit = expand_to_milliseconds(params[0])
self.seconds_limit = expand_to_seconds(params[1])
self.autostop = autostop
self.tag = params[2].strip() if len(params) == 3 else None
def notify(self, data, stat):
rt_total, requests_number = self.parse_data(data)
rt_actual = rt_total / 1000.0 / requests_number
if rt_actual > self.rt_limit:
if not self.seconds_count:
self.cause_second = (data, stat)
logger.debug(self.explain())
self.seconds_count += 1
self.autostop.add_counting(self)
if self.seconds_count >= self.seconds_limit:
return True
else:
self.seconds_count = 0
return False
def parse_data(self, data):
requests_number = data["overall"]["interval_real"]["len"]
# Parse data for specific tag if it is present
if self.tag:
if data["tagged"].get(self.tag):
rt_total = data["tagged"][self.tag]["interval_real"]["total"]
requests_number = data["tagged"][self.tag]["interval_real"]["len"]
# rt_total=0 if current tag differs from selected one
else:
rt_total = 0
# Parse data for overall
else:
rt_total = data["overall"]["interval_real"]["total"]
return rt_total, requests_number
def get_rc(self):
return self.RC_TIME
def explain(self):
items = self.get_criterion_parameters()
explanation = "Average response time higher than %(limit)sms for %(seconds_count)ss, since %(since_time)s" % items
if self.tag:
explanation = explanation + " for tag %(tag)s" % items
return explanation
def get_criterion_parameters(self):
parameters = {
'limit': self.rt_limit,
'seconds_count': self.seconds_count,
'seconds_limit': self.seconds_limit,
'since_time': self.cause_second[0]["ts"],
'tag': self.tag
}
return parameters
def widget_explain(self):
items = self.get_criterion_parameters()
return "Avg Time >%(limit)sms for %(seconds_count)s/%(seconds_limit)ss" % items, \
float(self.seconds_count) / self.seconds_limit
class HTTPCodesCriterion(AbstractCriterion):
""" HTTP codes criterion """
@staticmethod
def get_type_string():
return 'http'
def __init__(self, autostop, param_str):
AbstractCriterion.__init__(self)
self.seconds_count = 0
params = param_str.split(',')
self.codes_mask = params[0].lower()
self.codes_regex = re.compile(self.codes_mask.replace("x", '.'))
self.autostop = autostop
level_str = params[1].strip()
if level_str[-1:] == '%':
self.level = float(level_str[:-1]) / 100
self.is_relative = True
else:
self.level = int(level_str)
self.is_relative = False
self.seconds_limit = expand_to_seconds(params[2])
self.tag = params[3].strip() if len(params) == 4 else None
def notify(self, data, stat):
matched_responses, total_responses = self.parse_data(data)
if self.is_relative:
if total_responses:
matched_responses = float(matched_responses) / total_responses
else:
matched_responses = 0
logger.debug("HTTP codes matching mask %s: %s/%s", self.codes_mask, matched_responses, self.level)
if matched_responses >= self.level:
if not self.seconds_count:
self.cause_second = (data, stat)
logger.debug(self.explain())
self.seconds_count += 1
self.autostop.add_counting(self)
if self.seconds_count >= self.seconds_limit:
return True
else:
self.seconds_count = 0
return False
def parse_data(self, data):
# Parse data for specific tag
if self.tag:
if data["tagged"].get(self.tag):
total_responses = data["tagged"][self.tag]["interval_real"]["len"]
matched_responses = self.count_matched_codes(
self.codes_regex, data["tagged"][self.tag]["proto_code"]["count"])
# matched_responses=0 if current tag differs from selected one
else:
matched_responses = 0
total_responses = data["overall"]["interval_real"]["len"]
# Parse data for overall
else:
matched_responses = self.count_matched_codes(self.codes_regex, data["overall"]["proto_code"]["count"])
total_responses = data["overall"]["interval_real"]["len"]
return matched_responses, total_responses
def get_rc(self):
return self.RC_HTTP
def get_level_str(self):
""" format level str """
if self.is_relative:
level_str = str(100 * self.level) + "%"
else:
level_str = self.level
return level_str
def explain(self):
items = self.get_criterion_parameters()
explanation = "%(code)s codes count higher than %(level)s for %(seconds_count)ss, since %(since_time)s" % items
if self.tag:
explanation = explanation + " for tag %(tag)s" % items
return explanation
def get_criterion_parameters(self):
parameters = {
'code': self.codes_mask,
'level': self.get_level_str(),
'seconds_count': self.seconds_count,
'seconds_limit': self.seconds_limit,
'since_time': self.cause_second[0].get('ts'),
'tag': self.tag
}
return parameters
def widget_explain(self):
items = self.get_criterion_parameters()
return "HTTP %(code)s>%(level)s for %(seconds_count)s/%(seconds_limit)ss" % items, \
float(self.seconds_count) / self.seconds_limit
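# ---------------------------------------------------------------------------
# Illustrative sketch (not part of yandextank): the "x" wildcard in a codes mask
# such as "5xx" is turned into the regex "5.." by the constructors above, so any
# HTTP code in the 500 range is counted against the threshold.
_example_regex = re.compile("5xx".replace("x", "."))
_matched_codes = [code for code in ("200", "404", "502", "503") if _example_regex.match(code)]
# _matched_codes == ['502', '503']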
class NetCodesCriterion(AbstractCriterion):
""" Net codes criterion """
@staticmethod
def get_type_string():
return 'net'
def __init__(self, autostop, param_str):
AbstractCriterion.__init__(self)
self.seconds_count = 0
params = param_str.split(',')
self.codes_mask = params[0].lower()
self.codes_regex = re.compile(self.codes_mask.replace("x", '.'))
self.autostop = autostop
level_str = params[1].strip()
if level_str[-1:] == '%':
self.level = float(level_str[:-1]) / 100
self.is_relative = True
else:
self.level = int(level_str)
self.is_relative = False
self.seconds_limit = expand_to_seconds(params[2])
self.tag = params[3].strip() if len(params) == 4 else None
def notify(self, data, stat):
matched_responses, total_responses = self.parse_data(data)
if self.is_relative:
if total_responses:
matched_responses = float(matched_responses) / total_responses
else:
matched_responses = 0
logger.debug("Net codes matching mask %s: %s/%s", self.codes_mask, matched_responses, self.level)
if matched_responses >= self.level:
if not self.seconds_count:
self.cause_second = (data, stat)
logger.debug(self.explain())
self.seconds_count += 1
self.autostop.add_counting(self)
if self.seconds_count >= self.seconds_limit:
return True
else:
self.seconds_count = 0
return False
def parse_data(self, data):
# Count data for specific tag if it's present
if self.tag:
if data["tagged"].get(self.tag):
total_responses = data["tagged"][self.tag]["interval_real"]["len"]
code_count = data["tagged"][self.tag]["net_code"]["count"]
codes = copy.deepcopy(code_count)
if '0' in codes:
codes.pop('0')
matched_responses = self.count_matched_codes(self.codes_regex, codes)
# matched_responses=0 if current tag differs from selected one
else:
matched_responses = 0
total_responses = data["overall"]["interval_real"]["len"]
# Count data for overall
else:
code_count = data["overall"]["net_code"]["count"]
total_responses = data["overall"]["interval_real"]["len"]
codes = copy.deepcopy(code_count)
if '0' in codes:
codes.pop('0')
matched_responses = self.count_matched_codes(self.codes_regex, codes)
return matched_responses, total_responses
def get_rc(self):
return self.RC_NET
def get_level_str(self):
""" format level str """
if self.is_relative:
level_str = str(100 * self.level) + "%"
else:
level_str = self.level
return level_str
def explain(self):
items = self.get_criterion_parameters()
explanation = "%(code)s net codes count higher than %(level)s for %(seconds_count)ss, since %(since_time)s" \
% items
if self.tag:
explanation = explanation + " for tag %(tag)s" % items
return explanation
def get_criterion_parameters(self):
parameters = {
'code': self.codes_mask,
'level': self.get_level_str(),
'seconds_count': self.seconds_count,
'seconds_limit': self.seconds_limit,
'since_time': self.cause_second[0].get("ts"),
'tag': self.tag
}
return parameters
def widget_explain(self):
items = self.get_criterion_parameters()
return "Net %(code)s>%(level)s for %(seconds_count)s/%(seconds_limit)ss" % items, \
float(self.seconds_count) / self.seconds_limit
class QuantileCriterion(AbstractCriterion):
""" quantile criterion """
@staticmethod
def get_type_string():
return 'quantile'
def __init__(self, autostop, param_str):
AbstractCriterion.__init__(self)
self.seconds_count = 0
params = param_str.split(',')
self.quantile = float(params[0])
self.rt_limit = expand_to_milliseconds(params[1])
self.seconds_limit = expand_to_seconds(params[2])
self.autostop = autostop
self.tag = params[3].strip() if len(params) == 4 else None
def notify(self, data, stat):
quantiles = self.parse_data(data)
logger.debug('Autostop quantiles for ts %s: %s', data['ts'], quantiles)
if self.quantile not in quantiles.keys():
logger.warning("No quantile %s in %s", self.quantile, quantiles)
if self.quantile in quantiles.keys() and quantiles[self.quantile] / 1000.0 > self.rt_limit:
if not self.seconds_count:
self.cause_second = (data, stat)
self.seconds_count += 1
logger.debug(self.explain())
self.autostop.add_counting(self)
if self.seconds_count >= self.seconds_limit:
return True
else:
self.seconds_count = 0
return False
def parse_data(self, data):
# Parse data for specific tag
if self.tag:
if data["tagged"].get(self.tag):
quantile_values = data["tagged"][self.tag]["interval_real"]["q"]["value"]
# quantile_values empty if current tag differs from selected one
else:
quantile_values = []
# Parse data for overall
else:
quantile_values = data["overall"]["interval_real"]["q"]["value"]
quantiles = dict(zip(data["overall"]["interval_real"]["q"]["q"], quantile_values))
return quantiles
def get_rc(self):
return self.RC_TIME
def explain(self):
items = self.get_criterion_parameters()
explanation = "Percentile %(percentile)s higher than %(limit)sms for %(seconds_count)ss, since %(since_time)s" \
% items
if self.tag:
explanation = explanation + " for tag %(tag)s" % items
return explanation
def get_criterion_parameters(self):
parameters = {
'percentile': self.quantile,
'limit': self.rt_limit,
'seconds_count': self.seconds_count,
'seconds_limit': self.seconds_limit,
'since_time': self.cause_second[0].get("ts"),
'tag': self.tag
}
return parameters
def widget_explain(self):
items = self.get_criterion_parameters()
return "%(percentile)s%% >%(limit)sms for %(seconds_count)s/%(seconds_limit)ss" % items, \
float(self.seconds_count) / self.seconds_limit
class SteadyCumulativeQuantilesCriterion(AbstractCriterion):
""" quantile criterion """
@staticmethod
def get_type_string():
return 'steady_cumulative'
def __init__(self, autostop, param_str):
raise NotImplementedError
AbstractCriterion.__init__(self)
self.seconds_count = 0
self.quantile_hash = ""
self.seconds_limit = expand_to_seconds(param_str.split(',')[0])
self.autostop = autostop
def notify(self, data, stat):
quantiles = dict(
zip(data["overall"]["q"]["q"], data["overall"]["q"]["values"]))
quantile_hash = json.dumps(quantiles)
logging.debug("Cumulative quantiles hash: %s", quantile_hash)
if self.quantile_hash == quantile_hash:
if not self.seconds_count:
self.cause_second = (data, stat)
logger.debug(self.explain())
self.seconds_count += 1
self.autostop.add_counting(self)
if self.seconds_count >= self.seconds_limit:
return True
else:
self.seconds_count = 0
self.quantile_hash = quantile_hash
return False
def get_rc(self):
return self.RC_STEADY
def explain(self):
items = self.get_criterion_parameters()
return "Cumulative percentiles are steady for %ss, since %s" % items
def get_criterion_parameters(self):
parameters = (
self.seconds_count, self.cause_second[0]["ts"])
return parameters
def widget_explain(self):
items = (self.seconds_count, self.seconds_limit)
return "Steady for %s/%ss" % items, float(
self.seconds_count) / self.seconds_limit
class TimeLimitCriterion(AbstractCriterion):
""" time limit criterion """
@staticmethod
def get_type_string():
return 'limit'
def __init__(self, autostop, param_str):
AbstractCriterion.__init__(self)
self.start_time = time.time()
self.end_time = time.time()
self.time_limit = expand_to_seconds(param_str)
def notify(self, data, stat):
self.end_time = time.time()
return (self.end_time - self.start_time) > self.time_limit
def get_rc(self):
return self.RC_TIME
def explain(self):
return "Test time elapsed. Limit: %(limit)ss, actual time: %(actual)ss" % self.get_criterion_parameters()
def get_criterion_parameters(self):
parameters = {
'limit': self.time_limit,
'actual': self.end_time - self.start_time
}
return parameters
def widget_explain(self):
return "Time limit: %(limit)ss, actual time: %(actual)ss" % self.get_criterion_parameters()
|
import os
from datetime import date, timedelta
from glob import glob
from time import time
from typing import List, Optional
from celery import current_task
from celery.schedules import crontab
from django.conf import settings
from django.db import transaction
from django.db.models import Count, F
from django.utils import timezone
from django.utils.translation import gettext as _
from django.utils.translation import ngettext, override
from filelock import Timeout
from weblate.addons.models import Addon
from weblate.auth.models import User, get_anonymous
from weblate.lang.models import Language
from weblate.trans.autotranslate import AutoTranslate
from weblate.trans.exceptions import FileParseError
from weblate.trans.models import (
Change,
Comment,
Component,
Project,
Suggestion,
Translation,
)
from weblate.utils.celery import app
from weblate.utils.data import data_dir
from weblate.utils.errors import report_error
from weblate.utils.files import remove_tree
from weblate.vcs.base import RepositoryException
@app.task(
trail=False, autoretry_for=(Timeout,), retry_backoff=600, retry_backoff_max=3600
)
def perform_update(cls, pk, auto=False, obj=None):
try:
if obj is None:
if cls == "Project":
obj = Project.objects.get(pk=pk)
else:
obj = Component.objects.get(pk=pk)
if settings.AUTO_UPDATE in ("full", True) or not auto:
obj.do_update()
else:
obj.update_remote_branch()
except FileParseError:
# This is stored as alert, so we can silently ignore here
return
@app.task(
trail=False, autoretry_for=(Timeout,), retry_backoff=600, retry_backoff_max=3600
)
def perform_load(
pk: int,
force: bool = False,
langs: Optional[List[str]] = None,
changed_template: bool = False,
from_link: bool = False,
):
component = Component.objects.get(pk=pk)
component.create_translations(
force=force, langs=langs, changed_template=changed_template, from_link=from_link
)
@app.task(
trail=False, autoretry_for=(Timeout,), retry_backoff=600, retry_backoff_max=3600
)
def perform_commit(pk, *args):
component = Component.objects.get(pk=pk)
component.commit_pending(*args)
@app.task(
trail=False, autoretry_for=(Timeout,), retry_backoff=600, retry_backoff_max=3600
)
def perform_push(pk, *args, **kwargs):
component = Component.objects.get(pk=pk)
component.do_push(*args, **kwargs)
@app.task(trail=False)
def update_component_stats(pk):
component = Component.objects.get(pk=pk)
component.stats.ensure_basic()
@app.task(
trail=False, autoretry_for=(Timeout,), retry_backoff=600, retry_backoff_max=3600
)
def commit_pending(hours=None, pks=None, logger=None):
if pks is None:
components = Component.objects.all()
else:
components = Component.objects.filter(translation__pk__in=pks).distinct()
for component in components.prefetch():
if hours is None:
age = timezone.now() - timedelta(hours=component.commit_pending_age)
else:
age = timezone.now() - timedelta(hours=hours)
last_change = component.stats.last_changed
if not last_change:
continue
if last_change > age:
continue
if not component.needs_commit():
continue
if logger:
logger(f"Committing {component}")
perform_commit.delay(component.pk, "commit_pending", None)
def cleanup_sources(project):
"""Remove stale source Unit objects."""
for component in project.component_set.filter(template="").iterator():
translation = component.source_translation
# Skip translations with a filename (e.g. when a POT file is present)
if translation.filename:
continue
with transaction.atomic():
# Remove all units where there is just one referenced unit (self)
translation.unit_set.annotate(Count("unit")).filter(
unit__count__lte=1
).delete()
@app.task(trail=False)
def cleanup_project(pk):
"""Perform cleanup of project models."""
try:
project = Project.objects.get(pk=pk)
except Project.DoesNotExist:
return
cleanup_sources(project)
@app.task(trail=False)
def cleanup_suggestions():
# Process suggestions
anonymous_user = get_anonymous()
suggestions = Suggestion.objects.prefetch_related("unit")
for suggestion in suggestions:
with transaction.atomic():
# Remove suggestions with same text as real translation
if (
suggestion.unit.target == suggestion.target
and suggestion.unit.translated
):
suggestion.delete_log(
anonymous_user, change=Change.ACTION_SUGGESTION_CLEANUP
)
continue
# Remove duplicate suggestions
sugs = Suggestion.objects.filter(
unit=suggestion.unit, target=suggestion.target
).exclude(id=suggestion.id)
# Do not rely on the SQL comparison alone, as MySQL compares strings case-insensitively
for other in sugs:
if other.target == suggestion.target:
suggestion.delete_log(
anonymous_user, change=Change.ACTION_SUGGESTION_CLEANUP
)
break
@app.task(trail=False)
def update_remotes():
"""Update all remote branches (without attempt to merge)."""
if settings.AUTO_UPDATE not in ("full", "remote", True, False):
return
for component in Component.objects.with_repo().iterator():
perform_update("Component", -1, auto=True, obj=component)
@app.task(trail=False)
def cleanup_stale_repos():
prefix = data_dir("vcs")
vcs_mask = os.path.join(prefix, "*", "*")
yesterday = time() - 86400
for path in glob(vcs_mask):
if not os.path.isdir(path):
continue
# Skip recently modified paths
if os.path.getmtime(path) > yesterday:
continue
# Parse path
project, component = os.path.split(path[len(prefix) + 1 :])
# Find matching components
objects = Component.objects.with_repo().filter(
slug=component, project__slug=project
)
# Remove stale dirs
if not objects.exists():
remove_tree(path)
@app.task(trail=False)
def cleanup_old_suggestions():
if not settings.SUGGESTION_CLEANUP_DAYS:
return
cutoff = timezone.now() - timedelta(days=settings.SUGGESTION_CLEANUP_DAYS)
Suggestion.objects.filter(timestamp__lt=cutoff).delete()
@app.task(trail=False)
def cleanup_old_comments():
if not settings.COMMENT_CLEANUP_DAYS:
return
cutoff = timezone.now() - timedelta(days=settings.COMMENT_CLEANUP_DAYS)
Comment.objects.filter(timestamp__lt=cutoff).delete()
@app.task(trail=False)
def repository_alerts(threshold=settings.REPOSITORY_ALERT_THRESHOLD):
non_linked = Component.objects.with_repo()
for component in non_linked.iterator():
try:
if component.repository.count_missing() > threshold:
component.add_alert("RepositoryOutdated")
else:
component.delete_alert("RepositoryOutdated")
if component.repository.count_outgoing() > threshold:
component.add_alert("RepositoryChanges")
else:
component.delete_alert("RepositoryChanges")
except RepositoryException as error:
report_error(cause="Could not check repository status")
component.add_alert("MergeFailure", error=component.error_text(error))
@app.task(trail=False)
def component_alerts(component_ids=None):
if component_ids:
components = Component.objects.filter(pk__in=component_ids).iterator()
else:
components = Component.objects.iterator()
for component in components:
component.update_alerts()
@app.task(trail=False, autoretry_for=(Component.DoesNotExist,), retry_backoff=60)
def component_after_save(
pk, changed_git, changed_setup, changed_template, changed_variant, skip_push
):
component = Component.objects.get(pk=pk)
component.after_save(
changed_git, changed_setup, changed_template, changed_variant, skip_push
)
return {"component": pk}
@app.task(trail=False)
def component_removal(pk, uid):
user = User.objects.get(pk=uid)
try:
obj = Component.objects.get(pk=pk)
obj.acting_user = user
Change.objects.create(
project=obj.project,
action=Change.ACTION_REMOVE_COMPONENT,
target=obj.slug,
user=user,
author=user,
)
obj.delete()
if obj.allow_translation_propagation:
components = obj.project.component_set.filter(
allow_translation_propagation=True
)
for component_id in components.values_list("id", flat=True):
update_checks.delay(component_id)
except Component.DoesNotExist:
return
@app.task(trail=False)
def project_removal(pk, uid):
user = User.objects.get(pk=uid)
try:
obj = Project.objects.get(pk=pk)
Change.objects.create(
action=Change.ACTION_REMOVE_PROJECT, target=obj.slug, user=user, author=user
)
obj.delete()
except Project.DoesNotExist:
return
@app.task(trail=False)
def auto_translate(
user_id,
translation_id,
mode,
filter_type,
auto_source,
component,
engines,
threshold,
):
if user_id:
user = User.objects.get(pk=user_id)
else:
user = None
with override(user.profile.language if user else "en"):
translation = Translation.objects.get(pk=translation_id)
translation.log_info(
"starting automatic translation %s", current_task.request.id
)
auto = AutoTranslate(user, translation, filter_type, mode)
if auto_source == "mt":
auto.process_mt(engines, threshold)
else:
auto.process_others(component)
translation.log_info("completed automatic translation")
if auto.updated == 0:
message = _("Automatic translation completed, no strings were updated.")
else:
message = (
ngettext(
"Automatic translation completed, %d string was updated.",
"Automatic translation completed, %d strings were updated.",
auto.updated,
)
% auto.updated
)
return {"translation": translation_id, "message": message}
@app.task(trail=False)
def create_component(addons_from=None, in_task=False, **kwargs):
kwargs["project"] = Project.objects.get(pk=kwargs["project"])
kwargs["source_language"] = Language.objects.get(pk=kwargs["source_language"])
component = Component.objects.create(**kwargs)
Change.objects.create(action=Change.ACTION_CREATE_COMPONENT, component=component)
if addons_from:
addons = Addon.objects.filter(
component__pk=addons_from, project_scope=False, repo_scope=False
)
for addon in addons:
# Avoid installing duplicate addons
if component.addon_set.filter(name=addon.name).exists():
continue
if not addon.addon.can_install(component, None):
continue
addon.addon.create(component, configuration=addon.configuration)
if in_task:
return None
return component
@app.task(trail=False)
def update_checks(pk):
component = Component.objects.get(pk=pk)
for translation in component.translation_set.exclude(
pk=component.source_translation.pk
).iterator():
for unit in translation.unit_set.iterator():
unit.run_checks()
for unit in component.source_translation.unit_set.iterator():
unit.run_checks()
for translation in component.translation_set.iterator():
translation.invalidate_cache()
@app.task(trail=False)
def daily_update_checks():
# Update every component roughly once a month
components = Component.objects.annotate(idmod=F("id") % 30).filter(
idmod=date.today().day
)
for component_id in components.values_list("id", flat=True):
update_checks.delay(component_id)
@app.on_after_finalize.connect
def setup_periodic_tasks(sender, **kwargs):
sender.add_periodic_task(3600, commit_pending.s(), name="commit-pending")
sender.add_periodic_task(
crontab(hour=3, minute=30), update_remotes.s(), name="update-remotes"
)
sender.add_periodic_task(
crontab(hour=0, minute=30), daily_update_checks.s(), name="daily-update-checks"
)
sender.add_periodic_task(3600 * 24, repository_alerts.s(), name="repository-alerts")
sender.add_periodic_task(3600 * 24, component_alerts.s(), name="component-alerts")
sender.add_periodic_task(
3600 * 24, cleanup_suggestions.s(), name="suggestions-cleanup"
)
sender.add_periodic_task(
3600 * 24, cleanup_stale_repos.s(), name="cleanup-stale-repos"
)
sender.add_periodic_task(
3600 * 24, cleanup_old_suggestions.s(), name="cleanup-old-suggestions"
)
sender.add_periodic_task(
3600 * 24, cleanup_old_comments.s(), name="cleanup-old-comments"
)
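# ---------------------------------------------------------------------------
# Standalone sketch (not part of Weblate): the daily_update_checks() task above
# spreads work over roughly a month by comparing each component id modulo 30 with
# today's day of month, so a given component is selected about once every 30 days.
from datetime import date as _date


def _ids_checked_today(component_ids, today=None):
    """Mimic the `idmod == date.today().day` filter with plain Python."""
    day = (today or _date.today()).day
    return [pk for pk in component_ids if pk % 30 == day]


# _ids_checked_today(range(1, 61), today=_date(2021, 5, 7)) -> [7, 37]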
|
from warnings import warn
from itertools import combinations, permutations
import networkx as nx
from tqdm import tqdm
from joblib import Parallel, delayed
from pgmpy.base import PDAG
from pgmpy.estimators import StructureEstimator
from pgmpy.estimators.CITests import chi_square, pearsonr, independence_match
from pgmpy.global_vars import SHOW_PROGRESS
class PC(StructureEstimator):
def __init__(self, data=None, independencies=None, **kwargs):
"""
Class for constraint-based estimation of DAGs using the PC algorithm
from a given data set. Identifies (conditional) dependencies in data
set using chi_square dependency test and uses the PC algorithm to
estimate a DAG pattern that satisfies the identified dependencies. The
DAG pattern can then be completed to a faithful DAG, if possible.
Parameters
----------
data: pandas DataFrame object
dataframe object where each column represents one variable. (If some
values in the data are missing the data cells should be set to
`numpy.NaN`. Note that pandas converts each column containing
`numpy.NaN`s to dtype `float`.)
References
----------
[1] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques,
2009, Section 18.2
[2] Neapolitan, Learning Bayesian Networks, Section 10.1.2 for the PC algorithm (page 550), http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
"""
super(PC, self).__init__(data=data, independencies=independencies, **kwargs)
def estimate(
self,
variant="stable",
ci_test="chi_square",
max_cond_vars=5,
return_type="dag",
significance_level=0.01,
n_jobs=-1,
show_progress=True,
**kwargs,
):
"""
Estimates a DAG/PDAG from the given dataset using the PC algorithm which
is a constraint-based structure learning algorithm[1]. The independencies
in the dataset are identified by running statistical independence tests. This
method returns a DAG/PDAG structure which is faithful to the independencies
implied by the dataset.
Parameters
----------
variant: str (one of "orig", "stable", "parallel")
The variant of PC algorithm to run.
"orig": The original PC algorithm. Might not give the same
results in different runs but does fewer independence
tests compared to stable.
"stable": Gives the same result in every run but needs to
do more statistical independence tests.
"parallel": Parallel version of PC Stable. Can run on multiple
cores with the same result on each run.
ci_test: str or fun
The statistical test to use for testing conditional independence in
the dataset. If `str` values should be one of:
"independence_match": If using this option, an additional parameter
`independencies` must be specified.
"chi_square": Uses the Chi-Square independence test. This works
only for discrete datasets.
"pearsonr": Uses the pertial correlation based on pearson
correlation coefficient to test independence. This works
only for continuous datasets.
max_cond_vars: int
The maximum number of conditional variables allowed to do the statistical
test with.
return_type: str (one of "dag", "cpdag", "pdag", "skeleton")
The type of structure to return.
If `return_type=pdag` or `return_type=cpdag`: a partially directed structure is returned.
If `return_type=dag`, a fully directed structure is returned if it
is possible to orient all the edges.
If `return_type="skeleton", returns an undirected graph along
with the separating sets.
significance_level: float (default: 0.01)
The statistical tests use this value to compare with the p-value of
the test to decide whether the tested variables are independent or
not. Different tests can treat this parameter differently:
1. Chi-Square: If p-value > significance_level, it assumes that the
independence condition is satisfied in the data.
2. pearsonr: If p-value > significance_level, it assumes that the
independence condition is satisfied in the data.
Returns
-------
model: DAG-instance, PDAG-instance, or (networkx.UndirectedGraph, dict)
The estimated model structure, can be a partially directed graph (PDAG)
or a fully directed graph (DAG), or (Undirected Graph, separating sets)
depending on the value of `return_type` argument.
References
----------
[1] Original PC: P. Spirtes, C. Glymour, and R. Scheines, Causation,
Prediction, and Search, 2nd ed. Cambridge, MA: MIT Press, 2000.
[2] Stable PC: D. Colombo and M. H. Maathuis, “A modification of the PC algorithm
yielding order-independent skeletons,” ArXiv e-prints, Nov. 2012.
[3] Parallel PC: Le, Thuc, et al. "A fast PC algorithm for high dimensional causal
discovery with multi-core PCs." IEEE/ACM transactions on computational
biology and bioinformatics (2016).
Examples
--------
>>> import pandas as pd
>>> import numpy as np
>>> from pgmpy.estimators import PC
>>> data = pd.DataFrame(np.random.randint(0, 5, size=(2500, 3)), columns=list('XYZ'))
>>> data['sum'] = data.sum(axis=1)
>>> print(data)
X Y Z sum
0 3 0 1 4
1 1 4 3 8
2 0 0 3 3
3 0 2 3 5
4 2 1 1 4
... .. .. .. ...
2495 2 3 0 5
2496 1 1 2 4
2497 0 4 2 6
2498 0 0 0 0
2499 2 4 0 6
[2500 rows x 4 columns]
>>> c = PC(data)
>>> model = c.estimate()
>>> print(model.edges())
[('Z', 'sum'), ('X', 'sum'), ('Y', 'sum')]
"""
# Step 0: Do checks that the specified parameters are correct, else throw meaningful error.
if variant not in ("orig", "stable", "parallel"):
raise ValueError(
f"variant must be one of: orig, stable, or parallel. Got: {variant}"
)
elif (not callable(ci_test)) and (
ci_test not in ("chi_square", "independence_match", "pearsonr")
):
raise ValueError(
"ci_test must be a callable or one of: chi_square, pearsonr, independence_match"
)
if (ci_test == "independence_match") and (self.independencies is None):
raise ValueError(
"For using independence_match, independencies argument must be specified"
)
elif (ci_test in ("chi_square", "pearsonr")) and (self.data is None):
raise ValueError(
"For using Chi Square or Pearsonr, data arguement must be specified"
)
# Step 1: Run the PC algorithm to build the skeleton and get the separating sets.
skel, separating_sets = self.build_skeleton(
ci_test=ci_test,
max_cond_vars=max_cond_vars,
significance_level=significance_level,
variant=variant,
n_jobs=n_jobs,
show_progress=show_progress,
**kwargs,
)
if return_type.lower() == "skeleton":
return skel, separating_sets
# Step 2: Orient the edges based on build the PDAG/CPDAG.
pdag = self.skeleton_to_pdag(skel, separating_sets)
# Step 3: Either return the CPDAG or fully orient the edges to build a DAG.
if return_type.lower() in ("pdag", "cpdag"):
return pdag
elif return_type.lower() == "dag":
return pdag.to_dag()
else:
raise ValueError(
f"return_type must be one of: dag, pdag, cpdag, or skeleton. Got: {return_type}"
)
def build_skeleton(
self,
ci_test="chi_square",
max_cond_vars=5,
significance_level=0.01,
variant="stable",
n_jobs=-1,
show_progress=True,
**kwargs,
):
"""
Estimates a graph skeleton (UndirectedGraph) from a set of independencies
using (the first part of) the PC algorithm. The independencies can either be
provided as an instance of the `Independencies`-class or by passing a
decision function that decides any conditional independency assertion.
Returns a tuple `(skeleton, separating_sets)`.
If an Independencies-instance is passed, the contained IndependenceAssertions
have to admit a faithful BN representation. This is the case if
they are obtained as a set of d-separations of some Bayesian network or
if the independence assertions are closed under the semi-graphoid axioms.
Otherwise the procedure may fail to identify the correct structure.
Parameters
----------
Returns
-------
skeleton: UndirectedGraph
An estimate for the undirected graph skeleton of the BN underlying the data.
separating_sets: dict
A dict containing for each pair of not directly connected nodes a
separating set ("witnessing set") of variables that makes then
conditionally independent. (needed for edge orientation procedures)
References
----------
[1] Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
[2] Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 3.4.2.1 (page 85), Algorithm 3.3
Examples
--------
>>> from pgmpy.estimators import PC
>>> from pgmpy.models import DAG
>>> from pgmpy.independencies import Independencies
>>> # build skeleton from list of independencies:
... ind = Independencies(['B', 'C'], ['A', ['B', 'C'], 'D'])
>>> # we need to compute closure, otherwise this set of independencies doesn't
... # admit a faithful representation:
... ind = ind.closure()
>>> skel, sep_sets = PC.build_skeleton("ABCD", ind)
>>> print(skel.edges())
[('A', 'D'), ('B', 'D'), ('C', 'D')]
>>> # build skeleton from d-seperations of DAG:
... model = DAG([('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')])
>>> skel, sep_sets = PC.build_skeleton(model.nodes(), model.get_independencies())
>>> print(skel.edges())
[('A', 'C'), ('B', 'C'), ('B', 'D'), ('C', 'E')]
"""
# Initialize initial values and structures.
lim_neighbors = 0
separating_sets = dict()
if ci_test == "chi_square":
ci_test = chi_square
elif ci_test == "pearsonr":
ci_test = pearsonr
elif ci_test == "independence_match":
ci_test = independence_match
elif callable(ci_test):
ci_test = ci_test
else:
raise ValueError(
f"ci_test must either be chi_square, pearsonr, independence_match, or a function. Got: {ci_test}"
)
if show_progress and SHOW_PROGRESS:
pbar = tqdm(total=max_cond_vars)
pbar.set_description("Working for n conditional variables: 0")
# Step 1: Initialize a fully connected undirected graph
graph = nx.complete_graph(n=self.variables, create_using=nx.Graph)
# Exit condition: 1. all nodes in the graph have fewer than `lim_neighbors` neighbors,
# or 2. `lim_neighbors` is greater than `max_cond_vars`.
while not all(
[len(list(graph.neighbors(var))) < lim_neighbors for var in self.variables]
):
# Step 2: Iterate over the edges and find a conditioning set of
# size `lim_neighbors` which makes u and v independent.
if variant == "orig":
for (u, v) in graph.edges():
for separating_set in combinations(
set(graph.neighbors(u)) - set([v]), lim_neighbors
):
# If a conditioning set exists remove the edge, store the separating set
# and move on to finding conditioning set for next edge.
if ci_test(
u,
v,
separating_set,
data=self.data,
independencies=self.independencies,
significance_level=significance_level,
**kwargs,
):
separating_sets[frozenset((u, v))] = separating_set
graph.remove_edge(u, v)
break
elif variant == "stable":
# In the stable variant, precompute the neighbors once so that edge removals in this pass do not change the conditioning sets.
neighbors = {node: set(graph[node]) for node in graph.nodes()}
for (u, v) in graph.edges():
for separating_set in combinations(
neighbors[u] - set([v]), lim_neighbors
):
# If a conditioning set exists remove the edge, store the
# separating set and move on to finding conditioning set for next edge.
if ci_test(
u,
v,
separating_set,
data=self.data,
independencies=self.independencies,
significance_level=significance_level,
**kwargs,
):
separating_sets[frozenset((u, v))] = separating_set
graph.remove_edge(u, v)
break
elif variant == "parallel":
neighbors = {node: set(graph[node]) for node in graph.nodes()}
def _parallel_fun(u, v):
for separating_set in combinations(
neighbors[u] - set([v]), lim_neighbors
):
if ci_test(
u,
v,
separating_set,
data=self.data,
independencies=self.independencies,
significance_level=significance_level,
**kwargs,
):
return (u, v), separating_set
results = Parallel(n_jobs=n_jobs, prefer="threads")(
delayed(_parallel_fun)(u, v) for (u, v) in graph.edges()
)
for result in results:
if result is not None:
(u, v), sep_set = result
graph.remove_edge(u, v)
separating_sets[frozenset((u, v))] = sep_set
else:
raise ValueError(
f"variant must be one of (orig, stable, parallel). Got: {variant}"
)
# Step 3: After iterating over all the edges, expand the search space by increasing the size
# of conditioning set by 1.
if lim_neighbors >= max_cond_vars:
warn("Reached maximum number of allowed conditional variables. Exiting")
break
lim_neighbors += 1
if show_progress and SHOW_PROGRESS:
pbar.update(1)
pbar.set_description(
f"Working for n conditional variables: {lim_neighbors}"
)
if show_progress and SHOW_PROGRESS:
pbar.close()
return graph, separating_sets
@staticmethod
def skeleton_to_pdag(skeleton, separating_sets):
"""Orients the edges of a graph skeleton based on information from
`separating_sets` to form a DAG pattern (DAG).
Parameters
----------
skeleton: UndirectedGraph
An undirected graph skeleton as e.g. produced by the
estimate_skeleton method.
separating_sets: dict
            A dict containing, for each pair of not directly connected nodes, a
            separating set ("witnessing set") of variables that makes them
            conditionally independent. (Needed for edge orientation.)
Returns
-------
        pdag: PDAG
            An estimate for the DAG pattern of the BN underlying the data. The
            graph may still contain some edges oriented in both directions (X->Y and Y->X).
            Any completion (i.e. removing one of the both-way edges for each
            such pair) results in an I-equivalent Bayesian network DAG.
References
----------
Neapolitan, Learning Bayesian Networks, Section 10.1.2, Algorithm 10.2 (page 550)
http://www.cs.technion.ac.il/~dang/books/Learning%20Bayesian%20Networks(Neapolitan,%20Richard).pdf
Examples
--------
>>> import pandas as pd
>>> import numpy as np
        >>> from pgmpy.estimators import PC
        >>> data = pd.DataFrame(np.random.randint(0, 4, size=(5000, 3)), columns=list('ABD'))
        >>> data['C'] = data['A'] - data['B']
        >>> data['D'] += data['A']
        >>> c = PC(data)
        >>> pdag = c.skeleton_to_pdag(*c.build_skeleton())
>>> pdag.edges() # edges: A->C, B->C, A--D (not directed)
[('B', 'C'), ('A', 'C'), ('A', 'D'), ('D', 'A')]
"""
pdag = skeleton.to_directed()
node_pairs = list(permutations(pdag.nodes(), 2))
# 1) for each X-Z-Y, if Z not in the separating set of X,Y, then orient edges as X->Z<-Y
# (Algorithm 3.4 in Koller & Friedman PGM, page 86)
for pair in node_pairs:
X, Y = pair
if not skeleton.has_edge(X, Y):
for Z in set(skeleton.neighbors(X)) & set(skeleton.neighbors(Y)):
if Z not in separating_sets[frozenset((X, Y))]:
pdag.remove_edges_from([(Z, X), (Z, Y)])
progress = True
while progress: # as long as edges can be oriented (removed)
num_edges = pdag.number_of_edges()
# 2) for each X->Z-Y, orient edges to Z->Y
# (Explanation in Koller & Friedman PGM, page 88)
for pair in node_pairs:
X, Y = pair
if not pdag.has_edge(X, Y):
for Z in (set(pdag.successors(X)) - set(pdag.predecessors(X))) & (
set(pdag.successors(Y)) & set(pdag.predecessors(Y))
):
pdag.remove_edge(Y, Z)
# 3) for each X-Y with a directed path from X to Y, orient edges to X->Y
for pair in node_pairs:
X, Y = pair
if pdag.has_edge(Y, X) and pdag.has_edge(X, Y):
for path in nx.all_simple_paths(pdag, X, Y):
is_directed = True
for src, dst in list(zip(path, path[1:])):
if pdag.has_edge(dst, src):
is_directed = False
if is_directed:
pdag.remove_edge(Y, X)
break
# 4) for each X-Z-Y with X->W, Y->W, and Z-W, orient edges to Z->W
for pair in node_pairs:
X, Y = pair
for Z in (
set(pdag.successors(X))
& set(pdag.predecessors(X))
& set(pdag.successors(Y))
& set(pdag.predecessors(Y))
):
for W in (
(set(pdag.successors(X)) - set(pdag.predecessors(X)))
& (set(pdag.successors(Y)) - set(pdag.predecessors(Y)))
& (set(pdag.successors(Z)) & set(pdag.predecessors(Z)))
):
pdag.remove_edge(W, Z)
progress = num_edges > pdag.number_of_edges()
# TODO: This is temp fix to get a PDAG object.
edges = set(pdag.edges())
undirected_edges = []
directed_edges = []
for u, v in edges:
if (v, u) in edges:
undirected_edges.append((u, v))
else:
directed_edges.append((u, v))
return PDAG(directed_ebunch=directed_edges, undirected_ebunch=undirected_edges)
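

# A minimal end-to-end sketch of the constraint-based workflow implemented
# above, assuming the public ``PC`` estimator exported by ``pgmpy.estimators``
# (the data and the resulting edges are illustrative, not part of the
# original module):
if __name__ == "__main__":
    import numpy as np
    import pandas as pd
    from pgmpy.estimators import PC

    demo = pd.DataFrame(
        np.random.randint(0, 4, size=(5000, 3)), columns=list("ABD")
    )
    demo["C"] = demo["A"] - demo["B"]
    demo["D"] += demo["A"]

    estimator = PC(demo)
    # Build the undirected skeleton, then orient edges into a PDAG.
    skel, sep_sets = estimator.build_skeleton()
    pdag = estimator.skeleton_to_pdag(skel, sep_sets)
    print(sorted(pdag.edges()))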
|
import time
import pyotp
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_TOKEN
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
DEFAULT_NAME = "OTP Sensor"
TIME_STEP = 30 # Default time step assumed by Google Authenticator
ICON = "mdi:update"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
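# Illustrative configuration.yaml entry for this platform (a sketch; the
# platform key "otp" is assumed from the integration name and is not defined
# in this file):
#
#   sensor:
#     - platform: otp
#       name: "Login OTP"
#       token: !secret otp_token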
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the OTP sensor."""
name = config.get(CONF_NAME)
token = config.get(CONF_TOKEN)
async_add_entities([TOTPSensor(name, token)], True)
return True
# Only TOTP supported at the moment, HOTP might be added later
class TOTPSensor(Entity):
"""Representation of a TOTP sensor."""
def __init__(self, name, token):
"""Initialize the sensor."""
self._name = name
self._otp = pyotp.TOTP(token)
self._state = None
self._next_expiration = None
async def async_added_to_hass(self):
"""Handle when an entity is about to be added to Home Assistant."""
self._call_loop()
@callback
def _call_loop(self):
self._state = self._otp.now()
self.async_write_ha_state()
# Update must occur at even TIME_STEP, e.g. 12:00:00, 12:00:30,
# 12:01:00, etc. in order to have synced time (see RFC6238)
self._next_expiration = TIME_STEP - (time.time() % TIME_STEP)
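        # For example, if time.time() % TIME_STEP == 12.4, the next refresh is
        # scheduled 17.6 seconds later, landing exactly on the next 30-second
        # boundary.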
self.hass.loop.call_later(self._next_expiration, self._call_loop)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def icon(self):
"""Return the icon to use in the frontend."""
return ICON
|
import argparse
import json
import os
import socket
from datetime import datetime
import service_configuration_lib
import yaml
from paasta_tools.marathon_tools import get_all_namespaces
from paasta_tools.marathon_tools import get_all_namespaces_for_service
from paasta_tools.utils import atomic_file_write
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import DEFAULT_SOA_DIR
YOCALHOST = "169.254.255.254"
def parse_args():
parser = argparse.ArgumentParser(description="")
parser.add_argument(
"-o", "--output", help="Output filename.", dest="output_filename", required=True
)
parser.add_argument(
"-f",
"--format",
help="Output format. Defaults to %(default)s",
dest="output_format",
choices=["rfc1700", "yaml", "json"],
default="rfc1700",
)
args = parser.parse_args()
return args
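# Illustrative invocations (the module path and output locations are assumed
# for the example and may differ in a real deployment):
#
#   python -m paasta_tools.generate_services_file -o /etc/services -f rfc1700
#   python -m paasta_tools.generate_services_file -o /var/lib/paasta/services.yaml -f yaml
#   python -m paasta_tools.generate_services_file -o /var/lib/paasta/services.json -f json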
def get_service_lines_for_service(service):
lines = []
config = service_configuration_lib.read_service_configuration(service)
port = config.get("port", None)
description = config.get("description", "No description")
if port is not None:
lines.append("%s\t%d/tcp\t# %s" % (service, port, description))
for namespace, config in get_all_namespaces_for_service(service, full_name=False):
proxy_port = config.get("proxy_port", None)
if proxy_port is not None:
lines.append(
"%s\t%d/tcp\t# %s"
% (compose_job_id(service, namespace), proxy_port, description)
)
return lines
def write_yaml_file(filename):
previous_config = maybe_load_previous_config(filename, yaml.safe_load)
configuration = generate_configuration()
if previous_config and previous_config == configuration:
return
with atomic_file_write(filename) as fp:
fp.write(
"# This file is automatically generated by paasta_tools.\n"
"# It was automatically generated at {now} on {host}.\n".format(
host=socket.getfqdn(), now=datetime.now().isoformat()
)
)
yaml.safe_dump(
configuration,
fp,
indent=2,
explicit_start=True,
default_flow_style=False,
allow_unicode=False,
)
def maybe_load_previous_config(filename, config_loader):
try:
with open(filename, "r") as fp:
previous_config = config_loader(fp)
return previous_config
except Exception:
pass
return None
def generate_configuration():
service_data = get_all_namespaces()
config = {}
for (name, data) in service_data:
proxy_port = data.get("proxy_port")
if proxy_port is None:
continue
config[name] = {"host": YOCALHOST, "port": int(proxy_port)}
return config
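# When dumped as YAML, the generated configuration looks roughly like this
# (service/namespace names and ports are made up for illustration):
#
#   ---
#   example_service.main:
#     host: 169.254.255.254
#     port: 20001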
def write_json_file(filename):
configuration = generate_configuration()
with atomic_file_write(filename) as fp:
json.dump(
obj=configuration, fp=fp, indent=2, sort_keys=True, separators=(",", ": ")
)
def write_rfc1700_file(filename):
strings = [
"# This file is generated by generate_services_file",
"# DO NOT EDIT BY HAND",
]
for service in sorted(os.listdir(DEFAULT_SOA_DIR)):
strings.extend(get_service_lines_for_service(service))
with atomic_file_write(filename) as fp:
fp.write("\n".join(strings))
fp.write("\n")
def main():
args = parse_args()
if args.output_format == "rfc1700":
write_rfc1700_file(filename=args.output_filename)
elif args.output_format == "yaml":
write_yaml_file(filename=args.output_filename)
elif args.output_format == "json":
write_json_file(filename=args.output_filename)
else:
        raise NotImplementedError(
            "Unsupported output format: %s" % args.output_format
        )
if __name__ == "__main__":
main()
|
import importlib
import six
from scrapy.utils.misc import load_object
from . import connection, defaults
# TODO: add SCRAPY_JOB support.
class Scheduler(object):
"""Redis-based scheduler
Settings
--------
SCHEDULER_PERSIST : bool (default: False)
Whether to persist or clear redis queue.
SCHEDULER_FLUSH_ON_START : bool (default: False)
Whether to flush redis queue on start.
SCHEDULER_IDLE_BEFORE_CLOSE : int (default: 0)
How many seconds to wait before closing if no message is received.
SCHEDULER_QUEUE_KEY : str
Scheduler redis key.
SCHEDULER_QUEUE_CLASS : str
Scheduler queue class.
SCHEDULER_DUPEFILTER_KEY : str
Scheduler dupefilter redis key.
SCHEDULER_DUPEFILTER_CLASS : str
Scheduler dupefilter class.
SCHEDULER_SERIALIZER : str
Scheduler serializer.
"""
def __init__(self, server,
persist=False,
flush_on_start=False,
queue_key=defaults.SCHEDULER_QUEUE_KEY,
queue_cls=defaults.SCHEDULER_QUEUE_CLASS,
dupefilter_key=defaults.SCHEDULER_DUPEFILTER_KEY,
dupefilter_cls=defaults.SCHEDULER_DUPEFILTER_CLASS,
idle_before_close=0,
serializer=None):
"""Initialize scheduler.
Parameters
----------
server : Redis
The redis server instance.
persist : bool
Whether to flush requests when closing. Default is False.
flush_on_start : bool
Whether to flush requests on start. Default is False.
queue_key : str
Requests queue key.
queue_cls : str
Importable path to the queue class.
dupefilter_key : str
Duplicates filter key.
dupefilter_cls : str
Importable path to the dupefilter class.
idle_before_close : int
Timeout before giving up.
"""
if idle_before_close < 0:
raise TypeError("idle_before_close cannot be negative")
self.server = server
self.persist = persist
self.flush_on_start = flush_on_start
self.queue_key = queue_key
self.queue_cls = queue_cls
self.dupefilter_cls = dupefilter_cls
self.dupefilter_key = dupefilter_key
self.idle_before_close = idle_before_close
self.serializer = serializer
self.stats = None
def __len__(self):
return len(self.queue)
@classmethod
def from_settings(cls, settings):
kwargs = {
'persist': settings.getbool('SCHEDULER_PERSIST'),
'flush_on_start': settings.getbool('SCHEDULER_FLUSH_ON_START'),
'idle_before_close': settings.getint('SCHEDULER_IDLE_BEFORE_CLOSE'),
}
# If these values are missing, it means we want to use the defaults.
optional = {
            # TODO: Use custom prefixes for these settings to note that they are
            # specific to scrapy-redis.
'queue_key': 'SCHEDULER_QUEUE_KEY',
'queue_cls': 'SCHEDULER_QUEUE_CLASS',
'dupefilter_key': 'SCHEDULER_DUPEFILTER_KEY',
# We use the default setting name to keep compatibility.
'dupefilter_cls': 'DUPEFILTER_CLASS',
'serializer': 'SCHEDULER_SERIALIZER',
}
for name, setting_name in optional.items():
val = settings.get(setting_name)
if val:
kwargs[name] = val
# Support serializer as a path to a module.
if isinstance(kwargs.get('serializer'), six.string_types):
kwargs['serializer'] = importlib.import_module(kwargs['serializer'])
server = connection.from_settings(settings)
# Ensure the connection is working.
server.ping()
return cls(server=server, **kwargs)
@classmethod
def from_crawler(cls, crawler):
instance = cls.from_settings(crawler.settings)
# FIXME: for now, stats are only supported from this constructor
instance.stats = crawler.stats
return instance
def open(self, spider):
self.spider = spider
try:
self.queue = load_object(self.queue_cls)(
server=self.server,
spider=spider,
key=self.queue_key % {'spider': spider.name},
serializer=self.serializer,
)
except TypeError as e:
            raise ValueError("Failed to instantiate queue class '%s': %s"
                             % (self.queue_cls, e))
self.df = load_object(self.dupefilter_cls).from_spider(spider)
if self.flush_on_start:
self.flush()
# notice if there are requests already in the queue to resume the crawl
if len(self.queue):
spider.log("Resuming crawl (%d requests scheduled)" % len(self.queue))
def close(self, reason):
if not self.persist:
self.flush()
def flush(self):
self.df.clear()
self.queue.clear()
def enqueue_request(self, request):
if not request.dont_filter and self.df.request_seen(request):
self.df.log(request, self.spider)
return False
if self.stats:
self.stats.inc_value('scheduler/enqueued/redis', spider=self.spider)
self.queue.push(request)
return True
def next_request(self):
block_pop_timeout = self.idle_before_close
request = self.queue.pop(block_pop_timeout)
if request and self.stats:
self.stats.inc_value('scheduler/dequeued/redis', spider=self.spider)
return request
def has_pending_requests(self):
return len(self) > 0
|