import datetime
import errno
import os
from wsgiref.simple_server import make_server
from pyramid.config import Configurator
from pyramid.response import Response
DRAIN_FILE = "drain"
def drain(request):
if not os.path.exists(DRAIN_FILE):
with open(DRAIN_FILE, "w+") as f:
f.write(str(datetime.datetime.now().timestamp()))
return Response(status_int=200)
def stop_drain(request):
try:
os.remove(DRAIN_FILE)
return Response(status_int=200)
except OSError as e:
if e.errno != errno.ENOENT:
raise
else:
return Response(status_int=200)
def status_drain(request):
if os.path.exists(DRAIN_FILE):
return Response(status_int=200)
else:
return Response(status_int=400)
def safe_to_kill(request):
if os.path.exists(DRAIN_FILE):
with open(DRAIN_FILE) as f:
dt = datetime.datetime.fromtimestamp(float(f.read()))
delta = datetime.datetime.now() - dt
if delta.total_seconds() > 2:
return Response(status_int=200)
else:
return Response(status_int=400)
else:
return Response(status_int=400)
if __name__ == "__main__":
with Configurator() as config:
config.add_route("drain", "/drain")
config.add_route("stop_drain", "/drain/stop")
config.add_route("drain_status", "/drain/status")
config.add_route("drain_safe_to_kill", "/drain/safe_to_kill")
config.add_view(drain, route_name="drain")
config.add_view(stop_drain, route_name="stop_drain")
config.add_view(status_drain, route_name="drain_status")
config.add_view(safe_to_kill, route_name="drain_safe_to_kill")
app = config.make_wsgi_app()
server = make_server("0.0.0.0", 3000, app)
server.serve_forever()
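# Usage sketch (not part of the service): a minimal smoke test for the
# endpoints above, assuming the server is already running on localhost:3000.
from urllib.request import urlopen
from urllib.error import HTTPError

def drain_status_code(path, base="http://localhost:3000"):
    """Return the HTTP status code for a GET against the drain service."""
    try:
        return urlopen(base + path).getcode()
    except HTTPError as err:
        return err.code

# drain_status_code("/drain")              -> 200 (drain file created)
# drain_status_code("/drain/status")       -> 200 while draining, else 400
# drain_status_code("/drain/safe_to_kill") -> 200 once draining for >2 seconds
# drain_status_code("/drain/stop")         -> 200 (drain file removed)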
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from mock import Mock
from diamond.collector import Collector
from flume import FlumeCollector
class TestFlumeCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('FlumeCollector', {
'interval': 10
})
self.collector = FlumeCollector(config, None)
def test_import(self):
self.assertTrue(FlumeCollector)
@patch.object(Collector, 'publish')
@patch.object(Collector, 'publish_gauge')
@patch.object(Collector, 'publish_counter')
def test_collect_should_work(self,
publish_mock,
publish_gauge_mock,
publish_counter_mock):
patch_urlopen = patch('urllib2.urlopen',
Mock(return_value=self.getFixture('metrics')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
metrics = {
'CHANNEL.channel1.ChannelFillPercentage': 0.0,
'CHANNEL.channel1.EventPutAttempt': 50272828,
'CHANNEL.channel1.EventPutSuccess': 50255318,
'CHANNEL.channel1.EventTakeAttempt': 50409933,
'CHANNEL.channel1.EventTakeSuccess': 50255318,
'SINK.sink1.BatchComplete': 251705,
'SINK.sink1.BatchEmpty': 76250,
'SINK.sink1.BatchUnderflow': 379,
'SINK.sink1.ConnectionClosed': 6,
'SINK.sink1.ConnectionCreated': 7,
'SINK.sink1.ConnectionFailed': 0,
'SINK.sink1.EventDrainAttempt': 25190171,
'SINK.sink1.EventDrainSuccess': 25189571,
'SOURCE.source1.AppendAccepted': 0,
'SOURCE.source1.AppendBatchAccepted': 56227,
'SOURCE.source1.AppendBatchReceived': 56258,
'SOURCE.source1.AppendReceived': 0,
'SOURCE.source1.EventAccepted': 50282681,
'SOURCE.source1.EventReceived': 50311681,
'SOURCE.source1.OpenConnection': 0
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany([publish_mock,
publish_gauge_mock,
publish_counter_mock
], metrics)
@patch.object(Collector, 'publish')
def test_blank_should_fail_gracefully(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('metrics_blank')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
self.assertPublishedMany(publish_mock, {})
@patch.object(Collector, 'publish')
def test_invalid_should_fail_gracefully(self, publish_mock):
patch_urlopen = patch(
'urllib2.urlopen',
Mock(return_value=self.getFixture('metrics_invalid')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
self.assertPublishedMany(publish_mock, {})
if __name__ == "__main__":
unittest.main()
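# Note: the manual patch start()/stop() pairs above leave the patch applied if
# collect() raises. An equivalent self-cleaning form (a sketch using the same
# fixture) would be:
#
#     with patch('urllib2.urlopen',
#                Mock(return_value=self.getFixture('metrics'))):
#         self.collector.collect()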
|
from http.server import SimpleHTTPRequestHandler, HTTPServer
import pytest
from mock import MagicMock
from threading import Thread
from yandextank.plugins.Pandora import Plugin
# https://raw.githubusercontent.com/yandex/yandex-tank/develop/README.md
class RequestHandler(SimpleHTTPRequestHandler):
def _do_handle(self):
content = '{"test": "ammo"}'.encode('utf-8')
self.send_response(200)
self.send_header('Content-Type', 'application/json')
self.send_header('Content-Length', len(content))
self.end_headers()
self.wfile.write(content)
def do_GET(self):
self._do_handle()
def do_HEAD(self):
self._do_handle()
SERVER = HTTPServer(('localhost', 1234), RequestHandler)
THREAD = Thread(target=SERVER.serve_forever, name="StatServer")
def setup_module(module):
THREAD.start()
@pytest.mark.parametrize('cfg, expected', [
(
{'pools': [
{
'ammo': {'uri-headers': '[User-Agent: Wget/1.13.4 (linux-gnu)] [Host: foo.ru] [Accept-Encoding: gzip,deflate,sdch]',
'type': 'uri',
'file': 'http://localhost:1234/ammo'
}
}]},
{'pools': [
{
'ammo': {'uri-headers': '[User-Agent: Wget/1.13.4 (linux-gnu)] [Host: foo.ru] [Accept-Encoding: gzip,deflate,sdch]',
'type': 'uri',
'file': 'some local file'}
}]}
)
])
def test_patch_config(cfg, expected):
plugin = Plugin(MagicMock(), {}, 'pandora')
# '/tmp/9b73d966bcbf27467d4c4190cfe58c2a.downloaded_resource'
filename = plugin.patch_config(cfg)['pools'][0]['ammo']['file']
assert filename.endswith('.downloaded_resource')
def teardown_module(module):
SERVER.shutdown()
SERVER.socket.close()
THREAD.join()
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hostname(host):
assert 'instance' == host.check_output('hostname -s')
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
f = host.file('/etc/molecule/instance')
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
def test_hostonly_interface(host):
i = host.interface('eth1').addresses
# NOTE(retr0h): Contains ipv4 and ipv6 addresses.
assert len(i) == 2
def test_internal_interface(host):
assert '192.168.0.1' in host.interface('eth2').addresses
|
import logging
import RFXtrx as rfxtrxmod
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.const import CONF_DEVICES, STATE_ON
from homeassistant.core import callback
from . import (
CONF_AUTOMATIC_ADD,
CONF_DATA_BITS,
CONF_SIGNAL_REPETITIONS,
DEFAULT_SIGNAL_REPETITIONS,
SIGNAL_EVENT,
RfxtrxCommandEntity,
get_device_id,
get_rfx_object,
)
from .const import COMMAND_OFF_LIST, COMMAND_ON_LIST
_LOGGER = logging.getLogger(__name__)
SUPPORT_RFXTRX = SUPPORT_BRIGHTNESS
def supported(event):
"""Return whether an event supports light."""
return (
isinstance(event.device, rfxtrxmod.LightingDevice)
and event.device.known_to_be_dimmable
)
async def async_setup_entry(
hass,
config_entry,
async_add_entities,
):
"""Set up config entry."""
discovery_info = config_entry.data
device_ids = set()
# Add lights from the config entry
entities = []
for packet_id, entity_info in discovery_info[CONF_DEVICES].items():
event = get_rfx_object(packet_id)
if event is None:
_LOGGER.error("Invalid device: %s", packet_id)
continue
if not supported(event):
continue
device_id = get_device_id(
event.device, data_bits=entity_info.get(CONF_DATA_BITS)
)
if device_id in device_ids:
continue
device_ids.add(device_id)
entity = RfxtrxLight(
event.device, device_id, entity_info[CONF_SIGNAL_REPETITIONS]
)
entities.append(entity)
async_add_entities(entities)
@callback
def light_update(event, device_id):
"""Handle light updates from the RFXtrx gateway."""
if not supported(event):
return
if device_id in device_ids:
return
device_ids.add(device_id)
_LOGGER.info(
"Added light (Device ID: %s Class: %s Sub: %s, Event: %s)",
event.device.id_string.lower(),
event.device.__class__.__name__,
event.device.subtype,
"".join(f"{x:02x}" for x in event.data),
)
entity = RfxtrxLight(
event.device, device_id, DEFAULT_SIGNAL_REPETITIONS, event=event
)
async_add_entities([entity])
# Subscribe to main RFXtrx events
if discovery_info[CONF_AUTOMATIC_ADD]:
hass.helpers.dispatcher.async_dispatcher_connect(SIGNAL_EVENT, light_update)
class RfxtrxLight(RfxtrxCommandEntity, LightEntity):
"""Representation of a RFXtrx light."""
_brightness = 0
async def async_added_to_hass(self):
"""Restore RFXtrx device state (ON/OFF)."""
await super().async_added_to_hass()
if self._event is None:
old_state = await self.async_get_last_state()
if old_state is not None:
self._state = old_state.state == STATE_ON
self._brightness = old_state.attributes.get(ATTR_BRIGHTNESS)
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_RFXTRX
@property
def is_on(self):
"""Return true if device is on."""
return self._state
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
self._state = True
if brightness is None:
await self._async_send(self._device.send_on)
self._brightness = 255
else:
await self._async_send(self._device.send_dim, brightness * 100 // 255)
self._brightness = brightness
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
await self._async_send(self._device.send_off)
self._state = False
self._brightness = 0
self.async_write_ha_state()
def _apply_event(self, event):
"""Apply command from rfxtrx."""
super()._apply_event(event)
if event.values["Command"] in COMMAND_ON_LIST:
self._state = True
elif event.values["Command"] in COMMAND_OFF_LIST:
self._state = False
elif event.values["Command"] == "Set level":
self._brightness = event.values["Dim level"] * 255 // 100
self._state = self._brightness > 0
@callback
def _handle_event(self, event, device_id):
"""Check if event applies to me and update."""
if device_id != self._device_id:
return
self._apply_event(event)
self.async_write_ha_state()
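# Sketch of the dim-level arithmetic used in async_turn_on() and _apply_event()
# above: Home Assistant brightness (0..255) maps to the RFXtrx percentage
# (0..100) and back via integer division, so a round trip can lose one step.
def ha_to_rfx(brightness):
    return brightness * 100 // 255

def rfx_to_ha(dim_level):
    return dim_level * 255 // 100

assert ha_to_rfx(255) == 100 and rfx_to_ha(100) == 255
assert rfx_to_ha(ha_to_rfx(128)) == 127  # off by one from integer truncation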
|
import logging
from hole.exceptions import HoleError
import voluptuous as vol
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import CONF_NAME
from homeassistant.helpers import config_validation as cv, entity_platform
from . import PiHoleEntity
from .const import (
DATA_KEY_API,
DATA_KEY_COORDINATOR,
DOMAIN as PIHOLE_DOMAIN,
SERVICE_DISABLE,
SERVICE_DISABLE_ATTR_DURATION,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the Pi-hole switch."""
name = entry.data[CONF_NAME]
hole_data = hass.data[PIHOLE_DOMAIN][entry.entry_id]
switches = [
PiHoleSwitch(
hole_data[DATA_KEY_API],
hole_data[DATA_KEY_COORDINATOR],
name,
entry.entry_id,
)
]
async_add_entities(switches, True)
# register service
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_DISABLE,
{
vol.Required(SERVICE_DISABLE_ATTR_DURATION): vol.All(
cv.time_period_str, cv.positive_timedelta
),
},
"async_disable",
)
class PiHoleSwitch(PiHoleEntity, SwitchEntity):
"""Representation of a Pi-hole switch."""
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def unique_id(self):
"""Return the unique id of the switch."""
return f"{self._server_unique_id}/Switch"
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return "mdi:pi-hole"
@property
def is_on(self):
"""Return if the service is on."""
return self.api.data.get("status") == "enabled"
async def async_turn_on(self, **kwargs):
"""Turn on the service."""
try:
await self.api.enable()
await self.async_update()
except HoleError as err:
_LOGGER.error("Unable to enable Pi-hole: %s", err)
async def async_turn_off(self, **kwargs):
"""Turn off the service."""
await self.async_disable()
async def async_disable(self, duration=None):
"""Disable the service for a given duration."""
duration_seconds = True # Disable infinitely by default
if duration is not None:
duration_seconds = duration.total_seconds()
_LOGGER.debug(
"Disabling Pi-hole '%s' (%s) for %d seconds",
self.name,
self.api.host,
duration_seconds,
)
try:
await self.api.disable(duration_seconds)
await self.async_update()
except HoleError as err:
_LOGGER.error("Unable to disable Pi-hole: %s", err)
|
import argparse
import glob
import json
import logging
import os
import socket
import sys
import pwd
import threading
from urllib.parse import urljoin
from datetime import datetime
import pkg_resources
import yaml
from cerberus import Validator
from yandextank.core import TankCore
from yandextank.core.tankcore import VALIDATED_CONF
from yandextank.validator.validator import ValidationError, load_yaml_schema
from .client import APIClient, OverloadClient, LPRequisites
from .plugin import LPJob, BackendTypes
from .plugin import Plugin as DataUploader
DATA_LOG = 'test_data.log'
MONITORING_LOG = 'monitoring.log'
SECTION = 'meta'
def get_logger():
global logger
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
handler = logging.StreamHandler(stream=sys.stdout)
handler.setLevel(logging.INFO)
logger.addHandler(handler)
verbose_handler = logging.FileHandler(
datetime.now().strftime("post_loader_%Y-%m-%d_%H-%M-%S.log"), 'w')
verbose_handler.setLevel(logging.DEBUG)
logger.addHandler(verbose_handler)
def from_tank_config(test_dir):
try:
config_file = glob.glob(os.path.join(test_dir, VALIDATED_CONF))[0]
logger.info('Config file found: %s' % config_file)
except IndexError:
raise OSError('Config file {} not found in {}'.format(VALIDATED_CONF, test_dir))
with open(config_file) as f:
tank_cfg = yaml.load(f, Loader=yaml.FullLoader)
try:
section, config = next(filter(
lambda item: 'DataUploader' in item[1].get('package', ''),
tank_cfg.items(),
))
except StopIteration:
logger.warning('DataUploader configuration not found in {}'.format(config_file))
section, config = None, {}
return section, config
def check_log(log_name):
assert os.path.exists(log_name), \
'Data log {} not found\n'.format(log_name) + \
'JsonReport plugin should be enabled when launching Yandex-tank'
def upload_data(shooting_dir, log_name, lp_job):
data_log = os.path.join(shooting_dir, log_name)
check_log(data_log)
sys.stdout.write('Uploading test data')
with open(data_log, 'r') as f:
for line in f:
data = json.loads(line.strip())
lp_job.push_test_data(data['data'], data['stats'])
sys.stdout.write('.')
sys.stdout.flush()
sys.stdout.write('\n')
def upload_monitoring(shooting_dir, log_name, lp_job):
data_log = os.path.join(shooting_dir, log_name)
check_log(data_log)
sys.stdout.write('Uploading monitoring data')
with open(data_log, 'r') as f:
for line in f.readlines():
lp_job.push_monitoring_data(json.loads(line.strip()))
sys.stdout.write('.')
sys.stdout.flush()
sys.stdout.write('\n')
def send_config_snapshot(config, lp_job):
lp_job.send_config(LPRequisites.CONFIGINFO, yaml.dump(config))
def edit_metainfo(lp_config, lp_job):
lp_job.edit_metainfo(regression_component=lp_config.get('component'),
cmdline=lp_config.get('cmdline'),
ammo_path=lp_config.get('ammo_path'),
loop_count=lp_config.get('loop_count'))
def get_plugin_dir(shooting_dir):
DIRNAME = 'lunapark'
parent = os.path.abspath(os.path.join(shooting_dir, os.pardir))
if os.path.basename(parent) == DIRNAME:
return parent
else:
plugin_dir = os.path.join(parent, DIRNAME)
if not os.path.exists(plugin_dir):
os.makedirs(plugin_dir)
return plugin_dir
def make_symlink(shooting_dir, name):
plugin_dir = get_plugin_dir(shooting_dir)
link_name = os.path.join(plugin_dir, str(name))
try:
os.symlink(os.path.relpath(shooting_dir, plugin_dir), link_name)
except OSError:
logger.warning('Unable to create symlink for artifact: %s', link_name)
else:
logger.info('Symlink created: {}'.format(os.path.abspath(link_name)))
class ConfigError(Exception):
pass
def post_loader():
CONFIG_SCHEMA = load_yaml_schema(pkg_resources.resource_filename('yandextank.plugins.DataUploader',
'config/postloader_schema.yaml'))
parser = argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter)
parser.add_argument('-c', '--config', help='YAML config. Format:\n{}'.format(yaml.dump(CONFIG_SCHEMA)))
parser.add_argument('-a', '--api_address',
help='service to upload test results to, e.g. https://overload.yandex.net')
parser.add_argument('-t', '--target', help='Address of the tested target, host[:port]')
parser.add_argument('-o', '--operator', help='User who conducted the test')
parser.add_argument('--task', help='task name, for Lunapark users only')
parser.add_argument('--job_name', help='Job name')
parser.add_argument('--job_dsc', help='Job description')
parser.add_argument('--token', help='path to token file, for Overload users only')
parser.add_argument('test_dir',
help='Directory containing test artifacts')
args = parser.parse_args()
assert os.path.exists(args.test_dir), 'Directory {} not found'.format(args.test_dir)
get_logger()
# load cfg
if args.config:
with open(args.config) as f:
config = yaml.load(f, Loader=yaml.FullLoader)
section = None
else:
section, config = from_tank_config(args.test_dir)
# parse target host and port
if args.target is not None:
try:
target_host, target_port = args.target.rsplit(':', 1)
except ValueError:
target_host, target_port = args.target, None
else:
target_host, target_port = None, None
# update cfg from cli options
for key, value in [('api_address', args.api_address),
('target_host', target_host),
('target_port', target_port),
('operator', args.operator),
('task', args.task),
('job_name', args.job_name),
('job_dsc', args.job_dsc),
('token_file', args.token)]:
if value is not None:
config[key] = value
# Validation
v = Validator(schema=CONFIG_SCHEMA,
allow_unknown=True)
if not v.validate(config):
raise ValidationError(v.errors)
config = v.normalized(config)
# lunapark or overload?
backend_type = BackendTypes.identify_backend(config['api_address'], section)
if backend_type == BackendTypes.LUNAPARK:
client = APIClient
api_token = None
elif backend_type == BackendTypes.OVERLOAD:
client = OverloadClient
try:
api_token = DataUploader.read_token(config["token_file"])
except KeyError:
raise ConfigError('Token file required')
else:
raise RuntimeError("Backend type doesn't match any of the expected")
user_agent = ' '.join(('Uploader/{}'.format(DataUploader.VERSION),
TankCore.get_user_agent()))
api_client = client(base_url=config['api_address'],
user_agent=user_agent,
api_token=api_token,
core_interrupted=threading.Event()
# todo: add timeouts
)
lp_job = LPJob(
client=api_client,
target_host=config.get('target_host'),
target_port=config.get('target_port'),
person=config.get('operator') or pwd.getpwuid(os.geteuid())[0],
task=config.get('task'),
name=config['job_name'],
description=config['job_dsc'],
tank=socket.getfqdn())
edit_metainfo(config, lp_job)
upload_data(args.test_dir, DATA_LOG, lp_job)
send_config_snapshot(config, lp_job)
try:
upload_monitoring(args.test_dir, MONITORING_LOG, lp_job)
except AssertionError as e:
logger.error(e)
lp_job.close(0)
make_symlink(args.test_dir, lp_job.number)
logger.info(
'LP job created: {}'.format(
urljoin(
api_client.base_url, str(
lp_job.number))))
if __name__ == '__main__':
post_loader()
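# Sketch of the "host[:port]" parsing in post_loader() above: rsplit(':', 1)
# keeps everything before the last colon as the host, and the ValueError
# branch covers targets given without a port.
assert "example.com:8080".rsplit(":", 1) == ["example.com", "8080"]
try:
    host, port = "example.com".rsplit(":", 1)
except ValueError:
    host, port = "example.com", None
assert (host, port) == ("example.com", None)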
|
from datetime import timedelta
import logging
import struct
from typing import Any, Dict, Optional
from pymodbus.exceptions import ConnectionException, ModbusException
from pymodbus.pdu import ExceptionResponse
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_NAME,
CONF_SCAN_INTERVAL,
CONF_SLAVE,
CONF_STRUCTURE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.helpers.typing import (
ConfigType,
DiscoveryInfoType,
HomeAssistantType,
)
from . import ModbusHub
from .const import (
CALL_TYPE_REGISTER_HOLDING,
CALL_TYPE_REGISTER_INPUT,
CONF_CLIMATES,
CONF_CURRENT_TEMP,
CONF_CURRENT_TEMP_REGISTER_TYPE,
CONF_DATA_COUNT,
CONF_DATA_TYPE,
CONF_MAX_TEMP,
CONF_MIN_TEMP,
CONF_OFFSET,
CONF_PRECISION,
CONF_SCALE,
CONF_STEP,
CONF_TARGET_TEMP,
CONF_UNIT,
DATA_TYPE_CUSTOM,
DEFAULT_STRUCT_FORMAT,
MODBUS_DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(
hass: HomeAssistantType,
config: ConfigType,
async_add_entities,
discovery_info: Optional[DiscoveryInfoType] = None,
):
"""Read configuration and create Modbus climate."""
if discovery_info is None:
return
entities = []
for entity in discovery_info[CONF_CLIMATES]:
hub: ModbusHub = hass.data[MODBUS_DOMAIN][discovery_info[CONF_NAME]]
count = entity[CONF_DATA_COUNT]
data_type = entity[CONF_DATA_TYPE]
name = entity[CONF_NAME]
structure = entity[CONF_STRUCTURE]
if data_type != DATA_TYPE_CUSTOM:
try:
structure = f">{DEFAULT_STRUCT_FORMAT[data_type][count]}"
except KeyError:
_LOGGER.error(
"Climate %s: Unable to find a data type matching count value %s, try a custom type",
name,
count,
)
continue
try:
size = struct.calcsize(structure)
except struct.error as err:
_LOGGER.error("Error in sensor %s structure: %s", name, err)
continue
if count * 2 != size:
_LOGGER.error(
"Structure size (%d bytes) mismatch registers count (%d words)",
size,
count,
)
continue
entity[CONF_STRUCTURE] = structure
entities.append(ModbusThermostat(hub, entity))
async_add_entities(entities)
class ModbusThermostat(ClimateEntity):
"""Representation of a Modbus Thermostat."""
def __init__(
self,
hub: ModbusHub,
config: Dict[str, Any],
):
"""Initialize the modbus thermostat."""
self._hub: ModbusHub = hub
self._name = config[CONF_NAME]
self._slave = config[CONF_SLAVE]
self._target_temperature_register = config[CONF_TARGET_TEMP]
self._current_temperature_register = config[CONF_CURRENT_TEMP]
self._current_temperature_register_type = config[
CONF_CURRENT_TEMP_REGISTER_TYPE
]
self._target_temperature = None
self._current_temperature = None
self._data_type = config[CONF_DATA_TYPE]
self._structure = config[CONF_STRUCTURE]
self._count = config[CONF_DATA_COUNT]
self._precision = config[CONF_PRECISION]
self._scale = config[CONF_SCALE]
self._scan_interval = timedelta(seconds=config[CONF_SCAN_INTERVAL])
self._offset = config[CONF_OFFSET]
self._unit = config[CONF_UNIT]
self._max_temp = config[CONF_MAX_TEMP]
self._min_temp = config[CONF_MIN_TEMP]
self._temp_step = config[CONF_STEP]
self._available = True
async def async_added_to_hass(self):
"""Handle entity which will be added."""
async_track_time_interval(
self.hass, lambda arg: self._update(), self._scan_interval
)
@property
def should_poll(self):
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
# Handle polling directly in this entity
return False
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE
@property
def hvac_mode(self):
"""Return the current HVAC mode."""
return HVAC_MODE_AUTO
@property
def hvac_modes(self):
"""Return the possible HVAC modes."""
return [HVAC_MODE_AUTO]
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
# Home Assistant expects this method.
# We'll keep it here to avoid getting exceptions.
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the target temperature."""
return self._target_temperature
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT if self._unit == "F" else TEMP_CELSIUS
@property
def min_temp(self):
"""Return the minimum temperature."""
return self._min_temp
@property
def max_temp(self):
"""Return the maximum temperature."""
return self._max_temp
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
return self._temp_step
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
target_temperature = int((temperature - self._offset) / self._scale)
byte_string = struct.pack(self._structure, target_temperature)
register_value = struct.unpack(">h", byte_string[0:2])[0]
self._write_register(self._target_temperature_register, register_value)
self._update()
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
def _update(self):
"""Update Target & Current Temperature."""
self._target_temperature = self._read_register(
CALL_TYPE_REGISTER_HOLDING, self._target_temperature_register
)
self._current_temperature = self._read_register(
self._current_temperature_register_type, self._current_temperature_register
)
self.schedule_update_ha_state()
def _read_register(self, register_type, register) -> Optional[float]:
"""Read register using the Modbus hub slave."""
try:
if register_type == CALL_TYPE_REGISTER_INPUT:
result = self._hub.read_input_registers(
self._slave, register, self._count
)
else:
result = self._hub.read_holding_registers(
self._slave, register, self._count
)
except ConnectionException:
self._available = False
return
if isinstance(result, (ModbusException, ExceptionResponse)):
self._available = False
return
byte_string = b"".join(
[x.to_bytes(2, byteorder="big") for x in result.registers]
)
val = struct.unpack(self._structure, byte_string)[0]
register_value = format(
(self._scale * val) + self._offset, f".{self._precision}f"
)
register_value = float(register_value)
self._available = True
return register_value
def _write_register(self, register, value):
"""Write holding register using the Modbus hub slave."""
try:
self._hub.write_registers(self._slave, register, [value, 0])
except ConnectionException:
self._available = False
return
self._available = True
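# Sketch of the register-to-value conversion performed by _read_register()
# above: two 16-bit registers are concatenated big-endian and unpacked with
# the configured struct format (">i", a 32-bit int, in this illustration).
import struct

registers = [0x0001, 0x86A0]  # the kind of list pymodbus returns
raw = b"".join(r.to_bytes(2, byteorder="big") for r in registers)
value = struct.unpack(">i", raw)[0]
assert value == 100000
# with scale=0.01, offset=0 and precision=2 this would be reported as 1000.00
assert format(0.01 * value + 0, ".2f") == "1000.00"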
|
from pscript import window
from ... import event
from ._stack import StackLayout
class TabLayout(StackLayout):
""" A StackLayout which provides a tabbar for selecting the current widget.
The title of each child widget is used for the tab label.
The ``node`` of this widget is a
`<div> <https://developer.mozilla.org/docs/Web/HTML/Element/div>`_.
The visible child widget fills the entire area of this element,
except for a small area at the top where the tab-bar is shown.
"""
CSS = """
.flx-TabLayout > .flx-Widget {
top: 30px;
margin: 0;
height: calc(100% - 30px);
border: 1px solid #ddd;
}
.flx-TabLayout > .flx-tabbar {
box-sizing: border-box;
position: absolute;
left: 0;
right: 0;
top: 0;
height: 30px;
overflow: hidden;
}
.flx-tabbar > .flx-tab-item {
display: inline-block;
height: 22px; /* 100% - 8px: 3 margin + 2 borders + 2 padding -1 overlap */
margin-top: 3px;
padding: 3px 6px 1px 6px;
overflow: hidden;
min-width: 10px;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
background: #ececec;
border: 1px solid #bbb;
border-radius: 3px 3px 0px 0px;
margin-left: -1px;
transition: background 0.3s;
}
.flx-tabbar > .flx-tab-item:first-of-type {
margin-left: 0;
}
.flx-tabbar > .flx-tab-item.flx-current {
background: #eaecff;
border-top: 3px solid #7bf;
margin-top: 0;
}
.flx-tabbar > .flx-tab-item:hover {
background: #eaecff;
}
"""
def _create_dom(self):
outernode = window.document.createElement('div')
self._tabbar = window.document.createElement('div')
self._tabbar.classList.add('flx-tabbar')
self._addEventListener(self._tabbar, 'mousedown', # also works for touch
self._tabbar_click)
outernode.appendChild(self._tabbar)
return outernode
def _render_dom(self):
nodes = [child.outernode for child in self.children]
nodes.append(self._tabbar)
return nodes
@event.reaction
def __update_tabs(self):
children = self.children
current = self.current
# Add items to tabbar as needed
while len(self._tabbar.children) < len(children):
node = window.document.createElement('p')
node.classList.add('flx-tab-item')
node.index = len(self._tabbar.children)
self._tabbar.appendChild(node)
# Remove items from tabbar as needed
while len(self._tabbar.children) > len(children):
c = self._tabbar.children[len(self._tabbar.children) - 1]
self._tabbar.removeChild(c)
# Update titles
for i in range(len(children)):
widget = children[i]
node = self._tabbar.children[i]
node.textContent = widget.title
if widget is current:
node.classList.add('flx-current')
else:
node.classList.remove('flx-current')
# Update sizes
self.__checks_sizes()
@event.reaction('size')
def __checks_sizes(self, *events):
# Make the tabbar items occupy (nearly) the full width
nodes = self._tabbar.children
if len(nodes) == 0:
return
width = (self.size[0] - 10) / len(nodes) - 2 - 12 # - padding and border
for i in range(len(nodes)):
nodes[i].style.width = width + 'px'
@event.emitter
def user_current(self, current):
""" Event emitted when the user selects a tab. Can be used to distinguish
user-invoked from programmatically-invoked tab changes.
Has ``old_value`` and ``new_value`` attributes.
"""
if isinstance(current, (float, int)):
current = self.children[int(current)]
d = {'old_value': self.current, 'new_value': current}
self.set_current(current)
return d
def _tabbar_click(self, e):
index = e.target.index
if index >= 0:
self.user_current(index)
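# Minimal usage sketch (assumes a standard flexx install; titles and styles
# are illustrative):
#
#     from flexx import flx
#
#     class Example(flx.Widget):
#         def init(self):
#             with flx.TabLayout():
#                 flx.Widget(title='red', style='background:#a00;')
#                 flx.Widget(title='green', style='background:#0a0;')
#
#     if __name__ == '__main__':
#         flx.launch(Example)
#         flx.run()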
|
import functools
import logging
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import memcached_server
from perfkitbenchmarker.linux_packages import ycsb
from perfkitbenchmarker.providers.aws import aws_network
FLAGS = flags.FLAGS
flags.DEFINE_enum('memcached_managed', providers.GCP,
[providers.GCP, providers.AWS],
'Managed memcached provider (GCP/AWS) to use.')
flags.DEFINE_enum('memcached_scenario', 'custom',
['custom', 'managed'],
'select one scenario to run: \n'
'custom: Provision VMs and install memcached ourselves. \n'
'managed: Use the specified provider\'s managed memcache.')
flags.DEFINE_enum('memcached_elasticache_region', 'us-west-1',
['ap-northeast-1', 'ap-northeast-2', 'ap-southeast-1',
'ap-southeast-2', 'ap-south-1', 'cn-north-1', 'eu-central-1',
'eu-west-1', 'us-gov-west-1', 'sa-east-1', 'us-east-1',
'us-east-2', 'us-west-1', 'us-west-2'],
'The region to use for AWS ElastiCache memcached servers.')
flags.DEFINE_enum('memcached_elasticache_node_type', 'cache.m3.medium',
['cache.t2.micro', 'cache.t2.small', 'cache.t2.medium',
'cache.m3.medium', 'cache.m3.large', 'cache.m3.xlarge',
'cache.m3.2xlarge', 'cache.m4.large', 'cache.m4.xlarge',
'cache.m4.2xlarge', 'cache.m4.4xlarge', 'cache.m4.10xlarge'],
'The node type to use for AWS ElastiCache memcached servers.')
flags.DEFINE_integer('memcached_elasticache_num_servers', 1,
'The number of memcached instances for AWS ElastiCache.')
BENCHMARK_NAME = 'memcached_ycsb'
BENCHMARK_CONFIG = """
memcached_ycsb:
description: >
Run YCSB against a memcached
installation. Specify the number of YCSB client VMs with
--ycsb_client_vms and the number of YCSB server VMS with
--num_vms.
flags:
ycsb_client_vms: 1
num_vms: 1
vm_groups:
servers:
vm_spec: *default_single_core
clients:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['ycsb_client_vms'].present:
config['vm_groups']['clients']['vm_count'] = FLAGS.ycsb_client_vms
config['vm_groups']['servers']['vm_count'] = FLAGS.num_vms
return config
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
ycsb.CheckPrerequisites()
def Prepare(benchmark_spec):
"""Prepare the virtual machines to run YCSB against memcached.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
clients = benchmark_spec.vm_groups['clients']
assert clients, benchmark_spec.vm_groups
hosts = []
if FLAGS.memcached_scenario == 'managed':
# We need to delete the managed memcached backend when we're done
benchmark_spec.always_call_cleanup = True
if FLAGS.memcached_managed == providers.GCP:
raise NotImplementedError("GCP managed memcached backend not implemented "
"yet")
elif FLAGS.memcached_managed == providers.AWS:
cluster_id = 'pkb%s' % FLAGS.run_uri
service = providers.aws.elasticache.ElastiCacheMemcacheService(
aws_network.AwsNetwork.GetNetwork(clients[0]),
cluster_id, FLAGS.memcached_elasticache_region,
FLAGS.memcached_elasticache_node_type,
FLAGS.memcached_elasticache_num_servers)
service.Create()
hosts = service.GetHosts()
benchmark_spec.service = service
benchmark_spec.metadata = service.GetMetadata()
else:
# custom scenario
# Install memcached on all the servers
servers = benchmark_spec.vm_groups['servers']
assert servers, 'No memcached servers: {0}'.format(benchmark_spec.vm_groups)
memcached_install_fns = \
[functools.partial(memcached_server.ConfigureAndStart, vm)
for vm in servers]
vm_util.RunThreaded(lambda f: f(), memcached_install_fns)
hosts = ['%s:%s' % (vm.internal_ip, memcached_server.MEMCACHED_PORT)
for vm in servers]
benchmark_spec.metadata = {'ycsb_client_vms': FLAGS.ycsb_client_vms,
'ycsb_server_vms': len(servers),
'cache_size': FLAGS.memcached_size_mb}
assert len(hosts) > 0
ycsb_install_fns = [functools.partial(vm.Install, 'ycsb')
for vm in clients]
vm_util.RunThreaded(lambda f: f(), ycsb_install_fns)
benchmark_spec.executor = ycsb.YCSBExecutor(
'memcached',
**{'memcached.hosts': ','.join(hosts)})
def Run(benchmark_spec):
"""Spawn YCSB and gather the results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample instances.
"""
logging.info('Start benchmarking memcached service, scenario is %s.',
FLAGS.memcached_scenario)
clients = benchmark_spec.vm_groups['clients']
samples = list(benchmark_spec.executor.LoadAndRun(clients))
for sample in samples:
sample.metadata.update(benchmark_spec.metadata)
return samples
def Cleanup(benchmark_spec):
"""Cleanup.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if FLAGS.memcached_scenario == 'managed':
service = benchmark_spec.service
service.Destroy()
else:
# Custom scenario
servers = benchmark_spec.vm_groups['servers']
vm_util.RunThreaded(memcached_server.StopMemcached, servers)
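# Typical invocations of this benchmark (a sketch; --benchmarks is
# PerfKitBenchmarker's standard flag, the rest are defined in this module):
#
#   ./pkb.py --benchmarks=memcached_ycsb --memcached_scenario=custom \
#            --ycsb_client_vms=2 --num_vms=1
#
#   ./pkb.py --benchmarks=memcached_ycsb --memcached_scenario=managed \
#            --memcached_managed=AWS \
#            --memcached_elasticache_node_type=cache.m4.large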
|
import unittest
from common import gpu_test
import mxnet as mx
from gluonnlp import Vocab
from gluonnlp.data import count_tokens
from gluoncv.data.transforms.image import imresize
class TestMxNet(unittest.TestCase):
def test_array(self):
x = mx.nd.array([[1, 2, 3], [4, 5, 6]])
self.assertEqual((2, 3), x.shape)
@gpu_test
def test_array_gpu(self):
x = mx.nd.array([2, 2, 2], ctx=mx.gpu(0))
y = mx.nd.array([1, 1, 1], ctx=mx.gpu(0))
self.assertEqual(3, ((x - y).sum().asscalar()))
def test_gluon_nlp(self):
# get corpus statistics
counter = count_tokens(['alpha', 'beta', 'gamma', 'beta'])
# create Vocab
vocab = Vocab(counter)
# find index based on token
self.assertEqual(4, vocab['beta'])
def test_gluon_cv(self):
# create fake RGB image of 300x300 of shape: Height x Width x Channel as OpenCV expects
img = mx.random.uniform(0, 255, (300, 300, 3)).astype('uint8')
# resize image to 200x200. This call uses OpenCV
# GluonCV is not of much use if OpenCV is not there or fails
img = imresize(img, 200, 200)
self.assertEqual((200, 200, 3), img.shape)
|
import logging
import os
import unittest
from unittest import SkipTest
import multiprocessing as mp
from functools import partial
import numpy as np
from gensim.matutils import argsort
from gensim.models.coherencemodel import CoherenceModel, BOOLEAN_DOCUMENT_BASED
from gensim.models.ldamodel import LdaModel
from gensim.models.wrappers import LdaMallet
from gensim.models.wrappers import LdaVowpalWabbit
from gensim.test.utils import get_tmpfile, common_texts, common_dictionary, common_corpus
class TestCoherenceModel(unittest.TestCase):
# set up vars used in testing ("Deerwester" from the web tutorial)
texts = common_texts
dictionary = common_dictionary
corpus = common_corpus
def setUp(self):
# Suppose given below are the topics which two different LdaModels come up with.
# `topics1` is clearly better as it has a clear distinction between system-human
# interaction and graphs. Hence both the coherence measures for `topics1` should be
# greater.
self.topics1 = [
['human', 'computer', 'system', 'interface'],
['graph', 'minors', 'trees', 'eps']
]
self.topics2 = [
['user', 'graph', 'minors', 'system'],
['time', 'graph', 'survey', 'minors']
]
self.ldamodel = LdaModel(
corpus=self.corpus, id2word=self.dictionary, num_topics=2,
passes=0, iterations=0
)
mallet_home = os.environ.get('MALLET_HOME', None)
self.mallet_path = os.path.join(mallet_home, 'bin', 'mallet') if mallet_home else None
if self.mallet_path:
self.malletmodel = LdaMallet(
mallet_path=self.mallet_path, corpus=self.corpus,
id2word=self.dictionary, num_topics=2, iterations=0
)
vw_path = os.environ.get('VOWPAL_WABBIT_PATH', None)
if not vw_path:
logging.info(
"Environment variable 'VOWPAL_WABBIT_PATH' not specified, skipping sanity checks for LDA Model"
)
self.vw_path = None
else:
self.vw_path = vw_path
self.vwmodel = LdaVowpalWabbit(
self.vw_path, corpus=self.corpus, id2word=self.dictionary,
num_topics=2, passes=0
)
def check_coherence_measure(self, coherence):
"""Check provided topic coherence algorithm on given topics"""
if coherence in BOOLEAN_DOCUMENT_BASED:
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence=coherence)
else:
kwargs = dict(texts=self.texts, dictionary=self.dictionary, coherence=coherence)
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm2 = CoherenceModel(topics=self.topics2, **kwargs)
self.assertGreater(cm1.get_coherence(), cm2.get_coherence())
def testUMass(self):
"""Test U_Mass topic coherence algorithm on given topics"""
self.check_coherence_measure('u_mass')
def testCv(self):
"""Test C_v topic coherence algorithm on given topics"""
self.check_coherence_measure('c_v')
def testCuci(self):
"""Test C_uci topic coherence algorithm on given topics"""
self.check_coherence_measure('c_uci')
def testCnpmi(self):
"""Test C_npmi topic coherence algorithm on given topics"""
self.check_coherence_measure('c_npmi')
def testUMassLdaModel(self):
"""Perform sanity check to see if u_mass coherence works with LDA Model"""
# Note that this is just a sanity check because LDA does not guarantee a better coherence
# value on the topics if iterations are increased. This can be seen here:
# https://gist.github.com/dsquareindia/60fd9ab65b673711c3fa00509287ddde
CoherenceModel(model=self.ldamodel, corpus=self.corpus, coherence='u_mass')
def testCvLdaModel(self):
"""Perform sanity check to see if c_v coherence works with LDA Model"""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_v')
def testCw2vLdaModel(self):
"""Perform sanity check to see if c_w2v coherence works with LDAModel."""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_w2v')
def testCuciLdaModel(self):
"""Perform sanity check to see if c_uci coherence works with LDA Model"""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_uci')
def testCnpmiLdaModel(self):
"""Perform sanity check to see if c_npmi coherence works with LDA Model"""
CoherenceModel(model=self.ldamodel, texts=self.texts, coherence='c_npmi')
def testUMassMalletModel(self):
"""Perform sanity check to see if u_mass coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, corpus=self.corpus, coherence='u_mass')
def _check_for_mallet(self):
if not self.mallet_path:
raise SkipTest("Mallet not installed")
def testCvMalletModel(self):
"""Perform sanity check to see if c_v coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_v')
def testCw2vMalletModel(self):
"""Perform sanity check to see if c_w2v coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_w2v')
def testCuciMalletModel(self):
"""Perform sanity check to see if c_uci coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_uci')
def testCnpmiMalletModel(self):
"""Perform sanity check to see if c_npmi coherence works with LDA Mallet gensim wrapper"""
self._check_for_mallet()
CoherenceModel(model=self.malletmodel, texts=self.texts, coherence='c_npmi')
def testUMassVWModel(self):
"""Perform sanity check to see if u_mass coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, corpus=self.corpus, coherence='u_mass')
def _check_for_vw(self):
if not self.vw_path:
raise SkipTest("Vowpal Wabbit not installed")
def testCvVWModel(self):
"""Perform sanity check to see if c_v coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_v')
def testCw2vVWModel(self):
"""Perform sanity check to see if c_w2v coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_w2v')
def testCuciVWModel(self):
"""Perform sanity check to see if c_uci coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_uci')
def testCnpmiVWModel(self):
"""Perform sanity check to see if c_npmi coherence works with LDA VW gensim wrapper"""
self._check_for_vw()
CoherenceModel(model=self.vwmodel, texts=self.texts, coherence='c_npmi')
def testErrors(self):
"""Test if errors are raised on bad input"""
# not providing dictionary
self.assertRaises(
ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
coherence='u_mass'
)
# not providing texts for c_v and instead providing corpus
self.assertRaises(
ValueError, CoherenceModel, topics=self.topics1, corpus=self.corpus,
dictionary=self.dictionary, coherence='c_v'
)
# not providing corpus or texts for u_mass
self.assertRaises(
ValueError, CoherenceModel, topics=self.topics1, dictionary=self.dictionary,
coherence='u_mass'
)
def testProcesses(self):
get_model = partial(CoherenceModel,
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model, used_cpus = get_model(), mp.cpu_count() - 1
self.assertEqual(model.processes, used_cpus)
for p in range(-2, 1):
self.assertEqual(get_model(processes=p).processes, used_cpus)
for p in range(1, 4):
self.assertEqual(get_model(processes=p).processes, p)
def testPersistence(self):
fname = get_tmpfile('gensim_models_coherence.tst')
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testPersistenceCompressed(self):
fname = get_tmpfile('gensim_models_coherence.tst.gz')
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testPersistenceAfterProbabilityEstimationUsingCorpus(self):
fname = get_tmpfile('gensim_similarities.tst.pkl')
model = CoherenceModel(
topics=self.topics1, corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass'
)
model.estimate_probabilities()
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertIsNotNone(model2._accumulator)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testPersistenceAfterProbabilityEstimationUsingTexts(self):
fname = get_tmpfile('gensim_similarities.tst.pkl')
model = CoherenceModel(
topics=self.topics1, texts=self.texts, dictionary=self.dictionary, coherence='c_v'
)
model.estimate_probabilities()
model.save(fname)
model2 = CoherenceModel.load(fname)
self.assertIsNotNone(model2._accumulator)
self.assertTrue(model.get_coherence() == model2.get_coherence())
def testAccumulatorCachingSameSizeTopics(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
accumulator = cm1._accumulator
self.assertIsNotNone(accumulator)
cm1.topics = self.topics1
self.assertEqual(accumulator, cm1._accumulator)
cm1.topics = self.topics2
self.assertEqual(None, cm1._accumulator)
def testAccumulatorCachingTopicSubsets(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
accumulator = cm1._accumulator
self.assertIsNotNone(accumulator)
cm1.topics = [t[:2] for t in self.topics1]
self.assertEqual(accumulator, cm1._accumulator)
cm1.topics = self.topics1
self.assertEqual(accumulator, cm1._accumulator)
def testAccumulatorCachingWithModelSetting(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
self.assertIsNotNone(cm1._accumulator)
cm1.model = self.ldamodel
topics = []
for topic in self.ldamodel.state.get_lambda():
bestn = argsort(topic, topn=cm1.topn, reverse=True)
topics.append(bestn)
self.assertTrue(np.array_equal(topics, cm1.topics))
self.assertIsNone(cm1._accumulator)
def testAccumulatorCachingWithTopnSettingGivenTopics(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, topn=5, coherence='u_mass')
cm1 = CoherenceModel(topics=self.topics1, **kwargs)
cm1.estimate_probabilities()
self.assertIsNotNone(cm1._accumulator)
accumulator = cm1._accumulator
topics_before = cm1._topics
cm1.topn = 3
self.assertEqual(accumulator, cm1._accumulator)
self.assertEqual(3, len(cm1.topics[0]))
self.assertEqual(topics_before, cm1._topics)
# Topics should not have been truncated, so topn settings below 5 should work
cm1.topn = 4
self.assertEqual(accumulator, cm1._accumulator)
self.assertEqual(4, len(cm1.topics[0]))
self.assertEqual(topics_before, cm1._topics)
with self.assertRaises(ValueError):
cm1.topn = 6 # can't expand topics any further without model
def testAccumulatorCachingWithTopnSettingGivenModel(self):
kwargs = dict(corpus=self.corpus, dictionary=self.dictionary, topn=5, coherence='u_mass')
cm1 = CoherenceModel(model=self.ldamodel, **kwargs)
cm1.estimate_probabilities()
self.assertIsNotNone(cm1._accumulator)
accumulator = cm1._accumulator
topics_before = cm1._topics
cm1.topn = 3
self.assertEqual(accumulator, cm1._accumulator)
self.assertEqual(3, len(cm1.topics[0]))
self.assertEqual(topics_before, cm1._topics)
cm1.topn = 6 # should be able to expand given the model
self.assertEqual(6, len(cm1.topics[0]))
def testCompareCoherenceForTopics(self):
topics = [self.topics1, self.topics2]
cm = CoherenceModel.for_topics(
topics, dictionary=self.dictionary, texts=self.texts, coherence='c_v')
self.assertIsNotNone(cm._accumulator)
# Accumulator should have all relevant IDs.
for topic_list in topics:
cm.topics = topic_list
self.assertIsNotNone(cm._accumulator)
(coherence_topics1, coherence1), (coherence_topics2, coherence2) = \
cm.compare_model_topics(topics)
self.assertAlmostEqual(np.mean(coherence_topics1), coherence1, 4)
self.assertAlmostEqual(np.mean(coherence_topics2), coherence2, 4)
self.assertGreater(coherence1, coherence2)
def testCompareCoherenceForModels(self):
models = [self.ldamodel, self.ldamodel]
cm = CoherenceModel.for_models(
models, dictionary=self.dictionary, texts=self.texts, coherence='c_v')
self.assertIsNotNone(cm._accumulator)
# Accumulator should have all relevant IDs.
for model in models:
cm.model = model
self.assertIsNotNone(cm._accumulator)
(coherence_topics1, coherence1), (coherence_topics2, coherence2) = \
cm.compare_models(models)
self.assertAlmostEqual(np.mean(coherence_topics1), coherence1, 4)
self.assertAlmostEqual(np.mean(coherence_topics2), coherence2, 4)
self.assertAlmostEqual(coherence1, coherence2, places=4)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
from functools import wraps
import voluptuous as vol
from homeassistant.components import websocket_api
# mypy: allow-untyped-calls, allow-untyped-defs
DATA_STORAGE = "frontend_storage"
STORAGE_VERSION_USER_DATA = 1
async def async_setup_frontend_storage(hass):
"""Set up frontend storage."""
hass.data[DATA_STORAGE] = ({}, {})
hass.components.websocket_api.async_register_command(websocket_set_user_data)
hass.components.websocket_api.async_register_command(websocket_get_user_data)
def with_store(orig_func):
"""Decorate function to provide data."""
@wraps(orig_func)
async def with_store_func(hass, connection, msg):
"""Provide user specific data and store to function."""
stores, data = hass.data[DATA_STORAGE]
user_id = connection.user.id
store = stores.get(user_id)
if store is None:
store = stores[user_id] = hass.helpers.storage.Store(
STORAGE_VERSION_USER_DATA, f"frontend.user_data_{connection.user.id}"
)
if user_id not in data:
data[user_id] = await store.async_load() or {}
await orig_func(hass, connection, msg, store, data[user_id])
return with_store_func
@websocket_api.websocket_command(
{
vol.Required("type"): "frontend/set_user_data",
vol.Required("key"): str,
vol.Required("value"): vol.Any(bool, str, int, float, dict, list, None),
}
)
@websocket_api.async_response
@with_store
async def websocket_set_user_data(hass, connection, msg, store, data):
"""Handle set global data command.
Async friendly.
"""
data[msg["key"]] = msg["value"]
await store.async_save(data)
connection.send_message(websocket_api.result_message(msg["id"]))
@websocket_api.websocket_command(
{vol.Required("type"): "frontend/get_user_data", vol.Optional("key"): str}
)
@websocket_api.async_response
@with_store
async def websocket_get_user_data(hass, connection, msg, store, data):
"""Handle get global data command.
Async friendly.
"""
connection.send_message(
websocket_api.result_message(
msg["id"], {"value": data.get(msg["key"]) if "key" in msg else data}
)
)
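# Sketch of the websocket payloads the two commands above exchange (the "id"
# field is the standard websocket_api message id; values are illustrative):
set_user_data_msg = {
    "id": 5,
    "type": "frontend/set_user_data",
    "key": "sidebar",
    "value": {"collapsed": True},
}
get_user_data_msg = {"id": 6, "type": "frontend/get_user_data", "key": "sidebar"}
# the reply to get_user_data carries {"value": {"collapsed": True}}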
|
from homeassistant.const import (
ELECTRICAL_CURRENT_AMPERE,
ENERGY_WATT_HOUR,
POWER_WATT,
TEMP_CELSIUS,
TIME_SECONDS,
VOLT,
)
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, JUICENET_API, JUICENET_COORDINATOR
from .entity import JuiceNetDevice
SENSOR_TYPES = {
"status": ["Charging Status", None],
"temperature": ["Temperature", TEMP_CELSIUS],
"voltage": ["Voltage", VOLT],
"amps": ["Amps", ELECTRICAL_CURRENT_AMPERE],
"watts": ["Watts", POWER_WATT],
"charge_time": ["Charge time", TIME_SECONDS],
"energy_added": ["Energy added", ENERGY_WATT_HOUR],
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the JuiceNet Sensors."""
entities = []
juicenet_data = hass.data[DOMAIN][config_entry.entry_id]
api = juicenet_data[JUICENET_API]
coordinator = juicenet_data[JUICENET_COORDINATOR]
for device in api.devices:
for sensor in SENSOR_TYPES:
entities.append(JuiceNetSensorDevice(device, sensor, coordinator))
async_add_entities(entities)
class JuiceNetSensorDevice(JuiceNetDevice, Entity):
"""Implementation of a JuiceNet sensor."""
def __init__(self, device, sensor_type, coordinator):
"""Initialise the sensor."""
super().__init__(device, sensor_type, coordinator)
self._name = SENSOR_TYPES[sensor_type][0]
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the device."""
return f"{self.device.name} {self._name}"
@property
def icon(self):
"""Return the icon of the sensor."""
icon = None
if self.type == "status":
status = self.device.status
if status == "standby":
icon = "mdi:power-plug-off"
elif status == "plugged":
icon = "mdi:power-plug"
elif status == "charging":
icon = "mdi:battery-positive"
elif self.type == "temperature":
icon = "mdi:thermometer"
elif self.type == "voltage":
icon = "mdi:flash"
elif self.type == "amps":
icon = "mdi:flash"
elif self.type == "watts":
icon = "mdi:flash"
elif self.type == "charge_time":
icon = "mdi:timer-outline"
elif self.type == "energy_added":
icon = "mdi:flash"
return icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state."""
state = None
if self.type == "status":
state = self.device.status
elif self.type == "temperature":
state = self.device.temperature
elif self.type == "voltage":
state = self.device.voltage
elif self.type == "amps":
state = self.device.amps
elif self.type == "watts":
state = self.device.watts
elif self.type == "charge_time":
state = self.device.charge_time
elif self.type == "energy_added":
state = self.device.energy_added
else:
state = "Unknown"
return state
|
from typing import Dict, List
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_CONDITION,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers import condition, config_validation as cv, entity_registry
from homeassistant.helpers.config_validation import DEVICE_CONDITION_BASE_SCHEMA
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import DOMAIN
# TODO specify your supported condition types.
CONDITION_TYPES = {"is_on", "is_off"}
CONDITION_SCHEMA = DEVICE_CONDITION_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(CONDITION_TYPES),
}
)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> List[Dict[str, str]]:
"""List device conditions for NEW_NAME devices."""
registry = await entity_registry.async_get_registry(hass)
conditions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
# Add conditions for each entity that belongs to this integration
# TODO add your own conditions.
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_on",
}
)
conditions.append(
{
CONF_CONDITION: "device",
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "is_off",
}
)
return conditions
@callback
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> condition.ConditionCheckerType:
"""Create a function to test a device condition."""
if config_validation:
config = CONDITION_SCHEMA(config)
if config[CONF_TYPE] == "is_on":
state = STATE_ON
else:
state = STATE_OFF
@callback
def test_is_state(hass: HomeAssistant, variables: TemplateVarsType) -> bool:
"""Test if an entity is a certain state."""
return condition.state(hass, config[ATTR_ENTITY_ID], state)
return test_is_state
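# Sketch of a single condition dict that async_get_conditions() returns and
# CONDITION_SCHEMA accepts (domain and ids are placeholders for the scaffolded
# integration):
example_condition = {
    "condition": "device",
    "domain": "NEW_DOMAIN",
    "device_id": "abcdef0123456789",
    "entity_id": "NEW_DOMAIN.example",
    "type": "is_on",
}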
|
import logging
from homeassistant.components import mythicbeastsdns
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
_LOGGER = logging.getLogger(__name__)
async def mbddns_update_mock(domain, password, host, ttl=60, session=None):
"""Mock out mythic beasts updater."""
if password == "incorrect":
_LOGGER.error("Updating Mythic Beasts failed: Not authenticated")
return False
if host[0] == "$":
_LOGGER.error("Updating Mythic Beasts failed: Invalid Character")
return False
return True
@patch("mbddns.update", new=mbddns_update_mock)
async def test_update(hass):
"""Run with correct values and check true is returned."""
result = await async_setup_component(
hass,
mythicbeastsdns.DOMAIN,
{
mythicbeastsdns.DOMAIN: {
"domain": "example.org",
"password": "correct",
"host": "hass",
}
},
)
assert result
@patch("mbddns.update", new=mbddns_update_mock)
async def test_update_fails_if_wrong_token(hass):
"""Run with incorrect token and check false is returned."""
result = await async_setup_component(
hass,
mythicbeastsdns.DOMAIN,
{
mythicbeastsdns.DOMAIN: {
"domain": "example.org",
"password": "incorrect",
"host": "hass",
}
},
)
assert not result
@patch("mbddns.update", new=mbddns_update_mock)
async def test_update_fails_if_invalid_host(hass):
"""Run with invalid characters in host and check false is returned."""
result = await async_setup_component(
hass,
mythicbeastsdns.DOMAIN,
{
mythicbeastsdns.DOMAIN: {
"domain": "example.org",
"password": "correct",
"host": "$hass",
}
},
)
assert not result
|
import logging
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
def CreateDisks(disk_specs, vm_name):
"""Creates scratch disks (Docker Volumes)."""
scratch_disks = []
for disk_num, disk_spec in enumerate(disk_specs):
logging.info('Creating Disk number: %d', disk_num)
volume_disk = DockerDisk(disk_spec, disk_num, vm_name)
volume_disk.Create()
scratch_disks.append(volume_disk)
return scratch_disks
class DockerDisk(disk.BaseDisk):
"""Object representing a Docker Volume."""
def __init__(self, disk_spec, disk_num, vm_name):
super(DockerDisk, self).__init__(disk_spec)
self.vm_name = vm_name
self.disk_num = disk_num
self.volume_name = self.vm_name + '-volume' + str(self.disk_num)
def Attach(self, vm):
pass
def Detach(self):
pass
def GetDevicePath(self):
raise errors.Error('GetDevicePath not supported for Docker.')
def _Create(self):
cmd = ['docker', 'volume', 'create', self.volume_name]
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _Delete(self):
cmd = ['docker', 'volume', 'rm', self.volume_name]
vm_util.IssueCommand(cmd, raise_on_failure=False)
def AttachVolumeInfo(self, volume_mounts):
pass
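# Hedged usage sketch: CreateDisks builds one named Docker volume per disk
# spec; the VM name below is illustrative and disk_specs stands for whatever
# disk spec objects the benchmark passes in.
#
# scratch_disks = CreateDisks(disk_specs, vm_name='pkb-vm-0')
# # -> volumes 'pkb-vm-0-volume0', 'pkb-vm-0-volume1', ... are created.
# for scratch_disk in scratch_disks:
#     scratch_disk.Delete()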
|
import argparse
import logging
import sys
from paasta_tools.mesos_maintenance import get_draining_hosts
from paasta_tools.mesos_maintenance import get_hosts_forgotten_down
from paasta_tools.mesos_maintenance import get_hosts_forgotten_draining
from paasta_tools.mesos_maintenance import reserve_all_resources
from paasta_tools.mesos_maintenance import seconds_to_nanoseconds
from paasta_tools.mesos_maintenance import undrain
from paasta_tools.mesos_maintenance import unreserve_all_resources
from paasta_tools.mesos_maintenance import up
from paasta_tools.mesos_tools import get_slaves
log = logging.getLogger(__name__)
def parse_args():
parser = argparse.ArgumentParser(
description="Cleans up forgotten maintenance cruft."
)
parser.add_argument(
"-v", "--verbose", action="store_true", dest="verbose", default=False
)
parser.add_argument(
"--disable-reservation-cleanup",
action="store_true",
dest="disable_reservation_cleanup",
default=False,
)
args = parser.parse_args()
return args
def cleanup_forgotten_draining():
"""Clean up hosts forgotten draining"""
log.debug("Cleaning up hosts forgotten draining")
hosts_forgotten_draining = get_hosts_forgotten_draining(
grace=seconds_to_nanoseconds(10 * 60)
)
if hosts_forgotten_draining:
undrain(hostnames=hosts_forgotten_draining)
else:
log.debug("No hosts forgotten draining")
def cleanup_forgotten_down():
"""Clean up hosts forgotten down"""
log.debug("Cleaning up hosts forgotten down")
hosts_forgotten_down = get_hosts_forgotten_down(
grace=seconds_to_nanoseconds(10 * 60)
)
if hosts_forgotten_down:
up(hostnames=hosts_forgotten_down)
else:
log.debug("No hosts forgotten down")
def unreserve_all_resources_on_non_draining_hosts():
"""Unreserve all resources on non-draining hosts"""
log.debug("Unreserving all resources on non-draining hosts")
slaves = get_slaves()
hostnames = [slave["hostname"] for slave in slaves]
draining_hosts = get_draining_hosts()
non_draining_hosts = list(set(hostnames) - set(draining_hosts))
if non_draining_hosts:
unreserve_all_resources(hostnames=non_draining_hosts)
else:
log.debug("No non-draining hosts")
def reserve_all_resources_on_draining_hosts():
"""Reserve all resources on draining hosts"""
log.debug("Reserving all resources on draining hosts")
draining_hosts = get_draining_hosts()
if draining_hosts:
reserve_all_resources(hostnames=draining_hosts)
else:
log.debug("No draining hosts")
def main():
log.debug("Cleaning up maintenance cruft")
args = parse_args()
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
cleanup_forgotten_draining()
cleanup_forgotten_down()
if not args.disable_reservation_cleanup:
unreserve_all_resources_on_non_draining_hosts()
reserve_all_resources_on_draining_hosts()
if __name__ == "__main__":
if main():
sys.exit(0)
sys.exit(1)
|
from django.test import TestCase
from weblate.checks.tests.test_checks import MockUnit
from weblate.trans.autofixes import fix_target
from weblate.trans.autofixes.chars import (
RemoveControlChars,
RemoveZeroSpace,
ReplaceTrailingDotsWithEllipsis,
)
from weblate.trans.autofixes.custom import DoubleApostrophes
from weblate.trans.autofixes.html import BleachHTML
from weblate.trans.autofixes.whitespace import SameBookendingWhitespace
class AutoFixTest(TestCase):
def test_ellipsis(self):
unit = MockUnit(source="Foo…")
fix = ReplaceTrailingDotsWithEllipsis()
self.assertEqual(fix.fix_target(["Bar..."], unit), (["Bar…"], True))
self.assertEqual(fix.fix_target(["Bar... "], unit), (["Bar... "], False))
def test_no_ellipsis(self):
unit = MockUnit(source="Foo...")
fix = ReplaceTrailingDotsWithEllipsis()
self.assertEqual(fix.fix_target(["Bar..."], unit), (["Bar..."], False))
self.assertEqual(fix.fix_target(["Bar…"], unit), (["Bar…"], False))
def test_whitespace(self):
unit = MockUnit(source="Foo\n")
fix = SameBookendingWhitespace()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar\n"], True))
self.assertEqual(fix.fix_target(["Bar\n"], unit), (["Bar\n"], False))
unit = MockUnit(source=" ")
self.assertEqual(fix.fix_target([" "], unit), ([" "], False))
def test_no_whitespace(self):
unit = MockUnit(source="Foo")
fix = SameBookendingWhitespace()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\n"], unit), (["Bar"], True))
def test_whitespace_flags(self):
fix = SameBookendingWhitespace()
unit = MockUnit(source="str", flags="ignore-begin-space")
self.assertEqual(fix.fix_target([" str"], unit), ([" str"], False))
unit = MockUnit(source="str", flags="ignore-end-space")
self.assertEqual(fix.fix_target([" str "], unit), (["str "], True))
def test_html(self):
fix = BleachHTML()
unit = MockUnit(source='<a href="script:foo()">link</a>', flags="safe-html")
self.assertEqual(
fix.fix_target(['<a href="script:foo()">link</a>'], unit),
(["<a>link</a>"], True),
)
self.assertEqual(
fix.fix_target(['<a href="#" onclick="foo()">link</a>'], unit),
(['<a href="#">link</a>'], True),
)
def test_zerospace(self):
unit = MockUnit(source="Foo\u200b")
fix = RemoveZeroSpace()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\u200b"], unit), (["Bar\u200b"], False))
def test_no_zerospace(self):
unit = MockUnit(source="Foo")
fix = RemoveZeroSpace()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\u200b"], unit), (["Bar"], True))
def test_controlchars(self):
unit = MockUnit(source="Foo\x1b")
fix = RemoveControlChars()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\x1b"], unit), (["Bar\x1b"], False))
self.assertEqual(fix.fix_target(["Bar\n"], unit), (["Bar\n"], False))
def test_no_controlchars(self):
unit = MockUnit(source="Foo")
fix = RemoveControlChars()
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
self.assertEqual(fix.fix_target(["Bar\x1b"], unit), (["Bar"], True))
self.assertEqual(fix.fix_target(["Bar\n"], unit), (["Bar\n"], False))
def test_fix_target(self):
unit = MockUnit(source="Foo…")
fixed, fixups = fix_target(["Bar..."], unit)
self.assertEqual(fixed, ["Bar…"])
self.assertEqual(len(fixups), 1)
self.assertEqual(str(fixups[0]), "Trailing ellipsis")
def test_apostrophes(self):
unit = MockUnit(source="Foo")
fix = DoubleApostrophes()
# No flags
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
# No format string, but forced
unit.flags = "java-messageformat"
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
# No format string
unit.flags = "auto-java-messageformat"
self.assertEqual(fix.fix_target(["Bar"], unit), (["Bar"], False))
unit.source = "test {0}"
# Nothing to fix
self.assertEqual(fix.fix_target(["r {0}"], unit), (["r {0}"], False))
# Correct string
self.assertEqual(fix.fix_target(["''r'' {0}"], unit), (["''r'' {0}"], False))
# String with quoted format string
self.assertEqual(
fix.fix_target(["''r'' '{0}'"], unit), (["''r'' '{0}'"], False)
)
# Fixes
self.assertEqual(fix.fix_target(["'r''' {0}"], unit), (["''r'' {0}"], True))
# Fixes keeping double ones
self.assertEqual(
fix.fix_target(["'''''''r'''' {0}"], unit), (["''''r'''' {0}"], True)
)
# Quoted format
self.assertEqual(fix.fix_target(["'r''' {0}"], unit), (["''r'' {0}"], True))
unit.source = "foo"
unit.flags = "java-messageformat"
self.assertEqual(fix.fix_target(["bar'"], unit), (["bar''"], True))
|
import os
from celery.beat import Service
from django.conf import settings
from weblate.utils.celery import app
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
help = "removes incompatible celery schedule file"
@staticmethod
def try_remove(filename):
if os.path.exists(filename):
os.remove(filename)
@staticmethod
def setup_schedule():
service = Service(app=app)
scheduler = service.get_scheduler()
scheduler.setup_schedule()
def handle(self, *args, **options):
try:
self.setup_schedule()
except Exception as error:
self.stderr.write(f"Removing corrupted schedule file: {error!r}")
self.try_remove(settings.CELERY_BEAT_SCHEDULE_FILENAME)
self.try_remove(settings.CELERY_BEAT_SCHEDULE_FILENAME + ".db")
self.setup_schedule()
|
import logging
from typing import Optional
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.util import dt
from .const import DEFAULT_ICON, DOMAIN, FEED
_LOGGER = logging.getLogger(__name__)
ATTR_STATUS = "status"
ATTR_LAST_UPDATE = "last_update"
ATTR_LAST_UPDATE_SUCCESSFUL = "last_update_successful"
ATTR_LAST_TIMESTAMP = "last_timestamp"
ATTR_CREATED = "created"
ATTR_UPDATED = "updated"
ATTR_REMOVED = "removed"
DEFAULT_UNIT_OF_MEASUREMENT = "alerts"
# An update of this entity does not make a web request; it uses internal data only.
PARALLEL_UPDATES = 0
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the GDACS Feed platform."""
manager = hass.data[DOMAIN][FEED][entry.entry_id]
sensor = GdacsSensor(entry.entry_id, entry.unique_id, entry.title, manager)
async_add_entities([sensor])
_LOGGER.debug("Sensor setup done")
class GdacsSensor(Entity):
"""This is a status sensor for the GDACS integration."""
def __init__(self, config_entry_id, config_unique_id, config_title, manager):
"""Initialize entity."""
self._config_entry_id = config_entry_id
self._config_unique_id = config_unique_id
self._config_title = config_title
self._manager = manager
self._status = None
self._last_update = None
self._last_update_successful = None
self._last_timestamp = None
self._total = None
self._created = None
self._updated = None
self._removed = None
self._remove_signal_status = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._remove_signal_status = async_dispatcher_connect(
self.hass,
f"gdacs_status_{self._config_entry_id}",
self._update_status_callback,
)
_LOGGER.debug("Waiting for updates %s", self._config_entry_id)
# First update is manual because of how the feed entity manager is updated.
await self.async_update()
async def async_will_remove_from_hass(self) -> None:
"""Call when entity will be removed from hass."""
if self._remove_signal_status:
self._remove_signal_status()
@callback
def _update_status_callback(self):
"""Call status update method."""
_LOGGER.debug("Received status update for %s", self._config_entry_id)
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed for GDACS status sensor."""
return False
async def async_update(self):
"""Update this entity from the data held in the feed manager."""
_LOGGER.debug("Updating %s", self._config_entry_id)
if self._manager:
status_info = self._manager.status_info()
if status_info:
self._update_from_status_info(status_info)
def _update_from_status_info(self, status_info):
"""Update the internal state from the provided information."""
self._status = status_info.status
self._last_update = (
dt.as_utc(status_info.last_update) if status_info.last_update else None
)
if status_info.last_update_successful:
self._last_update_successful = dt.as_utc(status_info.last_update_successful)
else:
self._last_update_successful = None
self._last_timestamp = status_info.last_timestamp
self._total = status_info.total
self._created = status_info.created
self._updated = status_info.updated
self._removed = status_info.removed
@property
def state(self):
"""Return the state of the sensor."""
return self._total
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID containing latitude/longitude."""
return self._config_unique_id
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return f"GDACS ({self._config_title})"
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return DEFAULT_ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return DEFAULT_UNIT_OF_MEASUREMENT
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
for key, value in (
(ATTR_STATUS, self._status),
(ATTR_LAST_UPDATE, self._last_update),
(ATTR_LAST_UPDATE_SUCCESSFUL, self._last_update_successful),
(ATTR_LAST_TIMESTAMP, self._last_timestamp),
(ATTR_CREATED, self._created),
(ATTR_UPDATED, self._updated),
(ATTR_REMOVED, self._removed),
):
if value or isinstance(value, bool):
attributes[key] = value
return attributes
|
import pytest
from homeassistant.const import (
VOLUME_FLUID_OUNCE,
VOLUME_GALLONS,
VOLUME_LITERS,
VOLUME_MILLILITERS,
)
import homeassistant.util.volume as volume_util
INVALID_SYMBOL = "bob"
VALID_SYMBOL = VOLUME_LITERS
def test_convert_same_unit():
"""Test conversion from any unit to same unit."""
assert volume_util.convert(2, VOLUME_LITERS, VOLUME_LITERS) == 2
assert volume_util.convert(3, VOLUME_MILLILITERS, VOLUME_MILLILITERS) == 3
assert volume_util.convert(4, VOLUME_GALLONS, VOLUME_GALLONS) == 4
assert volume_util.convert(5, VOLUME_FLUID_OUNCE, VOLUME_FLUID_OUNCE) == 5
def test_convert_invalid_unit():
"""Test exception is thrown for invalid units."""
with pytest.raises(ValueError):
volume_util.convert(5, INVALID_SYMBOL, VALID_SYMBOL)
with pytest.raises(ValueError):
volume_util.convert(5, VALID_SYMBOL, INVALID_SYMBOL)
def test_convert_nonnumeric_value():
"""Test exception is thrown for nonnumeric type."""
with pytest.raises(TypeError):
volume_util.convert("a", VOLUME_GALLONS, VOLUME_LITERS)
def test_convert_from_liters():
"""Test conversion from liters to other units."""
liters = 5
assert volume_util.convert(liters, VOLUME_LITERS, VOLUME_GALLONS) == 1.321
def test_convert_from_gallons():
"""Test conversion from gallons to other units."""
gallons = 5
assert volume_util.convert(gallons, VOLUME_GALLONS, VOLUME_LITERS) == 18.925
|
import re
from django.conf import settings
from django.http import QueryDict
from django.test.utils import override_settings
from django.urls import reverse
from weblate.trans.models import Component
from weblate.trans.tests.test_views import ViewTestCase
from weblate.utils.ratelimit import reset_rate_limit
from weblate.utils.state import STATE_FUZZY, STATE_READONLY, STATE_TRANSLATED
class SearchViewTest(ViewTestCase):
@classmethod
def _databases_support_transactions(cls):
        # This is a workaround for MySQL as FULL TEXT index does not work
# well inside a transaction, so we avoid using transactions for
# tests. Otherwise we end up with no matches for the query.
# See https://dev.mysql.com/doc/refman/5.6/en/innodb-fulltext-index.html
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
return False
return super()._databases_support_transactions()
def setUp(self):
super().setUp()
self.translation = self.component.translation_set.get(language_code="cs")
self.translate_url = self.translation.get_translate_url()
self.update_fulltext_index()
reset_rate_limit("search", address="127.0.0.1")
def do_search(self, params, expected, url=None):
"""Helper method for performing search test."""
if url is None:
url = self.translate_url
response = self.client.get(url, params)
if expected is None:
self.assertRedirects(response, self.translation.get_absolute_url())
else:
self.assertContains(response, expected)
return response
def do_search_url(self, url):
"""Test search on given URL."""
response = self.client.get(url, {"q": "hello"})
self.assertContains(response, '<span class="hlmatch">Hello</span>, world')
response = self.client.get(url, {"q": "changed:>=2010-01-10"})
self.assertContains(response, "2010-01-10")
@override_settings(RATELIMIT_SEARCH_ATTEMPTS=20000)
def test_all_search(self):
"""Searching in all projects."""
response = self.client.get(reverse("search"), {"q": "hello"})
self.assertContains(response, '<span class="hlmatch">Hello</span>, world')
response = self.client.get(reverse("search"), {"q": 'source:r"^Hello"'})
self.assertContains(response, "Hello, world")
response = self.client.get(reverse("search"), {"q": 'source:r"^(Hello"'})
self.assertContains(response, "Invalid regular expression")
response = self.client.get(
reverse("search"), {"q": "hello AND state:<translated"}
)
self.assertContains(response, "Hello, world")
response = self.client.get(reverse("search"), {"q": "hello AND state:empty"})
self.assertContains(response, "Hello, world")
response = self.client.get(reverse("search"), {"q": "check:php_format"})
self.assertContains(response, "No matching strings found.")
response = self.client.get(
reverse("search"), {"q": "check:php_format", "ignored": "1"}
)
self.assertContains(response, "No matching strings found.")
self.do_search_url(reverse("search"))
def test_pagination(self):
response = self.client.get(reverse("search"), {"q": "hello", "page": 1})
self.assertContains(response, '<span class="hlmatch">Hello</span>, world')
response = self.client.get(reverse("search"), {"q": "hello", "page": 10})
self.assertContains(response, '<span class="hlmatch">Hello</span>, world')
response = self.client.get(reverse("search"), {"q": "hello", "page": "x"})
self.assertContains(response, '<span class="hlmatch">Hello</span>, world')
    def test_language_search(self):
        """Searching in all projects with a language filter."""
response = self.client.get(reverse("search"), {"q": "hello", "lang": "cs"})
self.assertContains(response, '<span class="hlmatch">Hello</span>, world')
def test_project_search(self):
"""Searching within project."""
self.do_search_url(reverse("search", kwargs=self.kw_project))
def test_component_search(self):
"""Searching within component."""
self.do_search_url(reverse("search", kwargs=self.kw_component))
def test_project_language_search(self):
"""Searching within project."""
self.do_search_url(
reverse("search", kwargs={"project": self.project.slug, "lang": "cs"})
)
def test_translation_search(self):
"""Searching within translation."""
# Default
self.do_search({"q": "source:hello"}, "source:hello")
# Short exact
self.do_search({"q": "x", "search": "exact"}, None)
def test_review(self):
# Review
self.do_search({"q": "changed:>=2010-01-10"}, None)
self.do_search({"q": "changed:>=2010-01-10 AND NOT changed_by:testuser"}, None)
self.do_search({"q": "changed:>2010-01-10 AND changed_by:testuser"}, None)
self.do_search({"q": "changed_by:testuser"}, None)
# Review, partial date
self.do_search({"q": "changed:>=2010-01-"}, "Unknown string format: 2010-01-")
def extract_params(self, response):
search_url = re.findall(r'data-params="([^"]*)"', response.content.decode())[0]
return QueryDict(search_url, mutable=True)
def test_search_links(self):
response = self.do_search({"q": "source:Weblate"}, "source:Weblate")
# Extract search URL
params = self.extract_params(response)
# Try access to pages
params["offset"] = 1
response = self.client.get(self.translate_url, params)
self.assertContains(response, "https://demo.weblate.org/")
params["offset"] = 2
response = self.client.get(self.translate_url, params)
self.assertContains(response, "Thank you for using Weblate.")
# Invalid offset
params["offset"] = "bug"
response = self.client.get(self.translate_url, params)
self.assertContains(response, "https://demo.weblate.org/")
# Go to end
params["offset"] = 3
response = self.client.get(self.translate_url, params)
self.assertRedirects(response, self.translation.get_absolute_url())
# Try no longer cached query (should be deleted above)
params["offset"] = 2
response = self.client.get(self.translate_url, params)
self.assertContains(response, "Thank you for using Weblate.")
def test_search_checksum(self):
unit = self.translation.unit_set.get(
source="Try Weblate at <https://demo.weblate.org/>!\n"
)
self.do_search({"checksum": unit.checksum}, "3 / 4")
def test_search_offset(self):
"""Test offset navigation."""
self.do_search({"offset": 1}, "1 / 4")
self.do_search({"offset": 4}, "4 / 4")
self.do_search({"offset": 5}, None)
def test_search_type(self):
self.do_search({"q": "state:<translated"}, "Strings needing action")
self.do_search({"q": "state:needs-editing"}, None)
self.do_search({"q": "has:suggestion"}, None)
self.do_search({"q": "has:check"}, None)
self.do_search({"q": "check:plurals"}, None)
self.do_search({"q": ""}, "1 / 4")
def test_search_plural(self):
response = self.do_search({"q": "banana"}, "banana")
self.assertContains(response, "One")
self.assertContains(response, "Few")
self.assertContains(response, "Other")
self.assertNotContains(response, "Plural form ")
def test_checksum(self):
self.do_search({"checksum": "invalid"}, "Invalid checksum specified!")
class ReplaceTest(ViewTestCase):
"""Test for search and replace functionality."""
def setUp(self):
super().setUp()
self.edit_unit("Hello, world!\n", "Nazdar svete!\n")
self.unit = self.get_unit()
def do_replace_test(self, url, confirm=True):
response = self.client.post(
url, {"search": "Nazdar", "replacement": "Ahoj"}, follow=True
)
self.assertContains(
response, "Please review and confirm the search and replace results."
)
payload = {"search": "Nazdar", "replacement": "Ahoj", "confirm": "1"}
if confirm:
payload["units"] = self.unit.pk
response = self.client.post(url, payload, follow=True)
unit = self.get_unit()
if confirm:
self.assertContains(
response, "Search and replace completed, 1 string was updated."
)
self.assertEqual(unit.target, "Ahoj svete!\n")
else:
self.assertContains(
response, "Search and replace completed, no strings were updated."
)
self.assertEqual(unit.target, "Nazdar svete!\n")
def test_no_match(self):
response = self.client.post(
reverse("replace", kwargs=self.kw_translation),
{"search": "Ahoj", "replacement": "Cau"},
follow=True,
)
self.assertContains(
response, "Search and replace completed, no strings were updated."
)
unit = self.get_unit()
self.assertEqual(unit.target, "Nazdar svete!\n")
def test_replace(self):
self.do_replace_test(reverse("replace", kwargs=self.kw_translation))
def test_replace_project(self):
self.do_replace_test(reverse("replace", kwargs=self.kw_project))
def test_replace_component(self):
self.do_replace_test(reverse("replace", kwargs=self.kw_component))
class BulkEditTest(ViewTestCase):
    """Test for bulk edit functionality."""
def setUp(self):
super().setUp()
self.edit_unit("Hello, world!\n", "Nazdar svete!\n", fuzzy=True)
self.unit = self.get_unit()
self.make_manager()
def do_bulk_edit_test(self, url):
response = self.client.post(
url, {"q": "state:needs-editing", "state": STATE_TRANSLATED}, follow=True
)
self.assertContains(response, "Bulk edit completed, 1 string was updated.")
self.assertEqual(self.get_unit().state, STATE_TRANSLATED)
def test_no_match(self):
response = self.client.post(
reverse("bulk-edit", kwargs=self.kw_project),
{"q": "state:approved", "state": STATE_FUZZY},
follow=True,
)
self.assertContains(response, "Bulk edit completed, no strings were updated.")
unit = self.get_unit()
self.assertEqual(unit.state, STATE_FUZZY)
def test_bulk_edit(self):
self.do_bulk_edit_test(reverse("bulk-edit", kwargs=self.kw_translation))
def test_bulk_edit_project(self):
self.do_bulk_edit_test(reverse("bulk-edit", kwargs=self.kw_project))
def test_bulk_edit_component(self):
self.do_bulk_edit_test(reverse("bulk-edit", kwargs=self.kw_component))
def test_bulk_flags(self):
response = self.client.post(
reverse("bulk-edit", kwargs=self.kw_project),
{"q": "state:needs-editing", "state": -1, "add_flags": "python-format"},
follow=True,
)
self.assertContains(response, "Bulk edit completed, 1 string was updated.")
unit = self.get_unit()
self.assertTrue("python-format" in unit.all_flags)
response = self.client.post(
reverse("bulk-edit", kwargs=self.kw_project),
{"q": "state:needs-editing", "state": -1, "remove_flags": "python-format"},
follow=True,
)
self.assertContains(response, "Bulk edit completed, 1 string was updated.")
unit = self.get_unit()
self.assertFalse("python-format" in unit.all_flags)
def test_bulk_read_only(self):
response = self.client.post(
reverse("bulk-edit", kwargs=self.kw_project),
{"q": "language:en", "state": -1, "add_flags": "read-only"},
follow=True,
)
self.assertContains(response, "Bulk edit completed, 4 strings were updated.")
unit = self.get_unit()
self.assertTrue("read-only" in unit.all_flags)
response = self.client.post(
reverse("bulk-edit", kwargs=self.kw_project),
{"q": "language:en", "state": -1, "remove_flags": "read-only"},
follow=True,
)
self.assertContains(response, "Bulk edit completed, 4 strings were updated.")
unit = self.get_unit()
self.assertFalse("read-only" in unit.all_flags)
def test_bulk_labels(self):
label = self.project.label_set.create(name="Test label", color="black")
response = self.client.post(
reverse("bulk-edit", kwargs=self.kw_project),
{"q": "state:needs-editing", "state": -1, "add_labels": label.pk},
follow=True,
)
self.assertContains(response, "Bulk edit completed, 1 string was updated.")
unit = self.get_unit()
self.assertTrue(label in unit.all_labels)
self.assertEqual(getattr(unit.translation.stats, f"label:{label.name}"), 1)
# Clear local outdated cache
unit.source_unit.translation.stats.clear()
self.assertEqual(
getattr(unit.source_unit.translation.stats, f"label:{label.name}"),
1,
)
response = self.client.post(
reverse("bulk-edit", kwargs=self.kw_project),
{"q": "state:needs-editing", "state": -1, "remove_labels": label.pk},
follow=True,
)
self.assertContains(response, "Bulk edit completed, 1 string was updated.")
unit = self.get_unit()
self.assertFalse(label in unit.labels.all())
self.assertEqual(getattr(unit.translation.stats, f"label:{label.name}"), 0)
# Clear local outdated cache
unit.source_unit.translation.stats.clear()
self.assertEqual(
getattr(unit.source_unit.translation.stats, f"label:{label.name}"),
0,
)
def test_source_state(self):
mono = Component.objects.create(
name="Test2",
slug="test2",
project=self.project,
repo="weblate://test/test",
file_format="json",
filemask="json-mono/*.json",
template="json-mono/en.json",
)
# Translate single unit
translation = mono.translation_set.get(language_code="cs")
translation.unit_set.get(context="hello").translate(
self.user, "Ahoj světe", STATE_TRANSLATED
)
self.assertEqual(translation.unit_set.filter(state=STATE_READONLY).count(), 0)
self.assertEqual(translation.unit_set.filter(state=STATE_TRANSLATED).count(), 1)
url = reverse(
"bulk-edit", kwargs={"project": self.project.slug, "component": mono.slug}
)
# Mark all source strings as needing edit and that should turn all
# translated strings read-only
response = self.client.post(
url, {"q": "language:en", "state": STATE_FUZZY}, follow=True
)
self.assertContains(response, "Bulk edit completed, 4 strings were updated.")
self.assertEqual(translation.unit_set.filter(state=STATE_READONLY).count(), 4)
self.assertEqual(translation.unit_set.filter(state=STATE_TRANSLATED).count(), 0)
# Mark all source strings as needing edit and that should turn all
# translated strings back to translated
response = self.client.post(
url, {"q": "language:en", "state": STATE_TRANSLATED}, follow=True
)
self.assertContains(response, "Bulk edit completed, 4 strings were updated.")
self.assertEqual(translation.unit_set.filter(state=STATE_READONLY).count(), 0)
self.assertEqual(translation.unit_set.filter(state=STATE_TRANSLATED).count(), 1)
|
import functools
import json
import sys
from typing import Sequence
from a_sync import block
from paasta_tools.mesos.exceptions import MasterNotAvailableException
from paasta_tools.mesos_tools import get_mesos_master
from paasta_tools.metrics.metastatus_lib import (
calculate_resource_utilization_for_slaves,
)
from paasta_tools.metrics.metastatus_lib import filter_tasks_for_slaves
from paasta_tools.metrics.metastatus_lib import get_all_tasks_from_state
from paasta_tools.metrics.metastatus_lib import (
resource_utillizations_from_resource_info,
)
from paasta_tools.utils import PaastaColors
def main(hostnames: Sequence[str]) -> None:
master = get_mesos_master()
try:
mesos_state = block(master.state)
except MasterNotAvailableException as e:
print(PaastaColors.red("CRITICAL: %s" % e.message))
sys.exit(2)
slaves = [
slave
for slave in mesos_state.get("slaves", [])
if slave["hostname"] in hostnames
]
tasks = get_all_tasks_from_state(mesos_state, include_orphans=True)
filtered_tasks = filter_tasks_for_slaves(slaves, tasks)
resource_info_dict = calculate_resource_utilization_for_slaves(
slaves, filtered_tasks
)
resource_utilizations = resource_utillizations_from_resource_info(
total=resource_info_dict["total"], free=resource_info_dict["free"]
)
output = {}
for metric in resource_utilizations:
utilization = metric.total - metric.free
if int(metric.total) == 0:
utilization_perc = 100
else:
utilization_perc = utilization / float(metric.total) * 100
output[metric.metric] = {
"total": metric.total,
"used": utilization,
"perc": utilization_perc,
}
print(json.dumps(output))
if __name__ == "__main__":
hostnames = functools.reduce(lambda x, y: x + [y.strip()], sys.stdin, [])
main(hostnames)
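# Hedged usage sketch: hostnames are read one per line from stdin and the
# script prints a JSON object keyed by metric name (script name, metric names
# and numbers below are illustrative, not real output):
#
#   printf 'host1\nhost2\n' | python resource_utilization_for_hosts.py
#   {"cpus": {"total": 32.0, "used": 12.5, "perc": 39.0625}, ...}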
|
import sh
from molecule import logger
from molecule import util
LOG = logger.get_logger(__name__)
class AnsiblePlaybook(object):
def __init__(self, playbook, config, out=LOG.out, err=LOG.error):
"""
Sets up the requirements to execute ``ansible-playbook`` and returns
None.
:param playbook: A string containing the path to the playbook.
:param config: An instance of a Molecule config.
:param out: An optional function to process STDOUT for underlying
:func:``sh`` call.
:param err: An optional function to process STDERR for underlying
:func:``sh`` call.
:returns: None
"""
self._ansible_command = None
self._playbook = playbook
self._config = config
self._out = out
self._err = err
self._cli = {}
self._env = self._config.provisioner.env
def bake(self):
"""
Bake an ``ansible-playbook`` command so it's ready to execute and
returns ``None``.
:return: None
"""
# Pass a directory as inventory to let Ansible merge the multiple
        # inventory sources located under that directory
self.add_cli_arg('inventory',
self._config.provisioner.inventory_directory)
options = util.merge_dicts(self._config.provisioner.options, self._cli)
verbose_flag = util.verbose_flag(options)
if self._playbook != self._config.provisioner.playbooks.converge:
if options.get('become'):
del options['become']
self._ansible_command = sh.ansible_playbook.bake(
options,
self._playbook,
*verbose_flag,
_cwd=self._config.scenario.directory,
_env=self._env,
_out=self._out,
_err=self._err)
ansible_args = (list(self._config.provisioner.ansible_args) + list(
self._config.ansible_args))
if ansible_args:
if self._config.action not in ['create', 'destroy']:
self._ansible_command = self._ansible_command.bake(
ansible_args)
def execute(self):
"""
Executes ``ansible-playbook`` and returns a string.
:return: str
"""
if self._ansible_command is None:
self.bake()
try:
self._config.driver.sanity_checks()
cmd = util.run_command(
self._ansible_command, debug=self._config.debug)
return cmd.stdout.decode('utf-8')
except sh.ErrorReturnCode as e:
out = e.stdout.decode('utf-8')
util.sysexit_with_message(str(out), e.exit_code)
def add_cli_arg(self, name, value):
"""
Adds argument to CLI passed to ansible-playbook and returns None.
:param name: A string containing the name of argument to be added.
:param value: The value of argument to be added.
:return: None
"""
if value:
self._cli[name] = value
def add_env_arg(self, name, value):
"""
Adds argument to environment passed to ansible-playbook and returns
None.
:param name: A string containing the name of argument to be added.
:param value: The value of argument to be added.
:return: None
"""
self._env[name] = value
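# Hedged usage sketch: a Molecule provisioner would normally construct this
# class with a playbook path and its config object and then call execute();
# bake() runs implicitly when the command has not been prepared yet. The
# playbook path and environment variable below are illustrative.
#
# playbook = AnsiblePlaybook('converge.yml', config)
# playbook.add_env_arg('ANSIBLE_FORCE_COLOR', 'true')
# output = playbook.execute()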
|
import io
import os
import re
from coverage import env
from coverage.backward import configparser, path_types
from coverage.misc import CoverageException, substitute_variables
class TomlDecodeError(Exception):
"""An exception class that exists even when toml isn't installed."""
pass
class TomlConfigParser:
"""TOML file reading with the interface of HandyConfigParser."""
# This class has the same interface as config.HandyConfigParser, no
# need for docstrings.
# pylint: disable=missing-function-docstring
def __init__(self, our_file):
self.our_file = our_file
self.data = None
def read(self, filenames):
from coverage.optional import toml
# RawConfigParser takes a filename or list of filenames, but we only
# ever call this with a single filename.
assert isinstance(filenames, path_types)
filename = filenames
if env.PYVERSION >= (3, 6):
filename = os.fspath(filename)
try:
with io.open(filename, encoding='utf-8') as fp:
toml_text = fp.read()
except IOError:
return []
if toml:
toml_text = substitute_variables(toml_text, os.environ)
try:
self.data = toml.loads(toml_text)
except toml.TomlDecodeError as err:
raise TomlDecodeError(*err.args)
return [filename]
else:
has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE)
if self.our_file or has_toml:
# Looks like they meant to read TOML, but we can't read it.
msg = "Can't read {!r} without TOML support. Install with [toml] extra"
raise CoverageException(msg.format(filename))
return []
def _get_section(self, section):
"""Get a section from the data.
Arguments:
section (str): A section name, which can be dotted.
Returns:
name (str): the actual name of the section that was found, if any,
or None.
data (str): the dict of data in the section, or None if not found.
"""
prefixes = ["tool.coverage."]
if self.our_file:
prefixes.append("")
for prefix in prefixes:
real_section = prefix + section
parts = real_section.split(".")
try:
data = self.data[parts[0]]
for part in parts[1:]:
data = data[part]
except KeyError:
continue
break
else:
return None, None
return real_section, data
def _get(self, section, option):
"""Like .get, but returns the real section name and the value."""
name, data = self._get_section(section)
if data is None:
raise configparser.NoSectionError(section)
try:
return name, data[option]
except KeyError:
raise configparser.NoOptionError(option, name)
def has_option(self, section, option):
_, data = self._get_section(section)
if data is None:
return False
return option in data
def has_section(self, section):
name, _ = self._get_section(section)
return name
def options(self, section):
_, data = self._get_section(section)
if data is None:
raise configparser.NoSectionError(section)
return list(data.keys())
def get_section(self, section):
_, data = self._get_section(section)
return data
def get(self, section, option):
_, value = self._get(section, option)
return value
def _check_type(self, section, option, value, type_, type_desc):
if not isinstance(value, type_):
raise ValueError(
'Option {!r} in section {!r} is not {}: {!r}'
.format(option, section, type_desc, value)
)
def getboolean(self, section, option):
name, value = self._get(section, option)
self._check_type(name, option, value, bool, "a boolean")
return value
def getlist(self, section, option):
name, values = self._get(section, option)
self._check_type(name, option, values, list, "a list")
return values
def getregexlist(self, section, option):
name, values = self._get(section, option)
self._check_type(name, option, values, list, "a list")
for value in values:
value = value.strip()
try:
re.compile(value)
except re.error as e:
raise CoverageException(
"Invalid [%s].%s value %r: %s" % (name, option, value, e)
)
return values
def getint(self, section, option):
name, value = self._get(section, option)
self._check_type(name, option, value, int, "an integer")
return value
def getfloat(self, section, option):
name, value = self._get(section, option)
if isinstance(value, int):
value = float(value)
self._check_type(name, option, value, float, "a float")
return value
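# Hedged usage sketch: section names are looked up under the "tool.coverage."
# prefix (and also bare when our_file is true). The file name and option below
# are illustrative and assume the optional TOML support is installed.
#
# parser = TomlConfigParser(our_file=False)
# read_files = parser.read("pyproject.toml")
# if read_files and parser.has_section("run"):
#     branch = parser.getboolean("run", "branch")  # [tool.coverage.run] branch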
|
import re
import urllib2
import base64
import csv
import socket
import diamond.collector
class HAProxyCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(HAProxyCollector, self).get_default_config_help()
config_help.update({
'method': "Method to use for data collection. Possible values: " +
"http, unix",
'url': "Url to stats in csv format",
'user': "Username",
'pass': "Password",
'sock': "Path to admin UNIX-domain socket",
'ignore_servers': "Ignore servers, just collect frontend and " +
"backend stats",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(HAProxyCollector, self).get_default_config()
config.update({
'method': 'http',
'path': 'haproxy',
'url': 'http://localhost/haproxy?stats;csv',
'user': 'admin',
'pass': 'password',
'sock': '/var/run/haproxy.sock',
'ignore_servers': False,
})
return config
def _get_config_value(self, section, key):
if section:
if section not in self.config:
self.log.error("Error: Config section '%s' not found", section)
return None
return self.config[section].get(key, self.config[key])
else:
return self.config[key]
def http_get_csv_data(self, section=None):
"""
Request stats from HAProxy Server
"""
metrics = []
req = urllib2.Request(self._get_config_value(section, 'url'))
try:
handle = urllib2.urlopen(req)
return handle.readlines()
except Exception as e:
if not hasattr(e, 'code') or e.code != 401:
self.log.error("Error retrieving HAProxy stats. %s", e)
return metrics
# get the www-authenticate line from the headers
# which has the authentication scheme and realm in it
authline = e.headers['www-authenticate']
# this regular expression is used to extract scheme and realm
            authre = (r'''(?:\s*www-authenticate\s*:)?\s*''' +
                      r'''(\w*)\s+realm=['"]([^'"]+)['"]''')
authobj = re.compile(authre, re.IGNORECASE)
matchobj = authobj.match(authline)
if not matchobj:
# if the authline isn't matched by the regular expression
# then something is wrong
self.log.error('The authentication header is malformed.')
return metrics
scheme = matchobj.group(1)
# here we've extracted the scheme
# and the realm from the header
if scheme.lower() != 'basic':
self.log.error('Invalid authentication scheme.')
return metrics
base64string = base64.encodestring(
'%s:%s' % (self._get_config_value(section, 'user'),
self._get_config_value(section, 'pass')))[:-1]
authheader = 'Basic %s' % base64string
req.add_header("Authorization", authheader)
try:
handle = urllib2.urlopen(req)
metrics = handle.readlines()
return metrics
except IOError as e:
# here we shouldn't fail if the USER/PASS is right
self.log.error("Error retrieving HAProxy stats. " +
"(Invalid username or password?) %s", e)
return metrics
def unix_get_csv_data(self):
sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
data = str()
try:
sock.connect(self.config['sock'])
sock.send('show stat\n')
while 1:
buf = sock.recv(4096)
if not buf:
break
data += buf
except socket.error as e:
self.log.error("Error retrieving HAProxy stats. %s", e)
return []
return data.strip().split('\n')
def _generate_headings(self, row):
headings = {}
for index, heading in enumerate(row):
headings[index] = self._sanitize(heading)
return headings
def _collect(self, section=None):
"""
Collect HAProxy Stats
"""
if self.config['method'] == 'http':
csv_data = self.http_get_csv_data(section)
elif self.config['method'] == 'unix':
csv_data = self.unix_get_csv_data()
else:
self.log.error("Unknown collection method: %s",
self.config['method'])
csv_data = []
data = list(csv.reader(csv_data))
headings = self._generate_headings(data[0])
section_name = section and self._sanitize(section.lower()) + '.' or ''
for row in data:
if ((self._get_config_value(section, 'ignore_servers') and
row[1].lower() not in ['frontend', 'backend'])):
continue
part_one = self._sanitize(row[0].lower())
part_two = self._sanitize(row[1].lower())
metric_name = '%s%s.%s' % (section_name, part_one, part_two)
for index, metric_string in enumerate(row):
try:
metric_value = float(metric_string)
except ValueError:
continue
stat_name = '%s.%s' % (metric_name, headings[index])
self.publish(stat_name, metric_value, metric_type='GAUGE')
def collect(self):
if 'servers' in self.config:
if isinstance(self.config['servers'], list):
for serv in self.config['servers']:
self._collect(serv)
else:
self._collect(self.config['servers'])
else:
self._collect()
def _sanitize(self, s):
"""Sanitize the name of a metric to remove unwanted chars
"""
        return re.sub(r'[^\w-]', '_', s)
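# Hedged configuration sketch: the keys mirror get_default_config above. With
# a 'servers' entry, _collect runs once per named section and each section may
# override 'url', 'user' or 'pass'. Host names and the exact section syntax
# below are illustrative and depend on how your diamond config is laid out.
#
# HAProxyCollector.conf:
#     method = http
#     servers = lb1, lb2
#     [lb1]
#     url = http://lb1.example.com/haproxy?stats;csv
#     [lb2]
#     url = http://lb2.example.com/haproxy?stats;csv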
|
from homeassistant import data_entry_flow
from homeassistant.components.enocean.config_flow import EnOceanFlowHandler
from homeassistant.components.enocean.const import DOMAIN
from homeassistant.const import CONF_DEVICE
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
DONGLE_VALIDATE_PATH_METHOD = "homeassistant.components.enocean.dongle.validate_path"
DONGLE_DETECT_METHOD = "homeassistant.components.enocean.dongle.detect"
async def test_user_flow_cannot_create_multiple_instances(hass):
"""Test that the user flow aborts if an instance is already configured."""
entry = MockConfigEntry(
domain=DOMAIN, data={CONF_DEVICE: "/already/configured/path"}
)
entry.add_to_hass(hass)
with patch(DONGLE_VALIDATE_PATH_METHOD, Mock(return_value=True)):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_user_flow_with_detected_dongle(hass):
    """Test the user flow with a detected EnOcean dongle."""
FAKE_DONGLE_PATH = "/fake/dongle"
with patch(DONGLE_DETECT_METHOD, Mock(return_value=[FAKE_DONGLE_PATH])):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "detect"
devices = result["data_schema"].schema.get("device").container
assert FAKE_DONGLE_PATH in devices
assert EnOceanFlowHandler.MANUAL_PATH_VALUE in devices
async def test_user_flow_with_no_detected_dongle(hass):
    """Test the user flow with no detected EnOcean dongle."""
with patch(DONGLE_DETECT_METHOD, Mock(return_value=[])):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual"
async def test_detection_flow_with_valid_path(hass):
"""Test the detection flow with a valid path selected."""
USER_PROVIDED_PATH = "/user/provided/path"
with patch(DONGLE_VALIDATE_PATH_METHOD, Mock(return_value=True)):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "detect"}, data={CONF_DEVICE: USER_PROVIDED_PATH}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_DEVICE] == USER_PROVIDED_PATH
async def test_detection_flow_with_custom_path(hass):
"""Test the detection flow with custom path selected."""
USER_PROVIDED_PATH = EnOceanFlowHandler.MANUAL_PATH_VALUE
FAKE_DONGLE_PATH = "/fake/dongle"
with patch(DONGLE_VALIDATE_PATH_METHOD, Mock(return_value=True)):
with patch(DONGLE_DETECT_METHOD, Mock(return_value=[FAKE_DONGLE_PATH])):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "detect"},
data={CONF_DEVICE: USER_PROVIDED_PATH},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual"
async def test_detection_flow_with_invalid_path(hass):
"""Test the detection flow with an invalid path selected."""
USER_PROVIDED_PATH = "/invalid/path"
FAKE_DONGLE_PATH = "/fake/dongle"
with patch(DONGLE_VALIDATE_PATH_METHOD, Mock(return_value=False)):
with patch(DONGLE_DETECT_METHOD, Mock(return_value=[FAKE_DONGLE_PATH])):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "detect"},
data={CONF_DEVICE: USER_PROVIDED_PATH},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "detect"
assert CONF_DEVICE in result["errors"]
async def test_manual_flow_with_valid_path(hass):
"""Test the manual flow with a valid path."""
USER_PROVIDED_PATH = "/user/provided/path"
with patch(DONGLE_VALIDATE_PATH_METHOD, Mock(return_value=True)):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "manual"}, data={CONF_DEVICE: USER_PROVIDED_PATH}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_DEVICE] == USER_PROVIDED_PATH
async def test_manual_flow_with_invalid_path(hass):
"""Test the manual flow with an invalid path."""
USER_PROVIDED_PATH = "/user/provided/path"
with patch(
DONGLE_VALIDATE_PATH_METHOD,
Mock(return_value=False),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "manual"}, data={CONF_DEVICE: USER_PROVIDED_PATH}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "manual"
assert CONF_DEVICE in result["errors"]
async def test_import_flow_with_valid_path(hass):
"""Test the import flow with a valid path."""
DATA_TO_IMPORT = {CONF_DEVICE: "/valid/path/to/import"}
with patch(DONGLE_VALIDATE_PATH_METHOD, Mock(return_value=True)):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "import"}, data=DATA_TO_IMPORT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_DEVICE] == DATA_TO_IMPORT[CONF_DEVICE]
async def test_import_flow_with_invalid_path(hass):
"""Test the import flow with an invalid path."""
DATA_TO_IMPORT = {CONF_DEVICE: "/invalid/path/to/import"}
with patch(
DONGLE_VALIDATE_PATH_METHOD,
Mock(return_value=False),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "import"}, data=DATA_TO_IMPORT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "invalid_dongle_path"
|
from weblate.formats.convert import (
HTMLFormat,
IDMLFormat,
OpenDocumentFormat,
WindowsRCFormat,
)
from weblate.formats.helpers import BytesIOMode
from weblate.formats.tests.test_formats import AutoFormatTest
from weblate.trans.tests.utils import get_test_file
IDML_FILE = get_test_file("en.idml")
HTML_FILE = get_test_file("cs.html")
OPENDOCUMENT_FILE = get_test_file("cs.odt")
TEST_RC = get_test_file("cs-CZ.rc")
class ConvertFormatTest(AutoFormatTest):
NEW_UNIT_MATCH = None
EXPECTED_FLAGS = ""
def parse_file(self, filename):
return self.FORMAT(filename, template_store=self.FORMAT(filename))
class HTMLFormatTest(ConvertFormatTest):
FORMAT = HTMLFormat
FILE = HTML_FILE
MIME = "text/html"
EXT = "html"
COUNT = 5
MASK = "*/translations.html"
EXPECTED_PATH = "cs_CZ/translations.html"
FIND_CONTEXT = "+html.body.p:5-1"
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"<body>"
NEW_UNIT_MATCH = None
BASE = HTML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
class OpenDocumentFormatTest(ConvertFormatTest):
FORMAT = OpenDocumentFormat
FILE = OPENDOCUMENT_FILE
MIME = "application/vnd.oasis.opendocument.text"
EXT = "odt"
COUNT = 4
MASK = "*/translations.odt"
EXPECTED_PATH = "cs_CZ/translations.odt"
FIND_CONTEXT = (
"odf///office:document-content[0]/office:body[0]/office:text[0]/text:p[1]"
)
FIND_MATCH = "Orangutan has five bananas."
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = OPENDOCUMENT_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
OpenDocumentFormat.convertfile(BytesIOMode("test.odt", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class IDMLFormatTest(ConvertFormatTest):
FORMAT = IDMLFormat
FILE = IDML_FILE
MIME = "application/octet-stream"
EXT = "idml"
COUNT = 6
MASK = "*/translations.idml"
EXPECTED_PATH = "cs_CZ/translations.idml"
FIND_CONTEXT = (
"idPkg:Story[0]/{}Story[0]/{}XMLElement[0]/{}ParagraphStyleRange[0]"
"Stories/Story_mainmainmainmainmainmainmainmainmainmainmainu188.xml"
)
FIND_MATCH = """<g id="0"><g id="1">THE HEADLINE HERE</g></g>"""
MATCH = b"PK"
NEW_UNIT_MATCH = None
BASE = IDML_FILE
EXPECTED_FLAGS = ""
EDIT_OFFSET = 1
@staticmethod
def extract_document(content):
return bytes(
IDMLFormat.convertfile(BytesIOMode("test.idml", content), None)
).decode()
def assert_same(self, newdata, testdata):
self.assertEqual(
self.extract_document(newdata),
self.extract_document(testdata),
)
class WindowsRCFormatTest(ConvertFormatTest):
FORMAT = WindowsRCFormat
FILE = TEST_RC
BASE = TEST_RC
MIME = "text/plain"
EXT = "rc"
COUNT = 5
MASK = "rc/*.rc"
EXPECTED_PATH = "rc/cs-CZ.rc"
MATCH = "STRINGTABLE"
FIND_CONTEXT = "STRINGTABLE.IDS_MSG1"
FIND_MATCH = "Hello, world!\n"
EDIT_OFFSET = 1
|
import logging
from atenpdu import AtenPE, AtenPEError
import voluptuous as vol
from homeassistant.components.switch import (
DEVICE_CLASS_OUTLET,
PLATFORM_SCHEMA,
SwitchEntity,
)
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_USERNAME
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_AUTH_KEY = "auth_key"
CONF_COMMUNITY = "community"
CONF_PRIV_KEY = "priv_key"
DEFAULT_COMMUNITY = "private"
DEFAULT_PORT = "161"
DEFAULT_USERNAME = "administrator"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_AUTH_KEY): cv.string,
vol.Optional(CONF_PRIV_KEY): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the ATEN PE switch."""
node = config[CONF_HOST]
serv = config[CONF_PORT]
dev = AtenPE(
node=node,
serv=serv,
community=config[CONF_COMMUNITY],
username=config[CONF_USERNAME],
authkey=config.get(CONF_AUTH_KEY),
privkey=config.get(CONF_PRIV_KEY),
)
try:
await hass.async_add_executor_job(dev.initialize)
mac = await dev.deviceMAC()
outlets = dev.outlets()
except AtenPEError as exc:
_LOGGER.error("Failed to initialize %s:%s: %s", node, serv, str(exc))
raise PlatformNotReady from exc
switches = []
async for outlet in outlets:
switches.append(AtenSwitch(dev, mac, outlet.id, outlet.name))
async_add_entities(switches)
class AtenSwitch(SwitchEntity):
"""Represents an ATEN PE switch."""
def __init__(self, device, mac, outlet, name):
"""Initialize an ATEN PE switch."""
self._device = device
self._mac = mac
self._outlet = outlet
self._name = name or f"Outlet {outlet}"
self._enabled = False
self._outlet_power = 0.0
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._mac}-{self._outlet}"
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def device_class(self) -> str:
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_OUTLET
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
return self._enabled
@property
def current_power_w(self) -> float:
"""Return the current power usage in W."""
return self._outlet_power
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
await self._device.setOutletStatus(self._outlet, "on")
self._enabled = True
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
await self._device.setOutletStatus(self._outlet, "off")
self._enabled = False
async def async_update(self):
"""Process update from entity."""
status = await self._device.displayOutletStatus(self._outlet)
if status == "on":
self._enabled = True
self._outlet_power = await self._device.outletPower(self._outlet)
elif status == "off":
self._enabled = False
self._outlet_power = 0.0
|
import arrow
import requests
import json
import sys
from flask import current_app
from retrying import retry
from lemur.constants import CRLReason
from lemur.plugins import lemur_entrust as entrust
from lemur.plugins.bases import IssuerPlugin, SourcePlugin
from lemur.extensions import metrics
from lemur.common.utils import validate_conf
def log_status_code(r, *args, **kwargs):
"""
    A request hook that logs and counts every non-200 status code returned by the ENTRUST API.
:param r:
:param args:
:param kwargs:
:return:
"""
if r.status_code != 200:
log_data = {
"reason": (r.reason if r.reason else ""),
"status_code": r.status_code,
"url": (r.url if r.url else ""),
}
metrics.send(f"entrust_status_code_{r.status_code}", "counter", 1)
current_app.logger.info(log_data)
def determine_end_date(end_date):
"""
Determine appropriate end date
:param end_date:
:return: validity_end as string
"""
# ENTRUST only allows 13 months of max certificate duration
max_validity_end = arrow.utcnow().shift(years=1, months=+1)
if not end_date:
end_date = max_validity_end
elif end_date > max_validity_end:
end_date = max_validity_end
return end_date.format('YYYY-MM-DD')
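# Hedged example: assuming "today" is 2020-06-01 UTC, the 13-month cap is
# 2021-07-01, so determine_end_date(None) returns "2021-07-01",
# determine_end_date(arrow.get("2022-01-01")) is clamped to "2021-07-01",
# and determine_end_date(arrow.get("2020-12-31")) returns "2020-12-31".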
def process_options(options, client_id):
"""
Processes and maps the incoming issuer options to fields/options that
Entrust understands
:param options:
:return: dict of valid entrust options
"""
# if there is a config variable ENTRUST_PRODUCT_<upper(authority.name)>
# take the value as Cert product-type
# else default to "STANDARD_SSL"
authority = options.get("authority").name.upper()
# STANDARD_SSL (cn=domain, san=www.domain),
# ADVANTAGE_SSL (cn=domain, san=[www.domain, one_more_option]),
# WILDCARD_SSL (unlimited sans, and wildcard)
product_type = current_app.config.get(f"ENTRUST_PRODUCT_{authority}", "STANDARD_SSL")
if options.get("validity_end"):
validity_end = determine_end_date(options.get("validity_end"))
else:
validity_end = determine_end_date(False)
tracking_data = {
"requesterName": current_app.config.get("ENTRUST_NAME"),
"requesterEmail": current_app.config.get("ENTRUST_EMAIL"),
"requesterPhone": current_app.config.get("ENTRUST_PHONE")
}
data = {
"signingAlg": "SHA-2",
"eku": "SERVER_AND_CLIENT_AUTH",
"certType": product_type,
"certExpiryDate": validity_end,
# "keyType": "RSA", Entrust complaining about this parameter
"tracking": tracking_data,
"org": options.get("organization"),
"clientId": client_id
}
return data
def get_client_id(my_response, organization):
"""
    Helper function to look up the Entrust clientId for the given organization.
    :param my_response:
    :param organization:
:return: :raise Exception:
"""
try:
d = json.loads(my_response.content)
except ValueError:
# catch an empty json object here
d = {'response': 'No detailed message'}
found = False
for y in d["organizations"]:
if y["name"] == organization:
found = True
client_id = y["clientId"]
if found:
return client_id
else:
        raise Exception(f"Error on Organization - Use one of the list: {d['organizations']}")
def handle_response(my_response):
"""
Helper function for parsing responses from the Entrust API.
:param my_response:
:return: :raise Exception:
"""
msg = {
200: "The request had the validateOnly flag set to true and validation was successful.",
201: "Certificate created",
202: "Request accepted and queued for approval",
400: "Invalid request parameters",
404: "Unknown jobId",
429: "Too many requests"
}
try:
data = json.loads(my_response.content)
except ValueError:
        # catch an empty JSON object here
data = {'response': 'No detailed message'}
status_code = my_response.status_code
if status_code > 399:
raise Exception(f"ENTRUST error: {msg.get(status_code, status_code)}\n{data['errors']}")
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Response",
"status": status_code,
"response": data
}
current_app.logger.info(log_data)
if data == {'response': 'No detailed message'}:
# status if no data
return status_code
else:
# return data from the response
return data
@retry(stop_max_attempt_number=3, wait_fixed=5000)
def order_and_download_certificate(session, url, data):
"""
    Helper function to place a certificate order and download it
:param session:
:param url: Entrust endpoint url
:param data: CSR, and the required order details, such as validity length
:return: the cert chain
:raise Exception:
"""
try:
response = session.post(url, json=data, timeout=(15, 40))
except requests.exceptions.Timeout:
raise Exception("Timeout for POST")
except requests.exceptions.RequestException as e:
raise Exception(f"Error for POST {e}")
return handle_response(response)
class EntrustIssuerPlugin(IssuerPlugin):
title = "Entrust"
slug = "entrust-issuer"
description = "Enables the creation of certificates by ENTRUST"
version = entrust.VERSION
author = "sirferl"
author_url = "https://github.com/sirferl/lemur"
def __init__(self, *args, **kwargs):
"""Initialize the issuer with the appropriate details."""
required_vars = [
"ENTRUST_API_CERT",
"ENTRUST_API_KEY",
"ENTRUST_API_USER",
"ENTRUST_API_PASS",
"ENTRUST_URL",
"ENTRUST_ROOT",
"ENTRUST_NAME",
"ENTRUST_EMAIL",
"ENTRUST_PHONE",
]
validate_conf(current_app, required_vars)
self.session = requests.Session()
cert_file = current_app.config.get("ENTRUST_API_CERT")
key_file = current_app.config.get("ENTRUST_API_KEY")
user = current_app.config.get("ENTRUST_API_USER")
password = current_app.config.get("ENTRUST_API_PASS")
self.session.cert = (cert_file, key_file)
self.session.auth = (user, password)
self.session.hooks = dict(response=log_status_code)
# self.session.config['keep_alive'] = False
super(EntrustIssuerPlugin, self).__init__(*args, **kwargs)
def create_certificate(self, csr, issuer_options):
"""
Creates an Entrust certificate.
:param csr:
:param issuer_options:
:return: :raise Exception:
"""
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Requesting options",
"options": issuer_options
}
current_app.logger.info(log_data)
# firstly we need the organization ID
url = current_app.config.get("ENTRUST_URL") + "/organizations"
try:
response = self.session.get(url, timeout=(15, 40))
except requests.exceptions.Timeout:
raise Exception("Timeout for Getting Organizations")
except requests.exceptions.RequestException as e:
raise Exception(f"Error for Getting Organization {e}")
client_id = get_client_id(response, issuer_options.get("organization"))
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": f"Organization id: {client_id}"
}
current_app.logger.info(log_data)
url = current_app.config.get("ENTRUST_URL") + "/certificates"
data = process_options(issuer_options, client_id)
data["csr"] = csr
response_dict = order_and_download_certificate(self.session, url, data)
external_id = response_dict['trackingId']
cert = response_dict['endEntityCert']
if len(response_dict['chainCerts']) < 2:
# certificate signed by CA directly, no ICA included in the chain
chain = None
else:
chain = response_dict['chainCerts'][1]
log_data["message"] = "Received Chain"
log_data["options"] = f"chain: {chain}"
current_app.logger.info(log_data)
return cert, chain, external_id
@retry(stop_max_attempt_number=3, wait_fixed=1000)
def revoke_certificate(self, certificate, reason):
"""Revoke an Entrust certificate."""
base_url = current_app.config.get("ENTRUST_URL")
# make certificate revoke request
revoke_url = f"{base_url}/certificates/{certificate.external_id}/revocations"
        if "comments" not in reason or reason["comments"] == '':
            comments = "revoked via API"
        else:
            comments = reason["comments"]
crl_reason = CRLReason.unspecified
if "crl_reason" in reason:
crl_reason = CRLReason[reason["crl_reason"]]
data = {
"crlReason": crl_reason, # per RFC 5280 section 5.3.1
"revocationComment": comments
}
response = self.session.post(revoke_url, json=data)
metrics.send("entrust_revoke_certificate", "counter", 1)
return handle_response(response)
@retry(stop_max_attempt_number=3, wait_fixed=1000)
def deactivate_certificate(self, certificate):
"""Deactivates an Entrust certificate."""
base_url = current_app.config.get("ENTRUST_URL")
deactivate_url = f"{base_url}/certificates/{certificate.external_id}/deactivations"
response = self.session.post(deactivate_url)
metrics.send("entrust_deactivate_certificate", "counter", 1)
return handle_response(response)
@staticmethod
def create_authority(options):
"""Create an authority.
        Creates an authority. This authority is then used by Lemur to
        allow a user to specify which Certificate Authority they want
        to use to sign their certificate.
:param options:
:return:
"""
entrust_root = current_app.config.get("ENTRUST_ROOT")
entrust_issuing = current_app.config.get("ENTRUST_ISSUING")
role = {"username": "", "password": "", "name": "entrust"}
current_app.logger.info(f"Creating Auth: {options} {entrust_issuing}")
# body, chain, role
return entrust_root, "", [role]
def get_ordered_certificate(self, order_id):
raise NotImplementedError("Not implemented\n", self, order_id)
def cancel_ordered_certificate(self, pending_cert, **kwargs):
raise NotImplementedError("Not implemented\n", self, pending_cert, **kwargs)
class EntrustSourcePlugin(SourcePlugin):
title = "Entrust"
slug = "entrust-source"
description = "Enables the collection of certificates"
version = entrust.VERSION
author = "sirferl"
author_url = "https://github.com/sirferl/lemur"
def __init__(self, *args, **kwargs):
"""Initialize the issuer with the appropriate details."""
required_vars = [
"ENTRUST_API_CERT",
"ENTRUST_API_KEY",
"ENTRUST_API_USER",
"ENTRUST_API_PASS",
"ENTRUST_URL",
"ENTRUST_ROOT",
"ENTRUST_NAME",
"ENTRUST_EMAIL",
"ENTRUST_PHONE",
]
validate_conf(current_app, required_vars)
self.session = requests.Session()
cert_file = current_app.config.get("ENTRUST_API_CERT")
key_file = current_app.config.get("ENTRUST_API_KEY")
user = current_app.config.get("ENTRUST_API_USER")
password = current_app.config.get("ENTRUST_API_PASS")
self.session.cert = (cert_file, key_file)
self.session.auth = (user, password)
self.session.hooks = dict(response=log_status_code)
super(EntrustSourcePlugin, self).__init__(*args, **kwargs)
def get_certificates(self, options, **kwargs):
""" Fetch all Entrust certificates """
base_url = current_app.config.get("ENTRUST_URL")
host = base_url.replace('/enterprise/v2', '')
get_url = f"{base_url}/certificates"
certs = []
processed_certs = 0
offset = 0
while True:
response = self.session.get(get_url,
params={
"status": "ACTIVE",
"isThirdParty": "false",
"fields": "uri,dn",
"offset": offset
}
)
try:
data = json.loads(response.content)
except ValueError:
                # catch an empty json object here
data = {'response': 'No detailed message'}
status_code = response.status_code
if status_code > 399:
raise Exception(f"ENTRUST error: {status_code}\n{data['errors']}")
for c in data["certificates"]:
download_url = "{0}{1}".format(
host, c["uri"]
)
cert_response = self.session.get(download_url)
certificate = json.loads(cert_response.content)
# normalize serial
serial = str(int(certificate["serialNumber"], 16))
cert = {
"body": certificate["endEntityCert"],
"serial": serial,
"external_id": str(certificate["trackingId"]),
"csr": certificate["csr"],
"owner": certificate["tracking"]["requesterEmail"],
"description": f"Imported by Lemur; Type: Entrust {certificate['certType']}\nExtended Key Usage: {certificate['eku']}"
}
certs.append(cert)
processed_certs += 1
if data["summary"]["limit"] * offset >= data["summary"]["total"]:
break
else:
offset += 1
        current_app.logger.info(f"Retrieved {processed_certs} certificates")
return certs
def get_endpoints(self, options, **kwargs):
# There are no endpoints in ENTRUST
raise NotImplementedError("Not implemented\n", self, options, **kwargs)
|
import rumps
rumps.debug_mode(True)
@rumps.clicked('Icon', 'On')
def a(_):
app.icon = 'test.png'
@rumps.clicked('Icon', 'Off')
def b(_):
app.icon = None
@rumps.clicked('Title', 'On')
def c(_):
app.title = 'Buzz'
@rumps.clicked('Title', 'Off')
def d(_):
app.title = None
app = rumps.App('Buzz Application', quit_button=rumps.MenuItem('Quit Buzz', key='q'))
app.menu = [
('Icon', ('On', 'Off')),
('Title', ('On', 'Off'))
]
app.run()
|
import asyncio
from pyheos import CommandFailedError, HeosError, const
import pytest
from homeassistant.components.heos import (
ControllerManager,
async_setup_entry,
async_unload_entry,
)
from homeassistant.components.heos.const import (
DATA_CONTROLLER_MANAGER,
DATA_SOURCE_MANAGER,
DOMAIN,
)
from homeassistant.components.media_player.const import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.const import CONF_HOST
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, patch
async def test_async_setup_creates_entry(hass, config):
"""Test component setup creates entry from config."""
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
entry = entries[0]
assert entry.title == "Controller (127.0.0.1)"
assert entry.data == {CONF_HOST: "127.0.0.1"}
assert entry.unique_id == DOMAIN
async def test_async_setup_updates_entry(hass, config_entry, config, controller):
"""Test component setup updates entry from config."""
config[DOMAIN][CONF_HOST] = "127.0.0.2"
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
entry = entries[0]
assert entry.title == "Controller (127.0.0.2)"
assert entry.data == {CONF_HOST: "127.0.0.2"}
assert entry.unique_id == DOMAIN
async def test_async_setup_returns_true(hass, config_entry, config):
"""Test component setup from config."""
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0] == config_entry
async def test_async_setup_no_config_returns_true(hass, config_entry):
"""Test component setup from entry only."""
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(DOMAIN)
assert len(entries) == 1
assert entries[0] == config_entry
async def test_async_setup_entry_loads_platforms(
hass, config_entry, controller, input_sources, favorites
):
"""Test load connects to heos, retrieves players, and loads platforms."""
config_entry.add_to_hass(hass)
with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
assert await async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == 1
assert controller.connect.call_count == 1
assert controller.get_players.call_count == 1
assert controller.get_favorites.call_count == 1
assert controller.get_input_sources.call_count == 1
controller.disconnect.assert_not_called()
assert hass.data[DOMAIN][DATA_CONTROLLER_MANAGER].controller == controller
assert hass.data[DOMAIN][MEDIA_PLAYER_DOMAIN] == controller.players
assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].favorites == favorites
assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].inputs == input_sources
async def test_async_setup_entry_not_signed_in_loads_platforms(
hass, config_entry, controller, input_sources, caplog
):
"""Test setup does not retrieve favorites when not logged in."""
config_entry.add_to_hass(hass)
controller.is_signed_in = False
controller.signed_in_username = None
with patch.object(hass.config_entries, "async_forward_entry_setup") as forward_mock:
assert await async_setup_entry(hass, config_entry)
# Assert platforms loaded
await hass.async_block_till_done()
assert forward_mock.call_count == 1
assert controller.connect.call_count == 1
assert controller.get_players.call_count == 1
assert controller.get_favorites.call_count == 0
assert controller.get_input_sources.call_count == 1
controller.disconnect.assert_not_called()
assert hass.data[DOMAIN][DATA_CONTROLLER_MANAGER].controller == controller
assert hass.data[DOMAIN][MEDIA_PLAYER_DOMAIN] == controller.players
assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].favorites == {}
assert hass.data[DOMAIN][DATA_SOURCE_MANAGER].inputs == input_sources
assert (
"127.0.0.1 is not logged in to a HEOS account and will be unable to retrieve "
"HEOS favorites: Use the 'heos.sign_in' service to sign-in to a HEOS account"
in caplog.text
)
async def test_async_setup_entry_connect_failure(hass, config_entry, controller):
"""Connection failure raises ConfigEntryNotReady."""
config_entry.add_to_hass(hass)
controller.connect.side_effect = HeosError()
with pytest.raises(ConfigEntryNotReady):
await async_setup_entry(hass, config_entry)
await hass.async_block_till_done()
assert controller.connect.call_count == 1
assert controller.disconnect.call_count == 1
controller.connect.reset_mock()
controller.disconnect.reset_mock()
async def test_async_setup_entry_player_failure(hass, config_entry, controller):
"""Failure to retrieve players/sources raises ConfigEntryNotReady."""
config_entry.add_to_hass(hass)
controller.get_players.side_effect = HeosError()
with pytest.raises(ConfigEntryNotReady):
await async_setup_entry(hass, config_entry)
await hass.async_block_till_done()
assert controller.connect.call_count == 1
assert controller.disconnect.call_count == 1
controller.connect.reset_mock()
controller.disconnect.reset_mock()
async def test_unload_entry(hass, config_entry, controller):
"""Test entries are unloaded correctly."""
controller_manager = Mock(ControllerManager)
hass.data[DOMAIN] = {DATA_CONTROLLER_MANAGER: controller_manager}
with patch.object(
hass.config_entries, "async_forward_entry_unload", return_value=True
) as unload:
assert await async_unload_entry(hass, config_entry)
await hass.async_block_till_done()
assert controller_manager.disconnect.call_count == 1
assert unload.call_count == 1
assert DOMAIN not in hass.data
async def test_update_sources_retry(hass, config_entry, config, controller, caplog):
"""Test update sources retries on failures to max attempts."""
config_entry.add_to_hass(hass)
assert await async_setup_component(hass, DOMAIN, config)
controller.get_favorites.reset_mock()
controller.get_input_sources.reset_mock()
source_manager = hass.data[DOMAIN][DATA_SOURCE_MANAGER]
source_manager.retry_delay = 0
source_manager.max_retry_attempts = 1
controller.get_favorites.side_effect = CommandFailedError("Test", "test", 0)
controller.dispatcher.send(
const.SIGNAL_CONTROLLER_EVENT, const.EVENT_SOURCES_CHANGED, {}
)
# Wait until it's finished
while "Unable to update sources" not in caplog.text:
await asyncio.sleep(0.1)
assert controller.get_favorites.call_count == 2
|
import logging
import messagebird
from messagebird.client import ErrorException
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_API_KEY, CONF_SENDER
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_SENDER, default="HA"): vol.All(
cv.string, vol.Match(r"^(\+?[1-9]\d{1,14}|\w{1,11})$")
),
}
)
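# The CONF_SENDER pattern above accepts either an E.164 style phone number
# (an optional "+" followed by 2 to 15 digits, e.g. "+31612345678") or an
# alphanumeric originator of at most 11 characters (e.g. "HomeAssist").
# The example values are only illustrations of strings the regex matches.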
def get_service(hass, config, discovery_info=None):
"""Get the MessageBird notification service."""
client = messagebird.Client(config[CONF_API_KEY])
try:
# validates the api key
client.balance()
except messagebird.client.ErrorException:
_LOGGER.error("The specified MessageBird API key is invalid")
return None
return MessageBirdNotificationService(config.get(CONF_SENDER), client)
class MessageBirdNotificationService(BaseNotificationService):
"""Implement the notification service for MessageBird."""
def __init__(self, sender, client):
"""Initialize the service."""
self.sender = sender
self.client = client
def send_message(self, message=None, **kwargs):
"""Send a message to a specified target."""
targets = kwargs.get(ATTR_TARGET)
if not targets:
_LOGGER.error("No target specified")
return
for target in targets:
try:
self.client.message_create(
self.sender, target, message, {"reference": "HA"}
)
except ErrorException as exception:
_LOGGER.error("Failed to notify %s: %s", target, exception)
continue
|
import asyncio
from accuweather import AccuWeather, ApiError, InvalidApiKeyError, RequestsExceededError
from aiohttp import ClientError
from aiohttp.client_exceptions import ClientConnectorError
from async_timeout import timeout
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY, CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from .const import CONF_FORECAST, DOMAIN # pylint:disable=unused-import
class AccuWeatherFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Config flow for AccuWeather."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
# Under the terms of use of the API, one user can use one free API key. Due to
# the small number of requests allowed, we only allow one integration instance.
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
errors = {}
if user_input is not None:
websession = async_get_clientsession(self.hass)
try:
async with timeout(10):
accuweather = AccuWeather(
user_input[CONF_API_KEY],
websession,
latitude=user_input[CONF_LATITUDE],
longitude=user_input[CONF_LONGITUDE],
)
await accuweather.async_get_location()
except (ApiError, ClientConnectorError, asyncio.TimeoutError, ClientError):
errors["base"] = "cannot_connect"
except InvalidApiKeyError:
errors[CONF_API_KEY] = "invalid_api_key"
except RequestsExceededError:
errors[CONF_API_KEY] = "requests_exceeded"
else:
await self.async_set_unique_id(
accuweather.location_key, raise_on_progress=False
)
return self.async_create_entry(
title=user_input[CONF_NAME], data=user_input
)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Required(CONF_API_KEY): str,
vol.Optional(
CONF_LATITUDE, default=self.hass.config.latitude
): cv.latitude,
vol.Optional(
CONF_LONGITUDE, default=self.hass.config.longitude
): cv.longitude,
vol.Optional(
CONF_NAME, default=self.hass.config.location_name
): str,
}
),
errors=errors,
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Options callback for AccuWeather."""
return AccuWeatherOptionsFlowHandler(config_entry)
class AccuWeatherOptionsFlowHandler(config_entries.OptionsFlow):
"""Config flow options for AccuWeather."""
def __init__(self, config_entry):
"""Initialize AccuWeather options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
return await self.async_step_user()
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{
vol.Optional(
CONF_FORECAST,
default=self.config_entry.options.get(CONF_FORECAST, False),
): bool
}
),
)
|
import asyncio
from functools import partial
import logging
from aiohttp import client_exceptions
import aiohue
import async_timeout
import slugify as unicode_slug
import voluptuous as vol
from homeassistant import core
from homeassistant.const import HTTP_INTERNAL_SERVER_ERROR
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, config_validation as cv
from .const import (
CONF_ALLOW_HUE_GROUPS,
CONF_ALLOW_UNREACHABLE,
DEFAULT_ALLOW_HUE_GROUPS,
DEFAULT_ALLOW_UNREACHABLE,
LOGGER,
)
from .errors import AuthenticationRequired, CannotConnect
from .helpers import create_config_flow
from .sensor_base import SensorManager
SERVICE_HUE_SCENE = "hue_activate_scene"
ATTR_GROUP_NAME = "group_name"
ATTR_SCENE_NAME = "scene_name"
SCENE_SCHEMA = vol.Schema(
{vol.Required(ATTR_GROUP_NAME): cv.string, vol.Required(ATTR_SCENE_NAME): cv.string}
)
# How long should we sleep if the hub is busy
HUB_BUSY_SLEEP = 0.5
_LOGGER = logging.getLogger(__name__)
class HueBridge:
"""Manages a single Hue bridge."""
def __init__(self, hass, config_entry):
"""Initialize the system."""
self.config_entry = config_entry
self.hass = hass
self.available = True
self.authorized = False
self.api = None
self.parallel_updates_semaphore = None
# Jobs to be executed when API is reset.
self.reset_jobs = []
self.sensor_manager = None
self.unsub_config_entry_listener = None
@property
def host(self):
"""Return the host of this bridge."""
return self.config_entry.data["host"]
@property
def allow_unreachable(self):
"""Allow unreachable light bulbs."""
return self.config_entry.options.get(
CONF_ALLOW_UNREACHABLE, DEFAULT_ALLOW_UNREACHABLE
)
@property
def allow_groups(self):
"""Allow groups defined in the Hue bridge."""
return self.config_entry.options.get(
CONF_ALLOW_HUE_GROUPS, DEFAULT_ALLOW_HUE_GROUPS
)
async def async_setup(self, tries=0):
"""Set up a phue bridge based on host parameter."""
host = self.host
hass = self.hass
bridge = aiohue.Bridge(
host,
username=self.config_entry.data["username"],
websession=aiohttp_client.async_get_clientsession(hass),
)
try:
await authenticate_bridge(hass, bridge)
except AuthenticationRequired:
# Usernames can become invalid if hub is reset or user removed.
# We are going to fail the config entry setup and initiate a new
# linking procedure. When linking succeeds, it will remove the
# old config entry.
create_config_flow(hass, host)
return False
except CannotConnect as err:
LOGGER.error("Error connecting to the Hue bridge at %s", host)
raise ConfigEntryNotReady from err
except Exception: # pylint: disable=broad-except
LOGGER.exception("Unknown error connecting with Hue bridge at %s", host)
return False
self.api = bridge
self.sensor_manager = SensorManager(self)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(self.config_entry, "light")
)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(
self.config_entry, "binary_sensor"
)
)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(self.config_entry, "sensor")
)
self.parallel_updates_semaphore = asyncio.Semaphore(
3 if self.api.config.modelid == "BSB001" else 10
)
self.unsub_config_entry_listener = self.config_entry.add_update_listener(
_update_listener
)
self.authorized = True
return True
async def async_request_call(self, task):
"""Limit parallel requests to Hue hub.
        The Hue hub can only handle a certain number of parallel requests in total.
        Although we limit our parallel requests, we will still run into issues because
other products are hitting up Hue.
ClientOSError means hub closed the socket on us.
ContentResponseError means hub raised an error.
Since we don't make bad requests, this is on them.
"""
async with self.parallel_updates_semaphore:
for tries in range(4):
try:
return await task()
except (
client_exceptions.ClientOSError,
client_exceptions.ClientResponseError,
client_exceptions.ServerDisconnectedError,
) as err:
if tries == 3:
_LOGGER.error("Request failed %s times, giving up", tries)
raise
# We only retry if it's a server error. So raise on all 4XX errors.
if (
isinstance(err, client_exceptions.ClientResponseError)
and err.status < HTTP_INTERNAL_SERVER_ERROR
):
raise
await asyncio.sleep(HUB_BUSY_SLEEP * tries)
async def async_reset(self):
"""Reset this bridge to default state.
Will cancel any scheduled setup retry and will unload
the config entry.
"""
        # The bridge can be in 3 states:
        #  - Setup was successful, self.api is not None
        #  - Authentication was wrong, self.api is None, not retrying setup.
        # If the authentication was wrong there is nothing to unload.
if self.api is None:
return True
while self.reset_jobs:
self.reset_jobs.pop()()
if self.unsub_config_entry_listener is not None:
self.unsub_config_entry_listener()
# If setup was successful, we set api variable, forwarded entry and
# register service
results = await asyncio.gather(
self.hass.config_entries.async_forward_entry_unload(
self.config_entry, "light"
),
self.hass.config_entries.async_forward_entry_unload(
self.config_entry, "binary_sensor"
),
self.hass.config_entries.async_forward_entry_unload(
self.config_entry, "sensor"
),
)
# None and True are OK
return False not in results
async def hue_activate_scene(self, call, updated=False, hide_warnings=False):
"""Service to call directly into bridge to set scenes."""
group_name = call.data[ATTR_GROUP_NAME]
scene_name = call.data[ATTR_SCENE_NAME]
group = next(
(group for group in self.api.groups.values() if group.name == group_name),
None,
)
# Additional scene logic to handle duplicate scene names across groups
scene = next(
(
scene
for scene in self.api.scenes.values()
if scene.name == scene_name
and group is not None
and sorted(scene.lights) == sorted(group.lights)
),
None,
)
# If we can't find it, fetch latest info.
if not updated and (group is None or scene is None):
await self.async_request_call(self.api.groups.update)
await self.async_request_call(self.api.scenes.update)
return await self.hue_activate_scene(call, updated=True)
if group is None:
if not hide_warnings:
                LOGGER.warning(
                    "Unable to find group %s on bridge %s", group_name, self.host
                )
return False
if scene is None:
LOGGER.warning("Unable to find scene %s", scene_name)
return False
return await self.async_request_call(partial(group.set_action, scene=scene.id))
async def handle_unauthorized_error(self):
"""Create a new config flow when the authorization is no longer valid."""
if not self.authorized:
# we already created a new config flow, no need to do it again
return
        LOGGER.error(
            "Unable to authorize to bridge %s, set up the linking again.", self.host
        )
self.authorized = False
create_config_flow(self.hass, self.host)
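# Rough retry schedule of HueBridge.async_request_call() above, assuming the
# hub keeps failing with a retryable error (ClientOSError, ServerDisconnectedError
# or a 5xx ClientResponseError): attempt 1 fails -> sleep 0 s, attempt 2 ->
# sleep HUB_BUSY_SLEEP * 1 = 0.5 s, attempt 3 -> sleep 1.0 s, attempt 4 -> the
# error is logged and re-raised. A ClientResponseError with a status below 500
# is re-raised immediately instead of retried.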
async def authenticate_bridge(hass: core.HomeAssistant, bridge: aiohue.Bridge):
"""Create a bridge object and verify authentication."""
try:
with async_timeout.timeout(10):
# Create username if we don't have one
if not bridge.username:
device_name = unicode_slug.slugify(
hass.config.location_name, max_length=19
)
await bridge.create_user(f"home-assistant#{device_name}")
# Initialize bridge (and validate our username)
await bridge.initialize()
except (aiohue.LinkButtonNotPressed, aiohue.Unauthorized) as err:
raise AuthenticationRequired from err
except (
asyncio.TimeoutError,
client_exceptions.ClientOSError,
client_exceptions.ServerDisconnectedError,
client_exceptions.ContentTypeError,
) as err:
raise CannotConnect from err
except aiohue.AiohueException as err:
LOGGER.exception("Unknown Hue linking error occurred")
raise AuthenticationRequired from err
async def _update_listener(hass, entry):
"""Handle options update."""
await hass.config_entries.async_reload(entry.entry_id)
|
from __future__ import division
from math import ceil
import numpy as np
import chainer
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from chainercv.experimental.links.model.pspnet.transforms import \
convolution_crop
from chainercv.links import Conv2DBNActiv
from chainercv.links.model.resnet import ResBlock
from chainercv.links import PickableSequentialChain
from chainercv import transforms
from chainercv import utils
_imagenet_mean = np.array(
(123.68, 116.779, 103.939), dtype=np.float32)[:, None, None]
class PyramidPoolingModule(chainer.ChainList):
def __init__(self, in_channels, feat_size, pyramids, initialW=None):
out_channels = in_channels // len(pyramids)
super(PyramidPoolingModule, self).__init__(
Conv2DBNActiv(
in_channels, out_channels, 1, 1, 0, 1, initialW=initialW),
Conv2DBNActiv(
in_channels, out_channels, 1, 1, 0, 1, initialW=initialW),
Conv2DBNActiv(
in_channels, out_channels, 1, 1, 0, 1, initialW=initialW),
Conv2DBNActiv(
in_channels, out_channels, 1, 1, 0, 1, initialW=initialW),
)
kh = feat_size[0] // np.array(pyramids)
kw = feat_size[1] // np.array(pyramids)
self.ksizes = list(zip(kh, kw))
def forward(self, x):
ys = [x]
H, W = x.shape[2:]
for f, ksize in zip(self, self.ksizes):
y = F.average_pooling_2d(x, ksize, ksize)
y = f(y)
y = F.resize_images(y, (H, W))
ys.append(y)
return F.concat(ys, axis=1)
class DilatedResNet(PickableSequentialChain):
_blocks = {
50: [3, 4, 6, 3],
101: [3, 4, 23, 3],
}
_models = {
50: {
'imagenet': {
'url': 'https://chainercv-models.preferred.jp/'
'pspnet_resnet50_imagenet_trained_2018_11_26.npz',
'cv2': True
},
},
101: {
'imagenet': {
'url': 'https://chainercv-models.preferred.jp/'
'pspnet_resnet101_imagenet_trained_2018_11_26.npz',
'cv2': True
},
},
}
def __init__(self, n_layer, pretrained_model=None,
initialW=None):
n_block = self._blocks[n_layer]
_, path = utils.prepare_pretrained_model(
{},
pretrained_model,
self._models[n_layer])
super(DilatedResNet, self).__init__()
with self.init_scope():
self.conv1_1 = Conv2DBNActiv(
None, 64, 3, 2, 1, 1, initialW=initialW)
self.conv1_2 = Conv2DBNActiv(
64, 64, 3, 1, 1, 1, initialW=initialW)
self.conv1_3 = Conv2DBNActiv(
64, 128, 3, 1, 1, 1, initialW=initialW)
self.pool1 = lambda x: F.max_pooling_2d(
x, ksize=3, stride=2, pad=1)
self.res2 = ResBlock(
n_block[0], 128, 64, 256, 1, 1,
initialW=initialW, stride_first=False)
self.res3 = ResBlock(
n_block[1], 256, 128, 512, 2, 1,
initialW=initialW, stride_first=False)
self.res4 = ResBlock(
n_block[2], 512, 256, 1024, 1, 2,
initialW=initialW, stride_first=False)
self.res5 = ResBlock(
n_block[3], 1024, 512, 2048, 1, 4,
initialW=initialW, stride_first=False)
if path:
chainer.serializers.load_npz(path, self, ignore_names=None)
class PSPNet(chainer.Chain):
"""Pyramid Scene Parsing Network.
This is a PSPNet [#]_ model for semantic segmentation. This is based on
the implementation found here_.
.. [#] Hengshuang Zhao, Jianping Shi, Xiaojuan Qi, Xiaogang Wang \
Jiaya Jia "Pyramid Scene Parsing Network" \
CVPR, 2017
.. _here: https://github.com/hszhao/PSPNet
Args:
n_class (int): The number of channels in the last convolution layer.
pretrained_model (string): The weight file to be loaded.
            This can take :obj:`'cityscapes'`, :obj:`'ade20k'`, :obj:`'imagenet'`,
            `filepath` or :obj:`None`.
The default value is :obj:`None`.
* :obj:`'cityscapes'`: Load weights trained on the train split of \
Cityscapes dataset. \
:obj:`n_class` must be :obj:`19` or :obj:`None`.
* :obj:`'ade20k'`: Load weights trained on the train split of \
ADE20K dataset. \
:obj:`n_class` must be :obj:`150` or :obj:`None`.
* :obj:`'imagenet'`: Load ImageNet pretrained weights for \
the extractor.
* `filepath`: A path of npz file. In this case, :obj:`n_class` \
must be specified properly.
* :obj:`None`: Do not load weights.
input_size (tuple): The size of the input.
This value is :math:`(height, width)`.
initialW (callable): Initializer for the weights of
convolution kernels.
"""
def __init__(self, n_class=None, pretrained_model=None,
input_size=None, initialW=None):
super(PSPNet, self).__init__()
if pretrained_model == 'imagenet':
extractor_pretrained_model = 'imagenet'
pretrained_model = None
else:
extractor_pretrained_model = None
param, path = utils.prepare_pretrained_model(
{'n_class': n_class, 'input_size': input_size},
pretrained_model, self._models,
default={'input_size': (713, 713)})
n_class = param['n_class']
input_size = param['input_size']
if not isinstance(input_size, (list, tuple)):
input_size = (int(input_size), int(input_size))
self.input_size = input_size
if initialW is None:
if pretrained_model:
initialW = initializers.constant.Zero()
kwargs = self._extractor_kwargs
kwargs.update({'pretrained_model': extractor_pretrained_model,
'initialW': initialW})
extractor = self._extractor_cls(**kwargs)
extractor.pick = self._extractor_pick
self.scales = None
self.mean = _imagenet_mean
pyramids = [6, 3, 2, 1]
feat_size = (input_size[0] // 8, input_size[1] // 8)
with self.init_scope():
self.extractor = extractor
self.ppm = PyramidPoolingModule(
2048, feat_size, pyramids, initialW=initialW)
self.head_conv1 = Conv2DBNActiv(
4096, 512, 3, 1, 1, initialW=initialW)
self.head_conv2 = L.Convolution2D(
512, n_class, 1, 1, 0, False, initialW)
if path:
chainer.serializers.load_npz(path, self)
@property
def n_class(self):
return self.head_conv2.out_channels
def forward(self, x):
_, res5 = self.extractor(x)
h = self.ppm(res5)
h = self.head_conv1(h)
h = self.head_conv2(h)
h = F.resize_images(h, x.shape[2:])
return h
def _tile_predict(self, img):
if self.mean is not None:
img = img - self.mean
ori_H, ori_W = img.shape[1:]
long_size = max(ori_H, ori_W)
if long_size > max(self.input_size):
stride_rate = 2 / 3
stride = (int(ceil(self.input_size[0] * stride_rate)),
int(ceil(self.input_size[1] * stride_rate)))
imgs, param = convolution_crop(
img, self.input_size, stride, return_param=True)
counts = self.xp.zeros((1, ori_H, ori_W), dtype=np.float32)
preds = self.xp.zeros((1, self.n_class, ori_H, ori_W),
dtype=np.float32)
N = len(param['y_slices'])
for i in range(N):
img_i = imgs[i:i+1]
y_slice = param['y_slices'][i]
x_slice = param['x_slices'][i]
crop_y_slice = param['crop_y_slices'][i]
crop_x_slice = param['crop_x_slices'][i]
scores_i = self._predict(img_i)
# Flip horizontally flipped score maps again
flipped_scores_i = self._predict(
img_i[:, :, :, ::-1])[:, :, :, ::-1]
preds[0, :, y_slice, x_slice] +=\
scores_i[0, :, crop_y_slice, crop_x_slice]
preds[0, :, y_slice, x_slice] +=\
flipped_scores_i[0, :, crop_y_slice, crop_x_slice]
counts[0, y_slice, x_slice] += 2
scores = preds / counts[:, None]
else:
img, param = transforms.resize_contain(
img, self.input_size, return_param=True)
preds1 = self._predict(img[np.newaxis])
preds2 = self._predict(img[np.newaxis, :, :, ::-1])
preds = (preds1 + preds2[:, :, :, ::-1]) / 2
y_start = param['y_offset']
y_end = y_start + param['scaled_size'][0]
x_start = param['x_offset']
x_end = x_start + param['scaled_size'][1]
scores = preds[:, :, y_start:y_end, x_start:x_end]
scores = F.resize_images(scores, (ori_H, ori_W))[0].array
return scores
def _predict(self, imgs):
xs = chainer.Variable(self.xp.asarray(imgs))
with chainer.using_config('train', False):
scores = F.softmax(self.forward(xs)).array
return scores
def predict(self, imgs):
"""Conduct semantic segmentation from images.
Args:
imgs (iterable of numpy.ndarray): Arrays holding images.
All images are in CHW and RGB format
and the range of their values are :math:`[0, 255]`.
Returns:
list of numpy.ndarray:
List of integer labels predicted from each image in the input \
list.
"""
labels = []
for img in imgs:
with chainer.using_config('train', False), \
chainer.function.no_backprop_mode():
if self.scales is not None:
scores = _multiscale_predict(
self._tile_predict, img, self.scales)
else:
scores = self._tile_predict(img)
labels.append(chainer.backends.cuda.to_cpu(
self.xp.argmax(scores, axis=0).astype(np.int32)))
return labels
class PSPNetResNet101(PSPNet):
"""PSPNet with Dilated ResNet101 as the feature extractor.
.. seealso::
:class:`chainercv.experimental.links.model.pspnet.PSPNet`
"""
_extractor_cls = DilatedResNet
_extractor_kwargs = {'n_layer': 101}
_extractor_pick = ('res4', 'res5')
_models = {
'cityscapes': {
'param': {'n_class': 19, 'input_size': (713, 713)},
'url': 'https://chainercv-models.preferred.jp/'
'pspnet_resnet101_cityscapes_trained_2018_12_19.npz',
},
'ade20k': {
'param': {'n_class': 150, 'input_size': (473, 473)},
'url': 'https://chainercv-models.preferred.jp/'
'pspnet_resnet101_ade20k_trained_2018_12_23.npz',
},
}
class PSPNetResNet50(PSPNet):
"""PSPNet with Dilated ResNet50 as the feature extractor.
.. seealso::
:class:`chainercv.experimental.links.model.pspnet.PSPNet`
"""
_extractor_cls = DilatedResNet
_extractor_kwargs = {'n_layer': 50}
_extractor_pick = ('res4', 'res5')
_models = {
'cityscapes': {
'param': {'n_class': 19, 'input_size': (713, 713)},
'url': 'https://chainercv-models.preferred.jp/'
'pspnet_resnet50_cityscapes_trained_2018_12_19.npz',
},
'ade20k': {
'param': {'n_class': 150, 'input_size': (473, 473)},
'url': 'https://chainercv-models.preferred.jp/'
'pspnet_resnet50_ade20k_trained_2018_12_23.npz',
},
}
def _multiscale_predict(predict_method, img, scales):
orig_H, orig_W = img.shape[1:]
scores = []
orig_img = img
for scale in scales:
img = orig_img.copy()
if scale != 1.0:
img = transforms.resize(
img, (int(orig_H * scale), int(orig_W * scale)))
# This method should return scores
y = predict_method(img)[None]
assert y.shape[2:] == img.shape[1:]
if scale != 1.0:
y = F.resize_images(y, (orig_H, orig_W)).array
scores.append(y)
xp = chainer.backends.cuda.get_array_module(scores[0])
scores = xp.stack(scores)
return scores.mean(0)[0] # (C, H, W)
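# Minimal usage sketch (not part of the library): run PSPNetResNet101 defined
# above on a random CHW RGB image. The random array is just a stand-in for a
# real photo; pixel values are expected in [0, 255]. Using the 'cityscapes'
# weights downloads the pretrained model on first use.
if __name__ == '__main__':
    model = PSPNetResNet101(pretrained_model='cityscapes')
    dummy_img = np.random.uniform(0, 255, (3, 713, 713)).astype(np.float32)
    labels = model.predict([dummy_img])  # list with one (H, W) int32 label map
    print(labels[0].shape)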
|
import asyncio
from asyncio import CancelledError
from functools import partial
import logging
from typing import Dict
from dsmr_parser import obis_references as obis_ref
from dsmr_parser.clients.protocol import create_dsmr_reader, create_tcp_dsmr_reader
import serial
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_STOP,
TIME_HOURS,
)
from homeassistant.core import CoreState, callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
CONF_DSMR_VERSION,
CONF_PRECISION,
CONF_RECONNECT_INTERVAL,
CONF_SERIAL_ID,
CONF_SERIAL_ID_GAS,
DATA_TASK,
DEFAULT_DSMR_VERSION,
DEFAULT_PORT,
DEFAULT_PRECISION,
DEFAULT_RECONNECT_INTERVAL,
DEVICE_NAME_ENERGY,
DEVICE_NAME_GAS,
DOMAIN,
ICON_GAS,
ICON_POWER,
ICON_POWER_FAILURE,
ICON_SWELL_SAG,
)
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.string,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_DSMR_VERSION, default=DEFAULT_DSMR_VERSION): vol.All(
cv.string, vol.In(["5B", "5", "4", "2.2"])
),
vol.Optional(CONF_RECONNECT_INTERVAL, default=DEFAULT_RECONNECT_INTERVAL): int,
vol.Optional(CONF_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import the platform into a config entry."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the DSMR sensor."""
# Suppress logging
logging.getLogger("dsmr_parser").setLevel(logging.ERROR)
config = entry.data
dsmr_version = config[CONF_DSMR_VERSION]
# Define list of name,obis mappings to generate entities
obis_mapping = [
["Power Consumption", obis_ref.CURRENT_ELECTRICITY_USAGE],
["Power Production", obis_ref.CURRENT_ELECTRICITY_DELIVERY],
["Power Tariff", obis_ref.ELECTRICITY_ACTIVE_TARIFF],
["Energy Consumption (total)", obis_ref.ELECTRICITY_IMPORTED_TOTAL],
["Energy Consumption (tarif 1)", obis_ref.ELECTRICITY_USED_TARIFF_1],
["Energy Consumption (tarif 2)", obis_ref.ELECTRICITY_USED_TARIFF_2],
["Energy Production (tarif 1)", obis_ref.ELECTRICITY_DELIVERED_TARIFF_1],
["Energy Production (tarif 2)", obis_ref.ELECTRICITY_DELIVERED_TARIFF_2],
["Power Consumption Phase L1", obis_ref.INSTANTANEOUS_ACTIVE_POWER_L1_POSITIVE],
["Power Consumption Phase L2", obis_ref.INSTANTANEOUS_ACTIVE_POWER_L2_POSITIVE],
["Power Consumption Phase L3", obis_ref.INSTANTANEOUS_ACTIVE_POWER_L3_POSITIVE],
["Power Production Phase L1", obis_ref.INSTANTANEOUS_ACTIVE_POWER_L1_NEGATIVE],
["Power Production Phase L2", obis_ref.INSTANTANEOUS_ACTIVE_POWER_L2_NEGATIVE],
["Power Production Phase L3", obis_ref.INSTANTANEOUS_ACTIVE_POWER_L3_NEGATIVE],
["Short Power Failure Count", obis_ref.SHORT_POWER_FAILURE_COUNT],
["Long Power Failure Count", obis_ref.LONG_POWER_FAILURE_COUNT],
["Voltage Sags Phase L1", obis_ref.VOLTAGE_SAG_L1_COUNT],
["Voltage Sags Phase L2", obis_ref.VOLTAGE_SAG_L2_COUNT],
["Voltage Sags Phase L3", obis_ref.VOLTAGE_SAG_L3_COUNT],
["Voltage Swells Phase L1", obis_ref.VOLTAGE_SWELL_L1_COUNT],
["Voltage Swells Phase L2", obis_ref.VOLTAGE_SWELL_L2_COUNT],
["Voltage Swells Phase L3", obis_ref.VOLTAGE_SWELL_L3_COUNT],
["Voltage Phase L1", obis_ref.INSTANTANEOUS_VOLTAGE_L1],
["Voltage Phase L2", obis_ref.INSTANTANEOUS_VOLTAGE_L2],
["Voltage Phase L3", obis_ref.INSTANTANEOUS_VOLTAGE_L3],
["Current Phase L1", obis_ref.INSTANTANEOUS_CURRENT_L1],
["Current Phase L2", obis_ref.INSTANTANEOUS_CURRENT_L2],
["Current Phase L3", obis_ref.INSTANTANEOUS_CURRENT_L3],
]
# Generate device entities
devices = [
DSMREntity(name, DEVICE_NAME_ENERGY, config[CONF_SERIAL_ID], obis, config)
for name, obis in obis_mapping
]
# Protocol version specific obis
if CONF_SERIAL_ID_GAS in config:
if dsmr_version in ("4", "5"):
gas_obis = obis_ref.HOURLY_GAS_METER_READING
elif dsmr_version in ("5B",):
gas_obis = obis_ref.BELGIUM_HOURLY_GAS_METER_READING
else:
gas_obis = obis_ref.GAS_METER_READING
# Add gas meter reading and derivative for usage
devices += [
DSMREntity(
"Gas Consumption",
DEVICE_NAME_GAS,
config[CONF_SERIAL_ID_GAS],
gas_obis,
config,
),
DerivativeDSMREntity(
"Hourly Gas Consumption",
DEVICE_NAME_GAS,
config[CONF_SERIAL_ID_GAS],
gas_obis,
config,
),
]
async_add_entities(devices)
def update_entities_telegram(telegram):
"""Update entities with latest telegram and trigger state update."""
# Make all device entities aware of new telegram
for device in devices:
device.update_data(telegram)
# Creates an asyncio.Protocol factory for reading DSMR telegrams from
# serial and calls update_entities_telegram to update entities on arrival
if CONF_HOST in config:
reader_factory = partial(
create_tcp_dsmr_reader,
config[CONF_HOST],
config[CONF_PORT],
config[CONF_DSMR_VERSION],
update_entities_telegram,
loop=hass.loop,
)
else:
reader_factory = partial(
create_dsmr_reader,
config[CONF_PORT],
config[CONF_DSMR_VERSION],
update_entities_telegram,
loop=hass.loop,
)
async def connect_and_reconnect():
"""Connect to DSMR and keep reconnecting until Home Assistant stops."""
        # Initialize these so the CancelledError handler below never touches
        # unbound names when cancellation happens before a connection exists.
        stop_listener = None
        transport = None
        protocol = None
        while hass.state != CoreState.stopping:
# Start DSMR asyncio.Protocol reader
try:
transport, protocol = await hass.loop.create_task(reader_factory())
if transport:
# Register listener to close transport on HA shutdown
stop_listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, transport.close
)
# Wait for reader to close
await protocol.wait_closed()
# Unexpected disconnect
if transport:
# remove listener
stop_listener()
transport = None
protocol = None
# Reflect disconnect state in devices state by setting an
# empty telegram resulting in `unknown` states
update_entities_telegram({})
# throttle reconnect attempts
await asyncio.sleep(config[CONF_RECONNECT_INTERVAL])
except (serial.serialutil.SerialException, OSError):
# Log any error while establishing connection and drop to retry
# connection wait
_LOGGER.exception("Error connecting to DSMR")
transport = None
protocol = None
except CancelledError:
if stop_listener:
stop_listener()
if transport:
transport.close()
if protocol:
await protocol.wait_closed()
return
# Can't be hass.async_add_job because job runs forever
task = asyncio.create_task(connect_and_reconnect())
# Save the task to be able to cancel it when unloading
hass.data[DOMAIN][entry.entry_id][DATA_TASK] = task
class DSMREntity(Entity):
"""Entity reading values from DSMR telegram."""
def __init__(self, name, device_name, device_serial, obis, config):
"""Initialize entity."""
self._name = name
self._obis = obis
self._config = config
self.telegram = {}
self._device_name = device_name
self._device_serial = device_serial
self._unique_id = f"{device_serial}_{name}".replace(" ", "_")
@callback
def update_data(self, telegram):
"""Update data."""
self.telegram = telegram
if self.hass:
self.async_write_ha_state()
def get_dsmr_object_attr(self, attribute):
"""Read attribute from last received telegram for this DSMR object."""
        # Make sure telegram contains an object for this entity's obis
if self._obis not in self.telegram:
return None
# Get the attribute value if the object has it
dsmr_object = self.telegram[self._obis]
return getattr(dsmr_object, attribute, None)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
        if "Sags" in self._name or "Swells" in self._name:
return ICON_SWELL_SAG
if "Failure" in self._name:
return ICON_POWER_FAILURE
if "Power" in self._name:
return ICON_POWER
if "Gas" in self._name:
return ICON_GAS
@property
def state(self):
"""Return the state of sensor, if available, translate if needed."""
value = self.get_dsmr_object_attr("value")
if self._obis == obis_ref.ELECTRICITY_ACTIVE_TARIFF:
return self.translate_tariff(value, self._config[CONF_DSMR_VERSION])
try:
value = round(float(value), self._config[CONF_PRECISION])
except TypeError:
pass
if value is not None:
return value
return None
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self.get_dsmr_object_attr("unit")
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def device_info(self) -> Dict[str, any]:
"""Return the device information."""
return {
"identifiers": {(DOMAIN, self._device_serial)},
"name": self._device_name,
}
@property
def force_update(self):
"""Force update."""
return True
@property
def should_poll(self):
"""Disable polling."""
return False
@staticmethod
def translate_tariff(value, dsmr_version):
"""Convert 2/1 to normal/low depending on DSMR version."""
# DSMR V5B: Note: In Belgium values are swapped:
# Rate code 2 is used for low rate and rate code 1 is used for normal rate.
if dsmr_version in ("5B",):
if value == "0001":
value = "0002"
elif value == "0002":
value = "0001"
# DSMR V2.2: Note: Rate code 1 is used for low rate and rate code 2 is
# used for normal rate.
if value == "0002":
return "normal"
if value == "0001":
return "low"
return None
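# Worked example of DSMREntity.translate_tariff() above:
#   dsmr_version "2.2"/"4"/"5": "0001" -> "low", "0002" -> "normal"
#   dsmr_version "5B" (Belgium, codes swapped): "0001" -> "normal", "0002" -> "low"
# Any other value translates to None.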
class DerivativeDSMREntity(DSMREntity):
"""Calculated derivative for values where the DSMR doesn't offer one.
    Gas readings are only reported per hour and don't offer a rate, only
    the current meter reading. This entity converts subsequent readings
    into an hourly rate.
"""
_previous_reading = None
_previous_timestamp = None
_state = None
@property
def state(self):
"""Return the calculated current hourly rate."""
return self._state
@property
def force_update(self):
"""Disable force update."""
return False
@property
def should_poll(self):
"""Enable polling."""
return True
async def async_update(self):
"""Recalculate hourly rate if timestamp has changed.
DSMR updates gas meter reading every hour. Along with the new
        value a timestamp is provided for the reading. If the last
        known timestamp differs from the current one, a new rate is
        calculated for the previous hour.
"""
# check if the timestamp for the object differs from the previous one
timestamp = self.get_dsmr_object_attr("datetime")
if timestamp and timestamp != self._previous_timestamp:
current_reading = self.get_dsmr_object_attr("value")
if self._previous_reading is None:
# Can't calculate rate without previous datapoint
# just store current point
pass
else:
# Recalculate the rate
diff = current_reading - self._previous_reading
timediff = timestamp - self._previous_timestamp
total_seconds = timediff.total_seconds()
self._state = round(float(diff) / total_seconds * 3600, 3)
self._previous_reading = current_reading
self._previous_timestamp = timestamp
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, per hour, if any."""
unit = self.get_dsmr_object_attr("unit")
if unit:
return f"{unit}/{TIME_HOURS}"
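# Worked example of DerivativeDSMREntity.async_update() above (made-up readings):
#   previous telegram: 100.000 m3 at 12:00, current telegram: 100.500 m3 at 13:00
#   diff = 0.5, timediff = 3600 s -> state = round(0.5 / 3600 * 3600, 3) = 0.5
# i.e. an average consumption of 0.5 m3 per hour over the past hour.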
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
assert logging.root.getEffectiveLevel() == logging.WARN, (
'default logging.root level should be WARN, but found {}'.format(
logging.root.getEffectiveLevel()))
# This is here to test importing logging won't change the level.
logging.root.setLevel(logging.ERROR)
assert logging.root.getEffectiveLevel() == logging.ERROR, (
'logging.root level should be changed to ERROR, but found {}'.format(
logging.root.getEffectiveLevel()))
from absl import flags
from absl import logging as _ # pylint: disable=unused-import
from absl.testing import absltest
FLAGS = flags.FLAGS
assert FLAGS['verbosity'].value == -1, (
'-v/--verbosity should be -1 before flags are parsed.')
assert logging.root.getEffectiveLevel() == logging.ERROR, (
'logging.root level should be kept to ERROR, but found {}'.format(
logging.root.getEffectiveLevel()))
class VerbosityFlagTest(absltest.TestCase):
def test_default_value_after_init(self):
self.assertEqual(0, FLAGS.verbosity)
self.assertEqual(logging.INFO, logging.root.getEffectiveLevel())
if __name__ == '__main__':
absltest.main()
|
import tests
from pyVim import connect
from pyVmomi import vim
from pyVmomi.VmomiSupport import VmomiJSONEncoder, templateOf
from six import PY3
import unittest
import atexit
import inspect
import json
import os
class JSONTests(tests.VCRTestBase):
@property
def datacenter(self):
return getattr(vim, 'Datacenter')('datacenter-2', self.si._stub)
@property
def datastore(self):
return getattr(vim, 'Datastore')('datastore-15', self.si._stub)
@property
def host(self):
return getattr(vim, 'HostSystem')('host-14', self.si._stub)
@property
def network(self):
return getattr(vim, 'Network')('network-16', self.si._stub)
@property
def si(self):
if not hasattr(self, '_si'):
self._si = connect.SmartConnectNoSSL(
host='vcenter', user='my_user', pwd='my_pass')
atexit.register(connect.Disconnect, self._si)
return self._si
@property
def vm(self):
return getattr(vim, 'VirtualMachine')('vm-22', self.si._stub)
@property
def vm2(self):
return getattr(vim, 'VirtualMachine')('vm-227', self.si._stub)
def expect(self, data):
"""
Handle results expectation. If the expect data file does not
exist we write one out. Otherwise we read and compare. This
means it is the responsibility of the individual creating the
.expect files to ensure they are correct.
"""
testname = inspect.stack()[1][3]
expectfile = 'tests/files/{0}.expect'.format(testname)
if os.path.exists(expectfile):
with open(expectfile, 'r') as file:
expectdata = file.read()
self.assertEqual(data, expectdata)
else:
with open(expectfile, 'w') as file:
file.write(data)
# NOTE: sort_keys is needed for expect comparison to work;
# not required for consumption
# Explodes the VM
# By definition if the input is a ManagedObject then it explodes.
@unittest.skip("skipped for 7.0 release")
@tests.VCRTestBase.my_vcr.use_cassette(
'test_json_vm_explode_default.yaml',
cassette_library_dir=tests.fixtures_path, record_mode='once')
def test_json_vm_explode_default(self):
raw = json.dumps(self.vm, cls=VmomiJSONEncoder, sort_keys=True)
self.expect(raw)
# Basic sanity check
data = json.loads(raw)
self.assertEqual(data['_vimid'], 'vm-22')
self.assertEqual(data['_vimref'], 'vim.VirtualMachine:vm-22')
self.assertEqual(data['_vimtype'], 'vim.VirtualMachine')
self.assertEqual(data['overallStatus'], 'green')
self.assertEqual(len(data['network']), 1)
self.assertEqual(len(data['capability']['dynamicProperty']), 0)
self.assertIsNone(data['capability']['dynamicType'])
# Explodes the VM disabling the dynamic field stripping
# By definition if the input is a ManagedObject then it explodes.
@unittest.skip("skipped for 7.0 release")
@tests.VCRTestBase.my_vcr.use_cassette(
'test_json_vm_explode_strip_dynamic.yaml',
cassette_library_dir=tests.fixtures_path, record_mode='once')
def test_json_vm_explode_strip_dynamic(self):
raw = json.dumps(self.vm, cls=VmomiJSONEncoder, sort_keys=True,
strip_dynamic=True)
self.expect(raw)
# Basic sanity check
data = json.loads(raw)
self.assertEqual(data['_vimid'], 'vm-22')
self.assertEqual(data['_vimref'], 'vim.VirtualMachine:vm-22')
self.assertEqual(data['_vimtype'], 'vim.VirtualMachine')
self.assertEqual(data['overallStatus'], 'green')
self.assertEqual(len(data['network']), 1)
self.assertTrue('dynamicProperty' not in data['capability'])
self.assertTrue('dynamicType' not in data['capability'])
# Explodes the VM and the VM's networks
# Here self.vm is redundant (see above) but not harmful.
@unittest.skip("skipped for 7.0 release")
@tests.VCRTestBase.my_vcr.use_cassette(
'test_json_vm_explode_objs.yaml',
cassette_library_dir=tests.fixtures_path, record_mode='once')
def test_json_vm_explode_objs_match(self):
to_explode = [self.vm]
for item in self.vm.network:
to_explode.append(item)
self.expect(json.dumps(self.vm, cls=VmomiJSONEncoder, sort_keys=True,
explode=to_explode))
# Explodes by type: all VirtualMachine and all of its snapshots
@unittest.skip("skipped for 7.0 release")
@tests.VCRTestBase.my_vcr.use_cassette(
'test_json_vm_explode_type.yaml',
cassette_library_dir=tests.fixtures_path, record_mode='once')
def test_json_vm_explode_type_match(self):
self.expect(json.dumps([self.vm, self.vm2], cls=VmomiJSONEncoder,
sort_keys=True,
explode=[templateOf('VirtualMachine'),
templateOf('VirtualMachineSnapshot')]))
# Test Datacenter
@unittest.skip("skipped for 7.0 release")
@tests.VCRTestBase.my_vcr.use_cassette(
'test_json_datacenter_explode.yaml',
cassette_library_dir=tests.fixtures_path, record_mode='once')
def test_json_datacenter_explode(self):
self.expect(json.dumps(self.datacenter, cls=VmomiJSONEncoder,
sort_keys=True))
# Test Datastore
@unittest.skip("skipped for 7.0 release")
@tests.VCRTestBase.my_vcr.use_cassette(
'test_json_datastore_explode.yaml',
cassette_library_dir=tests.fixtures_path, record_mode='once')
def test_json_datastore_explode(self):
self.expect(json.dumps(self.datastore, cls=VmomiJSONEncoder,
sort_keys=True))
# Test HostSystem
@unittest.skip("skipped for 7.0 release")
@tests.VCRTestBase.my_vcr.use_cassette(
'test_json_host_explode.yaml',
cassette_library_dir=tests.fixtures_path, record_mode='once')
def test_json_host_explode(self):
self.expect(json.dumps(self.host, cls=VmomiJSONEncoder,
sort_keys=True))
# Test Network
@unittest.skip("skipped for 7.0 release")
@tests.VCRTestBase.my_vcr.use_cassette(
'test_json_network_explode.yaml',
cassette_library_dir=tests.fixtures_path, record_mode='once')
def test_json_network_explode(self):
self.expect(json.dumps(self.network, cls=VmomiJSONEncoder,
sort_keys=True))
|
from homeassistant.components.kodi.const import DOMAIN
from homeassistant.config_entries import ENTRY_STATE_LOADED, ENTRY_STATE_NOT_LOADED
from . import init_integration
from tests.async_mock import patch
async def test_unload_entry(hass):
"""Test successful unload of entry."""
with patch(
"homeassistant.components.kodi.media_player.async_setup_entry",
return_value=True,
):
entry = await init_integration(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
assert await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_NOT_LOADED
assert not hass.data.get(DOMAIN)
|
from typing import List
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_PROBLEM,
BinarySensorEntity,
)
from .const import DOMAIN as FLO_DOMAIN
from .device import FloDeviceDataUpdateCoordinator
from .entity import FloEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Flo sensors from config entry."""
devices: List[FloDeviceDataUpdateCoordinator] = hass.data[FLO_DOMAIN][
config_entry.entry_id
]["devices"]
entities = [FloPendingAlertsBinarySensor(device) for device in devices]
async_add_entities(entities)
class FloPendingAlertsBinarySensor(FloEntity, BinarySensorEntity):
"""Binary sensor that reports on if there are any pending system alerts."""
def __init__(self, device):
"""Initialize the pending alerts binary sensor."""
super().__init__("pending_system_alerts", "Pending System Alerts", device)
@property
def is_on(self):
"""Return true if the Flo device has pending alerts."""
return self._device.has_alerts
@property
def device_state_attributes(self):
"""Return the state attributes."""
if not self._device.has_alerts:
return {}
return {
"info": self._device.pending_info_alerts_count,
"warning": self._device.pending_warning_alerts_count,
"critical": self._device.pending_critical_alerts_count,
}
@property
def device_class(self):
"""Return the device class for the binary sensor."""
return DEVICE_CLASS_PROBLEM
|
import unittest
import numpy as np
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
from chainercv.datasets import voc_bbox_label_names
from chainercv.datasets import VOCBboxDataset
from chainercv.utils import assert_is_bbox_dataset
def _create_parameters():
split_years = testing.product({
'split': ['train', 'trainval', 'val'],
'year': ['2007', '2012']})
split_years += [{'split': 'test', 'year': '2007'}]
params = testing.product_dict(
split_years,
[{'use_difficult': True, 'return_difficult': True},
{'use_difficult': True, 'return_difficult': False},
{'use_difficult': False, 'return_difficult': True},
{'use_difficult': False, 'return_difficult': False}])
return params
@testing.parameterize(*_create_parameters())
class TestVOCBboxDataset(unittest.TestCase):
def setUp(self):
self.dataset = VOCBboxDataset(
split=self.split,
year=self.year,
use_difficult=self.use_difficult,
return_difficult=self.return_difficult)
self.n_out = 4 if self.return_difficult else 3
@attr.slow
def test_as_bbox_dataset(self):
assert_is_bbox_dataset(
self.dataset, len(voc_bbox_label_names), n_example=10)
@attr.slow
@condition.repeat(10)
def test_difficult(self):
if not self.return_difficult:
return
i = np.random.randint(0, len(self.dataset))
_, bbox, _, difficult = self.dataset[i]
self.assertIsInstance(difficult, np.ndarray)
self.assertEqual(difficult.dtype, np.bool)
self.assertEqual(difficult.shape, (bbox.shape[0],))
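        # When use_difficult=False the dataset filters out difficult boxes,
        # so any flags it does return must all be False.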
if not self.use_difficult:
np.testing.assert_equal(difficult, 0)
testing.run_module(__name__, __file__)
|
from typing import Any
import pypck
from homeassistant.components.scene import Scene
from homeassistant.const import CONF_ADDRESS
from . import LcnDevice
from .const import (
CONF_CONNECTIONS,
CONF_OUTPUTS,
CONF_REGISTER,
CONF_SCENE,
CONF_TRANSITION,
DATA_LCN,
OUTPUT_PORTS,
)
from .helpers import get_connection
async def async_setup_platform(
hass, hass_config, async_add_entities, discovery_info=None
):
"""Set up the LCN scene platform."""
if discovery_info is None:
return
devices = []
for config in discovery_info:
address, connection_id = config[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*address)
connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
connection = get_connection(connections, connection_id)
address_connection = connection.get_address_conn(addr)
devices.append(LcnScene(config, address_connection))
async_add_entities(devices)
class LcnScene(LcnDevice, Scene):
"""Representation of a LCN scene."""
def __init__(self, config, address_connection):
"""Initialize the LCN scene."""
super().__init__(config, address_connection)
self.register_id = config[CONF_REGISTER]
self.scene_id = config[CONF_SCENE]
self.output_ports = []
self.relay_ports = []
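        # Sort the configured ports into dimmable outputs and relay ports.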
for port in config[CONF_OUTPUTS]:
if port in OUTPUT_PORTS:
self.output_ports.append(pypck.lcn_defs.OutputPort[port])
            else:  # in RELAY_PORTS
self.relay_ports.append(pypck.lcn_defs.RelayPort[port])
if config[CONF_TRANSITION] is None:
self.transition = None
else:
self.transition = pypck.lcn_defs.time_to_ramp_value(config[CONF_TRANSITION])
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
async def async_activate(self, **kwargs: Any) -> None:
"""Activate scene."""
self.address_connection.activate_scene(
self.register_id,
self.scene_id,
self.output_ports,
self.relay_ports,
self.transition,
)
|
import os
import re
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hostname(host):
assert re.search(r'instance-[12]', host.check_output('hostname -s'))
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
filename = '/etc/molecule/{}'.format(host.check_output('hostname -s'))
f = host.file(filename)
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
import ast
import sys
class MockChecker(ast.NodeVisitor):
def __init__(self):
self.errors = 0
self.init_module_imports()
def init_module_imports(self):
self.imported_patch = False
self.imported_mock = False
def check_files(self, files):
for file in files:
self.check_file(file)
def check_file(self, filename):
self.current_filename = filename
try:
with open(filename, "r") as fd:
try:
file_ast = ast.parse(fd.read())
except SyntaxError as error:
print("SyntaxError on file %s:%d" % (filename, error.lineno))
return
except IOError:
print("Error opening filename: %s" % filename)
return
self.init_module_imports()
self.visit(file_ast)
def _call_uses_patch(self, node):
try:
return node.func.id == "patch"
except AttributeError:
return False
def _call_uses_mock_patch(self, node):
try:
return node.func.value.id == "mock" and node.func.attr == "patch"
except AttributeError:
return False
def visit_Import(self, node):
if [name for name in node.names if "mock" == name.name]:
self.imported_mock = True
    def visit_ImportFrom(self, node):
        # A bare generator expression is always truthy, so check explicitly
        # that "patch" is among the names imported from "mock".
        if node.module == "mock" and any(
            name.name == "patch" for name in node.names
        ):
            self.imported_patch = True
def visit_Call(self, node):
try:
if (self.imported_patch and self._call_uses_patch(node)) or (
self.imported_mock and self._call_uses_mock_patch(node)
):
if not any(
[keyword for keyword in node.keywords if keyword.arg == "autospec"]
):
print(
"%s:%d: Found a mock without an autospec!"
% (self.current_filename, node.lineno)
)
self.errors += 1
except AttributeError:
pass
self.generic_visit(node)
def main(filenames):
checker = MockChecker()
checker.check_files(filenames)
if checker.errors == 0:
sys.exit(0)
else:
print("You probably meant to specify 'autospec=True' in these tests.")
print("If you really don't want to, specify 'autospec=None'")
sys.exit(1)
if __name__ == "__main__":
main(sys.argv[1:])
|
import logging
from pymystrom.exceptions import MyStromConnectionError
from pymystrom.switch import MyStromSwitch as _MyStromSwitch
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = "myStrom Switch"
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the myStrom switch/plug integration."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
try:
plug = _MyStromSwitch(host)
await plug.get_state()
except MyStromConnectionError as err:
_LOGGER.error("No route to myStrom plug: %s", host)
raise PlatformNotReady() from err
async_add_entities([MyStromSwitch(plug, name)])
class MyStromSwitch(SwitchEntity):
"""Representation of a myStrom switch/plug."""
def __init__(self, plug, name):
"""Initialize the myStrom switch/plug."""
self._name = name
self.plug = plug
self._available = True
self.relay = None
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return bool(self.relay)
@property
def unique_id(self):
"""Return a unique ID."""
return self.plug._mac # pylint: disable=protected-access
@property
def current_power_w(self):
"""Return the current power consumption in W."""
return self.plug.consumption
@property
def available(self):
"""Could the device be accessed during the last update call."""
return self._available
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
try:
await self.plug.turn_on()
except MyStromConnectionError:
_LOGGER.error("No route to myStrom plug")
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
try:
await self.plug.turn_off()
except MyStromConnectionError:
_LOGGER.error("No route to myStrom plug")
async def async_update(self):
"""Get the latest data from the device and update the data."""
try:
await self.plug.get_state()
self.relay = self.plug.relay
self._available = True
except MyStromConnectionError:
self._available = False
_LOGGER.error("No route to myStrom plug")
|
import socket
from contextlib import closing
from time import sleep
import pytest
import kombu
class BasicFunctionality:
def test_connect(self, connection):
assert connection.connect()
assert connection.connection
connection.close()
assert connection.connection is None
assert connection.connect()
assert connection.connection
connection.close()
def test_failed_connect(self, invalid_connection):
# method raises transport exception
with pytest.raises(Exception):
invalid_connection.connect()
def test_failed_connection(self, invalid_connection):
# method raises transport exception
with pytest.raises(Exception):
invalid_connection.connection
def test_failed_channel(self, invalid_connection):
# method raises transport exception
with pytest.raises(Exception):
invalid_connection.channel()
def test_failed_default_channel(self, invalid_connection):
invalid_connection.transport_options = {'max_retries': 1}
# method raises transport exception
with pytest.raises(Exception):
invalid_connection.default_channel
def test_default_channel_autoconnect(self, connection):
connection.connect()
connection.close()
assert connection.connection is None
assert connection.default_channel
assert connection.connection
connection.close()
def test_channel(self, connection):
chan = connection.channel()
assert chan
assert connection.connection
def test_default_channel(self, connection):
chan = connection.default_channel
assert chan
assert connection.connection
def test_publish_consume(self, connection):
test_queue = kombu.Queue('test', routing_key='test')
def callback(body, message):
assert body == {'hello': 'world'}
assert message.content_type == 'application/x-python-serialize'
            assert message.delivery_info['routing_key'] == 'test'
            assert message.delivery_info['exchange'] == ''
message.ack()
assert message.payload == body
with connection as conn:
with conn.channel() as channel:
producer = kombu.Producer(channel)
producer.publish(
{'hello': 'world'},
retry=True,
exchange=test_queue.exchange,
routing_key=test_queue.routing_key,
declare=[test_queue],
serializer='pickle'
)
consumer = kombu.Consumer(
conn, [test_queue], accept=['pickle']
)
consumer.register_callback(callback)
with consumer:
conn.drain_events(timeout=1)
def test_consume_empty_queue(self, connection):
def callback(body, message):
assert False, 'Callback should not be called'
test_queue = kombu.Queue('test_empty', routing_key='test_empty')
with connection as conn:
with conn.channel():
consumer = kombu.Consumer(
conn, [test_queue], accept=['pickle']
)
consumer.register_callback(callback)
with consumer:
with pytest.raises(socket.timeout):
conn.drain_events(timeout=1)
def test_simple_queue_publish_consume(self, connection):
with connection as conn:
with closing(conn.SimpleQueue('simple_queue_test')) as queue:
queue.put({'Hello': 'World'}, headers={'k1': 'v1'})
message = queue.get(timeout=1)
assert message.payload == {'Hello': 'World'}
assert message.content_type == 'application/json'
assert message.content_encoding == 'utf-8'
assert message.headers == {'k1': 'v1'}
message.ack()
def test_simple_buffer_publish_consume(self, connection):
with connection as conn:
with closing(conn.SimpleBuffer('simple_buffer_test')) as buf:
buf.put({'Hello': 'World'}, headers={'k1': 'v1'})
message = buf.get(timeout=1)
assert message.payload == {'Hello': 'World'}
assert message.content_type == 'application/json'
assert message.content_encoding == 'utf-8'
assert message.headers == {'k1': 'v1'}
message.ack()
class BaseExchangeTypes:
def _callback(self, body, message):
message.ack()
assert body == {'hello': 'world'}
assert message.content_type == 'application/x-python-serialize'
assert message.payload == body
def _consume(self, connection, queue):
consumer = kombu.Consumer(
connection, [queue], accept=['pickle']
)
consumer.register_callback(self._callback)
with consumer:
connection.drain_events(timeout=1)
def _publish(self, channel, exchange, queues, routing_key=None):
producer = kombu.Producer(channel, exchange=exchange)
if routing_key:
producer.publish(
{'hello': 'world'},
declare=list(queues),
serializer='pickle',
routing_key=routing_key
)
else:
producer.publish(
{'hello': 'world'},
declare=list(queues),
serializer='pickle'
)
def test_direct(self, connection):
ex = kombu.Exchange('test_direct', type='direct')
test_queue = kombu.Queue('direct1', exchange=ex)
with connection as conn:
with conn.channel() as channel:
self._publish(channel, ex, [test_queue])
self._consume(conn, test_queue)
def test_direct_routing_keys(self, connection):
ex = kombu.Exchange('test_rk_direct', type='direct')
test_queue1 = kombu.Queue('rk_direct1', exchange=ex, routing_key='d1')
test_queue2 = kombu.Queue('rk_direct2', exchange=ex, routing_key='d2')
with connection as conn:
with conn.channel() as channel:
self._publish(channel, ex, [test_queue1, test_queue2], 'd1')
self._consume(conn, test_queue1)
# direct2 queue should not have data
with pytest.raises(socket.timeout):
self._consume(conn, test_queue2)
def test_fanout(self, connection):
ex = kombu.Exchange('test_fanout', type='fanout')
test_queue1 = kombu.Queue('fanout1', exchange=ex)
test_queue2 = kombu.Queue('fanout2', exchange=ex)
with connection as conn:
with conn.channel() as channel:
self._publish(channel, ex, [test_queue1, test_queue2])
self._consume(conn, test_queue1)
self._consume(conn, test_queue2)
def test_topic(self, connection):
ex = kombu.Exchange('test_topic', type='topic')
test_queue1 = kombu.Queue('topic1', exchange=ex, routing_key='t.*')
test_queue2 = kombu.Queue('topic2', exchange=ex, routing_key='t.*')
test_queue3 = kombu.Queue('topic3', exchange=ex, routing_key='t')
with connection as conn:
with conn.channel() as channel:
self._publish(
channel, ex, [test_queue1, test_queue2, test_queue3],
routing_key='t.1'
)
self._consume(conn, test_queue1)
self._consume(conn, test_queue2)
with pytest.raises(socket.timeout):
# topic3 queue should not have data
self._consume(conn, test_queue3)
class BaseTimeToLive:
def test_publish_consume(self, connection):
test_queue = kombu.Queue('ttl_test', routing_key='ttl_test')
def callback(body, message):
assert False, 'Callback should not be called'
with connection as conn:
with conn.channel() as channel:
producer = kombu.Producer(channel)
producer.publish(
{'hello': 'world'},
retry=True,
exchange=test_queue.exchange,
routing_key=test_queue.routing_key,
declare=[test_queue],
serializer='pickle',
expiration=2
)
consumer = kombu.Consumer(
conn, [test_queue], accept=['pickle']
)
consumer.register_callback(callback)
sleep(3)
with consumer:
with pytest.raises(socket.timeout):
conn.drain_events(timeout=1)
def test_simple_queue_publish_consume(self, connection):
with connection as conn:
with closing(conn.SimpleQueue('ttl_simple_queue_test')) as queue:
queue.put(
{'Hello': 'World'}, headers={'k1': 'v1'}, expiration=2
)
sleep(3)
with pytest.raises(queue.Empty):
queue.get(timeout=1)
def test_simple_buffer_publish_consume(self, connection):
with connection as conn:
with closing(conn.SimpleBuffer('ttl_simple_buffer_test')) as buf:
buf.put({'Hello': 'World'}, headers={'k1': 'v1'}, expiration=2)
sleep(3)
with pytest.raises(buf.Empty):
buf.get(timeout=1)
class BasePriority:
PRIORITY_ORDER = 'asc'
def test_publish_consume(self, connection):
# py-amqp transport has higher numbers higher priority
# redis transport has lower numbers higher priority
if self.PRIORITY_ORDER == 'asc':
prio_high = 6
prio_low = 3
else:
prio_high = 3
prio_low = 6
test_queue = kombu.Queue(
'priority_test', routing_key='priority_test', max_priority=10
)
received_messages = []
def callback(body, message):
received_messages.append(body)
message.ack()
with connection as conn:
with conn.channel() as channel:
producer = kombu.Producer(channel)
for msg, prio in [
[{'msg': 'first'}, prio_low],
[{'msg': 'second'}, prio_high],
[{'msg': 'third'}, prio_low],
]:
producer.publish(
msg,
retry=True,
exchange=test_queue.exchange,
routing_key=test_queue.routing_key,
declare=[test_queue],
serializer='pickle',
priority=prio
)
                # Sleep to make sure the queue is sorted by priority.
sleep(0.5)
consumer = kombu.Consumer(
conn, [test_queue], accept=['pickle']
)
consumer.register_callback(callback)
with consumer:
conn.drain_events(timeout=1)
# Second message must be received first
assert received_messages[0] == {'msg': 'second'}
assert received_messages[1] == {'msg': 'first'}
assert received_messages[2] == {'msg': 'third'}
def test_simple_queue_publish_consume(self, connection):
if self.PRIORITY_ORDER == 'asc':
prio_high = 7
prio_low = 1
else:
prio_high = 1
prio_low = 7
with connection as conn:
with closing(
conn.SimpleQueue(
'priority_simple_queue_test',
queue_opts={'max_priority': 10}
)
) as queue:
for msg, prio in [
[{'msg': 'first'}, prio_low],
[{'msg': 'second'}, prio_high],
[{'msg': 'third'}, prio_low],
]:
queue.put(
msg, headers={'k1': 'v1'}, priority=prio
)
                # Sleep to make sure the queue is sorted by priority.
sleep(0.5)
# Second message must be received first
for data in [
{'msg': 'second'}, {'msg': 'first'}, {'msg': 'third'},
]:
msg = queue.get(timeout=1)
msg.ack()
assert msg.payload == data
def test_simple_buffer_publish_consume(self, connection):
if self.PRIORITY_ORDER == 'asc':
prio_high = 6
prio_low = 2
else:
prio_high = 2
prio_low = 6
with connection as conn:
with closing(
conn.SimpleBuffer(
'priority_simple_buffer_test',
queue_opts={'max_priority': 10}
)
) as buf:
for msg, prio in [
[{'msg': 'first'}, prio_low],
[{'msg': 'second'}, prio_high],
[{'msg': 'third'}, prio_low],
]:
buf.put(
msg, headers={'k1': 'v1'}, priority=prio
)
                # Sleep to make sure the queue is sorted by priority.
sleep(0.5)
# Second message must be received first
for data in [
{'msg': 'second'}, {'msg': 'first'}, {'msg': 'third'},
]:
msg = buf.get(timeout=1)
msg.ack()
assert msg.payload == data
class BaseFailover(BasicFunctionality):
def test_connect(self, failover_connection):
super().test_connect(failover_connection)
def test_publish_consume(self, failover_connection):
super().test_publish_consume(failover_connection)
def test_consume_empty_queue(self, failover_connection):
super().test_consume_empty_queue(failover_connection)
def test_simple_buffer_publish_consume(self, failover_connection):
super().test_simple_buffer_publish_consume(
failover_connection
)
|
import voluptuous as vol
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.const import (
CONF_TURNED_OFF,
CONF_TURNED_ON,
)
from homeassistant.components.homeassistant.triggers import state as state_trigger
from homeassistant.const import ATTR_DEVICE_CLASS, CONF_ENTITY_ID, CONF_FOR, CONF_TYPE
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity_registry import async_entries_for_device
from . import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_BATTERY_CHARGING,
DEVICE_CLASS_COLD,
DEVICE_CLASS_CONNECTIVITY,
DEVICE_CLASS_DOOR,
DEVICE_CLASS_GARAGE_DOOR,
DEVICE_CLASS_GAS,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_MOVING,
DEVICE_CLASS_OCCUPANCY,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PLUG,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PRESENCE,
DEVICE_CLASS_PROBLEM,
DEVICE_CLASS_SAFETY,
DEVICE_CLASS_SMOKE,
DEVICE_CLASS_SOUND,
DEVICE_CLASS_VIBRATION,
DEVICE_CLASS_WINDOW,
DOMAIN,
)
# mypy: allow-untyped-defs, no-check-untyped-defs
DEVICE_CLASS_NONE = "none"
CONF_BAT_LOW = "bat_low"
CONF_NOT_BAT_LOW = "not_bat_low"
CONF_CHARGING = "charging"
CONF_NOT_CHARGING = "not_charging"
CONF_COLD = "cold"
CONF_NOT_COLD = "not_cold"
CONF_CONNECTED = "connected"
CONF_NOT_CONNECTED = "not_connected"
CONF_GAS = "gas"
CONF_NO_GAS = "no_gas"
CONF_HOT = "hot"
CONF_NOT_HOT = "not_hot"
CONF_LIGHT = "light"
CONF_NO_LIGHT = "no_light"
CONF_LOCKED = "locked"
CONF_NOT_LOCKED = "not_locked"
CONF_MOIST = "moist"
CONF_NOT_MOIST = "not_moist"
CONF_MOTION = "motion"
CONF_NO_MOTION = "no_motion"
CONF_MOVING = "moving"
CONF_NOT_MOVING = "not_moving"
CONF_OCCUPIED = "occupied"
CONF_NOT_OCCUPIED = "not_occupied"
CONF_PLUGGED_IN = "plugged_in"
CONF_NOT_PLUGGED_IN = "not_plugged_in"
CONF_POWERED = "powered"
CONF_NOT_POWERED = "not_powered"
CONF_PRESENT = "present"
CONF_NOT_PRESENT = "not_present"
CONF_PROBLEM = "problem"
CONF_NO_PROBLEM = "no_problem"
CONF_UNSAFE = "unsafe"
CONF_NOT_UNSAFE = "not_unsafe"
CONF_SMOKE = "smoke"
CONF_NO_SMOKE = "no_smoke"
CONF_SOUND = "sound"
CONF_NO_SOUND = "no_sound"
CONF_VIBRATION = "vibration"
CONF_NO_VIBRATION = "no_vibration"
CONF_OPENED = "opened"
CONF_NOT_OPENED = "not_opened"
TURNED_ON = [
CONF_BAT_LOW,
CONF_COLD,
CONF_CONNECTED,
CONF_GAS,
CONF_HOT,
CONF_LIGHT,
CONF_NOT_LOCKED,
CONF_MOIST,
CONF_MOTION,
CONF_MOVING,
CONF_OCCUPIED,
CONF_OPENED,
CONF_PLUGGED_IN,
CONF_POWERED,
CONF_PRESENT,
CONF_PROBLEM,
CONF_SMOKE,
CONF_SOUND,
CONF_UNSAFE,
CONF_VIBRATION,
CONF_TURNED_ON,
]
TURNED_OFF = [
CONF_NOT_BAT_LOW,
CONF_NOT_COLD,
CONF_NOT_CONNECTED,
CONF_NOT_HOT,
CONF_LOCKED,
CONF_NOT_MOIST,
CONF_NOT_MOVING,
CONF_NOT_OCCUPIED,
CONF_NOT_OPENED,
CONF_NOT_PLUGGED_IN,
CONF_NOT_POWERED,
CONF_NOT_PRESENT,
CONF_NOT_UNSAFE,
CONF_NO_GAS,
CONF_NO_LIGHT,
CONF_NO_MOTION,
CONF_NO_PROBLEM,
CONF_NO_SMOKE,
CONF_NO_SOUND,
CONF_NO_VIBRATION,
CONF_TURNED_OFF,
]
ENTITY_TRIGGERS = {
DEVICE_CLASS_BATTERY: [{CONF_TYPE: CONF_BAT_LOW}, {CONF_TYPE: CONF_NOT_BAT_LOW}],
DEVICE_CLASS_BATTERY_CHARGING: [
{CONF_TYPE: CONF_CHARGING},
{CONF_TYPE: CONF_NOT_CHARGING},
],
DEVICE_CLASS_COLD: [{CONF_TYPE: CONF_COLD}, {CONF_TYPE: CONF_NOT_COLD}],
DEVICE_CLASS_CONNECTIVITY: [
{CONF_TYPE: CONF_CONNECTED},
{CONF_TYPE: CONF_NOT_CONNECTED},
],
DEVICE_CLASS_DOOR: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
DEVICE_CLASS_GARAGE_DOOR: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
DEVICE_CLASS_GAS: [{CONF_TYPE: CONF_GAS}, {CONF_TYPE: CONF_NO_GAS}],
DEVICE_CLASS_HEAT: [{CONF_TYPE: CONF_HOT}, {CONF_TYPE: CONF_NOT_HOT}],
DEVICE_CLASS_LIGHT: [{CONF_TYPE: CONF_LIGHT}, {CONF_TYPE: CONF_NO_LIGHT}],
DEVICE_CLASS_LOCK: [{CONF_TYPE: CONF_LOCKED}, {CONF_TYPE: CONF_NOT_LOCKED}],
DEVICE_CLASS_MOISTURE: [{CONF_TYPE: CONF_MOIST}, {CONF_TYPE: CONF_NOT_MOIST}],
DEVICE_CLASS_MOTION: [{CONF_TYPE: CONF_MOTION}, {CONF_TYPE: CONF_NO_MOTION}],
DEVICE_CLASS_MOVING: [{CONF_TYPE: CONF_MOVING}, {CONF_TYPE: CONF_NOT_MOVING}],
DEVICE_CLASS_OCCUPANCY: [
{CONF_TYPE: CONF_OCCUPIED},
{CONF_TYPE: CONF_NOT_OCCUPIED},
],
DEVICE_CLASS_OPENING: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
DEVICE_CLASS_PLUG: [{CONF_TYPE: CONF_PLUGGED_IN}, {CONF_TYPE: CONF_NOT_PLUGGED_IN}],
DEVICE_CLASS_POWER: [{CONF_TYPE: CONF_POWERED}, {CONF_TYPE: CONF_NOT_POWERED}],
DEVICE_CLASS_PRESENCE: [{CONF_TYPE: CONF_PRESENT}, {CONF_TYPE: CONF_NOT_PRESENT}],
DEVICE_CLASS_PROBLEM: [{CONF_TYPE: CONF_PROBLEM}, {CONF_TYPE: CONF_NO_PROBLEM}],
DEVICE_CLASS_SAFETY: [{CONF_TYPE: CONF_UNSAFE}, {CONF_TYPE: CONF_NOT_UNSAFE}],
DEVICE_CLASS_SMOKE: [{CONF_TYPE: CONF_SMOKE}, {CONF_TYPE: CONF_NO_SMOKE}],
DEVICE_CLASS_SOUND: [{CONF_TYPE: CONF_SOUND}, {CONF_TYPE: CONF_NO_SOUND}],
DEVICE_CLASS_VIBRATION: [
{CONF_TYPE: CONF_VIBRATION},
{CONF_TYPE: CONF_NO_VIBRATION},
],
DEVICE_CLASS_WINDOW: [{CONF_TYPE: CONF_OPENED}, {CONF_TYPE: CONF_NOT_OPENED}],
DEVICE_CLASS_NONE: [{CONF_TYPE: CONF_TURNED_ON}, {CONF_TYPE: CONF_TURNED_OFF}],
}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TYPE): vol.In(TURNED_OFF + TURNED_ON),
vol.Optional(CONF_FOR): cv.positive_time_period_dict,
}
)
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
trigger_type = config[CONF_TYPE]
if trigger_type in TURNED_ON:
from_state = "off"
to_state = "on"
else:
from_state = "on"
to_state = "off"
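    # Translate the device trigger into an equivalent "state" trigger config
    # and delegate the actual listening to the state trigger platform.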
state_config = {
state_trigger.CONF_PLATFORM: "state",
state_trigger.CONF_ENTITY_ID: config[CONF_ENTITY_ID],
state_trigger.CONF_FROM: from_state,
state_trigger.CONF_TO: to_state,
}
if CONF_FOR in config:
state_config[CONF_FOR] = config[CONF_FOR]
state_config = state_trigger.TRIGGER_SCHEMA(state_config)
return await state_trigger.async_attach_trigger(
hass, state_config, action, automation_info, platform_type="device"
)
async def async_get_triggers(hass, device_id):
"""List device triggers."""
triggers = []
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entries = [
entry
for entry in async_entries_for_device(entity_registry, device_id)
if entry.domain == DOMAIN
]
for entry in entries:
device_class = DEVICE_CLASS_NONE
state = hass.states.get(entry.entity_id)
if state:
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
templates = ENTITY_TRIGGERS.get(
device_class, ENTITY_TRIGGERS[DEVICE_CLASS_NONE]
)
triggers.extend(
{
**automation,
"platform": "device",
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
}
for automation in templates
)
return triggers
async def async_get_trigger_capabilities(hass, config):
"""List trigger capabilities."""
return {
"extra_fields": vol.Schema(
{vol.Optional(CONF_FOR): cv.positive_time_period_dict}
)
}
|
import logging
import requests
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_COMFORT,
PRESET_ECO,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_WHOLE, TEMP_CELSIUS
from . import (
DOMAIN as VICARE_DOMAIN,
PYVICARE_ERROR,
VICARE_API,
VICARE_HEATING_TYPE,
VICARE_NAME,
HeatingType,
)
_LOGGER = logging.getLogger(__name__)
VICARE_MODE_DHW = "dhw"
VICARE_MODE_DHWANDHEATING = "dhwAndHeating"
VICARE_MODE_DHWANDHEATINGCOOLING = "dhwAndHeatingCooling"
VICARE_MODE_FORCEDREDUCED = "forcedReduced"
VICARE_MODE_FORCEDNORMAL = "forcedNormal"
VICARE_MODE_OFF = "standby"
VICARE_PROGRAM_ACTIVE = "active"
VICARE_PROGRAM_COMFORT = "comfort"
VICARE_PROGRAM_ECO = "eco"
VICARE_PROGRAM_EXTERNAL = "external"
VICARE_PROGRAM_HOLIDAY = "holiday"
VICARE_PROGRAM_NORMAL = "normal"
VICARE_PROGRAM_REDUCED = "reduced"
VICARE_PROGRAM_STANDBY = "standby"
VICARE_HOLD_MODE_AWAY = "away"
VICARE_HOLD_MODE_HOME = "home"
VICARE_HOLD_MODE_OFF = "off"
VICARE_TEMP_HEATING_MIN = 3
VICARE_TEMP_HEATING_MAX = 37
SUPPORT_FLAGS_HEATING = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
VICARE_TO_HA_HVAC_HEATING = {
VICARE_MODE_DHW: HVAC_MODE_OFF,
VICARE_MODE_DHWANDHEATING: HVAC_MODE_AUTO,
VICARE_MODE_DHWANDHEATINGCOOLING: HVAC_MODE_AUTO,
VICARE_MODE_FORCEDREDUCED: HVAC_MODE_OFF,
VICARE_MODE_FORCEDNORMAL: HVAC_MODE_HEAT,
VICARE_MODE_OFF: HVAC_MODE_OFF,
}
HA_TO_VICARE_HVAC_HEATING = {
HVAC_MODE_HEAT: VICARE_MODE_FORCEDNORMAL,
HVAC_MODE_OFF: VICARE_MODE_FORCEDREDUCED,
HVAC_MODE_AUTO: VICARE_MODE_DHWANDHEATING,
}
VICARE_TO_HA_PRESET_HEATING = {
VICARE_PROGRAM_COMFORT: PRESET_COMFORT,
VICARE_PROGRAM_ECO: PRESET_ECO,
}
HA_TO_VICARE_PRESET_HEATING = {
PRESET_COMFORT: VICARE_PROGRAM_COMFORT,
PRESET_ECO: VICARE_PROGRAM_ECO,
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the ViCare climate devices."""
if discovery_info is None:
return
vicare_api = hass.data[VICARE_DOMAIN][VICARE_API]
heating_type = hass.data[VICARE_DOMAIN][VICARE_HEATING_TYPE]
add_entities(
[
ViCareClimate(
f"{hass.data[VICARE_DOMAIN][VICARE_NAME]} Heating",
vicare_api,
heating_type,
)
]
)
class ViCareClimate(ClimateEntity):
"""Representation of the ViCare heating climate device."""
def __init__(self, name, api, heating_type):
"""Initialize the climate device."""
self._name = name
self._state = None
self._api = api
self._attributes = {}
self._target_temperature = None
self._current_mode = None
self._current_temperature = None
self._current_program = None
self._heating_type = heating_type
self._current_action = None
def update(self):
"""Let HA know there has been an update from the ViCare API."""
try:
_room_temperature = self._api.getRoomTemperature()
_supply_temperature = self._api.getSupplyTemperature()
if _room_temperature is not None and _room_temperature != PYVICARE_ERROR:
self._current_temperature = _room_temperature
elif _supply_temperature != PYVICARE_ERROR:
self._current_temperature = _supply_temperature
else:
self._current_temperature = None
self._current_program = self._api.getActiveProgram()
# The getCurrentDesiredTemperature call can yield 'error' (str) when the system is in standby
desired_temperature = self._api.getCurrentDesiredTemperature()
if desired_temperature == PYVICARE_ERROR:
desired_temperature = None
self._target_temperature = desired_temperature
self._current_mode = self._api.getActiveMode()
# Update the generic device attributes
self._attributes = {}
self._attributes["room_temperature"] = _room_temperature
self._attributes["active_vicare_program"] = self._current_program
self._attributes["active_vicare_mode"] = self._current_mode
self._attributes["heating_curve_slope"] = self._api.getHeatingCurveSlope()
self._attributes["heating_curve_shift"] = self._api.getHeatingCurveShift()
self._attributes[
"month_since_last_service"
] = self._api.getMonthSinceLastService()
self._attributes["date_last_service"] = self._api.getLastServiceDate()
self._attributes["error_history"] = self._api.getErrorHistory()
self._attributes["active_error"] = self._api.getActiveError()
# Update the specific device attributes
if self._heating_type == HeatingType.gas:
self._current_action = self._api.getBurnerActive()
elif self._heating_type == HeatingType.heatpump:
self._current_action = self._api.getCompressorActive()
except requests.exceptions.ConnectionError:
_LOGGER.error("Unable to retrieve data from ViCare server")
except ValueError:
_LOGGER.error("Unable to decode data from ViCare server")
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS_HEATING
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def hvac_mode(self):
"""Return current hvac mode."""
return VICARE_TO_HA_HVAC_HEATING.get(self._current_mode)
def set_hvac_mode(self, hvac_mode):
"""Set a new hvac mode on the ViCare API."""
vicare_mode = HA_TO_VICARE_HVAC_HEATING.get(hvac_mode)
if vicare_mode is None:
_LOGGER.error(
"Cannot set invalid vicare mode: %s / %s", hvac_mode, vicare_mode
)
return
_LOGGER.debug("Setting hvac mode to %s / %s", hvac_mode, vicare_mode)
self._api.setMode(vicare_mode)
@property
def hvac_modes(self):
"""Return the list of available hvac modes."""
return list(HA_TO_VICARE_HVAC_HEATING)
@property
def hvac_action(self):
"""Return the current hvac action."""
if self._current_action:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def min_temp(self):
"""Return the minimum temperature."""
return VICARE_TEMP_HEATING_MIN
@property
def max_temp(self):
"""Return the maximum temperature."""
return VICARE_TEMP_HEATING_MAX
@property
def precision(self):
"""Return the precision of the system."""
return PRECISION_WHOLE
def set_temperature(self, **kwargs):
"""Set new target temperatures."""
temp = kwargs.get(ATTR_TEMPERATURE)
if temp is not None:
self._api.setProgramTemperature(self._current_program, temp)
self._target_temperature = temp
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
return VICARE_TO_HA_PRESET_HEATING.get(self._current_program)
@property
def preset_modes(self):
"""Return the available preset mode."""
return list(VICARE_TO_HA_PRESET_HEATING)
def set_preset_mode(self, preset_mode):
"""Set new preset mode and deactivate any existing programs."""
vicare_program = HA_TO_VICARE_PRESET_HEATING.get(preset_mode)
if vicare_program is None:
_LOGGER.error(
"Cannot set invalid vicare program: %s / %s",
preset_mode,
vicare_program,
)
return
_LOGGER.debug("Setting preset to %s / %s", preset_mode, vicare_program)
self._api.deactivateProgram(self._current_program)
self._api.activateProgram(vicare_program)
@property
def device_state_attributes(self):
"""Show Device Attributes."""
return self._attributes
|
from homeassistant.components.media_player.const import (
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_MUSIC,
MEDIA_TYPE_MUSIC,
)
from homeassistant.components.media_source import const, models
async def test_browse_media_as_dict():
"""Test BrowseMediaSource conversion to media player item dict."""
base = models.BrowseMediaSource(
domain=const.DOMAIN,
identifier="media",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type="folder",
title="media/",
can_play=False,
can_expand=True,
children_media_class=MEDIA_CLASS_MUSIC,
)
base.children = [
models.BrowseMediaSource(
domain=const.DOMAIN,
identifier="media/test.mp3",
media_class=MEDIA_CLASS_MUSIC,
media_content_type=MEDIA_TYPE_MUSIC,
title="test.mp3",
can_play=True,
can_expand=False,
)
]
item = base.as_dict()
assert item["title"] == "media/"
assert item["media_class"] == MEDIA_CLASS_DIRECTORY
assert item["media_content_type"] == "folder"
assert item["media_content_id"] == f"{const.URI_SCHEME}{const.DOMAIN}/media"
assert not item["can_play"]
assert item["can_expand"]
assert item["children_media_class"] == MEDIA_CLASS_MUSIC
assert len(item["children"]) == 1
assert item["children"][0]["title"] == "test.mp3"
assert item["children"][0]["media_class"] == MEDIA_CLASS_MUSIC
async def test_browse_media_parent_no_children():
"""Test BrowseMediaSource conversion to media player item dict."""
base = models.BrowseMediaSource(
domain=const.DOMAIN,
identifier="media",
media_class=MEDIA_CLASS_DIRECTORY,
media_content_type="folder",
title="media/",
can_play=False,
can_expand=True,
)
item = base.as_dict()
assert item["title"] == "media/"
assert item["media_class"] == MEDIA_CLASS_DIRECTORY
assert item["media_content_type"] == "folder"
assert item["media_content_id"] == f"{const.URI_SCHEME}{const.DOMAIN}/media"
assert not item["can_play"]
assert item["can_expand"]
assert len(item["children"]) == 0
assert item["children_media_class"] is None
async def test_media_source_default_name():
"""Test MediaSource uses domain as default name."""
source = models.MediaSource(const.DOMAIN)
assert source.name == const.DOMAIN
|
import configparser
import os
import yaml
import pytest
from yandextank.config_converter.converter import convert_ini, parse_package_name, parse_sections, combine_sections, \
convert_single_option, OptionsConflict
from yandextank.core.tankworker import load_cfg, load_core_base_cfg, cfg_folder_loader
from yandextank.validator.validator import TankConfig
@pytest.mark.parametrize('ini_file, expected', [
('test_config1.ini',
{'phantom': 'Phantom', 'telegraf': 'Telegraf', 'meta': 'DataUploader'}),
('test_config2.ini',
{'phantom': 'Phantom', 'telegraf': 'Telegraf', 'phantom-1': 'Phantom',
'meta': 'DataUploader', 'autostop': 'Autostop'}),
])
def test_parse_sections(ini_file, expected):
cfg_ini = configparser.RawConfigParser()
cfg_ini.read(os.path.join(os.path.dirname(__file__), ini_file))
assert {section.name: section.plugin for section in parse_sections(cfg_ini)} == expected
@pytest.mark.parametrize('ini_file, expected', [
(
'test_config2.ini',
{
'meta': {
'ignore_target_lock': True,
'task': 'MAPSJAMS-1946',
'api_address': 'https://lunapark.yandex-team.ru/'},
'phantom': {
'load_profile': {'load_type': 'rps', 'schedule': 'line(1,6000,20m)'},
'autocases': 0,
'multi': [
{'ammofile': '/var/bmpt-data/goods/ligreen/projects/regress/analyser-usershandler/get-segmentshandler.ammo',
'instances': 10,
'load_profile': {'load_type': 'rps', 'schedule': 'const(0.2,20m)'},
'autocases': 1,
'address': 'foo.example.org'}],
'instances': 10000,
'address': 'foo.example.net',
'port': '80'},
'telegraf': {'config': 'monitoring.xml'},
'autostop': {'autostop': [
'quantile(50,20,30s)',
'http(4xx,50%,5)',
'http(5xx,5%,4)',
'net(1xx,10,5)',
'net(43,10,5)',
'metric_higher(foo.example.net,group1_usershandler-average-task-age,3,70)'
]
}
})])
def test_combine_sections(ini_file, expected):
cfg_ini = configparser.RawConfigParser()
cfg_ini.read(os.path.join(os.path.dirname(__file__), ini_file))
assert {section.name: section.merged_options for section in combine_sections(parse_sections(cfg_ini))} == expected
@pytest.mark.parametrize('package_path, expected', [
('Tank/Plugins/Aggregator.py', 'Aggregator'),
('Tank/Plugins/Overload.py', 'DataUploader'),
('yandextank.plugins.Overload', 'DataUploader'),
('yatank_internal.plugins.DataUploader', 'DataUploader'),
('yandextank.plugins.Console', 'Console')
])
def test_parse_package(package_path, expected):
assert parse_package_name(package_path) == expected
# TODO: get test configs list automatically
@pytest.mark.parametrize('ini_file, yaml_file', [
('test_config1.ini', 'test_config1.yaml'),
('test_config2.ini', 'test_config2.yaml'),
('test_config5.ini', 'test_config5.yaml'),
('test_config5.1.ini', 'test_config5.1.yaml'),
('test_config7.ini', 'test_config7.yaml'),
('test_config8.ini', 'test_config8.yaml'),
('test_config9.ini', 'test_config9.yaml'),
('test_config10.ini', 'test_config10.yaml'),
('test_config11.ini', 'test_config11.yaml'),
('test_config12.ini', 'test_config12.yaml'),
('test_config13.ini', 'test_config13.yaml'),
])
def test_convert_ini_phantom(ini_file, yaml_file):
with open(os.path.join(os.path.dirname(__file__), yaml_file), 'r') as f:
assert convert_ini(os.path.join(os.path.dirname(__file__), ini_file)) == yaml.load(f, Loader=yaml.FullLoader)
@pytest.mark.parametrize('ini_file, msgs', [
('test_config2.1.ini', ['stpd_file', 'rps_schedule'])
])
def test_conflict_opts(ini_file, msgs):
with pytest.raises(OptionsConflict) as e:
convert_ini(os.path.join(os.path.dirname(__file__), ini_file))
assert all([msg in e.value.message for msg in msgs])
@pytest.mark.parametrize('ini_file', [
'test_config1.ini',
'test_config2.ini',
'test_config5.ini',
'test_config7.ini',
'test_config10.yaml',
'test_config11.yaml',
'test_config12.ini',
])
def test_validate(ini_file):
# noinspection PyStatementEffect
TankConfig([load_core_base_cfg()]
+ cfg_folder_loader(os.path.join(os.path.dirname(__file__), 'etc_cfg'))
+ [load_cfg(os.path.join(os.path.dirname(__file__), ini_file))]).validated
@pytest.mark.parametrize('key, value, expected', [
('phantom.uris', '/',
{'phantom': {'package': 'yandextank.plugins.Phantom', 'uris': ['/']}}),
('tank.plugin_uploader', 'yandextank.plugins.DataUploader',
{'uploader': {'enabled': True, 'package': 'yandextank.plugins.DataUploader'}}),
('phantom.rps_schedule', 'line(1,10)',
{'phantom': {
'load_profile': {'load_type': 'rps', 'schedule': 'line(1,10)'},
'package': 'yandextank.plugins.Phantom'}}),
('bfg.gun_config.module_name', 'bayan_load',
{'bfg': {'package': 'yandextank.plugins.Bfg', 'gun_config': {'module_name': 'bayan_load'}}})
])
def test_convert_single_option(key, value, expected):
assert convert_single_option(key, value) == expected
|
from homeassistant.components.humidifier import HumidifierEntity
from homeassistant.components.humidifier.const import (
DEVICE_CLASS_DEHUMIDIFIER,
DEVICE_CLASS_HUMIDIFIER,
SUPPORT_MODES,
)
SUPPORT_FLAGS = 0
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Demo humidifier devices."""
async_add_entities(
[
DemoHumidifier(
name="Humidifier",
mode=None,
target_humidity=68,
device_class=DEVICE_CLASS_HUMIDIFIER,
),
DemoHumidifier(
name="Dehumidifier",
mode=None,
target_humidity=54,
device_class=DEVICE_CLASS_DEHUMIDIFIER,
),
DemoHumidifier(
name="Hygrostat",
mode="home",
available_modes=["home", "eco"],
target_humidity=50,
),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo humidifier devices config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoHumidifier(HumidifierEntity):
"""Representation of a demo humidifier device."""
def __init__(
self,
name,
mode,
target_humidity,
available_modes=None,
is_on=True,
device_class=None,
):
"""Initialize the humidifier device."""
self._name = name
self._state = is_on
self._support_flags = SUPPORT_FLAGS
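        # Advertise mode support only when an initial mode is provided.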
if mode is not None:
self._support_flags = self._support_flags | SUPPORT_MODES
self._target_humidity = target_humidity
self._mode = mode
self._available_modes = available_modes
self._device_class = device_class
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support_flags
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def name(self):
"""Return the name of the humidity device."""
return self._name
@property
def target_humidity(self):
"""Return the humidity we try to reach."""
return self._target_humidity
@property
def mode(self):
"""Return current mode."""
return self._mode
@property
def available_modes(self):
"""Return available modes."""
return self._available_modes
@property
def is_on(self):
"""Return true if the humidifier is on."""
return self._state
@property
def device_class(self):
"""Return the device class of the humidifier."""
return self._device_class
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
self._state = False
self.async_write_ha_state()
async def async_set_humidity(self, humidity):
"""Set new humidity level."""
self._target_humidity = humidity
self.async_write_ha_state()
async def async_set_mode(self, mode):
"""Update mode."""
self._mode = mode
self.async_write_ha_state()
|
import os
def get_stash_dir():
"""
Returns the StaSh root directory, detected from this file.
:return: the StaSh root directory
:rtype: str
"""
return os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
def main():
"""
The main function.
"""
print("StaSh root directory: {}".format(get_stash_dir()))
if __name__ == "__main__":
main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import tempfile
from absl import flags
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from six.moves import range
FLAGS = flags.FLAGS
GIT_REPO = 'https://github.com/aerospike/act.git'
ACT_DIR = '%s/act' % linux_packages.INSTALL_DIR
flags.DEFINE_list('act_load', ['1.0'],
'Load multiplier for act test per device.')
flags.DEFINE_boolean('act_parallel', False,
'Run act tools in parallel. One copy per device.')
flags.DEFINE_integer('act_duration', 86400, 'Duration of act test in seconds.')
flags.DEFINE_integer('act_reserved_partitions', 0,
'Number of partitions reserved (not being used by act).')
flags.DEFINE_integer('act_num_queues', None,
'Total number of transaction queues. Default is number of'
' cores, detected by ACT at runtime.')
flags.DEFINE_integer('act_threads_per_queue', None, 'Number of threads per '
'transaction queue. Default is 4 threads/queue.')
# TODO(user): Support user provided config file.
ACT_CONFIG_TEMPLATE = """
device-names: {devices}
test-duration-sec: {duration}
read-reqs-per-sec: {read_iops}
write-reqs-per-sec: {write_iops}
"""
_READ_1X_1D = 2000
_WRITE_1X_1D = 1000
ACT_COMMIT = 'db9961ff7e0ad2691ddb41fb080561f6a1cdcdc9' # ACT 5.1
def _Install(vm):
"""Installs the act on the VM."""
vm.Install('build_tools')
vm.Install('openssl')
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, ACT_DIR))
  # On certain systems, O_DSYNC causes a 10x slowdown.
  # Patch out O_DSYNC to speed up the salting process.
# https://github.com/aerospike/act/issues/39
vm.RemoteCommand(
'cd {0} && git checkout {1} && '
'sed -i "s/O_DSYNC |//" src/prep/act_prep.c && make'.format(
ACT_DIR, ACT_COMMIT))
def YumInstall(vm):
"""Installs act package on the VM."""
vm.InstallPackages('zlib-devel')
_Install(vm)
def AptInstall(vm):
"""Installs act package on the VM."""
vm.InstallPackages('zlib1g-dev')
_Install(vm)
def Uninstall(vm):
vm.RemoteCommand('rm -rf %s' % ACT_DIR)
def RunActPrep(vm):
"""Runs actprep binary to initialize the drive."""
def _RunActPrep(device):
vm.RobustRemoteCommand('cd {0} && sudo ./target/bin/act_prep {1}'.format(
ACT_DIR, device.GetDevicePath()))
assert len(vm.scratch_disks) > FLAGS.act_reserved_partitions, (
      'More reserved partitions than total partitions available.')
  # Only salt the partitions that will actually be used.
vm_util.RunThreaded(
_RunActPrep, vm.scratch_disks[FLAGS.act_reserved_partitions:])
def PrepActConfig(vm, load, index=None):
"""Prepare act config file at remote VM."""
if index is None:
disk_lst = vm.scratch_disks
# Treat first few partitions as reserved.
disk_lst = disk_lst[FLAGS.act_reserved_partitions:]
config_file = 'actconfig_{0}.txt'.format(load)
else:
disk_lst = [vm.scratch_disks[index]]
config_file = 'actconfig_{0}_{1}.txt'.format(index, load)
devices = ','.join([d.GetDevicePath() for d in disk_lst])
num_disk = len(disk_lst)
# render template:
content = ACT_CONFIG_TEMPLATE.format(
devices=devices,
duration=FLAGS.act_duration,
read_iops=_CalculateReadIops(num_disk, load),
write_iops=_CalculateWriteIops(num_disk, load))
if FLAGS.act_num_queues:
content += 'num-queues: %d\n' % FLAGS.act_num_queues
if FLAGS.act_threads_per_queue:
content += 'threads-per-queue: %d\n' % FLAGS.act_threads_per_queue
logging.info('ACT config: %s', content)
with tempfile.NamedTemporaryFile(delete=False, mode='w+') as tf:
tf.write(content)
tf.close()
vm.PushDataFile(tf.name, config_file)
def RunAct(vm, load, index=None):
"""Runs act binary with provided config."""
if index is None:
config = 'actconfig_{0}.txt'.format(load)
output = 'output_{0}'.format(load)
act_config_metadata = {'device_index': 'all'}
else:
config = 'actconfig_{0}_{1}.txt'.format(index, load)
output = 'output_{0}_{1}'.format(index, load)
act_config_metadata = {'device_index': index}
  # Run act_storage against the config file that was pushed to the VM.
vm.RobustRemoteCommand(
'cd {0} && sudo ./target/bin/act_storage ~/{1} > ~/{2}'.format(
ACT_DIR, config, output))
  # Report the 1, 2, 4, 8, ..., 64 ms latency buckets.
out, _ = vm.RemoteCommand(
'cd {0} ; ./analysis/act_latency.py -n 7 -e 1 -x -l ~/{1}; exit 0'.format(
ACT_DIR, output), ignore_failure=True)
samples = ParseRunAct(out)
last_output_block, _ = vm.RemoteCommand('tail -n 100 ~/{0}'.format(output))
# Early termination.
if 'drive(s) can\'t keep up - test stopped' in last_output_block:
act_config_metadata['ERROR'] = 'cannot keep up'
act_config_metadata.update(
GetActMetadata(
len(vm.scratch_disks) - FLAGS.act_reserved_partitions, load))
for s in samples:
s.metadata.update(act_config_metadata)
return samples
def ParseRunAct(out):
"""Parse act output.
Raw output format:
reads device-reads
%>(ms) %>(ms)
slice 1 2 4 rate 1 2 4 rate
----- ------ ------ ------ ---------- ------ ------ ------ ----------
1 0.00 0.00 0.00 6000.0 0.00 0.00 0.00 6000.0
2 0.00 0.00 0.00 6000.0 0.00 0.00 0.00 6000.0
3 0.01 0.00 0.00 6000.0 0.01 0.00 0.00 6000.0
----- ------ ------ ------ ---------- ------ ------ ------ ----------
avg 0.00 0.00 0.00 6000.0 0.00 0.00 0.00 6000.0
max 0.01 0.00 0.00 6000.0 0.01 0.00 0.00 6000.0
Args:
out: string. Output from act test.
Returns:
A list of sample.Sample objects.
"""
ret = []
if 'could not find 3600 seconds of data' in out:
ret.append(sample.Sample('Failed:NotEnoughSample', 0, '',
{}))
return ret
lines = out.split('\n')
buckets = []
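  # The 'slice' header row supplies the latency bucket labels (plus a trailing
  # 'rate' column) for both halves of each data row: reads first, then
  # device-reads.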
for line in lines:
vals = line.split()
if not vals or '-' in vals[0]:
continue
if vals[0] == 'slice':
for v in vals[1:]:
buckets.append(v)
continue
if not buckets:
continue
matrix = ''
if vals[0] in ('avg', 'max'):
matrix = '_' + vals[0]
num_buckets = (len(vals) - 1) // 2
for i in range(num_buckets - 1):
assert buckets[i] == buckets[i + num_buckets]
ret.append(
sample.Sample('reads' + matrix, float(vals[i + 1]), '%>(ms)',
{'slice': vals[0],
'bucket': int(buckets[i])}))
ret.append(
sample.Sample('device_reads' + matrix,
float(vals[i + num_buckets + 1]), '%>(ms)',
{'slice': vals[0],
'bucket': int(buckets[i + num_buckets])}))
ret.append(
sample.Sample('read_rate' + matrix,
float(vals[num_buckets]), 'iops',
{'slice': vals[0]}))
ret.append(
sample.Sample('device_read_rate' + matrix,
float(vals[-1]), 'iops',
{'slice': vals[0]}))
return ret
def GetActMetadata(num_disk, load):
"""Returns metadata for act test."""
# TODO(user): Expose more stats and flags.
metadata = {
'act-version': '5.0',
'act-parallel': FLAGS.act_parallel,
'reserved_partition': FLAGS.act_reserved_partitions,
'device-count': num_disk,
'test-duration-sec': FLAGS.act_duration,
'report-interval-sec': 1,
'large-block-op-kbytes': 128,
'record-bytes': 1536,
'read-reqs-per-sec': _CalculateReadIops(num_disk, load),
'write-reqs-per-sec': _CalculateWriteIops(num_disk, load),
'microsecond-histograms': 'no',
'scheduler-mode': 'noop'}
metadata['num-queues'] = FLAGS.act_num_queues or 'default'
metadata['threads-per-queues'] = FLAGS.act_threads_per_queue or 'default'
return metadata
def _CalculateReadIops(num_disk, load_multiplier):
return int(_READ_1X_1D * num_disk * load_multiplier)
def _CalculateWriteIops(num_disk, load_multiplier):
return int(_WRITE_1X_1D * num_disk * load_multiplier)
def IsRunComplete(samples):
"""Decides if the run is able to complete (regardless of latency)."""
for s in samples:
if s.metric == 'Failed:NotEnoughSample':
return False
if 'ERROR' in s.metadata:
return False
return True
|
from kalliope.core.TTS.TTSModule import TTSModule, MissingTTSParameter
import logging
import sys
import subprocess
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Espeak(TTSModule):
def __init__(self, **kwargs):
super(Espeak, self).__init__(language="any", **kwargs)
# set parameter from what we receive from the settings
self.variant = kwargs.get('variant', None)
self.speed = str(kwargs.get('speed', '160'))
self.amplitude = str(kwargs.get('amplitude', '100'))
self.pitch = str(kwargs.get('pitch', '50'))
self.espeak_exec_path = kwargs.get('path', r'/usr/bin/espeak')
if self.voice == 'default' or self.voice is None:
raise MissingTTSParameter("voice parameter is required by the eSpeak TTS")
        # combine voice and optional variant into the value for espeak's -v option
if self.variant is None:
self.voice_and_variant = self.voice
else:
self.voice_and_variant = self.voice + '+' + self.variant
def say(self, words):
"""
:param words: The sentence to say
"""
self.generate_and_play(words, self._generate_audio_file)
def _generate_audio_file(self):
"""
Generic method used as a Callback in TTSModule
            - must provide the audio file and write it to disk
.. raises:: FailToLoadSoundFile
"""
options = {
'v': '-v' + self.voice_and_variant,
's': '-s' + self.speed,
'a': '-a' + self.amplitude,
'p': '-p' + self.pitch,
'w': '-w' + self.file_path
}
final_command = [self.espeak_exec_path, options['v'], options['s'], options['a'],
options['p'], options['w'], self.words]
# generate the file with eSpeak
subprocess.call(final_command, stderr=sys.stderr)
|
import asyncio
from contextlib import closing
import logging
import aiohttp
import async_timeout
import requests
from requests.auth import HTTPBasicAuth, HTTPDigestAuth
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import (
CONF_AUTHENTICATION,
CONF_NAME,
CONF_PASSWORD,
CONF_USERNAME,
CONF_VERIFY_SSL,
HTTP_BASIC_AUTHENTICATION,
HTTP_DIGEST_AUTHENTICATION,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import (
async_aiohttp_proxy_web,
async_get_clientsession,
)
_LOGGER = logging.getLogger(__name__)
CONF_MJPEG_URL = "mjpeg_url"
CONF_STILL_IMAGE_URL = "still_image_url"
CONTENT_TYPE_HEADER = "Content-Type"
DEFAULT_NAME = "Mjpeg Camera"
DEFAULT_VERIFY_SSL = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MJPEG_URL): cv.url,
vol.Optional(CONF_STILL_IMAGE_URL): cv.url,
vol.Optional(CONF_AUTHENTICATION, default=HTTP_BASIC_AUTHENTICATION): vol.In(
[HTTP_BASIC_AUTHENTICATION, HTTP_DIGEST_AUTHENTICATION]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a MJPEG IP Camera."""
filter_urllib3_logging()
if discovery_info:
config = PLATFORM_SCHEMA(discovery_info)
async_add_entities([MjpegCamera(config)])
def filter_urllib3_logging():
"""Filter header errors from urllib3 due to a urllib3 bug."""
urllib3_logger = logging.getLogger("urllib3.connectionpool")
if not any(isinstance(x, NoHeaderErrorFilter) for x in urllib3_logger.filters):
urllib3_logger.addFilter(NoHeaderErrorFilter())
def extract_image_from_mjpeg(stream):
"""Take in a MJPEG stream object, return the jpg from it."""
data = b""
for chunk in stream:
data += chunk
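            # A complete JPEG frame is delimited by the SOI (0xFF 0xD8) and
            # EOI (0xFF 0xD9) markers.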
jpg_end = data.find(b"\xff\xd9")
if jpg_end == -1:
continue
jpg_start = data.find(b"\xff\xd8")
if jpg_start == -1:
continue
return data[jpg_start : jpg_end + 2]
class MjpegCamera(Camera):
"""An implementation of an IP camera that is reachable over a URL."""
def __init__(self, device_info):
"""Initialize a MJPEG camera."""
super().__init__()
self._name = device_info.get(CONF_NAME)
self._authentication = device_info.get(CONF_AUTHENTICATION)
self._username = device_info.get(CONF_USERNAME)
self._password = device_info.get(CONF_PASSWORD)
self._mjpeg_url = device_info[CONF_MJPEG_URL]
self._still_image_url = device_info.get(CONF_STILL_IMAGE_URL)
self._auth = None
if self._username and self._password:
if self._authentication == HTTP_BASIC_AUTHENTICATION:
self._auth = aiohttp.BasicAuth(self._username, password=self._password)
self._verify_ssl = device_info.get(CONF_VERIFY_SSL)
async def async_camera_image(self):
"""Return a still image response from the camera."""
# DigestAuth is not supported
if (
self._authentication == HTTP_DIGEST_AUTHENTICATION
or self._still_image_url is None
):
image = await self.hass.async_add_executor_job(self.camera_image)
return image
websession = async_get_clientsession(self.hass, verify_ssl=self._verify_ssl)
try:
with async_timeout.timeout(10):
response = await websession.get(self._still_image_url, auth=self._auth)
image = await response.read()
return image
except asyncio.TimeoutError:
_LOGGER.error("Timeout getting camera image from %s", self._name)
except aiohttp.ClientError as err:
_LOGGER.error("Error getting new camera image from %s: %s", self._name, err)
def camera_image(self):
"""Return a still image response from the camera."""
if self._username and self._password:
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
auth = HTTPDigestAuth(self._username, self._password)
else:
auth = HTTPBasicAuth(self._username, self._password)
req = requests.get(
self._mjpeg_url,
auth=auth,
stream=True,
timeout=10,
verify=self._verify_ssl,
)
else:
req = requests.get(self._mjpeg_url, stream=True, timeout=10)
# https://github.com/PyCQA/pylint/issues/1437
# pylint: disable=no-member
with closing(req) as response:
return extract_image_from_mjpeg(response.iter_content(102400))
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
        # aiohttp doesn't support digest auth, so fall back to the synchronous handler
if self._authentication == HTTP_DIGEST_AUTHENTICATION:
return await super().handle_async_mjpeg_stream(request)
# connect to stream
websession = async_get_clientsession(self.hass, verify_ssl=self._verify_ssl)
stream_coro = websession.get(self._mjpeg_url, auth=self._auth)
return await async_aiohttp_proxy_web(self.hass, request, stream_coro)
@property
def name(self):
"""Return the name of this camera."""
return self._name
class NoHeaderErrorFilter(logging.Filter):
"""Filter out urllib3 Header Parsing Errors due to a urllib3 bug."""
def filter(self, record):
"""Filter out Header Parsing Errors."""
return "Failed to parse headers" not in record.getMessage()
|
import datetime
import re
from env_canada import ECData # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME, TEMP_CELSIUS
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt
CONF_FORECAST = "forecast"
CONF_ATTRIBUTION = "Data provided by Environment Canada"
CONF_STATION = "station"
def validate_station(station):
"""Check that the station ID is well-formed."""
if station is None:
return
if not re.fullmatch(r"[A-Z]{2}/s0000\d{3}", station):
raise vol.error.Invalid('Station ID must be of the form "XX/s0000###"')
return station
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_STATION): validate_station,
vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude,
vol.Optional(CONF_FORECAST, default="daily"): vol.In(["daily", "hourly"]),
}
)
# Icon codes from http://dd.weatheroffice.ec.gc.ca/citypage_weather/
# docs/current_conditions_icon_code_descriptions_e.csv
ICON_CONDITION_MAP = {
"sunny": [0, 1],
"clear-night": [30, 31],
"partlycloudy": [2, 3, 4, 5, 22, 32, 33, 34, 35],
"cloudy": [10],
"rainy": [6, 9, 11, 12, 28, 36],
"lightning-rainy": [19, 39, 46, 47],
"pouring": [13],
"snowy-rainy": [7, 14, 15, 27, 37],
"snowy": [8, 16, 17, 18, 25, 26, 38, 40],
"windy": [43],
"fog": [20, 21, 23, 24, 44],
"hail": [26, 27],
}
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the Environment Canada weather."""
if config.get(CONF_STATION):
ec_data = ECData(station_id=config[CONF_STATION])
else:
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
ec_data = ECData(coordinates=(lat, lon))
add_devices([ECWeather(ec_data, config)])
class ECWeather(WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, ec_data, config):
"""Initialize Environment Canada weather."""
self.ec_data = ec_data
self.platform_name = config.get(CONF_NAME)
self.forecast_type = config[CONF_FORECAST]
@property
def attribution(self):
"""Return the attribution."""
return CONF_ATTRIBUTION
@property
def name(self):
"""Return the name of the weather entity."""
if self.platform_name:
return self.platform_name
return self.ec_data.metadata.get("location")
@property
def temperature(self):
"""Return the temperature."""
if self.ec_data.conditions.get("temperature", {}).get("value"):
return float(self.ec_data.conditions["temperature"]["value"])
if self.ec_data.hourly_forecasts[0].get("temperature"):
return float(self.ec_data.hourly_forecasts[0]["temperature"])
return None
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def humidity(self):
"""Return the humidity."""
if self.ec_data.conditions.get("humidity", {}).get("value"):
return float(self.ec_data.conditions["humidity"]["value"])
return None
@property
def wind_speed(self):
"""Return the wind speed."""
if self.ec_data.conditions.get("wind_speed", {}).get("value"):
return float(self.ec_data.conditions["wind_speed"]["value"])
return None
@property
def wind_bearing(self):
"""Return the wind bearing."""
if self.ec_data.conditions.get("wind_bearing", {}).get("value"):
return float(self.ec_data.conditions["wind_bearing"]["value"])
return None
@property
def pressure(self):
"""Return the pressure."""
if self.ec_data.conditions.get("pressure", {}).get("value"):
return 10 * float(self.ec_data.conditions["pressure"]["value"])
return None
@property
def visibility(self):
"""Return the visibility."""
if self.ec_data.conditions.get("visibility", {}).get("value"):
return float(self.ec_data.conditions["visibility"]["value"])
return None
@property
def condition(self):
"""Return the weather condition."""
icon_code = None
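        # Prefer the current-conditions icon code; fall back to the first hourly forecast.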
if self.ec_data.conditions.get("icon_code", {}).get("value"):
icon_code = self.ec_data.conditions["icon_code"]["value"]
elif self.ec_data.hourly_forecasts[0].get("icon_code"):
icon_code = self.ec_data.hourly_forecasts[0]["icon_code"]
if icon_code:
return icon_code_to_condition(int(icon_code))
return ""
@property
def forecast(self):
"""Return the forecast array."""
return get_forecast(self.ec_data, self.forecast_type)
def update(self):
"""Get the latest data from Environment Canada."""
self.ec_data.update()
def get_forecast(ec_data, forecast_type):
"""Build the forecast array."""
forecast_array = []
if forecast_type == "daily":
half_days = ec_data.daily_forecasts
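        # Environment Canada publishes daily forecasts as half-day periods
        # (daytime high / overnight low), so entries are consumed in pairs.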
if half_days[0]["temperature_class"] == "high":
forecast_array.append(
{
ATTR_FORECAST_TIME: dt.now().isoformat(),
ATTR_FORECAST_TEMP: int(half_days[0]["temperature"]),
ATTR_FORECAST_TEMP_LOW: int(half_days[1]["temperature"]),
ATTR_FORECAST_CONDITION: icon_code_to_condition(
int(half_days[0]["icon_code"])
),
ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
half_days[0]["precip_probability"]
),
}
)
half_days = half_days[2:]
else:
half_days = half_days[1:]
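        # Walk the next five days: even indices hold the daytime (high) half-days,
        # odd indices the following overnight (low) half-days.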
for day, high, low in zip(range(1, 6), range(0, 9, 2), range(1, 10, 2)):
forecast_array.append(
{
ATTR_FORECAST_TIME: (
dt.now() + datetime.timedelta(days=day)
).isoformat(),
ATTR_FORECAST_TEMP: int(half_days[high]["temperature"]),
ATTR_FORECAST_TEMP_LOW: int(half_days[low]["temperature"]),
ATTR_FORECAST_CONDITION: icon_code_to_condition(
int(half_days[high]["icon_code"])
),
ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
half_days[high]["precip_probability"]
),
}
)
elif forecast_type == "hourly":
hours = ec_data.hourly_forecasts
for hour in range(0, 24):
forecast_array.append(
{
ATTR_FORECAST_TIME: dt.as_local(
datetime.datetime.strptime(hours[hour]["period"], "%Y%m%d%H%M")
).isoformat(),
ATTR_FORECAST_TEMP: int(hours[hour]["temperature"]),
ATTR_FORECAST_CONDITION: icon_code_to_condition(
int(hours[hour]["icon_code"])
),
ATTR_FORECAST_PRECIPITATION_PROBABILITY: int(
hours[hour]["precip_probability"]
),
}
)
return forecast_array
def icon_code_to_condition(icon_code):
"""Return the condition corresponding to an icon code."""
for condition, codes in ICON_CONDITION_MAP.items():
if icon_code in codes:
return condition
return None
|
from PyQt5.QtCore import QPoint
class SyncScroll:
def __init__(self, previewFrame,
editorPositionToSourceLineFunc,
sourceLineToEditorPositionFunc):
self.posmap = {}
self.frame = previewFrame
self.editorPositionToSourceLine = editorPositionToSourceLineFunc
self.sourceLineToEditorPosition = sourceLineToEditorPositionFunc
self.previewPositionBeforeLoad = QPoint()
self.contentIsLoading = False
self.editorViewportHeight = 0
self.editorViewportOffset = 0
self.editorCursorPosition = 0
self.frame.contentsSizeChanged.connect(self._handlePreviewResized)
self.frame.loadStarted.connect(self._handleLoadStarted)
self.frame.loadFinished.connect(self._handleLoadFinished)
def isActive(self):
return bool(self.posmap)
def handleEditorResized(self, editorViewportHeight):
self.editorViewportHeight = editorViewportHeight
self._updatePreviewScrollPosition()
def handleEditorScrolled(self, editorViewportOffset):
self.editorViewportOffset = editorViewportOffset
return self._updatePreviewScrollPosition()
def handleCursorPositionChanged(self, editorCursorPosition):
self.editorCursorPosition = editorCursorPosition
return self._updatePreviewScrollPosition()
def _handleLoadStarted(self):
# Store the current scroll position so it can be restored when the new
# content is presented
self.previewPositionBeforeLoad = self.frame.scrollPosition()
self.contentIsLoading = True
def _handleLoadFinished(self):
self.frame.setScrollPosition(self.previewPositionBeforeLoad)
self.contentIsLoading = False
self._recalculatePositionMap()
def _handlePreviewResized(self):
self._recalculatePositionMap()
self._updatePreviewScrollPosition()
def _linearScale(self, fromValue, fromMin, fromMax, toMin, toMax):
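        # Linearly map fromValue from the interval [fromMin, fromMax] onto [toMin, toMax].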
fromRange = fromMax - fromMin
toRange = toMax - toMin
toValue = toMin
if fromRange:
toValue += ((fromValue - fromMin) * toRange) / float(fromRange)
return toValue
def _updatePreviewScrollPosition(self):
if not self.posmap:
# Loading new content resets the scroll position to the top. If we
# don't have a posmap to calculate the new best position, then
# restore the position stored at the beginning of the load.
if self.contentIsLoading:
self.frame.setScrollPosition(self.previewPositionBeforeLoad)
return
textedit_pixel_to_scroll_to = self.editorCursorPosition
if textedit_pixel_to_scroll_to < self.editorViewportOffset:
textedit_pixel_to_scroll_to = self.editorViewportOffset
last_viewport_pixel = self.editorViewportOffset + self.editorViewportHeight
if textedit_pixel_to_scroll_to > last_viewport_pixel:
textedit_pixel_to_scroll_to = last_viewport_pixel
line_to_scroll_to = self.editorPositionToSourceLine(textedit_pixel_to_scroll_to)
# Do a binary search through the posmap to find the nearest line above
# and below the line to scroll to for which the rendered position is
# known.
posmap_lines = [0] + sorted(self.posmap.keys())
min_index = 0
max_index = len(posmap_lines) - 1
while max_index - min_index > 1:
current_index = int((min_index + max_index) / 2)
if posmap_lines[current_index] > line_to_scroll_to:
max_index = current_index
else:
min_index = current_index
        # line numbers of the nearest mapped lines above and below the target line
min_line = posmap_lines[min_index]
max_line = posmap_lines[max_index]
min_textedit_pos = self.sourceLineToEditorPosition(min_line)
max_textedit_pos = self.sourceLineToEditorPosition(max_line)
# rendered pixel position of nearest line above and below
min_preview_pos = self.posmap[min_line]
max_preview_pos = self.posmap[max_line]
# calculate rendered pixel position of line corresponding to cursor
# (0 == top of document)
preview_pixel_to_scroll_to = self._linearScale(textedit_pixel_to_scroll_to,
min_textedit_pos, max_textedit_pos,
min_preview_pos, max_preview_pos)
distance_to_top_of_viewport = textedit_pixel_to_scroll_to - self.editorViewportOffset
preview_scroll_offset = preview_pixel_to_scroll_to - distance_to_top_of_viewport
pos = self.frame.scrollPosition()
pos.setY(preview_scroll_offset)
self.frame.setScrollPosition(pos)
def _setPositionMap(self, posmap):
self.posmap = posmap
if posmap:
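            # Anchor source line 0 at pixel 0 so positions before the first
            # mapped element can still be interpolated.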
self.posmap[0] = 0
def _recalculatePositionMap(self):
if hasattr(self.frame, 'getPositionMap'):
# For WebEngine the update has to be asynchronous
self.frame.getPositionMap(self._setPositionMap)
return
# Create a list of input line positions mapped to vertical pixel positions in the preview
self.posmap = {}
elements = self.frame.findAllElements('[data-posmap]')
if elements:
# If there are posmap attributes, then build a posmap
# dictionary from them that will be used whenever the
# cursor is moved.
for el in elements:
value = el.attribute('data-posmap', 'invalid')
bottom = el.geometry().bottom()
# Ignore data-posmap entries that do not have integer values
try:
self.posmap[int(value)] = bottom
except ValueError:
pass
self.posmap[0] = 0
|
import pickle
import pytest
from unittest.mock import Mock, call
from kombu import Connection, Exchange, Producer, Queue, binding
from kombu.abstract import MaybeChannelBound
from kombu.exceptions import NotBoundError
from kombu.serialization import registry
from t.mocks import Transport
def get_conn():
return Connection(transport=Transport)
class test_binding:
def test_constructor(self):
x = binding(
Exchange('foo'), 'rkey',
arguments={'barg': 'bval'},
unbind_arguments={'uarg': 'uval'},
)
assert x.exchange == Exchange('foo')
assert x.routing_key == 'rkey'
assert x.arguments == {'barg': 'bval'}
assert x.unbind_arguments == {'uarg': 'uval'}
def test_declare(self):
chan = get_conn().channel()
x = binding(Exchange('foo'), 'rkey')
x.declare(chan)
assert 'exchange_declare' in chan
def test_declare_no_exchange(self):
chan = get_conn().channel()
x = binding()
x.declare(chan)
assert 'exchange_declare' not in chan
def test_bind(self):
chan = get_conn().channel()
x = binding(Exchange('foo'))
x.bind(Exchange('bar')(chan))
assert 'exchange_bind' in chan
def test_unbind(self):
chan = get_conn().channel()
x = binding(Exchange('foo'))
x.unbind(Exchange('bar')(chan))
assert 'exchange_unbind' in chan
def test_repr(self):
b = binding(Exchange('foo'), 'rkey')
assert 'foo' in repr(b)
assert 'rkey' in repr(b)
class test_Exchange:
def test_bound(self):
exchange = Exchange('foo', 'direct')
assert not exchange.is_bound
assert '<unbound' in repr(exchange)
chan = get_conn().channel()
bound = exchange.bind(chan)
assert bound.is_bound
assert bound.channel is chan
assert f'bound to chan:{chan.channel_id!r}' in repr(bound)
def test_hash(self):
assert hash(Exchange('a')) == hash(Exchange('a'))
assert hash(Exchange('a')) != hash(Exchange('b'))
def test_can_cache_declaration(self):
assert Exchange('a', durable=True).can_cache_declaration
assert Exchange('a', durable=False).can_cache_declaration
assert not Exchange('a', auto_delete=True).can_cache_declaration
assert not Exchange(
'a', durable=True, auto_delete=True,
).can_cache_declaration
def test_pickle(self):
e1 = Exchange('foo', 'direct')
e2 = pickle.loads(pickle.dumps(e1))
assert e1 == e2
def test_eq(self):
e1 = Exchange('foo', 'direct')
e2 = Exchange('foo', 'direct')
assert e1 == e2
e3 = Exchange('foo', 'topic')
assert e1 != e3
assert e1.__eq__(True) == NotImplemented
def test_revive(self):
exchange = Exchange('foo', 'direct')
conn = get_conn()
chan = conn.channel()
# reviving unbound channel is a noop.
exchange.revive(chan)
assert not exchange.is_bound
assert exchange._channel is None
bound = exchange.bind(chan)
assert bound.is_bound
assert bound.channel is chan
chan2 = conn.channel()
bound.revive(chan2)
assert bound.is_bound
assert bound._channel is chan2
def test_assert_is_bound(self):
exchange = Exchange('foo', 'direct')
with pytest.raises(NotBoundError):
exchange.declare()
conn = get_conn()
chan = conn.channel()
exchange.bind(chan).declare()
assert 'exchange_declare' in chan
def test_set_transient_delivery_mode(self):
exc = Exchange('foo', 'direct', delivery_mode='transient')
assert exc.delivery_mode == Exchange.TRANSIENT_DELIVERY_MODE
def test_set_passive_mode(self):
exc = Exchange('foo', 'direct', passive=True)
assert exc.passive
def test_set_persistent_delivery_mode(self):
exc = Exchange('foo', 'direct', delivery_mode='persistent')
assert exc.delivery_mode == Exchange.PERSISTENT_DELIVERY_MODE
def test_bind_at_instantiation(self):
assert Exchange('foo', channel=get_conn().channel()).is_bound
def test_create_message(self):
chan = get_conn().channel()
Exchange('foo', channel=chan).Message({'foo': 'bar'})
assert 'prepare_message' in chan
def test_publish(self):
chan = get_conn().channel()
Exchange('foo', channel=chan).publish('the quick brown fox')
assert 'basic_publish' in chan
def test_delete(self):
chan = get_conn().channel()
Exchange('foo', channel=chan).delete()
assert 'exchange_delete' in chan
def test__repr__(self):
b = Exchange('foo', 'topic')
assert 'foo(topic)' in repr(b)
assert 'Exchange' in repr(b)
def test_bind_to(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
bar = Exchange('bar', 'topic')
foo(chan).bind_to(bar)
assert 'exchange_bind' in chan
def test_bind_to_by_name(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
foo(chan).bind_to('bar')
assert 'exchange_bind' in chan
def test_unbind_from(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
bar = Exchange('bar', 'topic')
foo(chan).unbind_from(bar)
assert 'exchange_unbind' in chan
def test_unbind_from_by_name(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic')
foo(chan).unbind_from('bar')
assert 'exchange_unbind' in chan
def test_declare__no_declare(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic', no_declare=True)
foo(chan).declare()
assert 'exchange_declare' not in chan
def test_declare__internal_exchange(self):
chan = get_conn().channel()
foo = Exchange('amq.rabbitmq.trace', 'topic')
foo(chan).declare()
assert 'exchange_declare' not in chan
def test_declare(self):
chan = get_conn().channel()
foo = Exchange('foo', 'topic', no_declare=False)
foo(chan).declare()
assert 'exchange_declare' in chan
class test_Queue:
def setup(self):
self.exchange = Exchange('foo', 'direct')
def test_constructor_with_actual_exchange(self):
exchange = Exchange('exchange_name', 'direct')
queue = Queue(name='queue_name', exchange=exchange)
assert queue.exchange == exchange
def test_constructor_with_string_exchange(self):
exchange_name = 'exchange_name'
queue = Queue(name='queue_name', exchange=exchange_name)
assert queue.exchange == Exchange(exchange_name)
def test_constructor_with_default_exchange(self):
queue = Queue(name='queue_name')
assert queue.exchange == Exchange('')
def test_hash(self):
assert hash(Queue('a')) == hash(Queue('a'))
assert hash(Queue('a')) != hash(Queue('b'))
def test_repr_with_bindings(self):
ex = Exchange('foo')
x = Queue('foo', bindings=[ex.binding('A'), ex.binding('B')])
assert repr(x)
def test_anonymous(self):
chan = Mock()
x = Queue(bindings=[binding(Exchange('foo'), 'rkey')])
chan.queue_declare.return_value = 'generated', 0, 0
xx = x(chan)
xx.declare()
assert xx.name == 'generated'
def test_basic_get__accept_disallowed(self):
conn = Connection('memory://')
q = Queue('foo', exchange=self.exchange)
p = Producer(conn)
p.publish(
{'complex': object()},
declare=[q], exchange=self.exchange, serializer='pickle',
)
message = q(conn).get(no_ack=True)
assert message is not None
with pytest.raises(q.ContentDisallowed):
message.decode()
def test_basic_get__accept_allowed(self):
conn = Connection('memory://')
q = Queue('foo', exchange=self.exchange)
p = Producer(conn)
p.publish(
{'complex': object()},
declare=[q], exchange=self.exchange, serializer='pickle',
)
message = q(conn).get(accept=['pickle'], no_ack=True)
assert message is not None
payload = message.decode()
assert payload['complex']
def test_when_bound_but_no_exchange(self):
q = Queue('a')
q.exchange = None
assert q.when_bound() is None
def test_declare_but_no_exchange(self):
q = Queue('a')
q.queue_declare = Mock()
q.queue_bind = Mock()
q.exchange = None
q.declare()
q.queue_declare.assert_called_with(
channel=None, nowait=False, passive=False)
def test_declare__no_declare(self):
q = Queue('a', no_declare=True)
q.queue_declare = Mock()
q.queue_bind = Mock()
q.exchange = None
q.declare()
q.queue_declare.assert_not_called()
q.queue_bind.assert_not_called()
def test_bind_to_when_name(self):
chan = Mock()
q = Queue('a')
q(chan).bind_to('ex')
chan.queue_bind.assert_called()
def test_get_when_no_m2p(self):
chan = Mock()
q = Queue('a')(chan)
chan.message_to_python = None
assert q.get()
def test_multiple_bindings(self):
chan = Mock()
q = Queue('mul', [
binding(Exchange('mul1'), 'rkey1'),
binding(Exchange('mul2'), 'rkey2'),
binding(Exchange('mul3'), 'rkey3'),
])
q(chan).declare()
assert call(
nowait=False,
exchange='mul1',
auto_delete=False,
passive=False,
arguments=None,
type='direct',
durable=True,
) in chan.exchange_declare.call_args_list
def test_can_cache_declaration(self):
assert Queue('a', durable=True).can_cache_declaration
assert Queue('a', durable=False).can_cache_declaration
assert not Queue(
'a', queue_arguments={'x-expires': 100}
).can_cache_declaration
def test_eq(self):
q1 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
q2 = Queue('xxx', Exchange('xxx', 'direct'), 'xxx')
assert q1 == q2
assert q1.__eq__(True) == NotImplemented
q3 = Queue('yyy', Exchange('xxx', 'direct'), 'xxx')
assert q1 != q3
def test_exclusive_implies_auto_delete(self):
assert Queue('foo', self.exchange, exclusive=True).auto_delete
def test_binds_at_instantiation(self):
assert Queue('foo', self.exchange,
channel=get_conn().channel()).is_bound
def test_also_binds_exchange(self):
chan = get_conn().channel()
b = Queue('foo', self.exchange)
assert not b.is_bound
assert not b.exchange.is_bound
b = b.bind(chan)
assert b.is_bound
assert b.exchange.is_bound
assert b.channel is b.exchange.channel
assert b.exchange is not self.exchange
def test_declare(self):
chan = get_conn().channel()
b = Queue('foo', self.exchange, 'foo', channel=chan)
assert b.is_bound
b.declare()
assert 'exchange_declare' in chan
assert 'queue_declare' in chan
assert 'queue_bind' in chan
def test_get(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.get()
assert 'basic_get' in b.channel
def test_purge(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.purge()
assert 'queue_purge' in b.channel
def test_consume(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.consume('fifafo', None)
assert 'basic_consume' in b.channel
def test_cancel(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.cancel('fifafo')
assert 'basic_cancel' in b.channel
def test_delete(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.delete()
assert 'queue_delete' in b.channel
def test_queue_unbind(self):
b = Queue('foo', self.exchange, 'foo', channel=get_conn().channel())
b.queue_unbind()
assert 'queue_unbind' in b.channel
def test_as_dict(self):
q = Queue('foo', self.exchange, 'rk')
d = q.as_dict(recurse=True)
assert d['exchange']['name'] == self.exchange.name
def test_queue_dump(self):
b = binding(self.exchange, 'rk')
q = Queue('foo', self.exchange, 'rk', bindings=[b])
d = q.as_dict(recurse=True)
assert d['bindings'][0]['routing_key'] == 'rk'
registry.dumps(d)
def test__repr__(self):
b = Queue('foo', self.exchange, 'foo')
assert 'foo' in repr(b)
assert 'Queue' in repr(b)
class test_MaybeChannelBound:
def test_repr(self):
assert repr(MaybeChannelBound())
|
import time
from yandextank.plugins.Console.screen import Sparkline
class TestSparkline(object):
def test_unusual_vals(self):
data = [0, 1, -100, 0.1, 1000, -0.1, 50]
expected = ' _ _▇ _'
sparkline = Sparkline(len(data))
start = int(time.time()) - len(data)
for num, val in enumerate(data):
sparkline.add(start + num, 'data', val)
spark = ''.join(sparkline.get_sparkline('data'))
assert (len(spark) == len(data))
assert (spark == expected)
zero = sparkline.get_sparkline('continous', spark_len=0)
assert (len(zero) == 0)
negative = sparkline.get_sparkline('continous', spark_len=-1)
assert (len(negative) == 0)
def test_non_continuos(self):
data = range(20)
expected = ' _▁▂▃▄▅▆▇ ▃▄▅▆▇ _'
expected_short = '▆▇ _'
expected_long = ' _▁▂▃▄▅▆▇ ▃▄▅▆▇ _'
spark_len = 24
sparkline = Sparkline(spark_len)
start = int(time.time()) - len(data)
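        # Values 9 through 12 are skipped below, so those seconds render as
        # blank gaps in the sparkline.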
for num, val in enumerate(data):
if val <= 8 or val > 12:
sparkline.add(start + num, 'data', val % 9)
spark = ''.join(sparkline.get_sparkline('data', spark_len=len(data)))
assert (spark == expected)
short_spark = ''.join(sparkline.get_sparkline('data', spark_len=4))
assert (short_spark == expected_short)
long_spark = ''.join(sparkline.get_sparkline('data'))
assert (long_spark == expected_long)
def test_multi_graphs(self):
expected_continous = '__▁▁▂▂▃▃▄▄▅▅▆▆▇▇'
expected_spotty = '_ ▁ ▂ ▃ ▄ ▅ ▆ ▇ '
continous_vals = range(1, 17)
sparkline = Sparkline(len(continous_vals))
start = int(time.time()) - len(continous_vals)
for val in continous_vals:
sparkline.add(start + val, 'continous', val)
if val % 2 == 1:
sparkline.add(start + val, 'spotty', val)
continous = ''.join(sparkline.get_sparkline('continous'))
spotty = ''.join(sparkline.get_sparkline('spotty'))
assert (continous == expected_continous)
assert (spotty == expected_spotty)
|
import datetime
from aio_geojson_nsw_rfs_incidents import NswRuralFireServiceIncidentsFeed
from homeassistant.components import geo_location
from homeassistant.components.geo_location import ATTR_SOURCE
from homeassistant.components.nsw_rural_fire_service_feed.geo_location import (
ATTR_CATEGORY,
ATTR_COUNCIL_AREA,
ATTR_EXTERNAL_ID,
ATTR_FIRE,
ATTR_LOCATION,
ATTR_PUBLICATION_DATE,
ATTR_RESPONSIBLE_AGENCY,
ATTR_SIZE,
ATTR_STATUS,
ATTR_TYPE,
SCAN_INTERVAL,
)
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
LENGTH_KILOMETERS,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import ANY, MagicMock, call, patch
from tests.common import assert_setup_component, async_fire_time_changed
CONFIG = {
geo_location.DOMAIN: [{"platform": "nsw_rural_fire_service_feed", CONF_RADIUS: 200}]
}
CONFIG_WITH_CUSTOM_LOCATION = {
geo_location.DOMAIN: [
{
"platform": "nsw_rural_fire_service_feed",
CONF_RADIUS: 200,
CONF_LATITUDE: 15.1,
CONF_LONGITUDE: 25.2,
}
]
}
def _generate_mock_feed_entry(
external_id,
title,
distance_to_home,
coordinates,
category=None,
location=None,
attribution=None,
publication_date=None,
council_area=None,
status=None,
entry_type=None,
fire=True,
size=None,
responsible_agency=None,
):
"""Construct a mock feed entry for testing purposes."""
feed_entry = MagicMock()
feed_entry.external_id = external_id
feed_entry.title = title
feed_entry.distance_to_home = distance_to_home
feed_entry.coordinates = coordinates
feed_entry.category = category
feed_entry.location = location
feed_entry.attribution = attribution
feed_entry.publication_date = publication_date
feed_entry.council_area = council_area
feed_entry.status = status
feed_entry.type = entry_type
feed_entry.fire = fire
feed_entry.size = size
feed_entry.responsible_agency = responsible_agency
return feed_entry
async def test_setup(hass):
"""Test the general setup of the platform."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234",
"Title 1",
15.5,
(-31.0, 150.0),
category="Category 1",
location="Location 1",
attribution="Attribution 1",
publication_date=datetime.datetime(
2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
),
council_area="Council Area 1",
status="Status 1",
entry_type="Type 1",
size="Size 1",
responsible_agency="Agency 1",
)
mock_entry_2 = _generate_mock_feed_entry(
"2345", "Title 2", 20.5, (-31.1, 150.1), fire=False
)
mock_entry_3 = _generate_mock_feed_entry("3456", "Title 3", 25.5, (-31.2, 150.2))
mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (-31.3, 150.3))
# Patching 'utcnow' to gain more control over the timed update.
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"aio_geojson_client.feed.GeoJsonFeed.update"
) as mock_feed_update:
mock_feed_update.return_value = (
"OK",
[mock_entry_1, mock_entry_2, mock_entry_3],
)
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
await hass.async_block_till_done()
# Artificially trigger update.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
state = hass.states.get("geo_location.title_1")
assert state is not None
assert state.name == "Title 1"
assert state.attributes == {
ATTR_EXTERNAL_ID: "1234",
ATTR_LATITUDE: -31.0,
ATTR_LONGITUDE: 150.0,
ATTR_FRIENDLY_NAME: "Title 1",
ATTR_CATEGORY: "Category 1",
ATTR_LOCATION: "Location 1",
ATTR_ATTRIBUTION: "Attribution 1",
ATTR_PUBLICATION_DATE: datetime.datetime(
2018, 9, 22, 8, 0, tzinfo=datetime.timezone.utc
),
ATTR_FIRE: True,
ATTR_COUNCIL_AREA: "Council Area 1",
ATTR_STATUS: "Status 1",
ATTR_TYPE: "Type 1",
ATTR_SIZE: "Size 1",
ATTR_RESPONSIBLE_AGENCY: "Agency 1",
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "nsw_rural_fire_service_feed",
ATTR_ICON: "mdi:fire",
}
assert round(abs(float(state.state) - 15.5), 7) == 0
state = hass.states.get("geo_location.title_2")
assert state is not None
assert state.name == "Title 2"
assert state.attributes == {
ATTR_EXTERNAL_ID: "2345",
ATTR_LATITUDE: -31.1,
ATTR_LONGITUDE: 150.1,
ATTR_FRIENDLY_NAME: "Title 2",
ATTR_FIRE: False,
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "nsw_rural_fire_service_feed",
ATTR_ICON: "mdi:alarm-light",
}
assert round(abs(float(state.state) - 20.5), 7) == 0
state = hass.states.get("geo_location.title_3")
assert state is not None
assert state.name == "Title 3"
assert state.attributes == {
ATTR_EXTERNAL_ID: "3456",
ATTR_LATITUDE: -31.2,
ATTR_LONGITUDE: 150.2,
ATTR_FRIENDLY_NAME: "Title 3",
ATTR_FIRE: True,
ATTR_UNIT_OF_MEASUREMENT: LENGTH_KILOMETERS,
ATTR_SOURCE: "nsw_rural_fire_service_feed",
ATTR_ICON: "mdi:fire",
}
assert round(abs(float(state.state) - 25.5), 7) == 0
# Simulate an update - one existing, one new entry,
# one outdated entry
mock_feed_update.return_value = (
"OK",
[mock_entry_1, mock_entry_4, mock_entry_3],
)
async_fire_time_changed(hass, utcnow + SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed_update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + 2 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 3
        # Simulate an update error - removes all entities
mock_feed_update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 3 * SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 0
# Artificially trigger update.
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
# Collect events.
await hass.async_block_till_done()
async def test_setup_with_custom_location(hass):
"""Test the setup with a custom location."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry("1234", "Title 1", 20.5, (-31.1, 150.1))
with patch(
"aio_geojson_nsw_rfs_incidents.feed_manager.NswRuralFireServiceIncidentsFeed",
wraps=NswRuralFireServiceIncidentsFeed,
) as mock_feed_manager, patch(
"aio_geojson_client.feed.GeoJsonFeed.update"
) as mock_feed_update:
mock_feed_update.return_value = "OK", [mock_entry_1]
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(
hass, geo_location.DOMAIN, CONFIG_WITH_CUSTOM_LOCATION
)
await hass.async_block_till_done()
# Artificially trigger update.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
# Collect events.
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
assert mock_feed_manager.call_args == call(
ANY, (15.1, 25.2), filter_categories=[], filter_radius=200.0
)
|
import abc
import os
import cookiecutter
import cookiecutter.main
from molecule import logger
from molecule import util
LOG = logger.get_logger(__name__)
class Base(object):
__metaclass__ = abc.ABCMeta
def _process_templates(self,
template_dir,
extra_context,
output_dir,
overwrite=True):
"""
Process templates as found in the named directory.
:param template_dir: A string containing an absolute or relative path
to a directory where the templates are located. If the provided
directory is a relative path, it is resolved using a known location.
:param extra_context: A dict of values that are used to override
default or user specified values.
        :param output_dir: A string with an absolute path to a directory where
the templates should be written to.
        :param overwrite: An optional bool indicating whether to overwrite existing
templates.
:return: None
"""
template_dir = self._resolve_template_dir(template_dir)
self._validate_template_dir(template_dir)
try:
cookiecutter.main.cookiecutter(
template_dir,
extra_context=extra_context,
output_dir=output_dir,
overwrite_if_exists=overwrite,
no_input=True,
)
except cookiecutter.exceptions.NonTemplatedInputDirException:
util.sysexit_with_message("The specified template directory (" +
str(template_dir) +
") is in an invalid format")
def _resolve_template_dir(self, template_dir):
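        # Relative template paths are resolved against molecule's bundled
        # cookiecutter template directory.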
if not os.path.isabs(template_dir):
template_dir = os.path.join(
os.path.dirname(__file__), os.path.pardir, os.path.pardir,
'cookiecutter', template_dir)
return template_dir
def _validate_template_dir(self, template_dir):
if not os.path.isdir(template_dir):
util.sysexit_with_message("The specified template directory (" +
str(template_dir) + ") does not exist")
|
from homeassistant.components.sensor import ENTITY_ID_FORMAT
from homeassistant.const import PERCENTAGE, POWER_WATT, TEMP_FAHRENHEIT
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
from . import DOMAIN as WF_DOMAIN, UPDATE_TOPIC
class WFSensorConfig:
"""Water Furnace Sensor configuration."""
def __init__(
self, friendly_name, field, icon="mdi:gauge", unit_of_measurement=None
):
"""Initialize configuration."""
self.friendly_name = friendly_name
self.field = field
self.icon = icon
self.unit_of_measurement = unit_of_measurement
SENSORS = [
WFSensorConfig("Furnace Mode", "mode"),
WFSensorConfig("Total Power", "totalunitpower", "mdi:flash", POWER_WATT),
WFSensorConfig(
"Active Setpoint", "tstatactivesetpoint", "mdi:thermometer", TEMP_FAHRENHEIT
),
WFSensorConfig("Leaving Air", "leavingairtemp", "mdi:thermometer", TEMP_FAHRENHEIT),
WFSensorConfig("Room Temp", "tstatroomtemp", "mdi:thermometer", TEMP_FAHRENHEIT),
WFSensorConfig(
"Loop Temp", "enteringwatertemp", "mdi:thermometer", TEMP_FAHRENHEIT
),
WFSensorConfig(
"Humidity Set Point", "tstathumidsetpoint", "mdi:water-percent", PERCENTAGE
),
WFSensorConfig(
"Humidity", "tstatrelativehumidity", "mdi:water-percent", PERCENTAGE
),
WFSensorConfig("Compressor Power", "compressorpower", "mdi:flash", POWER_WATT),
WFSensorConfig("Fan Power", "fanpower", "mdi:flash", POWER_WATT),
WFSensorConfig("Aux Power", "auxpower", "mdi:flash", POWER_WATT),
WFSensorConfig("Loop Pump Power", "looppumppower", "mdi:flash", POWER_WATT),
WFSensorConfig("Compressor Speed", "actualcompressorspeed", "mdi:speedometer"),
WFSensorConfig("Fan Speed", "airflowcurrentspeed", "mdi:fan"),
]
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Waterfurnace sensor."""
if discovery_info is None:
return
sensors = []
client = hass.data[WF_DOMAIN]
for sconfig in SENSORS:
sensors.append(WaterFurnaceSensor(client, sconfig))
add_entities(sensors)
class WaterFurnaceSensor(Entity):
"""Implementing the Waterfurnace sensor."""
def __init__(self, client, config):
"""Initialize the sensor."""
self.client = client
self._name = config.friendly_name
self._attr = config.field
self._state = None
self._icon = config.icon
self._unit_of_measurement = config.unit_of_measurement
# This ensures that the sensors are isolated per waterfurnace unit
self.entity_id = ENTITY_ID_FORMAT.format(
"wf_{}_{}".format(slugify(self.client.unit), slugify(self._attr))
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return icon."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit_of_measurement
@property
def should_poll(self):
"""Return the polling state."""
return False
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
UPDATE_TOPIC, self.async_update_callback
)
)
@callback
def async_update_callback(self):
"""Update state."""
if self.client.data is not None:
self._state = getattr(self.client.data, self._attr, None)
self.async_write_ha_state()
|
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_API_KEY, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import CONF_SITE_ID, DEFAULT_NAME, DOMAIN
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_SITE_ID): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Platform setup, do nothing."""
if DOMAIN not in config:
return True
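    # Forward the YAML configuration to the config-entry import flow.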
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=dict(config[DOMAIN])
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Load the saved entities."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "sensor")
)
return True
|
import mock
import pytest
from paasta_tools.monitoring.check_mesos_quorum import check_mesos_quorum
def test_check_mesos_quorum_ok(capfd):
with mock.patch(
"paasta_tools.metrics.metastatus_lib.get_num_masters",
autospec=True,
return_value=3,
), mock.patch(
"paasta_tools.metrics.metastatus_lib.get_mesos_quorum",
autospec=True,
return_value=2,
):
with pytest.raises(SystemExit) as error:
check_mesos_quorum()
out, err = capfd.readouterr()
assert "OK" in out
assert error.value.code == 0
def test_check_mesos_quorum_critical(capfd):
with mock.patch(
"paasta_tools.metrics.metastatus_lib.get_num_masters",
autospec=True,
return_value=1,
), mock.patch(
"paasta_tools.metrics.metastatus_lib.get_mesos_quorum",
autospec=True,
return_value=2,
):
with pytest.raises(SystemExit) as error:
check_mesos_quorum()
out, err = capfd.readouterr()
assert "CRITICAL" in out
assert error.value.code == 2
|
import pathlib
import pytest
from scripts import importer
_samples = pathlib.Path('tests/unit/scripts/importer_sample')
def qm_expected(input_format):
"""Read expected quickmark-formatted output."""
return (_samples / input_format / 'quickmarks').read_text(encoding='utf-8')
def bm_expected(input_format):
"""Read expected bookmark-formatted output."""
return (_samples / input_format / 'bookmarks').read_text(encoding='utf-8')
def search_expected(input_format):
"""Read expected search-formatted (config.py) output."""
return (_samples / input_format / 'config_py').read_text(encoding='utf-8')
def sample_input(input_format):
"""Get the sample input path."""
return str(_samples / input_format / 'input')
def test_opensearch_convert():
urls = [
# simple search query
('http://foo.bar/s?q={searchTerms}', 'http://foo.bar/s?q={}'),
# simple search query with supported additional parameter
('http://foo.bar/s?q={searchTerms}&enc={inputEncoding}',
'http://foo.bar/s?q={}&enc=UTF-8'),
# same as above but with supported optional parameter
('http://foo.bar/s?q={searchTerms}&enc={inputEncoding?}',
'http://foo.bar/s?q={}&enc='),
# unsupported-but-optional parameter
('http://foo.bar/s?q={searchTerms}&opt={unsupported?}',
'http://foo.bar/s?q={}&opt='),
# unsupported-but-optional subset parameter
('http://foo.bar/s?q={searchTerms}&opt={unsupported:unsupported?}',
'http://foo.bar/s?q={}&opt=')
]
for os_url, qb_url in urls:
assert importer.opensearch_convert(os_url) == qb_url
def test_opensearch_convert_unsupported():
"""pass an unsupported, required parameter."""
with pytest.raises(KeyError):
os_url = 'http://foo.bar/s?q={searchTerms}&req={unsupported}'
importer.opensearch_convert(os_url)
def test_chrome_bookmarks(capsys):
"""Read sample bookmarks from chrome profile."""
importer.import_chrome(sample_input('chrome'), ['bookmark'], 'bookmark')
imported = capsys.readouterr()[0]
assert imported == bm_expected('chrome')
def test_chrome_quickmarks(capsys):
"""Read sample bookmarks from chrome profile."""
importer.import_chrome(sample_input('chrome'), ['bookmark'], 'quickmark')
imported = capsys.readouterr()[0]
assert imported == qm_expected('chrome')
def test_chrome_searches(capsys):
"""Read sample searches from chrome profile."""
importer.import_chrome(sample_input('chrome'), ['search'], 'search')
imported = capsys.readouterr()[0]
assert imported == search_expected('chrome')
def test_netscape_bookmarks(capsys):
importer.import_netscape_bookmarks(
sample_input('netscape'), ['bookmark', 'keyword'], 'bookmark')
imported = capsys.readouterr()[0]
assert imported == bm_expected('netscape')
def test_netscape_quickmarks(capsys):
importer.import_netscape_bookmarks(
sample_input('netscape'), ['bookmark', 'keyword'], 'quickmark')
imported = capsys.readouterr()[0]
assert imported == qm_expected('netscape')
def test_netscape_searches(capsys):
importer.import_netscape_bookmarks(
sample_input('netscape'), ['search'], 'search')
imported = capsys.readouterr()[0]
assert imported == search_expected('netscape')
def test_mozilla_bookmarks(capsys):
importer.import_moz_places(
sample_input('mozilla'), ['bookmark', 'keyword'], 'bookmark')
imported = capsys.readouterr()[0]
assert imported == bm_expected('mozilla')
def test_mozilla_quickmarks(capsys):
importer.import_moz_places(
sample_input('mozilla'), ['bookmark', 'keyword'], 'quickmark')
imported = capsys.readouterr()[0]
assert imported == qm_expected('mozilla')
def test_mozilla_searches(capsys):
importer.import_moz_places(sample_input('mozilla'), ['search'], 'search')
imported = capsys.readouterr()[0]
assert imported == search_expected('mozilla')
|
import hashlib
import hmac
import json
import logging
import voluptuous as vol
from homeassistant.const import CONF_API_KEY, CONF_DOMAIN, CONF_WEBHOOK_ID
from homeassistant.helpers import config_entry_flow
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
CONF_SANDBOX = "sandbox"
DEFAULT_SANDBOX = False
MESSAGE_RECEIVED = f"{DOMAIN}_message_received"
CONFIG_SCHEMA = vol.Schema(
{
vol.Optional(DOMAIN): vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_DOMAIN): cv.string,
vol.Optional(CONF_SANDBOX, default=DEFAULT_SANDBOX): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Mailgun component."""
if DOMAIN not in config:
return True
hass.data[DOMAIN] = config[DOMAIN]
return True
async def handle_webhook(hass, webhook_id, request):
"""Handle incoming webhook with Mailgun inbound messages."""
body = await request.text()
try:
data = json.loads(body) if body else {}
except ValueError:
return None
if isinstance(data, dict) and "signature" in data:
if await verify_webhook(hass, **data["signature"]):
data["webhook_id"] = webhook_id
hass.bus.async_fire(MESSAGE_RECEIVED, data)
return
_LOGGER.warning(
"Mailgun webhook received an unauthenticated message - webhook_id: %s",
webhook_id,
)
async def verify_webhook(hass, token=None, timestamp=None, signature=None):
"""Verify webhook was signed by Mailgun."""
if DOMAIN not in hass.data:
_LOGGER.warning("Cannot validate Mailgun webhook, missing API Key")
return True
if not (token and timestamp and signature):
return False
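    # Per Mailgun's webhook signing scheme, the signature is an HMAC-SHA256 of
    # the concatenated timestamp and token, keyed with the account API key.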
hmac_digest = hmac.new(
key=bytes(hass.data[DOMAIN][CONF_API_KEY], "utf-8"),
msg=bytes(f"{timestamp}{token}", "utf-8"),
digestmod=hashlib.sha256,
).hexdigest()
return hmac.compare_digest(signature, hmac_digest)
async def async_setup_entry(hass, entry):
"""Configure based on config entry."""
hass.components.webhook.async_register(
DOMAIN, "Mailgun", entry.data[CONF_WEBHOOK_ID], handle_webhook
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
return True
async_remove_entry = config_entry_flow.webhook_async_remove_entry
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import inception
class InceptionTest(tf.test.TestCase):
def testBuildLogits(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testBuildEndPoints(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue('Logits' in end_points)
logits = end_points['Logits']
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('AuxLogits' in end_points)
aux_logits = end_points['AuxLogits']
self.assertListEqual(aux_logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['PrePool']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 8, 8, 1536])
def testVariablesSetDevice(self):
batch_size = 5
height, width = 299, 299
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
# Force all Variables to reside on the device.
with tf.variable_scope('on_cpu'), tf.device('/cpu:0'):
inception.inception_resnet_v2(inputs, num_classes)
with tf.variable_scope('on_gpu'), tf.device('/gpu:0'):
inception.inception_resnet_v2(inputs, num_classes)
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_cpu'):
self.assertDeviceEqual(v.device, '/cpu:0')
for v in tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope='on_gpu'):
self.assertDeviceEqual(v.device, '/gpu:0')
def testHalfSizeImages(self):
batch_size = 5
height, width = 150, 150
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['PrePool']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 3, 3, 1536])
def testUnknownBatchSize(self):
batch_size = 1
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_resnet_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionResnetV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 299, 299
num_classes = 1000
with self.test_session() as sess:
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_resnet_v2(eval_inputs,
num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
      self.assertEqual(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
with self.test_session() as sess:
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_resnet_v2(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_resnet_v2(eval_inputs,
num_classes,
is_training=False,
reuse=True)
predictions = tf.argmax(logits, 1)
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
      self.assertEqual(output.shape, (eval_batch_size,))
if __name__ == '__main__':
tf.test.main()
|
from pprint import pprint
from riko.bado import coroutine
from riko.collections import SyncPipe, AsyncPipe
p120_conf = {'type': 'text'}
p120_inputs = {'format': '%B %d, %Y'}
p112_conf = {'type': 'date', 'default': '5/4/82', 'prompt': 'enter a date'}
p151_conf = {'format': {'terminal': 'format', 'path': 'format'}}
p100_conf = {
'attrs': {
'value': {'terminal': 'value', 'path': 'dateformat'}, 'key': 'date'}}
p120_kwargs = {'conf': p120_conf, 'inputs': p120_inputs, 'assign': 'format'}
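# Pipeline sketch: the first 'input' pipe supplies a strftime format string,
# the second 'input' provides a date that 'dateformat' renders with it, and
# 'itembuilder' wraps the result into an item under the 'date' key.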
def pipe(test=False):
s1 = SyncPipe('input', test=test, **p120_kwargs).output
s2 = (SyncPipe('input', conf=p112_conf, test=test)
.dateformat(conf=p151_conf, format=s1)
.output)
stream = (SyncPipe('itembuilder', conf=p100_conf, value=s2, test=test)
.list)
for i in stream:
pprint(i)
return stream
@coroutine
def async_pipe(reactor, test=False):
s1 = yield AsyncPipe('input', test=test, **p120_kwargs).output
s2 = yield (AsyncPipe('input', conf=p112_conf, test=test)
.dateformat(conf=p151_conf, format=s1)
.output)
output_kwargs = {'conf': p100_conf, 'value': s2, 'test': test}
output = yield (AsyncPipe('itembuilder', **output_kwargs)
.list)
for i in output:
pprint(i)
|
from homeassistant.components.binary_sensor import DEVICE_CLASSES, BinarySensorEntity
from . import DOMAIN as TESLA_DOMAIN, TeslaDevice
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Tesla binary_sensors by config_entry."""
async_add_entities(
[
TeslaBinarySensor(
device,
hass.data[TESLA_DOMAIN][config_entry.entry_id]["coordinator"],
)
for device in hass.data[TESLA_DOMAIN][config_entry.entry_id]["devices"][
"binary_sensor"
]
],
True,
)
class TeslaBinarySensor(TeslaDevice, BinarySensorEntity):
"""Implement an Tesla binary sensor for parking and charger."""
@property
def device_class(self):
"""Return the class of this binary sensor."""
return (
self.tesla_device.sensor_type
if self.tesla_device.sensor_type in DEVICE_CLASSES
else None
)
@property
def is_on(self):
"""Return the state of the binary sensor."""
return self.tesla_device.get_value()
|
import subprocess
import os
import time
import sys
import errno
import signal
import traceback
from plumbum.commands.processes import ProcessExecutionError
class _fake_lock(object):
"""Needed to allow normal os.exit() to work without error"""
def acquire(self, val):
return True
def release(self):
pass
def posix_daemonize(command, cwd, stdout=None, stderr=None, append=True):
if stdout is None:
stdout = os.devnull
if stderr is None:
stderr = stdout
MAX_SIZE = 16384
rfd, wfd = os.pipe()
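    # The pipe carries either the daemon's PID or a formatted traceback from
    # the intermediate child back to the calling process.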
argv = command.formulate()
firstpid = os.fork()
if firstpid == 0:
# first child: become session leader,
os.close(rfd)
rc = 0
try:
os.setsid()
os.umask(0)
stdin = open(os.devnull, "r")
stdout = open(stdout, "a" if append else "w")
stderr = open(stderr, "a" if append else "w")
signal.signal(signal.SIGHUP, signal.SIG_IGN)
proc = command.popen(
cwd=cwd,
close_fds=True,
stdin=stdin.fileno(),
stdout=stdout.fileno(),
stderr=stderr.fileno())
os.write(wfd, str(proc.pid).encode("utf8"))
except:
rc = 1
tbtext = "".join(
traceback.format_exception(*sys.exc_info()))[-MAX_SIZE:]
os.write(wfd, tbtext.encode("utf8"))
finally:
os.close(wfd)
os._exit(rc)
else:
# wait for first child to die
os.close(wfd)
_, rc = os.waitpid(firstpid, 0)
output = os.read(rfd, MAX_SIZE)
os.close(rfd)
try:
output = output.decode("utf8")
except UnicodeError:
pass
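        # A zero exit status with a purely numeric payload means the daemon
        # started and its PID was reported; anything else is a traceback.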
if rc == 0 and output.isdigit():
secondpid = int(output)
else:
raise ProcessExecutionError(argv, rc, "", output)
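        # Build a detached Popen-like handle for the daemon; poll() and wait()
        # are replaced below because the daemon is not a direct child of this process.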
proc = subprocess.Popen.__new__(subprocess.Popen)
proc._child_created = True
proc.returncode = None
proc.stdout = None
proc.stdin = None
proc.stderr = None
proc.pid = secondpid
proc.universal_newlines = False
proc._input = None
proc._waitpid_lock = _fake_lock()
proc._communication_started = False
proc.args = argv
proc.argv = argv
def poll(self=proc):
if self.returncode is None:
try:
os.kill(self.pid, 0)
except OSError:
ex = sys.exc_info()[1]
if ex.errno == errno.ESRCH:
# process does not exist
self.returncode = 0
else:
raise
return self.returncode
def wait(self=proc):
while self.returncode is None:
if self.poll() is None:
time.sleep(0.5)
return proc.returncode
proc.poll = poll
proc.wait = wait
return proc
def win32_daemonize(command, cwd, stdout=None, stderr=None, append=True):
if stdout is None:
stdout = os.devnull
if stderr is None:
stderr = stdout
DETACHED_PROCESS = 0x00000008
stdin = open(os.devnull, "r")
stdout = open(stdout, "a" if append else "w")
stderr = open(stderr, "a" if append else "w")
return command.popen(
cwd=cwd,
stdin=stdin.fileno(),
stdout=stdout.fileno(),
stderr=stderr.fileno(),
creationflags=subprocess.CREATE_NEW_PROCESS_GROUP | DETACHED_PROCESS)
|
import docker
import mock
import pytest
from paasta_tools.paasta_execute_docker_command import execute_in_container
from paasta_tools.paasta_execute_docker_command import main
from paasta_tools.paasta_execute_docker_command import TimeoutException
def test_execute_in_container():
fake_container_id = "fake_container_id"
fake_return_code = 0
fake_output = "fake_output"
fake_command = "fake_cmd"
mock_docker_client = mock.MagicMock(spec_set=docker.Client)
mock_docker_client.exec_start.return_value = fake_output
mock_docker_client.exec_inspect.return_value = {"ExitCode": fake_return_code}
assert execute_in_container(
mock_docker_client, fake_container_id, fake_command, 1
) == (fake_output, fake_return_code)
expected_cmd = ["/bin/sh", "-c", fake_command]
mock_docker_client.exec_create.assert_called_once_with(
fake_container_id, expected_cmd
)
def test_execute_in_container_reuses_exec():
fake_container_id = "fake_container_id"
fake_execid = "fake_execid"
fake_return_code = 0
fake_output = "fake_output"
fake_command = "fake_cmd"
mock_docker_client = mock.MagicMock(spec_set=docker.Client)
mock_docker_client.inspect_container.return_value = {"ExecIDs": [fake_execid]}
mock_docker_client.exec_start.return_value = fake_output
mock_docker_client.exec_inspect.return_value = {
"ExitCode": fake_return_code,
"ProcessConfig": {"entrypoint": "/bin/sh", "arguments": ["-c", fake_command]},
}
assert execute_in_container(
mock_docker_client, fake_container_id, fake_command, 1
) == (fake_output, fake_return_code)
assert mock_docker_client.exec_create.call_count == 0
mock_docker_client.exec_start.assert_called_once_with(fake_execid, stream=False)
def test_execute_in_container_reuses_only_valid_exec():
fake_container_id = "fake_container_id"
fake_execid = "fake_execid"
fake_return_code = 0
fake_output = "fake_output"
fake_command = "fake_cmd"
bad_exec = {
"ExitCode": fake_return_code,
"ProcessConfig": {
"entrypoint": "/bin/sh",
"arguments": ["-c", "some_other_command"],
},
}
good_exec = {
"ExitCode": fake_return_code,
"ProcessConfig": {"entrypoint": "/bin/sh", "arguments": ["-c", fake_command]},
}
mock_docker_client = mock.MagicMock(spec_set=docker.Client)
mock_docker_client.inspect_container.return_value = {
"ExecIDs": ["fake_other_exec", fake_execid, "fake_other_exec"]
}
mock_docker_client.exec_start.return_value = fake_output
# the last side effect is used to check the exit code of the command
mock_docker_client.exec_inspect.side_effect = [
bad_exec,
good_exec,
bad_exec,
good_exec,
]
assert execute_in_container(
mock_docker_client, fake_container_id, fake_command, 1
) == (fake_output, fake_return_code)
assert mock_docker_client.exec_create.call_count == 0
mock_docker_client.exec_start.assert_called_once_with(fake_execid, stream=False)
def test_main():
fake_container_id = "fake_container_id"
fake_timeout = 3
with mock.patch(
"paasta_tools.paasta_execute_docker_command.get_container_id_for_mesos_id",
return_value=fake_container_id,
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.parse_args", autospec=True
) as args_patch, mock.patch(
"paasta_tools.paasta_execute_docker_command.execute_in_container",
return_value=("fake_output", 0),
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.time_limit", autospec=True
) as time_limit_patch:
args_patch.return_value.mesos_id = "fake_task_id"
args_patch.return_value.timeout = fake_timeout
with pytest.raises(SystemExit) as excinfo:
main()
time_limit_patch.assert_called_once_with(fake_timeout)
assert excinfo.value.code == 0
def test_main_with_empty_task_id():
fake_container_id = "fake_container_id"
fake_timeout = 3
with mock.patch(
"paasta_tools.paasta_execute_docker_command.get_container_id_for_mesos_id",
return_value=fake_container_id,
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.parse_args", autospec=True
) as args_patch, mock.patch(
"paasta_tools.paasta_execute_docker_command.execute_in_container",
return_value=("fake_output", 0),
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.time_limit", autospec=True
):
args_patch.return_value.mesos_id = ""
args_patch.return_value.timeout = fake_timeout
with pytest.raises(SystemExit) as excinfo:
main()
assert excinfo.value.code == 2
def test_main_container_not_found_failure():
with mock.patch(
"paasta_tools.paasta_execute_docker_command.get_container_id_for_mesos_id",
return_value=None,
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.execute_in_container",
return_value=("fake_output", 2),
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.parse_args", autospec=True
) as args_patch, mock.patch(
"paasta_tools.paasta_execute_docker_command.time_limit", autospec=True
):
args_patch.return_value.mesos_id = "fake_task_id"
with pytest.raises(SystemExit) as excinfo:
main()
assert excinfo.value.code == 1
def test_main_cmd_unclean_exit_failure():
fake_container_id = "fake_container_id"
with mock.patch(
"paasta_tools.paasta_execute_docker_command.get_container_id_for_mesos_id",
return_value=fake_container_id,
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.execute_in_container",
return_value=("fake_output", 2),
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.parse_args", autospec=True
) as args_patch, mock.patch(
"paasta_tools.paasta_execute_docker_command.time_limit", autospec=True
):
args_patch.return_value.mesos_id = "fake_task_id"
with pytest.raises(SystemExit) as excinfo:
main()
assert excinfo.value.code == 2
def test_main_timeout_failure():
fake_container_id = "fake_container_id"
fake_timeout = 3
with mock.patch(
"paasta_tools.paasta_execute_docker_command.get_container_id_for_mesos_id",
return_value=fake_container_id,
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.parse_args", autospec=True
) as args_patch, mock.patch(
"paasta_tools.paasta_execute_docker_command.execute_in_container",
return_value=("fake_output", 0),
autospec=True,
), mock.patch(
"paasta_tools.paasta_execute_docker_command.time_limit",
side_effect=TimeoutException,
autospec=True,
) as time_limit_patch:
args_patch.return_value.mesos_id = "fake_task_id"
args_patch.return_value.timeout = fake_timeout
with pytest.raises(SystemExit) as excinfo:
main()
time_limit_patch.assert_called_once_with(fake_timeout)
assert excinfo.value.code == 1
|
import logging
from aiohttp import ClientError
from homeassistant.util.dt import utcnow
from .const import ACTIVITY_UPDATE_INTERVAL
from .subscriber import AugustSubscriberMixin
_LOGGER = logging.getLogger(__name__)
ACTIVITY_STREAM_FETCH_LIMIT = 10
ACTIVITY_CATCH_UP_FETCH_LIMIT = 1000
class ActivityStream(AugustSubscriberMixin):
"""August activity stream handler."""
def __init__(self, hass, api, august_gateway, house_ids):
"""Init August activity stream object."""
super().__init__(hass, ACTIVITY_UPDATE_INTERVAL)
self._hass = hass
self._august_gateway = august_gateway
self._api = api
self._house_ids = house_ids
self._latest_activities_by_id_type = {}
self._last_update_time = None
self._abort_async_track_time_interval = None
async def async_setup(self):
"""Token refresh check and catch up the activity stream."""
        await self._async_refresh(utcnow())
def get_latest_device_activity(self, device_id, activity_types):
"""Return latest activity that is one of the acitivty_types."""
if device_id not in self._latest_activities_by_id_type:
return None
latest_device_activities = self._latest_activities_by_id_type[device_id]
latest_activity = None
for activity_type in activity_types:
if activity_type in latest_device_activities:
if (
latest_activity is not None
and latest_device_activities[activity_type].activity_start_time
<= latest_activity.activity_start_time
):
continue
latest_activity = latest_device_activities[activity_type]
return latest_activity
async def _async_refresh(self, time):
"""Update the activity stream from August."""
# This is the only place we refresh the api token
await self._august_gateway.async_refresh_access_token_if_needed()
await self._async_update_device_activities(time)
async def _async_update_device_activities(self, time):
_LOGGER.debug("Start retrieving device activities")
limit = (
ACTIVITY_STREAM_FETCH_LIMIT
if self._last_update_time
else ACTIVITY_CATCH_UP_FETCH_LIMIT
)
for house_id in self._house_ids:
_LOGGER.debug("Updating device activity for house id %s", house_id)
try:
activities = await self._api.async_get_house_activities(
self._august_gateway.access_token, house_id, limit=limit
)
except ClientError as ex:
_LOGGER.error(
"Request error trying to retrieve activity for house id %s: %s",
house_id,
ex,
)
# Make sure we process the next house if one of them fails
continue
_LOGGER.debug(
"Completed retrieving device activities for house id %s", house_id
)
updated_device_ids = self._process_newer_device_activities(activities)
if updated_device_ids:
for device_id in updated_device_ids:
_LOGGER.debug(
"async_signal_device_id_update (from activity stream): %s",
device_id,
)
self.async_signal_device_id_update(device_id)
self._last_update_time = time
def _process_newer_device_activities(self, activities):
updated_device_ids = set()
for activity in activities:
self._latest_activities_by_id_type.setdefault(activity.device_id, {})
            latest_activity = self._latest_activities_by_id_type[
                activity.device_id
            ].get(activity.activity_type)
            # Ignore activities that are older than the latest one
            if (
                latest_activity
                and latest_activity.activity_start_time >= activity.activity_start_time
            ):
continue
self._latest_activities_by_id_type[activity.device_id][
activity.activity_type
] = activity
updated_device_ids.add(activity.device_id)
return updated_device_ids
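# --- Illustrative sketch (not part of Home Assistant; the helper name below is
# hypothetical). A standalone equivalent of the dedup logic above: keep only the
# newest activity per (device_id, activity_type), assuming each activity exposes
# device_id, activity_type and activity_start_time attributes.
def _example_keep_newest_activity(activities):
    """Return {device_id: {activity_type: newest activity}} (sketch only)."""
    latest = {}
    for activity in activities:
        per_device = latest.setdefault(activity.device_id, {})
        current = per_device.get(activity.activity_type)
        # Replace the stored activity only when the incoming one is newer
        if current is None or (
            current.activity_start_time < activity.activity_start_time
        ):
            per_device[activity.activity_type] = activity
    return latest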
|
from qstrader.execution.execution_handler import (
ExecutionHandler
)
from qstrader.execution.execution_algo.market_order import (
MarketOrderExecutionAlgorithm
)
from qstrader.portcon.pcm import (
PortfolioConstructionModel
)
from qstrader.portcon.optimiser.fixed_weight import (
FixedWeightPortfolioOptimiser
)
from qstrader.portcon.order_sizer.dollar_weighted import (
DollarWeightedCashBufferedOrderSizer
)
from qstrader.portcon.order_sizer.long_short import (
LongShortLeveragedOrderSizer
)
class QuantTradingSystem(object):
"""
Encapsulates all components associated with the quantitative
trading system. This includes the alpha model(s), the risk
model, the transaction cost model along with portfolio construction
and execution mechanism.
Parameters
----------
universe : `Universe`
The Asset Universe.
broker : `Broker`
The Broker to execute orders against.
broker_portfolio_id : `str`
The specific broker portfolio to send orders to.
data_handler : `DataHandler`
The data handler instance used for all market/fundamental data.
alpha_model : `AlphaModel`
The alpha model used within the portfolio construction.
    risk_model : `RiskModel`, optional
An optional risk model used within the portfolio construction.
long_only : `Boolean`, optional
Whether to invoke the long only order sizer or allow
long/short leveraged portfolios. Defaults to long/short leveraged.
submit_orders : `Boolean`, optional
Whether to actually submit generated orders. Defaults to no submission.
"""
def __init__(
self,
universe,
broker,
broker_portfolio_id,
data_handler,
alpha_model,
*args,
risk_model=None,
long_only=False,
submit_orders=False,
**kwargs
):
self.universe = universe
self.broker = broker
self.broker_portfolio_id = broker_portfolio_id
self.data_handler = data_handler
self.alpha_model = alpha_model
self.risk_model = risk_model
self.long_only = long_only
self.submit_orders = submit_orders
self._initialise_models(**kwargs)
def _create_order_sizer(self, **kwargs):
"""
Depending upon whether the quant trading system has been
set to be long only, determine the appropriate order sizing
mechanism.
Returns
-------
`OrderSizer`
The order sizing mechanism for the portfolio construction.
"""
if self.long_only:
if 'cash_buffer_percentage' not in kwargs:
raise ValueError(
'Long only portfolio specified for Quant Trading System '
'but no cash buffer percentage supplied.'
)
cash_buffer_percentage = kwargs['cash_buffer_percentage']
order_sizer = DollarWeightedCashBufferedOrderSizer(
self.broker,
self.broker_portfolio_id,
self.data_handler,
cash_buffer_percentage=cash_buffer_percentage
)
else:
if 'gross_leverage' not in kwargs:
raise ValueError(
'Long/short leveraged portfolio specified for Quant '
'Trading System but no gross leverage percentage supplied.'
)
gross_leverage = kwargs['gross_leverage']
order_sizer = LongShortLeveragedOrderSizer(
self.broker,
self.broker_portfolio_id,
self.data_handler,
gross_leverage=gross_leverage
)
return order_sizer
def _initialise_models(self, **kwargs):
"""
Initialise the various models for the quantitative
trading strategy. This includes the portfolio
construction and the execution.
TODO: Add TransactionCostModel
TODO: Ensure this is dynamically generated from config.
"""
# Determine the appropriate order sizing mechanism
order_sizer = self._create_order_sizer(**kwargs)
# TODO: Allow optimiser to be generated from config
optimiser = FixedWeightPortfolioOptimiser(
data_handler=self.data_handler
)
# Generate the portfolio construction
self.portfolio_construction_model = PortfolioConstructionModel(
self.broker,
self.broker_portfolio_id,
self.universe,
order_sizer,
optimiser,
alpha_model=self.alpha_model,
risk_model=self.risk_model,
data_handler=self.data_handler
)
# Execution
execution_algo = MarketOrderExecutionAlgorithm()
self.execution_handler = ExecutionHandler(
self.broker,
self.broker_portfolio_id,
self.universe,
submit_orders=self.submit_orders,
execution_algo=execution_algo,
data_handler=self.data_handler
)
def __call__(self, dt, stats=None):
"""
Construct the portfolio and (optionally) execute the orders
with the broker.
Parameters
----------
dt : `pd.Timestamp`
The current time.
stats : `dict`, optional
An optional statistics dictionary to append values to
throughout the simulation lifetime.
Returns
-------
`None`
"""
# Construct the target portfolio
rebalance_orders = self.portfolio_construction_model(dt, stats=stats)
# Execute the orders
self.execution_handler(dt, rebalance_orders)
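# --- Illustrative usage sketch (assumptions, not part of qstrader) ---
# The extra kwargs passed to QuantTradingSystem are forwarded to the order
# sizer: a long-only system must supply `cash_buffer_percentage`, while the
# default long/short leveraged system must supply `gross_leverage`, e.g.:
#
#   qts = QuantTradingSystem(
#       universe, broker, "portfolio_id", data_handler, alpha_model,
#       long_only=True, cash_buffer_percentage=0.05, submit_orders=True,
#   )
#   qts(pd.Timestamp("2020-01-02 14:30", tz="UTC"))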
|
import logging
import weakref
from gi.repository import GObject
log = logging.getLogger(__name__)
class GroupAction:
"""A group action combines several actions into one logical action.
"""
def __init__(self, seq):
self.seq = seq
# TODO: If a GroupAction affects more than one sequence, our logic
# breaks. Currently, this isn't a problem.
self.buffer = seq.actions[0].buffer
def undo(self):
actions = []
while self.seq.can_undo():
actions.extend(self.seq.undo())
return actions
def redo(self):
actions = []
while self.seq.can_redo():
actions.extend(self.seq.redo())
return actions
class UndoSequence(GObject.GObject):
"""A manager class for operations which can be undone/redone.
"""
__gsignals__ = {
'can-undo': (
GObject.SignalFlags.RUN_FIRST,
None, (GObject.TYPE_BOOLEAN,)
),
'can-redo': (
GObject.SignalFlags.RUN_FIRST,
None, (GObject.TYPE_BOOLEAN,)
),
'checkpointed': (
GObject.SignalFlags.RUN_FIRST,
None, (GObject.TYPE_OBJECT, GObject.TYPE_BOOLEAN,)
),
}
def __init__(self, buffers):
"""Create an empty UndoSequence
An undo sequence is tied to a collection of GtkTextBuffers, and
expects to maintain undo checkpoints for the same set of
buffers for the lifetime of the UndoSequence.
"""
super().__init__()
self.buffer_refs = [weakref.ref(buf) for buf in buffers]
self.clear()
def clear(self):
"""Remove all undo and redo actions from this sequence
If the sequence was previously able to undo and/or redo, the
'can-undo' and 'can-redo' signals are emitted.
"""
if self.can_undo():
self.emit('can-undo', 0)
if self.can_redo():
self.emit('can-redo', 0)
self.actions = []
self.next_redo = 0
self.checkpoints = {
# Each buffer's checkpoint starts at zero and has no end
ref(): [0, None] for ref in self.buffer_refs
}
self.group = None
self.busy = False
def can_undo(self):
"""Return whether an undo is possible."""
return getattr(self, 'next_redo', 0) > 0
def can_redo(self):
"""Return whether a redo is possible."""
next_redo = getattr(self, 'next_redo', 0)
return next_redo < len(getattr(self, 'actions', []))
def add_action(self, action):
"""Add an action to the undo list.
Arguments:
action -- A class with two callable attributes: 'undo' and 'redo'
which are called by this sequence during an undo or redo.
"""
if self.busy:
return
if self.group is None:
if self.checkpointed(action.buffer):
self.checkpoints[action.buffer][1] = self.next_redo
self.emit('checkpointed', action.buffer, False)
else:
# If we go back in the undo stack before the checkpoint starts,
# and then modify the buffer, we lose the checkpoint altogether
start, end = self.checkpoints.get(action.buffer, (None, None))
if start is not None and start > self.next_redo:
self.checkpoints[action.buffer] = (None, None)
could_undo = self.can_undo()
could_redo = self.can_redo()
self.actions[self.next_redo:] = []
self.actions.append(action)
self.next_redo += 1
if not could_undo:
self.emit('can-undo', 1)
if could_redo:
self.emit('can-redo', 0)
else:
self.group.add_action(action)
def undo(self):
"""Undo an action.
Raises an AssertionError if the sequence is not undoable.
"""
assert self.next_redo > 0
self.busy = True
buf = self.actions[self.next_redo - 1].buffer
if self.checkpointed(buf):
self.emit('checkpointed', buf, False)
could_redo = self.can_redo()
self.next_redo -= 1
actions = self.actions[self.next_redo].undo()
self.busy = False
if not self.can_undo():
self.emit('can-undo', 0)
if not could_redo:
self.emit('can-redo', 1)
if self.checkpointed(buf):
self.emit('checkpointed', buf, True)
return actions
def redo(self):
"""Redo an action.
        Raises an AssertionError if the sequence is not redoable.
"""
assert self.next_redo < len(self.actions)
self.busy = True
buf = self.actions[self.next_redo].buffer
if self.checkpointed(buf):
self.emit('checkpointed', buf, False)
could_undo = self.can_undo()
a = self.actions[self.next_redo]
self.next_redo += 1
actions = a.redo()
self.busy = False
if not could_undo:
self.emit('can-undo', 1)
if not self.can_redo():
self.emit('can-redo', 0)
if self.checkpointed(buf):
self.emit('checkpointed', buf, True)
return actions
def checkpoint(self, buf):
start = self.next_redo
while start > 0 and self.actions[start - 1].buffer != buf:
start -= 1
end = self.next_redo
while (end < len(self.actions) - 1 and
self.actions[end + 1].buffer != buf):
end += 1
if end == len(self.actions):
end = None
self.checkpoints[buf] = [start, end]
self.emit('checkpointed', buf, True)
def checkpointed(self, buf):
# While the main undo sequence should always have checkpoints
# recorded, grouped subsequences won't.
start, end = self.checkpoints.get(buf, (None, None))
if start is None:
return False
if end is None:
end = len(self.actions)
return start <= self.next_redo <= end
def begin_group(self):
"""Group several actions into a single logical action.
When you wrap several calls to add_action() inside begin_group()
and end_group(), all the intervening actions are considered
one logical action. For instance a 'replace' action may be
implemented as a pair of 'delete' and 'create' actions, but
undoing should undo both of them.
"""
if self.busy:
return
if self.group:
self.group.begin_group()
else:
buffers = [ref() for ref in self.buffer_refs]
self.group = UndoSequence(buffers)
def end_group(self):
"""End a logical group action
This must always be paired with a begin_group() call. However,
we don't complain if this is not the case because we rely on
external libraries (i.e., GTK+ and GtkSourceView) also pairing
these correctly.
See also begin_group().
"""
if self.busy:
return
if self.group is None:
log.warning('Tried to end a non-existent group')
return
if self.group.group is not None:
self.group.end_group()
else:
group = self.group
self.group = None
# Collapse single action groups
if len(group.actions) == 1:
self.add_action(group.actions[0])
elif len(group.actions) > 1:
self.add_action(GroupAction(group))
def abort_group(self):
"""Clear the currently grouped actions
This discards all actions since the last begin_group() was
called. Note that it does not actually undo the actions
themselves.
"""
if self.busy:
return
if self.group is None:
log.warning('Tried to abort a non-existent group')
return
if self.group.group is not None:
self.group.abort_group()
else:
self.group = None
def in_grouped_action(self):
return self.group is not None
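# --- Illustrative usage sketch (assumptions, not part of Meld; the action and
# buffer names below are hypothetical) ---
# Any object with a `buffer` attribute and callable `undo`/`redo` attributes can
# be recorded. Wrapping several add_action() calls in begin_group()/end_group()
# collapses them into a single logical undo step:
#
#   seq = UndoSequence([text_buffer])
#   seq.begin_group()
#   seq.add_action(delete_action)   # e.g. the "delete" half of a replace
#   seq.add_action(insert_action)   # ... and the "insert" half
#   seq.end_group()
#   seq.undo()                      # undoes both actions at once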
|
import asyncio
from datetime import timedelta
from typing import Callable, List
from haffmpeg.camera import CameraMjpeg
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util import Throttle
from .const import (
CONF_FFMPEG_ARGUMENTS,
DATA_COORDINATOR,
DEFAULT_FFMPEG_ARGUMENTS,
DEFAULT_TIMEOUT,
DOMAIN,
MANUFACTURER,
)
from .coordinator import CanaryDataUpdateCoordinator
MIN_TIME_BETWEEN_SESSION_RENEW = timedelta(seconds=90)
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_FFMPEG_ARGUMENTS, invalidation_version="0.118"),
PLATFORM_SCHEMA.extend(
{
vol.Optional(
CONF_FFMPEG_ARGUMENTS, default=DEFAULT_FFMPEG_ARGUMENTS
): cv.string
}
),
)
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Canary sensors based on a config entry."""
coordinator: CanaryDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
ffmpeg_arguments = entry.options.get(
CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS
)
cameras = []
for location_id, location in coordinator.data["locations"].items():
for device in location.devices:
if device.is_online:
cameras.append(
CanaryCamera(
hass,
coordinator,
location_id,
device,
DEFAULT_TIMEOUT,
ffmpeg_arguments,
)
)
async_add_entities(cameras, True)
class CanaryCamera(CoordinatorEntity, Camera):
"""An implementation of a Canary security camera."""
def __init__(self, hass, coordinator, location_id, device, timeout, ffmpeg_args):
"""Initialize a Canary security camera."""
super().__init__(coordinator)
Camera.__init__(self)
self._ffmpeg = hass.data[DATA_FFMPEG]
self._ffmpeg_arguments = ffmpeg_args
self._location_id = location_id
self._device = device
self._device_id = device.device_id
self._device_name = device.name
self._device_type_name = device.device_type["name"]
self._timeout = timeout
self._live_stream_session = None
@property
def location(self):
"""Return information about the location."""
return self.coordinator.data["locations"][self._location_id]
@property
def name(self):
"""Return the name of this device."""
return self._device_name
@property
def unique_id(self):
"""Return the unique ID of this camera."""
return str(self._device_id)
@property
def device_info(self):
"""Return the device_info of the device."""
return {
"identifiers": {(DOMAIN, str(self._device_id))},
"name": self._device_name,
"model": self._device_type_name,
"manufacturer": MANUFACTURER,
}
@property
def is_recording(self):
"""Return true if the device is recording."""
return self.location.is_recording
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return not self.location.is_recording
async def async_camera_image(self):
"""Return a still image response from the camera."""
await self.hass.async_add_executor_job(self.renew_live_stream_session)
ffmpeg = ImageFrame(self._ffmpeg.binary, loop=self.hass.loop)
image = await asyncio.shield(
ffmpeg.get_image(
self._live_stream_session.live_stream_url,
output_format=IMAGE_JPEG,
extra_cmd=self._ffmpeg_arguments,
)
)
return image
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
if self._live_stream_session is None:
return
stream = CameraMjpeg(self._ffmpeg.binary, loop=self.hass.loop)
await stream.open_camera(
self._live_stream_session.live_stream_url, extra_cmd=self._ffmpeg_arguments
)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass,
request,
stream_reader,
self._ffmpeg.ffmpeg_stream_content_type,
)
finally:
await stream.close()
@Throttle(MIN_TIME_BETWEEN_SESSION_RENEW)
def renew_live_stream_session(self):
"""Renew live stream session."""
self._live_stream_session = self.coordinator.canary.get_live_stream_session(
self._device
)
|
import copy
import os
from os import path as op
import shutil
import numpy as np
from numpy import array_equal
from numpy.testing import assert_allclose, assert_array_equal
import pytest
import mne
from mne import (pick_types, read_annotations, create_info,
events_from_annotations, make_forward_solution)
from mne.transforms import apply_trans
from mne.io import read_raw_fif, read_raw_ctf, RawArray
from mne.io.compensator import get_current_comp
from mne.io.ctf.constants import CTF
from mne.io.tests.test_raw import _test_raw_reader
from mne.tests.test_annotations import _assert_annotations_equal
from mne.utils import (run_tests_if_main, _clean_names, catch_logging,
_stamp_to_dt)
from mne.datasets import testing, spm_face, brainstorm
from mne.io.constants import FIFF
ctf_dir = op.join(testing.data_path(download=False), 'CTF')
ctf_fname_continuous = 'testdata_ctf.ds'
ctf_fname_1_trial = 'testdata_ctf_short.ds'
ctf_fname_2_trials = 'testdata_ctf_pseudocontinuous.ds'
ctf_fname_discont = 'testdata_ctf_short_discontinuous.ds'
ctf_fname_somato = 'somMDYO-18av.ds'
ctf_fname_catch = 'catch-alp-good-f.ds'
somato_fname = op.join(
brainstorm.bst_raw.data_path(download=False), 'MEG', 'bst_raw',
'subj001_somatosensory_20111109_01_AUX-f.ds'
)
block_sizes = {
ctf_fname_continuous: 12000,
ctf_fname_1_trial: 4801,
ctf_fname_2_trials: 12000,
ctf_fname_discont: 1201,
ctf_fname_somato: 313,
ctf_fname_catch: 2500,
}
single_trials = (
ctf_fname_continuous,
ctf_fname_1_trial,
)
ctf_fnames = tuple(sorted(block_sizes.keys()))
@pytest.mark.slowtest
@testing.requires_testing_data
def test_read_ctf(tmpdir):
"""Test CTF reader."""
temp_dir = str(tmpdir)
out_fname = op.join(temp_dir, 'test_py_raw.fif')
# Create a dummy .eeg file so we can test our reading/application of it
os.mkdir(op.join(temp_dir, 'randpos'))
ctf_eeg_fname = op.join(temp_dir, 'randpos', ctf_fname_catch)
shutil.copytree(op.join(ctf_dir, ctf_fname_catch), ctf_eeg_fname)
with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'):
raw = _test_raw_reader(read_raw_ctf, directory=ctf_eeg_fname)
picks = pick_types(raw.info, meg=False, eeg=True)
pos = np.random.RandomState(42).randn(len(picks), 3)
fake_eeg_fname = op.join(ctf_eeg_fname, 'catch-alp-good-f.eeg')
# Create a bad file
with open(fake_eeg_fname, 'wb') as fid:
fid.write('foo\n'.encode('ascii'))
pytest.raises(RuntimeError, read_raw_ctf, ctf_eeg_fname)
# Create a good file
with open(fake_eeg_fname, 'wb') as fid:
for ii, ch_num in enumerate(picks):
args = (str(ch_num + 1), raw.ch_names[ch_num],) + tuple(
'%0.5f' % x for x in 100 * pos[ii]) # convert to cm
fid.write(('\t'.join(args) + '\n').encode('ascii'))
pos_read_old = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
with pytest.warns(RuntimeWarning, match='RMSP .* changed to a MISC ch'):
raw = read_raw_ctf(ctf_eeg_fname) # read modified data
pos_read = np.array([raw.info['chs'][p]['loc'][:3] for p in picks])
assert_allclose(apply_trans(raw.info['ctf_head_t'], pos), pos_read,
rtol=1e-5, atol=1e-5)
assert (pos_read == pos_read_old).mean() < 0.1
shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_randpos_raw.fif'),
op.join(temp_dir, 'randpos', 'catch-alp-good-f.ds_raw.fif'))
# Create a version with no hc, starting out *with* EEG pos (error)
    os.mkdir(op.join(temp_dir, 'no_hc'))
ctf_no_hc_fname = op.join(temp_dir, 'no_hc', ctf_fname_catch)
shutil.copytree(ctf_eeg_fname, ctf_no_hc_fname)
remove_base = op.join(ctf_no_hc_fname, op.basename(ctf_fname_catch[:-3]))
os.remove(remove_base + '.hc')
with pytest.warns(RuntimeWarning, match='MISC channel'):
pytest.raises(RuntimeError, read_raw_ctf, ctf_no_hc_fname)
os.remove(remove_base + '.eeg')
shutil.copy(op.join(ctf_dir, 'catch-alp-good-f.ds_nohc_raw.fif'),
op.join(temp_dir, 'no_hc', 'catch-alp-good-f.ds_raw.fif'))
# All our files
use_fnames = [op.join(ctf_dir, c) for c in ctf_fnames]
for fname in use_fnames:
raw_c = read_raw_fif(fname + '_raw.fif', preload=True)
with pytest.warns(None): # sometimes matches "MISC channel"
raw = read_raw_ctf(fname)
# check info match
assert_array_equal(raw.ch_names, raw_c.ch_names)
assert_allclose(raw.times, raw_c.times)
assert_allclose(raw._cals, raw_c._cals)
assert (raw.info['meas_id']['version'] ==
raw_c.info['meas_id']['version'] + 1)
for t in ('dev_head_t', 'dev_ctf_t', 'ctf_head_t'):
assert_allclose(raw.info[t]['trans'], raw_c.info[t]['trans'],
rtol=1e-4, atol=1e-7)
        # XXX 2019/11/29 : MNE-C FIF conversion files don't have meas_date set.
# Consider adding meas_date to below checks once this is addressed in
# MNE-C
for key in ('acq_pars', 'acq_stim', 'bads',
'ch_names', 'custom_ref_applied', 'description',
'events', 'experimenter', 'highpass', 'line_freq',
'lowpass', 'nchan', 'proj_id', 'proj_name',
'projs', 'sfreq', 'subject_info'):
assert raw.info[key] == raw_c.info[key], key
if op.basename(fname) not in single_trials:
# We don't force buffer size to be smaller like MNE-C
assert raw.buffer_size_sec == raw_c.buffer_size_sec
assert len(raw.info['comps']) == len(raw_c.info['comps'])
for c1, c2 in zip(raw.info['comps'], raw_c.info['comps']):
for key in ('colcals', 'rowcals'):
assert_allclose(c1[key], c2[key])
assert c1['save_calibrated'] == c2['save_calibrated']
for key in ('row_names', 'col_names', 'nrow', 'ncol'):
assert_array_equal(c1['data'][key], c2['data'][key])
assert_allclose(c1['data']['data'], c2['data']['data'], atol=1e-7,
rtol=1e-5)
assert_allclose(raw.info['hpi_results'][0]['coord_trans']['trans'],
raw_c.info['hpi_results'][0]['coord_trans']['trans'],
rtol=1e-5, atol=1e-7)
assert len(raw.info['chs']) == len(raw_c.info['chs'])
for ii, (c1, c2) in enumerate(zip(raw.info['chs'], raw_c.info['chs'])):
for key in ('kind', 'scanno', 'unit', 'ch_name', 'unit_mul',
'range', 'coord_frame', 'coil_type', 'logno'):
if c1['ch_name'] == 'RMSP' and \
'catch-alp-good-f' in fname and \
key in ('kind', 'unit', 'coord_frame', 'coil_type',
'logno'):
continue # XXX see below...
if key == 'coil_type' and c1[key] == FIFF.FIFFV_COIL_EEG:
# XXX MNE-C bug that this is not set
assert c2[key] == FIFF.FIFFV_COIL_NONE
continue
assert c1[key] == c2[key], key
for key in ('cal',):
assert_allclose(c1[key], c2[key], atol=1e-6, rtol=1e-4,
err_msg='raw.info["chs"][%d][%s]' % (ii, key))
# XXX 2016/02/24: fixed bug with normal computation that used
# to exist, once mne-C tools are updated we should update our FIF
# conversion files, then the slices can go away (and the check
# can be combined with that for "cal")
for key in ('loc',):
if c1['ch_name'] == 'RMSP' and 'catch-alp-good-f' in fname:
continue
if (c2[key][:3] == 0.).all():
check = [np.nan] * 3
else:
check = c2[key][:3]
assert_allclose(c1[key][:3], check, atol=1e-6, rtol=1e-4,
err_msg='raw.info["chs"][%d][%s]' % (ii, key))
if (c2[key][3:] == 0.).all():
check = [np.nan] * 3
else:
check = c2[key][9:12]
assert_allclose(c1[key][9:12], check, atol=1e-6, rtol=1e-4,
err_msg='raw.info["chs"][%d][%s]' % (ii, key))
# Make sure all digitization points are in the MNE head coord frame
for p in raw.info['dig']:
assert p['coord_frame'] == FIFF.FIFFV_COORD_HEAD, \
'dig points must be in FIFF.FIFFV_COORD_HEAD'
if fname.endswith('catch-alp-good-f.ds'): # omit points from .pos file
raw.info['dig'] = raw.info['dig'][:-10]
# XXX: Next test would fail because c-tools assign the fiducials from
# CTF data as HPI. Should eventually clarify/unify with Matti.
# assert_dig_allclose(raw.info, raw_c.info)
# check data match
raw_c.save(out_fname, overwrite=True, buffer_size_sec=1.)
raw_read = read_raw_fif(out_fname)
# so let's check tricky cases based on sample boundaries
rng = np.random.RandomState(0)
pick_ch = rng.permutation(np.arange(len(raw.ch_names)))[:10]
bnd = int(round(raw.info['sfreq'] * raw.buffer_size_sec))
assert bnd == raw._raw_extras[0]['block_size']
assert bnd == block_sizes[op.basename(fname)]
slices = (slice(0, bnd), slice(bnd - 1, bnd), slice(3, bnd),
slice(3, 300), slice(None))
if len(raw.times) >= 2 * bnd: # at least two complete blocks
slices = slices + (slice(bnd, 2 * bnd), slice(bnd, bnd + 1),
slice(0, bnd + 100))
for sl_time in slices:
assert_allclose(raw[pick_ch, sl_time][0],
raw_c[pick_ch, sl_time][0])
assert_allclose(raw_read[pick_ch, sl_time][0],
raw_c[pick_ch, sl_time][0])
# all data / preload
raw.load_data()
assert_allclose(raw[:][0], raw_c[:][0], atol=1e-15)
# test bad segment annotations
if 'testdata_ctf_short.ds' in fname:
assert 'bad' in raw.annotations.description[0]
assert_allclose(raw.annotations.onset, [2.15])
assert_allclose(raw.annotations.duration, [0.0225])
pytest.raises(TypeError, read_raw_ctf, 1)
pytest.raises(ValueError, read_raw_ctf, ctf_fname_continuous + 'foo.ds')
# test ignoring of system clock
read_raw_ctf(op.join(ctf_dir, ctf_fname_continuous), 'ignore')
pytest.raises(ValueError, read_raw_ctf,
op.join(ctf_dir, ctf_fname_continuous), 'foo')
@testing.requires_testing_data
def test_rawctf_clean_names():
"""Test RawCTF _clean_names method."""
# read test data
with pytest.warns(RuntimeWarning, match='ref channel RMSP did not'):
raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch))
raw_cleaned = read_raw_ctf(op.join(ctf_dir, ctf_fname_catch),
clean_names=True)
test_channel_names = _clean_names(raw.ch_names)
test_info_comps = copy.deepcopy(raw.info['comps'])
# channel names should not be cleaned by default
assert raw.ch_names != test_channel_names
chs_ch_names = [ch['ch_name'] for ch in raw.info['chs']]
assert chs_ch_names != test_channel_names
for test_comp, comp in zip(test_info_comps, raw.info['comps']):
for key in ('row_names', 'col_names'):
assert not array_equal(_clean_names(test_comp['data'][key]),
comp['data'][key])
# channel names should be cleaned if clean_names=True
assert raw_cleaned.ch_names == test_channel_names
for ch, test_ch_name in zip(raw_cleaned.info['chs'], test_channel_names):
assert ch['ch_name'] == test_ch_name
for test_comp, comp in zip(test_info_comps, raw_cleaned.info['comps']):
for key in ('row_names', 'col_names'):
assert _clean_names(test_comp['data'][key]) == comp['data'][key]
@spm_face.requires_spm_data
def test_read_spm_ctf():
"""Test CTF reader with omitted samples."""
data_path = spm_face.data_path()
raw_fname = op.join(data_path, 'MEG', 'spm',
'SPM_CTF_MEG_example_faces1_3D.ds')
raw = read_raw_ctf(raw_fname)
extras = raw._raw_extras[0]
assert extras['n_samp'] == raw.n_times
assert extras['n_samp'] != extras['n_samp_tot']
# Test that LPA, nasion and RPA are correct.
coord_frames = np.array([d['coord_frame'] for d in raw.info['dig']])
assert np.all(coord_frames == FIFF.FIFFV_COORD_HEAD)
cardinals = {d['ident']: d['r'] for d in raw.info['dig']}
assert cardinals[1][0] < cardinals[2][0] < cardinals[3][0] # x coord
assert cardinals[1][1] < cardinals[2][1] # y coord
assert cardinals[3][1] < cardinals[2][1] # y coord
for key in cardinals.keys():
assert_allclose(cardinals[key][2], 0, atol=1e-6) # z coord
@testing.requires_testing_data
@pytest.mark.parametrize('comp_grade', [0, 1])
def test_saving_picked(tmpdir, comp_grade):
"""Test saving picked CTF instances."""
temp_dir = str(tmpdir)
out_fname = op.join(temp_dir, 'test_py_raw.fif')
raw = read_raw_ctf(op.join(ctf_dir, ctf_fname_1_trial))
assert raw.info['meas_date'] == _stamp_to_dt((1367228160, 0))
raw.crop(0, 1).load_data()
assert raw.compensation_grade == get_current_comp(raw.info) == 0
assert len(raw.info['comps']) == 5
pick_kwargs = dict(meg=True, ref_meg=False, verbose=True)
raw.apply_gradient_compensation(comp_grade)
with catch_logging() as log:
raw_pick = raw.copy().pick_types(**pick_kwargs)
assert len(raw.info['comps']) == 5
assert len(raw_pick.info['comps']) == 0
log = log.getvalue()
assert 'Removing 5 compensators' in log
raw_pick.save(out_fname, overwrite=True) # should work
raw2 = read_raw_fif(out_fname)
assert (raw_pick.ch_names == raw2.ch_names)
assert_array_equal(raw_pick.times, raw2.times)
assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6,
atol=1e-20) # atol is very small but > 0
raw2 = read_raw_fif(out_fname, preload=True)
assert (raw_pick.ch_names == raw2.ch_names)
assert_array_equal(raw_pick.times, raw2.times)
assert_allclose(raw2[0:20][0], raw_pick[0:20][0], rtol=1e-6,
atol=1e-20) # atol is very small but > 0
@brainstorm.bst_raw.requires_bstraw_data
def test_read_ctf_annotations():
"""Test reading CTF marker file."""
EXPECTED_LATENCIES = np.array([
5640, 7950, 9990, 12253, 14171, 16557, 18896, 20846, # noqa
22702, 24990, 26830, 28974, 30906, 33077, 34985, 36907, # noqa
38922, 40760, 42881, 45222, 47457, 49618, 51802, 54227, # noqa
56171, 58274, 60394, 62375, 64444, 66767, 68827, 71109, # noqa
73499, 75807, 78146, 80415, 82554, 84508, 86403, 88426, # noqa
90746, 92893, 94779, 96822, 98996, 99001, 100949, 103325, # noqa
105322, 107678, 109667, 111844, 113682, 115817, 117691, 119663, # noqa
121966, 123831, 126110, 128490, 130521, 132808, 135204, 137210, # noqa
139130, 141390, 143660, 145748, 147889, 150205, 152528, 154646, # noqa
156897, 159191, 161446, 163722, 166077, 168467, 170624, 172519, # noqa
174719, 176886, 179062, 181405, 183709, 186034, 188454, 190330, # noqa
192660, 194682, 196834, 199161, 201035, 203008, 204999, 207409, # noqa
209661, 211895, 213957, 216005, 218040, 220178, 222137, 224305, # noqa
226297, 228654, 230755, 232909, 235205, 237373, 239723, 241762, # noqa
243748, 245762, 247801, 250055, 251886, 254252, 256441, 258354, # noqa
260680, 263026, 265048, 267073, 269235, 271556, 273927, 276197, # noqa
278436, 280536, 282691, 284933, 287061, 288936, 290941, 293183, # noqa
295369, 297729, 299626, 301546, 303449, 305548, 307882, 310124, # noqa
312374, 314509, 316815, 318789, 320981, 322879, 324878, 326959, # noqa
329341, 331200, 331201, 333469, 335584, 337984, 340143, 342034, # noqa
344360, 346309, 348544, 350970, 353052, 355227, 357449, 359603, # noqa
361725, 363676, 365735, 367799, 369777, 371904, 373856, 376204, # noqa
378391, 380800, 382859, 385161, 387093, 389434, 391624, 393785, # noqa
396093, 398214, 400198, 402166, 404104, 406047, 408372, 410686, # noqa
413029, 414975, 416850, 418797, 420824, 422959, 425026, 427215, # noqa
429278, 431668 # noqa
]) - 1 # Fieldtrip has 1 sample difference with MNE
raw = RawArray(
data=np.empty((1, 432000), dtype=np.float64),
info=create_info(ch_names=1, sfreq=1200.0))
raw.set_meas_date(read_raw_ctf(somato_fname).info['meas_date'])
raw.set_annotations(read_annotations(somato_fname))
events, _ = events_from_annotations(raw)
latencies = np.sort(events[:, 0])
assert_allclose(latencies, EXPECTED_LATENCIES, atol=1e-6)
@testing.requires_testing_data
def test_read_ctf_annotations_smoke_test():
"""Test reading CTF marker file.
    `testdata_ctf_mc.ds` has no trials or offsets, therefore it's a plain reading
of whatever is in the MarkerFile.mrk.
"""
EXPECTED_ONSET = [
0., 0.1425, 0.285, 0.42833333, 0.57083333, 0.71416667, 0.85666667,
0.99916667, 1.1425, 1.285, 1.4275, 1.57083333, 1.71333333, 1.85666667,
1.99916667, 2.14166667, 2.285, 2.4275, 2.57083333, 2.71333333,
2.85583333, 2.99916667, 3.14166667, 3.28416667, 3.4275, 3.57,
3.71333333, 3.85583333, 3.99833333, 4.14166667, 4.28416667, 4.42666667,
4.57, 4.7125, 4.85583333, 4.99833333
]
fname = op.join(ctf_dir, 'testdata_ctf_mc.ds')
annot = read_annotations(fname)
assert_allclose(annot.onset, EXPECTED_ONSET)
raw = read_raw_ctf(fname)
_assert_annotations_equal(raw.annotations, annot, 1e-6)
def _read_res4_mag_comp(dsdir):
res = mne.io.ctf.res4._read_res4(dsdir)
for ch in res['chs']:
if ch['sensor_type_index'] == CTF.CTFV_REF_MAG_CH:
ch['grad_order_no'] = 1
return res
def _bad_res4_grad_comp(dsdir):
res = mne.io.ctf.res4._read_res4(dsdir)
for ch in res['chs']:
if ch['sensor_type_index'] == CTF.CTFV_MEG_CH:
ch['grad_order_no'] = 1
break
return res
@testing.requires_testing_data
def test_read_ctf_mag_bad_comp(tmpdir, monkeypatch):
"""Test CTF reader with mag comps and bad comps."""
path = op.join(ctf_dir, ctf_fname_continuous)
raw_orig = read_raw_ctf(path)
assert raw_orig.compensation_grade == 0
monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _read_res4_mag_comp)
raw_mag_comp = read_raw_ctf(path)
assert raw_mag_comp.compensation_grade == 0
sphere = mne.make_sphere_model()
src = mne.setup_volume_source_space(pos=50., exclude=5., bem=sphere)
assert src[0]['nuse'] == 26
for grade in (0, 1):
raw_orig.apply_gradient_compensation(grade)
raw_mag_comp.apply_gradient_compensation(grade)
args = (None, src, sphere, True, False)
fwd_orig = make_forward_solution(raw_orig.info, *args)
fwd_mag_comp = make_forward_solution(raw_mag_comp.info, *args)
assert_allclose(fwd_orig['sol']['data'], fwd_mag_comp['sol']['data'])
monkeypatch.setattr(mne.io.ctf.ctf, '_read_res4', _bad_res4_grad_comp)
with pytest.raises(RuntimeError, match='inconsistent compensation grade'):
read_raw_ctf(path)
run_tests_if_main()
|
from copy import deepcopy
import pytest
import homeassistant.components.alert as alert
from homeassistant.components.alert import DOMAIN
import homeassistant.components.notify as notify
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_ENTITY_ID,
CONF_NAME,
CONF_STATE,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_IDLE,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
NAME = "alert_test"
DONE_MESSAGE = "alert_gone"
NOTIFIER = "test"
TEMPLATE = "{{ states.sensor.test.entity_id }}"
TEST_ENTITY = "sensor.test"
TITLE = "{{ states.sensor.test.entity_id }}"
TEST_TITLE = "sensor.test"
TEST_DATA = {"data": {"inline_keyboard": ["Close garage:/close_garage"]}}
TEST_CONFIG = {
alert.DOMAIN: {
NAME: {
CONF_NAME: NAME,
alert.CONF_DONE_MESSAGE: DONE_MESSAGE,
CONF_ENTITY_ID: TEST_ENTITY,
CONF_STATE: STATE_ON,
alert.CONF_REPEAT: 30,
alert.CONF_SKIP_FIRST: False,
alert.CONF_NOTIFIERS: [NOTIFIER],
alert.CONF_TITLE: TITLE,
alert.CONF_DATA: {},
}
}
}
TEST_NOACK = [
NAME,
NAME,
"sensor.test",
STATE_ON,
[30],
False,
None,
None,
NOTIFIER,
False,
None,
None,
]
ENTITY_ID = f"{alert.DOMAIN}.{NAME}"
@callback
def async_turn_on(hass, entity_id):
"""Async reset the alert.
This is a legacy helper method. Do not use it for new tests.
"""
data = {ATTR_ENTITY_ID: entity_id}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data))
@callback
def async_turn_off(hass, entity_id):
"""Async acknowledge the alert.
This is a legacy helper method. Do not use it for new tests.
"""
data = {ATTR_ENTITY_ID: entity_id}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data))
@callback
def async_toggle(hass, entity_id):
"""Async toggle acknowledgment of alert.
This is a legacy helper method. Do not use it for new tests.
"""
data = {ATTR_ENTITY_ID: entity_id}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_TOGGLE, data))
@pytest.fixture
def mock_notifier(hass):
"""Mock for notifier."""
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
hass.services.async_register(notify.DOMAIN, NOTIFIER, record_event)
return events
async def test_is_on(hass):
"""Test is_on method."""
hass.states.async_set(ENTITY_ID, STATE_ON)
await hass.async_block_till_done()
assert alert.is_on(hass, ENTITY_ID)
hass.states.async_set(ENTITY_ID, STATE_OFF)
await hass.async_block_till_done()
assert not alert.is_on(hass, ENTITY_ID)
async def test_setup(hass):
"""Test setup method."""
assert await async_setup_component(hass, alert.DOMAIN, TEST_CONFIG)
assert STATE_IDLE == hass.states.get(ENTITY_ID).state
async def test_fire(hass, mock_notifier):
"""Test the alert firing."""
assert await async_setup_component(hass, alert.DOMAIN, TEST_CONFIG)
hass.states.async_set("sensor.test", STATE_ON)
await hass.async_block_till_done()
assert STATE_ON == hass.states.get(ENTITY_ID).state
async def test_silence(hass, mock_notifier):
"""Test silencing the alert."""
assert await async_setup_component(hass, alert.DOMAIN, TEST_CONFIG)
hass.states.async_set("sensor.test", STATE_ON)
await hass.async_block_till_done()
async_turn_off(hass, ENTITY_ID)
await hass.async_block_till_done()
assert STATE_OFF == hass.states.get(ENTITY_ID).state
# alert should not be silenced on next fire
hass.states.async_set("sensor.test", STATE_OFF)
await hass.async_block_till_done()
assert STATE_IDLE == hass.states.get(ENTITY_ID).state
hass.states.async_set("sensor.test", STATE_ON)
await hass.async_block_till_done()
assert STATE_ON == hass.states.get(ENTITY_ID).state
async def test_reset(hass, mock_notifier):
"""Test resetting the alert."""
assert await async_setup_component(hass, alert.DOMAIN, TEST_CONFIG)
hass.states.async_set("sensor.test", STATE_ON)
await hass.async_block_till_done()
async_turn_off(hass, ENTITY_ID)
await hass.async_block_till_done()
assert STATE_OFF == hass.states.get(ENTITY_ID).state
async_turn_on(hass, ENTITY_ID)
await hass.async_block_till_done()
assert STATE_ON == hass.states.get(ENTITY_ID).state
async def test_toggle(hass, mock_notifier):
"""Test toggling alert."""
assert await async_setup_component(hass, alert.DOMAIN, TEST_CONFIG)
hass.states.async_set("sensor.test", STATE_ON)
await hass.async_block_till_done()
assert STATE_ON == hass.states.get(ENTITY_ID).state
async_toggle(hass, ENTITY_ID)
await hass.async_block_till_done()
assert STATE_OFF == hass.states.get(ENTITY_ID).state
async_toggle(hass, ENTITY_ID)
await hass.async_block_till_done()
assert STATE_ON == hass.states.get(ENTITY_ID).state
async def test_notification_no_done_message(hass):
"""Test notifications."""
events = []
config = deepcopy(TEST_CONFIG)
del config[alert.DOMAIN][NAME][alert.CONF_DONE_MESSAGE]
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
hass.services.async_register(notify.DOMAIN, NOTIFIER, record_event)
assert await async_setup_component(hass, alert.DOMAIN, config)
assert len(events) == 0
hass.states.async_set("sensor.test", STATE_ON)
await hass.async_block_till_done()
assert len(events) == 1
hass.states.async_set("sensor.test", STATE_OFF)
await hass.async_block_till_done()
assert len(events) == 1
async def test_notification(hass):
"""Test notifications."""
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
hass.services.async_register(notify.DOMAIN, NOTIFIER, record_event)
assert await async_setup_component(hass, alert.DOMAIN, TEST_CONFIG)
assert len(events) == 0
hass.states.async_set("sensor.test", STATE_ON)
await hass.async_block_till_done()
assert len(events) == 1
hass.states.async_set("sensor.test", STATE_OFF)
await hass.async_block_till_done()
assert len(events) == 2
async def test_sending_non_templated_notification(hass, mock_notifier):
"""Test notifications."""
assert await async_setup_component(hass, alert.DOMAIN, TEST_CONFIG)
hass.states.async_set(TEST_ENTITY, STATE_ON)
await hass.async_block_till_done()
assert len(mock_notifier) == 1
last_event = mock_notifier[-1]
assert last_event.data[notify.ATTR_MESSAGE] == NAME
async def test_sending_templated_notification(hass, mock_notifier):
"""Test templated notification."""
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_ALERT_MESSAGE] = TEMPLATE
assert await async_setup_component(hass, alert.DOMAIN, config)
hass.states.async_set(TEST_ENTITY, STATE_ON)
await hass.async_block_till_done()
assert len(mock_notifier) == 1
last_event = mock_notifier[-1]
assert last_event.data[notify.ATTR_MESSAGE] == TEST_ENTITY
async def test_sending_templated_done_notification(hass, mock_notifier):
"""Test templated notification."""
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_DONE_MESSAGE] = TEMPLATE
assert await async_setup_component(hass, alert.DOMAIN, config)
hass.states.async_set(TEST_ENTITY, STATE_ON)
await hass.async_block_till_done()
hass.states.async_set(TEST_ENTITY, STATE_OFF)
await hass.async_block_till_done()
assert len(mock_notifier) == 2
last_event = mock_notifier[-1]
assert last_event.data[notify.ATTR_MESSAGE] == TEST_ENTITY
async def test_sending_titled_notification(hass, mock_notifier):
"""Test notifications."""
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_TITLE] = TITLE
assert await async_setup_component(hass, alert.DOMAIN, config)
hass.states.async_set(TEST_ENTITY, STATE_ON)
await hass.async_block_till_done()
assert len(mock_notifier) == 1
last_event = mock_notifier[-1]
assert last_event.data[notify.ATTR_TITLE] == TEST_TITLE
async def test_sending_data_notification(hass, mock_notifier):
"""Test notifications."""
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_DATA] = TEST_DATA
assert await async_setup_component(hass, alert.DOMAIN, config)
hass.states.async_set(TEST_ENTITY, STATE_ON)
await hass.async_block_till_done()
assert len(mock_notifier) == 1
last_event = mock_notifier[-1]
assert last_event.data[notify.ATTR_DATA] == TEST_DATA
async def test_skipfirst(hass):
"""Test skipping first notification."""
config = deepcopy(TEST_CONFIG)
config[alert.DOMAIN][NAME][alert.CONF_SKIP_FIRST] = True
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
hass.services.async_register(notify.DOMAIN, NOTIFIER, record_event)
assert await async_setup_component(hass, alert.DOMAIN, config)
assert len(events) == 0
hass.states.async_set("sensor.test", STATE_ON)
await hass.async_block_till_done()
assert len(events) == 0
async def test_noack(hass):
"""Test no ack feature."""
entity = alert.Alert(hass, *TEST_NOACK)
hass.async_add_job(entity.begin_alerting)
await hass.async_block_till_done()
async def test_done_message_state_tracker_reset_on_cancel(hass):
"""Test that the done message is reset when canceled."""
entity = alert.Alert(hass, *TEST_NOACK)
entity._cancel = lambda *args: None
assert entity._send_done_message is False
entity._send_done_message = True
hass.async_add_job(entity.end_alerting)
await hass.async_block_till_done()
assert entity._send_done_message is False
|
import eventlet
from kombu import Connection
eventlet.monkey_patch()
def wait_many(timeout=1):
#: Create connection
#: If hostname, userid, password and virtual_host is not specified
#: the values below are the default, but listed here so it can
#: be easily changed.
with Connection('amqp://guest:guest@localhost:5672//') as connection:
#: SimpleQueue mimics the interface of the Python Queue module.
#: First argument can either be a queue name or a kombu.Queue object.
#: If a name, then the queue will be declared with the name as the
#: queue name, exchange name and routing key.
with connection.SimpleQueue('kombu_demo') as queue:
while True:
try:
message = queue.get(block=False, timeout=timeout)
except queue.Empty:
break
else:
message.ack()
print(message.payload)
eventlet.spawn(wait_many).wait()
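# --- Illustrative producer counterpart (a sketch, assuming the same broker URL
# and queue name as the consumer above; not part of the original example) ---
def send_many(n=10):
    """Publish ``n`` demo messages to the 'kombu_demo' queue (sketch only)."""
    with Connection('amqp://guest:guest@localhost:5672//') as connection:
        with connection.SimpleQueue('kombu_demo') as queue:
            for i in range(n):
                # SimpleQueue serializes the payload before publishing
                queue.put({'hello': 'world', 'n': i})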
|
import sys
import mne
from mne.io import read_raw_bti
def run():
"""Run command."""
from mne.commands.utils import get_optparser
parser = get_optparser(__file__)
parser.add_option('-p', '--pdf', dest='pdf_fname',
help='Input data file name', metavar='FILE')
parser.add_option('-c', '--config', dest='config_fname',
help='Input config file name', metavar='FILE',
default='config')
parser.add_option('--head_shape', dest='head_shape_fname',
help='Headshape file name', metavar='FILE',
default='hs_file')
parser.add_option('-o', '--out_fname', dest='out_fname',
help='Name of the resulting fiff file',
default='as_data_fname')
parser.add_option('-r', '--rotation_x', dest='rotation_x', type='float',
help='Compensatory rotation about Neuromag x axis, deg',
default=2.0)
parser.add_option('-T', '--translation', dest='translation', type='str',
help='Default translation, meter',
default=(0.00, 0.02, 0.11))
parser.add_option('--ecg_ch', dest='ecg_ch', type='str',
help='4D ECG channel name',
default='E31')
parser.add_option('--eog_ch', dest='eog_ch', type='str',
help='4D EOG channel names',
default='E63,E64')
options, args = parser.parse_args()
pdf_fname = options.pdf_fname
if pdf_fname is None:
parser.print_help()
sys.exit(1)
config_fname = options.config_fname
head_shape_fname = options.head_shape_fname
out_fname = options.out_fname
rotation_x = options.rotation_x
translation = options.translation
ecg_ch = options.ecg_ch
    eog_ch = options.eog_ch.split(',')
if out_fname == 'as_data_fname':
out_fname = pdf_fname + '_raw.fif'
raw = read_raw_bti(pdf_fname=pdf_fname, config_fname=config_fname,
head_shape_fname=head_shape_fname,
rotation_x=rotation_x, translation=translation,
ecg_ch=ecg_ch, eog_ch=eog_ch)
raw.save(out_fname)
raw.close()
mne.utils.run_command_if_main()
|
import json
from typing import Dict
from .model import Config, Integration
BASE = """
\"\"\"Automatically generated by hassfest.
To update, run python3 -m script.hassfest
\"\"\"
# fmt: off
FLOWS = {}
""".strip()
UNIQUE_ID_IGNORE = {"huawei_lte", "mqtt", "adguard"}
def validate_integration(config: Config, integration: Integration):
"""Validate config flow of an integration."""
config_flow_file = integration.path / "config_flow.py"
if not config_flow_file.is_file():
if integration.manifest.get("config_flow"):
integration.add_error(
"config_flow",
"Config flows need to be defined in the file config_flow.py",
)
if integration.manifest.get("homekit"):
integration.add_error(
"config_flow",
"HomeKit information in a manifest requires a config flow to exist",
)
if integration.manifest.get("mqtt"):
integration.add_error(
"config_flow",
"MQTT information in a manifest requires a config flow to exist",
)
if integration.manifest.get("ssdp"):
integration.add_error(
"config_flow",
"SSDP information in a manifest requires a config flow to exist",
)
if integration.manifest.get("zeroconf"):
integration.add_error(
"config_flow",
"Zeroconf information in a manifest requires a config flow to exist",
)
return
config_flow = config_flow_file.read_text()
needs_unique_id = integration.domain not in UNIQUE_ID_IGNORE and (
"async_step_discovery" in config_flow
or "async_step_hassio" in config_flow
or "async_step_homekit" in config_flow
or "async_step_mqtt" in config_flow
or "async_step_ssdp" in config_flow
or "async_step_zeroconf" in config_flow
)
if not needs_unique_id:
return
has_unique_id = (
"self.async_set_unique_id" in config_flow
or "self._async_handle_discovery_without_unique_id" in config_flow
or "register_discovery_flow" in config_flow
or "AbstractOAuth2FlowHandler" in config_flow
)
if has_unique_id:
return
if config.specific_integrations:
notice_method = integration.add_warning
else:
notice_method = integration.add_error
notice_method(
"config_flow", "Config flows that are discoverable need to set a unique ID"
)
def generate_and_validate(integrations: Dict[str, Integration], config: Config):
"""Validate and generate config flow data."""
domains = []
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.manifest:
continue
if not (
integration.manifest.get("config_flow")
or integration.manifest.get("homekit")
or integration.manifest.get("mqtt")
or integration.manifest.get("ssdp")
or integration.manifest.get("zeroconf")
):
continue
validate_integration(config, integration)
domains.append(domain)
return BASE.format(json.dumps(domains, indent=4))
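# --- Illustrative output sketch (the example domains are assumptions; the real
# list is generated from the integration manifests) ---
# BASE.format(json.dumps(domains, indent=4)) produces a module such as:
#
#   """Automatically generated by hassfest.
#   To update, run python3 -m script.hassfest
#   """
#   # fmt: off
#   FLOWS = [
#       "hue",
#       "zwave"
#   ]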
def validate(integrations: Dict[str, Integration], config: Config):
"""Validate config flow file."""
config_flow_path = config.root / "homeassistant/generated/config_flows.py"
config.cache["config_flow"] = content = generate_and_validate(integrations, config)
if config.specific_integrations:
return
with open(str(config_flow_path)) as fp:
if fp.read().strip() != content:
config.add_error(
"config_flow",
"File config_flows.py is not up to date. "
"Run python3 -m script.hassfest",
fixable=True,
)
return
def generate(integrations: Dict[str, Integration], config: Config):
"""Generate config flow file."""
config_flow_path = config.root / "homeassistant/generated/config_flows.py"
with open(str(config_flow_path), "w") as fp:
fp.write(f"{config.cache['config_flow']}\n")
|
from logging import getLogger
from homeassistant.const import (
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_STATE,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_SHOW_ON_MAP,
CONF_STATE,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.core import callback
from . import AirVisualEntity
from .const import (
CONF_CITY,
CONF_COUNTRY,
CONF_INTEGRATION_TYPE,
DATA_COORDINATOR,
DOMAIN,
INTEGRATION_TYPE_GEOGRAPHY,
)
_LOGGER = getLogger(__name__)
ATTR_CITY = "city"
ATTR_COUNTRY = "country"
ATTR_POLLUTANT_SYMBOL = "pollutant_symbol"
ATTR_POLLUTANT_UNIT = "pollutant_unit"
ATTR_REGION = "region"
SENSOR_KIND_LEVEL = "air_pollution_level"
SENSOR_KIND_AQI = "air_quality_index"
SENSOR_KIND_POLLUTANT = "main_pollutant"
SENSOR_KIND_BATTERY_LEVEL = "battery_level"
SENSOR_KIND_HUMIDITY = "humidity"
SENSOR_KIND_TEMPERATURE = "temperature"
GEOGRAPHY_SENSORS = [
(SENSOR_KIND_LEVEL, "Air Pollution Level", "mdi:gauge", None),
(SENSOR_KIND_AQI, "Air Quality Index", "mdi:chart-line", "AQI"),
(SENSOR_KIND_POLLUTANT, "Main Pollutant", "mdi:chemical-weapon", None),
]
GEOGRAPHY_SENSOR_LOCALES = {"cn": "Chinese", "us": "U.S."}
NODE_PRO_SENSORS = [
(SENSOR_KIND_BATTERY_LEVEL, "Battery", DEVICE_CLASS_BATTERY, PERCENTAGE),
(SENSOR_KIND_HUMIDITY, "Humidity", DEVICE_CLASS_HUMIDITY, PERCENTAGE),
(SENSOR_KIND_TEMPERATURE, "Temperature", DEVICE_CLASS_TEMPERATURE, TEMP_CELSIUS),
]
POLLUTANT_LEVEL_MAPPING = [
{"label": "Good", "icon": "mdi:emoticon-excited", "minimum": 0, "maximum": 50},
{"label": "Moderate", "icon": "mdi:emoticon-happy", "minimum": 51, "maximum": 100},
{
"label": "Unhealthy for sensitive groups",
"icon": "mdi:emoticon-neutral",
"minimum": 101,
"maximum": 150,
},
{"label": "Unhealthy", "icon": "mdi:emoticon-sad", "minimum": 151, "maximum": 200},
{
"label": "Very Unhealthy",
"icon": "mdi:emoticon-dead",
"minimum": 201,
"maximum": 300,
},
{"label": "Hazardous", "icon": "mdi:biohazard", "minimum": 301, "maximum": 10000},
]
POLLUTANT_MAPPING = {
"co": {"label": "Carbon Monoxide", "unit": CONCENTRATION_PARTS_PER_MILLION},
"n2": {"label": "Nitrogen Dioxide", "unit": CONCENTRATION_PARTS_PER_BILLION},
"o3": {"label": "Ozone", "unit": CONCENTRATION_PARTS_PER_BILLION},
"p1": {"label": "PM10", "unit": CONCENTRATION_MICROGRAMS_PER_CUBIC_METER},
"p2": {"label": "PM2.5", "unit": CONCENTRATION_MICROGRAMS_PER_CUBIC_METER},
"s2": {"label": "Sulfur Dioxide", "unit": CONCENTRATION_PARTS_PER_BILLION},
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up AirVisual sensors based on a config entry."""
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id]
if config_entry.data[CONF_INTEGRATION_TYPE] == INTEGRATION_TYPE_GEOGRAPHY:
sensors = [
AirVisualGeographySensor(
coordinator,
config_entry,
kind,
name,
icon,
unit,
locale,
)
for locale in GEOGRAPHY_SENSOR_LOCALES
for kind, name, icon, unit in GEOGRAPHY_SENSORS
]
else:
sensors = [
AirVisualNodeProSensor(coordinator, kind, name, device_class, unit)
for kind, name, device_class, unit in NODE_PRO_SENSORS
]
async_add_entities(sensors, True)
class AirVisualGeographySensor(AirVisualEntity):
"""Define an AirVisual sensor related to geography data via the Cloud API."""
def __init__(self, coordinator, config_entry, kind, name, icon, unit, locale):
"""Initialize."""
super().__init__(coordinator)
self._attrs.update(
{
ATTR_CITY: config_entry.data.get(CONF_CITY),
ATTR_STATE: config_entry.data.get(CONF_STATE),
ATTR_COUNTRY: config_entry.data.get(CONF_COUNTRY),
}
)
self._config_entry = config_entry
self._icon = icon
self._kind = kind
self._locale = locale
self._name = name
self._state = None
self._unit = unit
@property
def available(self):
"""Return True if entity is available."""
try:
return self.coordinator.last_update_success and bool(
self.coordinator.data["current"]["pollution"]
)
except KeyError:
return False
@property
def name(self):
"""Return the name."""
return f"{GEOGRAPHY_SENSOR_LOCALES[self._locale]} {self._name}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return f"{self._config_entry.unique_id}_{self._locale}_{self._kind}"
@callback
def update_from_latest_data(self):
"""Update the entity from the latest data."""
try:
data = self.coordinator.data["current"]["pollution"]
except KeyError:
return
if self._kind == SENSOR_KIND_LEVEL:
aqi = data[f"aqi{self._locale}"]
[level] = [
i
for i in POLLUTANT_LEVEL_MAPPING
if i["minimum"] <= aqi <= i["maximum"]
]
self._state = level["label"]
self._icon = level["icon"]
elif self._kind == SENSOR_KIND_AQI:
self._state = data[f"aqi{self._locale}"]
elif self._kind == SENSOR_KIND_POLLUTANT:
symbol = data[f"main{self._locale}"]
self._state = POLLUTANT_MAPPING[symbol]["label"]
self._attrs.update(
{
ATTR_POLLUTANT_SYMBOL: symbol,
ATTR_POLLUTANT_UNIT: POLLUTANT_MAPPING[symbol]["unit"],
}
)
if CONF_LATITUDE in self._config_entry.data:
if self._config_entry.options[CONF_SHOW_ON_MAP]:
self._attrs[ATTR_LATITUDE] = self._config_entry.data[CONF_LATITUDE]
self._attrs[ATTR_LONGITUDE] = self._config_entry.data[CONF_LONGITUDE]
self._attrs.pop("lati", None)
self._attrs.pop("long", None)
else:
self._attrs["lati"] = self._config_entry.data[CONF_LATITUDE]
self._attrs["long"] = self._config_entry.data[CONF_LONGITUDE]
self._attrs.pop(ATTR_LATITUDE, None)
self._attrs.pop(ATTR_LONGITUDE, None)
class AirVisualNodeProSensor(AirVisualEntity):
"""Define an AirVisual sensor related to a Node/Pro unit."""
def __init__(self, coordinator, kind, name, device_class, unit):
"""Initialize."""
super().__init__(coordinator)
self._device_class = device_class
self._kind = kind
self._name = name
self._state = None
self._unit = unit
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self.coordinator.data["serial_number"])},
"name": self.coordinator.data["settings"]["node_name"],
"manufacturer": "AirVisual",
"model": f'{self.coordinator.data["status"]["model"]}',
"sw_version": (
f'Version {self.coordinator.data["status"]["system_version"]}'
f'{self.coordinator.data["status"]["app_version"]}'
),
}
@property
def name(self):
"""Return the name."""
node_name = self.coordinator.data["settings"]["node_name"]
return f"{node_name} Node/Pro: {self._name}"
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return f"{self.coordinator.data['serial_number']}_{self._kind}"
@callback
def update_from_latest_data(self):
"""Update the entity from the latest data."""
if self._kind == SENSOR_KIND_BATTERY_LEVEL:
self._state = self.coordinator.data["status"]["battery"]
elif self._kind == SENSOR_KIND_HUMIDITY:
self._state = self.coordinator.data["measurements"].get("humidity")
elif self._kind == SENSOR_KIND_TEMPERATURE:
self._state = self.coordinator.data["measurements"].get("temperature_C")
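# --- Illustrative sketch (not part of the integration; the helper name is
# hypothetical) ---
# The geography sensor resolves an AQI value against POLLUTANT_LEVEL_MAPPING by
# picking the single entry whose [minimum, maximum] range contains it; a
# standalone equivalent of that lookup:
def _example_lookup_pollution_level(aqi):
    """Return the (label, icon) pair for ``aqi`` (sketch only)."""
    # Exactly one mapping entry is expected to match; unpacking enforces that
    [level] = [
        entry
        for entry in POLLUTANT_LEVEL_MAPPING
        if entry["minimum"] <= aqi <= entry["maximum"]
    ]
    return level["label"], level["icon"]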
|
from homeassistant.components import mysensors
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DOMAIN,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
DICT_HA_TO_MYS = {
HVAC_MODE_AUTO: "AutoChangeOver",
HVAC_MODE_COOL: "CoolOn",
HVAC_MODE_HEAT: "HeatOn",
HVAC_MODE_OFF: "Off",
}
DICT_MYS_TO_HA = {
"AutoChangeOver": HVAC_MODE_AUTO,
"CoolOn": HVAC_MODE_COOL,
"HeatOn": HVAC_MODE_HEAT,
"Off": HVAC_MODE_OFF,
}
FAN_LIST = ["Auto", "Min", "Normal", "Max"]
OPERATION_LIST = [HVAC_MODE_OFF, HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_HEAT]
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the mysensors climate."""
mysensors.setup_mysensors_platform(
hass,
DOMAIN,
discovery_info,
MySensorsHVAC,
async_add_entities=async_add_entities,
)
class MySensorsHVAC(mysensors.device.MySensorsEntity, ClimateEntity):
"""Representation of a MySensors HVAC."""
@property
def supported_features(self):
"""Return the list of supported features."""
features = 0
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SPEED in self._values:
features = features | SUPPORT_FAN_MODE
if (
set_req.V_HVAC_SETPOINT_COOL in self._values
and set_req.V_HVAC_SETPOINT_HEAT in self._values
):
features = features | SUPPORT_TARGET_TEMPERATURE_RANGE
else:
features = features | SUPPORT_TARGET_TEMPERATURE
return features
@property
def assumed_state(self):
"""Return True if unable to access real state of entity."""
return self.gateway.optimistic
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS if self.gateway.metric else TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
value = self._values.get(self.gateway.const.SetReq.V_TEMP)
if value is not None:
value = float(value)
return value
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
set_req = self.gateway.const.SetReq
if (
set_req.V_HVAC_SETPOINT_COOL in self._values
and set_req.V_HVAC_SETPOINT_HEAT in self._values
):
return None
temp = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
if temp is None:
temp = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
return float(temp) if temp is not None else None
@property
def target_temperature_high(self):
"""Return the highbound target temperature we try to reach."""
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SETPOINT_HEAT in self._values:
temp = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
return float(temp) if temp is not None else None
@property
def target_temperature_low(self):
"""Return the lowbound target temperature we try to reach."""
set_req = self.gateway.const.SetReq
if set_req.V_HVAC_SETPOINT_COOL in self._values:
temp = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
return float(temp) if temp is not None else None
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self._values.get(self.value_type)
@property
def hvac_modes(self):
"""List of available operation modes."""
return OPERATION_LIST
@property
def fan_mode(self):
"""Return the fan setting."""
return self._values.get(self.gateway.const.SetReq.V_HVAC_SPEED)
@property
def fan_modes(self):
"""List of available fan modes."""
return FAN_LIST
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
set_req = self.gateway.const.SetReq
temp = kwargs.get(ATTR_TEMPERATURE)
low = kwargs.get(ATTR_TARGET_TEMP_LOW)
high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
heat = self._values.get(set_req.V_HVAC_SETPOINT_HEAT)
cool = self._values.get(set_req.V_HVAC_SETPOINT_COOL)
updates = []
if temp is not None:
if heat is not None:
# Set HEAT Target temperature
value_type = set_req.V_HVAC_SETPOINT_HEAT
elif cool is not None:
# Set COOL Target temperature
value_type = set_req.V_HVAC_SETPOINT_COOL
if heat is not None or cool is not None:
updates = [(value_type, temp)]
elif all(val is not None for val in (low, high, heat, cool)):
updates = [
(set_req.V_HVAC_SETPOINT_HEAT, low),
(set_req.V_HVAC_SETPOINT_COOL, high),
]
for value_type, value in updates:
self.gateway.set_child_value(
self.node_id, self.child_id, value_type, value, ack=1
)
if self.gateway.optimistic:
# Optimistically assume that device has changed state
self._values[value_type] = value
self.async_write_ha_state()
async def async_set_fan_mode(self, fan_mode):
"""Set new target temperature."""
set_req = self.gateway.const.SetReq
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_HVAC_SPEED, fan_mode, ack=1
)
if self.gateway.optimistic:
# Optimistically assume that device has changed state
self._values[set_req.V_HVAC_SPEED] = fan_mode
self.async_write_ha_state()
async def async_set_hvac_mode(self, hvac_mode):
"""Set new target temperature."""
self.gateway.set_child_value(
self.node_id,
self.child_id,
self.value_type,
DICT_HA_TO_MYS[hvac_mode],
ack=1,
)
if self.gateway.optimistic:
# Optimistically assume that device has changed state
self._values[self.value_type] = hvac_mode
self.async_write_ha_state()
async def async_update(self):
"""Update the controller with the latest value from a sensor."""
await super().async_update()
self._values[self.value_type] = DICT_MYS_TO_HA[self._values[self.value_type]]
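# Illustrative note (not part of the original module): the features a node advertises
# follow from the V_HVAC_* values it reports. A child reporting, hypothetically,
#     {V_HVAC_SPEED: "Auto", V_HVAC_SETPOINT_HEAT: "20.0", V_HVAC_SETPOINT_COOL: "24.0"}
# gets SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE_RANGE, while a child reporting
# only one setpoint gets SUPPORT_TARGET_TEMPERATURE instead of the range flag.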
|
from __future__ import absolute_import, print_function
import numpy as np
import copy
import h5py
import json
from pyspark.ml.param.shared import HasOutputCol, HasFeaturesCol, HasLabelCol
from pyspark import keyword_only
from pyspark.ml import Estimator, Model
from pyspark.sql.types import StringType, DoubleType, StructField
from keras.models import model_from_yaml
from keras.optimizers import get as get_optimizer
from .spark_model import SparkModel
from .utils.rdd_utils import from_vector
from .ml.adapter import df_to_simple_rdd
from .ml.params import *
class ElephasEstimator(Estimator, HasCategoricalLabels, HasValidationSplit, HasKerasModelConfig, HasFeaturesCol,
HasLabelCol, HasMode, HasEpochs, HasBatchSize, HasFrequency, HasVerbosity, HasNumberOfClasses,
HasNumberOfWorkers, HasOutputCol, HasLoss,
HasMetrics, HasKerasOptimizerConfig):
"""
SparkML Estimator implementation of an elephas model. This estimator takes all relevant arguments for model
compilation and training.
Returns a trained model in form of a SparkML Model, which is also a Transformer.
"""
@keyword_only
def __init__(self, **kwargs):
super(ElephasEstimator, self).__init__()
self.set_params(**kwargs)
def get_config(self):
return {'keras_model_config': self.get_keras_model_config(),
'mode': self.get_mode(),
'frequency': self.get_frequency(),
'num_workers': self.get_num_workers(),
'categorical': self.get_categorical_labels(),
'loss': self.get_loss(),
'metrics': self.get_metrics(),
'validation_split': self.get_validation_split(),
'featuresCol': self.getFeaturesCol(),
'labelCol': self.getLabelCol(),
'epochs': self.get_epochs(),
'batch_size': self.get_batch_size(),
'verbose': self.get_verbosity(),
'nb_classes': self.get_nb_classes(),
'outputCol': self.getOutputCol()}
def save(self, file_name):
f = h5py.File(file_name, mode='w')
f.attrs['distributed_config'] = json.dumps({
'class_name': self.__class__.__name__,
'config': self.get_config()
}).encode('utf8')
f.flush()
f.close()
@keyword_only
def set_params(self, **kwargs):
"""Set all provided parameters, otherwise set defaults
"""
return self._set(**kwargs)
def _fit(self, df):
"""Private fit method of the Estimator, which trains the model.
"""
simple_rdd = df_to_simple_rdd(df, categorical=self.get_categorical_labels(), nb_classes=self.get_nb_classes(),
features_col=self.getFeaturesCol(), label_col=self.getLabelCol())
simple_rdd = simple_rdd.repartition(self.get_num_workers())
keras_model = model_from_yaml(self.get_keras_model_config())
metrics = self.get_metrics()
loss = self.get_loss()
optimizer = get_optimizer(self.get_optimizer_config())
keras_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
spark_model = SparkModel(model=keras_model,
mode=self.get_mode(),
frequency=self.get_frequency(),
num_workers=self.get_num_workers())
spark_model.fit(simple_rdd,
epochs=self.get_epochs(),
batch_size=self.get_batch_size(),
verbose=self.get_verbosity(),
validation_split=self.get_validation_split())
model_weights = spark_model.master_network.get_weights()
weights = simple_rdd.ctx.broadcast(model_weights)
return ElephasTransformer(labelCol=self.getLabelCol(),
outputCol='prediction',
keras_model_config=spark_model.master_network.to_yaml(),
weights=weights)
def load_ml_estimator(file_name):
f = h5py.File(file_name, mode='r')
elephas_conf = json.loads(f.attrs.get('distributed_config'))
config = elephas_conf.get('config')
return ElephasEstimator(**config)
class ElephasTransformer(Model, HasKerasModelConfig, HasLabelCol, HasOutputCol):
"""SparkML Transformer implementation. Contains a trained model,
with which new feature data can be transformed into labels.
"""
@keyword_only
def __init__(self, **kwargs):
super(ElephasTransformer, self).__init__()
if "weights" in kwargs.keys():
# Strip model weights from parameters to init Transformer
self.weights = kwargs.pop('weights')
self.set_params(**kwargs)
@keyword_only
def set_params(self, **kwargs):
"""Set all provided parameters, otherwise set defaults
"""
return self._set(**kwargs)
def get_config(self):
return {'keras_model_config': self.get_keras_model_config(),
'labelCol': self.getLabelCol(),
'outputCol': self.getOutputCol()}
def save(self, file_name):
f = h5py.File(file_name, mode='w')
f.attrs['distributed_config'] = json.dumps({
'class_name': self.__class__.__name__,
'config': self.get_config()
}).encode('utf8')
f.flush()
f.close()
def get_model(self):
return model_from_yaml(self.get_keras_model_config())
def _transform(self, df):
"""Private transform method of a Transformer. This serves as batch-prediction method for our purposes.
"""
output_col = self.getOutputCol()
label_col = self.getLabelCol()
new_schema = copy.deepcopy(df.schema)
new_schema.add(StructField(output_col, StringType(), True))
rdd = df.rdd.coalesce(1)
features = np.asarray(
rdd.map(lambda x: from_vector(x.features)).collect())
# Note that we collect, since executing this on the rdd would require model serialization once again
model = model_from_yaml(self.get_keras_model_config())
model.set_weights(self.weights.value)
predictions = rdd.ctx.parallelize(
model.predict_classes(features)).coalesce(1)
predictions = predictions.map(lambda x: tuple(str(x)))
results_rdd = rdd.zip(predictions).map(lambda x: x[0] + x[1])
results_df = df.sql_ctx.createDataFrame(results_rdd, new_schema)
results_df = results_df.withColumn(
output_col, results_df[output_col].cast(DoubleType()))
results_df = results_df.withColumn(
label_col, results_df[label_col].cast(DoubleType()))
return results_df
def load_ml_transformer(file_name):
f = h5py.File(file_name, mode='r')
elephas_conf = json.loads(f.attrs.get('distributed_config'))
config = elephas_conf.get('config')
return ElephasTransformer(**config)
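# Illustrative usage sketch (not part of the original module). Parameter names are
# inferred from get_config()/get_optimizer_config() above; `keras_model` and
# `spark_df` are hypothetical objects supplied by the caller.
#
#     estimator = ElephasEstimator(
#         keras_model_config=keras_model.to_yaml(),
#         optimizer_config={'class_name': 'Adam', 'config': {}},
#         mode='asynchronous', frequency='epoch', num_workers=2,
#         epochs=5, batch_size=32, verbose=0,
#         categorical=True, nb_classes=10, validation_split=0.1,
#         loss='categorical_crossentropy', metrics=['acc'],
#         featuresCol='features', labelCol='label', outputCol='prediction')
#     fitted = estimator.fit(spark_df)          # returns an ElephasTransformer
#     predictions = fitted.transform(spark_df)  # adds the 'prediction' column
#     estimator.save('estimator.h5')            # reload with load_ml_estimator()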
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from nagios import NagiosStatsCollector
##########################################################################
class TestNagiosStatsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NagiosStatsCollector', {
'interval': 10,
'bin': 'true',
'use_sudo': False
})
self.collector = NagiosStatsCollector(config, None)
def test_import(self):
self.assertTrue(NagiosStatsCollector)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('nagiostat').getvalue(),
'')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
metrics = {
'AVGACTHSTLAT': 196,
'AVGACTSVCLAT': 242,
'AVGACTHSTEXT': 4037,
'AVGACTSVCEXT': 340,
'NUMHSTUP': 63,
'NUMHSTDOWN': 0,
'NUMHSTUNR': 0,
'NUMSVCOK': 1409,
'NUMSVCWARN': 3,
'NUMSVCUNKN': 0,
'NUMSVCCRIT': 7,
'NUMHSTACTCHK5M': 56,
'NUMHSTPSVCHK5M': 0,
'NUMSVCACTCHK5M': 541,
'NUMSVCPSVCHK5M': 0,
'NUMACTHSTCHECKS5M': 56,
'NUMOACTHSTCHECKS5M': 1,
'NUMCACHEDHSTCHECKS5M': 1,
'NUMSACTHSTCHECKS5M': 55,
'NUMPARHSTCHECKS5M': 55,
'NUMSERHSTCHECKS5M': 0,
'NUMPSVHSTCHECKS5M': 0,
'NUMACTSVCCHECKS5M': 1101,
'NUMOACTSVCCHECKS5M': 0,
'NUMCACHEDSVCCHECKS5M': 0,
'NUMSACTSVCCHECKS5M': 1101,
'NUMPSVSVCCHECKS5M': 0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import haffmpeg.sensor as ffmpeg_sensor
import voluptuous as vol
from homeassistant.components.binary_sensor import DEVICE_CLASS_SOUND, PLATFORM_SCHEMA
from homeassistant.components.ffmpeg import (
CONF_EXTRA_ARGUMENTS,
CONF_INITIAL_STATE,
CONF_INPUT,
CONF_OUTPUT,
DATA_FFMPEG,
)
from homeassistant.components.ffmpeg_motion.binary_sensor import FFmpegBinarySensor
from homeassistant.const import CONF_NAME
import homeassistant.helpers.config_validation as cv
CONF_PEAK = "peak"
CONF_DURATION = "duration"
CONF_RESET = "reset"
DEFAULT_NAME = "FFmpeg Noise"
DEFAULT_INIT_STATE = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_INPUT): cv.string,
vol.Optional(CONF_INITIAL_STATE, default=DEFAULT_INIT_STATE): cv.boolean,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_EXTRA_ARGUMENTS): cv.string,
vol.Optional(CONF_OUTPUT): cv.string,
vol.Optional(CONF_PEAK, default=-30): vol.Coerce(int),
vol.Optional(CONF_DURATION, default=1): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
vol.Optional(CONF_RESET, default=10): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the FFmpeg noise binary sensor."""
manager = hass.data[DATA_FFMPEG]
entity = FFmpegNoise(hass, manager, config)
async_add_entities([entity])
class FFmpegNoise(FFmpegBinarySensor):
"""A binary sensor which use FFmpeg for noise detection."""
def __init__(self, hass, manager, config):
"""Initialize FFmpeg noise binary sensor."""
super().__init__(config)
self.ffmpeg = ffmpeg_sensor.SensorNoise(
manager.binary, hass.loop, self._async_callback
)
async def _async_start_ffmpeg(self, entity_ids):
"""Start a FFmpeg instance.
This method is a coroutine.
"""
if entity_ids is not None and self.entity_id not in entity_ids:
return
self.ffmpeg.set_options(
time_duration=self._config.get(CONF_DURATION),
time_reset=self._config.get(CONF_RESET),
peak=self._config.get(CONF_PEAK),
)
await self.ffmpeg.open_sensor(
input_source=self._config.get(CONF_INPUT),
output_dest=self._config.get(CONF_OUTPUT),
extra_cmd=self._config.get(CONF_EXTRA_ARGUMENTS),
)
@property
def device_class(self):
"""Return the class of this sensor, from DEVICE_CLASSES."""
return DEVICE_CLASS_SOUND
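# Illustrative YAML configuration matching PLATFORM_SCHEMA above (assuming this
# module is registered as the "ffmpeg_noise" binary_sensor platform; the input
# URL is hypothetical):
#
#     binary_sensor:
#       - platform: ffmpeg_noise
#         input: rtsp://camera.local/stream
#         peak: -32
#         duration: 1
#         reset: 20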
|
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import docker
flags.DEFINE_integer('cloudsuite_graph_analytics_worker_mem',
2,
'Amount of memory for the worker, in gigabytes')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cloudsuite_graph_analytics'
BENCHMARK_CONFIG = """
cloudsuite_graph_analytics:
description: >
Run Cloudsuite graph analytics benchmark. Specify the number of worker
VMs with --num_vms.
vm_groups:
master:
vm_spec: *default_single_core
vm_count: 1
workers:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
"""Reads the config file and overwrites vm_count with num_vms."""
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['num_vms'].present:
config['vm_groups']['workers']['vm_count'] = FLAGS.num_vms
return config
def Prepare(benchmark_spec):
"""Install docker.
Pull the required images from DockerHub, create datasets, and
start Spark master and workers.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
master = benchmark_spec.vm_groups['master'][0]
workers = benchmark_spec.vm_groups['workers']
def PrepareCommon(vm):
if not docker.IsInstalled(vm):
vm.Install('docker')
vm.Install('cloudsuite/spark')
vm.Install('cloudsuite/twitter-dataset-graph')
vm.RemoteCommand('sudo docker create --name data '
'cloudsuite/twitter-dataset-graph')
def PrepareMaster(vm):
PrepareCommon(vm)
vm.Install('cloudsuite/graph-analytics')
master_cmd = ('sudo docker run -d --net host -e SPARK_MASTER_IP=%s '
'--name spark-master cloudsuite/spark master' %
vm.internal_ip)
vm.RemoteCommand(master_cmd)
def PrepareWorker(vm):
PrepareCommon(vm)
worker_cmd = ('sudo docker run -d --net host --volumes-from data '
'--name spark-worker cloudsuite/spark worker '
'spark://%s:7077' % master.internal_ip)
vm.RemoteCommand(worker_cmd)
target_arg_tuples = ([(PrepareWorker, [vm], {}) for vm in workers] +
[(PrepareMaster, [master], {})])
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
def Run(benchmark_spec):
"""Run the graph analytics benchmark.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
master = benchmark_spec.vm_groups['master'][0]
results = []
memory_option = ('--executor-memory %dg' %
(FLAGS.cloudsuite_graph_analytics_worker_mem))
benchmark_cmd = ('sudo docker run --rm --net host --volumes-from data '
'cloudsuite/graph-analytics %s --master spark://%s:7077' %
(memory_option, master.internal_ip))
stdout, _ = master.RemoteCommand(benchmark_cmd, should_log=True)
matches = re.findall(r'Running time = (\d+)', stdout)
if len(matches) != 1:
    raise errors.Benchmarks.RunError(
        'Expected to find benchmark execution time')
execution_time = matches[0]
results.append(sample.Sample('Benchmark execution time',
float(execution_time) / 1000, 'seconds'))
return results
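# Example of the parsing above (illustrative): a stdout line such as
#     Running time = 123456
# is captured by the regex and reported as
#     sample.Sample('Benchmark execution time', 123.456, 'seconds')
# since the benchmark prints milliseconds and the value is divided by 1000.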
def Cleanup(benchmark_spec):
"""Stop and remove docker containers. Remove images.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
master = benchmark_spec.vm_groups['master'][0]
workers = benchmark_spec.vm_groups['workers']
def CleanupCommon(vm):
vm.RemoteCommand('sudo docker rm -v data')
def CleanupMaster(vm):
vm.RemoteCommand('sudo docker stop spark-master')
vm.RemoteCommand('sudo docker rm spark-master')
CleanupCommon(vm)
def CleanupWorker(vm):
vm.RemoteCommand('sudo docker stop spark-worker')
vm.RemoteCommand('sudo docker rm spark-worker')
CleanupCommon(vm)
target_arg_tuples = ([(CleanupWorker, [vm], {}) for vm in workers] +
[(CleanupMaster, [master], {})])
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
|
import enum
import logging
import pipes
import shlex
import string
import subprocess
import sys
from typing import Iterable, Sequence
from gi.repository import Gdk, Gio, GLib, GObject, Gtk
from meld.conf import _
from meld.recent import RecentType
from meld.settings import settings
from meld.task import FifoScheduler
log = logging.getLogger(__name__)
def make_custom_editor_command(path: str, line: int = 0) -> Sequence[str]:
custom_command = settings.get_string('custom-editor-command')
fmt = string.Formatter()
replacements = [tok[1] for tok in fmt.parse(custom_command)]
if not any(replacements):
return [custom_command, path]
elif not all(r in (None, 'file', 'line') for r in replacements):
log.error("Unsupported fields found", )
return [custom_command, path]
else:
cmd = custom_command.format(file=pipes.quote(path), line=line)
return shlex.split(cmd)
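# Illustrative behaviour of make_custom_editor_command (paths and commands are
# hypothetical):
#
#     custom-editor-command = "gedit {file} +{line}"
#     make_custom_editor_command("/tmp/a b.txt", 12) -> ['gedit', '/tmp/a b.txt', '+12']
#
#     custom-editor-command = "myeditor"   # no replacement fields
#     make_custom_editor_command("/tmp/a b.txt") -> ['myeditor', '/tmp/a b.txt']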
class ComparisonState(enum.IntEnum):
# TODO: Consider use-cases for states in gedit-enum-types.c
Normal = 0
Closing = 1
SavingError = 2
class LabeledObjectMixin(GObject.GObject):
label_text = _("untitled")
tooltip_text = None
@GObject.Signal
def label_changed(self, label_text: str, tooltip_text: str) -> None:
...
class MeldDoc(LabeledObjectMixin, GObject.GObject):
"""Base class for documents in the meld application.
"""
@GObject.Signal(name='close')
def close_signal(self, exit_code: int) -> None:
...
@GObject.Signal(name='create-diff')
def create_diff_signal(
self, gfiles: object, options: object) -> None:
...
@GObject.Signal('file-changed')
def file_changed_signal(self, path: str) -> None:
...
@GObject.Signal
def tab_state_changed(self, old_state: int, new_state: int) -> None:
...
@GObject.Signal(
name='move-diff',
flags=GObject.SignalFlags.RUN_FIRST | GObject.SignalFlags.ACTION,
)
def move_diff(self, direction: int) -> None:
self.next_diff(direction)
def __init__(self) -> None:
super().__init__()
self.scheduler = FifoScheduler()
self.num_panes = 0
self.view_action_group = Gio.SimpleActionGroup()
self._state = ComparisonState.Normal
@property
def state(self) -> ComparisonState:
return self._state
@state.setter
def state(self, value: ComparisonState) -> None:
if value == self._state:
return
self.tab_state_changed.emit(self._state, value)
self._state = value
def get_comparison(self) -> RecentType:
"""Get the comparison type and URI(s) being compared"""
pass
def action_stop(self, *args) -> None:
if self.scheduler.tasks_pending():
self.scheduler.remove_task(self.scheduler.get_current_task())
def _open_files(self, selected: Iterable[str], line: int = 0) -> None:
query_attrs = ",".join((Gio.FILE_ATTRIBUTE_STANDARD_TYPE,
Gio.FILE_ATTRIBUTE_STANDARD_CONTENT_TYPE))
def os_open(path: str, uri: str):
if not path:
return
if sys.platform == "win32":
subprocess.Popen(["start", path], shell=True)
elif sys.platform == "darwin":
subprocess.Popen(["open", path])
else:
Gtk.show_uri(Gdk.Screen.get_default(), uri,
Gtk.get_current_event_time())
def open_cb(source, result, *data):
info = source.query_info_finish(result)
file_type = info.get_file_type()
path, uri = source.get_path(), source.get_uri()
if file_type == Gio.FileType.DIRECTORY:
os_open(path, uri)
elif file_type == Gio.FileType.REGULAR:
content_type = info.get_content_type()
# FIXME: Content types are broken on Windows with current gio
# If we can't access a content type, assume it's text.
if not content_type or Gio.content_type_is_a(
content_type, "text/plain"):
if settings.get_boolean('use-system-editor'):
gfile = Gio.File.new_for_path(path)
if sys.platform == "win32":
handler = gfile.query_default_handler(None)
result = handler.launch([gfile], None)
else:
uri = gfile.get_uri()
Gio.AppInfo.launch_default_for_uri(
uri, None)
else:
editor = make_custom_editor_command(path, line)
if editor:
# TODO: If the editor is badly set up, this fails
# silently
subprocess.Popen(editor)
else:
os_open(path, uri)
else:
os_open(path, uri)
else:
# TODO: Add some kind of 'failed to open' notification
pass
for f in [Gio.File.new_for_path(s) for s in selected]:
f.query_info_async(
query_attrs, Gio.FileQueryInfoFlags.NONE, GLib.PRIORITY_LOW,
None, open_cb, None)
def on_file_changed(self, filename: str):
pass
def set_labels(self, lst: Sequence[str]) -> None:
pass
def get_action_state(self, action_name: str):
action = self.view_action_group.lookup_action(action_name)
if not action:
log.error(f'No action {action_name!r} found')
return
return action.get_state().unpack()
def set_action_state(self, action_name: str, state) -> None:
# TODO: Try to do GLib.Variant things here instead of in callers
action = self.view_action_group.lookup_action(action_name)
if not action:
log.error(f'No action {action_name!r} found')
return
action.set_state(state)
def set_action_enabled(self, action_name: str, enabled: bool) -> None:
action = self.view_action_group.lookup_action(action_name)
if not action:
log.error(f'No action {action_name!r} found')
return
action.set_enabled(enabled)
def on_container_switch_in_event(self, window):
"""Called when the container app switches to this tab"""
window.insert_action_group(
'view', getattr(self, 'view_action_group', None))
if hasattr(self, "focus_pane") and self.focus_pane:
self.scheduler.add_task(self.focus_pane.grab_focus)
def on_container_switch_out_event(self, window):
"""Called when the container app switches away from this tab"""
window.insert_action_group('view', None)
# FIXME: Here and in subclasses, on_delete_event are not real GTK+
# event handlers, and should be renamed.
def on_delete_event(self) -> Gtk.ResponseType:
"""Called when the docs container is about to close.
A doc normally returns Gtk.ResponseType.OK, but may instead return
Gtk.ResponseType.CANCEL to request that the container not delete it.
"""
return Gtk.ResponseType.OK
|
import datetime
from collections import defaultdict
import natsort
import nikola.utils
from nikola.plugin_categories import Taxonomy
class Archive(Taxonomy):
"""Classify the post archives."""
name = "classify_archive"
classification_name = "archive"
overview_page_variable_name = "archive"
more_than_one_classifications_per_post = False
has_hierarchy = True
include_posts_from_subhierarchies = True
include_posts_into_hierarchy_root = True
subcategories_list_template = "list.tmpl"
template_for_classification_overview = None
always_disable_rss = True
always_disable_atom = True
apply_to_posts = True
apply_to_pages = False
minimum_post_count_per_classification_in_overview = 1
omit_empty_classifications = False
add_other_languages_variable = True
path_handler_docstrings = {
'archive_index': False,
'archive': """Link to archive path, name is the year.
Example:
link://archive/2013 => /archives/2013/index.html""",
'archive_atom': False,
'archive_rss': False,
}
def set_site(self, site):
"""Set Nikola site."""
# Sanity checks
if (site.config['CREATE_MONTHLY_ARCHIVE'] and site.config['CREATE_SINGLE_ARCHIVE']) and not site.config['CREATE_FULL_ARCHIVES']:
raise Exception('Cannot create monthly and single archives at the same time.')
# Finish setup
self.show_list_as_subcategories_list = not site.config['CREATE_FULL_ARCHIVES']
self.show_list_as_index = site.config['ARCHIVES_ARE_INDEXES']
self.template_for_single_list = "archiveindex.tmpl" if site.config['ARCHIVES_ARE_INDEXES'] else "archive.tmpl"
# Determine maximum hierarchy height
if site.config['CREATE_DAILY_ARCHIVE'] or site.config['CREATE_FULL_ARCHIVES']:
self.max_levels = 3
elif site.config['CREATE_MONTHLY_ARCHIVE']:
self.max_levels = 2
elif site.config['CREATE_SINGLE_ARCHIVE']:
self.max_levels = 0
else:
self.max_levels = 1
return super().set_site(site)
def get_implicit_classifications(self, lang):
"""Return a list of classification strings which should always appear in posts_per_classification."""
return ['']
def classify(self, post, lang):
"""Classify the given post for the given language."""
levels = [str(post.date.year).zfill(4), str(post.date.month).zfill(2), str(post.date.day).zfill(2)]
return ['/'.join(levels[:self.max_levels])]
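    # Illustrative classify() output for a hypothetical post dated 2013-02-05:
    #     max_levels == 3 (daily / full archives) -> ['2013/02/05']
    #     max_levels == 2 (monthly archives)      -> ['2013/02']
    #     max_levels == 1 (yearly archives)       -> ['2013']
    #     max_levels == 0 (single archive page)   -> ['']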
def sort_classifications(self, classifications, lang, level=None):
"""Sort the given list of classification strings."""
if level in (0, 1):
# Years or months: sort descending
classifications.sort()
classifications.reverse()
def get_classification_friendly_name(self, classification, lang, only_last_component=False):
"""Extract a friendly name from the classification."""
classification = self.extract_hierarchy(classification)
if len(classification) == 0:
return self.site.MESSAGES[lang]['Archive']
elif len(classification) == 1:
return classification[0]
elif len(classification) == 2:
if only_last_component:
date_str = "{month}"
else:
date_str = "{month_year}"
return nikola.utils.LocaleBorg().format_date_in_string(
date_str,
datetime.date(int(classification[0]), int(classification[1]), 1),
lang)
else:
if only_last_component:
return str(classification[2])
return nikola.utils.LocaleBorg().format_date_in_string(
"{month_day_year}",
datetime.date(int(classification[0]), int(classification[1]), int(classification[2])),
lang)
def get_path(self, classification, lang, dest_type='page'):
"""Return a path for the given classification."""
components = [self.site.config['ARCHIVE_PATH'](lang)]
if classification:
components.extend(classification)
add_index = 'always'
else:
components.append(self.site.config['ARCHIVE_FILENAME'](lang))
add_index = 'never'
return [_f for _f in components if _f], add_index
def extract_hierarchy(self, classification):
"""Given a classification, return a list of parts in the hierarchy."""
return classification.split('/') if classification else []
def recombine_classification_from_hierarchy(self, hierarchy):
"""Given a list of parts in the hierarchy, return the classification string."""
return '/'.join(hierarchy)
def provide_context_and_uptodate(self, classification, lang, node=None):
"""Provide data for the context and the uptodate list for the list of the given classifiation."""
hierarchy = self.extract_hierarchy(classification)
kw = {
"messages": self.site.MESSAGES,
}
page_kind = "list"
if self.show_list_as_index:
if not self.show_list_as_subcategories_list or len(hierarchy) == self.max_levels:
page_kind = "index"
if len(hierarchy) == 0:
title = kw["messages"][lang]["Archive"]
elif len(hierarchy) == 1:
title = kw["messages"][lang]["Posts for year %s"] % hierarchy[0]
elif len(hierarchy) == 2:
title = nikola.utils.LocaleBorg().format_date_in_string(
kw["messages"][lang]["Posts for {month_year}"],
datetime.date(int(hierarchy[0]), int(hierarchy[1]), 1),
lang)
elif len(hierarchy) == 3:
title = nikola.utils.LocaleBorg().format_date_in_string(
kw["messages"][lang]["Posts for {month_day_year}"],
datetime.date(int(hierarchy[0]), int(hierarchy[1]), int(hierarchy[2])),
lang)
else:
raise Exception("Cannot interpret classification {}!".format(repr(classification)))
context = {
"title": title,
"pagekind": [page_kind, "archive_page"],
"create_archive_navigation": self.site.config["CREATE_ARCHIVE_NAVIGATION"],
"archive_name": classification
}
# Generate links for hierarchies
if context["create_archive_navigation"]:
if hierarchy:
# Up level link makes sense only if this is not the top-level
# page (hierarchy is empty)
parent = '/'.join(hierarchy[:-1])
context["up_archive"] = self.site.link('archive', parent, lang)
context["up_archive_name"] = self.get_classification_friendly_name(parent, lang)
else:
context["up_archive"] = None
context["up_archive_name"] = None
nodelevel = len(hierarchy)
flat_samelevel = self.archive_navigation[lang][nodelevel]
idx = flat_samelevel.index(classification)
if idx == -1:
raise Exception("Cannot find classification {0} in flat hierarchy!".format(classification))
previdx, nextidx = idx - 1, idx + 1
            # If the previous index is -1, or the next index equals the length of the list, the previous/next archive does not exist.
context["previous_archive"] = self.site.link('archive', flat_samelevel[previdx], lang) if previdx != -1 else None
context["previous_archive_name"] = self.get_classification_friendly_name(flat_samelevel[previdx], lang) if previdx != -1 else None
context["next_archive"] = self.site.link('archive', flat_samelevel[nextidx], lang) if nextidx != len(flat_samelevel) else None
context["next_archive_name"] = self.get_classification_friendly_name(flat_samelevel[nextidx], lang) if nextidx != len(flat_samelevel) else None
context["archive_nodelevel"] = nodelevel
context["has_archive_navigation"] = bool(context["previous_archive"] or context["up_archive"] or context["next_archive"])
else:
context["has_archive_navigation"] = False
kw.update(context)
return context, kw
def postprocess_posts_per_classification(self, posts_per_classification_per_language, flat_hierarchy_per_lang=None, hierarchy_lookup_per_lang=None):
"""Rearrange, modify or otherwise use the list of posts per classification and per language."""
# Build a lookup table for archive navigation, if we’ll need one.
if self.site.config['CREATE_ARCHIVE_NAVIGATION']:
if flat_hierarchy_per_lang is None:
raise ValueError('Archives need flat_hierarchy_per_lang')
self.archive_navigation = {}
for lang, flat_hierarchy in flat_hierarchy_per_lang.items():
self.archive_navigation[lang] = defaultdict(list)
for node in flat_hierarchy:
if not self.site.config["SHOW_UNTRANSLATED_POSTS"]:
if not [x for x in posts_per_classification_per_language[lang][node.classification_name] if x.is_translation_available(lang)]:
continue
self.archive_navigation[lang][len(node.classification_path)].append(node.classification_name)
# We need to sort it. Natsort means it’s year 10000 compatible!
for k, v in self.archive_navigation[lang].items():
self.archive_navigation[lang][k] = natsort.natsorted(v, alg=natsort.ns.F | natsort.ns.IC)
return super().postprocess_posts_per_classification(posts_per_classification_per_language, flat_hierarchy_per_lang, hierarchy_lookup_per_lang)
def should_generate_classification_page(self, classification, post_list, lang):
"""Only generates list of posts for classification if this function returns True."""
return classification == '' or len(post_list) > 0
def get_other_language_variants(self, classification, lang, classifications_per_language):
"""Return a list of variants of the same classification in other languages."""
return [(other_lang, classification) for other_lang, lookup in classifications_per_language.items() if classification in lookup and other_lang != lang]
|
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import docker
flags.DEFINE_integer('cloudsuite_web_serving_pm_max_children', 150,
                     'The maximum number of php-fpm pm children.', lower_bound=8)
flags.DEFINE_integer('cloudsuite_web_serving_load_scale', 100,
'The maximum number of concurrent users '
'that can be simulated.', lower_bound=2)
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cloudsuite_web_serving'
BENCHMARK_CONFIG = """
cloudsuite_web_serving:
description: >
Run Cloudsuite web serving benchmark.
vm_groups:
backend:
vm_spec: *default_single_core
vm_count: 1
frontend:
vm_spec: *default_single_core
vm_count: 1
client:
vm_spec: *default_single_core
vm_count: 1
"""
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
if FLAGS['num_vms'].present and FLAGS.num_vms < 3:
raise ValueError('Web Serving requires at least 3 VMs')
def Prepare(benchmark_spec):
"""Install docker. Pull images. Start nginx, mysql, and memcached.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
frontend = benchmark_spec.vm_groups['frontend'][0]
backend = benchmark_spec.vm_groups['backend'][0]
client = benchmark_spec.vm_groups['client'][0]
def PrepareCommon(vm):
if not docker.IsInstalled(vm):
vm.Install('docker')
vm.RemoteCommand("sudo sh -c 'echo %s web_server >>/etc/hosts'" %
frontend.internal_ip)
vm.RemoteCommand("sudo sh -c 'echo %s memcache_server >>/etc/hosts'" %
frontend.internal_ip)
vm.RemoteCommand("sudo sh -c 'echo %s mysql_server >>/etc/hosts'" %
backend.internal_ip)
vm.RemoteCommand("sudo sh -c 'echo %s faban_client >>/etc/hosts'" %
client.internal_ip)
def PrepareFrontend(vm):
PrepareCommon(vm)
vm.Install('cloudsuite/web-serving:web_server')
vm.Install('cloudsuite/web-serving:memcached_server')
vm.RemoteCommand('sudo docker run -dt --net host --name web_server '
'cloudsuite/web-serving:web_server '
'/etc/bootstrap.sh mysql_server memcache_server %s' %
(FLAGS.cloudsuite_web_serving_pm_max_children))
vm.RemoteCommand('sudo docker run -dt --net host --name memcache_server '
'cloudsuite/web-serving:memcached_server')
def PrepareBackend(vm):
PrepareCommon(vm)
vm.Install('cloudsuite/web-serving:db_server')
vm.RemoteCommand('sudo docker run -dt --net host --name mysql_server '
'cloudsuite/web-serving:db_server web_server')
def PrepareClient(vm):
PrepareCommon(vm)
vm.Install('cloudsuite/web-serving:faban_client')
target_arg_tuples = [(PrepareFrontend, [frontend], {}),
(PrepareBackend, [backend], {}),
(PrepareClient, [client], {})]
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
def Run(benchmark_spec):
"""Run the web serving benchmark.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
frontend = benchmark_spec.vm_groups['frontend'][0]
client = benchmark_spec.vm_groups['client'][0]
results = []
cmd = ('sudo docker run --net host --name faban_client '
'cloudsuite/web-serving:faban_client %s %s' %
(frontend.internal_ip, FLAGS.cloudsuite_web_serving_load_scale))
stdout, _ = client.RemoteCommand(cmd)
# The output contains a faban summary xml.
# Example: http://faban.org/1.1/docs/guide/driver/samplesummary_xml.html
match = re.search(r'\<metric unit="ops/sec"\>(.+)\</metric\>',
stdout, re.MULTILINE)
if match:
results.append(sample.Sample('Throughput', float(match.group(1)), 'ops/s'))
matches = re.findall(r'\<avg\>(.+)\</avg\>', stdout, re.MULTILINE)
if len(matches) > 0:
sum_avg = 0.0
for m in matches:
sum_avg += float(m)
avg_avg = 1000 * sum_avg / len(matches)
results.append(sample.Sample('Average response time', avg_avg, 'ms'))
return results
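# Example of the parsing above (illustrative): the Faban summary XML contains lines
# such as
#     <metric unit="ops/sec">123.4</metric>
#     <avg>0.251</avg>
# The metric becomes sample.Sample('Throughput', 123.4, 'ops/s'); the <avg> values
# are averaged and scaled by 1000 (seconds to milliseconds) for the
# 'Average response time' sample.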
def Cleanup(benchmark_spec):
"""Stop and remove docker containers. Remove images.
Args:
benchmark_spec: The benchmark specification. Contains all data
that is required to run the benchmark.
"""
frontend = benchmark_spec.vm_groups['frontend'][0]
backend = benchmark_spec.vm_groups['backend'][0]
client = benchmark_spec.vm_groups['client'][0]
def CleanupFrontend(vm):
vm.RemoteCommand('sudo docker stop memcache_server')
vm.RemoteCommand('sudo docker rm memcache_server')
vm.RemoteCommand('sudo docker stop web_server')
vm.RemoteCommand('sudo docker rm web_server')
def CleanupBackend(vm):
vm.RemoteCommand('sudo docker stop mysql_server')
vm.RemoteCommand('sudo docker rm mysql_server')
def CleanupClient(vm):
vm.RemoteCommand('sudo docker rm faban_client')
target_arg_tuples = [(CleanupFrontend, [frontend], {}),
(CleanupBackend, [backend], {}),
(CleanupClient, [client], {})]
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
|
from weblate.trans.management.commands import WeblateLangCommand
from weblate.trans.tasks import perform_load
class Command(WeblateLangCommand):
help = "(re)loads translations from disk"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--force",
action="store_true",
default=False,
help="Force rereading files even when they should be up to date",
)
parser.add_argument(
"--foreground",
action="store_true",
default=False,
help="Perform load in foreground (by default backgroud task is used)",
)
def handle(self, *args, **options):
langs = None
if options["lang"] is not None:
langs = options["lang"].split(",")
if options["foreground"]:
loader = perform_load
else:
loader = perform_load.delay
for component in self.get_components(**options):
loader(component.pk, force=options["force"], langs=langs)
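# Illustrative invocation (assuming this module is installed as the "loadpo"
# management command, as the help string suggests):
#
#     ./manage.py loadpo --force --lang cs,de
#
# Without --foreground the work is dispatched via perform_load.delay as a
# background task; with --foreground it runs synchronously in this process.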
|
from homeassistant.components import binary_sensor, tellduslive
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .entry import TelldusLiveEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up tellduslive sensors dynamically."""
async def async_discover_binary_sensor(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[tellduslive.DOMAIN]
async_add_entities([TelldusLiveSensor(client, device_id)])
async_dispatcher_connect(
hass,
tellduslive.TELLDUS_DISCOVERY_NEW.format(
binary_sensor.DOMAIN, tellduslive.DOMAIN
),
async_discover_binary_sensor,
)
class TelldusLiveSensor(TelldusLiveEntity, BinarySensorEntity):
"""Representation of a Tellstick sensor."""
@property
def is_on(self):
"""Return true if switch is on."""
return self.device.is_on
|
import json
from unittest.mock import patch
import pytest
from homeassistant.components.mobile_app.const import CONF_SECRET, DOMAIN
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.setup import async_setup_component
from .const import REGISTER, REGISTER_CLEARTEXT, RENDER_TEMPLATE
from tests.common import mock_coro
async def test_registration(hass, hass_client, hass_admin_user):
"""Test that registrations happen."""
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
api_client = await hass_client()
with patch(
"homeassistant.components.person.async_add_user_device_tracker",
spec=True,
return_value=mock_coro(),
) as add_user_dev_track:
resp = await api_client.post(
"/api/mobile_app/registrations", json=REGISTER_CLEARTEXT
)
assert len(add_user_dev_track.mock_calls) == 1
assert add_user_dev_track.mock_calls[0][1][1] == hass_admin_user.id
assert add_user_dev_track.mock_calls[0][1][2] == "device_tracker.test_1"
assert resp.status == 201
register_json = await resp.json()
assert CONF_WEBHOOK_ID in register_json
assert CONF_SECRET in register_json
entries = hass.config_entries.async_entries(DOMAIN)
assert entries[0].unique_id == "io.homeassistant.mobile_app_test-mock-device-id"
assert entries[0].data["device_id"] == REGISTER_CLEARTEXT["device_id"]
assert entries[0].data["app_data"] == REGISTER_CLEARTEXT["app_data"]
assert entries[0].data["app_id"] == REGISTER_CLEARTEXT["app_id"]
assert entries[0].data["app_name"] == REGISTER_CLEARTEXT["app_name"]
assert entries[0].data["app_version"] == REGISTER_CLEARTEXT["app_version"]
assert entries[0].data["device_name"] == REGISTER_CLEARTEXT["device_name"]
assert entries[0].data["manufacturer"] == REGISTER_CLEARTEXT["manufacturer"]
assert entries[0].data["model"] == REGISTER_CLEARTEXT["model"]
assert entries[0].data["os_name"] == REGISTER_CLEARTEXT["os_name"]
assert entries[0].data["os_version"] == REGISTER_CLEARTEXT["os_version"]
assert (
entries[0].data["supports_encryption"]
== REGISTER_CLEARTEXT["supports_encryption"]
)
async def test_registration_encryption(hass, hass_client):
"""Test that registrations happen."""
try:
from nacl.encoding import Base64Encoder
from nacl.secret import SecretBox
except (ImportError, OSError):
pytest.skip("libnacl/libsodium is not installed")
return
await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
api_client = await hass_client()
resp = await api_client.post("/api/mobile_app/registrations", json=REGISTER)
assert resp.status == 201
register_json = await resp.json()
keylen = SecretBox.KEY_SIZE
key = register_json[CONF_SECRET].encode("utf-8")
key = key[:keylen]
key = key.ljust(keylen, b"\0")
payload = json.dumps(RENDER_TEMPLATE["data"]).encode("utf-8")
data = SecretBox(key).encrypt(payload, encoder=Base64Encoder).decode("utf-8")
container = {"type": "render_template", "encrypted": True, "encrypted_data": data}
resp = await api_client.post(
"/api/webhook/{}".format(register_json[CONF_WEBHOOK_ID]), json=container
)
assert resp.status == 200
webhook_json = await resp.json()
assert "encrypted_data" in webhook_json
decrypted_data = SecretBox(key).decrypt(
webhook_json["encrypted_data"], encoder=Base64Encoder
)
decrypted_data = decrypted_data.decode("utf-8")
assert json.loads(decrypted_data) == {"one": "Hello world"}
|
from pydexcom import SessionError
from homeassistant.components.dexcom.const import MMOL_L
from homeassistant.const import (
CONF_UNIT_OF_MEASUREMENT,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from tests.async_mock import patch
from tests.components.dexcom import GLUCOSE_READING, init_integration
async def test_sensors(hass):
"""Test we get sensor data."""
await init_integration(hass)
test_username_glucose_value = hass.states.get(
"sensor.dexcom_test_username_glucose_value"
)
assert test_username_glucose_value.state == str(GLUCOSE_READING.value)
test_username_glucose_trend = hass.states.get(
"sensor.dexcom_test_username_glucose_trend"
)
assert test_username_glucose_trend.state == GLUCOSE_READING.trend_description
async def test_sensors_unknown(hass):
"""Test we handle sensor state unknown."""
await init_integration(hass)
with patch(
"homeassistant.components.dexcom.Dexcom.get_current_glucose_reading",
return_value=None,
):
await hass.helpers.entity_component.async_update_entity(
"sensor.dexcom_test_username_glucose_value"
)
await hass.helpers.entity_component.async_update_entity(
"sensor.dexcom_test_username_glucose_trend"
)
test_username_glucose_value = hass.states.get(
"sensor.dexcom_test_username_glucose_value"
)
assert test_username_glucose_value.state == STATE_UNKNOWN
test_username_glucose_trend = hass.states.get(
"sensor.dexcom_test_username_glucose_trend"
)
assert test_username_glucose_trend.state == STATE_UNKNOWN
async def test_sensors_update_failed(hass):
"""Test we handle sensor update failed."""
await init_integration(hass)
with patch(
"homeassistant.components.dexcom.Dexcom.get_current_glucose_reading",
side_effect=SessionError,
):
await hass.helpers.entity_component.async_update_entity(
"sensor.dexcom_test_username_glucose_value"
)
await hass.helpers.entity_component.async_update_entity(
"sensor.dexcom_test_username_glucose_trend"
)
test_username_glucose_value = hass.states.get(
"sensor.dexcom_test_username_glucose_value"
)
assert test_username_glucose_value.state == STATE_UNAVAILABLE
test_username_glucose_trend = hass.states.get(
"sensor.dexcom_test_username_glucose_trend"
)
assert test_username_glucose_trend.state == STATE_UNAVAILABLE
async def test_sensors_options_changed(hass):
"""Test we handle sensor unavailable."""
entry = await init_integration(hass)
test_username_glucose_value = hass.states.get(
"sensor.dexcom_test_username_glucose_value"
)
assert test_username_glucose_value.state == str(GLUCOSE_READING.value)
test_username_glucose_trend = hass.states.get(
"sensor.dexcom_test_username_glucose_trend"
)
assert test_username_glucose_trend.state == GLUCOSE_READING.trend_description
with patch(
"homeassistant.components.dexcom.Dexcom.get_current_glucose_reading",
return_value=GLUCOSE_READING,
), patch(
"homeassistant.components.dexcom.Dexcom.create_session",
return_value="test_session_id",
):
hass.config_entries.async_update_entry(
entry=entry,
options={CONF_UNIT_OF_MEASUREMENT: MMOL_L},
)
await hass.async_block_till_done()
assert entry.options == {CONF_UNIT_OF_MEASUREMENT: MMOL_L}
test_username_glucose_value = hass.states.get(
"sensor.dexcom_test_username_glucose_value"
)
assert test_username_glucose_value.state == str(GLUCOSE_READING.mmol_l)
test_username_glucose_trend = hass.states.get(
"sensor.dexcom_test_username_glucose_trend"
)
assert test_username_glucose_trend.state == GLUCOSE_READING.trend_description
|
import voluptuous as vol
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_validation as cv
from . import config_flow
from .const import DOMAIN
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up Ambiclimate components."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
config_flow.register_flow_implementation(
hass, conf[CONF_CLIENT_ID], conf[CONF_CLIENT_SECRET]
)
return True
async def async_setup_entry(hass, entry):
"""Set up Ambiclimate from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "climate")
)
return True
|
from pygal.graph.box import Box
def test_quartiles():
"""Test box points for the 1.5IQR computation method"""
a = [-2.0, 3.0, 4.0, 5.0, 8.0] # odd test data
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
a, mode='1.5IQR'
)
assert q1 == 7.0 / 4.0
assert q2 == 4.0
assert q3 == 23 / 4.0
assert q0 == 7.0 / 4.0 - 6.0 # q1 - 1.5 * iqr
assert q4 == 23 / 4.0 + 6.0 # q3 + 1.5 * iqr
b = [1.0, 4.0, 6.0, 8.0] # even test data
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
b, mode='1.5IQR'
)
assert q2 == 5.0
c = [2.0, None, 4.0, 6.0, None] # odd with None elements
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
c, mode='1.5IQR'
)
assert q2 == 4.0
d = [4]
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
d, mode='1.5IQR'
)
assert q0 == 4
assert q1 == 4
assert q2 == 4
assert q3 == 4
assert q4 == 4
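# Worked numbers for the odd-length data `a` above (these follow directly from the
# asserts): q1 = 1.75, q2 = 4.0, q3 = 5.75, so IQR = q3 - q1 = 4.0 and the whiskers
# are q0 = q1 - 1.5 * IQR = -4.25 and q4 = q3 + 1.5 * IQR = 11.75.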
def test_quartiles_min_extremes():
"""Test box points for the extremes computation method"""
a = [-2.0, 3.0, 4.0, 5.0, 8.0] # odd test data
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
a, mode='extremes'
)
assert q1 == 7.0 / 4.0
assert q2 == 4.0
assert q3 == 23 / 4.0
assert q0 == -2.0 # min
assert q4 == 8.0 # max
b = [1.0, 4.0, 6.0, 8.0] # even test data
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
b, mode='extremes'
)
assert q2 == 5.0
c = [2.0, None, 4.0, 6.0, None] # odd with None elements
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
c, mode='extremes'
)
assert q2 == 4.0
d = [4]
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
d, mode='extremes'
)
assert q0 == 4
assert q1 == 4
assert q2 == 4
assert q3 == 4
assert q4 == 4
def test_quartiles_tukey():
"""Test box points for the tukey computation method"""
a = [] # empty data
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
a, mode='tukey'
)
assert min_s == q0 == q1 == q2 == q3 == q4 == 0
assert outliers == []
# https://en.wikipedia.org/wiki/Quartile example 1
b = [6, 7, 15, 36, 39, 40, 41, 42, 43, 47, 49]
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
b, mode='tukey'
)
assert min_s == q0 == 6
assert q1 == 20.25
assert q2 == 40
assert q3 == 42.75
assert max_s == q4 == 49
assert outliers == []
# previous test with added outlier 75
c = [6, 7, 15, 36, 39, 40, 41, 42, 43, 47, 49, 75]
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
c, mode='tukey'
)
assert min_s == q0 == 6
assert q1 == 25.5
assert q2 == (40 + 41) / 2.0
assert q3 == 45
assert max_s == 75
assert outliers == [75]
# one more outlier, 77
c = [6, 7, 15, 36, 39, 40, 41, 42, 43, 47, 49, 75, 77]
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
c, mode='tukey'
)
assert min_s == q0 == 6
assert q1 == 30.75
assert q2 == 41
assert q3 == 47.5
assert max_s == 77
assert 75 in outliers
assert 77 in outliers
def test_quartiles_stdev():
"""Test box points for the stdev computation method"""
a = [
35, 42, 35, 41, 36, 6, 12, 51, 33, 27, 46, 36, 44, 53, 75, 46, 16, 51,
45, 29, 25, 26, 54, 61, 27, 40, 23, 34, 51, 37
]
SD = 14.67
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
a, mode='stdev'
)
assert min_s == min(a)
assert max_s == max(a)
assert q2 == 36.5
assert q4 <= q2 + SD
assert q0 >= q2 - SD
assert all(n in outliers for n in [6, 12, 16, 53, 54, 61, 75])
    b = [5]  # test for possible zero division
(min_s, q0, q1, q2, q3, q4, max_s), outliers = Box._box_points(
b, mode='stdev'
)
assert min_s == q0 == q1 == q2 == q3 == q4 == max_s == b[0]
assert outliers == []
def test_simple_box():
"""Simple box test"""
box = Box()
box.add('test1', [-1, 2, 3, 3.1, 3.2, 4, 5])
box.add('test2', [2, 3, 5, 6, 6, 4])
box.title = 'Box test'
q = box.render_pyquery()
assert len(q(".axis.y")) == 1
assert len(q(".legend")) == 2
assert len(q(".plot .series rect")) == 2
|
import numpy as np
import os
from six.moves.urllib import request
import unittest
from chainer import testing
from chainercv.evaluations import eval_instance_segmentation_coco
try:
import pycocotools # NOQA
_available = True
except ImportError:
_available = False
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalInstanceSegmentationCOCOSimple(unittest.TestCase):
def setUp(self):
self.pred_masks = np.array(
[[[[True, True], [True, True]],
[[True, False], [False, True]]]])
self.pred_labels = np.array([[0, 0]])
self.pred_scores = np.array([[0.8, 0.9]])
self.gt_masks = np.array([[[[True, True], [True, True]]]])
self.gt_labels = np.array([[0, 0]])
def test_crowded(self):
result = eval_instance_segmentation_coco(
self.pred_masks, self.pred_labels, self.pred_scores,
self.gt_masks, self.gt_labels, gt_crowdeds=[[True]])
# When the only ground truth is crowded, nothing is evaluated.
# In that case, all the results are nan.
self.assertTrue(
np.isnan(result['map/iou=0.50:0.95/area=all/max_dets=100']))
self.assertTrue(
np.isnan(result['map/iou=0.50/area=all/max_dets=100']))
self.assertTrue(
np.isnan(result['map/iou=0.75/area=all/max_dets=100']))
def test_area_not_supplied(self):
result = eval_instance_segmentation_coco(
self.pred_masks, self.pred_labels, self.pred_scores,
self.gt_masks, self.gt_labels)
self.assertFalse(
'map/iou=0.50:0.95/area=small/max_dets=100' in result)
self.assertFalse(
'map/iou=0.50:0.95/area=medium/max_dets=100' in result)
self.assertFalse(
'map/iou=0.50:0.95/area=large/max_dets=100' in result)
def test_area_specified(self):
result = eval_instance_segmentation_coco(
self.pred_masks, self.pred_labels, self.pred_scores,
self.gt_masks, self.gt_labels, gt_areas=[[2048]])
self.assertFalse(
np.isnan(result['map/iou=0.50:0.95/area=medium/max_dets=100']))
self.assertTrue(
np.isnan(result['map/iou=0.50:0.95/area=small/max_dets=100']))
self.assertTrue(
np.isnan(result['map/iou=0.50:0.95/area=large/max_dets=100']))
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalInstanceSegmentationCOCOSomeClassNonExistent(unittest.TestCase):
def setUp(self):
self.pred_masks = np.array(
[[[[True, True], [True, True]],
[[True, False], [False, True]]]])
self.pred_labels = np.array([[1, 2]])
self.pred_scores = np.array([[0.8, 0.9]])
self.gt_masks = np.array([[[[True, True], [True, True]]]])
self.gt_labels = np.array([[1, 2]])
def test(self):
result = eval_instance_segmentation_coco(
self.pred_masks, self.pred_labels, self.pred_scores,
self.gt_masks, self.gt_labels)
self.assertEqual(
result['ap/iou=0.50:0.95/area=all/max_dets=100'].shape, (3,))
self.assertTrue(
np.isnan(result['ap/iou=0.50:0.95/area=all/max_dets=100'][0]))
self.assertEqual(
np.nanmean(result['ap/iou=0.50:0.95/area=all/max_dets=100'][1:]),
result['map/iou=0.50:0.95/area=all/max_dets=100'])
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalInstanceSegmentationCOCOEmptyPred(unittest.TestCase):
def setUp(self):
self.pred_masks = np.zeros((1, 0, 2, 2), dtype=np.bool)
self.pred_labels = np.zeros((1, 0), dtype=np.int32)
self.pred_scores = np.zeros((1, 0), dtype=np.float32)
self.gt_masks = np.array([[[[True, True], [True, True]]]])
self.gt_labels = np.array([[1, 2]])
def test(self):
result = eval_instance_segmentation_coco(
self.pred_masks, self.pred_labels, self.pred_scores,
self.gt_masks, self.gt_labels)
self.assertEqual(
result['ap/iou=0.50:0.95/area=all/max_dets=100'].shape, (2,))
self.assertTrue(
np.isnan(result['ap/iou=0.50:0.95/area=all/max_dets=100'][0]))
self.assertEqual(
np.nanmean(result['ap/iou=0.50:0.95/area=all/max_dets=100'][1:]),
result['map/iou=0.50:0.95/area=all/max_dets=100'])
@unittest.skipUnless(_available, 'pycocotools is not installed')
class TestEvalInstanceSegmentationCOCO(unittest.TestCase):
@classmethod
def setUpClass(cls):
base_url = 'https://chainercv-models.preferred.jp/tests'
cls.dataset = np.load(request.urlretrieve(os.path.join(
base_url,
'eval_instance_segmentation_coco_dataset_2018_07_06.npz'))[0],
encoding='latin1',
allow_pickle=True)
cls.result = np.load(request.urlretrieve(os.path.join(
base_url,
'eval_instance_segmentation_coco_result_2019_02_12.npz'))[0],
encoding='latin1',
allow_pickle=True)
def test_eval_instance_segmentation_coco(self):
pred_masks = self.result['masks']
pred_labels = self.result['labels']
pred_scores = self.result['scores']
gt_masks = self.dataset['masks']
gt_labels = self.dataset['labels']
gt_crowdeds = self.dataset['crowdeds']
gt_areas = self.dataset['areas']
result = eval_instance_segmentation_coco(
pred_masks, pred_labels, pred_scores,
gt_masks, gt_labels, gt_areas, gt_crowdeds)
expected = {
'map/iou=0.50:0.95/area=all/max_dets=100': 0.32170935,
'map/iou=0.50/area=all/max_dets=100': 0.56469292,
'map/iou=0.75/area=all/max_dets=100': 0.30133106,
'map/iou=0.50:0.95/area=small/max_dets=100': 0.38737403,
'map/iou=0.50:0.95/area=medium/max_dets=100': 0.31018272,
'map/iou=0.50:0.95/area=large/max_dets=100': 0.32693391,
'mar/iou=0.50:0.95/area=all/max_dets=1': 0.27037258,
'mar/iou=0.50:0.95/area=all/max_dets=10': 0.41759154,
'mar/iou=0.50:0.95/area=all/max_dets=100': 0.41898236,
'mar/iou=0.50:0.95/area=small/max_dets=100': 0.46944986,
'mar/iou=0.50:0.95/area=medium/max_dets=100': 0.37675923,
'mar/iou=0.50:0.95/area=large/max_dets=100': 0.38147151
}
non_existent_labels = np.setdiff1d(
np.arange(max(result['existent_labels'])),
result['existent_labels'])
for key, item in expected.items():
non_mean_key = key[1:]
self.assertIsInstance(result[non_mean_key], np.ndarray)
self.assertEqual(result[non_mean_key].shape, (80,))
self.assertTrue(
np.all(np.isnan(result[non_mean_key][non_existent_labels])))
np.testing.assert_almost_equal(
result[key], expected[key], decimal=5)
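# Editor's sketch (not part of the original ChainerCV tests): a small helper
# illustrating the relationship the simple tests above assert between the
# per-class 'ap/...' arrays and the scalar 'map/...' values. Classes absent
# from the ground truth get NaN entries, and np.nanmean ignores them, so
# averaging the per-class array reproduces the reported mAP. The helper name
# and the tolerant np.isclose check are this sketch's own choices.
def _sketch_map_is_nanmean_of_per_class_ap(result):
    per_class_ap = result['ap/iou=0.50:0.95/area=all/max_dets=100']
    mean_ap = result['map/iou=0.50:0.95/area=all/max_dets=100']
    return bool(np.isclose(np.nanmean(per_class_ap), mean_ap))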
testing.run_module(__name__, __file__)
|
import asyncio
from datetime import timedelta
import logging
import aiohttp
import voluptuous as vol
from waqiasync import WaqiClient
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_TEMPERATURE,
ATTR_TIME,
CONF_TOKEN,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_DOMINENTPOL = "dominentpol"
ATTR_HUMIDITY = "humidity"
ATTR_NITROGEN_DIOXIDE = "nitrogen_dioxide"
ATTR_OZONE = "ozone"
ATTR_PM10 = "pm_10"
ATTR_PM2_5 = "pm_2_5"
ATTR_PRESSURE = "pressure"
ATTR_SULFUR_DIOXIDE = "sulfur_dioxide"
KEY_TO_ATTR = {
"pm25": ATTR_PM2_5,
"pm10": ATTR_PM10,
"h": ATTR_HUMIDITY,
"p": ATTR_PRESSURE,
"t": ATTR_TEMPERATURE,
"o3": ATTR_OZONE,
"no2": ATTR_NITROGEN_DIOXIDE,
"so2": ATTR_SULFUR_DIOXIDE,
}
ATTRIBUTION = "Data provided by the World Air Quality Index project"
CONF_LOCATIONS = "locations"
CONF_STATIONS = "stations"
SCAN_INTERVAL = timedelta(minutes=5)
TIMEOUT = 10
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_STATIONS): cv.ensure_list,
vol.Required(CONF_TOKEN): cv.string,
vol.Required(CONF_LOCATIONS): cv.ensure_list,
}
)
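# Editor's sketch (hypothetical, not taken from the integration docs): a
# configuration.yaml entry that satisfies the schema above, assuming this
# platform is registered under the name "waqi". "token" and "locations" are
# required; "stations" is an optional filter matched against a discovered
# station's uid, url or name (see the filter in async_setup_platform below).
# All values are placeholders.
#
#   sensor:
#     - platform: waqi
#       token: YOUR_WAQI_API_TOKEN
#       locations:
#         - <search term, e.g. a city name>
#       stations:
#         - <optional station uid, url or name>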
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the requested World Air Quality Index locations."""
token = config.get(CONF_TOKEN)
station_filter = config.get(CONF_STATIONS)
locations = config.get(CONF_LOCATIONS)
client = WaqiClient(token, async_get_clientsession(hass), timeout=TIMEOUT)
dev = []
try:
for location_name in locations:
stations = await client.search(location_name)
_LOGGER.debug("The following stations were returned: %s", stations)
for station in stations:
waqi_sensor = WaqiSensor(client, station)
if (
not station_filter
or {
waqi_sensor.uid,
waqi_sensor.url,
waqi_sensor.station_name,
}
& set(station_filter)
):
dev.append(waqi_sensor)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
) as err:
_LOGGER.exception("Failed to connect to WAQI servers")
raise PlatformNotReady from err
async_add_entities(dev, True)
class WaqiSensor(Entity):
"""Implementation of a WAQI sensor."""
def __init__(self, client, station):
"""Initialize the sensor."""
self._client = client
try:
self.uid = station["uid"]
except (KeyError, TypeError):
self.uid = None
try:
self.url = station["station"]["url"]
except (KeyError, TypeError):
self.url = None
try:
self.station_name = station["station"]["name"]
except (KeyError, TypeError):
self.station_name = None
self._data = None
@property
def name(self):
"""Return the name of the sensor."""
if self.station_name:
return f"WAQI {self.station_name}"
return "WAQI {}".format(self.url if self.url else self.uid)
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return "mdi:cloud"
@property
def state(self):
"""Return the state of the device."""
if self._data is not None:
return self._data.get("aqi")
return None
@property
def available(self):
"""Return sensor availability."""
return self._data is not None
@property
def unique_id(self):
"""Return unique ID."""
return self.uid
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return "AQI"
@property
def device_state_attributes(self):
"""Return the state attributes of the last update."""
attrs = {}
if self._data is not None:
try:
attrs[ATTR_ATTRIBUTION] = " and ".join(
[ATTRIBUTION]
+ [v["name"] for v in self._data.get("attributions", [])]
)
attrs[ATTR_TIME] = self._data["time"]["s"]
attrs[ATTR_DOMINENTPOL] = self._data.get("dominentpol")
iaqi = self._data["iaqi"]
for key in iaqi:
if key in KEY_TO_ATTR:
attrs[KEY_TO_ATTR[key]] = iaqi[key]["v"]
else:
attrs[key] = iaqi[key]["v"]
return attrs
except (IndexError, KeyError):
return {ATTR_ATTRIBUTION: ATTRIBUTION}
async def async_update(self):
"""Get the latest data and updates the states."""
if self.uid:
result = await self._client.get_station_by_number(self.uid)
elif self.url:
result = await self._client.get_station_by_name(self.url)
else:
result = None
self._data = result
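# Editor's sketch (illustrative, not part of the original platform): the
# minimal shape of the per-station payload that async_update stores in
# self._data, inferred from how state and device_state_attributes read it.
# All values below are placeholders, not real measurements.
_EXAMPLE_WAQI_PAYLOAD = {
    "aqi": 42,  # exposed as the sensor state
    "time": {"s": "2021-01-01 12:00:00"},  # exposed as ATTR_TIME
    "dominentpol": "pm25",  # exposed as ATTR_DOMINENTPOL
    "attributions": [{"name": "Example monitoring network"}],
    "iaqi": {  # per-pollutant readings, mapped through KEY_TO_ATTR
        "pm25": {"v": 42},
        "t": {"v": 21},
    },
}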
|
import collections
import itertools
import operator
from typing import (
TYPE_CHECKING, Any, Dict, Iterable, Iterator, List, Optional, Sequence, Set, Union,
MutableMapping)
from PyQt5.QtCore import QUrl
from qutebrowser.utils import utils, urlmatch, usertypes, qtutils
from qutebrowser.config import configexc
if TYPE_CHECKING:
from qutebrowser.config import configdata
def _widened_hostnames(hostname: str) -> Iterable[str]:
"""A generator for widening string hostnames.
Ex: a.c.foo -> [a.c.foo, c.foo, foo]"""
while hostname:
yield hostname
hostname = hostname.partition(".")[-1]
class ScopedValue:
"""A configuration value which is valid for a UrlPattern.
Attributes:
value: The value itself.
pattern: The UrlPattern for the value, or None for global values.
hide_userconfig: Hide this customization from config.dump_userconfig().
"""
id_gen = itertools.count(0)
def __init__(self, value: Any,
pattern: Optional[urlmatch.UrlPattern],
hide_userconfig: bool = False) -> None:
self.value = value
self.pattern = pattern
self.hide_userconfig = hide_userconfig
self.pattern_id = next(ScopedValue.id_gen)
def __repr__(self) -> str:
return utils.get_repr(self, value=self.value, pattern=self.pattern,
hide_userconfig=self.hide_userconfig,
pattern_id=self.pattern_id)
class Values:
"""A collection of values for a single setting.
    Currently, we store patterns in two dictionaries for different types of
    lookups: an ordered, pattern-keyed map and an unordered, domain-keyed map.
    This means that finding a value based on a pattern is fast, and matching
    URL patterns is fast if all domains are unique.
    If there are many patterns under the domain (or subdomain) being
    evaluated, or any patterns for which no concrete domain can be found,
    lookups will become slow again.
Attributes:
opt: The Option being customized.
_vmap: A mapping of all pattern objects to ScopedValues.
_domain_map: A mapping from hostnames to all associated ScopedValues.
"""
_VmapKeyType = Optional[urlmatch.UrlPattern]
def __init__(self,
opt: 'configdata.Option',
values: Sequence[ScopedValue] = ()) -> None:
self.opt = opt
self._vmap: MutableMapping[
Values._VmapKeyType, ScopedValue] = collections.OrderedDict()
# A map from domain parts to rules that fall under them.
self._domain_map: Dict[
Optional[str], Set[ScopedValue]] = collections.defaultdict(set)
for scoped in values:
self._add_scoped(scoped)
def __repr__(self) -> str:
return utils.get_repr(self, opt=self.opt,
values=list(self._vmap.values()),
constructor=True)
def __str__(self) -> str:
"""Get the values as human-readable string."""
lines = self.dump(include_hidden=True)
if lines:
return '\n'.join(lines)
return '{}: <unchanged>'.format(self.opt.name)
def dump(self, include_hidden: bool = False) -> Sequence[str]:
"""Dump all customizations for this value.
Arguments:
include_hidden: Also show values with hide_userconfig=True.
"""
lines = []
for scoped in self._vmap.values():
if scoped.hide_userconfig and not include_hidden:
continue
str_value = self.opt.typ.to_str(scoped.value)
if scoped.pattern is None:
lines.append('{} = {}'.format(self.opt.name, str_value))
else:
lines.append('{}: {} = {}'.format(
scoped.pattern, self.opt.name, str_value))
return lines
def __iter__(self) -> Iterator['ScopedValue']:
"""Yield ScopedValue elements.
This yields in "normal" order, i.e. global and then first-set settings
first.
"""
yield from self._vmap.values()
def __bool__(self) -> bool:
"""Check whether this value is customized."""
return bool(self._vmap)
def _check_pattern_support(
self, arg: Union[urlmatch.UrlPattern, QUrl, None]) -> None:
"""Make sure patterns are supported if one was given."""
if arg is not None and not self.opt.supports_pattern:
raise configexc.NoPatternError(self.opt.name)
    def add(self, value: Any,
            pattern: Optional[urlmatch.UrlPattern] = None, *,
            hide_userconfig: bool = False) -> None:
"""Add a value with the given pattern to the list of values.
If hide_userconfig is given, the value is hidden from
config.dump_userconfig() and thus qute://configdiff.
"""
scoped = ScopedValue(value, pattern, hide_userconfig=hide_userconfig)
self._add_scoped(scoped)
def _add_scoped(self, scoped: ScopedValue) -> None:
"""Add an existing ScopedValue object."""
self._check_pattern_support(scoped.pattern)
self.remove(scoped.pattern)
self._vmap[scoped.pattern] = scoped
host = scoped.pattern.host if scoped.pattern else None
self._domain_map[host].add(scoped)
    def remove(self, pattern: Optional[urlmatch.UrlPattern] = None) -> bool:
"""Remove the value with the given pattern.
If a matching pattern was removed, True is returned.
If no matching pattern was found, False is returned.
"""
self._check_pattern_support(pattern)
if pattern not in self._vmap:
return False
host = pattern.host if pattern else None
scoped_value = self._vmap[pattern]
# If we error here, that means domain_map and vmap are out of sync,
# report a bug!
assert host in self._domain_map
self._domain_map[host].remove(scoped_value)
del self._vmap[pattern]
return True
def clear(self) -> None:
"""Clear all customization for this value."""
self._vmap.clear()
self._domain_map.clear()
def _get_fallback(self, fallback: bool) -> Any:
"""Get the fallback global/default value."""
if None in self._vmap:
return self._vmap[None].value
if fallback:
return self.opt.default
else:
return usertypes.UNSET
    def get_for_url(self, url: Optional[QUrl] = None, *,
                    fallback: bool = True) -> Any:
"""Get a config value, falling back when needed.
This first tries to find a value matching the URL (if given).
If there's no match:
With fallback=True, the global/default setting is returned.
With fallback=False, usertypes.UNSET is returned.
"""
self._check_pattern_support(url)
if url is None:
return self._get_fallback(fallback)
qtutils.ensure_valid(url)
candidates: List[ScopedValue] = []
        # Hostnames with a trailing '.' are equivalent to ones without it.
        # urlutils strips the trailing dot, so we strip it here as well before
        # matching.
widened_hosts = _widened_hostnames(url.host().rstrip('.'))
        # We must also check the 'None' key, so that patterns without a
        # concrete domain still get a chance to match.
for host in itertools.chain(widened_hosts, [None]):
host_set = self._domain_map.get(host, ())
for scoped in host_set:
if scoped.pattern is not None and scoped.pattern.matches(url):
candidates.append(scoped)
if candidates:
scoped = max(candidates, key=operator.attrgetter('pattern_id'))
return scoped.value
if not fallback:
return usertypes.UNSET
return self._get_fallback(fallback)
def get_for_pattern(self,
pattern: Optional[urlmatch.UrlPattern], *,
fallback: bool = True) -> Any:
"""Get a value only if it's been overridden for the given pattern.
This is useful when showing values to the user.
If there's no match:
With fallback=True, the global/default setting is returned.
With fallback=False, usertypes.UNSET is returned.
"""
self._check_pattern_support(pattern)
if pattern is not None:
if pattern in self._vmap:
return self._vmap[pattern].value
if not fallback:
return usertypes.UNSET
return self._get_fallback(fallback)
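# Editor's sketch (commented, not part of the original module): how values are
# added and looked up with the classes above. This is not live code because
# constructing a real configdata.Option is outside the scope of this file.
#
#     values = Values(opt)                 # opt: some configdata.Option
#     values.add('global value')           # pattern=None -> the global value
#     values.add('per-site value',
#                urlmatch.UrlPattern('https://example.com/*'))
#
#     values.get_for_url(QUrl('https://example.com/page'))  # 'per-site value'
#     values.get_for_url(QUrl('https://other.org/'))         # 'global value'
#     values.get_for_url(QUrl('https://other.org/'),
#                        fallback=False)                      # usertypes.UNSET
#
# Internally the per-site value is found via _domain_map['example.com']; when
# several patterns match the same URL, the one added last (highest pattern_id)
# wins.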
class FontFamilies:
"""A list of font family names."""
def __init__(self, families: Sequence[str]) -> None:
self._families = families
self.family = families[0] if families else None
def __iter__(self) -> Iterator[str]:
yield from self._families
def __repr__(self) -> str:
return utils.get_repr(self, families=self._families, constructor=True)
def __str__(self) -> str:
return self.to_str()
def _quoted_families(self) -> Iterator[str]:
for f in self._families:
needs_quoting = any(c in f for c in ', ')
yield '"{}"'.format(f) if needs_quoting else f
def to_str(self, *, quote: bool = True) -> str:
families = self._quoted_families() if quote else self._families
return ', '.join(families)
@classmethod
def from_str(cls, family_str: str) -> 'FontFamilies':
"""Parse a CSS-like string of font families."""
families = []
for part in family_str.split(','):
part = part.strip()
# The Qt CSS parser handles " and ' before passing the string to
# QFont.setFamily.
if ((part.startswith("'") and part.endswith("'")) or
(part.startswith('"') and part.endswith('"'))):
part = part[1:-1]
if not part:
continue
families.append(part)
return cls(families)
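# Editor's sketch (illustrative, not part of the original module): a
# parse/serialise round trip. to_str() only quotes names that contain a comma
# or a space.
#
#     families = FontFamilies.from_str('"DejaVu Sans Mono", monospace')
#     list(families)     # -> ['DejaVu Sans Mono', 'monospace']
#     families.family    # -> 'DejaVu Sans Mono'
#     families.to_str()  # -> '"DejaVu Sans Mono", monospace'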
|