import logging
from typing import Optional
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.util import dt
from .const import DOMAIN, FEED
_LOGGER = logging.getLogger(__name__)
ATTR_STATUS = "status"
ATTR_LAST_UPDATE = "last_update"
ATTR_LAST_UPDATE_SUCCESSFUL = "last_update_successful"
ATTR_LAST_TIMESTAMP = "last_timestamp"
ATTR_CREATED = "created"
ATTR_UPDATED = "updated"
ATTR_REMOVED = "removed"
DEFAULT_ICON = "mdi:pulse"
DEFAULT_UNIT_OF_MEASUREMENT = "quakes"
# An update of this entity does not make a web request; it only uses internal data.
PARALLEL_UPDATES = 0
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the GeoNet NZ Quakes Feed platform."""
manager = hass.data[DOMAIN][FEED][entry.entry_id]
sensor = GeonetnzQuakesSensor(entry.entry_id, entry.unique_id, entry.title, manager)
async_add_entities([sensor])
_LOGGER.debug("Sensor setup done")
class GeonetnzQuakesSensor(Entity):
"""This is a status sensor for the GeoNet NZ Quakes integration."""
def __init__(self, config_entry_id, config_unique_id, config_title, manager):
"""Initialize entity."""
self._config_entry_id = config_entry_id
self._config_unique_id = config_unique_id
self._config_title = config_title
self._manager = manager
self._status = None
self._last_update = None
self._last_update_successful = None
self._last_timestamp = None
self._total = None
self._created = None
self._updated = None
self._removed = None
self._remove_signal_status = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._remove_signal_status = async_dispatcher_connect(
self.hass,
f"geonetnz_quakes_status_{self._config_entry_id}",
self._update_status_callback,
)
_LOGGER.debug("Waiting for updates %s", self._config_entry_id)
# First update is manual because of how the feed entity manager is updated.
await self.async_update()
async def async_will_remove_from_hass(self) -> None:
"""Call when entity will be removed from hass."""
if self._remove_signal_status:
self._remove_signal_status()
@callback
def _update_status_callback(self):
"""Call status update method."""
_LOGGER.debug("Received status update for %s", self._config_entry_id)
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed for GeoNet NZ Quakes status sensor."""
return False
async def async_update(self):
"""Update this entity from the data held in the feed manager."""
_LOGGER.debug("Updating %s", self._config_entry_id)
if self._manager:
status_info = self._manager.status_info()
if status_info:
self._update_from_status_info(status_info)
def _update_from_status_info(self, status_info):
"""Update the internal state from the provided information."""
self._status = status_info.status
self._last_update = (
dt.as_utc(status_info.last_update) if status_info.last_update else None
)
if status_info.last_update_successful:
self._last_update_successful = dt.as_utc(status_info.last_update_successful)
else:
self._last_update_successful = None
self._last_timestamp = status_info.last_timestamp
self._total = status_info.total
self._created = status_info.created
self._updated = status_info.updated
self._removed = status_info.removed
@property
def state(self):
"""Return the state of the sensor."""
return self._total
@property
def unique_id(self) -> str:
"""Return a unique ID containing latitude/longitude."""
return self._config_unique_id
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return f"GeoNet NZ Quakes ({self._config_title})"
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return DEFAULT_ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return DEFAULT_UNIT_OF_MEASUREMENT
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
for key, value in (
(ATTR_STATUS, self._status),
(ATTR_LAST_UPDATE, self._last_update),
(ATTR_LAST_UPDATE_SUCCESSFUL, self._last_update_successful),
(ATTR_LAST_TIMESTAMP, self._last_timestamp),
(ATTR_CREATED, self._created),
(ATTR_UPDATED, self._updated),
(ATTR_REMOVED, self._removed),
):
if value or isinstance(value, bool):
attributes[key] = value
return attributes
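# Illustrative sketch (assumption, not part of this module): the feed entity
# manager is expected to trigger the callback registered in
# async_added_to_hass() by sending the matching dispatcher signal, e.g.:
#
#     from homeassistant.helpers.dispatcher import async_dispatcher_send
#
#     async_dispatcher_send(hass, f"geonetnz_quakes_status_{entry_id}")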
|
import os
import re
import cherrypy
from cherrypy.test import helper
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
def read_process(cmd, args=''):
pipein, pipeout = os.popen4('%s %s' % (cmd, args))
try:
firstline = pipeout.readline()
if (re.search(r'(not recognized|No such file|not found)', firstline,
re.IGNORECASE)):
raise IOError('%s must be on your system path.' % cmd)
output = firstline + pipeout.read()
finally:
pipeout.close()
return output
APACHE_PATH = 'httpd'
CONF_PATH = 'test_mp.conf'
conf_modpython_gateway = """
# Apache2 server conf file for testing CherryPy with modpython_gateway.
ServerName 127.0.0.1
DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so
SetHandler python-program
PythonFixupHandler cherrypy.test.modpy::wsgisetup
PythonOption testmod %(modulename)s
PythonHandler modpython_gateway::handler
PythonOption wsgi.application cherrypy::tree
PythonOption socket_host %(host)s
PythonDebug On
"""
conf_cpmodpy = """
# Apache2 server conf file for testing CherryPy with _cpmodpy.
ServerName 127.0.0.1
DocumentRoot "/"
Listen %(port)s
LoadModule python_module modules/mod_python.so
SetHandler python-program
PythonFixupHandler cherrypy.test.modpy::cpmodpysetup
PythonHandler cherrypy._cpmodpy::handler
PythonOption cherrypy.setup cherrypy.test.%(modulename)s::setup_server
PythonOption socket_host %(host)s
PythonDebug On
"""
class ModPythonSupervisor(helper.Supervisor):
using_apache = True
using_wsgi = False
template = None
def __str__(self):
return 'ModPython Server on %s:%s' % (self.host, self.port)
def start(self, modulename):
mpconf = CONF_PATH
if not os.path.isabs(mpconf):
mpconf = os.path.join(curdir, mpconf)
f = open(mpconf, 'wb')
try:
f.write(self.template %
{'port': self.port, 'modulename': modulename,
'host': self.host})
finally:
f.close()
result = read_process(APACHE_PATH, '-k start -f %s' % mpconf)
if result:
print(result)
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
read_process(APACHE_PATH, '-k stop')
loaded = False
def wsgisetup(req):
global loaded
if not loaded:
loaded = True
options = req.get_options()
cherrypy.config.update({
'log.error_file': os.path.join(curdir, 'test.log'),
'environment': 'test_suite',
'server.socket_host': options['socket_host'],
})
modname = options['testmod']
mod = __import__(modname, globals(), locals(), [''])
mod.setup_server()
cherrypy.server.unsubscribe()
cherrypy.engine.start()
from mod_python import apache
return apache.OK
def cpmodpysetup(req):
global loaded
if not loaded:
loaded = True
options = req.get_options()
cherrypy.config.update({
'log.error_file': os.path.join(curdir, 'test.log'),
'environment': 'test_suite',
'server.socket_host': options['socket_host'],
})
from mod_python import apache
return apache.OK
|
from pygal._compat import is_str
from pygal.graph.graph import Graph
from pygal.util import compute_scale, cut
class Dual(Graph):
_dual = True
def _value_format(self, value):
"""
Format value for dual value display.
"""
return '%s: %s' % (self._x_format(value[0]), self._y_format(value[1]))
def _compute_x_labels(self):
x_pos = compute_scale(
self._box.xmin, self._box.xmax, self.logarithmic, self.order_min,
self.min_scale, self.max_scale
)
if self.x_labels:
self._x_labels = []
for i, x_label in enumerate(self.x_labels):
if isinstance(x_label, dict):
pos = self._x_adapt(x_label.get('value'))
title = x_label.get('label', self._x_format(pos))
elif is_str(x_label):
pos = self._x_adapt(x_pos[i % len(x_pos)])
title = x_label
else:
pos = self._x_adapt(x_label)
title = self._x_format(pos)
self._x_labels.append((title, pos))
self._box.xmin = min(self._box.xmin, min(cut(self._x_labels, 1)))
self._box.xmax = max(self._box.xmax, max(cut(self._x_labels, 1)))
else:
self._x_labels = list(zip(map(self._x_format, x_pos), x_pos))
def _compute_x_labels_major(self):
# In the dual case, x labels must be adapted, and so must the major labels
self.x_labels_major = self.x_labels_major and list(
map(self._x_adapt, self.x_labels_major)
)
super(Dual, self)._compute_x_labels_major()
def _get_x_label(self, i):
"""Convenience function to get the x_label of a value index"""
return
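# Illustrative usage sketch (assumes only the public pygal API; not part of this
# module). Dual is the base for XY-style charts, where each value is an (x, y)
# pair and x_labels may be numbers, strings or {'value', 'label'} dicts, as
# handled by _compute_x_labels above.
if __name__ == '__main__':
    import pygal
    chart = pygal.XY(title='Dual chart example')
    chart.x_labels = [0, {'value': 5, 'label': 'five'}, 10]
    chart.add('series', [(0, 0), (5, 2), (10, 4)])
    svg = chart.render()  # returns the SVG document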
|
import asyncio
from datetime import timedelta
import logging
import aiocoap
import aioshelly
import async_timeout
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, device_registry, update_coordinator
from .const import COAP_CONTEXT, DATA_CONFIG_ENTRY, DOMAIN
PLATFORMS = ["binary_sensor", "cover", "light", "sensor", "switch"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Shelly component."""
hass.data[DOMAIN] = {DATA_CONFIG_ENTRY: {}}
hass.data[DOMAIN][COAP_CONTEXT] = await aiocoap.Context.create_client_context()
async def shutdown_listener(*_):
"""Home Assistant shutdown listener."""
await hass.data[DOMAIN][COAP_CONTEXT].shutdown()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown_listener)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Shelly from a config entry."""
temperature_unit = "C" if hass.config.units.is_metric else "F"
options = aioshelly.ConnectionOptions(
entry.data[CONF_HOST],
entry.data.get(CONF_USERNAME),
entry.data.get(CONF_PASSWORD),
temperature_unit,
)
coap_context = hass.data[DOMAIN][COAP_CONTEXT]
try:
async with async_timeout.timeout(10):
device = await aioshelly.Device.create(
aiohttp_client.async_get_clientsession(hass),
coap_context,
options,
)
except (asyncio.TimeoutError, OSError) as err:
raise ConfigEntryNotReady from err
wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][
entry.entry_id
] = ShellyDeviceWrapper(hass, entry, device)
await wrapper.async_setup()
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
class ShellyDeviceWrapper(update_coordinator.DataUpdateCoordinator):
"""Wrapper for a Shelly device with Home Assistant specific functions."""
def __init__(self, hass, entry, device: aioshelly.Device):
"""Initialize the Shelly device wrapper."""
super().__init__(
hass,
_LOGGER,
name=device.settings["name"] or device.settings["device"]["hostname"],
update_interval=timedelta(seconds=5),
)
self.hass = hass
self.entry = entry
self.device = device
async def _async_update_data(self):
"""Fetch data."""
try:
async with async_timeout.timeout(5):
return await self.device.update()
except (aiocoap.error.Error, OSError) as err:
raise update_coordinator.UpdateFailed("Error fetching data") from err
@property
def model(self):
"""Model of the device."""
return self.device.settings["device"]["type"]
@property
def mac(self):
"""Mac address of the device."""
return self.device.settings["device"]["mac"]
async def async_setup(self):
"""Set up the wrapper."""
dev_reg = await device_registry.async_get_registry(self.hass)
model_type = self.device.settings["device"]["type"]
dev_reg.async_get_or_create(
config_entry_id=self.entry.entry_id,
name=self.name,
connections={(device_registry.CONNECTION_NETWORK_MAC, self.mac)},
# This is a duplicate, but otherwise via_device can't work
identifiers={(DOMAIN, self.mac)},
manufacturer="Shelly",
model=aioshelly.MODEL_NAMES.get(model_type, model_type),
sw_version=self.device.settings["fw"],
)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_CONFIG_ENTRY].pop(entry.entry_id)
return unload_ok
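# Illustrative sketch (assumption, not part of this module): entities created by
# the platforms listed in PLATFORMS would typically consume the wrapper through
# the DataUpdateCoordinator pattern, e.g.:
#
#     class ShellyBlockEntity(update_coordinator.CoordinatorEntity):
#         def __init__(self, wrapper: ShellyDeviceWrapper):
#             super().__init__(wrapper)
#             self.wrapper = wrapper
#
#         @property
#         def device_info(self):
#             return {
#                 "connections": {
#                     (device_registry.CONNECTION_NETWORK_MAC, self.wrapper.mac)
#                 }
#             }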
|
from app import socketio
from app.utils import AuthUtil, RequestUtil
import flask_socketio
@socketio.on('connect')
def on_socketio_connect():
# On connect, automatically join the rooms of all webhooks the user is authorized for
user_id = RequestUtil.get_login_user().get('id', '')
# Not logged in: refuse the connection
if not user_id:
return False
webhooks = AuthUtil.has_auth_webhooks(user_id)
for webhook in webhooks:
flask_socketio.join_room(webhook.id)
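# Illustrative sketch (assumption, not part of this module; the event name
# 'webhook_msg' is hypothetical): because each client joins one room per
# authorized webhook above, events can be pushed only to the clients that are
# allowed to see them, e.g.:
#
#     def push_webhook_event(webhook_id, payload):
#         socketio.emit('webhook_msg', payload, room=webhook_id)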
|
import os.path as op
import numpy as np
from scipy import linalg
from ...io import BaseRaw
from ...io.constants import FIFF
from ...utils import _validate_type
from ..nirs import source_detector_distances, _channel_frequencies,\
_check_channels_ordered
def beer_lambert_law(raw, ppf=0.1):
r"""Convert NIRS optical density data to haemoglobin concentration.
Parameters
----------
raw : instance of Raw
The optical density data.
ppf : float
The partial pathlength factor.
Returns
-------
raw : instance of Raw
The modified raw instance.
"""
raw = raw.copy().load_data()
_validate_type(raw, BaseRaw, 'raw')
freqs = np.unique(_channel_frequencies(raw))
picks = _check_channels_ordered(raw, freqs)
abs_coef = _load_absorption(freqs)
distances = source_detector_distances(raw.info)
for ii in picks[::2]:
EL = abs_coef * distances[ii] * ppf
iEL = linalg.pinv(EL)
raw._data[[ii, ii + 1]] = (raw._data[[ii, ii + 1]].T @ iEL.T).T * 1e-3
# Update channel information
coil_dict = dict(hbo=FIFF.FIFFV_COIL_FNIRS_HBO,
hbr=FIFF.FIFFV_COIL_FNIRS_HBR)
for ki, kind in enumerate(('hbo', 'hbr')):
ch = raw.info['chs'][ii + ki]
ch.update(coil_type=coil_dict[kind], unit=FIFF.FIFF_UNIT_MOL)
raw.rename_channels({
ch['ch_name']: '%s %s' % (ch['ch_name'][:-4], kind)})
return raw
def _load_absorption(freqs):
"""Load molar extinction coefficients."""
# Data from https://omlc.org/spectra/hemoglobin/summary.html
# The text was copied to a text file. The text before and
# after the table was deleted. Then the following was run in
# MATLAB:
# extinct_coef=importdata('extinction_coef.txt')
# save('extinction_coef.mat', 'extinct_coef')
#
# Returns data as [[HbO2(freq1), Hb(freq1)],
# [HbO2(freq2), Hb(freq2)]]
from scipy.io import loadmat
from scipy.interpolate import interp1d
extinction_fname = op.join(op.dirname(__file__), '..', '..', 'data',
'extinction_coef.mat')
a = loadmat(extinction_fname)['extinct_coef']
interp_hbo = interp1d(a[:, 0], a[:, 1], kind='linear')
interp_hb = interp1d(a[:, 0], a[:, 2], kind='linear')
ext_coef = np.array([[interp_hbo(freqs[0]), interp_hb(freqs[0])],
[interp_hbo(freqs[1]), interp_hb(freqs[1])]])
abs_coef = ext_coef * 0.2303
return abs_coef
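# Illustrative usage sketch (assumption: `fname` points to a NIRx recording;
# not part of this module):
#
#     import mne
#     raw_intensity = mne.io.read_raw_nirx(fname, preload=True)
#     raw_od = mne.preprocessing.nirs.optical_density(raw_intensity)
#     raw_haemo = beer_lambert_law(raw_od, ppf=0.1)  # hbo/hbr concentration channels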
|
import logging
import os
import yaml
logging.basicConfig()
logger = logging.getLogger("kalliope")
class YAMLFileNotFound(Exception):
"""
YAML file has not been found
"""
pass
class YAMLFileEmpty(Exception):
"""
YAML file empty
"""
pass
class YAMLLoader:
"""
Simple Class to Verify / Load a YAML file.
"""
def __init__(self):
pass
@classmethod
def get_config(cls, yaml_file):
"""
Return the provided YAML configuration file
:param yaml_file: The path of the configuration file
:type yaml_file: String
:return: the parsed content of the configuration file
:rtype: dict or list
:Example:
YAMLLoader.get_config(brain_file_path)
.. seealso:: SettingLoader, BrainLoader
.. raises:: YAMLFileNotFound
.. warnings:: Class Method and Public
"""
cls.file_path_to_load = yaml_file
logger.debug("File path to load: %s " % cls.file_path_to_load)
if os.path.isfile(cls.file_path_to_load):
inc_import = IncludeImport(cls.file_path_to_load)
data = inc_import.get_data()
return data
else:
raise YAMLFileNotFound("File %s not found" % cls.file_path_to_load)
class IncludeImport(object):
"""
This class manages the Include Import statement in the brain.yml file
"""
def __init__(self, file_path):
"""
Load yaml file, with includes statement
:param file_path: path to the yaml file to load
"""
# get the parent dir. will be used in case of relative path
parent_dir = os.path.normpath(file_path + os.sep + os.pardir)
# load the yaml file
with open(file_path, "r") as f:
self.data = yaml.full_load(f)
if self.data is None:
raise YAMLFileEmpty("[YAMLLoader] File %s is empty" % file_path)
# add included brain
if isinstance(self.data, list):
for el in self.data:
if "includes" in el:
for inc in el["includes"]:
# if the path is relative, we add the root path
if not os.path.isabs(inc): # os.path.isabs returns True if the path is absolute
# logger.debug("File path %s is relative, adding the root path" % inc)
inc = os.path.join(parent_dir, inc)
# logger.debug("New path: %s" % inc)
with open(inc, "r") as f:
self.update(yaml.full_load(f))
def get_data(self):
"""
:return: the data for the IncludeImport
"""
return self.data
def update(self, data_to_add):
"""
Method to add another include statement's content to the original brain.yml data
:param data_to_add: the data to add to the current brain.yml, provided by an Include Statement
"""
# we add each synapse inside the extended brain into the main brain data
if data_to_add is not None:
for el in data_to_add:
self.data.append(el)
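# Illustrative usage sketch (assumption, not part of this module). Given a
# brain.yml such as:
#
#     - includes:
#         - extra_brain.yml        # relative paths resolve against brain.yml's directory
#     - name: "say-hello"
#       signals:
#         - order: "hello"
#       neurons:
#         - say:
#             message: "Hello"
#
# loading it merges the synapses from the included file into the returned list:
#
#     synapses = YAMLLoader.get_config("brain.yml")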
|
from datetime import timedelta
import logging
import pylacrosse
from serial import SerialException
import voluptuous as vol
from homeassistant.components.sensor import ENTITY_ID_FORMAT, PLATFORM_SCHEMA
from homeassistant.const import (
CONF_DEVICE,
CONF_ID,
CONF_NAME,
CONF_SENSORS,
CONF_TYPE,
EVENT_HOMEASSISTANT_STOP,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity, async_generate_entity_id
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_BAUD = "baud"
CONF_DATARATE = "datarate"
CONF_EXPIRE_AFTER = "expire_after"
CONF_FREQUENCY = "frequency"
CONF_JEELINK_LED = "led"
CONF_TOGGLE_INTERVAL = "toggle_interval"
CONF_TOGGLE_MASK = "toggle_mask"
DEFAULT_DEVICE = "/dev/ttyUSB0"
DEFAULT_BAUD = "57600"
DEFAULT_EXPIRE_AFTER = 300
TYPES = ["battery", "humidity", "temperature"]
SENSOR_SCHEMA = vol.Schema(
{
vol.Required(CONF_ID): cv.positive_int,
vol.Required(CONF_TYPE): vol.In(TYPES),
vol.Optional(CONF_EXPIRE_AFTER): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA),
vol.Optional(CONF_BAUD, default=DEFAULT_BAUD): cv.string,
vol.Optional(CONF_DATARATE): cv.positive_int,
vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string,
vol.Optional(CONF_FREQUENCY): cv.positive_int,
vol.Optional(CONF_JEELINK_LED): cv.boolean,
vol.Optional(CONF_TOGGLE_INTERVAL): cv.positive_int,
vol.Optional(CONF_TOGGLE_MASK): cv.positive_int,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the LaCrosse sensors."""
usb_device = config.get(CONF_DEVICE)
baud = int(config.get(CONF_BAUD))
expire_after = config.get(CONF_EXPIRE_AFTER)
_LOGGER.debug("%s %s", usb_device, baud)
try:
lacrosse = pylacrosse.LaCrosse(usb_device, baud)
lacrosse.open()
except SerialException as exc:
_LOGGER.warning("Unable to open serial port: %s", exc)
return False
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, lacrosse.close)
if CONF_JEELINK_LED in config:
lacrosse.led_mode_state(config.get(CONF_JEELINK_LED))
if CONF_FREQUENCY in config:
lacrosse.set_frequency(config.get(CONF_FREQUENCY))
if CONF_DATARATE in config:
lacrosse.set_datarate(config.get(CONF_DATARATE))
if CONF_TOGGLE_INTERVAL in config:
lacrosse.set_toggle_interval(config.get(CONF_TOGGLE_INTERVAL))
if CONF_TOGGLE_MASK in config:
lacrosse.set_toggle_mask(config.get(CONF_TOGGLE_MASK))
lacrosse.start_scan()
sensors = []
for device, device_config in config[CONF_SENSORS].items():
_LOGGER.debug("%s %s", device, device_config)
typ = device_config.get(CONF_TYPE)
sensor_class = TYPE_CLASSES[typ]
name = device_config.get(CONF_NAME, device)
sensors.append(
sensor_class(hass, lacrosse, device, name, expire_after, device_config)
)
add_entities(sensors)
class LaCrosseSensor(Entity):
"""Implementation of a Lacrosse sensor."""
_temperature = None
_humidity = None
_low_battery = None
_new_battery = None
def __init__(self, hass, lacrosse, device_id, name, expire_after, config):
"""Initialize the sensor."""
self.hass = hass
self.entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, device_id, hass=hass
)
self._config = config
self._name = name
self._value = None
self._expire_after = expire_after
self._expiration_trigger = None
lacrosse.register_callback(
int(self._config["id"]), self._callback_lacrosse, None
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
attributes = {
"low_battery": self._low_battery,
"new_battery": self._new_battery,
}
return attributes
def _callback_lacrosse(self, lacrosse_sensor, user_data):
"""Handle a function that is called from pylacrosse with new values."""
if self._expire_after is not None and self._expire_after > 0:
# Reset old trigger
if self._expiration_trigger:
self._expiration_trigger()
self._expiration_trigger = None
# Set new trigger
expiration_at = dt_util.utcnow() + timedelta(seconds=self._expire_after)
self._expiration_trigger = async_track_point_in_utc_time(
self.hass, self.value_is_expired, expiration_at
)
self._temperature = lacrosse_sensor.temperature
self._humidity = lacrosse_sensor.humidity
self._low_battery = lacrosse_sensor.low_battery
self._new_battery = lacrosse_sensor.new_battery
@callback
def value_is_expired(self, *_):
"""Triggered when value is expired."""
self._expiration_trigger = None
self._value = None
self.async_write_ha_state()
class LaCrosseTemperature(LaCrosseSensor):
"""Implementation of a Lacrosse temperature sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def state(self):
"""Return the state of the sensor."""
return self._temperature
class LaCrosseHumidity(LaCrosseSensor):
"""Implementation of a Lacrosse humidity sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return PERCENTAGE
@property
def state(self):
"""Return the state of the sensor."""
return self._humidity
@property
def icon(self):
"""Icon to use in the frontend."""
return "mdi:water-percent"
class LaCrosseBattery(LaCrosseSensor):
"""Implementation of a Lacrosse battery sensor."""
@property
def state(self):
"""Return the state of the sensor."""
if self._low_battery is None:
state = None
elif self._low_battery is True:
state = "low"
else:
state = "ok"
return state
@property
def icon(self):
"""Icon to use in the frontend."""
if self._low_battery is None:
icon = "mdi:battery-unknown"
elif self._low_battery is True:
icon = "mdi:battery-alert"
else:
icon = "mdi:battery"
return icon
TYPE_CLASSES = {
"temperature": LaCrosseTemperature,
"humidity": LaCrosseHumidity,
"battery": LaCrosseBattery,
}
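# Illustrative configuration sketch (assumption, not part of this module): the
# PLATFORM_SCHEMA above corresponds to YAML along these lines:
#
#     sensor:
#       - platform: lacrosse
#         device: /dev/ttyUSB0
#         baud: 57600
#         sensors:
#           living_room_temperature:
#             id: 21
#             type: temperature
#             expire_after: 300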
|
import tensornetwork as tn
import pytest
import numpy as np
import tensorflow as tf
import torch
import jax
np_dtypes = [np.float32, np.float64, np.complex64, np.complex128, np.int32]
tf_dtypes = [tf.float32, tf.float64, tf.complex64, tf.complex128, tf.int32]
torch_dtypes = [torch.float32, torch.float64, torch.int32, torch.int64]
jax_dtypes = [
jax.numpy.float32, jax.numpy.float64, jax.numpy.complex64,
jax.numpy.complex128, jax.numpy.int32
]
def test_tnwork_copy_conj(backend):
if backend == "pytorch":
pytest.skip("Pytorch does not support complex numbers")
a = tn.Node(np.array([1.0 + 2.0j, 2.0 - 1.0j]))
nodes, _ = tn.copy({a}, conjugate=True)
np.testing.assert_allclose(nodes[a].tensor, np.array([1.0 - 2.0j,
2.0 + 1.0j]))
def test_tnwork_copy(backend):
a = tn.Node(np.random.rand(3, 3, 3), backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), backend=backend)
c = tn.Node(np.random.rand(3, 3, 3), backend=backend)
a[0] ^ b[1]
a[1] ^ c[2]
b[2] ^ c[0]
node_dict, _ = tn.copy({a, b, c})
tn.check_correct({node_dict[n] for n in {a, b, c}})
res = a @ b @ c
res_copy = node_dict[a] @ node_dict[b] @ node_dict[c]
np.testing.assert_allclose(res.tensor, res_copy.tensor)
def test_tnwork_copy_names(backend):
a = tn.Node(np.random.rand(3, 3, 3), name='a', backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), name='b', backend=backend)
c = tn.Node(np.random.rand(3, 3, 3), name='c', backend=backend)
a[0] ^ b[1]
b[2] ^ c[0]
node_dict, edge_dict = tn.copy({a, b, c})
for node in {a, b, c}:
assert node_dict[node].name == node.name
for edge in tn.get_all_edges({a, b, c}):
assert edge_dict[edge].name == edge.name
def test_tnwork_copy_identities(backend):
a = tn.Node(np.random.rand(3, 3, 3), name='a', backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), name='b', backend=backend)
c = tn.Node(np.random.rand(3, 3, 3), name='c', backend=backend)
a[0] ^ b[1]
b[2] ^ c[0]
node_dict, edge_dict = tn.copy({a, b, c})
for node in {a, b, c}:
assert not node_dict[node] is node
for edge in tn.get_all_edges({a, b, c}):
assert not edge_dict[edge] is edge
def test_tnwork_copy_subgraph(backend):
a = tn.Node(np.random.rand(3, 3, 3), name='a', backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), name='b', backend=backend)
c = tn.Node(np.random.rand(3, 3, 3), name='c', backend=backend)
a[0] ^ b[1]
edge2 = b[2] ^ c[0]
node_dict, edge_dict = tn.copy({a, b})
cut_edge = edge_dict[edge2]
assert cut_edge.is_dangling()
assert cut_edge.axis1 == 2
assert cut_edge.get_nodes() == [node_dict[b], None]
assert len(a.get_all_nondangling()) == 1
def test_tnwork_copy_subgraph_2(backend):
a = tn.Node(np.random.rand(3, 3, 3), name='a', backend=backend)
b = tn.Node(np.random.rand(3, 3, 3), name='b', backend=backend)
c = tn.Node(np.random.rand(3, 3, 3), name='c', backend=backend)
a[0] ^ b[1]
edge2 = c[2] ^ b[0]
node_dict, edge_dict = tn.copy({a, b})
cut_edge = edge_dict[edge2]
assert cut_edge.is_dangling()
assert cut_edge.axis1 == 0
assert cut_edge.get_nodes() == [node_dict[b], None]
assert len(a.get_all_nondangling()) == 1
def test_connect_axis_names(backend):
a = tn.Node(np.ones((3,)), name="a", axis_names=["one"], backend=backend)
b = tn.Node(np.ones((3,)), name="b", axis_names=["one"], backend=backend)
tn.connect(a["one"], b["one"])
assert a.edges == b.edges
def test_connect_twice_edge_axis_value_error(backend):
a = tn.Node(np.array([2.]), name="a", backend=backend)
b = tn.Node(np.array([2.]), name="b", backend=backend)
tn.connect(a[0], b[0])
with pytest.raises(ValueError):
tn.connect(a[0], b[0])
def test_connect_same_edge_value_error(backend):
a = tn.Node(np.eye(2), backend=backend)
with pytest.raises(ValueError):
tn.connect(a[0], a[0])
def test_disconnect_edge(backend):
a = tn.Node(np.array([1.0] * 5), "a", backend=backend)
b = tn.Node(np.array([1.0] * 5), "b", backend=backend)
e = tn.connect(a[0], b[0])
assert not e.is_dangling()
dangling_edge_1, dangling_edge_2 = tn.disconnect(e)
assert dangling_edge_1.is_dangling()
assert dangling_edge_2.is_dangling()
assert a.get_edge(0) == dangling_edge_1
assert b.get_edge(0) == dangling_edge_2
def test_disconnect_dangling_edge_value_error(backend):
a = tn.Node(np.eye(2), backend=backend)
with pytest.raises(ValueError):
tn.disconnect(a[0])
def test_contract_trace_single_node(backend):
a = tn.Node(np.ones([10, 10]), name="a", backend=backend)
edge = tn.connect(a[0], a[1], "edge")
result = tn.contract(edge)
np.testing.assert_allclose(result.tensor, 10.0)
def test_contract_single_edge(backend):
a = tn.Node(np.array([1.0] * 5), "a", backend=backend)
b = tn.Node(np.array([1.0] * 5), "b", backend=backend)
e = tn.connect(a[0], b[0])
c = tn.contract(e)
tn.check_correct({c})
val = c.tensor
np.testing.assert_allclose(val, 5.0)
def test_contract_name_contracted_node(backend):
node = tn.Node(np.eye(2), name="Identity Matrix", backend=backend)
assert node.name == "Identity Matrix"
edge = tn.connect(node[0], node[1], name="Trace Edge")
assert edge.name == "Trace Edge"
final_result = tn.contract(edge, name="Trace Of Identity")
assert final_result.name == "Trace Of Identity"
def test_contract_edge_twice_value_error(backend):
a = tn.Node(np.eye(2), backend=backend)
e = tn.connect(a[0], a[1], name="edge")
tn.contract(e)
with pytest.raises(ValueError):
tn.contract(e)
def test_contract_dangling_edge_value_error(backend):
a = tn.Node(np.array([1.0]), backend=backend)
e = a[0]
with pytest.raises(ValueError):
tn.contract(e)
def test_contract_copy_node(backend):
a = tn.Node(np.array([1, 2, 3]), backend=backend)
b = tn.Node(np.array([10, 20, 30]), backend=backend)
c = tn.Node(np.array([5, 6, 7]), backend=backend)
d = tn.Node(np.array([1, -1, 1]), backend=backend)
cn = tn.CopyNode(rank=4, dimension=3, backend=backend)
tn.connect(a[0], cn[0])
tn.connect(b[0], cn[1])
tn.connect(c[0], cn[2])
tn.connect(d[0], cn[3])
val = tn.contract_copy_node(cn)
result = val.tensor
assert list(result.shape) == []
np.testing.assert_allclose(result, 50 - 240 + 630)
def test_contract_copy_node_dangling_edge_value_error(backend):
a = tn.Node(np.array([1, 2, 3]), backend=backend)
b = tn.Node(np.array([10, 20, 30]), backend=backend)
c = tn.Node(np.array([5, 6, 7]), backend=backend)
cn = tn.CopyNode(rank=4, dimension=3, backend=backend)
tn.connect(a[0], cn[0])
tn.connect(b[0], cn[1])
tn.connect(c[0], cn[2])
with pytest.raises(ValueError):
tn.contract_copy_node(cn)
def test_outer_product(backend):
a = tn.Node(np.ones((2, 4, 5)), name="A", backend=backend)
b = tn.Node(np.ones((4, 3, 6)), name="B", backend=backend)
c = tn.Node(np.ones((3, 2)), name="C", backend=backend)
tn.connect(a[1], b[0])
tn.connect(a[0], c[1])
tn.connect(b[1], c[0])
# Purposely leave b's 3rd axis undefined.
d = tn.outer_product(a, b, name="D")
tn.check_correct({c, d})
assert d.shape == (2, 4, 5, 4, 3, 6)
np.testing.assert_allclose(d.tensor, np.ones((2, 4, 5, 4, 3, 6)))
assert d.name == "D"
@pytest.mark.parametrize("a, b, expected_val, expected_shape, expected_name", [
pytest.param(
np.ones((2, 4, 5)), np.ones(()), np.ones((2, 4, 5)), (2, 4, 5), "C"),
pytest.param(
np.ones(()), np.ones((2, 4, 5)), np.ones((2, 4, 5)), (2, 4, 5), "C"),
])
def test_outer_product_without_legs(a, b, expected_val, expected_shape,
expected_name, backend):
node1 = tn.Node(a, name="A", backend=backend)
node2 = tn.Node(b, name="B", backend=backend)
node3 = tn.outer_product(node1, node2, name=expected_name)
np.testing.assert_allclose(node3.tensor, expected_val)
assert node3.shape == expected_shape
assert node3.name == expected_name
def test_get_all_nondangling(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
edge1 = tn.connect(a[0], b[0])
c = tn.Node(np.eye(2), backend=backend)
d = tn.Node(np.eye(2), backend=backend)
edge2 = tn.connect(c[0], d[0])
edge3 = tn.connect(a[1], c[1])
assert {edge1, edge2, edge3} == tn.get_all_nondangling({a, b, c, d})
def test_get_all_edges(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
assert {a[0], a[1], b[0], b[1]} == tn.get_all_edges({a, b})
def test_check_connected_value_error(backend):
a = tn.Node(np.array([2, 2.]), backend=backend)
b = tn.Node(np.array([2, 2.]), backend=backend)
tn.connect(a[0], b[0])
c = tn.Node(np.array([2, 2.]), backend=backend)
d = tn.Node(np.array([2, 2.]), backend=backend)
tn.connect(c[0], d[0])
with pytest.raises(ValueError):
tn.check_connected({a, b, c, d})
def test_flatten_trace_edges(backend):
a = tn.Node(np.zeros((2, 3, 4, 3, 5, 5)), backend=backend)
c = tn.Node(np.zeros((2, 4)), backend=backend)
e1 = tn.connect(a[1], a[3])
e2 = tn.connect(a[4], a[5])
external_1 = tn.connect(a[0], c[0])
external_2 = tn.connect(c[1], a[2])
new_edge = tn.flatten_edges([e1, e2], "New Edge")
tn.check_correct({a, c})
assert a.shape == (2, 4, 15, 15)
assert a.edges == [external_1, external_2, new_edge, new_edge]
assert new_edge.name == "New Edge"
def test_flatten_edges_standard(backend):
a = tn.Node(np.zeros((2, 3, 5)), name="A", backend=backend)
b = tn.Node(np.zeros((2, 3, 4, 5)), name="B", backend=backend)
e1 = tn.connect(a[0], b[0], "Edge_1_1")
e2 = tn.connect(a[2], b[3], "Edge_2_3")
edge_a_1 = a[1]
edge_b_1 = b[1]
edge_b_2 = b[2]
new_edge = tn.flatten_edges([e1, e2], new_edge_name="New Edge")
assert a.shape == (3, 10)
assert b.shape == (3, 4, 10)
assert a.edges == [edge_a_1, new_edge]
assert b.edges == [edge_b_1, edge_b_2, new_edge]
tn.check_correct({a, b})
def test_flatten_edges_dangling(backend):
a = tn.Node(np.zeros((2, 3, 4, 5)), name="A", backend=backend)
e1 = a[0]
e2 = a[1]
e3 = a[2]
e4 = a[3]
flattened_edge = tn.flatten_edges([e1, e3], new_edge_name="New Edge")
assert a.shape == (3, 5, 8)
assert a.edges == [e2, e4, flattened_edge]
assert flattened_edge.name == "New Edge"
tn.check_correct({a})
def test_flatten_edges_empty_list_value_error(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
tn.connect(a[0], b[0])
with pytest.raises(ValueError):
tn.flatten_edges([])
def test_flatten_edges_different_nodes_value_error(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
c = tn.Node(np.eye(2), backend=backend)
e1 = tn.connect(a[0], b[0])
e2 = tn.connect(a[1], c[0])
tn.connect(b[1], c[1])
with pytest.raises(ValueError):
tn.flatten_edges([e1, e2])
def test_split_trace_edge(backend):
a = tn.Node(np.zeros((2, 6, 4, 6, 5, 5)), backend=backend)
c = tn.Node(np.zeros((2, 4)), backend=backend)
e1 = tn.connect(a[1], a[3])
e2 = tn.connect(a[4], a[5])
external_1 = tn.connect(a[0], c[0])
external_2 = tn.connect(c[1], a[2])
shape = (2, 1, 3)
new_edge_names = ["New Edge 2", "New Edge 1", "New Edge 3"]
new_edges = tn.split_edge(e1, shape, new_edge_names)
assert a.shape == (2, 4, 5, 5) + shape + shape
assert a.edges == [external_1, external_2, e2, e2, *new_edges, *new_edges]
for new_edge, dim in zip(new_edges, shape):
assert new_edge.dimension == dim
for new_edge, new_name in zip(new_edges, new_edge_names):
assert new_edge.name == new_name
tn.check_correct({a, c})
def test_split_edges_standard(backend):
a = tn.Node(np.zeros((6, 3, 5)), name="A", backend=backend)
b = tn.Node(np.zeros((2, 4, 6, 3)), name="B", backend=backend)
e1 = tn.connect(a[0], b[2], "Edge_1_1") # to be split
e2 = tn.connect(a[1], b[3], "Edge_1_2") # background standard edge
edge_a_2 = a[2] # dangling
edge_b_0 = b[0] # dangling
edge_b_1 = b[1] # dangling
shape = (2, 1, 3)
new_edge_names = ["New Edge 2", "New Edge 1", "New Edge 3"]
new_edges = tn.split_edge(e1, shape, new_edge_names)
assert a.shape == (3, 5) + shape
assert b.shape == (2, 4, 3) + shape
assert a.edges == [e2, edge_a_2, *new_edges]
assert b.edges == [edge_b_0, edge_b_1, e2, *new_edges]
for new_edge, dim in zip(new_edges, shape):
assert new_edge.dimension == dim
for new_edge, new_name in zip(new_edges, new_edge_names):
assert new_edge.name == new_name
tn.check_correct({a, b})
def test_split_edges_standard_contract_between(backend):
a = tn.Node(np.random.randn(6, 3, 5), name="A", backend=backend)
b = tn.Node(np.random.randn(2, 4, 6, 3), name="B", backend=backend)
e1 = tn.connect(a[0], b[2], "Edge_1_1") # to be split
tn.connect(a[1], b[3], "Edge_1_2") # background standard edge
node_dict, _ = tn.copy({a, b})
c_prior = node_dict[a] @ node_dict[b]
shape = (2, 1, 3)
tn.split_edge(e1, shape)
tn.check_correct({a, b})
c_post = tn.contract_between(a, b)
np.testing.assert_allclose(c_prior.tensor, c_post.tensor)
def test_split_edges_dangling(backend):
a = tn.Node(np.zeros((2, 10, 4, 5)), name="A", backend=backend)
e1 = a[0]
e2 = a[1]
e3 = a[2]
e4 = a[3]
shape = (2, 5)
new_edge_names = ["New Edge 2", "New Edge 5"]
new_edges = tn.split_edge(e2, shape, new_edge_names)
assert a.shape == (2, 4, 5, 2, 5)
assert a.edges == [e1, e3, e4, *new_edges]
for new_edge, dim in zip(new_edges, shape):
assert new_edge.dimension == dim
for new_edge, new_name in zip(new_edges, new_edge_names):
assert new_edge.name == new_name
tn.check_correct({a})
def test_split_edges_dimension_mismatch_value_error(backend):
a = tn.Node(np.eye(5), backend=backend)
e1 = tn.connect(a[0], a[1])
with pytest.raises(ValueError):
tn.split_edge(e1, (2, 2))
def test_get_shared_edges(backend):
a = tn.Node(np.ones((2, 2, 2)), backend=backend)
b = tn.Node(np.ones((2, 2, 2)), backend=backend)
c = tn.Node(np.ones((2, 2, 2)), backend=backend)
e1 = tn.connect(a[0], b[0])
e2 = tn.connect(b[1], c[1])
e3 = tn.connect(a[2], b[2])
assert tn.get_shared_edges(a, b) == {e1, e3}
assert tn.get_shared_edges(b, c) == {e2}
def test_get_parallel_edge(backend):
a = tn.Node(np.ones((2,) * 5), backend=backend)
b = tn.Node(np.ones((2,) * 5), backend=backend)
edges = set()
for i in {0, 1, 3}:
edges.add(tn.connect(a[i], b[i]))
for e in edges:
assert set(tn.get_parallel_edges(e)) == edges
def test_flatten_edges_between(backend):
a = tn.Node(np.ones((3, 4, 5)), backend=backend)
b = tn.Node(np.ones((5, 4, 3)), backend=backend)
tn.connect(a[0], b[2])
tn.connect(a[1], b[1])
tn.connect(a[2], b[0])
tn.flatten_edges_between(a, b)
tn.check_correct({a, b})
np.testing.assert_allclose(a.tensor, np.ones((60,)))
np.testing.assert_allclose(b.tensor, np.ones((60,)))
def test_flatten_edges_between_no_edges(backend):
a = tn.Node(np.ones((3)), backend=backend)
b = tn.Node(np.ones((3)), backend=backend)
assert tn.flatten_edges_between(a, b) is None
def test_flatten_all_edges(backend):
a = tn.Node(np.ones((3, 3, 5, 6, 2, 2)), backend=backend)
b = tn.Node(np.ones((5, 6, 7)), backend=backend)
c = tn.Node(np.ones((7,)), backend=backend)
trace_edge1 = tn.connect(a[0], a[1])
trace_edge2 = tn.connect(a[4], a[5])
split_edge1 = tn.connect(a[2], b[0])
split_edge2 = tn.connect(a[3], b[1])
ok_edge = tn.connect(b[2], c[0])
flat_edges = tn.flatten_all_edges({a, b, c})
tn.check_correct({a, b, c})
assert len(flat_edges) == 3
assert trace_edge1 not in flat_edges
assert trace_edge2 not in flat_edges
assert split_edge1 not in flat_edges
assert split_edge2 not in flat_edges
assert ok_edge in flat_edges
def test_contract_between(backend):
a_val = np.random.rand(2, 3, 4, 5)
b_val = np.random.rand(3, 5, 6, 2)
a = tn.Node(a_val, backend=backend)
b = tn.Node(b_val, backend=backend)
tn.connect(a[0], b[3])
tn.connect(b[1], a[3])
tn.connect(a[1], b[0])
output_axis_names = ["a2", "b2"]
c = tn.contract_between(a, b, name="New Node", axis_names=output_axis_names)
tn.check_correct({c})
# Check expected values.
a_flat = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30))
b_flat = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (6, 30))
final_val = np.matmul(a_flat, b_flat.T)
assert c.name == "New Node"
assert c.axis_names == output_axis_names
np.testing.assert_allclose(c.tensor, final_val)
def test_contract_between_output_edge_order(backend):
a_val = np.random.rand(2, 3, 4, 5)
b_val = np.random.rand(3, 5, 6, 2)
a = tn.Node(a_val, backend=backend)
b = tn.Node(b_val, backend=backend)
tn.connect(a[0], b[3])
tn.connect(b[1], a[3])
tn.connect(a[1], b[0])
output_axis_names = ["b2", "a2"]
c = tn.contract_between(
a,
b,
name="New Node",
axis_names=output_axis_names,
output_edge_order=[b[2], a[2]])
# Check expected values.
a_flat = np.reshape(np.transpose(a_val, (2, 1, 0, 3)), (4, 30))
b_flat = np.reshape(np.transpose(b_val, (2, 0, 3, 1)), (6, 30))
final_val = np.matmul(a_flat, b_flat.T)
assert c.name == "New Node"
assert c.axis_names == output_axis_names
np.testing.assert_allclose(c.tensor, final_val.T)
def test_contract_between_no_outer_product_value_error(backend):
a_val = np.ones((2, 3, 4))
b_val = np.ones((5, 6, 7))
a = tn.Node(a_val, backend=backend)
b = tn.Node(b_val, backend=backend)
with pytest.raises(ValueError):
tn.contract_between(a, b)
def test_contract_between_outer_product_no_value_error(backend):
a_val = np.ones((2, 3, 4))
b_val = np.ones((5, 6, 7))
a = tn.Node(a_val, backend=backend)
b = tn.Node(b_val, backend=backend)
output_axis_names = ["a0", "a1", "a2", "b0", "b1", "b2"]
c = tn.contract_between(
a, b, allow_outer_product=True, axis_names=output_axis_names)
assert c.shape == (2, 3, 4, 5, 6, 7)
assert c.axis_names == output_axis_names
def test_contract_between_outer_product_output_edge_order(backend):
a_val = np.ones((2, 3, 4))
b_val = np.ones((5, 6, 7))
a = tn.Node(a_val, backend=backend)
b = tn.Node(b_val, backend=backend)
output_axis_names = ["b0", "b1", "a0", "b2", "a1", "a2"]
c = tn.contract_between(
a,
b,
allow_outer_product=True,
output_edge_order=[b[0], b[1], a[0], b[2], a[1], a[2]],
axis_names=output_axis_names)
assert c.shape == (5, 6, 2, 7, 3, 4)
assert c.axis_names == output_axis_names
def test_contract_between_trace(backend):
a_val = np.ones((2, 3, 2, 4))
a = tn.Node(a_val, backend=backend)
tn.connect(a[0], a[2])
c = tn.contract_between(a, a, axis_names=["1", "3"])
assert c.shape == (3, 4)
assert c.axis_names == ["1", "3"]
def test_contract_between_trace_output_edge_order(backend):
a_val = np.ones((2, 3, 2, 4))
a = tn.Node(a_val, backend=backend)
tn.connect(a[0], a[2])
c = tn.contract_between(
a, a, output_edge_order=[a[3], a[1]], axis_names=["3", "1"])
assert c.shape == (4, 3)
assert c.axis_names == ["3", "1"]
def test_contract_parallel(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
edge1 = tn.connect(a[0], b[0])
tn.connect(a[1], b[1])
c = tn.contract_parallel(edge1)
np.testing.assert_allclose(c.tensor, 2.0)
def test_remove_node(backend):
a = tn.Node(np.eye(2), backend=backend)
b = tn.Node(np.eye(2), backend=backend)
tn.connect(a[0], b[0])
broken_edges_by_name, broken_edges_by_axis = tn.remove_node(b)
assert broken_edges_by_name == {"0": a[0]}
assert broken_edges_by_axis == {0: a[0]}
def test_from_topology(backend):
#pylint: disable=unbalanced-tuple-unpacking
x, y, z = tn.from_topology(
"abc,bceg,adef",
[np.ones((2,) * n) for n in [3, 4, 4]],
backend=backend)
assert x.axis_names == ['a', 'b', 'c']
assert y.axis_names == ['b', 'c', 'e', 'g']
assert z.axis_names == ['a', 'd', 'e', 'f']
assert x['a'] is z['a']
assert x['b'] is y['b']
assert x['c'] is y['c']
assert z['d'].is_dangling()
assert y['e'] is z['e']
assert z['f'].is_dangling()
assert y['g'].is_dangling()
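# Note: every test above takes a `backend` argument that pytest injects from a
# fixture defined outside this file (typically in conftest.py). A minimal sketch
# of such a fixture, parametrized over the backends whose dtypes are listed at
# the top of this file, might look like:
#
#     @pytest.fixture(params=["numpy", "tensorflow", "pytorch", "jax"])
#     def backend(request):
#         return request.param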
|
from __future__ import print_function
import argparse
import os
import sys
import threading
import time
import logging
_stash = globals()["_stash"]
try:
import pyftpdlib
except ImportError:
print("Installing pyftpdlib...")
_stash("pip install pyftpdlib")
es = os.getenv("?")
# os.getenv() returns a string, so compare against "0" rather than the int 0
if es not in ("0", 0):
print(_stash.text_color("Failed to install pyftpdlib!", "red"))
sys.exit(1)
from pyftpdlib.authorizers import DummyAuthorizer
from pyftpdlib.servers import FTPServer
from pyftpdlib.handlers import FTPHandler
def run(ns):
"""starts the server."""
auth = DummyAuthorizer()
if ns.user is not None:
auth.add_user(ns.user, ns.pswd, ns.path, perm=ns.perm)
else:
auth.add_anonymous(ns.path, perm=ns.perm)
handler = FTPHandler
handler.authorizer = auth
handler.banner = "StaSh v{v} FTP-Server".format(v=_stash.__version__)
address = ("0.0.0.0", ns.port)
server = FTPServer(address, handler)
server.max_cons = 128
server.max_cons_per_ip = 128
# setup logging
logger = logging.getLogger("pyftpdlib")
logger.setLevel(logging.CRITICAL)
logger.propagate = False
# server needs to run in a thread to be killable
thr = threading.Thread(name="FTP-Server Thread", target=server.serve_forever)
thr.daemon = True
thr.start()
print("FTP-Server started on {h}:{p}".format(h=address[0], p=str(address[1])))
try:
while True:
time.sleep(0.2)
except KeyboardInterrupt:
print("Stopping Server...")
server.close_all()
if __name__ == "__main__":
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument("-p", "--port", action="store", type=int, default=21, dest="port", help="port to listen on")
parser.add_argument("-u", "--user", action="store", default=None, dest="user", help="username (default: anonymous)")
parser.add_argument("--pswd", action="store", default=None, dest="pswd", help="password")
parser.add_argument("--perm", action="store", default="elradfmwM", dest="perm", help="permissions of the user")
parser.add_argument("--path", action="store", default=os.getcwd(), dest="path", help="path to serve")
ns = parser.parse_args()
if (ns.user is not None) and (ns.pswd is None):
print(_stash.text_color("Error: If user is given, pswd must also be given!", "red"))
sys.exit(1)
if (ns.pswd is not None) and (ns.user is None):
print(_stash.text_color("Error: If pswd is given, user must also be given!", "red"))
sys.exit(1)
run(ns)
|
from datetime import datetime
import logging
from regenmaschine.errors import RequestError
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import ATTR_ID
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import RainMachineEntity
from .const import (
CONF_ZONE_RUN_TIME,
DATA_CLIENT,
DATA_PROGRAMS,
DATA_ZONES,
DATA_ZONES_DETAILS,
DOMAIN as RAINMACHINE_DOMAIN,
PROGRAM_UPDATE_TOPIC,
ZONE_UPDATE_TOPIC,
)
_LOGGER = logging.getLogger(__name__)
ATTR_AREA = "area"
ATTR_CS_ON = "cs_on"
ATTR_CURRENT_CYCLE = "current_cycle"
ATTR_CYCLES = "cycles"
ATTR_DELAY = "delay"
ATTR_DELAY_ON = "delay_on"
ATTR_FIELD_CAPACITY = "field_capacity"
ATTR_NEXT_RUN = "next_run"
ATTR_NO_CYCLES = "number_of_cycles"
ATTR_PRECIP_RATE = "sprinkler_head_precipitation_rate"
ATTR_RESTRICTIONS = "restrictions"
ATTR_SLOPE = "slope"
ATTR_SOAK = "soak"
ATTR_SOIL_TYPE = "soil_type"
ATTR_SPRINKLER_TYPE = "sprinkler_head_type"
ATTR_STATUS = "status"
ATTR_SUN_EXPOSURE = "sun_exposure"
ATTR_TIME_REMAINING = "time_remaining"
ATTR_VEGETATION_TYPE = "vegetation_type"
ATTR_ZONES = "zones"
DAYS = ["Monday", "Tuesday", "Wednesday", "Thursday", "Friday", "Saturday", "Sunday"]
RUN_STATUS_MAP = {0: "Not Running", 1: "Running", 2: "Queued"}
SOIL_TYPE_MAP = {
0: "Not Set",
1: "Clay Loam",
2: "Silty Clay",
3: "Clay",
4: "Loam",
5: "Sandy Loam",
6: "Loamy Sand",
7: "Sand",
8: "Sandy Clay",
9: "Silt Loam",
10: "Silt",
99: "Other",
}
SLOPE_TYPE_MAP = {
0: "Not Set",
1: "Flat",
2: "Moderate",
3: "High",
4: "Very High",
99: "Other",
}
SPRINKLER_TYPE_MAP = {
0: "Not Set",
1: "Popup Spray",
2: "Rotors",
3: "Surface Drip",
4: "Bubblers Drip",
99: "Other",
}
SUN_EXPOSURE_MAP = {0: "Not Set", 1: "Full Sun", 2: "Partial Shade", 3: "Full Shade"}
VEGETATION_MAP = {
0: "Not Set",
2: "Cool Season Grass",
3: "Fruit Trees",
4: "Flowers",
5: "Vegetables",
6: "Citrus",
7: "Trees and Bushes",
9: "Drought Tolerant Plants",
10: "Warm Season Grass",
99: "Other",
}
SWITCH_TYPE_PROGRAM = "program"
SWITCH_TYPE_ZONE = "zone"
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up RainMachine switches based on a config entry."""
rainmachine = hass.data[RAINMACHINE_DOMAIN][DATA_CLIENT][entry.entry_id]
entities = []
for program in rainmachine.data[DATA_PROGRAMS]:
entities.append(RainMachineProgram(rainmachine, program))
for zone in rainmachine.data[DATA_ZONES]:
entities.append(RainMachineZone(rainmachine, zone))
async_add_entities(entities, True)
class RainMachineSwitch(RainMachineEntity, SwitchEntity):
"""A class to represent a generic RainMachine switch."""
def __init__(self, rainmachine, switch_data):
"""Initialize a generic RainMachine switch."""
super().__init__(rainmachine)
self._is_on = False
self._name = switch_data["name"]
self._switch_data = switch_data
self._rainmachine_entity_id = switch_data["uid"]
self._switch_type = None
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._switch_data["active"]
@property
def icon(self) -> str:
"""Return the icon."""
return "mdi:water"
@property
def is_on(self) -> bool:
"""Return whether the program is running."""
return self._is_on
@property
def unique_id(self) -> str:
"""Return a unique, Home Assistant friendly identifier for this entity."""
return "{}_{}_{}".format(
self.rainmachine.device_mac.replace(":", ""),
self._switch_type,
self._rainmachine_entity_id,
)
async def _async_run_switch_coroutine(self, api_coro) -> None:
"""Run a coroutine to toggle the switch."""
try:
resp = await api_coro
except RequestError as err:
_LOGGER.error(
'Error while toggling %s "%s": %s',
self._switch_type,
self.unique_id,
err,
)
return
if resp["statusCode"] != 0:
_LOGGER.error(
'Error while toggling %s "%s": %s',
self._switch_type,
self.unique_id,
resp["message"],
)
return
self.hass.async_create_task(self.rainmachine.async_update_programs_and_zones())
class RainMachineProgram(RainMachineSwitch):
"""A RainMachine program."""
def __init__(self, rainmachine, switch_data):
"""Initialize a generic RainMachine switch."""
super().__init__(rainmachine, switch_data)
self._switch_type = SWITCH_TYPE_PROGRAM
@property
def zones(self) -> list:
"""Return a list of active zones associated with this program."""
return [z for z in self._switch_data["wateringTimes"] if z["active"]]
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, PROGRAM_UPDATE_TOPIC, self._update_state
)
)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the program off."""
await self._async_run_switch_coroutine(
self.rainmachine.controller.programs.stop(self._rainmachine_entity_id)
)
async def async_turn_on(self, **kwargs) -> None:
"""Turn the program on."""
await self._async_run_switch_coroutine(
self.rainmachine.controller.programs.start(self._rainmachine_entity_id)
)
@callback
def update_from_latest_data(self) -> None:
"""Update info for the program."""
[self._switch_data] = [
p
for p in self.rainmachine.data[DATA_PROGRAMS]
if p["uid"] == self._rainmachine_entity_id
]
self._is_on = bool(self._switch_data["status"])
try:
next_run = datetime.strptime(
"{} {}".format(
self._switch_data["nextRun"], self._switch_data["startTime"]
),
"%Y-%m-%d %H:%M",
).isoformat()
except ValueError:
next_run = None
self._attrs.update(
{
ATTR_ID: self._switch_data["uid"],
ATTR_NEXT_RUN: next_run,
ATTR_SOAK: self._switch_data.get("soak"),
ATTR_STATUS: RUN_STATUS_MAP[self._switch_data["status"]],
ATTR_ZONES: ", ".join(z["name"] for z in self.zones),
}
)
class RainMachineZone(RainMachineSwitch):
"""A RainMachine zone."""
def __init__(self, rainmachine, switch_data):
"""Initialize a RainMachine zone."""
super().__init__(rainmachine, switch_data)
self._switch_type = SWITCH_TYPE_ZONE
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, PROGRAM_UPDATE_TOPIC, self._update_state
)
)
self.async_on_remove(
async_dispatcher_connect(self.hass, ZONE_UPDATE_TOPIC, self._update_state)
)
async def async_turn_off(self, **kwargs) -> None:
"""Turn the zone off."""
await self._async_run_switch_coroutine(
self.rainmachine.controller.zones.stop(self._rainmachine_entity_id)
)
async def async_turn_on(self, **kwargs) -> None:
"""Turn the zone on."""
await self._async_run_switch_coroutine(
self.rainmachine.controller.zones.start(
self._rainmachine_entity_id,
self.rainmachine.config_entry.options[CONF_ZONE_RUN_TIME],
)
)
@callback
def update_from_latest_data(self) -> None:
"""Update info for the zone."""
[self._switch_data] = [
z
for z in self.rainmachine.data[DATA_ZONES]
if z["uid"] == self._rainmachine_entity_id
]
[details] = [
z
for z in self.rainmachine.data[DATA_ZONES_DETAILS]
if z["uid"] == self._rainmachine_entity_id
]
self._is_on = bool(self._switch_data["state"])
self._attrs.update(
{
ATTR_STATUS: RUN_STATUS_MAP[self._switch_data["state"]],
ATTR_AREA: details.get("waterSense").get("area"),
ATTR_CURRENT_CYCLE: self._switch_data.get("cycle"),
ATTR_FIELD_CAPACITY: details.get("waterSense").get("fieldCapacity"),
ATTR_ID: self._switch_data["uid"],
ATTR_NO_CYCLES: self._switch_data.get("noOfCycles"),
ATTR_PRECIP_RATE: details.get("waterSense").get("precipitationRate"),
ATTR_RESTRICTIONS: self._switch_data.get("restriction"),
ATTR_SLOPE: SLOPE_TYPE_MAP.get(details.get("slope")),
ATTR_SOIL_TYPE: SOIL_TYPE_MAP.get(details.get("soil")),
ATTR_SPRINKLER_TYPE: SPRINKLER_TYPE_MAP.get(details.get("group_id")),
ATTR_SUN_EXPOSURE: SUN_EXPOSURE_MAP.get(details.get("sun")),
ATTR_TIME_REMAINING: self._switch_data.get("remaining"),
ATTR_VEGETATION_TYPE: VEGETATION_MAP.get(self._switch_data.get("type")),
}
)
|
import pandas as pd
from unittest import TestCase
import numpy as np
from sklearn.decomposition import TruncatedSVD
from scattertext import EmbeddingsResolver, ClassPercentageCompactor, CorpusFromPandas, whitespace_nlp, \
CorpusFromParsedDocuments, ParsedCorpus
from scattertext.test.test_CorpusFromParsedDocuments import build_term_doc_matrix
from scattertext.test.test_corpusFromPandas import get_docs_categories
class WV:
def __init__(self, vocab):
self.vocab = vocab
class MockWord2Vec:
def __init__(self, vocab):
self.wv = WV(vocab)
self.corpus_count = 5
def train(self, *args, **kwargs):
pass
def build_vocab(self, *args):
pass
def __getitem__(self, item):
assert item in self.wv.vocab
return np.zeros(30)
class TestEmbeddingsResolver(TestCase):
@classmethod
def setUp(cls):
categories, documents = get_docs_categories()
cls.df = pd.DataFrame({'category': categories,
'text': documents})
cls.df['parsed'] = cls.df.text.apply(whitespace_nlp)
cls.corpus = CorpusFromParsedDocuments(cls.df, 'category', 'parsed').build()
def test_resolve_embeddings(self):
tdm = self.corpus.get_unigram_corpus().select(ClassPercentageCompactor(term_count=1))
embeddings_resolver = EmbeddingsResolver(tdm)
# embeddings = TruncatedSVD(n_components=20).fit_transform(tdm.get_term_doc_mat().T).T
# embeddings_resolver.set_embeddings(embeddings)
embeddings_resolver = embeddings_resolver.set_embeddings(tdm.get_term_doc_mat())
if self.assertRaisesRegex:
with self.assertRaisesRegex(Exception,
"You have already set embeddings by running set_embeddings or set_embeddings_model."):
embeddings_resolver.set_embeddings_model(None)
embeddings_resolver = EmbeddingsResolver(tdm)
embeddings_resolver = embeddings_resolver.set_embeddings_model(MockWord2Vec(tdm.get_terms()))
if self.assertRaisesRegex:
with self.assertRaisesRegex(Exception,
"You have already set embeddings by running set_embeddings or set_embeddings_model."):
embeddings_resolver.set_embeddings(tdm.get_term_doc_mat())
c, axes = embeddings_resolver.project_embeddings(projection_model=TruncatedSVD(3))
self.assertIsInstance(c, ParsedCorpus)
self.assertEqual(axes.to_dict(), pd.DataFrame(index=['speak'], data={'x': [0.,], 'y':[0.,]}).to_dict())
|
import os
from babelfish import Language
import pytest
from vcr import VCR
from subliminal.exceptions import ConfigurationError
from subliminal.providers.opensubtitles import (
OpenSubtitlesProvider, OpenSubtitlesVipProvider, OpenSubtitlesSubtitle, Unauthorized
)
vcr = VCR(path_transformer=lambda path: path + '.yaml',
record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
match_on=['method', 'scheme', 'host', 'port', 'path', 'query', 'body'],
cassette_library_dir=os.path.realpath(os.path.join('tests', 'cassettes', 'opensubtitles')))
def test_get_matches_movie_hash(movies):
subtitle = OpenSubtitlesSubtitle(Language('deu'), False, None, '1953771409', 'moviehash', 'movie',
'5b8f8f4e41ccb21e', 'Man of Steel',
'Man.of.Steel.German.720p.BluRay.x264-EXQUiSiTE', 2013, 'tt0770828', 0, 0,
'Man.of.Steel.German.720p.BluRay.x264-EXQUiSiTE.srt', None)
matches = subtitle.get_matches(movies['man_of_steel'])
assert matches == {'title', 'year', 'country', 'video_codec', 'imdb_id', 'hash', 'resolution', 'source'}
def test_get_matches_episode(episodes):
subtitle = OpenSubtitlesSubtitle(Language('ell'), False, None, '1953579014', 'fulltext', 'episode',
'0', '"Game of Thrones" Mhysa',
' Game.of.Thrones.S03E10.HDTV.XviD-AFG', 2013, 'tt2178796', 3, 10,
'Game.of.Thrones.S03E10.HDTV.XviD-AFG.srt', None)
matches = subtitle.get_matches(episodes['got_s03e10'])
assert matches == {'imdb_id', 'series', 'year', 'country', 'episode', 'season', 'title'}
def test_get_matches_episode_year(episodes):
subtitle = OpenSubtitlesSubtitle(Language('spa'), False, None, '1953369959', 'tag', 'episode',
'0', '"Dallas" The Price You Pay',
' Dallas.2012.S01E03.HDTV.x264-LOL', 2012, 'tt2205526', 1, 3,
'Dallas.2012.S01E03.HDTV.x264-LOL.srt', 'cp1252')
matches = subtitle.get_matches(episodes['dallas_2012_s01e03'])
assert matches == {'imdb_id', 'series', 'year', 'episode', 'season', 'title'}
def test_get_matches_episode_filename(episodes):
subtitle = OpenSubtitlesSubtitle(Language('por', country='BR'), False, None, '1954453973', 'fulltext', 'episode',
'0', '"Agents of S.H.I.E.L.D." A Fractured House',
'HDTV.x264-KILLERS-mSD-AFG-EVO-KILLERS', 2014, 'tt4078580', 2, 6,
'Marvels.Agents.of.S.H.I.E.L.D.S02E06.720p.HDTV.x264-KILLERS.srt', 'cp1252')
matches = subtitle.get_matches(episodes['marvels_agents_of_shield_s02e06'])
assert matches == {'series', 'year', 'country', 'season', 'episode', 'release_group', 'source', 'resolution',
'video_codec'}
def test_get_matches_episode_tag(episodes):
subtitle = OpenSubtitlesSubtitle(Language('por', country='BR'), False, None, '1954453973', 'tag', 'episode',
'0', '"Agents of S.H.I.E.L.D." A Fractured House',
'HDTV.x264-KILLERS-mSD-AFG-EVO-KILLERS', 2014, 'tt4078580', 2, 6,
'', 'cp1252')
matches = subtitle.get_matches(episodes['marvels_agents_of_shield_s02e06'])
assert matches == {'series', 'year', 'country', 'season', 'episode', 'source', 'video_codec'}
def test_get_matches_imdb_id(movies):
subtitle = OpenSubtitlesSubtitle(Language('fra'), True, None, '1953767650', 'imdbid', 'movie', 0, 'Man of Steel',
'man.of.steel.2013.720p.bluray.x264-felony', 2013, 'tt0770828', 0, 0,
'man.of.steel.2013.720p.bluray.x264-felony.srt', None)
matches = subtitle.get_matches(movies['man_of_steel'])
assert matches == {'title', 'year', 'country', 'video_codec', 'imdb_id', 'resolution', 'source', 'release_group'}
def test_get_matches_no_match(episodes):
subtitle = OpenSubtitlesSubtitle(Language('fra'), False, None, '1953767650', 'imdbid', 'movie', 0, 'Man of Steel',
'man.of.steel.2013.720p.bluray.x264-felony', 2013, 770828, 0, 0,
'man.of.steel.2013.720p.bluray.x264-felony.srt', None)
matches = subtitle.get_matches(episodes['got_s03e10'])
assert matches == set()
def test_configuration_error_no_username():
with pytest.raises(ConfigurationError):
OpenSubtitlesProvider(password='subliminal')
def test_configuration_error_no_password():
with pytest.raises(ConfigurationError):
OpenSubtitlesProvider(username='subliminal')
@pytest.mark.integration
@vcr.use_cassette
def test_login():
provider = OpenSubtitlesProvider('python-subliminal', 'subliminal')
assert provider.token is None
provider.initialize()
assert provider.token is not None
@pytest.mark.integration
@vcr.use_cassette
def test_login_bad_password():
provider = OpenSubtitlesProvider('python-subliminal', 'lanimilbus')
with pytest.raises(Unauthorized):
provider.initialize()
@pytest.mark.integration
@vcr.use_cassette
def test_login_vip_login():
provider = OpenSubtitlesVipProvider('python-subliminal', 'subliminal')
with pytest.raises(Unauthorized):
provider.initialize()
@pytest.mark.integration
@vcr.use_cassette
def test_login_vip_bad_password():
provider = OpenSubtitlesVipProvider('python-subliminal', 'lanimilbus')
with pytest.raises(Unauthorized):
provider.initialize()
@pytest.mark.integration
@vcr.use_cassette
def test_logout():
provider = OpenSubtitlesProvider('python-subliminal', 'subliminal')
provider.initialize()
provider.terminate()
assert provider.token is None
@pytest.mark.integration
@vcr.use_cassette
def test_no_operation():
with OpenSubtitlesProvider() as provider:
provider.no_operation()
@pytest.mark.integration
@vcr.use_cassette
def test_query_not_enough_information():
languages = {Language('eng')}
with OpenSubtitlesProvider() as provider:
with pytest.raises(ValueError) as excinfo:
provider.query(languages)
assert str(excinfo.value) == 'Not enough information'
@pytest.mark.integration
@vcr.use_cassette
def test_query_query_movie(movies):
video = movies['man_of_steel']
languages = {Language('fra')}
expected_subtitles = {'1953767244', '1953770526', '1953150292', '1953647841', '1953767650', '1955181172'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, query=video.title)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_query_episode(episodes):
video = episodes['dallas_2012_s01e03']
languages = {Language('fra')}
expected_subtitles = {'1953147577'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, query=video.series, season=video.season, episode=video.episode)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_tag_movie(movies):
video = movies['enders_game']
languages = {Language('fra')}
expected_subtitles = {'1954121830'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, tag=video.name)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_imdb_id(movies):
video = movies['man_of_steel']
languages = {Language('deu')}
expected_subtitles = {'1953771409', '1953768982'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, imdb_id=video.imdb_id)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_hash_size(movies):
video = movies['man_of_steel']
languages = {Language('eng')}
expected_subtitles = {'1953767678', '1953800590', '1953766751', '1953621994', '1953766883', '1953767330',
'1953766488', '1953766413', '1953766280', '1953767141', '1953766279', '1953785668',
'1953767218'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, hash=video.hashes['opensubtitles'], size=video.size)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_wrong_hash_wrong_size():
languages = {Language('eng')}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, hash='123456787654321', size=99999)
assert len(subtitles) == 0
@pytest.mark.integration
@vcr.use_cassette
def test_query_query_season_episode(episodes):
video = episodes['bbt_s07e05']
languages = {Language('deu')}
expected_subtitles = {'1953771908'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.query(languages, query=video.series, season=video.season, episode=video.episode)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_movie(movies):
video = movies['man_of_steel']
languages = {Language('deu'), Language('fra')}
expected_subtitles = {'1953767244', '1953647841', '1953767650', '1953771409', '1953768982', '1953770526',
'1953608995', '1953608996', '1953150292', '1953600788', '1954879110', '1955181172'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_movie_no_hash(movies):
video = movies['enders_game']
languages = {Language('deu')}
expected_subtitles = {'1954157398', '1954156756', '1954443141'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode(episodes):
video = episodes['marvels_agents_of_shield_s02e06']
languages = {Language('hun')}
expected_subtitles = {'1954464403', '1954454544'}
with OpenSubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.id for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(movies):
video = movies['man_of_steel']
languages = {Language('deu'), Language('fra')}
with OpenSubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
provider.download_subtitle(subtitles[0])
assert subtitles[0].content is not None
assert subtitles[0].is_valid() is True
assert subtitles[0].encoding == 'cp1252'
@pytest.mark.integration
@vcr.use_cassette
def test_tag_match(episodes):
video = episodes['the fall']
languages = {Language('por', 'BR')}
unwanted_subtitle_id = '1954369181' # 'Doc.Martin.S03E01.(24 September 2007).[TVRip (Xvid)]-spa.srt'
with OpenSubtitlesProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
found_subtitle = [s for s in subtitles if s.id == unwanted_subtitle_id and s.matched_by == 'tag'][0]
matches = found_subtitle.get_matches(video)
assert len(subtitles) > 0
assert unwanted_subtitle_id in {subtitle.id for subtitle in subtitles}
    # Assert that this is not a tag match (which would be {'series', 'year', 'season', 'episode'})
assert matches == {'episode', 'year', 'country', 'season'}
|
import time
from flask import g, request
from lemur import factory
from lemur.extensions import metrics
from lemur.users.views import mod as users_bp
from lemur.roles.views import mod as roles_bp
from lemur.auth.views import mod as auth_bp
from lemur.domains.views import mod as domains_bp
from lemur.destinations.views import mod as destinations_bp
from lemur.authorities.views import mod as authorities_bp
from lemur.certificates.views import mod as certificates_bp
from lemur.defaults.views import mod as defaults_bp
from lemur.plugins.views import mod as plugins_bp
from lemur.notifications.views import mod as notifications_bp
from lemur.sources.views import mod as sources_bp
from lemur.endpoints.views import mod as endpoints_bp
from lemur.logs.views import mod as logs_bp
from lemur.api_keys.views import mod as api_key_bp
from lemur.pending_certificates.views import mod as pending_certificates_bp
from lemur.dns_providers.views import mod as dns_providers_bp
from lemur.__about__ import (
__author__,
__copyright__,
__email__,
__license__,
__summary__,
__title__,
__uri__,
__version__,
)
__all__ = [
"__title__",
"__summary__",
"__uri__",
"__version__",
"__author__",
"__email__",
"__license__",
"__copyright__",
]
LEMUR_BLUEPRINTS = (
users_bp,
roles_bp,
auth_bp,
domains_bp,
destinations_bp,
authorities_bp,
certificates_bp,
defaults_bp,
plugins_bp,
notifications_bp,
sources_bp,
endpoints_bp,
logs_bp,
api_key_bp,
pending_certificates_bp,
dns_providers_bp,
)
def create_app(config_path=None):
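    """Create the Lemur Flask application with all blueprints registered."""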
app = factory.create_app(
app_name=__name__, blueprints=LEMUR_BLUEPRINTS, config=config_path
)
configure_hook(app)
return app
def configure_hook(app):
"""
:param app:
:return:
"""
from flask import jsonify
from werkzeug.exceptions import HTTPException
@app.errorhandler(Exception)
def handle_error(e):
code = 500
if isinstance(e, HTTPException):
code = e.code
app.logger.exception(e)
return jsonify(error=str(e)), code
@app.before_request
def before_request():
g.request_start_time = time.time()
@app.after_request
def after_request(response):
# Return early if we don't have the start time
if not hasattr(g, "request_start_time"):
return response
# Get elapsed time in milliseconds
elapsed = time.time() - g.request_start_time
elapsed = int(round(1000 * elapsed))
# Collect request/response tags
tags = {
"endpoint": request.endpoint,
"request_method": request.method.lower(),
"status_code": response.status_code,
}
# Record our response time metric
metrics.send("response_time", "TIMER", elapsed, metric_tags=tags)
metrics.send("status_code_{}".format(response.status_code), "counter", 1)
return response
|
from datetime import timedelta
import time
from clementineremote import ClementineRemote
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_ACCESS_TOKEN,
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = "Clementine Remote"
DEFAULT_PORT = 5500
SCAN_INTERVAL = timedelta(seconds=5)
SUPPORT_CLEMENTINE = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_STEP
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_VOLUME_SET
| SUPPORT_NEXT_TRACK
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_ACCESS_TOKEN): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Clementine platform."""
host = config[CONF_HOST]
port = config[CONF_PORT]
token = config.get(CONF_ACCESS_TOKEN)
client = ClementineRemote(host, port, token, reconnect=True)
add_entities([ClementineDevice(client, config[CONF_NAME])])
class ClementineDevice(MediaPlayerEntity):
"""Representation of Clementine Player."""
def __init__(self, client, name):
"""Initialize the Clementine device."""
self._client = client
self._name = name
self._muted = False
self._volume = 0.0
self._track_id = 0
self._last_track_id = 0
self._track_name = ""
self._track_artist = ""
self._track_album_name = ""
self._state = None
def update(self):
"""Retrieve the latest data from the Clementine Player."""
try:
client = self._client
if client.state == "Playing":
self._state = STATE_PLAYING
elif client.state == "Paused":
self._state = STATE_PAUSED
elif client.state == "Disconnected":
self._state = STATE_OFF
else:
self._state = STATE_PAUSED
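            # Consider the player off if it hasn't reported in for 40 seconds.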
if client.last_update and (time.time() - client.last_update > 40):
self._state = STATE_OFF
self._volume = float(client.volume) if client.volume else 0.0
if client.current_track:
self._track_id = client.current_track["track_id"]
self._track_name = client.current_track["title"]
self._track_artist = client.current_track["track_artist"]
self._track_album_name = client.current_track["track_album"]
except Exception:
self._state = STATE_OFF
raise
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume / 100.0
@property
def source(self):
"""Return current source name."""
source_name = "Unknown"
client = self._client
if client.active_playlist_id in client.playlists:
source_name = client.playlists[client.active_playlist_id]["name"]
return source_name
@property
def source_list(self):
"""List of available input sources."""
source_names = [s["name"] for s in self._client.playlists.values()]
return source_names
def select_source(self, source):
"""Select input source."""
client = self._client
sources = [s for s in client.playlists.values() if s["name"] == source]
if len(sources) == 1:
client.change_song(sources[0]["id"], 0)
@property
def media_content_type(self):
"""Content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_title(self):
"""Title of current playing media."""
return self._track_name
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
return self._track_artist
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._track_album_name
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_CLEMENTINE
@property
def media_image_hash(self):
"""Hash value for media image."""
if self._client.current_track:
return self._client.current_track["track_id"]
return None
async def async_get_media_image(self):
"""Fetch media image of current playing image."""
if self._client.current_track:
image = bytes(self._client.current_track["art"])
return (image, "image/png")
return None, None
def volume_up(self):
"""Volume up the media player."""
newvolume = min(self._client.volume + 4, 100)
self._client.set_volume(newvolume)
def volume_down(self):
"""Volume down media player."""
newvolume = max(self._client.volume - 4, 0)
self._client.set_volume(newvolume)
def mute_volume(self, mute):
"""Send mute command."""
self._client.set_volume(0)
def set_volume_level(self, volume):
"""Set volume level."""
self._client.set_volume(int(100 * volume))
def media_play_pause(self):
"""Simulate play pause media player."""
if self._state == STATE_PLAYING:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._state = STATE_PLAYING
self._client.play()
def media_pause(self):
"""Send media pause command to media player."""
self._state = STATE_PAUSED
self._client.pause()
def media_next_track(self):
"""Send next track command."""
self._client.next()
def media_previous_track(self):
"""Send the previous track command."""
self._client.previous()
|
import textwrap
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.utils import usertypes
from qutebrowser.browser import browsertab
@pytest.fixture
def caret(web_tab, qtbot, mode_manager):
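    """Load the caret testdata page and enter caret mode."""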
web_tab.container.expose()
with qtbot.wait_signal(web_tab.load_finished, timeout=10000):
web_tab.load_url(QUrl('qute://testdata/data/caret.html'))
with qtbot.wait_signal(web_tab.caret.selection_toggled):
mode_manager.enter(usertypes.KeyMode.caret)
return web_tab.caret
class Selection:
"""Helper to interact with the caret selection."""
def __init__(self, qtbot, caret):
self._qtbot = qtbot
self._caret = caret
def check(self, expected, *, strip=False):
"""Check whether we got the expected selection.
Since (especially on Windows) the selection is empty if we're checking
too quickly, we try to read it multiple times.
"""
for _ in range(10):
with self._qtbot.wait_callback() as callback:
self._caret.selection(callback)
selection = callback.args[0]
if selection:
if strip:
selection = selection.strip()
assert selection == expected
return
elif not selection and not expected:
return
self._qtbot.wait(50)
assert False, 'Failed to get selection!'
def check_multiline(self, expected, *, strip=False):
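        """Check a multi-line selection against dedented expected text."""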
self.check(textwrap.dedent(expected).strip(), strip=strip)
def toggle(self, *, line=False):
"""Toggle the selection and return the new selection state."""
with self._qtbot.wait_signal(self._caret.selection_toggled) as blocker:
self._caret.toggle_selection(line=line)
return blocker.args[0]
@pytest.fixture
def selection(qtbot, caret):
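    """Return a Selection helper wrapping the caret fixture."""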
return Selection(qtbot, caret)
def test_toggle(caret, selection, qtbot):
"""Make sure calling toggleSelection produces the correct callback values.
This also makes sure that the SelectionState enum in JS lines up with the
Python browsertab.SelectionState enum.
"""
assert selection.toggle() == browsertab.SelectionState.normal
assert selection.toggle(line=True) == browsertab.SelectionState.line
assert selection.toggle() == browsertab.SelectionState.normal
assert selection.toggle() == browsertab.SelectionState.none
def test_selection_callback_wrong_mode(qtbot, caplog,
webengine_tab, mode_manager):
"""Test what calling the selection callback outside of caret mode.
It should be ignored, as something could have left caret mode while the
async callback was happening, so we don't want to mess with the status bar.
"""
assert mode_manager.mode == usertypes.KeyMode.normal
with qtbot.assertNotEmitted(webengine_tab.caret.selection_toggled):
webengine_tab.caret._toggle_sel_translate('normal')
msg = 'Ignoring caret selection callback in KeyMode.normal'
assert caplog.messages == [msg]
class TestDocument:
def test_selecting_entire_document(self, caret, selection):
selection.toggle()
caret.move_to_end_of_document()
selection.check_multiline("""
one two three
eins zwei drei
four five six
vier fünf sechs
""", strip=True)
def test_moving_to_end_and_start(self, caret, selection):
caret.move_to_end_of_document()
caret.move_to_start_of_document()
selection.toggle()
caret.move_to_end_of_word()
selection.check("one")
def test_moving_to_end_and_start_with_selection(self, caret, selection):
caret.move_to_end_of_document()
selection.toggle()
caret.move_to_start_of_document()
selection.check_multiline("""
one two three
eins zwei drei
four five six
vier fünf sechs
""", strip=True)
class TestBlock:
def test_selecting_block(self, caret, selection):
selection.toggle()
caret.move_to_end_of_next_block()
selection.check_multiline("""
one two three
eins zwei drei
""")
def test_moving_back_to_the_end_of_prev_block_with_sel(self, caret, selection):
caret.move_to_end_of_next_block(2)
selection.toggle()
caret.move_to_end_of_prev_block()
caret.move_to_prev_word()
selection.check_multiline("""
drei
four five six
""")
def test_moving_back_to_the_end_of_prev_block(self, caret, selection):
caret.move_to_end_of_next_block(2)
caret.move_to_end_of_prev_block()
selection.toggle()
caret.move_to_prev_word()
selection.check("drei")
def test_moving_back_to_the_start_of_prev_block_with_sel(self, caret, selection):
caret.move_to_end_of_next_block(2)
selection.toggle()
caret.move_to_start_of_prev_block()
selection.check_multiline("""
eins zwei drei
four five six
""")
def test_moving_back_to_the_start_of_prev_block(self, caret, selection):
caret.move_to_end_of_next_block(2)
caret.move_to_start_of_prev_block()
selection.toggle()
caret.move_to_next_word()
selection.check("eins ")
def test_moving_to_the_start_of_next_block_with_sel(self, caret, selection):
selection.toggle()
caret.move_to_start_of_next_block()
selection.check("one two three\n")
def test_moving_to_the_start_of_next_block(self, caret, selection):
caret.move_to_start_of_next_block()
selection.toggle()
caret.move_to_end_of_word()
selection.check("eins")
class TestLine:
def test_selecting_a_line(self, caret, selection):
selection.toggle()
caret.move_to_end_of_line()
selection.check("one two three")
def test_moving_and_selecting_a_line(self, caret, selection):
caret.move_to_next_line()
selection.toggle()
caret.move_to_end_of_line()
selection.check("eins zwei drei")
def test_selecting_next_line(self, caret, selection):
selection.toggle()
caret.move_to_next_line()
selection.check("one two three\n")
def test_moving_to_end_and_to_start_of_line(self, caret, selection):
caret.move_to_end_of_line()
caret.move_to_start_of_line()
selection.toggle()
caret.move_to_end_of_word()
selection.check("one")
def test_selecting_a_line_backwards(self, caret, selection):
caret.move_to_end_of_line()
selection.toggle()
caret.move_to_start_of_line()
selection.check("one two three")
def test_selecting_previous_line(self, caret, selection):
caret.move_to_next_line()
selection.toggle()
caret.move_to_prev_line()
selection.check("one two three\n")
def test_moving_to_previous_line(self, caret, selection):
caret.move_to_next_line()
caret.move_to_prev_line()
selection.toggle()
caret.move_to_next_line()
selection.check("one two three\n")
class TestWord:
def test_selecting_a_word(self, caret, selection):
selection.toggle()
caret.move_to_end_of_word()
selection.check("one")
def test_moving_to_end_and_selecting_a_word(self, caret, selection):
caret.move_to_end_of_word()
selection.toggle()
caret.move_to_end_of_word()
selection.check(" two")
def test_moving_to_next_word_and_selecting_a_word(self, caret, selection):
caret.move_to_next_word()
selection.toggle()
caret.move_to_end_of_word()
selection.check("two")
def test_moving_to_next_word_and_selecting_until_next_word(self, caret, selection):
caret.move_to_next_word()
selection.toggle()
caret.move_to_next_word()
selection.check("two ")
def test_moving_to_previous_word_and_selecting_a_word(self, caret, selection):
caret.move_to_end_of_word()
selection.toggle()
caret.move_to_prev_word()
selection.check("one")
def test_moving_to_previous_word(self, caret, selection):
caret.move_to_end_of_word()
caret.move_to_prev_word()
selection.toggle()
caret.move_to_end_of_word()
selection.check("one")
class TestChar:
def test_selecting_a_char(self, caret, selection):
selection.toggle()
caret.move_to_next_char()
selection.check("o")
def test_moving_and_selecting_a_char(self, caret, selection):
caret.move_to_next_char()
selection.toggle()
caret.move_to_next_char()
selection.check("n")
def test_selecting_previous_char(self, caret, selection):
caret.move_to_end_of_word()
selection.toggle()
caret.move_to_prev_char()
selection.check("e")
def test_moving_to_previous_char(self, caret, selection):
caret.move_to_end_of_word()
caret.move_to_prev_char()
selection.toggle()
caret.move_to_end_of_word()
selection.check("e")
def test_drop_selection(caret, selection):
selection.toggle()
caret.move_to_end_of_word()
caret.drop_selection()
selection.check("")
class TestSearch:
@pytest.mark.no_xvfb
def test_yanking_a_searched_line(self, caret, selection, mode_manager, web_tab, qtbot):
mode_manager.leave(usertypes.KeyMode.caret)
with qtbot.wait_callback() as callback:
web_tab.search.search('fiv', result_cb=callback)
callback.assert_called_with(True)
mode_manager.enter(usertypes.KeyMode.caret)
caret.move_to_end_of_line()
selection.check('five six')
@pytest.mark.no_xvfb
def test_yanking_a_searched_line_with_multiple_matches(self, caret, selection, mode_manager, web_tab, qtbot):
mode_manager.leave(usertypes.KeyMode.caret)
with qtbot.wait_callback() as callback:
web_tab.search.search('w', result_cb=callback)
callback.assert_called_with(True)
with qtbot.wait_callback() as callback:
web_tab.search.next_result(result_cb=callback)
callback.assert_called_with(True)
mode_manager.enter(usertypes.KeyMode.caret)
caret.move_to_end_of_line()
selection.check('wei drei')
class TestFollowSelected:
LOAD_STARTED_DELAY = 50
@pytest.fixture(params=[True, False], autouse=True)
def toggle_js(self, request, config_stub):
config_stub.val.content.javascript.enabled = request.param
def test_follow_selected_without_a_selection(self, qtbot, caret, selection, web_tab,
mode_manager):
caret.move_to_next_word() # Move cursor away from the link
mode_manager.leave(usertypes.KeyMode.caret)
with qtbot.wait_signal(caret.follow_selected_done):
with qtbot.assert_not_emitted(web_tab.load_started,
wait=self.LOAD_STARTED_DELAY):
caret.follow_selected()
def test_follow_selected_with_text(self, qtbot, caret, selection, web_tab):
caret.move_to_next_word()
selection.toggle()
caret.move_to_end_of_word()
with qtbot.wait_signal(caret.follow_selected_done):
with qtbot.assert_not_emitted(web_tab.load_started,
wait=self.LOAD_STARTED_DELAY):
caret.follow_selected()
def test_follow_selected_with_link(self, caret, selection, config_stub,
qtbot, web_tab):
selection.toggle()
caret.move_to_end_of_word()
with qtbot.wait_signal(web_tab.load_finished):
with qtbot.wait_signal(caret.follow_selected_done):
caret.follow_selected()
assert web_tab.url().path() == '/data/hello.txt'
class TestReverse:
def test_does_not_change_selection(self, caret, selection):
selection.toggle()
caret.reverse_selection()
selection.check("")
def test_repetition_of_movement_results_in_empty_selection(self, caret, selection):
selection.toggle()
caret.move_to_end_of_word()
caret.reverse_selection()
caret.move_to_end_of_word()
selection.check("")
def test_reverse(self, caret, selection):
selection.toggle()
caret.move_to_end_of_word()
caret.reverse_selection()
caret.move_to_next_char()
selection.check("ne")
caret.reverse_selection()
caret.move_to_next_char()
selection.check("ne ")
caret.move_to_end_of_line()
selection.check("ne two three")
caret.reverse_selection()
caret.move_to_start_of_line()
selection.check("one two three")
class TestLineSelection:
def test_toggle(self, caret, selection):
selection.toggle(line=True)
selection.check("one two three")
def test_toggle_untoggle(self, caret, selection):
selection.toggle()
selection.check("")
selection.toggle(line=True)
selection.check("one two three")
selection.toggle()
selection.check("one two three")
def test_from_center(self, caret, selection):
caret.move_to_next_char(4)
selection.toggle(line=True)
selection.check("one two three")
def test_more_lines(self, caret, selection):
selection.toggle(line=True)
caret.move_to_next_line(2)
selection.check_multiline("""
one two three
eins zwei drei
four five six
""", strip=True)
def test_not_selecting_char(self, caret, selection):
selection.toggle(line=True)
caret.move_to_next_char()
selection.check("one two three")
caret.move_to_prev_char()
selection.check("one two three")
def test_selecting_prev_next_word(self, caret, selection):
selection.toggle(line=True)
caret.move_to_next_word()
selection.check("one two three")
caret.move_to_prev_word()
selection.check("one two three")
def test_selecting_end_word(self, caret, selection):
selection.toggle(line=True)
caret.move_to_end_of_word()
selection.check("one two three")
def test_selecting_prev_next_line(self, caret, selection):
selection.toggle(line=True)
caret.move_to_next_line()
selection.check_multiline("""
one two three
eins zwei drei
""", strip=True)
caret.move_to_prev_line()
selection.check("one two three")
def test_not_selecting_start_end_line(self, caret, selection):
selection.toggle(line=True)
caret.move_to_end_of_line()
selection.check("one two three")
caret.move_to_start_of_line()
selection.check("one two three")
def test_selecting_block(self, caret, selection):
selection.toggle(line=True)
caret.move_to_end_of_next_block()
selection.check_multiline("""
one two three
eins zwei drei
""", strip=True)
@pytest.mark.not_mac(
reason='https://github.com/qutebrowser/qutebrowser/issues/5459')
def test_selecting_start_end_document(self, caret, selection):
selection.toggle(line=True)
caret.move_to_end_of_document()
selection.check_multiline("""
one two three
eins zwei drei
four five six
vier fünf sechs
""", strip=True)
caret.move_to_start_of_document()
selection.check("one two three")
|
import asyncio
import os
from unittest.mock import Mock
import pytest
from homeassistant import bootstrap, core, runner
import homeassistant.config as config_util
from homeassistant.exceptions import HomeAssistantError
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import (
MockModule,
MockPlatform,
get_test_config_dir,
mock_coro,
mock_entity_platform,
mock_integration,
)
ORIG_TIMEZONE = dt_util.DEFAULT_TIME_ZONE
VERSION_PATH = os.path.join(get_test_config_dir(), config_util.VERSION_FILE)
@pytest.fixture(autouse=True)
def apply_mock_storage(hass_storage):
"""Apply the storage mock."""
@pytest.fixture(autouse=True)
async def apply_stop_hass(stop_hass):
"""Make sure all hass are stopped."""
@pytest.fixture(autouse=True)
def mock_http_start_stop():
"""Mock HTTP start and stop."""
with patch(
"homeassistant.components.http.start_http_server_and_save_config"
), patch("homeassistant.components.http.HomeAssistantHTTP.stop"):
yield
@patch("homeassistant.bootstrap.async_enable_logging", Mock())
async def test_home_assistant_core_config_validation(hass):
"""Test if we pass in wrong information for HA conf."""
# Extensive HA conf validation testing is done
result = await bootstrap.async_from_config_dict(
{"homeassistant": {"latitude": "some string"}}, hass
)
assert result is None
async def test_async_enable_logging(hass):
"""Test to ensure logging is migrated to the queue handlers."""
with patch("logging.getLogger"), patch(
"homeassistant.bootstrap.async_activate_log_queue_handler"
) as mock_async_activate_log_queue_handler:
bootstrap.async_enable_logging(hass)
mock_async_activate_log_queue_handler.assert_called_once()
async def test_load_hassio(hass):
"""Test that we load Hass.io component."""
with patch.dict(os.environ, {}, clear=True):
assert bootstrap._get_domains(hass, {}) == set()
with patch.dict(os.environ, {"HASSIO": "1"}):
assert bootstrap._get_domains(hass, {}) == {"hassio"}
async def test_empty_setup(hass):
"""Test an empty set up loads the core."""
await bootstrap.async_from_config_dict({}, hass)
for domain in bootstrap.CORE_INTEGRATIONS:
assert domain in hass.config.components, domain
async def test_core_failure_loads_safe_mode(hass, caplog):
"""Test failing core setup aborts further setup."""
with patch(
"homeassistant.components.homeassistant.async_setup",
return_value=mock_coro(False),
):
await bootstrap.async_from_config_dict({"group": {}}, hass)
assert "core failed to initialize" in caplog.text
# We aborted early, group not set up
assert "group" not in hass.config.components
async def test_setting_up_config(hass):
"""Test we set up domains in config."""
await bootstrap._async_set_up_integrations(
hass, {"group hello": {}, "homeassistant": {}}
)
assert "group" in hass.config.components
async def test_setup_after_deps_all_present(hass):
"""Test after_dependencies when all present."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
with patch(
"homeassistant.components.logger.async_setup", gen_domain_setup("logger")
):
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}, "logger": {}}
)
assert "root" in hass.config.components
assert "first_dep" in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["logger", "root", "first_dep", "second_dep"]
async def test_setup_after_deps_in_stage_1_ignored(hass):
"""Test after_dependencies are ignored in stage 1."""
    # This test relies on "cloud" being a stage 1 integration.
assert "cloud" in bootstrap.STAGE_1_INTEGRATIONS
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="normal_integration",
async_setup=gen_domain_setup("normal_integration"),
partial_manifest={"after_dependencies": ["an_after_dep"]},
),
)
mock_integration(
hass,
MockModule(
domain="an_after_dep",
async_setup=gen_domain_setup("an_after_dep"),
),
)
mock_integration(
hass,
MockModule(
domain="cloud",
async_setup=gen_domain_setup("cloud"),
partial_manifest={"after_dependencies": ["normal_integration"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"cloud": {}, "normal_integration": {}, "an_after_dep": {}}
)
assert "normal_integration" in hass.config.components
assert "cloud" in hass.config.components
assert order == ["cloud", "an_after_dep", "normal_integration"]
async def test_setup_after_deps_via_platform(hass):
"""Test after_dependencies set up via platform."""
order = []
after_dep_event = asyncio.Event()
def gen_domain_setup(domain):
async def async_setup(hass, config):
if domain == "after_dep_of_platform_int":
await after_dep_event.wait()
order.append(domain)
return True
return async_setup
mock_integration(
hass,
MockModule(
domain="after_dep_of_platform_int",
async_setup=gen_domain_setup("after_dep_of_platform_int"),
),
)
mock_integration(
hass,
MockModule(
domain="platform_int",
async_setup=gen_domain_setup("platform_int"),
partial_manifest={"after_dependencies": ["after_dep_of_platform_int"]},
),
)
mock_entity_platform(hass, "light.platform_int", MockPlatform())
@core.callback
def continue_loading(_):
"""When light component loaded, continue other loading."""
after_dep_event.set()
hass.bus.async_listen_once("component_loaded", continue_loading)
await bootstrap._async_set_up_integrations(
hass, {"light": {"platform": "platform_int"}, "after_dep_of_platform_int": {}}
)
assert "light" in hass.config.components
assert "after_dep_of_platform_int" in hass.config.components
assert "platform_int" in hass.config.components
assert order == ["after_dep_of_platform_int", "platform_int"]
async def test_setup_after_deps_not_trigger_load(hass):
"""Test after_dependencies does not trigger loading it."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="first_dep",
async_setup=gen_domain_setup("first_dep"),
partial_manifest={"after_dependencies": ["root"]},
),
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(hass, {"root": {}, "second_dep": {}})
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
async def test_setup_after_deps_not_present(hass):
"""Test after_dependencies when referenced integration doesn't exist."""
order = []
def gen_domain_setup(domain):
async def async_setup(hass, config):
order.append(domain)
return True
return async_setup
mock_integration(
hass, MockModule(domain="root", async_setup=gen_domain_setup("root"))
)
mock_integration(
hass,
MockModule(
domain="second_dep",
async_setup=gen_domain_setup("second_dep"),
partial_manifest={"after_dependencies": ["first_dep"]},
),
)
await bootstrap._async_set_up_integrations(
hass, {"root": {}, "first_dep": {}, "second_dep": {}}
)
assert "root" in hass.config.components
assert "first_dep" not in hass.config.components
assert "second_dep" in hass.config.components
assert order == ["root", "second_dep"]
@pytest.fixture
def mock_is_virtual_env():
"""Mock enable logging."""
with patch(
"homeassistant.bootstrap.is_virtual_env", return_value=False
) as is_virtual_env:
yield is_virtual_env
@pytest.fixture
def mock_enable_logging():
"""Mock enable logging."""
with patch("homeassistant.bootstrap.async_enable_logging") as enable_logging:
yield enable_logging
@pytest.fixture
def mock_mount_local_lib_path():
"""Mock enable logging."""
with patch(
"homeassistant.bootstrap.async_mount_local_lib_path"
) as mount_local_lib_path:
yield mount_local_lib_path
@pytest.fixture
def mock_process_ha_config_upgrade():
"""Mock enable logging."""
with patch(
"homeassistant.config.process_ha_config_upgrade"
) as process_ha_config_upgrade:
yield process_ha_config_upgrade
@pytest.fixture
def mock_ensure_config_exists():
"""Mock enable logging."""
with patch(
"homeassistant.config.async_ensure_config_exists", return_value=True
) as ensure_config_exists:
yield ensure_config_exists
async def test_setup_hass(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
caplog,
loop,
):
"""Test it works."""
verbose = Mock()
log_rotate_days = Mock()
log_file = Mock()
log_no_color = Mock()
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={"browser": {}, "frontend": {}},
), patch.object(bootstrap, "LOG_SLOW_STARTUP_INTERVAL", 5000):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=verbose,
log_rotate_days=log_rotate_days,
log_file=log_file,
log_no_color=log_no_color,
skip_pip=True,
safe_mode=False,
),
)
assert "Waiting on integrations to complete setup" not in caplog.text
assert "browser" in hass.config.components
assert "safe_mode" not in hass.config.components
assert len(mock_enable_logging.mock_calls) == 1
assert mock_enable_logging.mock_calls[0][1] == (
hass,
verbose,
log_rotate_days,
log_file,
log_no_color,
)
assert len(mock_mount_local_lib_path.mock_calls) == 1
assert len(mock_ensure_config_exists.mock_calls) == 1
assert len(mock_process_ha_config_upgrade.mock_calls) == 1
async def test_setup_hass_takes_longer_than_log_slow_startup(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
caplog,
loop,
):
"""Test it works."""
verbose = Mock()
log_rotate_days = Mock()
log_file = Mock()
log_no_color = Mock()
async def _async_setup_that_blocks_startup(*args, **kwargs):
await asyncio.sleep(0.6)
return True
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={"browser": {}, "frontend": {}},
), patch.object(bootstrap, "LOG_SLOW_STARTUP_INTERVAL", 0.3), patch(
"homeassistant.components.frontend.async_setup",
side_effect=_async_setup_that_blocks_startup,
):
await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=verbose,
log_rotate_days=log_rotate_days,
log_file=log_file,
log_no_color=log_no_color,
skip_pip=True,
safe_mode=False,
),
)
assert "Waiting on integrations to complete setup" in caplog.text
async def test_setup_hass_invalid_yaml(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
with patch(
"homeassistant.config.async_hass_config_yaml", side_effect=HomeAssistantError
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=False,
),
)
assert "safe_mode" in hass.config.components
assert len(mock_mount_local_lib_path.mock_calls) == 0
async def test_setup_hass_config_dir_nonexistent(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
mock_ensure_config_exists.return_value = False
assert (
await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=False,
),
)
is None
)
async def test_setup_hass_safe_mode(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
with patch("homeassistant.components.browser.setup") as browser_setup, patch(
"homeassistant.config_entries.ConfigEntries.async_domains",
return_value=["browser"],
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=True,
),
)
assert "safe_mode" in hass.config.components
assert len(mock_mount_local_lib_path.mock_calls) == 0
# Validate we didn't try to set up config entry.
assert "browser" not in hass.config.components
assert len(browser_setup.mock_calls) == 0
async def test_setup_hass_invalid_core_config(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test it works."""
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={"homeassistant": {"non-existing": 1}},
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=False,
log_rotate_days=10,
log_file="",
log_no_color=False,
skip_pip=True,
safe_mode=False,
),
)
assert "safe_mode" in hass.config.components
async def test_setup_safe_mode_if_no_frontend(
mock_enable_logging,
mock_is_virtual_env,
mock_mount_local_lib_path,
mock_ensure_config_exists,
mock_process_ha_config_upgrade,
loop,
):
"""Test we setup safe mode if frontend didn't load."""
verbose = Mock()
log_rotate_days = Mock()
log_file = Mock()
log_no_color = Mock()
with patch(
"homeassistant.config.async_hass_config_yaml",
return_value={
"homeassistant": {
"internal_url": "http://192.168.1.100:8123",
"external_url": "https://abcdef.ui.nabu.casa",
},
"map": {},
"person": {"invalid": True},
},
):
hass = await bootstrap.async_setup_hass(
runner.RuntimeConfig(
config_dir=get_test_config_dir(),
verbose=verbose,
log_rotate_days=log_rotate_days,
log_file=log_file,
log_no_color=log_no_color,
skip_pip=True,
safe_mode=False,
),
)
assert "safe_mode" in hass.config.components
assert hass.config.config_dir == get_test_config_dir()
assert hass.config.skip_pip
assert hass.config.internal_url == "http://192.168.1.100:8123"
assert hass.config.external_url == "https://abcdef.ui.nabu.casa"
|
import logging
from homeassistant.components.sensor import DOMAIN
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util.dt import parse_datetime
from . import MinutPointEntity
from .const import DOMAIN as POINT_DOMAIN, POINT_DISCOVERY_NEW
_LOGGER = logging.getLogger(__name__)
DEVICE_CLASS_SOUND = "sound_level"
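# Maps device class to (icon, rounding precision, unit of measurement).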
SENSOR_TYPES = {
DEVICE_CLASS_TEMPERATURE: (None, 1, TEMP_CELSIUS),
DEVICE_CLASS_PRESSURE: (None, 0, PRESSURE_HPA),
DEVICE_CLASS_HUMIDITY: (None, 1, PERCENTAGE),
DEVICE_CLASS_SOUND: ("mdi:ear-hearing", 1, "dBa"),
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up a Point's sensors based on a config entry."""
async def async_discover_sensor(device_id):
"""Discover and add a discovered sensor."""
client = hass.data[POINT_DOMAIN][config_entry.entry_id]
async_add_entities(
(
MinutPointSensor(client, device_id, sensor_type)
for sensor_type in SENSOR_TYPES
),
True,
)
async_dispatcher_connect(
hass, POINT_DISCOVERY_NEW.format(DOMAIN, POINT_DOMAIN), async_discover_sensor
)
class MinutPointSensor(MinutPointEntity):
"""The platform class required by Home Assistant."""
def __init__(self, point_client, device_id, device_class):
"""Initialize the sensor."""
super().__init__(point_client, device_id, device_class)
self._device_prop = SENSOR_TYPES[device_class]
async def _update_callback(self):
"""Update the value of the sensor."""
_LOGGER.debug("Update sensor value for %s", self)
if self.is_updated:
self._value = await self.device.sensor(self.device_class)
self._updated = parse_datetime(self.device.last_update)
self.async_write_ha_state()
@property
def icon(self):
"""Return the icon representation."""
return self._device_prop[0]
@property
def state(self):
"""Return the state of the sensor."""
if self.value is None:
return None
return round(self.value, self._device_prop[1])
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._device_prop[2]
|
import os
import re
import subprocess
import sys
import time
import portend
from cheroot.test import webtest
import cherrypy
from cherrypy.test import helper
curdir = os.path.abspath(os.path.dirname(__file__))
def read_process(cmd, args=''):
    """Run a command and return its combined stdout/stderr output."""
    # os.popen4 was removed in Python 3; use subprocess with merged streams.
    proc = subprocess.Popen(
        '%s %s' % (cmd, args), shell=True,
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
        universal_newlines=True)
    try:
        firstline = proc.stdout.readline()
        if (re.search(r'(not recognized|No such file|not found)', firstline,
                      re.IGNORECASE)):
            raise IOError('%s must be on your system path.' % cmd)
        output = firstline + proc.stdout.read()
    finally:
        proc.stdout.close()
        proc.wait()
    return output
if sys.platform == 'win32':
APACHE_PATH = 'httpd'
else:
APACHE_PATH = 'apache'
CONF_PATH = 'test_mw.conf'
conf_modwsgi = r"""
# Apache2 server conf file for testing CherryPy with mod_wsgi.
ServerName 127.0.0.1
DocumentRoot "/"
Listen %(port)s
AllowEncodedSlashes On
LoadModule rewrite_module modules/mod_rewrite.so
RewriteEngine on
RewriteMap escaping int:escape
LoadModule log_config_module modules/mod_log_config.so
LogFormat "%%h %%l %%u %%t \"%%r\" %%>s %%b \"%%{Referer}i\" \"%%{User-agent}i\"" combined
CustomLog "%(curdir)s/apache.access.log" combined
ErrorLog "%(curdir)s/apache.error.log"
LogLevel debug
LoadModule wsgi_module modules/mod_wsgi.so
LoadModule env_module modules/mod_env.so
WSGIScriptAlias / "%(curdir)s/modwsgi.py"
SetEnv testmod %(testmod)s
""" # noqa E501
class ModWSGISupervisor(helper.Supervisor):
"""Server Controller for ModWSGI and CherryPy."""
using_apache = True
using_wsgi = True
template = conf_modwsgi
def __str__(self):
return 'ModWSGI Server on %s:%s' % (self.host, self.port)
def start(self, modulename):
mpconf = CONF_PATH
if not os.path.isabs(mpconf):
mpconf = os.path.join(curdir, mpconf)
        output = (self.template %
                  {'port': self.port, 'testmod': modulename,
                   'curdir': curdir})
        # The rendered template is a str, so write the conf file in text mode.
        with open(mpconf, 'w') as f:
            f.write(output)
result = read_process(APACHE_PATH, '-k start -f %s' % mpconf)
if result:
print(result)
# Make a request so mod_wsgi starts up our app.
# If we don't, concurrent initial requests will 404.
portend.occupied('127.0.0.1', self.port, timeout=5)
webtest.openURL('/ihopetheresnodefault', port=self.port)
time.sleep(1)
def stop(self):
"""Gracefully shutdown a server that is serving forever."""
read_process(APACHE_PATH, '-k stop')
loaded = False
def application(environ, start_response):
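    """WSGI app: set up the requested CherryPy test module on first request."""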
global loaded
if not loaded:
loaded = True
modname = 'cherrypy.test.' + environ['testmod']
mod = __import__(modname, globals(), locals(), [''])
mod.setup_server()
cherrypy.config.update({
'log.error_file': os.path.join(curdir, 'test.error.log'),
'log.access_file': os.path.join(curdir, 'test.access.log'),
'environment': 'test_suite',
'engine.SIGHUP': None,
'engine.SIGTERM': None,
})
return cherrypy.tree(environ, start_response)
|
from nuheat.config import SCHEDULE_HOLD, SCHEDULE_RUN, SCHEDULE_TEMPORARY_HOLD
from homeassistant.components.nuheat.const import DOMAIN
from homeassistant.const import CONF_DEVICES, CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import MagicMock, Mock
def _get_mock_thermostat_run():
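    """Return a mock thermostat in RUN schedule mode."""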
serial_number = "12345"
thermostat = Mock(
serial_number=serial_number,
room="Master bathroom",
online=True,
heating=True,
temperature=2222,
celsius=22,
fahrenheit=72,
max_celsius=69,
max_fahrenheit=157,
min_celsius=5,
min_fahrenheit=41,
schedule_mode=SCHEDULE_RUN,
target_celsius=22,
target_fahrenheit=72,
target_temperature=2217,
)
thermostat.get_data = Mock()
thermostat.resume_schedule = Mock()
thermostat.schedule_mode = Mock()
return thermostat
def _get_mock_thermostat_schedule_hold_unavailable():
serial_number = "876"
thermostat = Mock(
serial_number=serial_number,
room="Guest bathroom",
online=False,
heating=False,
temperature=12,
celsius=12,
fahrenheit=102,
max_celsius=99,
max_fahrenheit=357,
min_celsius=9,
min_fahrenheit=21,
schedule_mode=SCHEDULE_HOLD,
target_celsius=23,
target_fahrenheit=79,
target_temperature=2609,
)
thermostat.get_data = Mock()
thermostat.resume_schedule = Mock()
thermostat.schedule_mode = Mock()
return thermostat
def _get_mock_thermostat_schedule_hold_available():
serial_number = "876"
thermostat = Mock(
serial_number=serial_number,
room="Available bathroom",
online=True,
heating=False,
temperature=12,
celsius=12,
fahrenheit=102,
max_celsius=99,
max_fahrenheit=357,
min_celsius=9,
min_fahrenheit=21,
schedule_mode=SCHEDULE_HOLD,
target_celsius=23,
target_fahrenheit=79,
target_temperature=2609,
)
thermostat.get_data = Mock()
thermostat.resume_schedule = Mock()
thermostat.schedule_mode = Mock()
return thermostat
def _get_mock_thermostat_schedule_temporary_hold():
serial_number = "999"
thermostat = Mock(
serial_number=serial_number,
room="Temp bathroom",
online=True,
heating=False,
temperature=14,
celsius=13,
fahrenheit=202,
max_celsius=39,
max_fahrenheit=357,
min_celsius=3,
min_fahrenheit=31,
schedule_mode=SCHEDULE_TEMPORARY_HOLD,
target_celsius=43,
target_fahrenheit=99,
target_temperature=3729,
max_temperature=5000,
min_temperature=1,
)
thermostat.get_data = Mock()
thermostat.resume_schedule = Mock()
thermostat.schedule_mode = Mock()
return thermostat
def _get_mock_nuheat(authenticate=None, get_thermostat=None):
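    """Return a mock nuheat API client."""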
nuheat_mock = MagicMock()
type(nuheat_mock).authenticate = MagicMock()
type(nuheat_mock).get_thermostat = MagicMock(return_value=get_thermostat)
return nuheat_mock
def _mock_get_config():
"""Return a default nuheat config."""
return {
DOMAIN: {CONF_USERNAME: "me", CONF_PASSWORD: "secret", CONF_DEVICES: [12345]}
}
|
import logging
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import silo
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'silo'
BENCHMARK_CONFIG = """
silo:
description: Runs Silo
vm_groups:
default:
vm_spec: *default_single_core
"""
flags.DEFINE_string('silo_benchmark', 'tpcc',
'benchmark to run with silo. Options include tpcc, ycsb,'
' queue, bid')
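# Each regex captures (metric name, value, unit) from the benchmark output.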
AGG_THPUT_REGEX = \
r'(agg_throughput):\s+(\d+\.?\d*e?[+-]?\d*)\s+([a-z/]+)'
PER_CORE_THPUT_REGEX = \
r'(avg_per_core_throughput):\s+(\d+\.?\d*e?[+-]?\d*)\s+([a-z/]+)'
LAT_REGEX = r'(avg_latency):\s+(\d+\.?\d*e?[+-]?\d*)\s+([a-z]+)'
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
"""Install Silo on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('Preparing Silo on %s', vm)
vm.Install('silo')
def ParseResults(results):
"""Result parser for Silo.
    This is what a sample output looks like:
--- table statistics ---
table customer_0 size 30000 (+0 records)
table customer_name_idx_0 size 30000 (+0 records)
table district_0 size 10 (+0 records)
table history_0 size 792182 (+762182 records)
table item_0 size 100000 (+0 records)
table new_order_0 size 122238 (+113238 records)
table oorder_0 size 829578 (+799578 records)
table oorder_c_id_idx_0 size 829578 (+799578 records)
table order_line_0 size 8300509 (+8000949 records)
table stock_0 size 100000 (+0 records)
table stock_data_0 size 100000 (+0 records)
table warehouse_0 size 1 (+0 records)
--- benchmark statistics ---
runtime: 30.0007 sec
memory delta: 768.336 MB
memory delta rate: 25.6106 MB/sec
logical memory delta: 112.705 MB
logical memory delta rate: 3.75673 MB/sec
agg_nosync_throughput: 59150.1 ops/sec
avg_nosync_per_core_throughput: 59150.1 ops/sec/core
agg_throughput: 59150.1 ops/sec
avg_per_core_throughput: 59150.1 ops/sec/core
agg_persist_throughput: 59150.1 ops/sec
avg_per_core_persist_throughput: 59150.1 ops/sec/core
avg_latency: 0.0168378 ms
avg_persist_latency: 0 ms
agg_abort_rate: 0 aborts/sec
avg_per_core_abort_rate: 0 aborts/sec/core
txn breakdown: [[Delivery, 70967], [NewOrder, 799578], [OrderStatus, 70813],
[Payment, 762182], [StockLevel, 71006]]
--- system counters (for benchmark) ---
--- perf counters (if enabled, for benchmark) ---
--- allocator stats ---
[allocator] ncpus=0
---------------------------------------
"""
samples = []
# agg throughput
match = regex_util.ExtractAllMatches(AGG_THPUT_REGEX, results)[0]
samples.append(sample.Sample(
match[0], float(match[1]), match[2]))
# per core throughput
match = regex_util.ExtractAllMatches(PER_CORE_THPUT_REGEX, results)[0]
samples.append(sample.Sample(
match[0], float(match[1]), match[2]))
# avg latency
match = regex_util.ExtractAllMatches(LAT_REGEX, results)[0]
samples.append(sample.Sample(
match[0], float(match[1]), match[2]))
return samples
def Run(benchmark_spec):
"""Run Silo on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
vm = vms[0]
nthreads = vm.NumCpusForBenchmark()
logging.info('Silo running on %s', vm)
command = 'cd {0} && '\
'out-perf.masstree/benchmarks/dbtest '\
'--bench {1} --num-threads {2} --verbose'.format(
silo.SILO_DIR,
FLAGS.silo_benchmark,
nthreads)
logging.info('Silo Results:')
stdout, stderr = vm.RemoteCommand(command, should_log=True)
return ParseResults(stderr)
def Cleanup(benchmark_spec):
"""Cleanup Silo on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
pass
|
import os
from flask import Flask
from flask_github import GitHub
from flask_sqlalchemy import SQLAlchemy
from celery import Celery, platforms
from app.utils.validator import Validator
from flask_socketio import SocketIO
# Version number
__version__ = '0.0.6'
# flask
app = Flask(__name__)
app.config.from_object('app.config_default')
# Load the default config file git_webhook_config.py from the home directory
config_file = os.path.join(os.path.expanduser('~'),
'.git-webhook/git_webhook_config.py')
if 'GIT_WEBHOOK_CONFIG' in os.environ:
app.config.from_envvar('GIT_WEBHOOK_CONFIG')
elif os.path.exists(config_file):
app.config.from_pyfile(config_file)
else:
    # Finally, fall back to loading config from the code directory
try:
        # Backwards compatibility with the old-style config module
app.config.from_object('app.config')
    except Exception:
app.config.from_object('app.config_example')
socketio = SocketIO(app,
message_queue=app.config['SOCKET_MESSAGE_QUEUE'])
# validator
v = Validator()
# flask-sqlalchemy
app.config['SQLALCHEMY_DATABASE_URI'] = app.config['DATABASE_URI']
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = True
SQLAlchemyDB = SQLAlchemy(app)
# celery
def make_celery(app):
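    """Create a Celery app whose tasks run inside the Flask application context."""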
celery = Celery(app.import_name, broker=app.config['CELERY_BROKER_URL'])
celery.conf.update(app.config)
TaskBase = celery.Task
class ContextTask(TaskBase):
abstract = True
def __call__(self, *args, **kwargs):
with app.app_context():
return TaskBase.__call__(self, *args, **kwargs)
celery.Task = ContextTask
return celery
platforms.C_FORCE_ROOT = True
celeryInstance = make_celery(app)
# github login
github = GitHub(app)
from app.database import model # noqa
from app import views # noqa
|
import ssl
import sys
import logging
import os.path
import flask
import webserver_sub
app = flask.Flask(__name__)
@app.route('/')
def hello_world():
return "Hello World via SSL!"
@app.route('/favicon.ico')
def favicon():
return webserver_sub.favicon()
@app.after_request
def log_request(response):
return webserver_sub.log_request(response)
@app.before_first_request
def turn_off_logging():
# Turn off werkzeug logging after the startup message has been printed.
logging.getLogger('werkzeug').setLevel(logging.ERROR)
def main():
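    """Serve the test app over TLS on the port given as the first CLI argument."""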
ssl_dir = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'..', 'data', 'ssl')
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1_2)
context.load_cert_chain(os.path.join(ssl_dir, 'cert.pem'),
os.path.join(ssl_dir, 'key.pem'))
app.run(port=int(sys.argv[1]), debug=False, ssl_context=context)
if __name__ == '__main__':
main()
|
import posixpath
from perfkitbenchmarker import linux_packages
ANT_TAR = 'apache-ant-1.9.6-bin.tar.gz'
ANT_TAR_URL = 'https://archive.apache.org/dist/ant/binaries/' + ANT_TAR
PACKAGE_NAME = 'ant'
PREPROVISIONED_DATA = {
ANT_TAR:
'90d28c0202871bd9875a5da6d982f362bb3114d346b9d8ae58860b8d3312c21c'}
PACKAGE_DATA_URL = {ANT_TAR: ANT_TAR_URL}
ANT_HOME_DIR = posixpath.join(linux_packages.INSTALL_DIR, PACKAGE_NAME)
def _Install(vm):
"""Installs the Ant package on the VM."""
vm.Install('wget')
vm.InstallPreprovisionedPackageData(PACKAGE_NAME, PREPROVISIONED_DATA.keys(),
linux_packages.INSTALL_DIR)
vm.RemoteCommand('cd {0} && tar -zxf apache-ant-1.9.6-bin.tar.gz && '
'ln -s {0}/apache-ant-1.9.6/ {1}'.format(
linux_packages.INSTALL_DIR, ANT_HOME_DIR))
def YumInstall(vm):
"""Installs the Ant package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the Ant package on the VM."""
_Install(vm)
|
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.const import CONF_PLATFORM, CONF_VALUE_TEMPLATE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import EVENT_DEVICE_REGISTRY_UPDATED
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
ATTR_DISCOVERY_HASH,
ATTR_DISCOVERY_TOPIC,
CONF_CONNECTIONS,
CONF_DEVICE,
CONF_IDENTIFIERS,
CONF_QOS,
CONF_TOPIC,
DOMAIN,
cleanup_device_registry,
subscription,
)
from .discovery import MQTT_DISCOVERY_NEW, MQTT_DISCOVERY_UPDATED, clear_discovery_hash
from .util import valid_subscribe_topic
_LOGGER = logging.getLogger(__name__)
TAG = "tag"
TAGS = "mqtt_tags"
PLATFORM_SCHEMA = mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_PLATFORM): "mqtt",
vol.Required(CONF_TOPIC): valid_subscribe_topic,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
},
mqtt.validate_device_has_at_least_one_identifier,
)
async def async_setup_entry(hass, config_entry):
"""Set up MQTT tag scan dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add MQTT tag scan."""
discovery_data = discovery_payload.discovery_data
try:
config = PLATFORM_SCHEMA(discovery_payload)
await async_setup_tag(hass, config, config_entry, discovery_data)
except Exception:
clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format("tag", "mqtt"), async_discover
)
async def async_setup_tag(hass, config, config_entry, discovery_data):
"""Set up the MQTT tag scanner."""
discovery_hash = discovery_data[ATTR_DISCOVERY_HASH]
discovery_id = discovery_hash[1]
device_id = None
if CONF_DEVICE in config:
await _update_device(hass, config_entry, config)
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get_device(
{(DOMAIN, id_) for id_ in config[CONF_DEVICE][CONF_IDENTIFIERS]},
{tuple(x) for x in config[CONF_DEVICE][CONF_CONNECTIONS]},
)
if device is None:
return
device_id = device.id
if TAGS not in hass.data:
hass.data[TAGS] = {}
if device_id not in hass.data[TAGS]:
hass.data[TAGS][device_id] = {}
tag_scanner = MQTTTagScanner(
hass,
config,
device_id,
discovery_data,
config_entry,
)
await tag_scanner.setup()
if device_id:
hass.data[TAGS][device_id][discovery_id] = tag_scanner
def async_has_tags(hass, device_id):
"""Device has tag scanners."""
if TAGS not in hass.data or device_id not in hass.data[TAGS]:
return False
return hass.data[TAGS][device_id] != {}
class MQTTTagScanner:
"""MQTT Tag scanner."""
def __init__(self, hass, config, device_id, discovery_data, config_entry):
"""Initialize."""
self._config = config
self._config_entry = config_entry
self.device_id = device_id
self.discovery_data = discovery_data
self.hass = hass
self._remove_discovery = None
self._remove_device_updated = None
self._sub_state = None
self._value_template = None
self._setup_from_config(config)
async def discovery_update(self, payload):
"""Handle discovery update."""
discovery_hash = self.discovery_data[ATTR_DISCOVERY_HASH]
_LOGGER.info(
"Got update for tag scanner with hash: %s '%s'", discovery_hash, payload
)
if not payload:
# Empty payload: Remove tag scanner
_LOGGER.info("Removing tag scanner: %s", discovery_hash)
await self.tear_down()
if self.device_id:
await cleanup_device_registry(self.hass, self.device_id)
else:
# Non-empty payload: Update tag scanner
_LOGGER.info("Updating tag scanner: %s", discovery_hash)
config = PLATFORM_SCHEMA(payload)
self._config = config
if self.device_id:
await _update_device(self.hass, self._config_entry, config)
self._setup_from_config(config)
await self.subscribe_topics()
def _setup_from_config(self, config):
self._value_template = lambda value, error_value: value
if CONF_VALUE_TEMPLATE in config:
value_template = config.get(CONF_VALUE_TEMPLATE)
value_template.hass = self.hass
self._value_template = value_template.async_render_with_possible_json_value
async def setup(self):
"""Set up the MQTT tag scanner."""
discovery_hash = self.discovery_data[ATTR_DISCOVERY_HASH]
await self.subscribe_topics()
if self.device_id:
self._remove_device_updated = self.hass.bus.async_listen(
EVENT_DEVICE_REGISTRY_UPDATED, self.device_removed
)
self._remove_discovery = async_dispatcher_connect(
self.hass,
MQTT_DISCOVERY_UPDATED.format(discovery_hash),
self.discovery_update,
)
async def subscribe_topics(self):
"""Subscribe to MQTT topics."""
async def tag_scanned(msg):
tag_id = self._value_template(msg.payload, error_value="").strip()
if not tag_id: # No output from template, ignore
return
await self.hass.components.tag.async_scan_tag(tag_id, self.device_id)
self._sub_state = await subscription.async_subscribe_topics(
self.hass,
self._sub_state,
{
"state_topic": {
"topic": self._config[CONF_TOPIC],
"msg_callback": tag_scanned,
"qos": self._config[CONF_QOS],
}
},
)
async def device_removed(self, event):
"""Handle the removal of a device."""
device_id = event.data["device_id"]
if event.data["action"] != "remove" or device_id != self.device_id:
return
await self.tear_down()
async def tear_down(self):
"""Cleanup tag scanner."""
discovery_hash = self.discovery_data[ATTR_DISCOVERY_HASH]
discovery_id = discovery_hash[1]
discovery_topic = self.discovery_data[ATTR_DISCOVERY_TOPIC]
clear_discovery_hash(self.hass, discovery_hash)
if self.device_id:
self._remove_device_updated()
self._remove_discovery()
mqtt.publish(self.hass, discovery_topic, "", retain=True)
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
if self.device_id:
self.hass.data[TAGS][self.device_id].pop(discovery_id)
async def _update_device(hass, config_entry, config):
"""Update device registry."""
device_registry = await hass.helpers.device_registry.async_get_registry()
config_entry_id = config_entry.entry_id
device_info = mqtt.device_info_from_config(config[CONF_DEVICE])
if config_entry_id is not None and device_info is not None:
device_info["config_entry_id"] = config_entry_id
device_registry.async_get_or_create(**device_info)
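# Hypothetical discovery payload sketch (illustration only): publishing a JSON
# config like the one below to an MQTT discovery topic would be validated by
# PLATFORM_SCHEMA above and create a tag scanner bound to the referenced device.
#
#   {
#     "topic": "0AFFD2/tag_scanned",
#     "value_template": "{{ value_json.UID }}",
#     "device": {"identifiers": ["0AFFD2"]}
#   }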
|
import multiprocessing
import threading
import weakref
from typing import Any, MutableMapping, Optional
try:
from dask.utils import SerializableLock
except ImportError:
# no need to worry about serializing the lock
SerializableLock = threading.Lock
try:
from dask.distributed import Lock as DistributedLock
except ImportError:
DistributedLock = None
# Locks used by multiple backends.
# Neither HDF5 nor the netCDF-C library are thread-safe.
HDF5_LOCK = SerializableLock()
NETCDFC_LOCK = SerializableLock()
_FILE_LOCKS: MutableMapping[Any, threading.Lock] = weakref.WeakValueDictionary()
def _get_threaded_lock(key):
try:
lock = _FILE_LOCKS[key]
except KeyError:
lock = _FILE_LOCKS[key] = threading.Lock()
return lock
def _get_multiprocessing_lock(key):
# TODO: make use of the key -- maybe use locket.py?
# https://github.com/mwilliamson/locket.py
del key # unused
return multiprocessing.Lock()
_LOCK_MAKERS = {
None: _get_threaded_lock,
"threaded": _get_threaded_lock,
"multiprocessing": _get_multiprocessing_lock,
"distributed": DistributedLock,
}
def _get_lock_maker(scheduler=None):
"""Returns an appropriate function for creating resource locks.
Parameters
----------
scheduler : str or None
Dask scheduler being used.
See Also
--------
dask.utils.get_scheduler_lock
"""
return _LOCK_MAKERS[scheduler]
def _get_scheduler(get=None, collection=None) -> Optional[str]:
"""Determine the dask scheduler that is being used.
None is returned if no dask scheduler is active.
See also
--------
dask.base.get_scheduler
"""
try:
        # Fix for a bug triggered when dask is installed without the toolz library
        # Issue: 4164
import dask
from dask.base import get_scheduler # noqa: F401
actual_get = get_scheduler(get, collection)
except ImportError:
return None
try:
from dask.distributed import Client
if isinstance(actual_get.__self__, Client):
return "distributed"
except (ImportError, AttributeError):
pass
try:
# As of dask=2.6, dask.multiprocessing requires cloudpickle to be installed
# Dependency removed in https://github.com/dask/dask/pull/5511
if actual_get is dask.multiprocessing.get:
return "multiprocessing"
except AttributeError:
pass
return "threaded"
def get_write_lock(key):
"""Get a scheduler appropriate lock for writing to the given resource.
Parameters
----------
key : str
Name of the resource for which to acquire a lock. Typically a filename.
Returns
-------
Lock object that can be used like a threading.Lock object.
"""
scheduler = _get_scheduler()
lock_maker = _get_lock_maker(scheduler)
return lock_maker(key)
def acquire(lock, blocking=True):
"""Acquire a lock, possibly in a non-blocking fashion.
Includes backwards compatibility hacks for old versions of Python, dask
and dask-distributed.
"""
if blocking:
# no arguments needed
return lock.acquire()
elif DistributedLock is not None and isinstance(lock, DistributedLock):
# distributed.Lock doesn't support the blocking argument yet:
# https://github.com/dask/distributed/pull/2412
return lock.acquire(timeout=0)
else:
# "blocking" keyword argument not supported for:
# - threading.Lock on Python 2.
# - dask.SerializableLock with dask v1.0.0 or earlier.
# - multiprocessing.Lock calls the argument "block" instead.
return lock.acquire(blocking)
class CombinedLock:
"""A combination of multiple locks.
Like a locked door, a CombinedLock is locked if any of its constituent
locks are locked.
"""
def __init__(self, locks):
self.locks = tuple(set(locks)) # remove duplicates
def acquire(self, blocking=True):
return all(acquire(lock, blocking=blocking) for lock in self.locks)
def release(self):
for lock in self.locks:
lock.release()
def __enter__(self):
for lock in self.locks:
lock.__enter__()
def __exit__(self, *args):
for lock in self.locks:
lock.__exit__(*args)
def locked(self):
        return any(lock.locked() for lock in self.locks)
def __repr__(self):
return "CombinedLock(%r)" % list(self.locks)
class DummyLock:
"""DummyLock provides the lock API without any actual locking."""
def acquire(self, blocking=True):
pass
def release(self):
pass
def __enter__(self):
pass
def __exit__(self, *args):
pass
def locked(self):
return False
def combine_locks(locks):
"""Combine a sequence of locks into a single lock."""
all_locks = []
for lock in locks:
if isinstance(lock, CombinedLock):
all_locks.extend(lock.locks)
elif lock is not None:
all_locks.append(lock)
num_locks = len(all_locks)
if num_locks > 1:
return CombinedLock(all_locks)
elif num_locks == 1:
return all_locks[0]
else:
return DummyLock()
def ensure_lock(lock):
"""Ensure that the given object is a lock."""
if lock is None or lock is False:
return DummyLock()
return lock
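# Hypothetical usage sketch (illustration only): combine_locks() deduplicates and
# flattens locks, and acquire() hides the differing "blocking" keyword across lock
# implementations. The file name below is an assumption.
#
#   lock = combine_locks([HDF5_LOCK, get_write_lock("example.nc")])
#   if acquire(lock, blocking=False):
#       try:
#           ...  # write to the shared resource
#       finally:
#           lock.release()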
|
from . import pprint
def parser(subparsers, _):
"""Adds the switch parser to the given subparsers object."""
desc = 'switch branches'
switch_parser = subparsers.add_parser(
'switch', help=desc, description=desc.capitalize(), aliases=['sw'])
switch_parser.add_argument('branch', help='switch to branch')
switch_parser.add_argument(
'-mo', '--move-over',
      help='move uncommitted changes made in the current branch to the '
'destination branch',
action='store_true')
switch_parser.add_argument('-mi', '--move-ignored',
help='move ignored files to the destination branch, '
'has no effect if --move-over is also set',
action='store_true')
switch_parser.set_defaults(func=main)
def main(args, repo):
b = repo.lookup_branch(args.branch)
if not b:
pprint.err('Branch {0} doesn\'t exist'.format(args.branch))
pprint.err_exp('to list existing branches do gl branch')
pprint.err_exp('to create a new branch do gl branch -c {0}'.format(args.branch))
return False
repo.switch_current_branch(b, move_over=args.move_over, move_ignored=args.move_ignored)
pprint.ok('Switched to branch {0}'.format(args.branch))
return True
|
import logging
import re
from typing import Any, Callable, Dict, Iterable, Optional
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID, ATTR_SUPPORTED_FEATURES
from homeassistant.core import Context, State, T, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
_SlotsType = Dict[str, Any]
INTENT_TURN_OFF = "HassTurnOff"
INTENT_TURN_ON = "HassTurnOn"
INTENT_TOGGLE = "HassToggle"
SLOT_SCHEMA = vol.Schema({}, extra=vol.ALLOW_EXTRA)
DATA_KEY = "intent"
SPEECH_TYPE_PLAIN = "plain"
SPEECH_TYPE_SSML = "ssml"
@callback
@bind_hass
def async_register(hass: HomeAssistantType, handler: "IntentHandler") -> None:
"""Register an intent with Home Assistant."""
intents = hass.data.get(DATA_KEY)
if intents is None:
intents = hass.data[DATA_KEY] = {}
assert handler.intent_type is not None, "intent_type cannot be None"
if handler.intent_type in intents:
_LOGGER.warning(
"Intent %s is being overwritten by %s", handler.intent_type, handler
)
intents[handler.intent_type] = handler
@bind_hass
async def async_handle(
hass: HomeAssistantType,
platform: str,
intent_type: str,
slots: Optional[_SlotsType] = None,
text_input: Optional[str] = None,
context: Optional[Context] = None,
) -> "IntentResponse":
"""Handle an intent."""
handler: IntentHandler = hass.data.get(DATA_KEY, {}).get(intent_type)
if handler is None:
raise UnknownIntent(f"Unknown intent {intent_type}")
if context is None:
context = Context()
intent = Intent(hass, platform, intent_type, slots or {}, text_input, context)
try:
_LOGGER.info("Triggering intent handler %s", handler)
result = await handler.async_handle(intent)
return result
except vol.Invalid as err:
_LOGGER.warning("Received invalid slot info for %s: %s", intent_type, err)
raise InvalidSlotInfo(f"Received invalid slot info for {intent_type}") from err
except IntentHandleError:
raise
except Exception as err:
raise IntentUnexpectedError(f"Error handling {intent_type}") from err
class IntentError(HomeAssistantError):
"""Base class for intent related errors."""
class UnknownIntent(IntentError):
"""When the intent is not registered."""
class InvalidSlotInfo(IntentError):
"""When the slot data is invalid."""
class IntentHandleError(IntentError):
"""Error while handling intent."""
class IntentUnexpectedError(IntentError):
"""Unexpected error while handling intent."""
@callback
@bind_hass
def async_match_state(
hass: HomeAssistantType, name: str, states: Optional[Iterable[State]] = None
) -> State:
"""Find a state that matches the name."""
if states is None:
states = hass.states.async_all()
state = _fuzzymatch(name, states, lambda state: state.name)
if state is None:
raise IntentHandleError(f"Unable to find an entity called {name}")
return state
@callback
def async_test_feature(state: State, feature: int, feature_name: str) -> None:
"""Test is state supports a feature."""
if state.attributes.get(ATTR_SUPPORTED_FEATURES, 0) & feature == 0:
raise IntentHandleError(f"Entity {state.name} does not support {feature_name}")
class IntentHandler:
"""Intent handler registration."""
intent_type: Optional[str] = None
slot_schema: Optional[vol.Schema] = None
_slot_schema: Optional[vol.Schema] = None
platforms: Optional[Iterable[str]] = []
@callback
def async_can_handle(self, intent_obj: "Intent") -> bool:
"""Test if an intent can be handled."""
return self.platforms is None or intent_obj.platform in self.platforms
@callback
def async_validate_slots(self, slots: _SlotsType) -> _SlotsType:
"""Validate slot information."""
if self.slot_schema is None:
return slots
if self._slot_schema is None:
self._slot_schema = vol.Schema(
{
key: SLOT_SCHEMA.extend({"value": validator})
for key, validator in self.slot_schema.items()
},
extra=vol.ALLOW_EXTRA,
)
return self._slot_schema(slots) # type: ignore
async def async_handle(self, intent_obj: "Intent") -> "IntentResponse":
"""Handle the intent."""
raise NotImplementedError()
def __repr__(self) -> str:
"""Represent a string of an intent handler."""
return f"<{self.__class__.__name__} - {self.intent_type}>"
def _fuzzymatch(name: str, items: Iterable[T], key: Callable[[T], str]) -> Optional[T]:
"""Fuzzy matching function."""
matches = []
pattern = ".*?".join(name)
regex = re.compile(pattern, re.IGNORECASE)
for idx, item in enumerate(items):
match = regex.search(key(item))
if match:
            # Add the index so we pick the first match when match length and start are equal
matches.append((len(match.group()), match.start(), idx, item))
return sorted(matches)[0][3] if matches else None
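# Hypothetical example (illustration only): matching the name "kitchen" against
# states named "Kitchen Light" and "Kit Chen" builds the pattern
# "k.*?i.*?t.*?c.*?h.*?e.*?n"; both match, but "Kitchen Light" yields the shorter
# match ("Kitchen", length 7 vs. 8), so it sorts first and is returned.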
class ServiceIntentHandler(IntentHandler):
"""Service Intent handler registration.
Service specific intent handler that calls a service by name/entity_id.
"""
slot_schema = {vol.Required("name"): cv.string}
def __init__(
self, intent_type: str, domain: str, service: str, speech: str
) -> None:
"""Create Service Intent Handler."""
self.intent_type = intent_type
self.domain = domain
self.service = service
self.speech = speech
async def async_handle(self, intent_obj: "Intent") -> "IntentResponse":
"""Handle the hass intent."""
hass = intent_obj.hass
slots = self.async_validate_slots(intent_obj.slots)
state = async_match_state(hass, slots["name"]["value"])
await hass.services.async_call(
self.domain,
self.service,
{ATTR_ENTITY_ID: state.entity_id},
context=intent_obj.context,
)
response = intent_obj.create_response()
response.async_set_speech(self.speech.format(state.name))
return response
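# Hypothetical registration sketch (illustration only): a handler constructed as
# below would make the "HassTurnOn" intent call light.turn_on on the matched
# entity and answer with the formatted speech string.
#
#   async_register(
#       hass,
#       ServiceIntentHandler(INTENT_TURN_ON, "light", "turn_on", "Turned {} on"),
#   )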
class Intent:
"""Hold the intent."""
__slots__ = ["hass", "platform", "intent_type", "slots", "text_input", "context"]
def __init__(
self,
hass: HomeAssistantType,
platform: str,
intent_type: str,
slots: _SlotsType,
text_input: Optional[str],
context: Context,
) -> None:
"""Initialize an intent."""
self.hass = hass
self.platform = platform
self.intent_type = intent_type
self.slots = slots
self.text_input = text_input
self.context = context
@callback
def create_response(self) -> "IntentResponse":
"""Create a response."""
return IntentResponse(self)
class IntentResponse:
"""Response to an intent."""
def __init__(self, intent: Optional[Intent] = None) -> None:
"""Initialize an IntentResponse."""
self.intent = intent
self.speech: Dict[str, Dict[str, Any]] = {}
self.card: Dict[str, Dict[str, str]] = {}
@callback
def async_set_speech(
self, speech: str, speech_type: str = "plain", extra_data: Optional[Any] = None
) -> None:
"""Set speech response."""
self.speech[speech_type] = {"speech": speech, "extra_data": extra_data}
@callback
def async_set_card(
self, title: str, content: str, card_type: str = "simple"
) -> None:
"""Set speech response."""
self.card[card_type] = {"title": title, "content": content}
@callback
def as_dict(self) -> Dict[str, Dict[str, Dict[str, Any]]]:
"""Return a dictionary representation of an intent response."""
return {"speech": self.speech, "card": self.card}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from nets import inception
slim = tf.contrib.slim
class InceptionV2Test(tf.test.TestCase):
def testBuildClassificationNetwork(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
self.assertTrue('Predictions' in end_points)
self.assertListEqual(end_points['Predictions'].get_shape().as_list(),
[batch_size, num_classes])
def testBuildBaseNetwork(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
mixed_5c, end_points = inception.inception_v2_base(inputs)
self.assertTrue(mixed_5c.op.name.startswith('InceptionV2/Mixed_5c'))
self.assertListEqual(mixed_5c.get_shape().as_list(),
[batch_size, 7, 7, 1024])
expected_endpoints = ['Mixed_3b', 'Mixed_3c', 'Mixed_4a', 'Mixed_4b',
'Mixed_4c', 'Mixed_4d', 'Mixed_4e', 'Mixed_5a',
'Mixed_5b', 'Mixed_5c', 'Conv2d_1a_7x7',
'MaxPool_2a_3x3', 'Conv2d_2b_1x1', 'Conv2d_2c_3x3',
'MaxPool_3a_3x3']
self.assertItemsEqual(end_points.keys(), expected_endpoints)
def testBuildOnlyUptoFinalEndpoint(self):
batch_size = 5
height, width = 224, 224
endpoints = ['Conv2d_1a_7x7', 'MaxPool_2a_3x3', 'Conv2d_2b_1x1',
'Conv2d_2c_3x3', 'MaxPool_3a_3x3', 'Mixed_3b', 'Mixed_3c',
'Mixed_4a', 'Mixed_4b', 'Mixed_4c', 'Mixed_4d', 'Mixed_4e',
'Mixed_5a', 'Mixed_5b', 'Mixed_5c']
for index, endpoint in enumerate(endpoints):
with tf.Graph().as_default():
inputs = tf.random_uniform((batch_size, height, width, 3))
out_tensor, end_points = inception.inception_v2_base(
inputs, final_endpoint=endpoint)
self.assertTrue(out_tensor.op.name.startswith(
'InceptionV2/' + endpoint))
self.assertItemsEqual(endpoints[:index+1], end_points)
def testBuildAndCheckAllEndPointsUptoMixed5c(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2_base(inputs,
final_endpoint='Mixed_5c')
endpoints_shapes = {'Mixed_3b': [batch_size, 28, 28, 256],
'Mixed_3c': [batch_size, 28, 28, 320],
'Mixed_4a': [batch_size, 14, 14, 576],
'Mixed_4b': [batch_size, 14, 14, 576],
'Mixed_4c': [batch_size, 14, 14, 576],
'Mixed_4d': [batch_size, 14, 14, 576],
'Mixed_4e': [batch_size, 14, 14, 576],
'Mixed_5a': [batch_size, 7, 7, 1024],
'Mixed_5b': [batch_size, 7, 7, 1024],
'Mixed_5c': [batch_size, 7, 7, 1024],
'Conv2d_1a_7x7': [batch_size, 112, 112, 64],
'MaxPool_2a_3x3': [batch_size, 56, 56, 64],
'Conv2d_2b_1x1': [batch_size, 56, 56, 64],
'Conv2d_2c_3x3': [batch_size, 56, 56, 192],
'MaxPool_3a_3x3': [batch_size, 28, 28, 192]}
self.assertItemsEqual(endpoints_shapes.keys(), end_points.keys())
for endpoint_name in endpoints_shapes:
expected_shape = endpoints_shapes[endpoint_name]
self.assertTrue(endpoint_name in end_points)
self.assertListEqual(end_points[endpoint_name].get_shape().as_list(),
expected_shape)
def testModelHasExpectedNumberOfParameters(self):
batch_size = 5
height, width = 224, 224
inputs = tf.random_uniform((batch_size, height, width, 3))
with slim.arg_scope(inception.inception_v2_arg_scope()):
inception.inception_v2_base(inputs)
total_params, _ = slim.model_analyzer.analyze_vars(
slim.get_model_variables())
self.assertAlmostEqual(10173112, total_params)
def testBuildEndPointsWithDepthMultiplierLessThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v2(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=0.5)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(0.5 * original_depth, new_depth)
def testBuildEndPointsWithDepthMultiplierGreaterThanOne(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = inception.inception_v2(inputs, num_classes)
endpoint_keys = [key for key in end_points.keys()
if key.startswith('Mixed') or key.startswith('Conv')]
_, end_points_with_multiplier = inception.inception_v2(
inputs, num_classes, scope='depth_multiplied_net',
depth_multiplier=2.0)
for key in endpoint_keys:
original_depth = end_points[key].get_shape().as_list()[3]
new_depth = end_points_with_multiplier[key].get_shape().as_list()[3]
self.assertEqual(2.0 * original_depth, new_depth)
def testRaiseValueErrorWithInvalidDepthMultiplier(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
with self.assertRaises(ValueError):
_ = inception.inception_v2(inputs, num_classes, depth_multiplier=-0.1)
with self.assertRaises(ValueError):
_ = inception.inception_v2(inputs, num_classes, depth_multiplier=0.0)
def testHalfSizeImages(self):
batch_size = 5
height, width = 112, 112
num_classes = 1000
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
self.assertListEqual(pre_pool.get_shape().as_list(),
[batch_size, 4, 4, 1024])
def testUnknownImageShape(self):
tf.reset_default_graph()
batch_size = 2
height, width = 224, 224
num_classes = 1000
input_np = np.random.uniform(0, 1, (batch_size, height, width, 3))
with self.test_session() as sess:
inputs = tf.placeholder(tf.float32, shape=(batch_size, None, None, 3))
logits, end_points = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
pre_pool = end_points['Mixed_5c']
feed_dict = {inputs: input_np}
tf.global_variables_initializer().run()
pre_pool_out = sess.run(pre_pool, feed_dict=feed_dict)
self.assertListEqual(list(pre_pool_out.shape), [batch_size, 7, 7, 1024])
  def testUnknownBatchSize(self):
batch_size = 1
height, width = 224, 224
num_classes = 1000
inputs = tf.placeholder(tf.float32, (None, height, width, 3))
logits, _ = inception.inception_v2(inputs, num_classes)
self.assertTrue(logits.op.name.startswith('InceptionV2/Logits'))
self.assertListEqual(logits.get_shape().as_list(),
[None, num_classes])
images = tf.random_uniform((batch_size, height, width, 3))
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(logits, {inputs: images.eval()})
      self.assertEqual(output.shape, (batch_size, num_classes))
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = inception.inception_v2(eval_inputs, num_classes,
is_training=False)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
      self.assertEqual(output.shape, (batch_size,))
def testTrainEvalWithReuse(self):
train_batch_size = 5
eval_batch_size = 2
height, width = 150, 150
num_classes = 1000
train_inputs = tf.random_uniform((train_batch_size, height, width, 3))
inception.inception_v2(train_inputs, num_classes)
eval_inputs = tf.random_uniform((eval_batch_size, height, width, 3))
logits, _ = inception.inception_v2(eval_inputs, num_classes, reuse=True)
predictions = tf.argmax(logits, 1)
with self.test_session() as sess:
sess.run(tf.global_variables_initializer())
output = sess.run(predictions)
      self.assertEqual(output.shape, (eval_batch_size,))
def testLogitsNotSqueezed(self):
num_classes = 25
images = tf.random_uniform([1, 224, 224, 3])
logits, _ = inception.inception_v2(images,
num_classes=num_classes,
spatial_squeeze=False)
with self.test_session() as sess:
tf.global_variables_initializer().run()
logits_out = sess.run(logits)
self.assertListEqual(list(logits_out.shape), [1, 1, 1, num_classes])
if __name__ == '__main__':
tf.test.main()
|
from __future__ import print_function
import sys
import argparse
from six.moves.urllib.request import urlopen
try:
import console
except ImportError:
console = None
_stash = globals()["_stash"]
def get_status_string(downloaded, total):
"""Return a string showing the current progress"""
if _stash is not None and hasattr(_stash, "libcore"):
hdr = _stash.libcore.sizeof_fmt(downloaded)
else:
hdr = "%10d" % downloaded
if total:
total = float(total)
percent = min((downloaded / total) * 100.0, 100.0)
total_c = 20
nc = int(total_c * (downloaded / total))
sh = ">" if downloaded != total else "="
bar = "[" + "=" * (nc - 1) + sh + " " * (total_c - nc) + "]"
# status = r"%10d [%3.2f%%]" % downloaded, percent
status = r"%s %3.2f%% | %s" % (bar, percent, hdr)
else:
status = hdr
return status
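# Hypothetical example (illustration only): without _stash.libcore available,
# get_status_string(4096, 8192) returns
# "[=========>          ] 50.00% |       4096".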
def main(args):
ap = argparse.ArgumentParser()
ap.add_argument('-o', '--output-file', nargs='?', help='save content as file')
ap.add_argument('url', nargs='?', help='the url to read from (default to clipboard)')
ns = ap.parse_args(args)
url = ns.url or _stash.libdist.clipboard_get()
output_file = ns.output_file or url.split('/')[-1]
if console is not None:
console.show_activity()
try:
print('Opening: %s\n' % url)
u = urlopen(url)
meta = u.info()
try:
if _stash.PY3:
file_size = int(meta["Content-Length"])
else:
file_size = int(meta.getheaders("Content-Length")[0])
except (IndexError, ValueError, TypeError):
file_size = 0
print("Save as: {} ".format(output_file), end="")
print("({} bytes)".format(file_size if file_size else "???"))
with open(output_file, 'wb') as f:
file_size_dl = 0.0
block_sz = 8192
while True:
buf = u.read(block_sz)
if not buf:
break
file_size_dl += len(buf)
f.write(buf)
status = get_status_string(file_size_dl, file_size)
print('\r' + status + " " * 10, end="")
print("")
    except Exception as e:
        print('Error downloading %s: %s' % (url, e))
sys.exit(1)
finally:
if console is not None:
console.hide_activity()
sys.exit(0)
if __name__ == '__main__':
main(sys.argv[1:])
|
import staticconf
CLUSTERMAN_YAML_FILE_PATH = "/nail/srv/configs/clusterman.yaml"
CLUSTERMAN_METRICS_YAML_FILE_PATH = "/nail/srv/configs/clusterman_metrics.yaml"
def get_clusterman_metrics():
try:
import clusterman_metrics
import clusterman_metrics.util.costs
clusterman_yaml = CLUSTERMAN_YAML_FILE_PATH
staticconf.YamlConfiguration(
CLUSTERMAN_METRICS_YAML_FILE_PATH, namespace="clusterman_metrics"
)
except (ImportError, FileNotFoundError):
# our cluster autoscaler is not currently open source, sorry!
clusterman_metrics = None
clusterman_yaml = None
return clusterman_metrics, clusterman_yaml
|
import asyncio
from functools import partial
from nest.nest import AUTHORIZE_URL, AuthorizationError, NestAuth
from homeassistant.const import HTTP_UNAUTHORIZED
from homeassistant.core import callback
from . import config_flow
from .const import DOMAIN
@callback
def initialize(hass, client_id, client_secret):
"""Initialize a local auth provider."""
config_flow.register_flow_implementation(
hass,
DOMAIN,
"configuration.yaml",
partial(generate_auth_url, client_id),
partial(resolve_auth_code, hass, client_id, client_secret),
)
async def generate_auth_url(client_id, flow_id):
"""Generate an authorize url."""
return AUTHORIZE_URL.format(client_id, flow_id)
async def resolve_auth_code(hass, client_id, client_secret, code):
"""Resolve an authorization code."""
result = asyncio.Future()
auth = NestAuth(
client_id=client_id,
client_secret=client_secret,
auth_callback=result.set_result,
)
auth.pin = code
try:
await hass.async_add_executor_job(auth.login)
return await result
except AuthorizationError as err:
if err.response.status_code == HTTP_UNAUTHORIZED:
raise config_flow.CodeInvalid()
raise config_flow.NestAuthError(
f"Unknown error: {err} ({err.response.status_code})"
)
|
import logging
from socket import timeout
from threading import Lock
import time
from maxcube.connection import MaxCubeConnection
from maxcube.cube import MaxCube
import voluptuous as vol
from homeassistant.const import CONF_HOST, CONF_PORT, CONF_SCAN_INTERVAL
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 62910
DOMAIN = "maxcube"
DATA_KEY = "maxcube"
NOTIFICATION_ID = "maxcube_notification"
NOTIFICATION_TITLE = "Max!Cube gateway setup"
CONF_GATEWAYS = "gateways"
CONFIG_GATEWAY = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SCAN_INTERVAL, default=300): cv.time_period,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_GATEWAYS, default={}): vol.All(
cv.ensure_list, [CONFIG_GATEWAY]
)
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Establish connection to MAX! Cube."""
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
connection_failed = 0
gateways = config[DOMAIN][CONF_GATEWAYS]
for gateway in gateways:
host = gateway[CONF_HOST]
port = gateway[CONF_PORT]
scan_interval = gateway[CONF_SCAN_INTERVAL].total_seconds()
try:
cube = MaxCube(MaxCubeConnection(host, port))
hass.data[DATA_KEY][host] = MaxCubeHandle(cube, scan_interval)
except timeout as ex:
_LOGGER.error("Unable to connect to Max!Cube gateway: %s", str(ex))
hass.components.persistent_notification.create(
f"Error: {ex}<br />You will need to restart Home Assistant after fixing.",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
connection_failed += 1
if connection_failed >= len(gateways):
return False
load_platform(hass, "climate", DOMAIN, {}, config)
load_platform(hass, "binary_sensor", DOMAIN, {}, config)
return True
class MaxCubeHandle:
"""Keep the cube instance in one place and centralize the update."""
def __init__(self, cube, scan_interval):
"""Initialize the Cube Handle."""
self.cube = cube
self.scan_interval = scan_interval
self.mutex = Lock()
self._updatets = time.monotonic()
def update(self):
"""Pull the latest data from the MAX! Cube."""
# Acquire mutex to prevent simultaneous update from multiple threads
with self.mutex:
# Only update every update_interval
if (time.monotonic() - self._updatets) >= self.scan_interval:
_LOGGER.debug("Updating")
try:
self.cube.update()
except timeout:
_LOGGER.error("Max!Cube connection failed")
return False
self._updatets = time.monotonic()
else:
_LOGGER.debug("Skipping update")
|
import pypck
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import CONF_ADDRESS
from . import LcnDevice
from .const import CONF_CONNECTIONS, CONF_OUTPUT, DATA_LCN, OUTPUT_PORTS
from .helpers import get_connection
async def async_setup_platform(
hass, hass_config, async_add_entities, discovery_info=None
):
"""Set up the LCN switch platform."""
if discovery_info is None:
return
devices = []
for config in discovery_info:
address, connection_id = config[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*address)
connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
connection = get_connection(connections, connection_id)
address_connection = connection.get_address_conn(addr)
if config[CONF_OUTPUT] in OUTPUT_PORTS:
device = LcnOutputSwitch(config, address_connection)
else: # in RELAY_PORTS
device = LcnRelaySwitch(config, address_connection)
devices.append(device)
async_add_entities(devices)
class LcnOutputSwitch(LcnDevice, SwitchEntity):
"""Representation of a LCN switch for output ports."""
def __init__(self, config, address_connection):
"""Initialize the LCN switch."""
super().__init__(config, address_connection)
self.output = pypck.lcn_defs.OutputPort[config[CONF_OUTPUT]]
self._is_on = None
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(self.output)
@property
def is_on(self):
"""Return True if entity is on."""
return self._is_on
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
self._is_on = True
self.address_connection.dim_output(self.output.value, 100, 0)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
self._is_on = False
self.address_connection.dim_output(self.output.value, 0, 0)
self.async_write_ha_state()
def input_received(self, input_obj):
"""Set switch state when LCN input object (command) is received."""
if (
not isinstance(input_obj, pypck.inputs.ModStatusOutput)
or input_obj.get_output_id() != self.output.value
):
return
self._is_on = input_obj.get_percent() > 0
self.async_write_ha_state()
class LcnRelaySwitch(LcnDevice, SwitchEntity):
"""Representation of a LCN switch for relay ports."""
def __init__(self, config, address_connection):
"""Initialize the LCN switch."""
super().__init__(config, address_connection)
self.output = pypck.lcn_defs.RelayPort[config[CONF_OUTPUT]]
self._is_on = None
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(self.output)
@property
def is_on(self):
"""Return True if entity is on."""
return self._is_on
async def async_turn_on(self, **kwargs):
"""Turn the entity on."""
self._is_on = True
states = [pypck.lcn_defs.RelayStateModifier.NOCHANGE] * 8
states[self.output.value] = pypck.lcn_defs.RelayStateModifier.ON
self.address_connection.control_relays(states)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the entity off."""
self._is_on = False
states = [pypck.lcn_defs.RelayStateModifier.NOCHANGE] * 8
states[self.output.value] = pypck.lcn_defs.RelayStateModifier.OFF
self.address_connection.control_relays(states)
self.async_write_ha_state()
def input_received(self, input_obj):
"""Set switch state when LCN input object (command) is received."""
if not isinstance(input_obj, pypck.inputs.ModStatusRelays):
return
self._is_on = input_obj.get_state(self.output.value)
self.async_write_ha_state()
|
import asyncio
from homeassistant import bootstrap, config_entries
from homeassistant.const import ATTR_ENTITY_ID, EVENT_HOMEASSISTANT_START
import homeassistant.core as ha
DOMAIN = "demo"
COMPONENTS_WITH_CONFIG_ENTRY_DEMO_PLATFORM = [
"air_quality",
"alarm_control_panel",
"binary_sensor",
"camera",
"climate",
"cover",
"fan",
"humidifier",
"light",
"lock",
"media_player",
"sensor",
"switch",
"vacuum",
"water_heater",
]
COMPONENTS_WITH_DEMO_PLATFORM = [
"tts",
"stt",
"mailbox",
"notify",
"image_processing",
"calendar",
"device_tracker",
]
async def async_setup(hass, config):
"""Set up the demo environment."""
if DOMAIN not in config:
return True
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data={}
)
)
# Set up demo platforms
for component in COMPONENTS_WITH_DEMO_PLATFORM:
hass.async_create_task(
hass.helpers.discovery.async_load_platform(component, DOMAIN, {}, config)
)
config.setdefault(ha.DOMAIN, {})
config.setdefault(DOMAIN, {})
# Set up sun
if not hass.config.latitude:
hass.config.latitude = 32.87336
if not hass.config.longitude:
hass.config.longitude = 117.22743
tasks = [bootstrap.async_setup_component(hass, "sun", config)]
# Set up input select
tasks.append(
bootstrap.async_setup_component(
hass,
"input_select",
{
"input_select": {
"living_room_preset": {
"options": ["Visitors", "Visitors with kids", "Home Alone"]
},
"who_cooks": {
"icon": "mdi:panda",
"initial": "Anne Therese",
"name": "Cook today",
"options": ["Paulus", "Anne Therese"],
},
}
},
)
)
# Set up input boolean
tasks.append(
bootstrap.async_setup_component(
hass,
"input_boolean",
{
"input_boolean": {
"notify": {
"icon": "mdi:car",
"initial": False,
"name": "Notify Anne Therese is home",
}
}
},
)
)
# Set up input number
tasks.append(
bootstrap.async_setup_component(
hass,
"input_number",
{
"input_number": {
"noise_allowance": {
"icon": "mdi:bell-ring",
"min": 0,
"max": 10,
"name": "Allowed Noise",
"unit_of_measurement": "dB",
}
}
},
)
)
results = await asyncio.gather(*tasks)
if any(not result for result in results):
return False
# Set up example persistent notification
hass.components.persistent_notification.async_create(
"This is an example of a persistent notification.", title="Example Notification"
)
async def demo_start_listener(_event):
"""Finish set up."""
await finish_setup(hass, config)
hass.bus.async_listen(EVENT_HOMEASSISTANT_START, demo_start_listener)
return True
async def async_setup_entry(hass, config_entry):
"""Set the config entry up."""
# Set up demo platforms with config entry
for component in COMPONENTS_WITH_CONFIG_ENTRY_DEMO_PLATFORM:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
async def finish_setup(hass, config):
"""Finish set up once demo platforms are set up."""
switches = None
lights = None
while not switches and not lights:
# Not all platforms might be loaded.
if switches is not None:
await asyncio.sleep(0)
switches = sorted(hass.states.async_entity_ids("switch"))
lights = sorted(hass.states.async_entity_ids("light"))
# Set up scripts
await bootstrap.async_setup_component(
hass,
"script",
{
"script": {
"demo": {
"alias": f"Toggle {lights[0].split('.')[1]}",
"sequence": [
{
"service": "light.turn_off",
"data": {ATTR_ENTITY_ID: lights[0]},
},
{"delay": {"seconds": 5}},
{
"service": "light.turn_on",
"data": {ATTR_ENTITY_ID: lights[0]},
},
{"delay": {"seconds": 5}},
{
"service": "light.turn_off",
"data": {ATTR_ENTITY_ID: lights[0]},
},
],
}
}
},
)
# Set up scenes
await bootstrap.async_setup_component(
hass,
"scene",
{
"scene": [
{
"name": "Romantic lights",
"entities": {
lights[0]: True,
lights[1]: {
"state": "on",
"xy_color": [0.33, 0.66],
"brightness": 200,
},
},
},
{
"name": "Switch on and off",
"entities": {switches[0]: True, switches[1]: False},
},
]
},
)
|
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_SWITCHES
import homeassistant.helpers.config_validation as cv
from .base_class import SWITCHES_SCHEMA, PilightBaseDevice
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SWITCHES): vol.Schema({cv.string: SWITCHES_SCHEMA})}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Pilight platform."""
switches = config.get(CONF_SWITCHES)
devices = []
for dev_name, dev_config in switches.items():
devices.append(PilightSwitch(hass, dev_name, dev_config))
add_entities(devices)
class PilightSwitch(PilightBaseDevice, SwitchEntity):
"""Representation of a Pilight switch."""
|
class CannotOverwriteExistingCassetteException(Exception):
def __init__(self, *args, **kwargs):
self.cassette = kwargs["cassette"]
self.failed_request = kwargs["failed_request"]
message = self._get_message(kwargs["cassette"], kwargs["failed_request"])
super().__init__(message)
@staticmethod
def _get_message(cassette, failed_request):
"""Get the final message related to the exception"""
# Get the similar requests in the cassette that
# have match the most with the request.
best_matches = cassette.find_requests_with_most_matches(failed_request)
if best_matches:
# Build a comprehensible message to put in the exception.
best_matches_msg = "Found {} similar requests with {} different matcher(s) :\n".format(
len(best_matches), len(best_matches[0][2])
)
for idx, best_match in enumerate(best_matches, start=1):
request, succeeded_matchers, failed_matchers_assertion_msgs = best_match
best_matches_msg += (
"\n%s - (%r).\n"
"Matchers succeeded : %s\n"
"Matchers failed :\n" % (idx, request, succeeded_matchers)
)
for failed_matcher, assertion_msg in failed_matchers_assertion_msgs:
best_matches_msg += "%s - assertion failure :\n" "%s\n" % (failed_matcher, assertion_msg)
else:
best_matches_msg = "No similar requests, that have not been played, found."
return (
"Can't overwrite existing cassette (%r) in "
"your current record mode (%r).\n"
"No match for the request (%r) was found.\n"
"%s" % (cassette._path, cassette.record_mode, failed_request, best_matches_msg)
)
class UnhandledHTTPRequestError(KeyError):
"""Raised when a cassette does not contain the request we want."""
pass
|
import unittest
from unittest.mock import patch, Mock
from flask import Flask
from acme import challenges
from lemur.plugins.lemur_acme import plugin
class TestAcmeHttp(unittest.TestCase):
def setUp(self):
self.ACMEHttpIssuerPlugin = plugin.ACMEHttpIssuerPlugin()
self.acme = plugin.AcmeHandler()
        # Create a new Flask application for the duration of the test. In Python 3.8, the application
        # context must be pushed manually so tests in a dev environment do not fail with
        # 'Working outside of application context'.
_app = Flask('lemur_test_acme')
self.ctx = _app.app_context()
assert self.ctx
self.ctx.push()
def tearDown(self):
self.ctx.pop()
def test_create_authority(self):
options = {
"plugin": {"plugin_options": [{"name": "certificate", "value": "123"}]}
}
acme_root, b, role = self.ACMEHttpIssuerPlugin.create_authority(options)
self.assertEqual(acme_root, "123")
self.assertEqual(b, "")
self.assertEqual(role, [{"username": "", "password": "", "name": "acme"}])
@patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.setup_acme_client")
@patch("lemur.plugins.base.manager.PluginManager.get")
@patch("lemur.plugins.lemur_acme.challenge_types.destination_service")
@patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.request_certificate")
@patch("lemur.plugins.lemur_acme.plugin.authorization_service")
def test_create_certificate(
self,
mock_authorization_service,
mock_request_certificate,
mock_destination_service,
mock_plugin_manager_get,
mock_acme,
):
provider = plugin.ACMEHttpIssuerPlugin()
mock_authority = Mock()
mock_authority.options = '[{"name": "tokenDestination", "value": "mock-sftp-destination"}]'
mock_order_resource = Mock()
mock_order_resource.authorizations = [Mock()]
mock_order_resource.authorizations[0].body.challenges = [Mock()]
mock_order_resource.authorizations[0].body.challenges[0].response_and_validation.return_value = (Mock(), "Anything-goes")
mock_order_resource.authorizations[0].body.challenges[0].chall = challenges.HTTP01(
token=b'\x0f\x1c\xbe#od\xd1\x9c\xa6j\\\xa4\r\xed\xe5\xbf0pz\xeaxnl)\xea[i\xbc\x95\x08\x96\x1f')
mock_client = Mock()
mock_client.new_order.return_value = mock_order_resource
mock_client.answer_challenge.return_value = True
mock_finalized_order = Mock()
mock_finalized_order.fullchain_pem = "-----BEGIN CERTIFICATE-----\nMIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw\nGjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2\nMDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0\n8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym\noLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0\nZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN\nxDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56\ndhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9\nAgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw\nHQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0\nBggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu\nb3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu\nY3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq\nhkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF\nUGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9\nAFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp\nDQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7\nIkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf\nzWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI\nPTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w\nSVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em\n2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0\nWzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt\nn5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=\n-----END CERTIFICATE-----\n-----BEGIN CERTIFICATE-----\nMIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw\nGjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2\nMDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0\n8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym\noLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0\nZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN\nxDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56\ndhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9\nAgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw\nHQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0\nBggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu\nb3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu\nY3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq\nhkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF\nUGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9\nAFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp\nDQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7\nIkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf\nzWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI\nPTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w\nSVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em\n2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0\nWzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt\nn5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=\n-----END CERTIFICATE-----\n"
mock_client.poll_and_finalize.return_value = mock_finalized_order
mock_acme.return_value = (mock_client, "")
mock_destination = Mock()
mock_destination.label = "mock-sftp-destination"
mock_destination.plugin_name = "SFTPDestinationPlugin"
mock_destination_service.get.return_value = mock_destination
mock_destination_plugin = Mock()
mock_destination_plugin.upload_acme_token.return_value = True
mock_plugin_manager_get.return_value = mock_destination_plugin
issuer_options = {
"authority": mock_authority,
"tokenDestination": "mock-sftp-destination",
"common_name": "test.netflix.net",
}
csr = "123"
mock_request_certificate.return_value = ("pem_certificate", "chain")
pem_certificate, pem_certificate_chain, _ = provider.create_certificate(csr, issuer_options)
self.assertEqual(pem_certificate, "-----BEGIN CERTIFICATE-----\nMIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw\nGjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2\nMDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0\n8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym\noLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0\nZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN\nxDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56\ndhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9\nAgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw\nHQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0\nBggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu\nb3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu\nY3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq\nhkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF\nUGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9\nAFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp\nDQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7\nIkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf\nzWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI\nPTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w\nSVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em\n2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0\nWzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt\nn5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=\n-----END CERTIFICATE-----\n")
self.assertEqual(pem_certificate_chain,
"-----BEGIN CERTIFICATE-----\nMIIEqzCCApOgAwIBAgIRAIvhKg5ZRO08VGQx8JdhT+UwDQYJKoZIhvcNAQELBQAw\nGjEYMBYGA1UEAwwPRmFrZSBMRSBSb290IFgxMB4XDTE2MDUyMzIyMDc1OVoXDTM2\nMDUyMzIyMDc1OVowIjEgMB4GA1UEAwwXRmFrZSBMRSBJbnRlcm1lZGlhdGUgWDEw\nggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDtWKySDn7rWZc5ggjz3ZB0\n8jO4xti3uzINfD5sQ7Lj7hzetUT+wQob+iXSZkhnvx+IvdbXF5/yt8aWPpUKnPym\noLxsYiI5gQBLxNDzIec0OIaflWqAr29m7J8+NNtApEN8nZFnf3bhehZW7AxmS1m0\nZnSsdHw0Fw+bgixPg2MQ9k9oefFeqa+7Kqdlz5bbrUYV2volxhDFtnI4Mh8BiWCN\nxDH1Hizq+GKCcHsinDZWurCqder/afJBnQs+SBSL6MVApHt+d35zjBD92fO2Je56\ndhMfzCgOKXeJ340WhW3TjD1zqLZXeaCyUNRnfOmWZV8nEhtHOFbUCU7r/KkjMZO9\nAgMBAAGjgeMwgeAwDgYDVR0PAQH/BAQDAgGGMBIGA1UdEwEB/wQIMAYBAf8CAQAw\nHQYDVR0OBBYEFMDMA0a5WCDMXHJw8+EuyyCm9Wg6MHoGCCsGAQUFBwEBBG4wbDA0\nBggrBgEFBQcwAYYoaHR0cDovL29jc3Auc3RnLXJvb3QteDEubGV0c2VuY3J5cHQu\nb3JnLzA0BggrBgEFBQcwAoYoaHR0cDovL2NlcnQuc3RnLXJvb3QteDEubGV0c2Vu\nY3J5cHQub3JnLzAfBgNVHSMEGDAWgBTBJnSkikSg5vogKNhcI5pFiBh54DANBgkq\nhkiG9w0BAQsFAAOCAgEABYSu4Il+fI0MYU42OTmEj+1HqQ5DvyAeyCA6sGuZdwjF\nUGeVOv3NnLyfofuUOjEbY5irFCDtnv+0ckukUZN9lz4Q2YjWGUpW4TTu3ieTsaC9\nAFvCSgNHJyWSVtWvB5XDxsqawl1KzHzzwr132bF2rtGtazSqVqK9E07sGHMCf+zp\nDQVDVVGtqZPHwX3KqUtefE621b8RI6VCl4oD30Olf8pjuzG4JKBFRFclzLRjo/h7\nIkkfjZ8wDa7faOjVXx6n+eUQ29cIMCzr8/rNWHS9pYGGQKJiY2xmVC9h12H99Xyf\nzWE9vb5zKP3MVG6neX1hSdo7PEAb9fqRhHkqVsqUvJlIRmvXvVKTwNCP3eCjRCCI\nPTAvjV+4ni786iXwwFYNz8l3PmPLCyQXWGohnJ8iBm+5nk7O2ynaPVW0U2W+pt2w\nSVuvdDM5zGv2f9ltNWUiYZHJ1mmO97jSY/6YfdOUH66iRtQtDkHBRdkNBsMbD+Em\n2TgBldtHNSJBfB3pm9FblgOcJ0FSWcUDWJ7vO0+NTXlgrRofRT6pVywzxVo6dND0\nWzYlTWeUVsO40xJqhgUQRER9YLOLxJ0O6C8i0xFxAMKOtSdodMB3RIwt7RFQ0uyt\nn5Z5MqkYhlMI3J1tPRTp1nEt9fyGspBOO05gi148Qasp+3N+svqKomoQglNoAxU=\n-----END CERTIFICATE-----\n")
@patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.setup_acme_client")
@patch("lemur.plugins.base.manager.PluginManager.get")
@patch("lemur.plugins.lemur_acme.challenge_types.destination_service")
@patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.request_certificate")
@patch("lemur.plugins.lemur_acme.plugin.authorization_service")
def test_create_certificate_missing_destination_token(
self,
mock_authorization_service,
mock_request_certificate,
mock_destination_service,
mock_plugin_manager_get,
mock_acme,
):
provider = plugin.ACMEHttpIssuerPlugin()
mock_authority = Mock()
mock_authority.options = '[{"name": "mock_name", "value": "mock_value"}]'
mock_order_resource = Mock()
mock_order_resource.authorizations = [Mock()]
mock_order_resource.authorizations[0].body.challenges = [Mock()]
mock_order_resource.authorizations[0].body.challenges[0].chall = challenges.HTTP01(
token=b'\x0f\x1c\xbe#od\xd1\x9c\xa6j\\\xa4\r\xed\xe5\xbf0pz\xeaxnl)\xea[i\xbc\x95\x08\x96\x1f')
mock_client = Mock()
mock_client.new_order.return_value = mock_order_resource
mock_acme.return_value = (mock_client, "")
mock_destination = Mock()
mock_destination.label = "mock-sftp-destination"
mock_destination.plugin_name = "SFTPDestinationPlugin"
mock_destination_service.get_by_label.return_value = mock_destination
mock_destination_plugin = Mock()
mock_destination_plugin.upload_acme_token.return_value = True
mock_plugin_manager_get.return_value = mock_destination_plugin
issuer_options = {
"authority": mock_authority,
"tokenDestination": "mock-sftp-destination",
"common_name": "test.netflix.net",
}
csr = "123"
mock_request_certificate.return_value = ("pem_certificate", "chain")
with self.assertRaisesRegex(Exception, "No token_destination configured"):
provider.create_certificate(csr, issuer_options)
@patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.setup_acme_client")
@patch("lemur.plugins.base.manager.PluginManager.get")
@patch("lemur.plugins.lemur_acme.challenge_types.destination_service")
@patch("lemur.plugins.lemur_acme.plugin.AcmeHandler.request_certificate")
@patch("lemur.plugins.lemur_acme.plugin.authorization_service")
def test_create_certificate_missing_http_challenge(
self,
mock_authorization_service,
mock_request_certificate,
mock_destination_service,
mock_plugin_manager_get,
mock_acme,
):
provider = plugin.ACMEHttpIssuerPlugin()
mock_authority = Mock()
mock_authority.options = '[{"name": "tokenDestination", "value": "mock-sftp-destination"}]'
mock_order_resource = Mock()
mock_order_resource.authorizations = [Mock()]
mock_order_resource.authorizations[0].body.challenges = [Mock()]
mock_order_resource.authorizations[0].body.challenges[0].chall = challenges.DNS01(
token=b'\x0f\x1c\xbe#od\xd1\x9c\xa6j\\\xa4\r\xed\xe5\xbf0pz\xeaxnl)\xea[i\xbc\x95\x08\x96\x1f')
mock_client = Mock()
mock_client.new_order.return_value = mock_order_resource
mock_acme.return_value = (mock_client, "")
issuer_options = {
"authority": mock_authority,
"tokenDestination": "mock-sftp-destination",
"common_name": "test.netflix.net",
}
csr = "123"
mock_request_certificate.return_value = ("pem_certificate", "chain")
with self.assertRaisesRegex(Exception, "HTTP-01 challenge was not offered"):
provider.create_certificate(csr, issuer_options)
|
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OPENING,
BinarySensorEntity,
)
from . import DATA_HIVE, DOMAIN, HiveEntity
DEVICETYPE_DEVICE_CLASS = {
"motionsensor": DEVICE_CLASS_MOTION,
"contactsensor": DEVICE_CLASS_OPENING,
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Hive sensor devices."""
if discovery_info is None:
return
session = hass.data.get(DATA_HIVE)
devs = []
for dev in discovery_info:
devs.append(HiveBinarySensorEntity(session, dev))
add_entities(devs)
class HiveBinarySensorEntity(HiveEntity, BinarySensorEntity):
"""Representation of a Hive binary sensor."""
@property
def unique_id(self):
"""Return unique ID of entity."""
return self._unique_id
@property
def device_info(self):
"""Return device information."""
return {"identifiers": {(DOMAIN, self.unique_id)}, "name": self.name}
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICETYPE_DEVICE_CLASS.get(self.node_device_type)
@property
def name(self):
"""Return the name of the binary sensor."""
return self.node_name
@property
def device_state_attributes(self):
"""Show Device Attributes."""
return self.attributes
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.session.sensor.get_state(self.node_id, self.node_device_type)
def update(self):
"""Update all Node data from Hive."""
self.session.core.update_data(self.node_id)
self.attributes = self.session.attributes.state_attributes(self.node_id)
|
from homeassistant.components.climate import (
DEFAULT_MAX_TEMP,
DEFAULT_MIN_TEMP,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
SUPPORT_TARGET_TEMPERATURE,
ClimateEntity,
)
from homeassistant.components.climate.const import CURRENT_HVAC_HEAT, CURRENT_HVAC_OFF
from homeassistant.const import ATTR_TEMPERATURE, CONF_NAME, TEMP_CELSIUS
from . import CONF_SERIAL, LIGHTWAVE_LINK
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Find and return LightWave lights."""
if discovery_info is None:
return
entities = []
lwlink = hass.data[LIGHTWAVE_LINK]
for device_id, device_config in discovery_info.items():
name = device_config[CONF_NAME]
serial = device_config[CONF_SERIAL]
entities.append(LightwaveTrv(name, device_id, lwlink, serial))
async_add_entities(entities)
class LightwaveTrv(ClimateEntity):
"""Representation of a LightWaveRF TRV."""
def __init__(self, name, device_id, lwlink, serial):
"""Initialize LightwaveTrv entity."""
self._name = name
self._device_id = device_id
self._state = None
self._current_temperature = None
self._target_temperature = None
self._hvac_action = None
self._lwlink = lwlink
self._serial = serial
        # inhibit is used to prevent a race condition on update. If non-zero, skip the next update cycle.
self._inhibit = 0
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_TARGET_TEMPERATURE
def update(self):
"""Communicate with a Lightwave RTF Proxy to get state."""
(temp, targ, _, trv_output) = self._lwlink.read_trv_status(self._serial)
if temp is not None:
self._current_temperature = temp
if targ is not None:
if self._inhibit == 0:
self._target_temperature = targ
if targ == 0:
# TRV off
self._target_temperature = None
if targ >= 40:
# Call for heat mode, or TRV in a fixed position
self._target_temperature = None
else:
# Done the job - use proxy next iteration
self._inhibit = 0
if trv_output is not None:
if trv_output > 0:
self._hvac_action = CURRENT_HVAC_HEAT
else:
self._hvac_action = CURRENT_HVAC_OFF
@property
def name(self):
"""Lightwave trv name."""
return self._name
@property
def current_temperature(self):
"""Property giving the current room temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Target room temperature."""
if self._inhibit > 0:
# If we get an update before the new temp has
# propagated, the target temp is set back to the
# old target on the next poll, showing a false
# reading temporarily.
self._target_temperature = self._inhibit
return self._target_temperature
@property
def hvac_modes(self):
"""HVAC modes."""
return [HVAC_MODE_HEAT, HVAC_MODE_OFF]
@property
def hvac_mode(self):
"""HVAC mode."""
return HVAC_MODE_HEAT
@property
def hvac_action(self):
"""HVAC action."""
return self._hvac_action
@property
def min_temp(self):
"""Min Temp."""
return DEFAULT_MIN_TEMP
@property
def max_temp(self):
"""Max Temp."""
return DEFAULT_MAX_TEMP
@property
def temperature_unit(self):
"""Set temperature unit."""
return TEMP_CELSIUS
@property
def target_temperature_step(self):
"""Set temperature step."""
return 0.5
def set_temperature(self, **kwargs):
"""Set TRV target temperature."""
if ATTR_TEMPERATURE in kwargs:
self._target_temperature = kwargs[ATTR_TEMPERATURE]
self._inhibit = self._target_temperature
self._lwlink.set_temperature(
self._device_id, self._target_temperature, self._name
)
async def async_set_hvac_mode(self, hvac_mode):
"""Set HVAC Mode for TRV."""
|
from kalliope._version import version_str
from kalliope.core.ConfigurationManager import SettingLoader
import logging
from kalliope.core.Utils.google_tracking import GoogleTracking
logging.basicConfig()
logger = logging.getLogger("kalliope")
class HookManager(object):
@classmethod
def on_start(cls):
return cls.execute_synapses_in_hook_name("on_start")
@classmethod
def on_waiting_for_trigger(cls):
return cls.execute_synapses_in_hook_name("on_waiting_for_trigger")
@classmethod
def on_triggered(cls):
sl = SettingLoader()
        if sl.settings.tracker_anonymous_usage_stats_id != "not_defined_id":  # the user allows sending tracking hits
gt = GoogleTracking(cid=sl.settings.send_anonymous_usage_stats,
kalliope_version=version_str,
category='synapse',
action='execute')
gt.start()
return cls.execute_synapses_in_hook_name("on_triggered")
@classmethod
def on_start_listening(cls):
return cls.execute_synapses_in_hook_name("on_start_listening")
@classmethod
def on_stop_listening(cls):
return cls.execute_synapses_in_hook_name("on_stop_listening")
@classmethod
def on_order_found(cls):
return cls.execute_synapses_in_hook_name("on_order_found")
@classmethod
def on_order_not_found(cls):
return cls.execute_synapses_in_hook_name("on_order_not_found")
@classmethod
def on_processed_synapses(cls):
return cls.execute_synapses_in_hook_name("on_processed_synapses")
@classmethod
def on_deaf(cls):
return cls.execute_synapses_in_hook_name("on_deaf")
@classmethod
def on_undeaf(cls):
return cls.execute_synapses_in_hook_name("on_undeaf")
@classmethod
def on_mute(cls):
return cls.execute_synapses_in_hook_name("on_mute")
@classmethod
def on_unmute(cls):
return cls.execute_synapses_in_hook_name("on_unmute")
@classmethod
def on_start_speaking(cls):
return cls.execute_synapses_in_hook_name("on_start_speaking")
@classmethod
def on_stop_speaking(cls):
return cls.execute_synapses_in_hook_name("on_stop_speaking")
@classmethod
def on_stt_error(cls):
return cls.execute_synapses_in_hook_name("on_stt_error")
@classmethod
def execute_synapses_in_hook_name(cls, hook_name):
# need to import SynapseLauncher from here to avoid cross import
from kalliope.core.SynapseLauncher import SynapseLauncher
logger.debug("[HookManager] calling synapses in hook name: %s" % hook_name)
settings = SettingLoader().settings
# list of synapse to execute
try:
list_synapse = settings.hooks[hook_name]
logger.debug("[HookManager] hook: %s , type: %s" % (hook_name, type(list_synapse)))
except KeyError:
            # the hook hasn't been set in the settings; just skip execution
logger.debug("[HookManager] hook not set: %s" % hook_name)
return None
if isinstance(list_synapse, str):
list_synapse = [list_synapse]
return SynapseLauncher.start_synapse_by_list_name(list_synapse, new_lifo=True)
|
import logging
import threading
from pymochad import controller, exceptions
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_COMM_TYPE = "comm_type"
DOMAIN = "mochad"
REQ_LOCK = threading.Lock()
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_HOST, default="localhost"): cv.string,
vol.Optional(CONF_PORT, default=1099): cv.port,
}
)
},
extra=vol.ALLOW_EXTRA,
)
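# A minimal configuration sketch matching the schema above (the values shown
# are simply the schema defaults and purely illustrative):
#
# mochad:
#   host: localhost
#   port: 1099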
def setup(hass, config):
"""Set up the mochad component."""
conf = config[DOMAIN]
host = conf.get(CONF_HOST)
port = conf.get(CONF_PORT)
try:
mochad_controller = MochadCtrl(host, port)
except exceptions.ConfigurationError:
        _LOGGER.exception("Error setting up the mochad controller")
return False
def stop_mochad(event):
"""Stop the Mochad service."""
mochad_controller.disconnect()
def start_mochad(event):
"""Start the Mochad service."""
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, stop_mochad)
hass.bus.listen_once(EVENT_HOMEASSISTANT_START, start_mochad)
hass.data[DOMAIN] = mochad_controller
return True
class MochadCtrl:
"""Mochad controller."""
def __init__(self, host, port):
"""Initialize a PyMochad controller."""
super().__init__()
self._host = host
self._port = port
self.ctrl = controller.PyMochad(server=self._host, port=self._port)
@property
def host(self):
"""Return the server where mochad is running."""
return self._host
@property
def port(self):
"""Return the port mochad is running on."""
return self._port
def disconnect(self):
"""Close the connection to the mochad socket."""
self.ctrl.socket.close()
|
import voluptuous as vol
from homeassistant.const import CONF_DEVICE
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
DOMAIN = "elv"
DEFAULT_DEVICE = "/dev/ttyUSB0"
ELV_PLATFORMS = ["switch"]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{vol.Optional(CONF_DEVICE, default=DEFAULT_DEVICE): cv.string}
)
},
extra=vol.ALLOW_EXTRA,
)
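# A minimal configuration sketch matching the schema above (the device path is
# the schema default and purely illustrative):
#
# elv:
#   device: /dev/ttyUSB0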
def setup(hass, config):
"""Set up the PCA switch platform."""
for platform in ELV_PLATFORMS:
discovery.load_platform(
hass, platform, DOMAIN, {"device": config[DOMAIN][CONF_DEVICE]}, config
)
return True
|
import pickle
import pytest
import xarray as xr
from . import raises_regex
@xr.register_dataset_accessor("example_accessor")
@xr.register_dataarray_accessor("example_accessor")
class ExampleAccessor:
"""For the pickling tests below."""
def __init__(self, xarray_obj):
self.obj = xarray_obj
class TestAccessor:
def test_register(self):
@xr.register_dataset_accessor("demo")
@xr.register_dataarray_accessor("demo")
class DemoAccessor:
"""Demo accessor."""
def __init__(self, xarray_obj):
self._obj = xarray_obj
@property
def foo(self):
return "bar"
ds = xr.Dataset()
assert ds.demo.foo == "bar"
da = xr.DataArray(0)
assert da.demo.foo == "bar"
# accessor is cached
assert ds.demo is ds.demo
# check descriptor
assert ds.demo.__doc__ == "Demo accessor."
assert xr.Dataset.demo.__doc__ == "Demo accessor."
assert isinstance(ds.demo, DemoAccessor)
assert xr.Dataset.demo is DemoAccessor
# ensure we can remove it
del xr.Dataset.demo
assert not hasattr(xr.Dataset, "demo")
with pytest.warns(Warning, match="overriding a preexisting attribute"):
@xr.register_dataarray_accessor("demo")
class Foo:
pass
# it didn't get registered again
assert not hasattr(xr.Dataset, "demo")
def test_pickle_dataset(self):
ds = xr.Dataset()
ds_restored = pickle.loads(pickle.dumps(ds))
assert ds.identical(ds_restored)
# state save on the accessor is restored
assert ds.example_accessor is ds.example_accessor
ds.example_accessor.value = "foo"
ds_restored = pickle.loads(pickle.dumps(ds))
assert ds.identical(ds_restored)
assert ds_restored.example_accessor.value == "foo"
def test_pickle_dataarray(self):
        array = xr.DataArray(0)
assert array.example_accessor is array.example_accessor
array_restored = pickle.loads(pickle.dumps(array))
assert array.identical(array_restored)
def test_broken_accessor(self):
# regression test for GH933
@xr.register_dataset_accessor("stupid_accessor")
class BrokenAccessor:
def __init__(self, xarray_obj):
raise AttributeError("broken")
with raises_regex(RuntimeError, "error initializing"):
xr.Dataset().stupid_accessor
|
from homeassistant.core import callback
DOMAIN = "ping"
PLATFORMS = ["binary_sensor"]
PING_ID = "ping_id"
DEFAULT_START_ID = 129
MAX_PING_ID = 65534
@callback
def async_get_next_ping_id(hass):
"""Find the next id to use in the outbound ping.
    Must be called from within the event loop.
"""
current_id = hass.data.setdefault(DOMAIN, {}).get(PING_ID, DEFAULT_START_ID)
if current_id == MAX_PING_ID:
next_id = DEFAULT_START_ID
else:
next_id = current_id + 1
hass.data[DOMAIN][PING_ID] = next_id
return next_id
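# Illustrative behaviour implied by the logic above: the first call returns
# DEFAULT_START_ID + 1 (130), subsequent calls count up one at a time, and once
# MAX_PING_ID (65534) is reached the counter wraps back to DEFAULT_START_ID.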
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import mobilenet
slim = tf.contrib.slim
class MobileNetTest(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 224, 224
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, end_points = mobilenet.mobilenet(inputs, num_classes)
self.assertEquals(end_points['MobileNet/conv_ds_2/depthwise_conv'].get_shape().as_list(), [5, 112, 112, 32])
self.assertEquals(end_points['MobileNet/conv_ds_3/depthwise_conv'].get_shape().as_list(), [5, 56, 56, 64])
self.assertEquals(end_points['MobileNet/conv_ds_4/depthwise_conv'].get_shape().as_list(), [5, 56, 56, 128])
self.assertEquals(end_points['MobileNet/conv_ds_5/depthwise_conv'].get_shape().as_list(), [5, 28, 28, 128])
self.assertEquals(end_points['MobileNet/conv_ds_6/depthwise_conv'].get_shape().as_list(), [5, 28, 28, 256])
self.assertEquals(end_points['MobileNet/conv_ds_7/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 256])
self.assertEquals(end_points['MobileNet/conv_ds_8/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_9/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_10/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_11/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_12/depthwise_conv'].get_shape().as_list(), [5, 14, 14, 512])
self.assertEquals(end_points['MobileNet/conv_ds_13/depthwise_conv'].get_shape().as_list(), [5, 7, 7, 512])
self.assertEquals(end_points['MobileNet/conv_ds_14/depthwise_conv'].get_shape().as_list(), [5, 7, 7, 1024])
self.assertEquals(end_points['squeeze'].get_shape().as_list(), [5, 1024])
self.assertEquals(logits.op.name, 'MobileNet/fc_16/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testEvaluation(self):
batch_size = 2
height, width = 224, 224
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = mobilenet.mobilenet(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testForward(self):
batch_size = 1
height, width = 224, 224
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = mobilenet.mobilenet(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
|
from collections import OrderedDict
import logging
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import (
CONF_ABOVE,
CONF_BELOW,
CONF_DEVICE_CLASS,
CONF_ENTITY_ID,
CONF_NAME,
CONF_PLATFORM,
CONF_STATE,
CONF_VALUE_TEMPLATE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import condition
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import (
TrackTemplate,
async_track_state_change_event,
async_track_template_result,
)
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.template import result_as_boolean
from . import DOMAIN, PLATFORMS
ATTR_OBSERVATIONS = "observations"
ATTR_OCCURRED_OBSERVATION_ENTITIES = "occurred_observation_entities"
ATTR_PROBABILITY = "probability"
ATTR_PROBABILITY_THRESHOLD = "probability_threshold"
CONF_OBSERVATIONS = "observations"
CONF_PRIOR = "prior"
CONF_TEMPLATE = "template"
CONF_PROBABILITY_THRESHOLD = "probability_threshold"
CONF_P_GIVEN_F = "prob_given_false"
CONF_P_GIVEN_T = "prob_given_true"
CONF_TO_STATE = "to_state"
DEFAULT_NAME = "Bayesian Binary Sensor"
DEFAULT_PROBABILITY_THRESHOLD = 0.5
_LOGGER = logging.getLogger(__name__)
NUMERIC_STATE_SCHEMA = vol.Schema(
{
CONF_PLATFORM: "numeric_state",
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_ABOVE): vol.Coerce(float),
vol.Optional(CONF_BELOW): vol.Coerce(float),
vol.Required(CONF_P_GIVEN_T): vol.Coerce(float),
vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float),
},
required=True,
)
STATE_SCHEMA = vol.Schema(
{
CONF_PLATFORM: CONF_STATE,
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Required(CONF_TO_STATE): cv.string,
vol.Required(CONF_P_GIVEN_T): vol.Coerce(float),
vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float),
},
required=True,
)
TEMPLATE_SCHEMA = vol.Schema(
{
CONF_PLATFORM: CONF_TEMPLATE,
vol.Required(CONF_VALUE_TEMPLATE): cv.template,
vol.Required(CONF_P_GIVEN_T): vol.Coerce(float),
vol.Optional(CONF_P_GIVEN_F): vol.Coerce(float),
},
required=True,
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS): cv.string,
vol.Required(CONF_OBSERVATIONS): vol.Schema(
vol.All(
cv.ensure_list,
[vol.Any(NUMERIC_STATE_SCHEMA, STATE_SCHEMA, TEMPLATE_SCHEMA)],
)
),
vol.Required(CONF_PRIOR): vol.Coerce(float),
vol.Optional(
CONF_PROBABILITY_THRESHOLD, default=DEFAULT_PROBABILITY_THRESHOLD
): vol.Coerce(float),
}
)
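# A minimal configuration sketch matching the schema above (entity id, state
# and probabilities are illustrative; assumes the platform is registered under
# the name "bayesian"):
#
# binary_sensor:
#   - platform: bayesian
#     prior: 0.2
#     probability_threshold: 0.9
#     observations:
#       - platform: state
#         entity_id: sensor.example
#         to_state: "on"
#         prob_given_true: 0.7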
def update_probability(prior, prob_given_true, prob_given_false):
"""Update probability using Bayes' rule."""
numerator = prob_given_true * prior
denominator = numerator + prob_given_false * (1 - prior)
return numerator / denominator
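# A minimal worked example (illustrative numbers, not taken from any real
# configuration): with a prior of 0.2, prob_given_true of 0.9 and
# prob_given_false of 0.1, a single observed event updates the probability to
# 0.9 * 0.2 / (0.9 * 0.2 + 0.1 * 0.8), which is about 0.69.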
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Bayesian Binary sensor."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config[CONF_NAME]
observations = config[CONF_OBSERVATIONS]
prior = config[CONF_PRIOR]
probability_threshold = config[CONF_PROBABILITY_THRESHOLD]
device_class = config.get(CONF_DEVICE_CLASS)
async_add_entities(
[
BayesianBinarySensor(
name, prior, observations, probability_threshold, device_class
)
]
)
class BayesianBinarySensor(BinarySensorEntity):
"""Representation of a Bayesian sensor."""
def __init__(self, name, prior, observations, probability_threshold, device_class):
"""Initialize the Bayesian sensor."""
self._name = name
self._observations = observations
self._probability_threshold = probability_threshold
self._device_class = device_class
self._deviation = False
self._callbacks = []
self.prior = prior
self.probability = prior
self.current_observations = OrderedDict({})
self.observations_by_entity = self._build_observations_by_entity()
self.observations_by_template = self._build_observations_by_template()
self.observation_handlers = {
"numeric_state": self._process_numeric_state,
"state": self._process_state,
}
async def async_added_to_hass(self):
"""
        Call when entity is about to be added.
All relevant update logic for instance attributes occurs within this closure.
Other methods in this class are designed to avoid directly modifying instance
attributes, by instead focusing on returning relevant data back to this method.
The goal of this method is to ensure that `self.current_observations` and `self.probability`
        are set on a best-effort basis when this entity is registered with hass.
In addition, this method must register the state listener defined within, which
will be called any time a relevant entity changes its state.
"""
@callback
def async_threshold_sensor_state_listener(event):
"""
Handle sensor state changes.
When a state changes, we must update our list of current observations,
then calculate the new probability.
"""
new_state = event.data.get("new_state")
if new_state is None or new_state.state == STATE_UNKNOWN:
return
entity = event.data.get("entity_id")
self.current_observations.update(self._record_entity_observations(entity))
self.async_set_context(event.context)
self._recalculate_and_write_state()
self.async_on_remove(
async_track_state_change_event(
self.hass,
list(self.observations_by_entity),
async_threshold_sensor_state_listener,
)
)
@callback
def _async_template_result_changed(event, updates):
track_template_result = updates.pop()
template = track_template_result.template
result = track_template_result.result
entity = event and event.data.get("entity_id")
if isinstance(result, TemplateError):
_LOGGER.error(
"TemplateError('%s') "
"while processing template '%s' "
"in entity '%s'",
result,
template,
self.entity_id,
)
should_trigger = False
else:
should_trigger = result_as_boolean(result)
for obs in self.observations_by_template[template]:
if should_trigger:
obs_entry = {"entity_id": entity, **obs}
else:
obs_entry = None
self.current_observations[obs["id"]] = obs_entry
if event:
self.async_set_context(event.context)
self._recalculate_and_write_state()
for template in self.observations_by_template:
info = async_track_template_result(
self.hass,
[TrackTemplate(template, None)],
_async_template_result_changed,
)
self._callbacks.append(info)
self.async_on_remove(info.async_remove)
info.async_refresh()
self.current_observations.update(self._initialize_current_observations())
self.probability = self._calculate_new_probability()
self._deviation = bool(self.probability >= self._probability_threshold)
@callback
def _recalculate_and_write_state(self):
self.probability = self._calculate_new_probability()
self._deviation = bool(self.probability >= self._probability_threshold)
self.async_write_ha_state()
def _initialize_current_observations(self):
local_observations = OrderedDict({})
for entity in self.observations_by_entity:
local_observations.update(self._record_entity_observations(entity))
return local_observations
def _record_entity_observations(self, entity):
local_observations = OrderedDict({})
for entity_obs in self.observations_by_entity[entity]:
platform = entity_obs["platform"]
should_trigger = self.observation_handlers[platform](entity_obs)
if should_trigger:
obs_entry = {"entity_id": entity, **entity_obs}
else:
obs_entry = None
local_observations[entity_obs["id"]] = obs_entry
return local_observations
def _calculate_new_probability(self):
prior = self.prior
for obs in self.current_observations.values():
if obs is not None:
prior = update_probability(
prior,
obs["prob_given_true"],
obs.get("prob_given_false", 1 - obs["prob_given_true"]),
)
return prior
def _build_observations_by_entity(self):
"""
Build and return data structure of the form below.
{
"sensor.sensor1": [{"id": 0, ...}, {"id": 1, ...}],
"sensor.sensor2": [{"id": 2, ...}],
...
}
Each "observation" must be recognized uniquely, and it should be possible
for all relevant observations to be looked up via their `entity_id`.
"""
observations_by_entity = {}
for ind, obs in enumerate(self._observations):
obs["id"] = ind
if "entity_id" not in obs:
continue
entity_ids = [obs["entity_id"]]
for e_id in entity_ids:
observations_by_entity.setdefault(e_id, []).append(obs)
return observations_by_entity
def _build_observations_by_template(self):
"""
Build and return data structure of the form below.
{
"template": [{"id": 0, ...}, {"id": 1, ...}],
"template2": [{"id": 2, ...}],
...
}
Each "observation" must be recognized uniquely, and it should be possible
for all relevant observations to be looked up via their `template`.
"""
observations_by_template = {}
for ind, obs in enumerate(self._observations):
obs["id"] = ind
if "value_template" not in obs:
continue
template = obs.get(CONF_VALUE_TEMPLATE)
observations_by_template.setdefault(template, []).append(obs)
return observations_by_template
def _process_numeric_state(self, entity_observation):
"""Return True if numeric condition is met."""
entity = entity_observation["entity_id"]
return condition.async_numeric_state(
self.hass,
entity,
entity_observation.get("below"),
entity_observation.get("above"),
None,
entity_observation,
)
def _process_state(self, entity_observation):
"""Return True if state conditions are met."""
entity = entity_observation["entity_id"]
return condition.state(self.hass, entity, entity_observation.get("to_state"))
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return true if sensor is on."""
return self._deviation
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_class(self):
"""Return the sensor class of the sensor."""
return self._device_class
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
attr_observations_list = [
obs.copy() for obs in self.current_observations.values() if obs is not None
]
for item in attr_observations_list:
item.pop("value_template", None)
return {
ATTR_OBSERVATIONS: attr_observations_list,
ATTR_OCCURRED_OBSERVATION_ENTITIES: list(
{
obs.get("entity_id")
for obs in self.current_observations.values()
if obs is not None and obs.get("entity_id") is not None
}
),
ATTR_PROBABILITY: round(self.probability, 2),
ATTR_PROBABILITY_THRESHOLD: self._probability_threshold,
}
async def async_update(self):
"""Get the latest data and update the states."""
if not self._callbacks:
self._recalculate_and_write_state()
return
# Force recalc of the templates. The states will
# update automatically.
for call in self._callbacks:
call.async_refresh()
|
from collections import Counter, defaultdict
from ..utils import bfs, fzset, classify
from ..exceptions import GrammarError
from ..grammar import Rule, Terminal, NonTerminal
class RulePtr(object):
__slots__ = ('rule', 'index')
def __init__(self, rule, index):
assert isinstance(rule, Rule)
assert index <= len(rule.expansion)
self.rule = rule
self.index = index
def __repr__(self):
before = [x.name for x in self.rule.expansion[:self.index]]
after = [x.name for x in self.rule.expansion[self.index:]]
return '<%s : %s * %s>' % (self.rule.origin.name, ' '.join(before), ' '.join(after))
@property
def next(self):
return self.rule.expansion[self.index]
def advance(self, sym):
assert self.next == sym
return RulePtr(self.rule, self.index+1)
@property
def is_satisfied(self):
return self.index == len(self.rule.expansion)
def __eq__(self, other):
return self.rule == other.rule and self.index == other.index
def __hash__(self):
return hash((self.rule, self.index))
# state generation ensures no duplicate LR0ItemSets
class LR0ItemSet(object):
__slots__ = ('kernel', 'closure', 'transitions', 'lookaheads')
def __init__(self, kernel, closure):
self.kernel = fzset(kernel)
self.closure = fzset(closure)
self.transitions = {}
self.lookaheads = defaultdict(set)
def __repr__(self):
return '{%s | %s}' % (', '.join([repr(r) for r in self.kernel]), ', '.join([repr(r) for r in self.closure]))
def update_set(set1, set2):
    """Merge set2 into set1 in place; return True if set1 changed."""
    if not set2 or set1 > set2:
        return False
    copy = set(set1)
    set1 |= set2
    return set1 != copy
def calculate_sets(rules):
"""Calculate FOLLOW sets.
Adapted from: http://lara.epfl.ch/w/cc09:algorithm_for_first_and_follow_sets"""
symbols = {sym for rule in rules for sym in rule.expansion} | {rule.origin for rule in rules}
# foreach grammar rule X ::= Y(1) ... Y(k)
# if k=0 or {Y(1),...,Y(k)} subset of NULLABLE then
# NULLABLE = NULLABLE union {X}
# for i = 1 to k
# if i=1 or {Y(1),...,Y(i-1)} subset of NULLABLE then
# FIRST(X) = FIRST(X) union FIRST(Y(i))
# for j = i+1 to k
# if i=k or {Y(i+1),...Y(k)} subset of NULLABLE then
# FOLLOW(Y(i)) = FOLLOW(Y(i)) union FOLLOW(X)
# if i+1=j or {Y(i+1),...,Y(j-1)} subset of NULLABLE then
# FOLLOW(Y(i)) = FOLLOW(Y(i)) union FIRST(Y(j))
# until none of NULLABLE,FIRST,FOLLOW changed in last iteration
NULLABLE = set()
FIRST = {}
FOLLOW = {}
for sym in symbols:
FIRST[sym]={sym} if sym.is_term else set()
FOLLOW[sym]=set()
# Calculate NULLABLE and FIRST
changed = True
while changed:
changed = False
for rule in rules:
if set(rule.expansion) <= NULLABLE:
if update_set(NULLABLE, {rule.origin}):
changed = True
for i, sym in enumerate(rule.expansion):
if set(rule.expansion[:i]) <= NULLABLE:
if update_set(FIRST[rule.origin], FIRST[sym]):
changed = True
else:
break
# Calculate FOLLOW
changed = True
while changed:
changed = False
for rule in rules:
for i, sym in enumerate(rule.expansion):
if i==len(rule.expansion)-1 or set(rule.expansion[i+1:]) <= NULLABLE:
if update_set(FOLLOW[sym], FOLLOW[rule.origin]):
changed = True
for j in range(i+1, len(rule.expansion)):
if set(rule.expansion[i+1:j]) <= NULLABLE:
if update_set(FOLLOW[sym], FIRST[rule.expansion[j]]):
changed = True
return FIRST, FOLLOW, NULLABLE
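# A small worked example (hypothetical grammar, not taken from this codebase):
# for S -> A 'b' and A -> 'a' | <empty>, the algorithm above yields
# NULLABLE = {A}, FIRST(A) = {'a'}, FIRST(S) = {'a', 'b'} and FOLLOW(A) = {'b'}.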
class GrammarAnalyzer(object):
def __init__(self, parser_conf, debug=False):
self.debug = debug
root_rules = {start: Rule(NonTerminal('$root_' + start), [NonTerminal(start), Terminal('$END')])
for start in parser_conf.start}
rules = parser_conf.rules + list(root_rules.values())
self.rules_by_origin = classify(rules, lambda r: r.origin)
if len(rules) != len(set(rules)):
duplicates = [item for item, count in Counter(rules).items() if count > 1]
raise GrammarError("Rules defined twice: %s" % ', '.join(str(i) for i in duplicates))
for r in rules:
for sym in r.expansion:
if not (sym.is_term or sym in self.rules_by_origin):
raise GrammarError("Using an undefined rule: %s" % sym)
self.start_states = {start: self.expand_rule(root_rule.origin)
for start, root_rule in root_rules.items()}
self.end_states = {start: fzset({RulePtr(root_rule, len(root_rule.expansion))})
for start, root_rule in root_rules.items()}
lr0_root_rules = {start: Rule(NonTerminal('$root_' + start), [NonTerminal(start)])
for start in parser_conf.start}
lr0_rules = parser_conf.rules + list(lr0_root_rules.values())
assert(len(lr0_rules) == len(set(lr0_rules)))
self.lr0_rules_by_origin = classify(lr0_rules, lambda r: r.origin)
# cache RulePtr(r, 0) in r (no duplicate RulePtr objects)
self.lr0_start_states = {start: LR0ItemSet([RulePtr(root_rule, 0)], self.expand_rule(root_rule.origin, self.lr0_rules_by_origin))
for start, root_rule in lr0_root_rules.items()}
self.FIRST, self.FOLLOW, self.NULLABLE = calculate_sets(rules)
def expand_rule(self, source_rule, rules_by_origin=None):
"Returns all init_ptrs accessible by rule (recursive)"
if rules_by_origin is None:
rules_by_origin = self.rules_by_origin
init_ptrs = set()
def _expand_rule(rule):
assert not rule.is_term, rule
for r in rules_by_origin[rule]:
init_ptr = RulePtr(r, 0)
init_ptrs.add(init_ptr)
if r.expansion: # if not empty rule
new_r = init_ptr.next
if not new_r.is_term:
yield new_r
for _ in bfs([source_rule], _expand_rule):
pass
return fzset(init_ptrs)
|
from __future__ import unicode_literals
import json
import logging
import os.path
import unittest
import numpy as np
from gensim import utils
from gensim.scripts.segment_wiki import segment_all_articles, segment_and_write_all_articles
from gensim.test.utils import datapath, get_tmpfile
from gensim.scripts.word2vec2tensor import word2vec2tensor
from gensim.models import KeyedVectors
class TestSegmentWiki(unittest.TestCase):
def setUp(self):
self.fname = datapath('enwiki-latest-pages-articles1.xml-p000000010p000030302-shortened.bz2')
self.expected_title = 'Anarchism'
self.expected_section_titles = [
'Introduction',
'Etymology and terminology',
'History',
'Anarchist schools of thought',
'Internal issues and debates',
'Topics of interest',
'Criticisms',
'References',
'Further reading',
'External links'
]
def tearDown(self):
# remove all temporary test files
fname = get_tmpfile('script.tst')
extensions = ['', '.json']
for ext in extensions:
try:
os.remove(fname + ext)
except OSError:
pass
def test_segment_all_articles(self):
title, sections, interlinks = next(segment_all_articles(self.fname, include_interlinks=True))
# Check title
self.assertEqual(title, self.expected_title)
# Check section titles
section_titles = [s[0] for s in sections]
self.assertEqual(section_titles, self.expected_section_titles)
# Check text
first_section_text = sections[0][1]
first_sentence = "'''Anarchism''' is a political philosophy that advocates self-governed societies"
self.assertTrue(first_sentence in first_section_text)
# Check interlinks
self.assertEqual(len(interlinks), 685)
self.assertTrue(interlinks[0] == ("political philosophy", "political philosophy"))
self.assertTrue(interlinks[1] == ("self-governance", "self-governed"))
self.assertTrue(interlinks[2] == ("stateless society", "stateless societies"))
def test_generator_len(self):
expected_num_articles = 106
num_articles = sum(1 for x in segment_all_articles(self.fname))
self.assertEqual(num_articles, expected_num_articles)
def test_json_len(self):
tmpf = get_tmpfile('script.tst.json')
segment_and_write_all_articles(self.fname, tmpf, workers=1)
expected_num_articles = 106
with utils.open(tmpf, 'rb') as f:
num_articles = sum(1 for line in f)
self.assertEqual(num_articles, expected_num_articles)
def test_segment_and_write_all_articles(self):
tmpf = get_tmpfile('script.tst.json')
segment_and_write_all_articles(self.fname, tmpf, workers=1, include_interlinks=True)
# Get the first line from the text file we created.
with open(tmpf) as f:
first = next(f)
# decode JSON line into a Python dictionary object
article = json.loads(first)
title, section_titles, interlinks = article['title'], article['section_titles'], article['interlinks']
self.assertEqual(title, self.expected_title)
self.assertEqual(section_titles, self.expected_section_titles)
# Check interlinks
# JSON has no tuples, only lists. So, we convert lists to tuples explicitly before comparison.
self.assertEqual(len(interlinks), 685)
self.assertEqual(tuple(interlinks[0]), ("political philosophy", "political philosophy"))
self.assertEqual(tuple(interlinks[1]), ("self-governance", "self-governed"))
self.assertEqual(tuple(interlinks[2]), ("stateless society", "stateless societies"))
class TestWord2Vec2Tensor(unittest.TestCase):
def setUp(self):
self.datapath = datapath('word2vec_pre_kv_c')
self.output_folder = get_tmpfile('w2v2t_test')
self.metadata_file = self.output_folder + '_metadata.tsv'
self.tensor_file = self.output_folder + '_tensor.tsv'
self.vector_file = self.output_folder + '_vector.tsv'
def testConversion(self):
word2vec2tensor(word2vec_model_path=self.datapath, tensor_filename=self.output_folder)
with utils.open(self.metadata_file, 'rb') as f:
metadata = f.readlines()
with utils.open(self.tensor_file, 'rb') as f:
vectors = f.readlines()
# check if number of words and vector size in tensor file line up with word2vec
with utils.open(self.datapath, 'rb') as f:
first_line = f.readline().strip()
number_words, vector_size = map(int, first_line.split(b' '))
self.assertTrue(len(metadata) == len(vectors) == number_words,
('Metadata file %s and tensor file %s imply different number of rows.'
% (self.metadata_file, self.tensor_file)))
# grab metadata and vectors from written file
metadata = [word.strip() for word in metadata]
vectors = [vector.replace(b'\t', b' ') for vector in vectors]
        # get the original KeyedVectors model
orig_model = KeyedVectors.load_word2vec_format(self.datapath, binary=False)
# check that the KV model and tensor files have the same values key-wise
for word, vector in zip(metadata, vectors):
word_string = word.decode("utf8")
vector_string = vector.decode("utf8")
vector_array = np.array(list(map(float, vector_string.split())))
np.testing.assert_almost_equal(orig_model[word_string], vector_array, decimal=5)
if __name__ == '__main__':
logging.basicConfig(level=logging.DEBUG)
unittest.main()
|
import sys
import string
from html5lib import HTMLParser as _HTMLParser
from html5lib.treebuilders.etree_lxml import TreeBuilder
from lxml import etree
from lxml.html import Element, XHTML_NAMESPACE, _contains_block_level_tag
# python3 compatibility
try:
_strings = basestring
except NameError:
_strings = (bytes, str)
try:
from urllib2 import urlopen
except ImportError:
from urllib.request import urlopen
try:
from urlparse import urlparse
except ImportError:
from urllib.parse import urlparse
class HTMLParser(_HTMLParser):
"""An html5lib HTML parser with lxml as tree."""
def __init__(self, strict=False, **kwargs):
_HTMLParser.__init__(self, strict=strict, tree=TreeBuilder, **kwargs)
try:
from html5lib import XHTMLParser as _XHTMLParser
except ImportError:
pass
else:
class XHTMLParser(_XHTMLParser):
"""An html5lib XHTML Parser with lxml as tree."""
def __init__(self, strict=False, **kwargs):
_XHTMLParser.__init__(self, strict=strict, tree=TreeBuilder, **kwargs)
xhtml_parser = XHTMLParser()
def _find_tag(tree, tag):
elem = tree.find(tag)
if elem is not None:
return elem
return tree.find('{%s}%s' % (XHTML_NAMESPACE, tag))
def document_fromstring(html, guess_charset=None, parser=None):
"""
    Parse a whole document from a string.
If `guess_charset` is true, or if the input is not Unicode but a
byte string, the `chardet` library will perform charset guessing
on the string.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
if parser is None:
parser = html_parser
options = {}
if guess_charset is None and isinstance(html, bytes):
        # html5lib does not accept useChardet as an argument if it detects
        # that the html argument would produce unicode objects.
guess_charset = True
if guess_charset is not None:
options['useChardet'] = guess_charset
return parser.parse(html, **options).getroot()
def fragments_fromstring(html, no_leading_text=False,
guess_charset=None, parser=None):
"""Parses several HTML elements, returning a list of elements.
The first item in the list may be a string. If no_leading_text is true,
then it will be an error if there is leading text, and it will always be
a list of only elements.
If `guess_charset` is true, the `chardet` library will perform charset
guessing on the string.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
if parser is None:
parser = html_parser
options = {}
if guess_charset is None and isinstance(html, bytes):
        # html5lib does not accept useChardet as an argument if it detects
        # that the html argument would produce unicode objects.
guess_charset = False
if guess_charset is not None:
options['useChardet'] = guess_charset
children = parser.parseFragment(html, 'div', **options)
if children and isinstance(children[0], _strings):
if no_leading_text:
if children[0].strip():
raise etree.ParserError('There is leading text: %r' %
children[0])
del children[0]
return children
def fragment_fromstring(html, create_parent=False,
guess_charset=None, parser=None):
"""Parses a single HTML element; it is an error if there is more than
one element, or if anything but whitespace precedes or follows the
element.
If 'create_parent' is true (or is a tag name) then a parent node
will be created to encapsulate the HTML in a single element. In
this case, leading or trailing text is allowed.
If `guess_charset` is true, the `chardet` library will perform charset
guessing on the string.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
accept_leading_text = bool(create_parent)
elements = fragments_fromstring(
html, guess_charset=guess_charset, parser=parser,
no_leading_text=not accept_leading_text)
if create_parent:
if not isinstance(create_parent, _strings):
create_parent = 'div'
new_root = Element(create_parent)
if elements:
if isinstance(elements[0], _strings):
new_root.text = elements[0]
del elements[0]
new_root.extend(elements)
return new_root
if not elements:
raise etree.ParserError('No elements found')
if len(elements) > 1:
raise etree.ParserError('Multiple elements found')
result = elements[0]
if result.tail and result.tail.strip():
raise etree.ParserError('Element followed by text: %r' % result.tail)
result.tail = None
return result
def fromstring(html, guess_charset=None, parser=None):
"""Parse the html, returning a single element/document.
This tries to minimally parse the chunk of text, without knowing if it
is a fragment or a document.
'base_url' will set the document's base_url attribute (and the tree's
docinfo.URL)
If `guess_charset` is true, or if the input is not Unicode but a
byte string, the `chardet` library will perform charset guessing
on the string.
"""
if not isinstance(html, _strings):
raise TypeError('string required')
doc = document_fromstring(html, parser=parser,
guess_charset=guess_charset)
# document starts with doctype or <html>, full document!
start = html[:50]
if isinstance(start, bytes):
# Allow text comparison in python3.
# Decode as ascii, that also covers latin-1 and utf-8 for the
# characters we need.
start = start.decode('ascii', 'replace')
start = start.lstrip().lower()
if start.startswith('<html') or start.startswith('<!doctype'):
return doc
head = _find_tag(doc, 'head')
# if the head is not empty we have a full document
if len(head):
return doc
body = _find_tag(doc, 'body')
# The body has just one element, so it was probably a single
# element passed in
if (len(body) == 1 and (not body.text or not body.text.strip())
and (not body[-1].tail or not body[-1].tail.strip())):
return body[0]
# Now we have a body which represents a bunch of tags which have the
# content that was passed in. We will create a fake container, which
# is the body tag, except <body> implies too much structure.
if _contains_block_level_tag(body):
body.tag = 'div'
else:
body.tag = 'span'
return body
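# Illustrative behaviour implied by the checks above (inputs are hypothetical):
# fromstring('<!doctype html><html>...</html>') returns the full document root,
# fromstring('<p>hello</p>') returns the single <p> element, and a fragment with
# several block-level siblings comes back wrapped in a synthetic <div>.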
def parse(filename_url_or_file, guess_charset=None, parser=None):
"""Parse a filename, URL, or file-like object into an HTML document
tree. Note: this returns a tree, not an element. Use
``parse(...).getroot()`` to get the document root.
If ``guess_charset`` is true, the ``useChardet`` option is passed into
html5lib to enable character detection. This option is on by default
when parsing from URLs, off by default when parsing from file(-like)
objects (which tend to return Unicode more often than not), and on by
default when parsing from a file path (which is read in binary mode).
"""
if parser is None:
parser = html_parser
if not isinstance(filename_url_or_file, _strings):
fp = filename_url_or_file
if guess_charset is None:
# assume that file-like objects return Unicode more often than bytes
guess_charset = False
elif _looks_like_url(filename_url_or_file):
fp = urlopen(filename_url_or_file)
if guess_charset is None:
# assume that URLs return bytes
guess_charset = True
else:
fp = open(filename_url_or_file, 'rb')
if guess_charset is None:
guess_charset = True
options = {}
    # html5lib does not accept useChardet as an argument if it detects
    # that the html argument would produce unicode objects.
if guess_charset:
options['useChardet'] = guess_charset
return parser.parse(fp, **options)
def _looks_like_url(str):
    """Return True if the string looks like a URL rather than a filesystem path."""
    scheme = urlparse(str)[0]
if not scheme:
return False
elif (sys.platform == 'win32' and
scheme in string.ascii_letters
and len(scheme) == 1):
# looks like a 'normal' absolute path
return False
else:
return True
html_parser = HTMLParser()
|
from pytest_flask.fixtures import client
read_only = True
def test_pagination(client):
"""Do we return paginated results when a 'page' parameter is provided?"""
response = client.get('/artist/?page=2')
assert response.status_code == 200
assert len(response.json['resources']) == 20
assert response.json['resources'][0]['ArtistId'] == 21
def test_filtering(client):
"""Do we return filtered results when a URL parameter is provided?"""
response = client.get('/artist/?Name=AC/DC')
assert response.status_code == 200
assert len(response.json['resources']) == 1
assert response.json['resources'][0]['ArtistId'] == 1
def test_filtering_unknown_field(client):
    """Do we return an error when filtering on an unknown field?"""
response = client.get('/artist/?Foo=AC/DC')
assert response.status_code == 400
def test_wildcard_filtering(client):
"""Do we return filtered results when a wildcarded URL parameter is provided?"""
response = client.get('/artist/?Name=%25%25DC')
assert response.status_code == 200
assert len(response.json['resources']) == 1
assert response.json['resources'][0]['ArtistId'] == 1
def test_sorting(client):
"""Do we return sorted results when a 'sort' URL parameter is provided?"""
response = client.get('/artist/?sort=Name')
assert response.status_code == 200
assert len(response.json['resources']) == 276
assert response.json['resources'][0]['ArtistId'] == 43
def test_sorting_descending(client):
"""Can we sort results in descending order?"""
response = client.get('/artist/?sort=-Name')
assert response.status_code == 200
assert len(response.json['resources']) == 276
assert response.json['resources'][0]['ArtistId'] == 155
def test_limit(client):
"""Do we return sorted results when a 'limit' URL parameter is provided?"""
response = client.get('/artist/?limit=5')
assert response.status_code == 200
assert len(response.json['resources']) == 5
assert response.json['resources'][0]['ArtistId'] == 1
def test_sort_limit_and_pagination(client):
"""Can we combine filtering parameters to get targeted results?"""
response = client.get('/artist/?limit=2&sort=ArtistId&page=2')
assert response.status_code == 200
assert len(response.json['resources']) == 2
assert response.json['resources'][0]['ArtistId'] == 3
|
import codecs
import os
import sys
import pickle as pypickle
try:
import cPickle as cpickle
except ImportError: # pragma: no cover
cpickle = None # noqa
from collections import namedtuple
from contextlib import contextmanager
from io import BytesIO
from .exceptions import (
reraise, ContentDisallowed, DecodeError,
EncodeError, SerializerNotInstalled
)
from .utils.compat import entrypoints
from .utils.encoding import bytes_to_str, str_to_bytes
__all__ = ('pickle', 'loads', 'dumps', 'register', 'unregister')
SKIP_DECODE = frozenset(['binary', 'ascii-8bit'])
TRUSTED_CONTENT = frozenset(['application/data', 'application/text'])
if sys.platform.startswith('java'): # pragma: no cover
def _decode(t, coding):
return codecs.getdecoder(coding)(t)[0]
else:
_decode = codecs.decode
pickle = cpickle or pypickle
pickle_load = pickle.load
#: Kombu requires Python 2.5 or later so we use protocol 2 by default.
#: There's a new protocol (3) but this is only supported by Python 3.
pickle_protocol = int(os.environ.get('PICKLE_PROTOCOL', 2))
codec = namedtuple('codec', ('content_type', 'content_encoding', 'encoder'))
@contextmanager
def _reraise_errors(wrapper,
include=(Exception,), exclude=(SerializerNotInstalled,)):
try:
yield
except exclude:
raise
except include as exc:
reraise(wrapper, wrapper(exc), sys.exc_info()[2])
def pickle_loads(s, load=pickle_load):
# used to support buffer objects
return load(BytesIO(s))
def parenthesize_alias(first, second):
return f'{first} ({second})' if first else second
class SerializerRegistry:
"""The registry keeps track of serialization methods."""
def __init__(self):
self._encoders = {}
self._decoders = {}
self._default_encode = None
self._default_content_type = None
self._default_content_encoding = None
self._disabled_content_types = set()
self.type_to_name = {}
self.name_to_type = {}
def register(self, name, encoder, decoder, content_type,
content_encoding='utf-8'):
"""Register a new encoder/decoder.
Arguments:
name (str): A convenience name for the serialization method.
encoder (callable): A method that will be passed a python data
structure and should return a string representing the
serialized data. If :const:`None`, then only a decoder
will be registered. Encoding will not be possible.
decoder (Callable): A method that will be passed a string
representing serialized data and should return a python
data structure. If :const:`None`, then only an encoder
will be registered. Decoding will not be possible.
content_type (str): The mime-type describing the serialized
structure.
content_encoding (str): The content encoding (character set) that
the `decoder` method will be returning. Will usually be
`utf-8`, `us-ascii`, or `binary`.
"""
if encoder:
self._encoders[name] = codec(
content_type, content_encoding, encoder,
)
if decoder:
self._decoders[content_type] = decoder
self.type_to_name[content_type] = name
self.name_to_type[name] = content_type
def enable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.discard(name)
def disable(self, name):
if '/' not in name:
name = self.name_to_type[name]
self._disabled_content_types.add(name)
def unregister(self, name):
"""Unregister registered encoder/decoder.
Arguments:
name (str): Registered serialization method name.
Raises:
SerializerNotInstalled: If a serializer by that name
cannot be found.
"""
try:
content_type = self.name_to_type[name]
self._decoders.pop(content_type, None)
self._encoders.pop(name, None)
self.type_to_name.pop(content_type, None)
self.name_to_type.pop(name, None)
except KeyError:
raise SerializerNotInstalled(
f'No encoder/decoder installed for {name}')
def _set_default_serializer(self, name):
"""Set the default serialization method used by this library.
Arguments:
name (str): The name of the registered serialization method.
For example, `json` (default), `pickle`, `yaml`, `msgpack`,
or any custom methods registered using :meth:`register`.
Raises:
SerializerNotInstalled: If the serialization method
requested is not available.
"""
try:
(self._default_content_type, self._default_content_encoding,
self._default_encode) = self._encoders[name]
except KeyError:
raise SerializerNotInstalled(
f'No encoder installed for {name}')
def dumps(self, data, serializer=None):
"""Encode data.
Serialize a data structure into a string suitable for sending
as an AMQP message body.
Arguments:
data (List, Dict, str): The message data to send.
serializer (str): An optional string representing
the serialization method you want the data marshalled
into. (For example, `json`, `raw`, or `pickle`).
If :const:`None` (default), then json will be used, unless
`data` is a :class:`str` or :class:`unicode` object. In this
latter case, no serialization occurs as it would be
unnecessary.
Note that if `serializer` is specified, then that
serialization method will be used even if a :class:`str`
or :class:`unicode` object is passed in.
Returns:
Tuple[str, str, str]: A three-item tuple containing the
content type (e.g., `application/json`), content encoding, (e.g.,
`utf-8`) and a string containing the serialized data.
Raises:
SerializerNotInstalled: If the serialization method
requested is not available.
"""
if serializer == 'raw':
return raw_encode(data)
if serializer and not self._encoders.get(serializer):
raise SerializerNotInstalled(
f'No encoder installed for {serializer}')
# If a raw string was sent, assume binary encoding
# (it's likely either ASCII or a raw binary file, and a character
        # set of 'binary' will encompass both, even if not ideal).
if not serializer and isinstance(data, bytes):
# In Python 3+, this would be "bytes"; allow binary data to be
# sent as a message without getting encoder errors
return 'application/data', 'binary', data
# For Unicode objects, force it into a string
if not serializer and isinstance(data, str):
with _reraise_errors(EncodeError, exclude=()):
payload = data.encode('utf-8')
return 'text/plain', 'utf-8', payload
if serializer:
content_type, content_encoding, encoder = \
self._encoders[serializer]
else:
encoder = self._default_encode
content_type = self._default_content_type
content_encoding = self._default_content_encoding
with _reraise_errors(EncodeError):
payload = encoder(data)
return content_type, content_encoding, payload
def loads(self, data, content_type, content_encoding,
accept=None, force=False, _trusted_content=TRUSTED_CONTENT):
"""Decode serialized data.
Deserialize a data stream as serialized using `dumps`
based on `content_type`.
Arguments:
data (bytes, buffer, str): The message data to deserialize.
content_type (str): The content-type of the data.
(e.g., `application/json`).
content_encoding (str): The content-encoding of the data.
(e.g., `utf-8`, `binary`, or `us-ascii`).
accept (Set): List of content-types to accept.
Raises:
ContentDisallowed: If the content-type is not accepted.
Returns:
Any: The unserialized data.
"""
content_type = (bytes_to_str(content_type) if content_type
else 'application/data')
if accept is not None:
if content_type not in _trusted_content \
and content_type not in accept:
raise self._for_untrusted_content(content_type, 'untrusted')
else:
if content_type in self._disabled_content_types and not force:
raise self._for_untrusted_content(content_type, 'disabled')
content_encoding = (content_encoding or 'utf-8').lower()
if data:
decode = self._decoders.get(content_type)
if decode:
with _reraise_errors(DecodeError):
return decode(data)
if content_encoding not in SKIP_DECODE and \
not isinstance(data, str):
with _reraise_errors(DecodeError):
return _decode(data, content_encoding)
return data
def _for_untrusted_content(self, ctype, why):
return ContentDisallowed(
'Refusing to deserialize {} content of type {}'.format(
why,
parenthesize_alias(self.type_to_name.get(ctype, ctype), ctype),
),
)
#: Global registry of serializers/deserializers.
registry = SerializerRegistry()
dumps = registry.dumps
loads = registry.loads
register = registry.register
unregister = registry.unregister
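# A minimal usage sketch based on the registry API above (values illustrative;
# assumes the default 'json' serializer registered further down this module):
#
#   content_type, encoding, payload = dumps({"hello": "world"}, serializer="json")
#   obj = loads(payload, content_type, encoding, accept=["application/json"])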
def raw_encode(data):
"""Special case serializer."""
content_type = 'application/data'
payload = data
if isinstance(payload, str):
content_encoding = 'utf-8'
with _reraise_errors(EncodeError, exclude=()):
payload = payload.encode(content_encoding)
else:
content_encoding = 'binary'
return content_type, content_encoding, payload
def register_json():
"""Register a encoder/decoder for JSON serialization."""
from kombu.utils import json as _json
registry.register('json', _json.dumps, _json.loads,
content_type='application/json',
content_encoding='utf-8')
def register_yaml():
"""Register a encoder/decoder for YAML serialization.
It is slower than JSON, but allows for more data types
to be serialized. Useful if you need to send data such as dates
"""
try:
import yaml
registry.register('yaml', yaml.safe_dump, yaml.safe_load,
content_type='application/x-yaml',
content_encoding='utf-8')
except ImportError:
def not_available(*args, **kwargs):
"""Raise SerializerNotInstalled.
Used in case a client receives a yaml message, but yaml
isn't installed.
"""
raise SerializerNotInstalled(
'No decoder installed for YAML. Install the PyYAML library')
registry.register('yaml', None, not_available, 'application/x-yaml')
def unpickle(s):
return pickle_loads(str_to_bytes(s))
def register_pickle():
"""Register pickle serializer.
The fastest serialization method, but restricts
you to python clients.
"""
def pickle_dumps(obj, dumper=pickle.dumps):
return dumper(obj, protocol=pickle_protocol)
registry.register('pickle', pickle_dumps, unpickle,
content_type='application/x-python-serialize',
content_encoding='binary')
def register_msgpack():
"""Register msgpack serializer.
See Also:
https://msgpack.org/.
"""
pack = unpack = None
try:
import msgpack
if msgpack.version >= (0, 4):
from msgpack import packb, unpackb
def pack(s):
return packb(s, use_bin_type=True)
def unpack(s):
return unpackb(s, raw=False)
else:
def version_mismatch(*args, **kwargs):
raise SerializerNotInstalled(
'msgpack requires msgpack-python >= 0.4.0')
pack = unpack = version_mismatch
except (ImportError, ValueError):
def not_available(*args, **kwargs):
raise SerializerNotInstalled(
'No decoder installed for msgpack. '
'Please install the msgpack-python library')
pack = unpack = not_available
registry.register(
'msgpack', pack, unpack,
content_type='application/x-msgpack',
content_encoding='binary',
)
# Register the base serialization methods.
register_json()
register_pickle()
register_yaml()
register_msgpack()
# Default serializer is 'json'
registry._set_default_serializer('json')
_setupfuns = {
'json': register_json,
'pickle': register_pickle,
'yaml': register_yaml,
'msgpack': register_msgpack,
'application/json': register_json,
'application/x-yaml': register_yaml,
'application/x-python-serialize': register_pickle,
'application/x-msgpack': register_msgpack,
}
NOTSET = object()
def enable_insecure_serializers(choices=NOTSET):
"""Enable serializers that are considered to be unsafe.
Note:
Will enable ``pickle``, ``yaml`` and ``msgpack`` by default, but you
can also specify a list of serializers (by name or content type)
to enable.
"""
choices = ['pickle', 'yaml', 'msgpack'] if choices is NOTSET else choices
if choices is not None:
for choice in choices:
try:
registry.enable(choice)
except KeyError:
pass
def disable_insecure_serializers(allowed=NOTSET):
"""Disable untrusted serializers.
Will disable all serializers except ``json``
or you can specify a list of deserializers to allow.
Note:
Producers will still be able to serialize data
in these formats, but consumers will not accept
incoming data using the untrusted content types.
"""
allowed = ['json'] if allowed is NOTSET else allowed
for name in registry._decoders:
registry.disable(name)
if allowed is not None:
for name in allowed:
registry.enable(name)
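# For example, a consumer that explicitly trusts pickled content could
# re-enable just that serializer (a sketch, not a recommendation):
#
#     enable_insecure_serializers(['pickle'])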
# Insecure serializers are disabled by default since v3.0
disable_insecure_serializers()
# Load entrypoints from installed extensions
for ep, args in entrypoints('kombu.serializers'): # pragma: no cover
register(ep.name, *args)
def prepare_accept_content(content_types, name_to_type=None):
    """Convert a list of serializer names or content types into content types."""
    name_to_type = registry.name_to_type if not name_to_type else name_to_type
if content_types is not None:
return {n if '/' in n else name_to_type[n] for n in content_types}
return content_types
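# A minimal usage sketch (illustrative only) of the module-level registry
# defined above: dumps returns a (content_type, content_encoding, payload)
# triple that loads consumes again.
#
#     content_type, content_encoding, payload = dumps(
#         {'hello': 'world'}, serializer='json')
#     obj = loads(payload, content_type, content_encoding,
#                 accept=prepare_accept_content(['json']))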
|
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.interface import *
class IFace1(Interface): pass
class IFace2(Interface): pass
class IFace3(Interface): pass
class A(object):
__implements__ = (IFace1,)
class B(A): pass
class C1(B):
__implements__ = list(B.__implements__) + [IFace3]
class C2(B):
__implements__ = B.__implements__ + (IFace2,)
class D(C1):
__implements__ = ()
class Z(object): pass
class ExtendTC(TestCase):
def setUp(self):
global aimpl, c1impl, c2impl, dimpl
aimpl = A.__implements__
c1impl = C1.__implements__
c2impl = C2.__implements__
dimpl = D.__implements__
def test_base(self):
extend(A, IFace2)
self.assertEqual(A.__implements__, (IFace1, IFace2))
self.assertEqual(B.__implements__, (IFace1, IFace2))
self.assertTrue(B.__implements__ is A.__implements__)
self.assertEqual(C1.__implements__, [IFace1, IFace3, IFace2])
self.assertEqual(C2.__implements__, (IFace1, IFace2))
self.assertTrue(C2.__implements__ is c2impl)
self.assertEqual(D.__implements__, (IFace2,))
def test_already_impl(self):
extend(A, IFace1)
self.assertTrue(A.__implements__ is aimpl)
def test_no_impl(self):
extend(Z, IFace1)
self.assertEqual(Z.__implements__, (IFace1,))
def test_notimpl_explicit(self):
extend(C1, IFace3)
self.assertTrue(C1.__implements__ is c1impl)
self.assertTrue(D.__implements__ is dimpl)
def test_nonregr_implements_baseinterface(self):
class SubIFace(IFace1): pass
class X(object):
__implements__ = (SubIFace,)
self.assertTrue(SubIFace.is_implemented_by(X))
self.assertTrue(IFace1.is_implemented_by(X))
if __name__ == '__main__':
unittest_main()
|
import asyncio
import os
import shutil
import time
import a_sync
import mock
import requests
from itest_utils import cleanup_file
from itest_utils import clear_mesos_tools_cache
from itest_utils import get_service_connection_string
from itest_utils import setup_mesos_cli_config
from itest_utils import wait_for_marathon
from kazoo.client import KazooClient
from kazoo.exceptions import NoNodeError
from paasta_tools import marathon_tools
from paasta_tools import mesos_tools
from paasta_tools.mesos_maintenance import load_credentials
from paasta_tools.mesos_maintenance import undrain
def before_all(context):
context.cluster = "testcluster"
context.mesos_cli_config = os.path.join(os.getcwd(), "mesos-cli.json")
wait_for_marathon()
setup_mesos_cli_config(context.mesos_cli_config, context.cluster)
def after_all(context):
cleanup_file(context.mesos_cli_config)
def _stop_deployd(context):
if hasattr(context, "daemon"):
print("Stopping deployd...")
try:
context.daemon.terminate()
context.daemon.wait()
except OSError:
pass
def _clean_up_marathon_apps(context):
"""If a marathon client object exists in our context, delete any apps in Marathon and wait until they die."""
if hasattr(context, "marathon_clients"):
still_apps = True
while still_apps:
still_apps = False
for client in context.marathon_clients.get_all_clients():
apps = marathon_tools.list_all_marathon_app_ids(client)
if apps:
still_apps = True
else:
continue
print(
"after_scenario: Deleting %d apps to prep for the next scenario. %s"
% (len(apps), ",".join(apps))
)
for app in apps:
if marathon_tools.is_app_id_running(app, client):
print(
"after_scenario: %s does look like it is running. Scaling down and killing it..."
% app
)
client.scale_app(app, instances=0, force=True)
time.sleep(1)
client.delete_app(app, force=True)
else:
print(
"after_scenario: %s showed up in the app_list, but doesn't look like it is running?"
% app
)
time.sleep(0.5)
for client in context.marathon_clients.get_all_clients():
while client.list_deployments():
print(
"after_scenario: There are still marathon deployments in progress. sleeping."
)
time.sleep(0.5)
def _clean_up_mesos_cli_config(context):
"""If a mesos cli config file was written, clean it up."""
if hasattr(context, "mesos_cli_config_filename"):
print("Cleaning up %s" % context.mesos_cli_config_filename)
os.unlink(context.mesos_cli_config_filename)
del context.mesos_cli_config_filename
def _clean_up_soa_dir(context):
"""If a yelpsoa-configs directory was written, clean it up."""
if hasattr(context, "soa_dir"):
print("Cleaning up %s" % context.soa_dir)
shutil.rmtree(context.soa_dir)
del context.soa_dir
def _clean_up_etc_paasta(context):
if hasattr(context, "etc_paasta"):
print("Cleaning up %s" % context.etc_paasta)
shutil.rmtree(context.etc_paasta)
del context.etc_paasta
def _clean_up_zookeeper_autoscaling(context):
"""If max_instances was set for autoscaling, clean up zookeeper"""
if "max_instances" in context:
client = KazooClient(
hosts="%s/mesos-testcluster" % get_service_connection_string("zookeeper"),
read_only=True,
)
client.start()
try:
client.delete("/autoscaling", recursive=True)
except NoNodeError:
pass
client.stop()
client.close()
def _clean_up_paasta_native_frameworks(context):
clear_mesos_tools_cache()
# context.etc_paasta signals that we actually have configured the mesos-cli.json; without this, we don't know where
# to connect to clean up paasta native frameworks.
if hasattr(context, "etc_paasta"):
for framework in a_sync.block(
mesos_tools.get_mesos_master().frameworks, active_only=True
):
if framework.name.startswith("paasta_native ") or framework.name == getattr(
context, "framework_name", ""
):
print("cleaning up framework %s" % framework.name)
try:
mesos_tools.terminate_framework(framework.id)
except requests.exceptions.HTTPError as e:
print(
f"Got exception when terminating framework {framework.id}: {e}"
)
def _clean_up_maintenance(context):
"""If a host is marked as draining/down for maintenance, bring it back up"""
if hasattr(context, "at_risk_host"):
with mock.patch(
"paasta_tools.mesos_maintenance.get_principal", autospec=True
) as mock_get_principal, mock.patch(
"paasta_tools.mesos_maintenance.get_secret", autospec=True
) as mock_get_secret:
credentials = load_credentials(mesos_secrets="/etc/mesos-slave-secret")
mock_get_principal.return_value = credentials.principal
mock_get_secret.return_value = credentials.secret
undrain([context.at_risk_host])
del context.at_risk_host
def _clean_up_current_client(context):
if hasattr(context, "current_client"):
del context.current_client
def _clean_up_event_loop(context):
if hasattr(context, "event_loop"):
del context.event_loop
def after_scenario(context, scenario):
_stop_deployd(context)
_clean_up_marathon_apps(context)
_clean_up_maintenance(context)
_clean_up_mesos_cli_config(context)
_clean_up_soa_dir(context)
_clean_up_zookeeper_autoscaling(context)
_clean_up_maintenance(context)
_clean_up_paasta_native_frameworks(
context
) # this must come before _clean_up_etc_paasta
_clean_up_etc_paasta(context)
_clean_up_current_client(context)
_clean_up_event_loop(context)
def before_feature(context, feature):
if "skip" in feature.tags:
feature.skip("Marked with @skip")
return
def before_scenario(context, scenario):
context.event_loop = asyncio.get_event_loop()
if "skip" in scenario.effective_tags:
scenario.skip("Marked with @skip")
return
|
import math
import typing
import keras
import numpy as np
import pandas as pd
import matchzoo as mz
from matchzoo.data_generator.callbacks import Callback
class DataGenerator(keras.utils.Sequence):
"""
Data Generator.
Used to divide a :class:`matchzoo.DataPack` into batches. This is helpful
for generating batch-wise features and delaying data preprocessing to the
`fit` time.
See `tutorials/data_handling.ipynb` for a walkthrough.
:param data_pack: DataPack to generator data from.
:param mode: One of "point", "pair", and "list". (default: "point")
:param num_dup: Number of duplications per instance, only effective when
`mode` is "pair". (default: 1)
:param num_neg: Number of negative samples per instance, only effective
when `mode` is "pair". (default: 1)
    :param resample: Whether to resample for each epoch, only effective when
        `mode` is "pair". (default: `True`)
:param batch_size: Batch size. (default: 128)
    :param shuffle: Whether to shuffle the samples/instances. (default: `True`)
:param callbacks: Callbacks. See `matchzoo.data_generator.callbacks` for
more details.
Examples::
>>> import numpy as np
>>> import matchzoo as mz
>>> np.random.seed(0)
>>> data_pack = mz.datasets.toy.load_data()
>>> batch_size = 8
To generate data points:
>>> point_gen = mz.DataGenerator(
... data_pack=data_pack,
... batch_size=batch_size
... )
>>> len(point_gen)
13
>>> x, y = point_gen[0]
>>> for key, value in sorted(x.items()):
... print(key, str(value)[:30])
id_left ['Q6' 'Q17' 'Q1' 'Q13' 'Q16' '
id_right ['D6-6' 'D17-1' 'D1-2' 'D13-3'
text_left ['how long is the term for fed
text_right ['See Article I and Article II
To generate data pairs:
>>> pair_gen = mz.DataGenerator(
... data_pack=data_pack,
... mode='pair',
... num_dup=4,
... num_neg=4,
... batch_size=batch_size,
... shuffle=False
... )
>>> len(pair_gen)
3
>>> x, y = pair_gen[0]
>>> for key, value in sorted(x.items()):
... print(key, str(value)[:30])
id_left ['Q1' 'Q1' 'Q1' 'Q1' 'Q1' 'Q1'
id_right ['D1-3' 'D1-4' 'D1-0' 'D1-1' '
text_left ['how are glacier caves formed
text_right ['A glacier cave is a cave for
To generate data lists:
# TODO:
"""
def __init__(
self,
data_pack: mz.DataPack,
mode='point',
num_dup: int = 1,
num_neg: int = 1,
resample: bool = True,
batch_size: int = 128,
shuffle: bool = True,
callbacks: typing.List[Callback] = None
):
"""Init."""
if callbacks is None:
callbacks = []
if mode not in ('point', 'pair', 'list'):
raise ValueError(f"{mode} is not a valid mode type."
f"Must be one of `point`, `pair` or `list`.")
self._mode = mode
self._num_dup = num_dup
self._num_neg = num_neg
self._batch_size = batch_size
self._shuffle = shuffle
self._resample = resample
self._orig_relation = data_pack.relation
self._callbacks = callbacks
if mode == 'pair':
data_pack.relation = self._reorganize_pair_wise(
data_pack.relation,
num_dup=num_dup,
num_neg=num_neg
)
self._data_pack = data_pack
self._batch_indices = None
self.reset_index()
def __getitem__(self, item: int) -> typing.Tuple[dict, np.ndarray]:
"""Get a batch from index idx.
:param item: the index of the batch.
"""
if isinstance(item, slice):
indices = sum(self._batch_indices[item], [])
else:
indices = self._batch_indices[item]
batch_data_pack = self._data_pack[indices]
self._handle_callbacks_on_batch_data_pack(batch_data_pack)
x, y = batch_data_pack.unpack()
self._handle_callbacks_on_batch_unpacked(x, y)
return x, y
def __len__(self) -> int:
"""Get the total number of batches."""
return len(self._batch_indices)
def on_epoch_end(self):
"""Reorganize the index array while epoch is ended."""
if self._mode == 'pair' and self._resample:
self._data_pack.relation = self._reorganize_pair_wise(
relation=self._orig_relation,
num_dup=self._num_dup,
num_neg=self._num_neg
)
self.reset_index()
def reset_index(self):
"""
Set the :attr:`index_array`.
Here the :attr:`index_array` records the index of all the instances.
"""
# index pool: index -> instance index
if self._mode == 'point':
num_instances = len(self._data_pack)
index_pool = list(range(num_instances))
elif self._mode == 'pair':
index_pool = []
step_size = self._num_neg + 1
num_instances = int(len(self._data_pack) / step_size)
for i in range(num_instances):
lower = i * step_size
upper = (i + 1) * step_size
indices = list(range(lower, upper))
if indices:
index_pool.append(indices)
elif self._mode == 'list':
raise NotImplementedError(
f'{self._mode} data generator not implemented.')
else:
raise ValueError(f"{self._mode} is not a valid mode type"
f"Must be one of `point`, `pair` or `list`.")
if self._shuffle:
np.random.shuffle(index_pool)
# batch_indices: index -> batch of indices
self._batch_indices = []
for i in range(math.ceil(num_instances / self._batch_size)):
lower = self._batch_size * i
upper = self._batch_size * (i + 1)
candidates = index_pool[lower:upper]
if self._mode == 'pair':
candidates = sum(candidates, [])
if candidates:
self._batch_indices.append(candidates)
def _handle_callbacks_on_batch_data_pack(self, batch_data_pack):
for callback in self._callbacks:
callback.on_batch_data_pack(batch_data_pack)
def _handle_callbacks_on_batch_unpacked(self, x, y):
for callback in self._callbacks:
callback.on_batch_unpacked(x, y)
@property
def callbacks(self):
"""`callbacks` getter."""
return self._callbacks
@callbacks.setter
def callbacks(self, value):
"""`callbacks` setter."""
self._callbacks = value
@property
def num_neg(self):
"""`num_neg` getter."""
return self._num_neg
@num_neg.setter
def num_neg(self, value):
"""`num_neg` setter."""
self._num_neg = value
self.reset_index()
@property
def num_dup(self):
"""`num_dup` getter."""
return self._num_dup
@num_dup.setter
def num_dup(self, value):
"""`num_dup` setter."""
self._num_dup = value
self.reset_index()
@property
def mode(self):
"""`mode` getter."""
return self._mode
@mode.setter
def mode(self, value):
"""`mode` setter."""
self._mode = value
self.reset_index()
@property
def batch_size(self):
"""`batch_size` getter."""
return self._batch_size
@batch_size.setter
def batch_size(self, value):
"""`batch_size` setter."""
self._batch_size = value
self.reset_index()
@property
def shuffle(self):
"""`shuffle` getter."""
return self._shuffle
@shuffle.setter
def shuffle(self, value):
"""`shuffle` setter."""
self._shuffle = value
self.reset_index()
@property
def batch_indices(self):
"""`batch_indices` getter."""
return self._batch_indices
@classmethod
def _reorganize_pair_wise(
cls,
relation: pd.DataFrame,
num_dup: int = 1,
num_neg: int = 1
):
"""Re-organize the data pack as pair-wise format."""
pairs = []
groups = relation.sort_values(
'label', ascending=False).groupby('id_left')
for idx, group in groups:
labels = group.label.unique()
for label in labels[:-1]:
pos_samples = group[group.label == label]
pos_samples = pd.concat([pos_samples] * num_dup)
neg_samples = group[group.label < label]
for _, pos_sample in pos_samples.iterrows():
pos_sample = pd.DataFrame([pos_sample])
neg_sample = neg_samples.sample(num_neg, replace=True)
pairs.extend((pos_sample, neg_sample))
new_relation = pd.concat(pairs, ignore_index=True)
return new_relation
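# A rough sketch of what pair-wise reorganization produces (illustrative, not
# taken from the toy data set): with num_dup=1 and num_neg=2, every positive
# relation row is followed by two negative rows sampled (with replacement)
# from lower-labelled documents of the same query, so reset_index can slice
# the relation table in consecutive blocks of ``num_neg + 1`` rows per pair.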
|
import unittest
import mock
from kalliope.core import LifoManager
from kalliope.core.Models import Brain, Signal, Singleton
from kalliope.core.Models.MatchedSynapse import MatchedSynapse
from kalliope.core.Models.settings.Settings import Settings
from kalliope.core.SynapseLauncher import SynapseLauncher, SynapseNameNotFound
from kalliope.core.Models import Neuron
from kalliope.core.Models import Synapse
class TestSynapseLauncher(unittest.TestCase):
"""
Test the class SynapseLauncher
"""
def setUp(self):
# Init
neuron1 = Neuron(name='neurone1', parameters={'var1': 'val1'})
neuron2 = Neuron(name='neurone2', parameters={'var2': 'val2'})
neuron3 = Neuron(name='neurone3', parameters={'var3': 'val3'})
neuron4 = Neuron(name='neurone4', parameters={'var4': 'val4'})
signal1 = Signal(name="order", parameters="this is the sentence")
signal2 = Signal(name="order", parameters="this is the second sentence")
signal3 = Signal(name="order", parameters="that is part of the third sentence")
self.synapse1 = Synapse(name="Synapse1", neurons=[neuron1, neuron2], signals=[signal1])
self.synapse2 = Synapse(name="Synapse2", neurons=[neuron3, neuron4], signals=[signal2])
self.synapse3 = Synapse(name="Synapse3", neurons=[neuron2, neuron4], signals=[signal3])
self.synapse4 = Synapse(name="Synapse4", neurons=[neuron4], signals=[signal3])
self.all_synapse_list = [self.synapse1,
self.synapse2,
self.synapse3,
self.synapse4]
self.brain_test = Brain(synapses=self.all_synapse_list)
self.settings_test = Settings()
# clean the LiFO
Singleton._instances = dict()
LifoManager.clean_saved_lifo()
def test_start_synapse_by_list_name_single_synapse(self):
# existing synapse in the brain
with mock.patch("kalliope.core.Lifo.LIFOBuffer.execute"):
should_be_created_matched_synapse = MatchedSynapse(matched_synapse=self.synapse1)
SynapseLauncher.start_synapse_by_list_name(["Synapse1"], brain=self.brain_test)
# we expect that the lifo has been loaded with the synapse to run
expected_result = [[should_be_created_matched_synapse]]
lifo_buffer = LifoManager.get_singleton_lifo()
self.assertEqual(expected_result, lifo_buffer.lifo_list)
# we expect that the lifo has been loaded with the synapse to run and overwritten parameters
Singleton._instances = dict()
LifoManager.clean_saved_lifo()
lifo_buffer = LifoManager.get_singleton_lifo()
overriding_param = {
"val1": "val"
}
SynapseLauncher.start_synapse_by_list_name(["Synapse1"], brain=self.brain_test,
overriding_parameter_dict=overriding_param)
should_be_created_matched_synapse = MatchedSynapse(matched_synapse=self.synapse1,
overriding_parameter=overriding_param)
# we expect that the lifo has been loaded with the synapse to run
expected_result = [[should_be_created_matched_synapse]]
self.assertEqual(expected_result, lifo_buffer.lifo_list)
# non existing synapse in the brain
with self.assertRaises(SynapseNameNotFound):
SynapseLauncher.start_synapse_by_list_name(["not_existing"], brain=self.brain_test)
# check that the cortex is well loaded with temp parameter from a signal
with mock.patch("kalliope.core.Lifo.LIFOBuffer.execute"):
overriding_parameter_dict = {
"parameter1": "value1"
}
with mock.patch("kalliope.core.Cortex.Cortex.add_parameters_from_order") as cortex_mock:
SynapseLauncher.start_synapse_by_list_name(["Synapse1"],
brain=self.brain_test,
overriding_parameter_dict=overriding_parameter_dict)
cortex_mock.assert_called_with(overriding_parameter_dict)
        # check that a disabled synapse is not run
self.synapse4.enabled = False
LifoManager.clean_saved_lifo()
with mock.patch("kalliope.core.Lifo.LIFOBuffer.execute"):
SynapseLauncher.start_synapse_by_list_name(["Synapse4"], brain=self.brain_test)
# we expect that the lifo has NOT been loaded with the disabled synapse
expected_result = [[]]
lifo_buffer = LifoManager.get_singleton_lifo()
self.assertEqual(expected_result, lifo_buffer.lifo_list)
def test_start_synapse_by_list_name(self):
# test to start a list of synapse
with mock.patch("kalliope.core.Lifo.LIFOBuffer.execute"):
created_matched_synapse1 = MatchedSynapse(matched_synapse=self.synapse1)
created_matched_synapse2 = MatchedSynapse(matched_synapse=self.synapse2)
expected_list_matched_synapse = [created_matched_synapse1, created_matched_synapse2]
SynapseLauncher.start_synapse_by_list_name(["Synapse1", "Synapse2"], brain=self.brain_test)
# we expect that the lifo has been loaded with the synapse to run
expected_result = [expected_list_matched_synapse]
lifo_buffer = LifoManager.get_singleton_lifo()
self.maxDiff = None
self.assertEqual(expected_result, lifo_buffer.lifo_list)
# empty list should return none
empty_list = list()
self.assertIsNone(SynapseLauncher.start_synapse_by_list_name(empty_list))
# test to start a synapse list with a new lifo
# we create a Lifo that is the current singleton
Singleton._instances = dict()
LifoManager.clean_saved_lifo()
lifo_buffer = LifoManager.get_singleton_lifo()
created_matched_synapse1 = MatchedSynapse(matched_synapse=self.synapse1)
lifo_buffer.lifo_list = [created_matched_synapse1]
# the current status of the singleton lifo should not move even after the call of SynapseLauncher
expected_result = [created_matched_synapse1]
# create a new call
with mock.patch("kalliope.core.Lifo.LIFOBuffer.execute"):
SynapseLauncher.start_synapse_by_list_name(["Synapse2", "Synapse3"],
brain=self.brain_test,
new_lifo=True)
# the current singleton should be the same
self.assertEqual(expected_result, lifo_buffer.lifo_list)
# test to start a synapse list with the singleton lifo
Singleton._instances = dict()
LifoManager.clean_saved_lifo()
lifo_buffer = LifoManager.get_singleton_lifo()
created_matched_synapse1 = MatchedSynapse(matched_synapse=self.synapse1)
# place a synapse in the singleton
lifo_buffer.lifo_list = [created_matched_synapse1]
# the current status of the singleton lifo should contain synapse launched in the next call
created_matched_synapse2 = MatchedSynapse(matched_synapse=self.synapse2)
created_matched_synapse3 = MatchedSynapse(matched_synapse=self.synapse3)
expected_result = [created_matched_synapse1, [created_matched_synapse2, created_matched_synapse3]]
with mock.patch("kalliope.core.Lifo.LIFOBuffer.execute"):
SynapseLauncher.start_synapse_by_list_name(["Synapse2", "Synapse3"],
brain=self.brain_test)
        # the singleton should now contain the synapse that was already there and the 2 other synapses
self.assertEqual(expected_result, lifo_buffer.lifo_list)
def test_run_matching_synapse_from_order(self):
# ------------------
# test_match_synapse1
# ------------------
with mock.patch("kalliope.core.Lifo.LIFOBuffer.execute"):
order_to_match = "this is the sentence"
should_be_created_matched_synapse = MatchedSynapse(matched_synapse=self.synapse1,
user_order=order_to_match,
matched_order="this is the sentence")
expected_result = [[should_be_created_matched_synapse]]
SynapseLauncher.run_matching_synapse_from_order(order_to_match,
brain=self.brain_test,
settings=self.settings_test)
lifo_buffer = LifoManager.get_singleton_lifo()
self.assertEqual(expected_result, lifo_buffer.lifo_list)
# -------------------------
# test_match_synapse1_and_2
# -------------------------
# clean LIFO
Singleton._instances = dict()
LifoManager.clean_saved_lifo()
with mock.patch("kalliope.core.Lifo.LIFOBuffer.execute"):
order_to_match = "this is the second sentence"
should_be_created_matched_synapse1 = MatchedSynapse(matched_synapse=self.synapse1,
user_order=order_to_match,
matched_order="this is the sentence")
should_be_created_matched_synapse2 = MatchedSynapse(matched_synapse=self.synapse2,
user_order=order_to_match,
matched_order="this is the second sentence")
expected_result = [[should_be_created_matched_synapse1, should_be_created_matched_synapse2]]
SynapseLauncher.run_matching_synapse_from_order(order_to_match,
brain=self.brain_test,
settings=self.settings_test)
lifo_buffer = LifoManager.get_singleton_lifo()
self.assertEqual(expected_result, lifo_buffer.lifo_list)
# -------------------------
# test_call_hook_order_not_found
# -------------------------
# clean LIFO
Singleton._instances = dict()
LifoManager.clean_saved_lifo()
with mock.patch("kalliope.core.HookManager.on_order_not_found") as mock_hook:
order_to_match = "not existing sentence"
SynapseLauncher.run_matching_synapse_from_order(order_to_match,
brain=self.brain_test,
settings=self.settings_test)
mock_hook.assert_called_with()
mock_hook.reset_mock()
# -------------------------
# test_call_hook_order_found
# -------------------------
# clean LIFO
Singleton._instances = dict()
with mock.patch("kalliope.core.Lifo.LIFOBuffer.execute"):
with mock.patch("kalliope.core.HookManager.on_order_found") as mock_hook:
order_to_match = "this is the second sentence"
new_settings = Settings()
SynapseLauncher.run_matching_synapse_from_order(order_to_match,
brain=self.brain_test,
settings=new_settings)
mock_hook.assert_called_with()
mock_hook.reset_mock()
if __name__ == '__main__':
unittest.main()
# suite = unittest.TestSuite()
# suite.addTest(TestSynapseLauncher("test_start_synapse_by_list_name"))
# runner = unittest.TextTestRunner()
# runner.run(suite)
|
import datetime
import json
import logging
import os
import re
from absl import flags
from perfkitbenchmarker import spark_service
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import gcp
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
class GcpDataproc(spark_service.BaseSparkService):
"""Object representing a GCP Dataproc cluster.
Attributes:
cluster_id: ID of the cluster.
project: ID of the project.
"""
CLOUD = gcp.CLOUD
SERVICE_NAME = 'dataproc'
def __init__(self, spark_service_spec):
super(GcpDataproc, self).__init__(spark_service_spec)
self.project = self.spec.master_group.vm_spec.project
self.region = self.zone.rsplit('-', 1)[0]
@staticmethod
def _ParseTime(state_time):
"""Parses time from json output.
Args:
state_time: string. the state start time.
Returns:
datetime.
"""
try:
return datetime.datetime.strptime(state_time, '%Y-%m-%dT%H:%M:%S.%fZ')
except ValueError:
return datetime.datetime.strptime(state_time, '%Y-%m-%dT%H:%M:%SZ')
@staticmethod
  def _GetStats(stdout):
    """Extract job timing stats from the JSON description of a finished job."""
    results = json.loads(stdout)
stats = {}
done_time = GcpDataproc._ParseTime(results['status']['stateStartTime'])
pending_time = None
start_time = None
for state in results['statusHistory']:
if state['state'] == 'PENDING':
pending_time = GcpDataproc._ParseTime(state['stateStartTime'])
elif state['state'] == 'RUNNING':
start_time = GcpDataproc._ParseTime(state['stateStartTime'])
if done_time and start_time:
stats[spark_service.RUNTIME] = (done_time - start_time).total_seconds()
if start_time and pending_time:
stats[spark_service.WAITING] = (
(start_time - pending_time).total_seconds())
return stats
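  # Illustrative (assumed) shape of the JSON that _GetStats consumes:
  #
  #   {"status": {"stateStartTime": "2020-01-01T00:05:00.000Z"},
  #    "statusHistory": [
  #        {"state": "PENDING", "stateStartTime": "2020-01-01T00:00:00.000Z"},
  #        {"state": "RUNNING", "stateStartTime": "2020-01-01T00:01:00.000Z"}]}
  #
  # With those timestamps, RUNTIME would be 240.0 and WAITING 60.0 seconds.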
def DataprocGcloudCommand(self, *args):
all_args = ('dataproc',) + args
cmd = util.GcloudCommand(self, *all_args)
cmd.flags['region'] = self.region
return cmd
def _Create(self):
"""Creates the cluster."""
if self.cluster_id is None:
self.cluster_id = 'pkb-' + FLAGS.run_uri
cmd = self.DataprocGcloudCommand('clusters', 'create', self.cluster_id)
if self.project is not None:
cmd.flags['project'] = self.project
cmd.flags['num-workers'] = self.spec.worker_group.vm_count
for group_type, group_spec in [
('worker', self.spec.worker_group),
('master', self.spec.master_group)]:
flag_name = group_type + '-machine-type'
cmd.flags[flag_name] = group_spec.vm_spec.machine_type
if group_spec.vm_spec.num_local_ssds:
ssd_flag = 'num-{0}-local-ssds'.format(group_type)
cmd.flags[ssd_flag] = group_spec.vm_spec.num_local_ssds
if group_spec.vm_spec.boot_disk_size:
disk_flag = group_type + '-boot-disk-size'
cmd.flags[disk_flag] = group_spec.vm_spec.boot_disk_size
if group_spec.vm_spec.boot_disk_type:
disk_flag = group_type + '-boot-disk-type'
cmd.flags[disk_flag] = group_spec.vm_spec.boot_disk_type
if FLAGS.gcp_dataproc_subnet:
cmd.flags['subnet'] = FLAGS.gcp_dataproc_subnet
cmd.additional_flags.append('--no-address')
if FLAGS.gcp_dataproc_property:
cmd.flags['properties'] = ','.join(FLAGS.gcp_dataproc_property)
if FLAGS.gcp_dataproc_image:
cmd.flags['image'] = FLAGS.gcp_dataproc_image
cmd.flags['metadata'] = util.MakeFormattedDefaultTags()
cmd.flags['labels'] = util.MakeFormattedDefaultTags()
cmd.Issue()
def _Delete(self):
"""Deletes the cluster."""
cmd = self.DataprocGcloudCommand('clusters', 'delete', self.cluster_id)
# If we don't put this here, zone is automatically added, which
# breaks the dataproc clusters delete
cmd.flags['zone'] = []
cmd.Issue(raise_on_failure=False)
def _Exists(self):
"""Check to see whether the cluster exists."""
cmd = self.DataprocGcloudCommand('clusters', 'describe', self.cluster_id)
# If we don't put this here, zone is automatically added to
# the command, which breaks dataproc clusters describe
cmd.flags['zone'] = []
_, _, retcode = cmd.Issue(raise_on_failure=False)
return retcode == 0
def SubmitJob(self, jarfile, classname, job_script=None,
job_poll_interval=None,
job_arguments=None, job_stdout_file=None,
job_type=spark_service.SPARK_JOB_TYPE):
cmd = self.DataprocGcloudCommand('jobs', 'submit', job_type)
cmd.flags['cluster'] = self.cluster_id
cmd.flags['labels'] = util.MakeFormattedDefaultTags()
    # If we don't put this here, zone is automatically added to the command
    # which breaks dataproc jobs submit
cmd.flags['zone'] = []
cmd.additional_flags = []
if classname and jarfile:
cmd.flags['jars'] = jarfile
cmd.flags['class'] = classname
elif jarfile:
cmd.flags['jar'] = jarfile
elif job_script:
cmd.additional_flags += [job_script]
# Dataproc gives as stdout an object describing job execution.
# Its stderr contains a mix of the stderr of the job, and the
# stdout of the job. We can set the driver log level to FATAL
    # to suppress those messages, and we can then, hopefully, separate
    # the job standard output from the log messages.
cmd.flags['driver-log-levels'] = 'root={}'.format(
FLAGS.spark_service_log_level)
if job_arguments:
cmd.additional_flags += ['--'] + job_arguments
stdout, stderr, retcode = cmd.Issue(timeout=None, raise_on_failure=False)
if retcode != 0:
return {spark_service.SUCCESS: False}
stats = self._GetStats(stdout)
stats[spark_service.SUCCESS] = True
if job_stdout_file:
with open(job_stdout_file, 'w') as f:
lines = stderr.splitlines(True)
if (not re.match(r'Job \[.*\] submitted.', lines[0]) or
not re.match(r'Waiting for job output...', lines[1])):
raise Exception('Dataproc output in unexpected format.')
i = 2
if job_type == spark_service.SPARK_JOB_TYPE:
if not re.match(r'\r', lines[i]):
raise Exception('Dataproc output in unexpected format.')
i += 1
# Eat these status lines. They end in \r, so they overwrite
# themselves at the console or when you cat a file. But they
# are part of this string.
while re.match(r'\[Stage \d+:', lines[i]):
i += 1
if not re.match(r' *\r$', lines[i]):
raise Exception('Dataproc output in unexpected format.')
while i < len(lines) and not re.match(r'Job \[.*\]', lines[i]):
f.write(lines[i])
i += 1
if i != len(lines) - 1:
raise Exception('Dataproc output in unexpected format.')
return stats
def ExecuteOnMaster(self, script_path, script_args):
master_name = self.cluster_id + '-m'
script_name = os.path.basename(script_path)
if FLAGS.gcp_internal_ip:
scp_cmd = ['gcloud', 'beta', 'compute', 'scp', '--internal-ip']
else:
scp_cmd = ['gcloud', 'compute', 'scp']
scp_cmd += ['--zone', self.GetZone(), '--quiet', script_path,
'pkb@' + master_name + ':/tmp/' + script_name]
vm_util.IssueCommand(scp_cmd, force_info_log=True)
ssh_cmd = ['gcloud', 'compute', 'ssh']
if FLAGS.gcp_internal_ip:
ssh_cmd += ['--internal-ip']
ssh_cmd += ['--zone=' + self.GetZone(), '--quiet',
'pkb@' + master_name, '--',
'chmod +x /tmp/' + script_name + '; sudo /tmp/' + script_name
+ ' ' + ' '.join(script_args)]
vm_util.IssueCommand(ssh_cmd, force_info_log=True)
def CopyFromMaster(self, remote_path, local_path):
master_name = self.cluster_id + '-m'
if FLAGS.gcp_internal_ip:
scp_cmd = ['gcloud', 'beta', 'compute', 'scp', '--internal-ip']
else:
scp_cmd = ['gcloud', 'compute', 'scp']
scp_cmd += ['--zone=' + self.GetZone(), '--quiet',
'pkb@' + master_name + ':' +
remote_path, local_path]
vm_util.IssueCommand(scp_cmd, force_info_log=True)
def SetClusterProperty(self):
pass
def GetMetadata(self):
basic_data = super(GcpDataproc, self).GetMetadata()
if self.spec.worker_group.vm_spec.num_local_ssds:
basic_data.update(
{'ssd_count': str(self.spec.worker_group.vm_spec.num_local_ssds)})
return basic_data
def GetZone(self):
cmd = self.DataprocGcloudCommand('clusters', 'describe', self.cluster_id)
cmd.flags['zone'] = []
cmd.flags['format'] = ['value(config.gceClusterConfig.zoneUri)']
r = cmd.Issue()
logging.info(r)
zone = r[0].strip().split('/')[-1]
logging.info(zone)
return zone
|
import unittest
import pandas as pd
import numpy as np
from pgmpy.estimators import BaseEstimator
class TestBaseEstimator(unittest.TestCase):
def setUp(self):
self.d1 = pd.DataFrame(
data={"A": [0, 0, 1], "B": [0, 1, 0], "C": [1, 1, 0], "D": ["X", "Y", "Z"]}
)
self.d2 = pd.DataFrame(
data={
"A": [0, np.NaN, 1],
"B": [0, 1, 0],
"C": [1, 1, np.NaN],
"D": [np.NaN, "Y", np.NaN],
}
)
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv"
)
def test_state_count(self):
e = BaseEstimator(self.d1)
self.assertEqual(e.state_counts("A").values.tolist(), [[2], [1]])
self.assertEqual(
e.state_counts("C", ["A", "B"]).values.tolist(),
[[0.0, 0.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0]],
)
def test_missing_data(self):
e = BaseEstimator(
self.d2, state_names={"C": [0, 1]}, complete_samples_only=False
)
self.assertEqual(
e.state_counts("A", complete_samples_only=True).values.tolist(), [[0], [0]]
)
self.assertEqual(e.state_counts("A").values.tolist(), [[1], [1]])
self.assertEqual(
e.state_counts(
"C", parents=["A", "B"], complete_samples_only=True
).values.tolist(),
[[0, 0, 0, 0], [0, 0, 0, 0]],
)
self.assertEqual(
e.state_counts("C", parents=["A", "B"]).values.tolist(),
[[0, 0, 0, 0], [1, 0, 0, 0]],
)
def tearDown(self):
del self.d1
|
import asyncio
from homeassistant.auth import auth_store
from tests.async_mock import patch
async def test_loading_no_group_data_format(hass, hass_storage):
"""Test we correctly load old data without any groups."""
hass_storage[auth_store.STORAGE_KEY] = {
"version": 1,
"data": {
"credentials": [],
"users": [
{
"id": "user-id",
"is_active": True,
"is_owner": True,
"name": "Paulus",
"system_generated": False,
},
{
"id": "system-id",
"is_active": True,
"is_owner": True,
"name": "Hass.io",
"system_generated": True,
},
],
"refresh_tokens": [
{
"access_token_expiration": 1800.0,
"client_id": "http://localhost:8123/",
"created_at": "2018-10-03T13:43:19.774637+00:00",
"id": "user-token-id",
"jwt_key": "some-key",
"last_used_at": "2018-10-03T13:43:19.774712+00:00",
"token": "some-token",
"user_id": "user-id",
},
{
"access_token_expiration": 1800.0,
"client_id": None,
"created_at": "2018-10-03T13:43:19.774637+00:00",
"id": "system-token-id",
"jwt_key": "some-key",
"last_used_at": "2018-10-03T13:43:19.774712+00:00",
"token": "some-token",
"user_id": "system-id",
},
{
"access_token_expiration": 1800.0,
"client_id": "http://localhost:8123/",
"created_at": "2018-10-03T13:43:19.774637+00:00",
"id": "hidden-because-no-jwt-id",
"last_used_at": "2018-10-03T13:43:19.774712+00:00",
"token": "some-token",
"user_id": "user-id",
},
],
},
}
store = auth_store.AuthStore(hass)
groups = await store.async_get_groups()
assert len(groups) == 3
admin_group = groups[0]
assert admin_group.name == auth_store.GROUP_NAME_ADMIN
assert admin_group.system_generated
assert admin_group.id == auth_store.GROUP_ID_ADMIN
read_group = groups[1]
assert read_group.name == auth_store.GROUP_NAME_READ_ONLY
assert read_group.system_generated
assert read_group.id == auth_store.GROUP_ID_READ_ONLY
user_group = groups[2]
assert user_group.name == auth_store.GROUP_NAME_USER
assert user_group.system_generated
assert user_group.id == auth_store.GROUP_ID_USER
users = await store.async_get_users()
assert len(users) == 2
owner, system = users
assert owner.system_generated is False
assert owner.groups == [admin_group]
assert len(owner.refresh_tokens) == 1
owner_token = list(owner.refresh_tokens.values())[0]
assert owner_token.id == "user-token-id"
assert system.system_generated is True
assert system.groups == []
assert len(system.refresh_tokens) == 1
system_token = list(system.refresh_tokens.values())[0]
assert system_token.id == "system-token-id"
async def test_loading_all_access_group_data_format(hass, hass_storage):
"""Test we correctly load old data with single group."""
hass_storage[auth_store.STORAGE_KEY] = {
"version": 1,
"data": {
"credentials": [],
"users": [
{
"id": "user-id",
"is_active": True,
"is_owner": True,
"name": "Paulus",
"system_generated": False,
"group_ids": ["abcd-all-access"],
},
{
"id": "system-id",
"is_active": True,
"is_owner": True,
"name": "Hass.io",
"system_generated": True,
},
],
"groups": [{"id": "abcd-all-access", "name": "All Access"}],
"refresh_tokens": [
{
"access_token_expiration": 1800.0,
"client_id": "http://localhost:8123/",
"created_at": "2018-10-03T13:43:19.774637+00:00",
"id": "user-token-id",
"jwt_key": "some-key",
"last_used_at": "2018-10-03T13:43:19.774712+00:00",
"token": "some-token",
"user_id": "user-id",
},
{
"access_token_expiration": 1800.0,
"client_id": None,
"created_at": "2018-10-03T13:43:19.774637+00:00",
"id": "system-token-id",
"jwt_key": "some-key",
"last_used_at": "2018-10-03T13:43:19.774712+00:00",
"token": "some-token",
"user_id": "system-id",
},
{
"access_token_expiration": 1800.0,
"client_id": "http://localhost:8123/",
"created_at": "2018-10-03T13:43:19.774637+00:00",
"id": "hidden-because-no-jwt-id",
"last_used_at": "2018-10-03T13:43:19.774712+00:00",
"token": "some-token",
"user_id": "user-id",
},
],
},
}
store = auth_store.AuthStore(hass)
groups = await store.async_get_groups()
assert len(groups) == 3
admin_group = groups[0]
assert admin_group.name == auth_store.GROUP_NAME_ADMIN
assert admin_group.system_generated
assert admin_group.id == auth_store.GROUP_ID_ADMIN
read_group = groups[1]
assert read_group.name == auth_store.GROUP_NAME_READ_ONLY
assert read_group.system_generated
assert read_group.id == auth_store.GROUP_ID_READ_ONLY
user_group = groups[2]
assert user_group.name == auth_store.GROUP_NAME_USER
assert user_group.system_generated
assert user_group.id == auth_store.GROUP_ID_USER
users = await store.async_get_users()
assert len(users) == 2
owner, system = users
assert owner.system_generated is False
assert owner.groups == [admin_group]
assert len(owner.refresh_tokens) == 1
owner_token = list(owner.refresh_tokens.values())[0]
assert owner_token.id == "user-token-id"
assert system.system_generated is True
assert system.groups == []
assert len(system.refresh_tokens) == 1
system_token = list(system.refresh_tokens.values())[0]
assert system_token.id == "system-token-id"
async def test_loading_empty_data(hass, hass_storage):
"""Test we correctly load with no existing data."""
store = auth_store.AuthStore(hass)
groups = await store.async_get_groups()
assert len(groups) == 3
admin_group = groups[0]
assert admin_group.name == auth_store.GROUP_NAME_ADMIN
assert admin_group.system_generated
assert admin_group.id == auth_store.GROUP_ID_ADMIN
user_group = groups[1]
assert user_group.name == auth_store.GROUP_NAME_USER
assert user_group.system_generated
assert user_group.id == auth_store.GROUP_ID_USER
read_group = groups[2]
assert read_group.name == auth_store.GROUP_NAME_READ_ONLY
assert read_group.system_generated
assert read_group.id == auth_store.GROUP_ID_READ_ONLY
users = await store.async_get_users()
assert len(users) == 0
async def test_system_groups_store_id_and_name(hass, hass_storage):
"""Test that for system groups we store the ID and name.
    Name is stored so that we remain backwards compatible with < 0.82.
"""
store = auth_store.AuthStore(hass)
await store._async_load()
data = store._data_to_save()
assert len(data["users"]) == 0
assert data["groups"] == [
{"id": auth_store.GROUP_ID_ADMIN, "name": auth_store.GROUP_NAME_ADMIN},
{"id": auth_store.GROUP_ID_USER, "name": auth_store.GROUP_NAME_USER},
{"id": auth_store.GROUP_ID_READ_ONLY, "name": auth_store.GROUP_NAME_READ_ONLY},
]
async def test_loading_race_condition(hass):
"""Test only one storage load called when concurrent loading occurred ."""
store = auth_store.AuthStore(hass)
with patch(
"homeassistant.helpers.entity_registry.async_get_registry"
) as mock_ent_registry, patch(
"homeassistant.helpers.device_registry.async_get_registry"
) as mock_dev_registry, patch(
"homeassistant.helpers.storage.Store.async_load", return_value=None
) as mock_load:
results = await asyncio.gather(store.async_get_users(), store.async_get_users())
mock_ent_registry.assert_called_once_with(hass)
mock_dev_registry.assert_called_once_with(hass)
mock_load.assert_called_once_with()
assert results[0] == results[1]
|
import numpy as np
from scipy import linalg
from .base import BaseEstimator
from ..cuda import _setup_cuda_fft_multiply_repeated
from ..filter import next_fast_len
from ..fixes import jit
from ..parallel import check_n_jobs
from ..utils import warn, ProgressBar, logger
def _compute_corrs(X, y, smin, smax, n_jobs=1, fit_intercept=False,
edge_correction=True):
"""Compute auto- and cross-correlations."""
if fit_intercept:
# We could do this in the Fourier domain, too, but it should
# be a bit cleaner numerically to do it here.
X_offset = np.mean(X, axis=0)
y_offset = np.mean(y, axis=0)
if X.ndim == 3:
X_offset = X_offset.mean(axis=0)
y_offset = np.mean(y_offset, axis=0)
X = X - X_offset
y = y - y_offset
else:
X_offset = y_offset = 0.
if X.ndim == 2:
assert y.ndim == 2
X = X[:, np.newaxis, :]
y = y[:, np.newaxis, :]
assert X.shape[:2] == y.shape[:2]
len_trf = smax - smin
len_x, n_epochs, n_ch_x = X.shape
    len_y, n_epochs, n_ch_y = y.shape
assert len_x == len_y
n_fft = next_fast_len(2 * X.shape[0] - 1)
n_jobs, cuda_dict = _setup_cuda_fft_multiply_repeated(
n_jobs, [1.], n_fft, 'correlation calculations')
# create our Toeplitz indexer
ij = np.empty((len_trf, len_trf), int)
for ii in range(len_trf):
ij[ii, ii:] = np.arange(len_trf - ii)
x = np.arange(n_fft - 1, n_fft - len_trf + ii, -1)
ij[ii + 1:, ii] = x
x_xt = np.zeros([n_ch_x * len_trf] * 2)
x_y = np.zeros((len_trf, n_ch_x, n_ch_y), order='F')
n = n_epochs * (n_ch_x * (n_ch_x + 1) // 2 + n_ch_x)
logger.info('Fitting %d epochs, %d channels' % (n_epochs, n_ch_x))
pb = ProgressBar(n, mesg='Sample')
count = 0
pb.update(count)
for ei in range(n_epochs):
this_X = X[:, ei, :]
# XXX maybe this is what we should parallelize over CPUs at some point
X_fft = cuda_dict['rfft'](this_X, n=n_fft, axis=0)
X_fft_conj = X_fft.conj()
y_fft = cuda_dict['rfft'](y[:, ei, :], n=n_fft, axis=0)
for ch0 in range(n_ch_x):
for oi, ch1 in enumerate(range(ch0, n_ch_x)):
this_result = cuda_dict['irfft'](
X_fft[:, ch0] * X_fft_conj[:, ch1], n=n_fft, axis=0)
# Our autocorrelation structure is a Toeplitz matrix, but
# it's faster to create the Toeplitz ourselves than use
# linalg.toeplitz.
this_result = this_result[ij]
# However, we need to adjust for coeffs that are cut off,
# i.e. the non-zero delays should not have the same AC value
# as the zero-delay ones (because they actually have fewer
# coefficients).
#
# These adjustments also follow a Toeplitz structure, so we
# construct a matrix of what has been left off, compute their
# inner products, and remove them.
if edge_correction:
_edge_correct(this_result, this_X, smax, smin, ch0, ch1)
# Store the results in our output matrix
x_xt[ch0 * len_trf:(ch0 + 1) * len_trf,
ch1 * len_trf:(ch1 + 1) * len_trf] += this_result
if ch0 != ch1:
x_xt[ch1 * len_trf:(ch1 + 1) * len_trf,
ch0 * len_trf:(ch0 + 1) * len_trf] += this_result.T
count += 1
pb.update(count)
# compute the crosscorrelations
cc_temp = cuda_dict['irfft'](
y_fft * X_fft_conj[:, slice(ch0, ch0 + 1)], n=n_fft, axis=0)
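            # cc_temp is the circular cross-correlation: non-negative lags sit
            # at the start of the array and negative lags wrap around to the
            # end, so the two pieces are stitched together when smin < 0.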
if smin < 0 and smax >= 0:
x_y[:-smin, ch0] += cc_temp[smin:]
x_y[len_trf - smax:, ch0] += cc_temp[:smax]
else:
x_y[:, ch0] += cc_temp[smin:smax]
count += 1
pb.update(count)
x_y = np.reshape(x_y, (n_ch_x * len_trf, n_ch_y), order='F')
return x_xt, x_y, n_ch_x, X_offset, y_offset
@jit()
def _edge_correct(this_result, this_X, smax, smin, ch0, ch1):
if smax > 0:
tail = _toeplitz_dot(this_X[-1:-smax:-1, ch0],
this_X[-1:-smax:-1, ch1])
if smin > 0:
tail = tail[smin - 1:, smin - 1:]
this_result[max(-smin + 1, 0):, max(-smin + 1, 0):] -= tail
if smin < 0:
head = _toeplitz_dot(this_X[:-smin, ch0],
this_X[:-smin, ch1])[::-1, ::-1]
if smax < 0:
head = head[:smax, :smax]
this_result[:-smin, :-smin] -= head
@jit()
def _toeplitz_dot(a, b):
"""Create upper triangular Toeplitz matrices & compute the dot product."""
# This is equivalent to:
# a = linalg.toeplitz(a)
# b = linalg.toeplitz(b)
# a[np.triu_indices(len(a), 1)] = 0
# b[np.triu_indices(len(a), 1)] = 0
# out = np.dot(a.T, b)
assert a.shape == b.shape and a.ndim == 1
out = np.outer(a, b)
for ii in range(1, len(a)):
out[ii, ii:] += out[ii - 1, ii - 1:-1]
out[ii + 1:, ii] += out[ii:-1, ii - 1]
return out
def _compute_reg_neighbors(n_ch_x, n_delays, reg_type, method='direct',
normed=False):
"""Compute regularization parameter from neighbors."""
from scipy.sparse.csgraph import laplacian
known_types = ('ridge', 'laplacian')
if isinstance(reg_type, str):
reg_type = (reg_type,) * 2
if len(reg_type) != 2:
raise ValueError('reg_type must have two elements, got %s'
% (len(reg_type),))
for r in reg_type:
if r not in known_types:
raise ValueError('reg_type entries must be one of %s, got %s'
% (known_types, r))
reg_time = (reg_type[0] == 'laplacian' and n_delays > 1)
reg_chs = (reg_type[1] == 'laplacian' and n_ch_x > 1)
if not reg_time and not reg_chs:
return np.eye(n_ch_x * n_delays)
# regularize time
if reg_time:
reg = np.eye(n_delays)
stride = n_delays + 1
reg.flat[1::stride] += -1
reg.flat[n_delays::stride] += -1
reg.flat[n_delays + 1:-n_delays - 1:stride] += 1
args = [reg] * n_ch_x
reg = linalg.block_diag(*args)
else:
reg = np.zeros((n_delays * n_ch_x,) * 2)
# regularize features
if reg_chs:
block = n_delays * n_delays
row_offset = block * n_ch_x
stride = n_delays * n_ch_x + 1
reg.flat[n_delays:-row_offset:stride] += -1
reg.flat[n_delays + row_offset::stride] += 1
reg.flat[row_offset:-n_delays:stride] += -1
reg.flat[:-(n_delays + row_offset):stride] += 1
assert np.array_equal(reg[::-1, ::-1], reg)
if method == 'direct':
if normed:
norm = np.sqrt(np.diag(reg))
reg /= norm
reg /= norm[:, np.newaxis]
return reg
else:
# Use csgraph. Note that our -1's above are really the neighbors!
# If we ever want to allow arbitrary adjacency matrices, this is how
# we'd want to do it.
reg = laplacian(-reg, normed=normed)
return reg
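# For illustration (values derived from the code above, not part of the
# original module): with reg_type='laplacian', n_ch_x=1 and n_delays=3, the
# 'direct' regularizer is the path-graph Laplacian over time lags
#
#     [[ 1, -1,  0],
#      [-1,  2, -1],
#      [ 0, -1,  1]]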
def _fit_corrs(x_xt, x_y, n_ch_x, reg_type, alpha, n_ch_in):
"""Fit the model using correlation matrices."""
# do the regularized solving
n_ch_out = x_y.shape[1]
assert x_y.shape[0] % n_ch_x == 0
n_delays = x_y.shape[0] // n_ch_x
reg = _compute_reg_neighbors(n_ch_x, n_delays, reg_type)
mat = x_xt + alpha * reg
# From sklearn
try:
# Note: we must use overwrite_a=False in order to be able to
# use the fall-back solution below in case a LinAlgError
# is raised
w = linalg.solve(mat, x_y, sym_pos=True, overwrite_a=False)
except np.linalg.LinAlgError:
warn('Singular matrix in solving dual problem. Using '
'least-squares solution instead.')
w = linalg.lstsq(mat, x_y, lapack_driver='gelsy')[0]
w = w.T.reshape([n_ch_out, n_ch_in, n_delays])
return w
class TimeDelayingRidge(BaseEstimator):
"""Ridge regression of data with time delays.
Parameters
----------
tmin : int | float
The starting lag, in seconds (or samples if ``sfreq`` == 1).
Negative values correspond to times in the past.
tmax : int | float
The ending lag, in seconds (or samples if ``sfreq`` == 1).
Positive values correspond to times in the future.
Must be >= tmin.
sfreq : float
The sampling frequency used to convert times into samples.
alpha : float
The ridge (or laplacian) regularization factor.
reg_type : str | list
Can be "ridge" (default) or "laplacian".
Can also be a 2-element list specifying how to regularize in time
and across adjacent features.
fit_intercept : bool
If True (default), the sample mean is removed before fitting.
n_jobs : int | str
The number of jobs to use. Can be an int (default 1) or ``'cuda'``.
.. versionadded:: 0.18
edge_correction : bool
If True (default), correct the autocorrelation coefficients for
non-zero delays for the fact that fewer samples are available.
Disabling this speeds up performance at the cost of accuracy
depending on the relationship between epoch length and model
duration. Only used if ``estimator`` is float or None.
.. versionadded:: 0.18
See Also
--------
mne.decoding.ReceptiveField
Notes
-----
This class is meant to be used with :class:`mne.decoding.ReceptiveField`
by only implicitly doing the time delaying. For reasonable receptive
field and input signal sizes, it should be more CPU and memory
efficient by using frequency-domain methods (FFTs) to compute the
auto- and cross-correlations.
"""
_estimator_type = "regressor"
def __init__(self, tmin, tmax, sfreq, alpha=0., reg_type='ridge',
fit_intercept=True, n_jobs=1, edge_correction=True):
if tmin > tmax:
raise ValueError('tmin must be <= tmax, got %s and %s'
% (tmin, tmax))
self.tmin = float(tmin)
self.tmax = float(tmax)
self.sfreq = float(sfreq)
self.alpha = float(alpha)
self.reg_type = reg_type
self.fit_intercept = fit_intercept
self.edge_correction = edge_correction
self.n_jobs = n_jobs
@property
def _smin(self):
return int(round(self.tmin * self.sfreq))
@property
def _smax(self):
return int(round(self.tmax * self.sfreq)) + 1
def fit(self, X, y):
"""Estimate the coefficients of the linear model.
Parameters
----------
X : array, shape (n_samples[, n_epochs], n_features)
The training input samples to estimate the linear coefficients.
y : array, shape (n_samples[, n_epochs], n_outputs)
The target values.
Returns
-------
self : instance of TimeDelayingRidge
Returns the modified instance.
"""
if X.ndim == 3:
assert y.ndim == 3
assert X.shape[:2] == y.shape[:2]
else:
assert X.ndim == 2 and y.ndim == 2
assert X.shape[0] == y.shape[0]
n_jobs = check_n_jobs(self.n_jobs, allow_cuda=True)
# These are split into two functions because it's possible that we
# might want to allow people to do them separately (e.g., to test
# different regularization parameters).
self.cov_, x_y_, n_ch_x, X_offset, y_offset = _compute_corrs(
X, y, self._smin, self._smax, n_jobs, self.fit_intercept,
self.edge_correction)
self.coef_ = _fit_corrs(self.cov_, x_y_, n_ch_x,
self.reg_type, self.alpha, n_ch_x)
# This is the sklearn formula from LinearModel (will be 0. for no fit)
if self.fit_intercept:
self.intercept_ = y_offset - np.dot(X_offset, self.coef_.sum(-1).T)
else:
self.intercept_ = 0.
return self
def predict(self, X):
"""Predict the output.
Parameters
----------
X : array, shape (n_samples[, n_epochs], n_features)
The data.
Returns
-------
X : ndarray
The predicted response.
"""
if X.ndim == 2:
X = X[:, np.newaxis, :]
singleton = True
else:
singleton = False
out = np.zeros(X.shape[:2] + (self.coef_.shape[0],))
smin = self._smin
offset = max(smin, 0)
for ei in range(X.shape[1]):
for oi in range(self.coef_.shape[0]):
for fi in range(self.coef_.shape[1]):
temp = np.convolve(X[:, ei, fi], self.coef_[oi, fi])
temp = temp[max(-smin, 0):][:len(out) - offset]
out[offset:len(temp) + offset, ei, oi] += temp
out += self.intercept_
if singleton:
out = out[:, 0, :]
return out
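# A minimal usage sketch (illustrative shapes and parameters, not part of the
# original module):
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(1000, 3)      # (n_samples, n_features)
#     y = rng.randn(1000, 2)      # (n_samples, n_outputs)
#     model = TimeDelayingRidge(tmin=-0.1, tmax=0.4, sfreq=100., alpha=1.)
#     model.fit(X, y)             # coef_ shape: (n_outputs, n_features, n_delays)
#     y_pred = model.predict(X)   # shape (n_samples, n_outputs)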
|
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_FLASH,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.helpers import entity_platform
from . import UpbAttachedEntity
from .const import DOMAIN, UPB_BLINK_RATE_SCHEMA, UPB_BRIGHTNESS_RATE_SCHEMA
SERVICE_LIGHT_FADE_START = "light_fade_start"
SERVICE_LIGHT_FADE_STOP = "light_fade_stop"
SERVICE_LIGHT_BLINK = "light_blink"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the UPB light based on a config entry."""
upb = hass.data[DOMAIN][config_entry.entry_id]["upb"]
unique_id = config_entry.entry_id
async_add_entities(
UpbLight(upb.devices[dev], unique_id, upb) for dev in upb.devices
)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_LIGHT_FADE_START, UPB_BRIGHTNESS_RATE_SCHEMA, "async_light_fade_start"
)
platform.async_register_entity_service(
SERVICE_LIGHT_FADE_STOP, {}, "async_light_fade_stop"
)
platform.async_register_entity_service(
SERVICE_LIGHT_BLINK, UPB_BLINK_RATE_SCHEMA, "async_light_blink"
)
class UpbLight(UpbAttachedEntity, LightEntity):
"""Representation of an UPB Light."""
def __init__(self, element, unique_id, upb):
"""Initialize an UpbLight."""
super().__init__(element, unique_id, upb)
self._brightness = self._element.status
@property
def supported_features(self):
"""Flag supported features."""
if self._element.dimmable:
return SUPPORT_BRIGHTNESS | SUPPORT_TRANSITION | SUPPORT_FLASH
return SUPPORT_FLASH
@property
def brightness(self):
"""Get the brightness."""
return self._brightness
@property
def is_on(self) -> bool:
"""Get the current brightness."""
return self._brightness != 0
async def async_turn_on(self, **kwargs):
"""Turn on the light."""
flash = kwargs.get(ATTR_FLASH)
if flash:
await self.async_light_blink(0.5 if flash == "short" else 1.5)
else:
rate = kwargs.get(ATTR_TRANSITION, -1)
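            # Convert Home Assistant's 0-255 brightness to the 0-100 scale
            # used by the UPB library (see the reverse *2.55 conversion below).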
brightness = round(kwargs.get(ATTR_BRIGHTNESS, 255) / 2.55)
self._element.turn_on(brightness, rate)
async def async_turn_off(self, **kwargs):
"""Turn off the device."""
rate = kwargs.get(ATTR_TRANSITION, -1)
self._element.turn_off(rate)
async def async_light_fade_start(self, rate, brightness=None, brightness_pct=None):
"""Start dimming of device."""
if brightness is not None:
brightness_pct = round(brightness / 2.55)
self._element.fade_start(brightness_pct, rate)
async def async_light_fade_stop(self):
"""Stop dimming of device."""
self._element.fade_stop()
async def async_light_blink(self, blink_rate):
"""Request device to blink."""
        blink_rate = int(blink_rate * 60)  # Convert seconds to 60 Hz pulses
self._element.blink(blink_rate)
async def async_update(self):
"""Request the device to update its status."""
self._element.update_status()
def _element_changed(self, element, changeset):
status = self._element.status
self._brightness = round(status * 2.55) if status else 0
|
from datetime import timedelta
import logging
from aiohue import AiohueException, Unauthorized
from aiohue.sensors import TYPE_ZLL_PRESENCE
import async_timeout
from homeassistant.core import callback
from homeassistant.helpers import debounce, entity
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import REQUEST_REFRESH_DELAY
from .helpers import remove_devices
from .hue_event import EVENT_CONFIG_MAP
from .sensor_device import GenericHueDevice
SENSOR_CONFIG_MAP = {}
_LOGGER = logging.getLogger(__name__)
def _device_id(aiohue_sensor):
# Work out the shared device ID, as described below
device_id = aiohue_sensor.uniqueid
if device_id and len(device_id) > 23:
device_id = device_id[:23]
return device_id
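# For example (hypothetical unique id): a motion sensor reporting
# "00:17:88:01:02:03:04:05-02-0406" and its sibling light-level sensor
# "00:17:88:01:02:03:04:05-02-0400" both map to the shared device id
# "00:17:88:01:02:03:04:05" (the first 23 characters).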
class SensorManager:
"""Class that handles registering and updating Hue sensor entities.
Intended to be a singleton.
"""
SCAN_INTERVAL = timedelta(seconds=5)
def __init__(self, bridge):
"""Initialize the sensor manager."""
self.bridge = bridge
self._component_add_entities = {}
self.current = {}
self.current_events = {}
self._enabled_platforms = ("binary_sensor", "sensor")
self.coordinator = DataUpdateCoordinator(
bridge.hass,
_LOGGER,
name="sensor",
update_method=self.async_update_data,
update_interval=self.SCAN_INTERVAL,
request_refresh_debouncer=debounce.Debouncer(
bridge.hass, _LOGGER, cooldown=REQUEST_REFRESH_DELAY, immediate=True
),
)
async def async_update_data(self):
"""Update sensor data."""
try:
with async_timeout.timeout(4):
return await self.bridge.async_request_call(
self.bridge.api.sensors.update
)
except Unauthorized as err:
await self.bridge.handle_unauthorized_error()
raise UpdateFailed("Unauthorized") from err
except AiohueException as err:
raise UpdateFailed(f"Hue error: {err}") from err
async def async_register_component(self, platform, async_add_entities):
"""Register async_add_entities methods for components."""
self._component_add_entities[platform] = async_add_entities
if len(self._component_add_entities) < len(self._enabled_platforms):
_LOGGER.debug("Aborting start with %s, waiting for the rest", platform)
return
# We have all components available, start the updating.
self.bridge.reset_jobs.append(
self.coordinator.async_add_listener(self.async_update_items)
)
await self.coordinator.async_refresh()
@callback
def async_update_items(self):
"""Update sensors from the bridge."""
api = self.bridge.api.sensors
if len(self._component_add_entities) < len(self._enabled_platforms):
return
to_add = {}
primary_sensor_devices = {}
current = self.current
# Physical Hue motion sensors present as three sensors in the API: a
# presence sensor, a temperature sensor, and a light level sensor. Of
# these, only the presence sensor is assigned the user-friendly name
# that the user has given to the device. Each of these sensors is
# linked by a common device_id, which is the first twenty-three
# characters of the unique id (then followed by a hyphen and an ID
# specific to the individual sensor).
#
        # To set up neat names and assign the sensor entities to the same
        # device, we first iterate over all the sensors to find the Hue
        # presence sensors, then iterate over the remaining sensors and
        # match them to a presence sensor where possible.
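        # Illustrative (hypothetical) unique ids sharing one device:
        #   presence:    "00:17:88:01:02:aa:bb:cc-02-0406"
        #   temperature: "00:17:88:01:02:aa:bb:cc-02-0402"
        #   light level: "00:17:88:01:02:aa:bb:cc-02-0400"
        # The shared device_id is the first 23 characters: "00:17:88:01:02:aa:bb:cc".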
for item_id in api:
if api[item_id].type != TYPE_ZLL_PRESENCE:
continue
primary_sensor_devices[_device_id(api[item_id])] = api[item_id]
# Iterate again now we have all the presence sensors, and add the
# related sensors with nice names where appropriate.
for item_id in api:
uniqueid = api[item_id].uniqueid
if current.get(uniqueid, self.current_events.get(uniqueid)) is not None:
continue
sensor_type = api[item_id].type
# Check for event generator devices
event_config = EVENT_CONFIG_MAP.get(sensor_type)
if event_config is not None:
base_name = api[item_id].name
name = event_config["name_format"].format(base_name)
new_event = event_config["class"](api[item_id], name, self.bridge)
self.bridge.hass.async_create_task(
new_event.async_update_device_registry()
)
self.current_events[uniqueid] = new_event
sensor_config = SENSOR_CONFIG_MAP.get(sensor_type)
if sensor_config is None:
continue
base_name = api[item_id].name
primary_sensor = primary_sensor_devices.get(_device_id(api[item_id]))
if primary_sensor is not None:
base_name = primary_sensor.name
name = sensor_config["name_format"].format(base_name)
current[uniqueid] = sensor_config["class"](
api[item_id], name, self.bridge, primary_sensor=primary_sensor
)
to_add.setdefault(sensor_config["platform"], []).append(current[uniqueid])
self.bridge.hass.async_create_task(
remove_devices(
self.bridge,
[value.uniqueid for value in api.values()],
current,
)
)
for platform in to_add:
self._component_add_entities[platform](to_add[platform])
class GenericHueSensor(GenericHueDevice, entity.Entity):
"""Representation of a Hue sensor."""
should_poll = False
async def _async_update_ha_state(self, *args, **kwargs):
raise NotImplementedError
@property
def available(self):
"""Return if sensor is available."""
return self.bridge.sensor_manager.coordinator.last_update_success and (
self.bridge.allow_unreachable
# remotes like Hue Tap (ZGPSwitchSensor) have no _reachability_
or self.sensor.config.get("reachable", True)
)
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.async_on_remove(
self.bridge.sensor_manager.coordinator.async_add_listener(
self.async_write_ha_state
)
)
async def async_update(self):
"""Update the entity.
Only used by the generic entity update service.
"""
await self.bridge.sensor_manager.coordinator.async_request_refresh()
class GenericZLLSensor(GenericHueSensor):
"""Representation of a Hue-brand, physical sensor."""
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return {"battery_level": self.sensor.battery}
|
import logging
import gammu # pylint: disable=import-error, no-member
from gammu.asyncworker import ( # pylint: disable=import-error, no-member
GammuAsyncWorker,
)
from homeassistant.core import callback
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
class Gateway:
"""SMS gateway to interact with a GSM modem."""
def __init__(self, worker, hass):
"""Initialize the sms gateway."""
self._worker = worker
self._hass = hass
async def init_async(self):
"""Initialize the sms gateway asynchronously."""
try:
await self._worker.set_incoming_sms_async()
except gammu.ERR_NOTSUPPORTED:
_LOGGER.warning("Your phone does not support incoming SMS notifications!")
else:
await self._worker.set_incoming_callback_async(self.sms_callback)
def sms_callback(self, state_machine, callback_type, callback_data):
"""Receive notification about incoming event.
@param state_machine: state machine which invoked action
@type state_machine: gammu.StateMachine
@param callback_type: type of action, one of Call, SMS, CB, USSD
@type callback_type: string
        @param callback_data: event data
        @type callback_data: hash
"""
_LOGGER.debug(
"Received incoming event type:%s,data:%s", callback_type, callback_data
)
entries = self.get_and_delete_all_sms(state_machine)
_LOGGER.debug("SMS entries:%s", entries)
data = list()
for entry in entries:
decoded_entry = gammu.DecodeSMS(entry)
message = entry[0]
_LOGGER.debug("Processing sms:%s,decoded:%s", message, decoded_entry)
if decoded_entry is None:
text = message["Text"]
else:
text = ""
for inner_entry in decoded_entry["Entries"]:
if inner_entry["Buffer"] is not None:
text = text + inner_entry["Buffer"]
event_data = {
"phone": message["Number"],
"date": str(message["DateTime"]),
"message": text,
}
_LOGGER.debug("Append event data:%s", event_data)
data.append(event_data)
self._hass.add_job(self._notify_incoming_sms, data)
# pylint: disable=no-self-use
def get_and_delete_all_sms(self, state_machine, force=False):
"""Read and delete all SMS in the modem."""
# Read SMS memory status ...
memory = state_machine.GetSMSStatus()
# ... and calculate number of messages
remaining = memory["SIMUsed"] + memory["PhoneUsed"]
start_remaining = remaining
# Get all sms
start = True
entries = list()
all_parts = -1
all_parts_arrived = False
_LOGGER.debug("Start remaining:%i", start_remaining)
try:
while remaining > 0:
if start:
entry = state_machine.GetNextSMS(Folder=0, Start=True)
all_parts = entry[0]["UDH"]["AllParts"]
part_number = entry[0]["UDH"]["PartNumber"]
is_single_part = all_parts == 0
is_multi_part = 0 <= all_parts < start_remaining
_LOGGER.debug("All parts:%i", all_parts)
_LOGGER.debug("Part Number:%i", part_number)
_LOGGER.debug("Remaining:%i", remaining)
all_parts_arrived = is_multi_part or is_single_part
_LOGGER.debug("Start all_parts_arrived:%s", all_parts_arrived)
start = False
else:
entry = state_machine.GetNextSMS(
Folder=0, Location=entry[0]["Location"]
)
if all_parts_arrived or force:
remaining = remaining - 1
entries.append(entry)
# delete retrieved sms
_LOGGER.debug("Deleting message")
try:
state_machine.DeleteSMS(Folder=0, Location=entry[0]["Location"])
except gammu.ERR_MEMORY_NOT_AVAILABLE:
_LOGGER.error("Error deleting SMS, memory not available")
else:
_LOGGER.debug("Not all parts have arrived")
break
except gammu.ERR_EMPTY:
            # ERR_EMPTY is raised when the memory is empty, which can happen
            # when the modem reports a wrong memory status.
_LOGGER.info("Failed to read messages!")
# Link all SMS when there are concatenated messages
entries = gammu.LinkSMS(entries)
return entries
@callback
def _notify_incoming_sms(self, messages):
"""Notify hass when an incoming SMS message is received."""
for message in messages:
event_data = {
"phone": message["phone"],
"date": message["date"],
"text": message["message"],
}
self._hass.bus.async_fire(f"{DOMAIN}.incoming_sms", event_data)
async def send_sms_async(self, message):
"""Send sms message via the worker."""
return await self._worker.send_sms_async(message)
async def get_imei_async(self):
"""Get the IMEI of the device."""
return await self._worker.get_imei_async()
async def get_signal_quality_async(self):
"""Get the current signal level of the modem."""
return await self._worker.get_signal_quality_async()
async def terminate_async(self):
"""Terminate modem connection."""
return await self._worker.terminate_async()
async def create_sms_gateway(config, hass):
"""Create the sms gateway."""
try:
worker = GammuAsyncWorker()
worker.configure(config)
await worker.init_async()
gateway = Gateway(worker, hass)
await gateway.init_async()
return gateway
except gammu.GSMError as exc: # pylint: disable=no-member
_LOGGER.error("Failed to initialize, error %s", exc)
return None
|
import unittest
from absl import flags
import mock
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import aws_cluster_parameter_group
from perfkitbenchmarker.providers.aws import util
from tests import pkb_common_test_case
TEST_RUN_URI = 'fakeru'
AWS_ZONE_US_EAST_1A = 'us-east-1a'
FLAGS = flags.FLAGS
class RedshiftClusterParameterGroupTestCase(
pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(RedshiftClusterParameterGroupTestCase, self).setUp()
FLAGS.zones = [AWS_ZONE_US_EAST_1A]
FLAGS.run_uri = TEST_RUN_URI
def testValidClusterParameterGroupCreation(self):
cpg = aws_cluster_parameter_group.RedshiftClusterParameterGroup(
1, list(util.AWS_PREFIX))
self.assertEqual(cpg.name, 'pkb-%s' % TEST_RUN_URI)
with mock.patch(
vm_util.__name__ + '.IssueCommand',
return_value=('out_', 'err_', 0)) as mock_issue:
cpg._Create()
self.assertEqual(mock_issue.call_count, 2)
mock_issue.assert_called_with([
'aws', '--output', 'json', 'redshift',
'modify-cluster-parameter-group', '--parameter-group-name',
'pkb-%s' % TEST_RUN_URI, '--parameters',
('[{"ParameterName":"wlm_json_configuration","ParameterValue":"'
'[{\\"query_concurrency\\":1}]","ApplyType":"dynamic"}]')
])
def testValidClusterParameterGroupDeletion(self):
cpg = aws_cluster_parameter_group.RedshiftClusterParameterGroup(
1, list(util.AWS_PREFIX))
self.assertEqual(cpg.name, 'pkb-%s' % TEST_RUN_URI)
with mock.patch(
vm_util.__name__ + '.IssueCommand',
return_value=('out_', 'err_', 0)) as mock_issue:
cpg._Delete()
mock_issue.assert_called_once()
mock_issue.assert_called_with([
'aws', '--output', 'json', 'redshift',
'delete-cluster-parameter-group', '--parameter-group-name',
'pkb-%s' % TEST_RUN_URI
], raise_on_failure=False)
if __name__ == '__main__':
unittest.main()
|
import pytest
from homeassistant.components.sentry import DOMAIN
from tests.common import MockConfigEntry
@pytest.fixture(name="config_entry")
def config_entry_fixture():
"""Create a mock config entry."""
return MockConfigEntry(domain=DOMAIN, title="Sentry")
@pytest.fixture(name="config")
def config_fixture():
"""Create hass config fixture."""
return {DOMAIN: {"dsn": "http://[email protected]/1"}}
|
import diamond.collector
from subprocess import Popen, PIPE
from re import compile as re_compile
import logging
node_re = re_compile(r'(?P<node>^node \d+ (free|size)): (?P<size>\d+) MB')
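# Matches lines from `numactl --hardware` output such as (hypothetical values):
#   node 0 size: 16160 MB
#   node 0 free: 14208 MB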
class NumaCollector(diamond.collector.Collector):
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NumaCollector, self).get_default_config()
config.update({
'path': 'numa',
'bin': self.find_binary('numactl'),
})
return config
def collect(self):
p = Popen([self.config['bin'], '--hardware'], stdout=PIPE, stderr=PIPE)
output, errors = p.communicate()
lines = output.split('\n')
for line in lines:
try:
match = node_re.search(line)
if match:
logging.debug("Matched: %s %s" %
(match.group('node'), match.group('size')))
metric_name = "%s_MB" % match.group('node').replace(' ',
'_')
metric_value = int(match.group('size'))
logging.debug("Publishing %s %s" %
(metric_name, metric_value))
self.publish(metric_name, metric_value)
except Exception as e:
logging.error('Failed because: %s' % str(e))
continue
|
import asyncio
from ftplib import FTP, error_perm
import logging
from haffmpeg.camera import CameraMjpeg
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PATH,
CONF_PORT,
CONF_USERNAME,
)
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
_LOGGER = logging.getLogger(__name__)
DEFAULT_BRAND = "Xiaomi Home Camera"
DEFAULT_PATH = "/media/mmcblk0p1/record"
DEFAULT_PORT = 21
DEFAULT_USERNAME = "root"
DEFAULT_ARGUMENTS = "-pred 1"
CONF_FFMPEG_ARGUMENTS = "ffmpeg_arguments"
CONF_MODEL = "model"
MODEL_YI = "yi"
MODEL_XIAOFANG = "xiaofang"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_HOST): cv.template,
vol.Required(CONF_MODEL): vol.Any(MODEL_YI, MODEL_XIAOFANG),
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PATH, default=DEFAULT_PATH): cv.string,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_FFMPEG_ARGUMENTS, default=DEFAULT_ARGUMENTS): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a Xiaomi Camera."""
_LOGGER.debug("Received configuration for model %s", config[CONF_MODEL])
async_add_entities([XiaomiCamera(hass, config)])
class XiaomiCamera(Camera):
"""Define an implementation of a Xiaomi Camera."""
def __init__(self, hass, config):
"""Initialize."""
super().__init__()
self._extra_arguments = config.get(CONF_FFMPEG_ARGUMENTS)
self._last_image = None
self._last_url = None
self._manager = hass.data[DATA_FFMPEG]
self._name = config[CONF_NAME]
self.host = config[CONF_HOST]
self.host.hass = hass
self._model = config[CONF_MODEL]
self.port = config[CONF_PORT]
self.path = config[CONF_PATH]
self.user = config[CONF_USERNAME]
self.passwd = config[CONF_PASSWORD]
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def brand(self):
"""Return the camera brand."""
return DEFAULT_BRAND
@property
def model(self):
"""Return the camera model."""
return self._model
def get_latest_video_url(self, host):
"""Retrieve the latest video file from the Xiaomi Camera FTP server."""
ftp = FTP(host)
try:
ftp.login(self.user, self.passwd)
except error_perm as exc:
_LOGGER.error("Camera login failed: %s", exc)
return False
try:
ftp.cwd(self.path)
except error_perm as exc:
_LOGGER.error("Unable to find path: %s - %s", self.path, exc)
return False
dirs = [d for d in ftp.nlst() if "." not in d]
if not dirs:
_LOGGER.warning("There don't appear to be any folders")
return False
first_dir = latest_dir = dirs[-1]
try:
ftp.cwd(first_dir)
except error_perm as exc:
_LOGGER.error("Unable to find path: %s - %s", first_dir, exc)
return False
if self._model == MODEL_XIAOFANG:
dirs = [d for d in ftp.nlst() if "." not in d]
if not dirs:
_LOGGER.warning("There don't appear to be any uploaded videos")
return False
latest_dir = dirs[-1]
ftp.cwd(latest_dir)
videos = [v for v in ftp.nlst() if ".tmp" not in v]
if not videos:
_LOGGER.info('Video folder "%s" is empty; delaying', latest_dir)
return False
if self._model == MODEL_XIAOFANG:
video = videos[-2]
else:
video = videos[-1]
return f"ftp://{self.user}:{self.passwd}@{host}:{self.port}{ftp.pwd()}/{video}"
async def async_camera_image(self):
"""Return a still image response from the camera."""
try:
host = self.host.async_render(parse_result=False)
except TemplateError as exc:
_LOGGER.error("Error parsing template %s: %s", self.host, exc)
return self._last_image
url = await self.hass.async_add_executor_job(self.get_latest_video_url, host)
if url != self._last_url:
ffmpeg = ImageFrame(self._manager.binary, loop=self.hass.loop)
self._last_image = await asyncio.shield(
ffmpeg.get_image(
url, output_format=IMAGE_JPEG, extra_cmd=self._extra_arguments
)
)
self._last_url = url
return self._last_image
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
stream = CameraMjpeg(self._manager.binary, loop=self.hass.loop)
await stream.open_camera(self._last_url, extra_cmd=self._extra_arguments)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass,
request,
stream_reader,
self._manager.ffmpeg_stream_content_type,
)
finally:
await stream.close()
|
import glob2
import os
import logging
logger = logging.getLogger('psdash.log')
class LogError(Exception):
pass
class ReverseFileSearcher(object):
DEFAULT_CHUNK_SIZE = 8192
def __init__(self, filename, needle, chunk_size=DEFAULT_CHUNK_SIZE):
self._chunk_size = int(chunk_size)
if not needle:
raise ValueError("Needle is empty")
if len(needle) > self._chunk_size:
raise ValueError("Needle size is larger than the chunk size.")
self._filename = filename
self._needle = needle
self._fp = open(filename, "r")
self.reset()
def reset(self):
self._fp.seek(0, os.SEEK_END)
def __iter__(self):
return self
def next(self):
pos = self.find()
if pos < 0:
raise StopIteration
return pos
def _read(self):
"""
        Reads one chunk backwards from the current file-pointer position and returns it.
:rtype : str
"""
filepos = self._fp.tell()
if filepos < 1:
return ""
destpos = max(filepos - self._chunk_size, 0)
self._fp.seek(destpos)
buf = self._fp.read(filepos - destpos)
self._fp.seek(destpos)
return buf
def find(self):
"""
        Returns the position of the first occurrence of the needle.
If the needle was not found, -1 is returned.
:rtype : int
"""
lastbuf = ""
while 0 < self._fp.tell():
buf = self._read()
bufpos = (buf + lastbuf).rfind(self._needle)
if bufpos > -1:
filepos = self._fp.tell() + bufpos
self._fp.seek(filepos)
return filepos
            # Keep the head of this chunk so a needle split across two
            # chunks is still found on the next iteration.
lastbuf = buf[:len(self._needle)]
return -1
def find_all(self):
"""
        Searches the file for occurrences of self.needle.
        Returns a tuple of positions where occurrences were found.
:rtype : tuple
"""
self.reset()
return tuple(pos for pos in self)
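# Minimal usage sketch (hypothetical file path and needle):
#   searcher = ReverseFileSearcher("/var/log/app.log", "ERROR")
#   positions = searcher.find_all()  # byte offsets, most recent match first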
class LogReader(object):
BUFFER_SIZE = 8192
def __init__(self, filename, buffer_size=BUFFER_SIZE):
self.filename = filename
self.fp = open(filename, 'r')
self.buffer_size = buffer_size
self._searchers = {}
def __repr__(self):
return '<LogReader filename=%s, file-pos=%d>' % (
self.filename, self.fp.tell()
)
def set_tail_position(self):
stat = os.fstat(self.fp.fileno())
if stat.st_size >= self.buffer_size:
self.fp.seek(-self.buffer_size, os.SEEK_END)
else:
self.fp.seek(0)
def read(self):
buf = self.fp.read(self.buffer_size)
return buf
def search(self, text):
"""
        Find text in the log file from the current search position.
        Returns a tuple containing:
        absolute position,
        position in the result buffer,
        result buffer (the actual file contents).
"""
key = hash(text)
searcher = self._searchers.get(key)
if not searcher:
searcher = ReverseFileSearcher(self.filename, text)
self._searchers[key] = searcher
position = searcher.find()
if position < 0:
# reset the searcher to start from the tail again.
searcher.reset()
return -1, -1, ''
# try to get some content from before and after the result's position
read_before = self.buffer_size / 2
offset = max(position - read_before, 0)
bufferpos = position if offset == 0 else read_before
self.fp.seek(offset)
return position, bufferpos, self.read()
def close(self):
self.fp.close()
class Logs(object):
def __init__(self):
self.available = set()
self.readers = {}
def add_available(self, filename):
# quick verification that it exists and can be read
try:
filename = filename.decode('utf-8')
f = open(filename)
f.close()
except IOError as e:
raise LogError('Could not read log file "%s" (%s)' % (filename, e))
logger.debug('Adding log file %s', filename)
return self.available.add(filename)
def remove_available(self, filename):
self.remove(filename)
self.available.remove(filename)
def get_available(self):
available = []
to_remove = []
for filename in self.available:
try:
log = self.get(filename)
available.append(log)
except IOError:
logger.info('Failed to get "%s", removing from available logs', filename)
to_remove.append(filename)
if to_remove:
map(self.remove_available, to_remove)
return available
def clear_available(self):
self.clear()
self.available = set()
def add_patterns(self, patterns):
i = 0
for p in patterns:
for log_file in glob2.iglob(p):
if os.path.isfile(log_file):
try:
self.add_available(log_file)
i += 1
except LogError as e:
logger.warning(e)
logger.info('Added %d log file(s)', i)
return i
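    # Usage sketch (hypothetical patterns):
    #   logs = Logs()
    #   logs.add_patterns(['/var/log/*.log', '/var/log/**/*.log'])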
def clear(self):
for r in self.readers.itervalues():
r.close()
self.readers = {}
def remove(self, filename):
for reader_key, r in self.readers.items():
if reader_key[0] == filename:
r.close()
del self.readers[reader_key]
def create(self, filename, key=None):
if filename not in self.available:
raise KeyError('No log with filename "%s" is available' % filename)
reader_key = (filename, key)
r = LogReader(filename)
self.readers[reader_key] = r
return r
def get(self, filename, key=None):
reader_key = (filename, key)
if reader_key not in self.readers:
return self.create(filename, key)
else:
return self.readers.get(reader_key)
|
from __future__ import unicode_literals
import os
import mimetypes
import traceback
from lib.data.data import pyoptions
from lib.fun.fun import finishprinter, cool, finalsavepath, fun_name
def combiner_magic(*args):
"""[dir]"""
args = list(args[0])
if len(args) == 2:
directory = os.path.abspath(args[1])
if not os.path.isdir(os.path.abspath(directory)):
exit(pyoptions.CRLF + cool.red("[-] path: {} don't exists".format(directory)))
else:
exit(pyoptions.CRLF + cool.fuchsia("[!] Usage: {} {}".format(args[0], pyoptions.tools_info.get(args[0]))))
storepath = finalsavepath(fun_name())
filepaths = []
combine_list = []
for rootpath, subdirsname, filenames in os.walk(directory):
filepaths.extend([os.path.abspath(os.path.join(rootpath, _)) for _ in filenames])
if len(filepaths) > 0:
for _ in filepaths:
if mimetypes.guess_type(_)[0] == 'text/plain':
combine_list.append(_)
try:
with open(storepath, "a") as f:
for onefile in combine_list:
with open(onefile, 'r') as tf:
f.write(tf.read())
finishprinter(storepath)
except Exception as ex:
print(pyoptions.CRLF + cool.red("[-] Combine file failed, Looking: "))
traceback.print_exc()
|
import logging
from typing import Any, Dict
from homematicip.functionalHomes import SecurityAndAlarmHome
from homeassistant.components.alarm_control_panel import AlarmControlPanelEntity
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN
from .hap import HomematicipHAP
_LOGGER = logging.getLogger(__name__)
CONST_ALARM_CONTROL_PANEL_NAME = "HmIP Alarm Control Panel"
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP alrm control panel from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
async_add_entities([HomematicipAlarmControlPanelEntity(hap)])
class HomematicipAlarmControlPanelEntity(AlarmControlPanelEntity):
"""Representation of the HomematicIP alarm control panel."""
def __init__(self, hap: HomematicipHAP) -> None:
"""Initialize the alarm control panel."""
self._home = hap.home
_LOGGER.info("Setting up %s", self.name)
@property
def device_info(self) -> Dict[str, Any]:
"""Return device specific attributes."""
return {
"identifiers": {(HMIPC_DOMAIN, f"ACP {self._home.id}")},
"name": self.name,
"manufacturer": "eQ-3",
"model": CONST_ALARM_CONTROL_PANEL_NAME,
"via_device": (HMIPC_DOMAIN, self._home.id),
}
@property
def state(self) -> str:
"""Return the state of the alarm control panel."""
# check for triggered alarm
if self._security_and_alarm.alarmActive:
return STATE_ALARM_TRIGGERED
activation_state = self._home.get_security_zones_activation()
# check arm_away
if activation_state == (True, True):
return STATE_ALARM_ARMED_AWAY
# check arm_home
if activation_state == (False, True):
return STATE_ALARM_ARMED_HOME
return STATE_ALARM_DISARMED
@property
def _security_and_alarm(self) -> SecurityAndAlarmHome:
return self._home.get_functionalHome(SecurityAndAlarmHome)
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
async def async_alarm_disarm(self, code=None) -> None:
"""Send disarm command."""
await self._home.set_security_zones_activation(False, False)
async def async_alarm_arm_home(self, code=None) -> None:
"""Send arm home command."""
await self._home.set_security_zones_activation(False, True)
async def async_alarm_arm_away(self, code=None) -> None:
"""Send arm away command."""
await self._home.set_security_zones_activation(True, True)
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
self._home.on_update(self._async_device_changed)
@callback
def _async_device_changed(self, *args, **kwargs) -> None:
"""Handle entity state changes."""
# Don't update disabled entities
if self.enabled:
_LOGGER.debug("Event %s (%s)", self.name, CONST_ALARM_CONTROL_PANEL_NAME)
self.async_write_ha_state()
else:
_LOGGER.debug(
"Device Changed Event for %s (Alarm Control Panel) not fired. Entity is disabled",
self.name,
)
@property
def name(self) -> str:
"""Return the name of the generic entity."""
name = CONST_ALARM_CONTROL_PANEL_NAME
if self._home.name:
name = f"{self._home.name} {name}"
return name
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
@property
def available(self) -> bool:
"""Return if alarm control panel is available."""
return self._home.connected
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self.__class__.__name__}_{self._home.id}"
|
import asyncio
import binascii
import collections
import functools
import itertools
import logging
from random import uniform
import re
from typing import Any, Callable, Iterator, List, Optional, Tuple
import voluptuous as vol
import zigpy.exceptions
import zigpy.types
import zigpy.util
from homeassistant.core import State, callback
from .const import CLUSTER_TYPE_IN, CLUSTER_TYPE_OUT, DATA_ZHA, DATA_ZHA_GATEWAY
from .registries import BINDABLE_CLUSTERS
ClusterPair = collections.namedtuple("ClusterPair", "source_cluster target_cluster")
async def safe_read(
cluster, attributes, allow_cache=True, only_cache=False, manufacturer=None
):
"""Swallow all exceptions from network read.
If we throw during initialization, setup fails. Rather have an entity that
exists, but is in a maybe wrong state, than no entity. This method should
probably only be used during initialization.
"""
try:
result, _ = await cluster.read_attributes(
attributes,
allow_cache=allow_cache,
only_cache=only_cache,
manufacturer=manufacturer,
)
return result
except Exception: # pylint: disable=broad-except
return {}
async def get_matched_clusters(source_zha_device, target_zha_device):
"""Get matched input/output cluster pairs for 2 devices."""
source_clusters = source_zha_device.async_get_std_clusters()
target_clusters = target_zha_device.async_get_std_clusters()
clusters_to_bind = []
for endpoint_id in source_clusters:
for cluster_id in source_clusters[endpoint_id][CLUSTER_TYPE_OUT]:
if cluster_id not in BINDABLE_CLUSTERS:
continue
for t_endpoint_id in target_clusters:
if cluster_id in target_clusters[t_endpoint_id][CLUSTER_TYPE_IN]:
cluster_pair = ClusterPair(
source_cluster=source_clusters[endpoint_id][CLUSTER_TYPE_OUT][
cluster_id
],
target_cluster=target_clusters[t_endpoint_id][CLUSTER_TYPE_IN][
cluster_id
],
)
clusters_to_bind.append(cluster_pair)
return clusters_to_bind
@callback
def async_is_bindable_target(source_zha_device, target_zha_device):
"""Determine if target is bindable to source."""
source_clusters = source_zha_device.async_get_std_clusters()
target_clusters = target_zha_device.async_get_std_clusters()
for endpoint_id in source_clusters:
for t_endpoint_id in target_clusters:
matches = set(
source_clusters[endpoint_id][CLUSTER_TYPE_OUT].keys()
).intersection(target_clusters[t_endpoint_id][CLUSTER_TYPE_IN].keys())
if any(bindable in BINDABLE_CLUSTERS for bindable in matches):
return True
return False
async def async_get_zha_device(hass, device_id):
"""Get a ZHA device for the given device registry id."""
device_registry = await hass.helpers.device_registry.async_get_registry()
registry_device = device_registry.async_get(device_id)
zha_gateway = hass.data[DATA_ZHA][DATA_ZHA_GATEWAY]
ieee_address = list(list(registry_device.identifiers)[0])[1]
ieee = zigpy.types.EUI64.convert(ieee_address)
return zha_gateway.devices[ieee]
def find_state_attributes(states: List[State], key: str) -> Iterator[Any]:
"""Find attributes with matching key from states."""
for state in states:
value = state.attributes.get(key)
if value is not None:
yield value
def mean_int(*args):
"""Return the mean of the supplied values."""
return int(sum(args) / len(args))
def mean_tuple(*args):
"""Return the mean values along the columns of the supplied values."""
return tuple(sum(x) / len(x) for x in zip(*args))
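# Illustrative example: mean_tuple((1, 10), (3, 20)) -> (2.0, 15.0)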
def reduce_attribute(
states: List[State],
key: str,
default: Optional[Any] = None,
reduce: Callable[..., Any] = mean_int,
) -> Any:
"""Find the first attribute matching key from states.
If none are found, return default.
"""
attrs = list(find_state_attributes(states, key))
if not attrs:
return default
if len(attrs) == 1:
return attrs[0]
return reduce(*attrs)
class LogMixin:
"""Log helper."""
def log(self, level, msg, *args):
"""Log with level."""
raise NotImplementedError
def debug(self, msg, *args):
"""Debug level log."""
return self.log(logging.DEBUG, msg, *args)
def info(self, msg, *args):
"""Info level log."""
return self.log(logging.INFO, msg, *args)
def warning(self, msg, *args):
"""Warning method log."""
return self.log(logging.WARNING, msg, *args)
def error(self, msg, *args):
"""Error level log."""
return self.log(logging.ERROR, msg, *args)
def retryable_req(
delays=(1, 5, 10, 15, 30, 60, 120, 180, 360, 600, 900, 1800), raise_=False
):
"""Make a method with ZCL requests retryable.
    This adds a `delays` keyword argument to the function.
    len(delays) is the number of tries.
    `raise_` controls whether the final attempt re-raises the exception.
"""
def decorator(func):
@functools.wraps(func)
async def wrapper(channel, *args, **kwargs):
exceptions = (zigpy.exceptions.ZigbeeException, asyncio.TimeoutError)
try_count, errors = 1, []
for delay in itertools.chain(delays, [None]):
try:
return await func(channel, *args, **kwargs)
except exceptions as ex:
errors.append(ex)
if delay:
delay = uniform(delay * 0.75, delay * 1.25)
channel.debug(
(
"%s: retryable request #%d failed: %s. "
"Retrying in %ss"
),
func.__name__,
try_count,
ex,
round(delay, 1),
)
try_count += 1
await asyncio.sleep(delay)
else:
channel.warning(
"%s: all attempts have failed: %s", func.__name__, errors
)
if raise_:
raise
return wrapper
return decorator
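# Usage sketch (hypothetical channel method):
#   @retryable_req(delays=(1, 5, 10), raise_=True)
#   async def async_configure(self):
#       ...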
def convert_install_code(value: str) -> bytes:
"""Convert string to install code bytes and validate length."""
try:
code = binascii.unhexlify(value.replace("-", "").lower())
except binascii.Error as exc:
raise vol.Invalid(f"invalid hex string: {value}") from exc
if len(code) != 18: # 16 byte code + 2 crc bytes
raise vol.Invalid("invalid length of the install code")
if zigpy.util.convert_install_code(code) is None:
raise vol.Invalid("invalid install code")
return code
QR_CODES = (
# Consciot
r"^([\da-fA-F]{16})\|([\da-fA-F]{36})$",
# Enbrighten
r"""
^Z:
([0-9a-fA-F]{16}) # IEEE address
\$I:
([0-9a-fA-F]{36}) # install code
$
""",
# Aqara
r"""
\$A:
([0-9a-fA-F]{16}) # IEEE address
\$I:
([0-9a-fA-F]{36}) # install code
$
""",
)
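# Hypothetical strings matching the patterns above (illustrative values only):
#   Consciot:   "000B57FFFE123456|83FED3407A939723A5C639B26916D505C3B5"
#   Enbrighten: "Z:000B57FFFE123456$I:83FED3407A939723A5C639B26916D505C3B5"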
def qr_to_install_code(qr_code: str) -> Tuple[zigpy.types.EUI64, bytes]:
"""Try to parse the QR code.
    If successful, return a tuple of an EUI64 address and an install code.
"""
for code_pattern in QR_CODES:
match = re.search(code_pattern, qr_code, re.VERBOSE)
if match is None:
continue
ieee_hex = binascii.unhexlify(match[1])
ieee = zigpy.types.EUI64(ieee_hex[::-1])
install_code = match[2]
# install_code sanity check
install_code = convert_install_code(install_code)
return ieee, install_code
raise vol.Invalid(f"couldn't convert qr code: {qr_code}")
|
import json
import os
import shlex
import subprocess
from jinja2 import Environment, BaseLoader
import pytest
## Uncomment following lines for running in shell
# os.environ['TEST_PROFILE_DIR'] = 'profiles/webapp'
# os.environ['PIPDEPTREE_EXE'] = 'profiles/webapp/.env_python3.6_pip-latest/bin/pipdeptree'
test_profile_dir = os.environ['TEST_PROFILE_DIR']
pipdeptree_path = os.environ['PIPDEPTREE_EXE']
def load_test_spec():
test_spec_path = os.path.join(test_profile_dir, 'test_spec.json')
with open(test_spec_path) as f:
return json.load(f)
test_spec = load_test_spec()
def final_command(s):
tmpl = Environment(loader=BaseLoader).from_string(s)
return tmpl.render(pipdeptree=pipdeptree_path)
def _test_cmp_with_file_contents(spec):
p = subprocess.Popen(shlex.split(spec['command']),
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
out, err = p.communicate()
assert spec['expected_returncode'] == p.returncode
if spec['expected_output_file'] is not None:
exp_output_file = os.path.join(test_profile_dir,
spec['expected_output_file'])
with open(exp_output_file, 'rb') as f:
expected_output = f.read()
assert expected_output == out
else:
assert out == b''
if spec['expected_err_file'] is not None:
exp_err_file = os.path.join(test_profile_dir,
spec['expected_err_file'])
with open(exp_err_file, 'rb') as f:
expected_err = f.read()
assert expected_err == err
else:
assert err == b''
@pytest.mark.parametrize('spec', test_spec)
def test_all_tests_in_profile(spec):
spec['command'] = final_command(spec['command'])
if spec['method'] == 'cmp_with_file_contents':
_test_cmp_with_file_contents(spec)
|
import pytest
from django.db import models
from shop.models.fields import ChoiceEnum, ChoiceEnumField
class MyChoices(ChoiceEnum):
A = 0, "My choice A"
B = 1, "My choice B"
class MyColor(ChoiceEnum):
RED = '#ff0000', "Pure red"
BLUE = '#0000ff', "Pure blue"
class MyModel(models.Model):
f = ChoiceEnumField(enum_type=MyChoices)
class Meta:
app_label = 'shop'
managed = False
def test_int_enum():
choice_a = MyChoices.A
assert isinstance(choice_a, MyChoices)
assert MyChoices.B.name == 'B'
assert MyChoices.B.value == 1
assert MyChoices.B.label == "My choice B"
choice_b = MyChoices('B')
assert str(choice_b) == "My choice B"
assert MyChoices.default == MyChoices.A
assert MyChoices.choices == [(0, "My choice A"), (1, "My choice B")]
def test_str_enum():
red = MyColor.RED
assert isinstance(red, MyColor)
assert MyColor.BLUE.name == 'BLUE'
assert MyColor.BLUE.value == '#0000ff'
assert MyColor.BLUE.label == "Pure blue"
assert MyColor.BLUE == MyColor('#0000ff')
assert str(MyColor.BLUE) == "Pure blue"
assert MyColor.choices == [('#ff0000', "Pure red"), ('#0000ff', "Pure blue")]
def test_to_python():
f = ChoiceEnumField(enum_type=MyChoices)
assert f.to_python(0) == MyChoices.A
assert f.to_python('A') == MyChoices.A
assert f.to_python(1) == MyChoices.B
with pytest.raises(ValueError):
f.to_python(None)
with pytest.raises(ValueError):
f.to_python(3)
def test_deconstruct():
f = ChoiceEnumField(enum_type=MyChoices)
name, path, args_, kwargs_ = f.deconstruct()
assert name is None
assert path == 'shop.models.fields.ChoiceEnumField'
assert args_ == []
assert kwargs_ == {}
def test_from_db_value():
f = ChoiceEnumField(enum_type=MyChoices)
assert f.from_db_value(0, None, None) is MyChoices.A
assert f.from_db_value(1, None, None) is MyChoices.B
assert f.from_db_value(2, None, None) is 2
def test_get_prep_value():
f = ChoiceEnumField(enum_type=MyChoices)
assert f.get_prep_value(MyChoices.A) is 0
assert f.get_prep_value(MyChoices.B) is 1
def test_value_to_string():
obj = MyModel(f=MyChoices.A)
assert ChoiceEnumField(name='f').value_to_string(obj) == 'A'
with pytest.raises(ValueError):
ChoiceEnumField(name='f').value_to_string(0)
|
import numpy as np
from qstrader.portcon.order_sizer.order_sizer import OrderSizer
class DollarWeightedCashBufferedOrderSizer(OrderSizer):
"""
Creates a target portfolio of quantities for each Asset
using its provided weight and total equity available in the
Broker portfolio.
Includes an optional cash buffer due to the non-fractional amount
of share/unit sizes. The cash buffer defaults to 5% of the total
equity, but can be modified.
Parameters
----------
broker : `Broker`
The derived Broker instance to obtain portfolio equity from.
broker_portfolio_id : `str`
The specific portfolio at the Broker to obtain equity from.
data_handler : `DataHandler`
To obtain latest asset prices from.
cash_buffer_percentage : `float`, optional
The percentage of the portfolio equity to retain in
cash to avoid generating Orders that exceed account
equity (assuming no margin available).
"""
def __init__(
self,
broker,
broker_portfolio_id,
data_handler,
cash_buffer_percentage=0.05
):
self.broker = broker
self.broker_portfolio_id = broker_portfolio_id
self.data_handler = data_handler
self.cash_buffer_percentage = self._check_set_cash_buffer(
cash_buffer_percentage
)
def _check_set_cash_buffer(self, cash_buffer_percentage):
"""
Checks and sets the cash buffer percentage value.
Parameters
----------
cash_buffer_percentage : `float`
The percentage of the portfolio equity to retain in
cash to avoid generating Orders that exceed account
equity (assuming no margin available).
Returns
-------
`float`
The cash buffer percentage value.
"""
if (
cash_buffer_percentage < 0.0 or cash_buffer_percentage > 1.0
):
raise ValueError(
'Cash buffer percentage "%s" provided to dollar-weighted '
'execution algorithm is negative or '
                'exceeds 100%%.' % cash_buffer_percentage
)
else:
return cash_buffer_percentage
def _obtain_broker_portfolio_total_equity(self):
"""
Obtain the Broker portfolio total equity.
Returns
-------
`float`
The Broker portfolio total equity.
"""
return self.broker.get_portfolio_total_equity(self.broker_portfolio_id)
def _normalise_weights(self, weights):
"""
Rescale provided weight values to ensure
weight vector sums to unity.
Parameters
----------
weights : `dict{Asset: float}`
The un-normalised weight vector.
Returns
-------
`dict{Asset: float}`
The unit sum weight vector.
"""
if any([weight < 0.0 for weight in weights.values()]):
raise ValueError(
'Dollar-weighted cash-buffered order sizing does not support '
'negative weights. All positions must be long-only.'
)
weight_sum = sum(weight for weight in weights.values())
# If the weights are very close or equal to zero then rescaling
# is not possible, so simply return weights unscaled
if np.isclose(weight_sum, 0.0):
return weights
return {
asset: (weight / weight_sum)
for asset, weight in weights.items()
}
def __call__(self, dt, weights):
"""
Creates a dollar-weighted cash-buffered target portfolio from the
provided target weights at a particular timestamp.
Parameters
----------
dt : `pd.Timestamp`
The current date-time timestamp.
weights : `dict{Asset: float}`
The (potentially unnormalised) target weights.
Returns
-------
`dict{Asset: dict}`
The cash-buffered target portfolio dictionary with quantities.
"""
total_equity = self._obtain_broker_portfolio_total_equity()
cash_buffered_total_equity = total_equity * (
1.0 - self.cash_buffer_percentage
)
# Pre-cost dollar weight
N = len(weights)
if N == 0:
# No forecasts so portfolio remains in cash
# or is fully liquidated
return {}
# Ensure weight vector sums to unity
normalised_weights = self._normalise_weights(weights)
target_portfolio = {}
for asset, weight in sorted(normalised_weights.items()):
pre_cost_dollar_weight = cash_buffered_total_equity * weight
# Estimate broker fees for this asset
est_quantity = 0 # TODO: Needs to be added for IB
est_costs = self.broker.fee_model.calc_total_cost(
asset, est_quantity, pre_cost_dollar_weight, broker=self.broker
)
# Calculate integral target asset quantity assuming broker costs
after_cost_dollar_weight = pre_cost_dollar_weight - est_costs
asset_price = self.data_handler.get_asset_latest_ask_price(
dt, asset
)
if np.isnan(asset_price):
raise ValueError(
'Asset price for "%s" at timestamp "%s" is Not-a-Number (NaN). '
'This can occur if the chosen backtest start date is earlier '
'than the first available price for a particular asset. Try '
'modifying the backtest start date and re-running.' % (asset, dt)
)
# TODO: Long only for the time being.
asset_quantity = int(
np.floor(after_cost_dollar_weight / asset_price)
)
# Add to the target portfolio
target_portfolio[asset] = {"quantity": asset_quantity}
return target_portfolio
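# Worked example (hypothetical figures): with $100,000 total equity and the
# default 5% cash buffer, $95,000 is allocatable. A 60/40 split across two
# assets priced at $50 and $200 gives floor(57000 / 50) = 1140 and
# floor(38000 / 200) = 190 shares respectively (ignoring estimated fees).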
|
from flask import current_app
from lemur.common.defaults import common_name, bitstrength
from lemur.common.utils import parse_certificate, parse_private_key
from lemur.plugins.bases import DestinationPlugin
from cryptography.hazmat.primitives import serialization
import requests
import json
import sys
def handle_response(my_response):
"""
Helper function for parsing responses from the Entrust API.
:param my_response:
:return: :raise Exception:
"""
msg = {
200: "The request was successful.",
400: "Keyvault Error"
}
try:
data = json.loads(my_response.content)
except ValueError:
        # catch an empty JSON object here
data = {'response': 'No detailed message'}
status_code = my_response.status_code
if status_code > 399:
raise Exception(f"AZURE error: {msg.get(status_code, status_code)}\n{data}")
log_data = {
"function": f"{__name__}.{sys._getframe().f_code.co_name}",
"message": "Response",
"status": status_code,
"response": data
}
current_app.logger.info(log_data)
if data == {'response': 'No detailed message'}:
# status if no data
return status_code
else:
# return data from the response
return data
def get_access_token(tenant, appID, password, self):
"""
    Gets the access token using the appID and the password and returns it.
    Improvement option: we could cache the token and renew it only when necessary.
:param tenant: Tenant used
:param appID: Application ID from Azure
:param password: password for Application ID
:return: Access token to post to the keyvault
"""
# prepare the call for the access_token
auth_url = f"https://login.microsoftonline.com/{tenant}/oauth2/token"
post_data = {
'grant_type': 'client_credentials',
'client_id': appID,
'client_secret': password,
'resource': 'https://vault.azure.net'
}
try:
response = self.session.post(auth_url, data=post_data)
except requests.exceptions.RequestException as e:
current_app.logger.exception(f"AZURE: Error for POST {e}")
access_token = json.loads(response.content)["access_token"]
return access_token
class AzureDestinationPlugin(DestinationPlugin):
"""Azure Keyvault Destination plugin for Lemur"""
title = "Azure"
slug = "azure-keyvault-destination"
description = "Allow the uploading of certificates to Azure key vault"
author = "Sirferl"
author_url = "https://github.com/sirferl/lemur"
options = [
{
"name": "vaultUrl",
"type": "str",
"required": True,
"validation": "^https?://[a-zA-Z0-9.:-]+$",
"helpMessage": "Valid URL to Azure key vault instance",
},
{
"name": "azureTenant",
"type": "str",
"required": True,
"validation": "^([a-zA-Z0-9/-/?)+$",
"helpMessage": "Tenant for the Azure Key Vault",
},
{
"name": "appID",
"type": "str",
"required": True,
"validation": "^([a-zA-Z0-9/-/?)+$",
"helpMessage": "AppID for the Azure Key Vault",
},
{
"name": "azurePassword",
"type": "str",
"required": True,
"validation": "[0-9a-zA-Z.:_-~]+",
"helpMessage": "Tenant password for the Azure Key Vault",
}
]
def __init__(self, *args, **kwargs):
self.session = requests.Session()
super(AzureDestinationPlugin, self).__init__(*args, **kwargs)
def upload(self, name, body, private_key, cert_chain, options, **kwargs):
"""
Upload certificate and private key
:param private_key:
:param cert_chain:
:return:
"""
        # We use the common name to identify the certificate.
        # Azure does not allow "." in the certificate name, so we replace it with "-".
cert = parse_certificate(body)
certificate_name = common_name(cert).replace(".", "-")
vault_URI = self.get_option("vaultUrl", options)
tenant = self.get_option("azureTenant", options)
app_id = self.get_option("appID", options)
password = self.get_option("azurePassword", options)
access_token = get_access_token(tenant, app_id, password, self)
cert_url = f"{vault_URI}/certificates/{certificate_name}/import?api-version=7.1"
post_header = {
"Authorization": f"Bearer {access_token}"
}
key_pkcs8 = parse_private_key(private_key).private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption(),
)
key_pkcs8 = key_pkcs8.decode("utf-8").replace('\\n', '\n')
cert_package = f"{body}\n{key_pkcs8}"
post_body = {
"value": cert_package,
"policy": {
"key_props": {
"exportable": True,
"kty": "RSA",
"key_size": bitstrength(cert),
"reuse_key": True
},
"secret_props": {
"contentType": "application/x-pem-file"
}
}
}
try:
response = self.session.post(cert_url, headers=post_header, json=post_body)
except requests.exceptions.RequestException as e:
current_app.logger.exception(f"AZURE: Error for POST {e}")
return_value = handle_response(response)
|
from homeassistant.components.camera import Image
from homeassistant.components.homekit.img_util import (
TurboJPEGSingleton,
scale_jpeg_camera_image,
)
from .common import EMPTY_8_6_JPEG, mock_turbo_jpeg
from tests.async_mock import patch
EMPTY_16_12_JPEG = b"empty_16_12"
def test_turbojpeg_singleton():
"""Verify the instance always gives back the same."""
assert TurboJPEGSingleton.instance() == TurboJPEGSingleton.instance()
def test_scale_jpeg_camera_image():
"""Test we can scale a jpeg image."""
camera_image = Image("image/jpeg", EMPTY_16_12_JPEG)
turbo_jpeg = mock_turbo_jpeg(first_width=16, first_height=12)
with patch("turbojpeg.TurboJPEG", return_value=False):
TurboJPEGSingleton()
assert scale_jpeg_camera_image(camera_image, 16, 12) == camera_image.content
turbo_jpeg = mock_turbo_jpeg(first_width=16, first_height=12)
with patch("turbojpeg.TurboJPEG", return_value=turbo_jpeg):
TurboJPEGSingleton()
assert scale_jpeg_camera_image(camera_image, 16, 12) == EMPTY_16_12_JPEG
turbo_jpeg = mock_turbo_jpeg(
first_width=16, first_height=12, second_width=8, second_height=6
)
with patch("turbojpeg.TurboJPEG", return_value=turbo_jpeg):
TurboJPEGSingleton()
jpeg_bytes = scale_jpeg_camera_image(camera_image, 8, 6)
assert jpeg_bytes == EMPTY_8_6_JPEG
def test_turbojpeg_load_failure():
"""Handle libjpegturbo not being installed."""
with patch("turbojpeg.TurboJPEG", side_effect=Exception):
TurboJPEGSingleton()
assert TurboJPEGSingleton.instance() is False
with patch("turbojpeg.TurboJPEG"):
TurboJPEGSingleton()
assert TurboJPEGSingleton.instance()
|
import functools
import numpy as np
import pandas as pd
from ..core.alignment import broadcast
from .facetgrid import _easy_facetgrid
from .utils import (
_add_colorbar,
_is_numeric,
_process_cmap_cbar_kwargs,
get_axis,
label_from_attrs,
)
# copied from seaborn
_MARKERSIZE_RANGE = np.array([18.0, 72.0])
def _infer_meta_data(ds, x, y, hue, hue_style, add_guide):
dvars = set(ds.variables.keys())
error_msg = " must be one of ({:s})".format(", ".join(dvars))
if x not in dvars:
raise ValueError("x" + error_msg)
if y not in dvars:
raise ValueError("y" + error_msg)
if hue is not None and hue not in dvars:
raise ValueError("hue" + error_msg)
if hue:
hue_is_numeric = _is_numeric(ds[hue].values)
if hue_style is None:
hue_style = "continuous" if hue_is_numeric else "discrete"
if not hue_is_numeric and (hue_style == "continuous"):
raise ValueError(
f"Cannot create a colorbar for a non numeric coordinate: {hue}"
)
if add_guide is None or add_guide is True:
add_colorbar = True if hue_style == "continuous" else False
add_legend = True if hue_style == "discrete" else False
else:
add_colorbar = False
add_legend = False
else:
if add_guide is True:
raise ValueError("Cannot set add_guide when hue is None.")
add_legend = False
add_colorbar = False
if hue_style is not None and hue_style not in ["discrete", "continuous"]:
raise ValueError("hue_style must be either None, 'discrete' or 'continuous'.")
if hue:
hue_label = label_from_attrs(ds[hue])
hue = ds[hue]
else:
hue_label = None
hue = None
return {
"add_colorbar": add_colorbar,
"add_legend": add_legend,
"hue_label": hue_label,
"hue_style": hue_style,
"xlabel": label_from_attrs(ds[x]),
"ylabel": label_from_attrs(ds[y]),
"hue": hue,
}
def _infer_scatter_data(ds, x, y, hue, markersize, size_norm, size_mapping=None):
broadcast_keys = ["x", "y"]
to_broadcast = [ds[x], ds[y]]
if hue:
to_broadcast.append(ds[hue])
broadcast_keys.append("hue")
if markersize:
to_broadcast.append(ds[markersize])
broadcast_keys.append("size")
broadcasted = dict(zip(broadcast_keys, broadcast(*to_broadcast)))
data = {"x": broadcasted["x"], "y": broadcasted["y"], "hue": None, "sizes": None}
if hue:
data["hue"] = broadcasted["hue"]
if markersize:
size = broadcasted["size"]
if size_mapping is None:
size_mapping = _parse_size(size, size_norm)
data["sizes"] = size.copy(
data=np.reshape(size_mapping.loc[size.values.ravel()].values, size.shape)
)
return data
# copied from seaborn
def _parse_size(data, norm):
import matplotlib as mpl
if data is None:
return None
data = data.values.flatten()
if not _is_numeric(data):
levels = np.unique(data)
numbers = np.arange(1, 1 + len(levels))[::-1]
else:
levels = numbers = np.sort(np.unique(data))
min_width, max_width = _MARKERSIZE_RANGE
# width_range = min_width, max_width
if norm is None:
norm = mpl.colors.Normalize()
elif isinstance(norm, tuple):
norm = mpl.colors.Normalize(*norm)
elif not isinstance(norm, mpl.colors.Normalize):
err = "``size_norm`` must be None, tuple, or Normalize object."
raise ValueError(err)
norm.clip = True
if not norm.scaled():
norm(np.asarray(numbers))
# limits = norm.vmin, norm.vmax
scl = norm(numbers)
widths = np.asarray(min_width + scl * (max_width - min_width))
if scl.mask.any():
widths[scl.mask] = 0
sizes = dict(zip(levels, widths))
return pd.Series(sizes)
class _Dataset_PlotMethods:
"""
Enables use of xarray.plot functions as attributes on a Dataset.
For example, Dataset.plot.scatter
"""
def __init__(self, dataset):
self._ds = dataset
def __call__(self, *args, **kwargs):
raise ValueError(
"Dataset.plot cannot be called directly. Use "
"an explicit plot method, e.g. ds.plot.scatter(...)"
)
def _dsplot(plotfunc):
commondoc = """
Parameters
----------
ds : Dataset
x, y : str
Variable names for x, y axis.
hue: str, optional
Variable by which to color scattered points
hue_style: str, optional
Can be either 'discrete' (legend) or 'continuous' (color bar).
markersize: str, optional
scatter only. Variable by which to vary size of scattered points.
size_norm: optional
Either None or 'Norm' instance to normalize the 'markersize' variable.
add_guide: bool, optional
Add a guide that depends on hue_style
- for "discrete", build a legend.
This is the default for non-numeric `hue` variables.
- for "continuous", build a colorbar
row : str, optional
If passed, make row faceted plots on this dimension name
col : str, optional
If passed, make column faceted plots on this dimension name
col_wrap : int, optional
Use together with ``col`` to wrap faceted plots
ax : matplotlib axes object, optional
If None, uses the current axis. Not applicable when using facets.
subplot_kws : dict, optional
Dictionary of keyword arguments for matplotlib subplots. Only applies
to FacetGrid plotting.
aspect : scalar, optional
Aspect ratio of plot, so that ``aspect * size`` gives the width in
inches. Only used if a ``size`` is provided.
size : scalar, optional
If provided, create a new figure for the plot with the given size.
Height (in inches) of each plot. See also: ``aspect``.
norm : ``matplotlib.colors.Normalize`` instance, optional
If the ``norm`` has vmin or vmax specified, the corresponding kwarg
must be None.
vmin, vmax : float, optional
Values to anchor the colormap, otherwise they are inferred from the
data and other keyword arguments. When a diverging dataset is inferred,
setting one of these values will fix the other by symmetry around
``center``. Setting both values prevents use of a diverging colormap.
If discrete levels are provided as an explicit list, both of these
values are ignored.
cmap : str or colormap, optional
The mapping from data values to color space. Either a
matplotlib colormap name or object. If not provided, this will
be either ``viridis`` (if the function infers a sequential
dataset) or ``RdBu_r`` (if the function infers a diverging
dataset). When `Seaborn` is installed, ``cmap`` may also be a
`seaborn` color palette. If ``cmap`` is seaborn color palette
and the plot type is not ``contour`` or ``contourf``, ``levels``
must also be specified.
colors : color-like or list of color-like, optional
A single color or a list of colors. If the plot type is not ``contour``
or ``contourf``, the ``levels`` argument is required.
center : float, optional
The value at which to center the colormap. Passing this value implies
use of a diverging colormap. Setting it to ``False`` prevents use of a
diverging colormap.
robust : bool, optional
If True and ``vmin`` or ``vmax`` are absent, the colormap range is
computed with 2nd and 98th percentiles instead of the extreme values.
extend : {"neither", "both", "min", "max"}, optional
How to draw arrows extending the colorbar beyond its limits. If not
provided, extend is inferred from vmin, vmax and the data limits.
levels : int or list-like object, optional
Split the colormap (cmap) into discrete color intervals. If an integer
is provided, "nice" levels are chosen based on the data range: this can
imply that the final number of levels is not exactly the expected one.
Setting ``vmin`` and/or ``vmax`` with ``levels=N`` is equivalent to
setting ``levels=np.linspace(vmin, vmax, N)``.
**kwargs : optional
Additional keyword arguments to matplotlib
"""
# Build on the original docstring
plotfunc.__doc__ = f"{plotfunc.__doc__}\n{commondoc}"
@functools.wraps(plotfunc)
def newplotfunc(
ds,
x=None,
y=None,
hue=None,
hue_style=None,
col=None,
row=None,
ax=None,
figsize=None,
size=None,
col_wrap=None,
sharex=True,
sharey=True,
aspect=None,
subplot_kws=None,
add_guide=None,
cbar_kwargs=None,
cbar_ax=None,
vmin=None,
vmax=None,
norm=None,
infer_intervals=None,
center=None,
levels=None,
robust=None,
colors=None,
extend=None,
cmap=None,
**kwargs,
):
_is_facetgrid = kwargs.pop("_is_facetgrid", False)
if _is_facetgrid: # facetgrid call
meta_data = kwargs.pop("meta_data")
else:
meta_data = _infer_meta_data(ds, x, y, hue, hue_style, add_guide)
hue_style = meta_data["hue_style"]
# handle facetgrids first
if col or row:
allargs = locals().copy()
allargs["plotfunc"] = globals()[plotfunc.__name__]
allargs["data"] = ds
# TODO dcherian: why do I need to remove kwargs?
for arg in ["meta_data", "kwargs", "ds"]:
del allargs[arg]
return _easy_facetgrid(kind="dataset", **allargs, **kwargs)
figsize = kwargs.pop("figsize", None)
ax = get_axis(figsize, size, aspect, ax)
if hue_style == "continuous" and hue is not None:
if _is_facetgrid:
cbar_kwargs = meta_data["cbar_kwargs"]
cmap_params = meta_data["cmap_params"]
else:
cmap_params, cbar_kwargs = _process_cmap_cbar_kwargs(
plotfunc, ds[hue].values, **locals()
)
# subset that can be passed to scatter, hist2d
cmap_params_subset = {
vv: cmap_params[vv] for vv in ["vmin", "vmax", "norm", "cmap"]
}
else:
cmap_params_subset = {}
primitive = plotfunc(
ds=ds,
x=x,
y=y,
hue=hue,
hue_style=hue_style,
ax=ax,
cmap_params=cmap_params_subset,
**kwargs,
)
if _is_facetgrid: # if this was called from Facetgrid.map_dataset,
return primitive # finish here. Else, make labels
if meta_data.get("xlabel", None):
ax.set_xlabel(meta_data.get("xlabel"))
if meta_data.get("ylabel", None):
ax.set_ylabel(meta_data.get("ylabel"))
if meta_data["add_legend"]:
ax.legend(handles=primitive, title=meta_data.get("hue_label", None))
if meta_data["add_colorbar"]:
cbar_kwargs = {} if cbar_kwargs is None else cbar_kwargs
if "label" not in cbar_kwargs:
cbar_kwargs["label"] = meta_data.get("hue_label", None)
_add_colorbar(primitive, ax, cbar_ax, cbar_kwargs, cmap_params)
return primitive
@functools.wraps(newplotfunc)
def plotmethod(
_PlotMethods_obj,
x=None,
y=None,
hue=None,
hue_style=None,
col=None,
row=None,
ax=None,
figsize=None,
col_wrap=None,
sharex=True,
sharey=True,
aspect=None,
size=None,
subplot_kws=None,
add_guide=None,
cbar_kwargs=None,
cbar_ax=None,
vmin=None,
vmax=None,
norm=None,
infer_intervals=None,
center=None,
levels=None,
robust=None,
colors=None,
extend=None,
cmap=None,
**kwargs,
):
"""
The method should have the same signature as the function.
This just makes the method work on Plotmethods objects,
and passes all the other arguments straight through.
"""
allargs = locals()
allargs["ds"] = _PlotMethods_obj._ds
allargs.update(kwargs)
for arg in ["_PlotMethods_obj", "newplotfunc", "kwargs"]:
del allargs[arg]
return newplotfunc(**allargs)
# Add to class _PlotMethods
setattr(_Dataset_PlotMethods, plotmethod.__name__, plotmethod)
return newplotfunc
@_dsplot
def scatter(ds, x, y, ax, **kwargs):
"""
Scatter Dataset data variables against each other.
"""
if "add_colorbar" in kwargs or "add_legend" in kwargs:
raise ValueError(
"Dataset.plot.scatter does not accept "
"'add_colorbar' or 'add_legend'. "
"Use 'add_guide' instead."
)
cmap_params = kwargs.pop("cmap_params")
hue = kwargs.pop("hue")
hue_style = kwargs.pop("hue_style")
markersize = kwargs.pop("markersize", None)
size_norm = kwargs.pop("size_norm", None)
size_mapping = kwargs.pop("size_mapping", None) # set by facetgrid
# need to infer size_mapping with full dataset
data = _infer_scatter_data(ds, x, y, hue, markersize, size_norm, size_mapping)
if hue_style == "discrete":
primitive = []
for label in np.unique(data["hue"].values):
mask = data["hue"] == label
if data["sizes"] is not None:
kwargs.update(s=data["sizes"].where(mask, drop=True).values.flatten())
primitive.append(
ax.scatter(
data["x"].where(mask, drop=True).values.flatten(),
data["y"].where(mask, drop=True).values.flatten(),
label=label,
**kwargs,
)
)
elif hue is None or hue_style == "continuous":
if data["sizes"] is not None:
kwargs.update(s=data["sizes"].values.ravel())
if data["hue"] is not None:
kwargs.update(c=data["hue"].values.ravel())
primitive = ax.scatter(
data["x"].values.ravel(), data["y"].values.ravel(), **cmap_params, **kwargs
)
return primitive
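# A minimal usage sketch of the decorated scatter above (assumption: an xarray Dataset
# ``ds`` with data variables "A" and "B" and a variable "z" to colour by; the names are
# illustrative only and not part of this module):
# >>> import xarray as xr
# >>> ds = xr.Dataset({"A": ("t", [1.0, 2.0, 3.0]),
# ...                  "B": ("t", [3.0, 1.0, 2.0]),
# ...                  "z": ("t", [0.1, 0.5, 0.9])})
# >>> ds.plot.scatter(x="A", y="B", hue="z")  # continuous hue adds a colorbar by default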
|
import os.path as op
import numpy as np
import pytest
from numpy.testing import assert_allclose
from mne.chpi import read_head_pos
from mne.datasets import testing
from mne.io import read_raw_fif
from mne.preprocessing import (annotate_movement, compute_average_dev_head_t,
annotate_muscle_zscore)
from mne import Annotations
data_path = testing.data_path(download=False)
sss_path = op.join(data_path, 'SSS')
pre = op.join(sss_path, 'test_move_anon_')
raw_fname = pre + 'raw.fif'
pos_fname = op.join(data_path, 'SSS', 'test_move_anon_raw.pos')
@testing.requires_testing_data
def test_movement_annotation_head_correction():
"""Test correct detection movement artifact and dev_head_t."""
raw = read_raw_fif(raw_fname, allow_maxshield='yes').load_data()
pos = read_head_pos(pos_fname)
# Check 5 rotation segments are detected
annot_rot, [] = annotate_movement(raw, pos, rotation_velocity_limit=5)
assert(annot_rot.duration.size == 5)
# Check 2 translation vel. segments are detected
annot_tra, [] = annotate_movement(raw, pos, translation_velocity_limit=.05)
assert(annot_tra.duration.size == 2)
# Check 1 movement distance segment is detected
annot_dis, disp = annotate_movement(raw, pos, mean_distance_limit=.02)
assert(annot_dis.duration.size == 1)
# Check correct trans mat
raw.set_annotations(annot_rot + annot_tra + annot_dis)
dev_head_t = compute_average_dev_head_t(raw, pos)
dev_head_t_ori = np.array([
[0.9957292, -0.08688804, 0.03120615, 0.00698271],
[0.09020767, 0.9875856, -0.12859731, -0.0159098],
[-0.01964518, 0.1308631, 0.99120578, 0.07258289],
[0., 0., 0., 1.]])
assert_allclose(dev_head_t_ori, dev_head_t['trans'], rtol=1e-5, atol=0)
# Smoke test skipping time due to previous annotations.
raw.set_annotations(Annotations([raw.times[0]], 0.1, 'bad'))
annot_dis, disp = annotate_movement(raw, pos, mean_distance_limit=.02)
assert(annot_dis.duration.size == 1)
@testing.requires_testing_data
def test_muscle_annotation():
"""Test correct detection muscle artifacts."""
raw = read_raw_fif(raw_fname, allow_maxshield='yes').load_data()
raw.notch_filter([50, 110, 150])
# Check 2 muscle segments are detected
annot_muscle, scores = annotate_muscle_zscore(raw, ch_type='mag',
threshold=10)
onset = annot_muscle.onset * raw.info['sfreq']
onset = onset.astype(int)
np.testing.assert_array_equal(scores[onset].astype(int), np.array([23,
10]))
assert(annot_muscle.duration.size == 2)
@testing.requires_testing_data
def test_muscle_annotation_without_meeg_data():
"""Call annotate_muscle_zscore with data without meg or eeg."""
raw = read_raw_fif(raw_fname, allow_maxshield='yes')
raw.crop(0, .1).load_data()
raw.pick_types(meg=False, stim=True)
with pytest.raises(ValueError, match="No M/EEG channel types found"):
annot_muscle, scores = annotate_muscle_zscore(raw, threshold=10)
|
from django.utils.translation import gettext_lazy as _
from shop.conf import app_settings
from shop.serializers.cart import ExtraCartRow
from shop.modifiers.base import BaseCartModifier
class CartIncludeTaxModifier(BaseCartModifier):
"""
This tax calculator presumes that unit prices are net prices, hence also the subtotal,
and that the tax is added globally to the cart's total.
By placing this modifier after the shipping modifiers, one can add tax to
the shipping costs. Otherwise shipping costs are considered tax free.
"""
identifier = 'taxes'
taxes = app_settings.VALUE_ADDED_TAX / 100
def add_extra_cart_row(self, cart, request):
"""
Add a field on cart.extra_price_fields:
"""
amount = cart.subtotal * self.taxes
instance = {
'label': _("plus {}% VAT").format(app_settings.VALUE_ADDED_TAX),
'amount': amount,
}
cart.extra_rows[self.identifier] = ExtraCartRow(instance)
cart.total += amount
class CartExcludedTaxModifier(BaseCartModifier):
"""
This tax calculator presumes that unit prices are gross prices, hence also the subtotal,
and that the tax is calculated per cart but not added to the cart.
"""
identifier = 'taxes'
taxes = 1 - 1 / (1 + app_settings.VALUE_ADDED_TAX / 100)
def add_extra_cart_row(self, cart, request):
"""
Add a field on cart.extra_price_fields:
"""
amount = cart.subtotal * self.taxes
instance = {
'label': _("{}% VAT incl.").format(app_settings.VALUE_ADDED_TAX),
'amount': amount,
}
cart.extra_rows[self.identifier] = ExtraCartRow(instance)
def add_extra_cart_item_row(self, cart_item, request):
amount = cart_item.line_total * self.taxes
instance = {
'label': _("{}% VAT incl.").format(app_settings.VALUE_ADDED_TAX),
'amount': amount,
}
cart_item.extra_rows[self.identifier] = ExtraCartRow(instance)
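# Worked example for the two modifiers above (assumption: app_settings.VALUE_ADDED_TAX == 19):
# - CartIncludeTaxModifier: taxes = 19 / 100 = 0.19, so a net subtotal of 100.00 yields an
#   extra row of 19.00 and raises cart.total to 119.00.
# - CartExcludedTaxModifier: taxes = 1 - 1 / (1 + 19 / 100) ≈ 0.1597, so a gross subtotal of
#   119.00 reports roughly 19.00 of included VAT while cart.total stays unchanged.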
|
import logging
from datetime import datetime as dt
import bson
import pandas as pd
import pymongo
import six
from .bson_store import BSONStore
from .._util import indent
from ..decorators import mongo_retry
from ..exceptions import NoDataFoundException
logger = logging.getLogger(__name__)
METADATA_STORE_TYPE = 'MetadataStore'
class MetadataStore(BSONStore):
"""
Metadata Store. This stores metadata with timestamps to allow temporal queries.
Entries are stored in the following format:
'symbol': symbol name
'metadata': metadata to be persisted
'start_time': when entry becomes effective
'end_time': (Optional) when entry expires. If not set, it is still in effect
For each symbol, the end_time of an entry should match the start_time of the next one, except for the current entry.
"""
@classmethod
def initialize_library(cls, arctic_lib, hashed=True, **kwargs):
MetadataStore(arctic_lib)._ensure_index()
BSONStore.initialize_library(arctic_lib, hashed, **kwargs)
@mongo_retry
def _ensure_index(self):
self.create_index([('symbol', pymongo.ASCENDING), ('start_time', pymongo.DESCENDING)],
unique=True, background=True)
def __init__(self, arctic_lib):
self._arctic_lib = arctic_lib
self._reset()
def _reset(self):
self._collection = self._arctic_lib.get_top_level_collection().metadata
def __getstate__(self):
return {'arctic_lib': self._arctic_lib}
def __setstate__(self, state):
return MetadataStore.__init__(self, state['arctic_lib'])
def __str__(self):
return """<%s at %s>\n%s""" % (self.__class__.__name__, hex(id(self)), indent(str(self._arctic_lib), 4))
def __repr__(self):
return str(self)
@mongo_retry
def list_symbols(self, regex=None, as_of=None, **kwargs):
"""
Return the symbols in this library.
Parameters
----------
as_of : `datetime.datetime`
filter symbols valid at given time
regex : `str`
filter symbols by the passed in regular expression
kwargs :
kwarg keys are used as fields to query for symbols with metadata matching
the kwargs query
Returns
-------
String list of symbols in the library
"""
# Skip aggregation pipeline
if not (regex or as_of or kwargs):
return self.distinct('symbol')
# Index-based query part
index_query = {}
if as_of is not None:
index_query['start_time'] = {'$lte': as_of}
if regex or as_of:
# make sure that symbol is present in query even if only as_of is specified to avoid document scans
# see 'Pipeline Operators and Indexes' at
# https://docs.mongodb.com/manual/core/aggregation-pipeline/#aggregation-pipeline-operators-and-performance
index_query['symbol'] = {'$regex': regex or '^'}
# Document query part
data_query = {}
if kwargs:
for k, v in six.iteritems(kwargs):
data_query['metadata.' + k] = v
# Sort using index, relying on https://docs.mongodb.com/manual/core/aggregation-pipeline-optimization/
pipeline = [{'$sort': {'symbol': pymongo.ASCENDING,
'start_time': pymongo.DESCENDING}}]
# Index-based filter on symbol and start_time
if index_query:
pipeline.append({'$match': index_query})
# Group by 'symbol' and get the latest known data
pipeline.append({'$group': {'_id': '$symbol',
'metadata': {'$first': '$metadata'}}})
# Match the data fields
if data_query:
pipeline.append({'$match': data_query})
# Return only 'symbol' field value
pipeline.append({'$project': {'_id': 0, 'symbol': '$_id'}})
return sorted(r['symbol'] for r in self.aggregate(pipeline))
@mongo_retry
def has_symbol(self, symbol):
return self.find_one({'symbol': symbol}) is not None
@mongo_retry
def read_history(self, symbol):
"""
Return all metadata saved for `symbol`
Parameters
----------
symbol : `str`
symbol name for the item
Returns
-------
pandas.DataFrame containing timestamps and metadata entries
"""
find = self.find({'symbol': symbol}, sort=[('start_time', pymongo.ASCENDING)])
times = []
entries = []
for item in find:
times.append(item['start_time'])
entries.append(item['metadata'])
return pd.DataFrame({symbol: entries}, times)
@mongo_retry
def read(self, symbol, as_of=None):
"""
Return current metadata saved for `symbol`
Parameters
----------
symbol : `str`
symbol name for the item
as_of : `datetime.datetime`
return entry valid at given time
Returns
-------
metadata
"""
if as_of is not None:
res = self.find_one({'symbol': symbol, 'start_time': {'$lte': as_of}},
sort=[('start_time', pymongo.DESCENDING)])
else:
res = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
return res['metadata'] if res is not None else None
def write_history(self, collection):
"""
Manually overwrite entire metadata history for symbols in `collection`
Parameters
----------
collection : `list of pandas.DataFrame`
with symbol names as headers and timestamps as indices
(the same format as output of read_history)
Example:
[pandas.DataFrame({'symbol': [{}]}, [datetime.datetime.utcnow()])]
"""
documents = []
for dataframe in collection:
if len(dataframe.columns) != 1:
raise ValueError('More than one symbol found in a DataFrame')
symbol = dataframe.columns[0]
times = dataframe.index
entries = dataframe[symbol].values
if self.has_symbol(symbol):
self.purge(symbol)
doc = {'symbol': symbol, 'metadata': entries[0], 'start_time': times[0]}
for metadata, start_time in zip(entries[1:], times[1:]):
if metadata == doc['metadata']:
continue
doc['end_time'] = start_time
documents.append(doc)
doc = {'symbol': symbol, 'metadata': metadata, 'start_time': start_time}
documents.append(doc)
self.insert_many(documents)
def append(self, symbol, metadata, start_time=None):
"""
Update metadata entry for `symbol`
Parameters
----------
symbol : `str`
symbol name for the item
metadata : `dict`
to be persisted
start_time : `datetime.datetime`
when metadata becomes effective
Default: datetime.datetime.utcnow()
"""
if start_time is None:
start_time = dt.utcnow()
old_metadata = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
if old_metadata is not None:
if old_metadata['start_time'] >= start_time:
raise ValueError('start_time={} is earlier than the last metadata @{}'.format(start_time,
old_metadata['start_time']))
if old_metadata['metadata'] == metadata:
return old_metadata
elif metadata is None:
return
self.find_one_and_update({'symbol': symbol}, {'$set': {'end_time': start_time}},
sort=[('start_time', pymongo.DESCENDING)])
document = {'_id': bson.ObjectId(), 'symbol': symbol, 'metadata': metadata, 'start_time': start_time}
mongo_retry(self.insert_one)(document)
logger.debug('Finished writing metadata for %s', symbol)
return document
def prepend(self, symbol, metadata, start_time=None):
"""
Prepend a metadata entry for `symbol`
Parameters
----------
symbol : `str`
symbol name for the item
metadata : `dict`
to be persisted
start_time : `datetime.datetime`
when metadata becomes effective
Default: datetime.datetime.min
"""
if metadata is None:
return
if start_time is None:
start_time = dt.min
old_metadata = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.ASCENDING)])
if old_metadata is not None:
if old_metadata['start_time'] <= start_time:
raise ValueError('start_time={} is later than the first metadata @{}'.format(start_time,
old_metadata['start_time']))
if old_metadata['metadata'] == metadata:
self.find_one_and_update({'symbol': symbol}, {'$set': {'start_time': start_time}},
sort=[('start_time', pymongo.ASCENDING)])
old_metadata['start_time'] = start_time
return old_metadata
end_time = old_metadata.get('start_time')
else:
end_time = None
document = {'_id': bson.ObjectId(), 'symbol': symbol, 'metadata': metadata, 'start_time': start_time}
if end_time is not None:
document['end_time'] = end_time
mongo_retry(self.insert_one)(document)
logger.debug('Finished writing metadata for %s', symbol)
return document
def pop(self, symbol):
"""
Delete current metadata of `symbol`
Parameters
----------
symbol : `str`
symbol name to delete
Returns
-------
Deleted metadata
"""
last_metadata = self.find_one({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
if last_metadata is None:
raise NoDataFoundException('No metadata found for symbol {}'.format(symbol))
self.find_one_and_delete({'symbol': symbol}, sort=[('start_time', pymongo.DESCENDING)])
mongo_retry(self.find_one_and_update)({'symbol': symbol}, {'$unset': {'end_time': ''}},
sort=[('start_time', pymongo.DESCENDING)])
return last_metadata
@mongo_retry
def purge(self, symbol):
"""
Delete all metadata of `symbol`
Parameters
----------
symbol : `str`
symbol name to delete
"""
logger.warning("Deleting entire metadata history for %r from %r" % (symbol, self._arctic_lib.get_name()))
self.delete_many({'symbol': symbol})
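# Usage sketch of the temporal semantics described in the class docstring (assumption:
# ``meta`` is a MetadataStore obtained from a configured Arctic library; symbol and values
# are illustrative):
# >>> meta.append('EURUSD', {'source': 'feed-a'})       # becomes the current entry
# >>> meta.append('EURUSD', {'source': 'feed-b'})       # closes the previous entry via end_time
# >>> meta.read('EURUSD')                               # -> {'source': 'feed-b'}
# >>> meta.read('EURUSD', as_of=some_earlier_datetime)  # -> {'source': 'feed-a'}
# >>> meta.read_history('EURUSD')                       # DataFrame of all entries by start_time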
|
import logging
from babelfish import Language
from requests import Session
from . import Provider
from ..subtitle import Subtitle
logger = logging.getLogger(__name__)
def get_subhash(hash):
"""Get a second hash based on napiprojekt's hash.
:param str hash: napiprojekt's hash.
:return: the subhash.
:rtype: str
"""
idx = [0xe, 0x3, 0x6, 0x8, 0x2]
mul = [2, 2, 5, 4, 3]
add = [0, 0xd, 0x10, 0xb, 0x5]
b = []
for i in range(len(idx)):
a = add[i]
m = mul[i]
i = idx[i]
t = a + int(hash[i], 16)
v = int(hash[t:t + 2], 16)
b.append(('%x' % (v * m))[-1])
return ''.join(b)
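# How the loop above works (derived from the code itself, not an external spec): each of the
# five rounds reads the hex digit of the input hash at position idx[i], adds add[i] to form an
# offset t, takes the two hex digits hash[t:t + 2], multiplies their value by mul[i] and keeps
# only the last hex digit, so the subhash is always a 5-character hex string, e.g.:
# >>> len(get_subhash('9e107d9d372bb6826bd81d3542a419d6'))  # any md5-style 32-char hash
# 5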
class NapiProjektSubtitle(Subtitle):
"""NapiProjekt Subtitle."""
provider_name = 'napiprojekt'
def __init__(self, language, hash):
super(NapiProjektSubtitle, self).__init__(language)
self.hash = hash
self.content = None
@property
def id(self):
return self.hash
@property
def info(self):
return self.hash
def get_matches(self, video):
matches = set()
# hash
if 'napiprojekt' in video.hashes and video.hashes['napiprojekt'] == self.hash:
matches.add('hash')
return matches
class NapiProjektProvider(Provider):
"""NapiProjekt Provider."""
languages = {Language.fromalpha2(l) for l in ['pl']}
required_hash = 'napiprojekt'
server_url = 'http://napiprojekt.pl/unit_napisy/dl.php'
subtitle_class = NapiProjektSubtitle
def __init__(self):
self.session = None
def initialize(self):
self.session = Session()
self.session.headers['User-Agent'] = self.user_agent
def terminate(self):
self.session.close()
def query(self, language, hash):
params = {
'v': 'dreambox',
'kolejka': 'false',
'nick': '',
'pass': '',
'napios': 'Linux',
'l': language.alpha2.upper(),
'f': hash,
't': get_subhash(hash)}
logger.info('Searching subtitle %r', params)
r = self.session.get(self.server_url, params=params, timeout=10)
r.raise_for_status()
# handle subtitles not found and errors
if r.content[:4] == b'NPc0':
logger.debug('No subtitles found')
return None
subtitle = self.subtitle_class(language, hash)
subtitle.content = r.content
logger.debug('Found subtitle %r', subtitle)
return subtitle
def list_subtitles(self, video, languages):
return [s for s in [self.query(l, video.hashes['napiprojekt']) for l in languages] if s is not None]
def download_subtitle(self, subtitle):
# there is no download step, content is already filled from listing subtitles
pass
|
import itertools
import numpy as np
_counter = itertools.count()
def parameterized(names, params):
def decorator(func):
func.param_names = names
func.params = params
return func
return decorator
def requires_dask():
try:
import dask # noqa: F401
except ImportError:
raise NotImplementedError()
def randn(shape, frac_nan=None, chunks=None, seed=0):
rng = np.random.RandomState(seed)
if chunks is None:
x = rng.standard_normal(shape)
else:
import dask.array as da
rng = da.random.RandomState(seed)
x = rng.standard_normal(shape, chunks=chunks)
if frac_nan is not None:
inds = rng.choice(range(x.size), int(x.size * frac_nan))
x.flat[inds] = np.nan
return x
def randint(low, high=None, size=None, frac_minus=None, seed=0):
rng = np.random.RandomState(seed)
x = rng.randint(low, high, size)
if frac_minus is not None:
inds = rng.choice(range(x.size), int(x.size * frac_minus))
x.flat[inds] = -1
return x
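# Illustrative calls for the helpers above (assumption: plain numpy arrays, i.e. chunks=None):
# >>> a = randn((100, 100), frac_nan=0.1)          # float array with roughly 10% NaNs
# >>> b = randint(0, 10, (1000,), frac_minus=0.2)  # int array with roughly 20% entries set to -1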
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.metrics import fid_score as fid_score_lib
import numpy as np
import tensorflow as tf
class FIDScoreTest(tf.test.TestCase):
def test_fid_computation(self):
real_data = np.ones((100, 2))
real_data[:50, 0] = 2
gen_data = np.ones((100, 2)) * 9
gen_data[50:, 0] = 2
# mean(real_data) = [1.5, 1]
# Cov(real_data) = [[ 0.2525, 0], [0, 0]]
# mean(gen_data) = [5.5, 9]
# Cov(gen_data) = [[12.37, 0], [0, 0]]
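# The expected value follows from the standard Frechet distance formula (assuming
# compute_fid_from_activations implements it with the sample covariances above):
#   FID = ||mu_r - mu_g||^2 + Tr(C_r + C_g - 2 * (C_r @ C_g)^(1/2))
#       = (16 + 64) + (0.2525 + 12.37 - 2 * sqrt(0.2525 * 12.37))
#       ≈ 80 + 9.09 ≈ 89.09, matching the 89.091 checked below.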
result = fid_score_lib.compute_fid_from_activations(real_data, gen_data)
self.assertNear(result, 89.091, 1e-4)
if __name__ == "__main__":
tf.test.main()
|
from collections import namedtuple
class VersionedItem(namedtuple('VersionedItem', ['symbol', 'library', 'data', 'version', 'metadata', 'host'])):
"""
Class representing a Versioned object in VersionStore.
"""
def __new__(cls, symbol, library, data, version, metadata, host=None):
return super(VersionedItem, cls).__new__(cls, symbol, library, data, version, metadata, host)
def metadata_dict(self):
return {'symbol': self.symbol, 'library': self.library, 'version': self.version}
def __repr__(self):
return str(self)
def __str__(self):
return "VersionedItem(symbol=%s,library=%s,data=%s,version=%s,metadata=%s,host=%s)" % \
(self.symbol, self.library, type(self.data), self.version, self.metadata, self.host)
ChangedItem = namedtuple('ChangedItem', ['symbol', 'orig_version', 'new_version', 'changes'])
|
from cerberus.tests import assert_fail, assert_success
def test_keysrules():
schema = {
'a_dict_with_keysrules': {
'type': 'dict',
'keysrules': {'type': 'string', 'regex': '[a-z]+'},
}
}
assert_success({'a_dict_with_keysrules': {'key': 'value'}}, schema=schema)
assert_fail({'a_dict_with_keysrules': {'KEY': 'value'}}, schema=schema)
|
import arrow
import copy
from flask import current_app
from lemur import database
from lemur.sources.models import Source
from lemur.certificates.models import Certificate
from lemur.certificates import service as certificate_service
from lemur.endpoints import service as endpoint_service
from lemur.extensions import metrics, sentry
from lemur.destinations import service as destination_service
from lemur.certificates.schemas import CertificateUploadInputSchema
from lemur.common.utils import find_matching_certificates_by_hash, parse_certificate
from lemur.common.defaults import serial
from lemur.plugins.base import plugins
from lemur.plugins.utils import get_plugin_option, set_plugin_option
def certificate_create(certificate, source):
data, errors = CertificateUploadInputSchema().load(certificate)
if errors:
raise Exception(
"Unable to import certificate: {reasons}".format(reasons=errors)
)
data["creator"] = certificate["creator"]
cert = certificate_service.import_certificate(**data)
cert.description = "This certificate was automatically discovered by Lemur"
cert.sources.append(source)
sync_update_destination(cert, source)
database.update(cert)
return cert
def certificate_update(certificate, source):
for s in certificate.sources:
if s.label == source.label:
break
else:
certificate.sources.append(source)
sync_update_destination(certificate, source)
database.update(certificate)
def sync_update_destination(certificate, source):
dest = destination_service.get_by_label(source.label)
if dest:
for d in certificate.destinations:
if d.label == source.label:
break
else:
certificate.destinations.append(dest)
def sync_endpoints(source):
new, updated, updated_by_hash = 0, 0, 0
current_app.logger.debug("Retrieving endpoints from {0}".format(source.label))
s = plugins.get(source.plugin_name)
try:
endpoints = s.get_endpoints(source.options)
except NotImplementedError:
current_app.logger.warning(
"Unable to sync endpoints for source {0} plugin has not implemented 'get_endpoints'".format(
source.label
)
)
return new, updated, updated_by_hash
for endpoint in endpoints:
exists = endpoint_service.get_by_dnsname_and_port(
endpoint["dnsname"], endpoint["port"]
)
certificate_name = endpoint.pop("certificate_name")
endpoint["certificate"] = certificate_service.get_by_name(certificate_name)
# if getting the cert by name failed, we attempt a search via serial number and hash comparison
# and link the endpoint certificate to the Lemur certificate
if not endpoint["certificate"]:
certificate_attached_to_endpoint = None
try:
certificate_attached_to_endpoint = s.get_certificate_by_name(certificate_name, source.options)
except NotImplementedError:
current_app.logger.warning(
"Unable to describe server certificate for endpoints in source {0}:"
" plugin has not implemented 'get_certificate_by_name'".format(
source.label
)
)
sentry.captureException()
if certificate_attached_to_endpoint:
lemur_matching_cert, updated_by_hash_tmp = find_cert(certificate_attached_to_endpoint)
updated_by_hash += updated_by_hash_tmp
if lemur_matching_cert:
endpoint["certificate"] = lemur_matching_cert[0]
if len(lemur_matching_cert) > 1:
current_app.logger.error(
"Too Many Certificates Found{0}. Name: {1} Endpoint: {2}".format(
len(lemur_matching_cert), certificate_name, endpoint["name"]
)
)
metrics.send("endpoint.certificate.conflict",
"gauge", len(lemur_matching_cert),
metric_tags={"cert": certificate_name, "endpoint": endpoint["name"],
"acct": s.get_option("accountNumber", source.options)})
if not endpoint["certificate"]:
current_app.logger.error({
"message": "Certificate Not Found",
"certificate_name": certificate_name,
"endpoint_name": endpoint["name"],
"dns_name": endpoint.get("dnsname"),
"account": s.get_option("accountNumber", source.options),
})
metrics.send("endpoint.certificate.not.found",
"counter", 1,
metric_tags={"cert": certificate_name, "endpoint": endpoint["name"],
"acct": s.get_option("accountNumber", source.options),
"dnsname": endpoint.get("dnsname")})
continue
policy = endpoint.pop("policy")
policy_ciphers = []
for nc in policy["ciphers"]:
policy_ciphers.append(endpoint_service.get_or_create_cipher(name=nc))
policy["ciphers"] = policy_ciphers
endpoint["policy"] = endpoint_service.get_or_create_policy(**policy)
endpoint["source"] = source
if not exists:
current_app.logger.debug(
"Endpoint Created: Name: {name}".format(name=endpoint["name"])
)
endpoint_service.create(**endpoint)
new += 1
else:
current_app.logger.debug("Endpoint Updated: {}".format(endpoint))
endpoint_service.update(exists.id, **endpoint)
updated += 1
return new, updated, updated_by_hash
def find_cert(certificate):
updated_by_hash = 0
exists = False
if certificate.get("search", None):
conditions = certificate.pop("search")
exists = certificate_service.get_by_attributes(conditions)
if not exists and certificate.get("name"):
result = certificate_service.get_by_name(certificate["name"])
if result:
exists = [result]
if not exists and certificate.get("serial"):
exists = certificate_service.get_by_serial(certificate["serial"])
if not exists:
cert = parse_certificate(certificate["body"])
matching_serials = certificate_service.get_by_serial(serial(cert))
exists = find_matching_certificates_by_hash(cert, matching_serials)
updated_by_hash += 1
exists = [x for x in exists if x]
return exists, updated_by_hash
# TODO this is very slow as we don't batch update certificates
def sync_certificates(source, user):
new, updated, updated_by_hash = 0, 0, 0
current_app.logger.debug("Retrieving certificates from {0}".format(source.label))
s = plugins.get(source.plugin_name)
certificates = s.get_certificates(source.options)
# emitting the count of certificates on the source
metrics.send("sync_certificates_count",
"gauge", len(certificates),
metric_tags={"source": source.label})
for certificate in certificates:
exists, updated_by_hash = find_cert(certificate)
if not certificate.get("owner"):
certificate["owner"] = user.email
certificate["creator"] = user
if not exists:
certificate_create(certificate, source)
new += 1
else:
for e in exists:
if certificate.get("external_id"):
e.external_id = certificate["external_id"]
if certificate.get("authority_id"):
e.authority_id = certificate["authority_id"]
certificate_update(e, source)
updated += 1
return new, updated, updated_by_hash
def sync(source, user):
new_certs, updated_certs, updated_certs_by_hash = sync_certificates(source, user)
new_endpoints, updated_endpoints, updated_endpoints_by_hash = sync_endpoints(source)
metrics.send("sync.updated_certs_by_hash",
"gauge", updated_certs_by_hash,
metric_tags={"source": source.label})
metrics.send("sync.updated_endpoints_by_hash",
"gauge", updated_endpoints_by_hash,
metric_tags={"source": source.label})
source.last_run = arrow.utcnow()
database.update(source)
return {
"endpoints": (new_endpoints, updated_endpoints),
"certificates": (new_certs, updated_certs),
}
def create(label, plugin_name, options, description=None):
"""
Creates a new source that can then be used as a source for certificates.
:param label: Source common name
:param plugin_name:
:param options:
:param description:
:rtype : Source
:return: New source
"""
source = Source(
label=label, options=options, plugin_name=plugin_name, description=description
)
return database.create(source)
def update(source_id, label, plugin_name, options, description):
"""
Updates an existing source.
:param source_id: Lemur assigned ID
:param label: Source common name
:param options:
:param plugin_name:
:param description:
:rtype : Source
:return:
"""
source = get(source_id)
source.label = label
source.plugin_name = plugin_name
source.options = options
source.description = description
return database.update(source)
def delete(source_id):
"""
Deletes a source.
:param source_id: Lemur assigned ID
"""
database.delete(get(source_id))
def get(source_id):
"""
Retrieves a source by its Lemur assigned ID.
:param source_id: Lemur assigned ID
:rtype : Source
:return:
"""
return database.get(Source, source_id)
def get_by_label(label):
"""
Retrieves a source by its label
:param label:
:return:
"""
return database.get(Source, label, field="label")
def get_all():
"""
Retrieves all sources currently known by Lemur.
:return:
"""
query = database.session_query(Source)
return database.find_all(query, Source, {}).all()
def render(args):
filt = args.pop("filter")
certificate_id = args.pop("certificate_id", None)
if certificate_id:
query = database.session_query(Source).join(Certificate, Source.certificate)
query = query.filter(Certificate.id == certificate_id)
else:
query = database.session_query(Source)
if filt:
terms = filt.split(";")
query = database.filter(query, Source, terms)
return database.sort_and_page(query, Source, args)
def add_aws_destination_to_sources(dst):
"""
Given a destination, check whether it can be added as a source, and add it if it is not already one.
We identify qualified destinations based on the sync_as_source attribute of the plugin.
The destination's sync_as_source_name reveals the name of the suitable source plugin.
We rely on account numbers to avoid duplicates.
:return: True if the destination was added as a source, False otherwise
"""
# a set of all accounts numbers available as sources
src_accounts = set()
sources = get_all()
for src in sources:
src_accounts.add(get_plugin_option("accountNumber", src.options))
# check
destination_plugin = plugins.get(dst.plugin_name)
account_number = get_plugin_option("accountNumber", dst.options)
if (
account_number is not None
and destination_plugin.sync_as_source is not None
and destination_plugin.sync_as_source
and (account_number not in src_accounts)
):
src_options = copy.deepcopy(
plugins.get(destination_plugin.sync_as_source_name).options
)
set_plugin_option("accountNumber", account_number, src_options)
create(
label=dst.label,
plugin_name=destination_plugin.sync_as_source_name,
options=src_options,
description=dst.description,
)
return True
return False
|
import mock
from paasta_tools.autoscale_all_services import main
@mock.patch("paasta_tools.autoscale_all_services.logging", autospec=True)
@mock.patch("paasta_tools.autoscale_all_services.autoscale_services", autospec=True)
@mock.patch("paasta_tools.autoscale_all_services.parse_args", autospec=True)
def test_main(mock_parse_args, mock_autoscale_services, logging):
mock_parse_args.return_value = mock.Mock(soa_dir="/nail/blah", services=None)
main()
mock_autoscale_services.assert_called_with(soa_dir="/nail/blah", services=None)
|
from . import core as html5
from . import utils
class Button(html5.Button):
def __init__(self, txt=None, callback=None, className=None, *args, **kwargs):
super().__init__(*args, **kwargs)
self["class"] = "btn"
if className:
self.addClass(className)
self["type"] = "button"
if txt is not None:
self.setText(txt)
self.callback = callback
self.sinkEvent("onClick")
def setText(self, txt):
if txt is not None:
self.element.innerHTML = txt
self["title"] = txt
else:
self.element.innerHTML = ""
self["title"] = ""
def onClick(self, event):
event.stopPropagation()
event.preventDefault()
if self.callback is not None:
self.callback(self)
class Input(html5.Input):
def __init__(self, type="text", placeholder=None, callback=None, id=None, focusCallback=None, *args, **kwargs):
"""
:param type: Input type. Default: "text
:param placeholder: Placeholder text. Default: None
:param callback: Function to be called onChanged: callback(id, value)
:param id: Optional id of the input element. Will be passed to callback
:return:
"""
super().__init__(*args, **kwargs)
self["class"] = "input"
self["type"] = type
if placeholder is not None:
self["placeholder"] = placeholder
self.callback = callback
if id is not None:
self["id"] = id
self.sinkEvent("onChange")
self.focusCallback = focusCallback
if focusCallback:
self.sinkEvent("onFocus")
def onChange(self, event):
event.stopPropagation()
event.preventDefault()
if self.callback is not None:
self.callback(self, self["id"], self["value"])
def onFocus(self, event):
event.stopPropagation()
event.preventDefault()
if self.focusCallback is not None:
self.focusCallback(self, self["id"], self["value"])
def onDetach(self):
super().onDetach()
self.callback = None
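# A minimal usage sketch for Input (names and callback are illustrative; note that the
# callback receives the sender widget first, then id and value, as wired in onChange above):
# >>> def on_name_changed(sender, id, value):
# ...     print(id, value)
# >>> name_input = Input(type="text", placeholder="Your name", callback=on_name_changed, id="name")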
class Popup(html5.Div):
def __init__(self, title=None, id=None, className=None, icon=None, enableShortcuts=True, closeable=True, *args, **kwargs):
super().__init__("""
<div class="box" [name]="popupBox">
<div class="box-head" [name]="popupHead">
<div class="item" [name]="popupHeadItem">
<div class="item-image">
<i class="i i--small" [name]="popupIcon"></i>
</div>
<div class="item-content">
<div class="item-headline" [name]="popupHeadline"></div>
</div>
</div>
</div>
<div class="box-body box--content" [name]="popupBody"></div>
<div class="box-foot box--content bar" [name]="popupFoot"></div>
</div>
""")
self.appendChild = self.popupBody.appendChild
self.fromHTML = lambda *args, **kwargs: self.popupBody.fromHTML(*args, **kwargs) if kwargs.get("bindTo") else self.popupBody.fromHTML(bindTo=self, *args, **kwargs)
self["class"] = "popup popup--center is-active"
if className:
self.addClass(className)
if closeable:
closeBtn = Button("×", self.close, className="item-action")
closeBtn.removeClass("btn")
self.popupHeadItem.appendChild(closeBtn)
if title:
self.popupHeadline.appendChild(title)
if icon:
self.popupIcon.appendChild(icon[0])
elif title:
self.popupIcon.appendChild(title[0])
else:
self.popupIcon.appendChild("Vi") #fixme!!! this _LIBRARY_ is not only used in the Vi...
# id can be used to pass information to callbacks
self.id = id
#FIXME: Implement a global overlay! One popupOverlay next to a list of popups.
self.popupOverlay = html5.Div()
self.popupOverlay["class"] = "popup-overlay is-active"
self.enableShortcuts = enableShortcuts
self.onDocumentKeyDownMethod = None
self.popupOverlay.appendChild(self)
html5.Body().appendChild(self.popupOverlay)
#FIXME: Close/Cancel every popup with click on popupCloseBtn without removing the global overlay.
def onAttach(self):
super(Popup, self).onAttach()
if self.enableShortcuts:
self.onDocumentKeyDownMethod = self.onDocumentKeyDown # safe reference to method
html5.document.addEventListener("keydown", self.onDocumentKeyDownMethod)
def onDetach(self):
super(Popup, self).onDetach()
if self.enableShortcuts:
html5.document.removeEventListener("keydown", self.onDocumentKeyDownMethod)
def onDocumentKeyDown(self, event):
if html5.isEscape(event):
self.close()
def close(self, *args, **kwargs):
html5.Body().removeChild(self.popupOverlay)
self.popupOverlay = None
class InputDialog(Popup):
def __init__(self, text, value="", successHandler=None, abortHandler=None,
successLbl="OK", abortLbl="Cancel", placeholder="", *args, **kwargs):
super().__init__(*args, **kwargs)
self.addClass("popup--inputdialog")
self.sinkEvent("onKeyDown", "onKeyUp")
self.successHandler = successHandler
self.abortHandler = abortHandler
self.fromHTML(
"""
<div class="input-group">
<label class="label">
{{text}}
</label>
<input class="input" [name]="inputElem" value="{{value}}" placeholder="{{placeholder}}" />
</div>
""",
vars={
"text": text,
"value": value,
"placeholder": placeholder
}
)
# Cancel
self.popupFoot.appendChild(Button(abortLbl, self.onCancel, className="btn--cancel btn--danger"))
# Okay
self.okayBtn = Button(successLbl, self.onOkay, className="btn--okay btn--primary")
if not value:
self.okayBtn.disable()
self.popupFoot.appendChild(self.okayBtn)
self.inputElem.focus()
def onKeyDown(self, event):
if html5.isReturn(event) and self.inputElem["value"]:
event.stopPropagation()
event.preventDefault()
self.onOkay()
def onKeyUp(self, event):
if self.inputElem["value"]:
self.okayBtn.enable()
else:
self.okayBtn.disable()
def onDocumentKeyDown(self, event):
if html5.isEscape(event):
event.stopPropagation()
event.preventDefault()
self.onCancel()
def onOkay(self, *args, **kwargs):
if self.successHandler:
self.successHandler(self, self.inputElem["value"])
self.close()
def onCancel(self, *args, **kwargs):
if self.abortHandler:
self.abortHandler(self, self.inputElem["value"])
self.close()
class Alert(Popup):
"""
Displays an alert message box with an OK button.
"""
def __init__(self, msg, title=None, className=None, okCallback=None, okLabel="OK", icon="!", closeable=True, *args, **kwargs):
super().__init__(title, className=None, icon=icon, closeable=closeable, *args, **kwargs)
self.addClass("popup--alert")
if className:
self.addClass(className)
self.okCallback = okCallback
message = html5.Span()
message.addClass("alert-msg")
self.popupBody.appendChild(message)
if isinstance(msg, str):
msg = msg.replace("\n", "<br>")
message.appendChild(msg, bindTo=False)
self.sinkEvent("onKeyDown")
if closeable:
okBtn = Button(okLabel, callback=self.onOkBtnClick)
okBtn.addClass("btn--okay btn--primary")
self.popupFoot.appendChild(okBtn)
okBtn.focus()
def drop(self):
self.okCallback = None
self.close()
def onOkBtnClick(self, sender=None):
if self.okCallback:
self.okCallback(self)
self.drop()
def onKeyDown(self, event):
if html5.isReturn(event):
event.stopPropagation()
event.preventDefault()
self.onOkBtnClick()
class YesNoDialog(Popup):
def __init__(self, question, title=None, yesCallback=None, noCallback=None,
yesLabel="Yes", noLabel="No", icon="?",
closeable=False, *args, **kwargs):
super().__init__(title, closeable=closeable, icon=icon, *args, **kwargs)
self.addClass("popup--yesnodialog")
self.yesCallback = yesCallback
self.noCallback = noCallback
lbl = html5.Span()
lbl["class"].append("question")
self.popupBody.appendChild(lbl)
if isinstance(question, html5.Widget):
lbl.appendChild(question)
else:
utils.textToHtml(lbl, question)
if len(noLabel):
btnNo = Button(noLabel, className="btn--no", callback=self.onNoClicked)
#btnNo["class"].append("btn--no")
self.popupFoot.appendChild(btnNo)
btnYes = Button(yesLabel, callback=self.onYesClicked)
btnYes["class"].append("btn--yes")
self.popupFoot.appendChild(btnYes)
self.sinkEvent("onKeyDown")
btnYes.focus()
def onKeyDown(self, event):
if html5.isReturn(event):
event.stopPropagation()
event.preventDefault()
self.onYesClicked()
def onDocumentKeyDown(self, event):
if html5.isEscape(event):
event.stopPropagation()
event.preventDefault()
self.onNoClicked()
def drop(self):
self.yesCallback = None
self.noCallback = None
self.close()
def onYesClicked(self, *args, **kwargs):
if self.yesCallback:
self.yesCallback(self)
self.drop()
def onNoClicked(self, *args, **kwargs):
if self.noCallback:
self.noCallback(self)
self.drop()
class SelectDialog(Popup):
def __init__(self, prompt, items=None, title=None, okBtn="OK", cancelBtn="Cancel", forceSelect=False,
callback=None, *args, **kwargs):
super().__init__(title, *args, **kwargs)
self["class"].append("popup--selectdialog")
self.callback = callback
self.items = items
assert isinstance(self.items, list)
# Prompt
if prompt:
lbl = html5.Span()
lbl["class"].append("prompt")
if isinstance(prompt, html5.Widget):
lbl.appendChild(prompt)
else:
utils.textToHtml(lbl, prompt)
self.popupBody.appendChild(lbl)
# Items
if not forceSelect and len(items) <= 3:
for idx, item in enumerate(items):
if isinstance(item, dict):
title = item.get("title")
cssc = item.get("class")
elif isinstance(item, tuple):
title = item[1]
cssc = None
else:
title = item
btn = Button(title, callback=self.onAnyBtnClick)
btn.idx = idx
if cssc:
btn.addClass(cssc)
self.popupBody.appendChild(btn)
else:
self.select = html5.Select()
self.popupBody.appendChild(self.select)
for idx, item in enumerate(items):
if isinstance(item, dict):
title = item.get("title")
elif isinstance(item, tuple):
title = item[1]
else:
title = item
opt = html5.Option(title)
opt["value"] = str(idx)
self.select.appendChild(opt)
if okBtn:
self.popupFoot.appendChild(Button(okBtn, callback=self.onOkClick))
if cancelBtn:
self.popupFoot.appendChild(Button(cancelBtn, callback=self.onCancelClick))
def onAnyBtnClick(self, sender):
item = self.items[sender.idx]
if isinstance(item, dict) and item.get("callback") and callable(item["callback"]):
item["callback"](item)
if self.callback:
self.callback(item)
self.items = None
self.close()
def onCancelClick(self, sender=None):
self.close()
def onOkClick(self, sender=None):
assert self.select["selectedIndex"] >= 0
item = self.items[int(self.select.children(self.select["selectedIndex"])["value"])]
if isinstance(item, dict) and item.get("callback") and callable(item["callback"]):
item["callback"](item)
if self.callback:
self.callback(item)
self.items = None
self.select = None
self.close()
class TextareaDialog(Popup):
def __init__(self, text, value="", successHandler=None, abortHandler=None, successLbl="OK", abortLbl="Cancel",
*args, **kwargs):
super().__init__(*args, **kwargs)
self["class"].append("popup--textareadialog")
self.successHandler = successHandler
self.abortHandler = abortHandler
span = html5.Span()
span.element.innerHTML = text
self.popupBody.appendChild(span)
self.inputElem = html5.Textarea()
self.inputElem["value"] = value
self.popupBody.appendChild(self.inputElem)
okayBtn = Button(successLbl, self.onOkay)
okayBtn["class"].append("btn--okay")
self.popupFoot.appendChild(okayBtn)
cancelBtn = Button(abortLbl, self.onCancel)
cancelBtn["class"].append("btn--cancel")
self.popupFoot.appendChild(cancelBtn)
self.sinkEvent("onKeyDown")
self.inputElem.focus()
def onDocumentKeyDown(self, event):
if html5.isEscape(event):
event.stopPropagation()
event.preventDefault()
self.onCancel()
def onOkay(self, *args, **kwargs):
if self.successHandler:
self.successHandler(self, self.inputElem["value"])
self.close()
def onCancel(self, *args, **kwargs):
if self.abortHandler:
self.abortHandler(self, self.inputElem["value"])
self.close()
|
import logging
import time
import traceback
import numpy as np
from ..conventions import cf_encoder
from ..core import indexing
from ..core.pycompat import is_duck_dask_array
from ..core.utils import FrozenDict, NdimSizeLenMixin
# Create a logger object, but don't add any handlers. Leave that to user code.
logger = logging.getLogger(__name__)
NONE_VAR_NAME = "__values__"
def _encode_variable_name(name):
if name is None:
name = NONE_VAR_NAME
return name
def _decode_variable_name(name):
if name == NONE_VAR_NAME:
name = None
return name
def find_root_and_group(ds):
"""Find the root and group name of a netCDF4/h5netcdf dataset."""
hierarchy = ()
while ds.parent is not None:
hierarchy = (ds.name.split("/")[-1],) + hierarchy
ds = ds.parent
group = "/" + "/".join(hierarchy)
return ds, group
def robust_getitem(array, key, catch=Exception, max_retries=6, initial_delay=500):
"""
Robustly index an array, using retry logic with exponential backoff if any
of the errors ``catch`` are raised. The initial_delay is measured in ms.
With the default settings, the maximum delay will be in the range of 32-64
seconds.
"""
assert max_retries >= 0
for n in range(max_retries + 1):
try:
return array[key]
except catch:
if n == max_retries:
raise
base_delay = initial_delay * 2 ** n
next_delay = base_delay + np.random.randint(base_delay)
msg = (
"getitem failed, waiting %s ms before trying again "
"(%s tries remaining). Full traceback: %s"
% (next_delay, max_retries - n, traceback.format_exc())
)
logger.debug(msg)
time.sleep(1e-3 * next_delay)
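# Worked timing for the defaults above (max_retries=6, initial_delay=500 ms): sleeps happen
# for n = 0..5 with base delays of 0.5, 1, 2, 4, 8 and 16 s (31.5 s in total), and each sleep
# adds a further random amount of up to its base delay, so the cumulative wait before the
# final re-raise lies between roughly 31.5 s and 63 s, which is the "32-64 seconds" range
# quoted in the docstring.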
class BackendArray(NdimSizeLenMixin, indexing.ExplicitlyIndexed):
__slots__ = ()
def __array__(self, dtype=None):
key = indexing.BasicIndexer((slice(None),) * self.ndim)
return np.asarray(self[key], dtype=dtype)
class AbstractDataStore:
__slots__ = ()
def get_dimensions(self): # pragma: no cover
raise NotImplementedError()
def get_attrs(self): # pragma: no cover
raise NotImplementedError()
def get_variables(self): # pragma: no cover
raise NotImplementedError()
def get_encoding(self):
return {}
def load(self):
"""
This loads the variables and attributes simultaneously.
A centralized loading function makes it easier to create
data stores that do automatic encoding/decoding.
For example::
class SuffixAppendingDataStore(AbstractDataStore):
def load(self):
variables, attributes = AbstractDataStore.load(self)
variables = {'%s_suffix' % k: v
for k, v in variables.items()}
attributes = {'%s_suffix' % k: v
for k, v in attributes.items()}
return variables, attributes
This function will be called anytime variables or attributes
are requested, so care should be taken to make sure it's fast.
"""
variables = FrozenDict(
(_decode_variable_name(k), v) for k, v in self.get_variables().items()
)
attributes = FrozenDict(self.get_attrs())
return variables, attributes
def close(self):
pass
def __enter__(self):
return self
def __exit__(self, exception_type, exception_value, traceback):
self.close()
class ArrayWriter:
__slots__ = ("sources", "targets", "regions", "lock")
def __init__(self, lock=None):
self.sources = []
self.targets = []
self.regions = []
self.lock = lock
def add(self, source, target, region=None):
if is_duck_dask_array(source):
self.sources.append(source)
self.targets.append(target)
self.regions.append(region)
else:
if region:
target[region] = source
else:
target[...] = source
def sync(self, compute=True):
if self.sources:
import dask.array as da
# TODO: consider wrapping targets with dask.delayed, if this makes
# for any discernible difference in performance, e.g.,
# targets = [dask.delayed(t) for t in self.targets]
delayed_store = da.store(
self.sources,
self.targets,
lock=self.lock,
compute=compute,
flush=True,
regions=self.regions,
)
self.sources = []
self.targets = []
self.regions = []
return delayed_store
class AbstractWritableDataStore(AbstractDataStore):
__slots__ = ()
def encode(self, variables, attributes):
"""
Encode the variables and attributes in this store
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
Returns
-------
variables : dict-like
attributes : dict-like
"""
variables = {k: self.encode_variable(v) for k, v in variables.items()}
attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}
return variables, attributes
def encode_variable(self, v):
"""encode one variable"""
return v
def encode_attribute(self, a):
"""encode one attribute"""
return a
def set_dimension(self, dim, length): # pragma: no cover
raise NotImplementedError()
def set_attribute(self, k, v): # pragma: no cover
raise NotImplementedError()
def set_variable(self, k, v): # pragma: no cover
raise NotImplementedError()
def store_dataset(self, dataset):
"""
In stores, variables are all variables AND coordinates.
In xarray.Dataset, variables are variables NOT coordinates,
so here we pass the whole dataset in instead of doing
dataset.variables
"""
self.store(dataset, dataset.attrs)
def store(
self,
variables,
attributes,
check_encoding_set=frozenset(),
writer=None,
unlimited_dims=None,
):
"""
Top level method for putting data on this store, this method:
- encodes variables/attributes
- sets dimensions
- sets variables
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
writer : ArrayWriter
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
if writer is None:
writer = ArrayWriter()
variables, attributes = self.encode(variables, attributes)
self.set_attributes(attributes)
self.set_dimensions(variables, unlimited_dims=unlimited_dims)
self.set_variables(
variables, check_encoding_set, writer, unlimited_dims=unlimited_dims
)
def set_attributes(self, attributes):
"""
This provides a centralized method to set the dataset attributes on the
data store.
Parameters
----------
attributes : dict-like
Dictionary of key/value (attribute name / attribute) pairs
"""
for k, v in attributes.items():
self.set_attribute(k, v)
def set_variables(self, variables, check_encoding_set, writer, unlimited_dims=None):
"""
This provides a centralized method to set the variables on the data
store.
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
check_encoding_set : list-like
List of variables that should be checked for invalid encoding
values
writer : ArrayWriter
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
for vn, v in variables.items():
name = _encode_variable_name(vn)
check = vn in check_encoding_set
target, source = self.prepare_variable(
name, v, check, unlimited_dims=unlimited_dims
)
writer.add(source, target)
def set_dimensions(self, variables, unlimited_dims=None):
"""
This provides a centralized method to set the dimensions on the data
store.
Parameters
----------
variables : dict-like
Dictionary of key/value (variable name / xr.Variable) pairs
unlimited_dims : list-like
List of dimension names that should be treated as unlimited
dimensions.
"""
if unlimited_dims is None:
unlimited_dims = set()
existing_dims = self.get_dimensions()
dims = {}
for v in unlimited_dims: # put unlimited_dims first
dims[v] = None
for v in variables.values():
dims.update(dict(zip(v.dims, v.shape)))
for dim, length in dims.items():
if dim in existing_dims and length != existing_dims[dim]:
raise ValueError(
"Unable to update size for existing dimension"
"%r (%d != %d)" % (dim, length, existing_dims[dim])
)
elif dim not in existing_dims:
is_unlimited = dim in unlimited_dims
self.set_dimension(dim, length, is_unlimited)
class WritableCFDataStore(AbstractWritableDataStore):
__slots__ = ()
def encode(self, variables, attributes):
# All NetCDF files get CF encoded by default, without this attempting
# to write times, for example, would fail.
variables, attributes = cf_encoder(variables, attributes)
variables = {k: self.encode_variable(v) for k, v in variables.items()}
attributes = {k: self.encode_attribute(v) for k, v in attributes.items()}
return variables, attributes
class BackendEntrypoint:
__slots__ = ("guess_can_open", "open_dataset", "open_dataset_parameters")
def __init__(self, open_dataset, open_dataset_parameters=None, guess_can_open=None):
self.open_dataset = open_dataset
self.open_dataset_parameters = open_dataset_parameters
self.guess_can_open = guess_can_open
|
import asyncio
import aiohttp
import homeassistant.components.rest_command as rc
from homeassistant.const import CONTENT_TYPE_JSON, CONTENT_TYPE_TEXT_PLAIN
from homeassistant.setup import setup_component
from tests.common import assert_setup_component, get_test_home_assistant
class TestRestCommandSetup:
"""Test the rest command component."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
self.config = {rc.DOMAIN: {"test_get": {"url": "http://example.com/"}}}
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_component(self):
"""Test setup component."""
with assert_setup_component(1):
setup_component(self.hass, rc.DOMAIN, self.config)
def test_setup_component_timeout(self):
"""Test setup component timeout."""
self.config[rc.DOMAIN]["test_get"]["timeout"] = 10
with assert_setup_component(1):
setup_component(self.hass, rc.DOMAIN, self.config)
def test_setup_component_test_service(self):
"""Test setup component and check if service exits."""
with assert_setup_component(1):
setup_component(self.hass, rc.DOMAIN, self.config)
assert self.hass.services.has_service(rc.DOMAIN, "test_get")
class TestRestCommandComponent:
"""Test the rest command component."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.url = "https://example.com/"
self.config = {
rc.DOMAIN: {
"get_test": {"url": self.url, "method": "get"},
"patch_test": {"url": self.url, "method": "patch"},
"post_test": {"url": self.url, "method": "post"},
"put_test": {"url": self.url, "method": "put"},
"delete_test": {"url": self.url, "method": "delete"},
}
}
self.hass = get_test_home_assistant()
def teardown_method(self):
"""Stop everything that was started."""
self.hass.stop()
def test_setup_tests(self):
"""Set up test config and test it."""
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
assert self.hass.services.has_service(rc.DOMAIN, "get_test")
assert self.hass.services.has_service(rc.DOMAIN, "post_test")
assert self.hass.services.has_service(rc.DOMAIN, "put_test")
assert self.hass.services.has_service(rc.DOMAIN, "delete_test")
def test_rest_command_timeout(self, aioclient_mock):
"""Call a rest command with timeout."""
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, exc=asyncio.TimeoutError())
self.hass.services.call(rc.DOMAIN, "get_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_aiohttp_error(self, aioclient_mock):
"""Call a rest command with aiohttp exception."""
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, exc=aiohttp.ClientError())
self.hass.services.call(rc.DOMAIN, "get_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_http_error(self, aioclient_mock):
"""Call a rest command with status code 400."""
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, status=400)
self.hass.services.call(rc.DOMAIN, "get_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_auth(self, aioclient_mock):
"""Call a rest command with auth credential."""
data = {"username": "test", "password": "123456"}
self.config[rc.DOMAIN]["get_test"].update(data)
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, content=b"success")
self.hass.services.call(rc.DOMAIN, "get_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_form_data(self, aioclient_mock):
"""Call a rest command with post form data."""
data = {"payload": "test"}
self.config[rc.DOMAIN]["post_test"].update(data)
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.post(self.url, content=b"success")
self.hass.services.call(rc.DOMAIN, "post_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == b"test"
def test_rest_command_get(self, aioclient_mock):
"""Call a rest command with get."""
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.get(self.url, content=b"success")
self.hass.services.call(rc.DOMAIN, "get_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_delete(self, aioclient_mock):
"""Call a rest command with delete."""
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.delete(self.url, content=b"success")
self.hass.services.call(rc.DOMAIN, "delete_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
def test_rest_command_patch(self, aioclient_mock):
"""Call a rest command with patch."""
data = {"payload": "data"}
self.config[rc.DOMAIN]["patch_test"].update(data)
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.patch(self.url, content=b"success")
self.hass.services.call(rc.DOMAIN, "patch_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == b"data"
def test_rest_command_post(self, aioclient_mock):
"""Call a rest command with post."""
data = {"payload": "data"}
self.config[rc.DOMAIN]["post_test"].update(data)
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.post(self.url, content=b"success")
self.hass.services.call(rc.DOMAIN, "post_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == b"data"
def test_rest_command_put(self, aioclient_mock):
"""Call a rest command with put."""
data = {"payload": "data"}
self.config[rc.DOMAIN]["put_test"].update(data)
with assert_setup_component(5):
setup_component(self.hass, rc.DOMAIN, self.config)
aioclient_mock.put(self.url, content=b"success")
self.hass.services.call(rc.DOMAIN, "put_test", {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == b"data"
def test_rest_command_headers(self, aioclient_mock):
"""Call a rest command with custom headers and content types."""
header_config_variations = {
rc.DOMAIN: {
"no_headers_test": {},
"content_type_test": {"content_type": CONTENT_TYPE_TEXT_PLAIN},
"headers_test": {
"headers": {
"Accept": CONTENT_TYPE_JSON,
"User-Agent": "Mozilla/5.0",
}
},
"headers_and_content_type_test": {
"headers": {"Accept": CONTENT_TYPE_JSON},
"content_type": CONTENT_TYPE_TEXT_PLAIN,
},
"headers_and_content_type_override_test": {
"headers": {
"Accept": CONTENT_TYPE_JSON,
aiohttp.hdrs.CONTENT_TYPE: "application/pdf",
},
"content_type": CONTENT_TYPE_TEXT_PLAIN,
},
"headers_template_test": {
"headers": {
"Accept": CONTENT_TYPE_JSON,
"User-Agent": "Mozilla/{{ 3 + 2 }}.0",
}
},
"headers_and_content_type_override_template_test": {
"headers": {
"Accept": "application/{{ 1 + 1 }}json",
aiohttp.hdrs.CONTENT_TYPE: "application/pdf",
},
"content_type": "text/json",
},
}
}
# add common parameters
for variation in header_config_variations[rc.DOMAIN].values():
variation.update(
{"url": self.url, "method": "post", "payload": "test data"}
)
with assert_setup_component(7):
setup_component(self.hass, rc.DOMAIN, header_config_variations)
# provide post request data
aioclient_mock.post(self.url, content=b"success")
for test_service in [
"no_headers_test",
"content_type_test",
"headers_test",
"headers_and_content_type_test",
"headers_and_content_type_override_test",
"headers_template_test",
"headers_and_content_type_override_template_test",
]:
self.hass.services.call(rc.DOMAIN, test_service, {})
self.hass.block_till_done()
assert len(aioclient_mock.mock_calls) == 7
# no_headers_test
assert aioclient_mock.mock_calls[0][3] is None
# content_type_test
assert len(aioclient_mock.mock_calls[1][3]) == 1
assert (
aioclient_mock.mock_calls[1][3].get(aiohttp.hdrs.CONTENT_TYPE)
== CONTENT_TYPE_TEXT_PLAIN
)
# headers_test
assert len(aioclient_mock.mock_calls[2][3]) == 2
assert aioclient_mock.mock_calls[2][3].get("Accept") == CONTENT_TYPE_JSON
assert aioclient_mock.mock_calls[2][3].get("User-Agent") == "Mozilla/5.0"
# headers_and_content_type_test
assert len(aioclient_mock.mock_calls[3][3]) == 2
assert (
aioclient_mock.mock_calls[3][3].get(aiohttp.hdrs.CONTENT_TYPE)
== CONTENT_TYPE_TEXT_PLAIN
)
assert aioclient_mock.mock_calls[3][3].get("Accept") == CONTENT_TYPE_JSON
# headers_and_content_type_override_test
assert len(aioclient_mock.mock_calls[4][3]) == 2
assert (
aioclient_mock.mock_calls[4][3].get(aiohttp.hdrs.CONTENT_TYPE)
== CONTENT_TYPE_TEXT_PLAIN
)
assert aioclient_mock.mock_calls[4][3].get("Accept") == CONTENT_TYPE_JSON
# headers_template_test
assert len(aioclient_mock.mock_calls[5][3]) == 2
assert aioclient_mock.mock_calls[5][3].get("Accept") == CONTENT_TYPE_JSON
assert aioclient_mock.mock_calls[5][3].get("User-Agent") == "Mozilla/5.0"
# headers_and_content_type_override_template_test
assert len(aioclient_mock.mock_calls[6][3]) == 2
assert (
aioclient_mock.mock_calls[6][3].get(aiohttp.hdrs.CONTENT_TYPE)
== "text/json"
)
assert aioclient_mock.mock_calls[6][3].get("Accept") == "application/2json"
|
import glob
import logging
import os
import threading
import pytest
import sys
import shutil
import yaml
from yandextank.core import TankCore
from yandextank.core.tankworker import parse_options, TankInfo
try:
from yatest import common
PATH = common.source_path('load/projects/yandex-tank/yandextank/core/tests')
TMPDIR = os.path.join(os.getcwd(), 'artifacts_dir')
except ImportError:
PATH = os.path.dirname(__file__)
TMPDIR = './'
logger = logging.getLogger('')
logger.setLevel(logging.DEBUG)
console_handler = logging.StreamHandler(sys.stdout)
fmt = logging.Formatter("%(asctime)s [%(levelname)s] %(name)s %(filename)s:%(lineno)d\t%(message)s")
console_handler.setLevel(logging.DEBUG)
console_handler.setFormatter(fmt)
logger.addHandler(console_handler)
def load_yaml(directory, filename):
with open(os.path.join(directory, filename), 'r') as f:
return yaml.load(f, Loader=yaml.FullLoader)
CFG1 = {
"version": "1.8.36",
"core": {
'operator': 'fomars',
'artifacts_base_dir': TMPDIR,
'artifacts_dir': TMPDIR
},
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 'test_monitoring.xml',
'disguise_hostnames': True
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'localhost',
'header_http': '1.1',
'uris': ['/'],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1, 10, 1m)'},
'phantom_path': './phantom_mock.sh',
'connection_test': False
},
'lunapark': {
'package': 'yandextank.plugins.DataUploader',
'enabled': True,
'api_address': 'https://lunapark.test.yandex-team.ru/',
'task': 'LOAD-204',
'ignore_target_lock': True,
}
}
CFG2 = {
"version": "1.8.36",
"core": {
'operator': 'fomars',
'artifacts_base_dir': TMPDIR,
'artifacts_dir': TMPDIR
},
'telegraf': {
'enabled': False,
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'lunapark.test.yandex-team.ru',
'header_http': '1.1',
'uris': ['/'],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1, 10, 1m)'},
'connection_test': False
},
'lunapark': {
'package': 'yandextank.plugins.DataUploader',
'enabled': True,
'api_address': 'https://lunapark.test.yandex-team.ru/',
'task': 'LOAD-204',
'ignore_target_lock': True,
},
'shellexec': {
'enabled': False
}
}
CFG_MULTI = load_yaml(PATH, 'test_multi_cfg.yaml')
original_working_dir = os.getcwd()
def setup_module(module):
os.chdir(PATH)
@pytest.mark.parametrize('config, expected', [
(CFG1,
{'plugin_telegraf', 'plugin_phantom', 'plugin_lunapark',
'plugin_rcheck', 'plugin_shellexec', 'plugin_autostop',
'plugin_console', 'plugin_rcassert', 'plugin_json_report',
}),
(CFG2,
{'plugin_phantom', 'plugin_lunapark', 'plugin_rcheck',
'plugin_autostop', 'plugin_console',
'plugin_rcassert', 'plugin_json_report',
})
])
def test_core_load_plugins(config, expected):
core = TankCore([load_yaml(os.path.join(PATH, '../config'), '00-base.yaml'), config],
threading.Event(), TankInfo({}))
core.load_plugins()
assert set(core.plugins.keys()) == expected
@pytest.mark.parametrize('config, expected', [
(CFG1, None)
])
def test_core_plugins_configure(config, expected):
core = TankCore([config], threading.Event(), TankInfo({}))
core.plugins_configure()
@pytest.mark.skip('disabled for travis')
@pytest.mark.parametrize('config, expected', [
(CFG1, None),
(CFG_MULTI, None)
])
def test_plugins_prepare_test(config, expected):
core = TankCore([config], threading.Event())
core.plugins_prepare_test()
@pytest.mark.skip('Not implemented')
def test_stpd_file():
raise NotImplementedError
@pytest.mark.skip('disabled for travis')
@pytest.mark.parametrize('config', [
CFG_MULTI,
])
def test_start_test(config):
core = TankCore(configs=[config])
core.plugins_prepare_test()
core.plugins_start_test()
core.plugins_end_test(1)
@pytest.mark.parametrize('options, expected', [
(
['meta.task=LOAD-204',
'phantom.ammofile = air-tickets-search-ammo.log',
'meta.component = air_tickets_search [imbalance]',
'meta.jenkinsjob = https://jenkins-load.yandex-team.ru/job/air_tickets_search/'],
[{'uploader': {'package': 'yandextank.plugins.DataUploader', 'task': 'LOAD-204'}},
{'phantom': {'package': 'yandextank.plugins.Phantom', 'ammofile': 'air-tickets-search-ammo.log'}},
{'uploader': {'package': 'yandextank.plugins.DataUploader', 'component': 'air_tickets_search [imbalance]'}},
{'uploader': {'package': 'yandextank.plugins.DataUploader',
'meta': {'jenkinsjob': 'https://jenkins-load.yandex-team.ru/job/air_tickets_search/'}}}]
),
# with converting/type-casting
(
['phantom.rps_schedule = line(10,100,10m)',
'phantom.instances=200',
'phantom.connection_test=0'],
[{'phantom': {'package': 'yandextank.plugins.Phantom', 'load_profile': {'load_type': 'rps', 'schedule': 'line(10,100,10m)'}}},
{'phantom': {'package': 'yandextank.plugins.Phantom', 'instances': 200}},
{'phantom': {'package': 'yandextank.plugins.Phantom', 'connection_test': 0}}]
)
])
def test_parse_options(options, expected):
assert parse_options(options) == expected
def teardown_module(module):
for pattern in ['monitoring_*.xml', 'agent_*', '*.log', '*.stpd_si.json', '*.stpd', '*.conf']:
for path in glob.glob(pattern):
os.remove(path)
try:
shutil.rmtree('logs/')
shutil.rmtree('lunapark/')
except OSError:
pass
global original_working_dir
os.chdir(original_working_dir)
def sort_schema_alphabetically(filename):
    """Rewrite a YAML schema file with its top-level keys and each key's options sorted alphabetically."""
with open(filename, 'r') as f:
schema = yaml.load(f, Loader=yaml.FullLoader)
with open(filename, 'w') as f:
for key, value in sorted(schema.items()):
f.write(key + ':\n')
for k, v in sorted(value.items()):
f.write(' ' + k + ': ' + str(v).lower() + '\n')
|
import asyncio
from collections import namedtuple
from datetime import timedelta
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, HTTP_OK
from homeassistant.helpers.aiohttp_client import async_create_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_HOME_ID = "home_id"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_HOME_ID): cv.string,
}
)
def get_scanner(hass, config):
"""Return a Tado scanner."""
scanner = TadoDeviceScanner(hass, config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple("Device", ["mac", "name"])
class TadoDeviceScanner(DeviceScanner):
"""This class gets geofenced devices from Tado."""
def __init__(self, hass, config):
"""Initialize the scanner."""
self.hass = hass
self.last_results = []
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
# The Tado device tracker can work with or without a home_id
self.home_id = config[CONF_HOME_ID] if CONF_HOME_ID in config else None
# If there's a home_id, we need a different API URL
if self.home_id is None:
self.tadoapiurl = "https://my.tado.com/api/v2/me"
else:
self.tadoapiurl = "https://my.tado.com/api/v2/homes/{home_id}/mobileDevices"
# The API URL always needs a username and password
self.tadoapiurl += "?username={username}&password={password}"
self.websession = None
self.success_init = asyncio.run_coroutine_threadsafe(
self._async_update_info(), hass.loop
).result()
_LOGGER.info("Scanner initialized")
async def async_scan_devices(self):
"""Scan for devices and return a list containing found device ids."""
await self._async_update_info()
return [device.mac for device in self.last_results]
async def async_get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [
result.name for result in self.last_results if result.mac == device
]
if filter_named:
return filter_named[0]
return None
@Throttle(MIN_TIME_BETWEEN_SCANS)
async def _async_update_info(self):
"""
        Query Tado for devices marked as at home.
        Return a boolean indicating whether the scan was successful.
"""
_LOGGER.debug("Requesting Tado")
if self.websession is None:
self.websession = async_create_clientsession(
self.hass, cookie_jar=aiohttp.CookieJar(unsafe=True)
)
last_results = []
try:
with async_timeout.timeout(10):
# Format the URL here, so we can log the template URL if
# anything goes wrong without exposing username and password.
url = self.tadoapiurl.format(
home_id=self.home_id, username=self.username, password=self.password
)
response = await self.websession.get(url)
if response.status != HTTP_OK:
_LOGGER.warning("Error %d on %s", response.status, self.tadoapiurl)
return False
tado_json = await response.json()
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Cannot load Tado data")
return False
        # Without a home_id, we fetched a URL where the mobile devices can be
# found under the mobileDevices key.
if "mobileDevices" in tado_json:
tado_json = tado_json["mobileDevices"]
# Find devices that have geofencing enabled, and are currently at home.
for mobile_device in tado_json:
if mobile_device.get("location"):
if mobile_device["location"]["atHome"]:
device_id = mobile_device["id"]
device_name = mobile_device["name"]
last_results.append(Device(device_id, device_name))
self.last_results = last_results
_LOGGER.debug(
"Tado presence query successful, %d device(s) at home",
len(self.last_results),
)
return True
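# Illustrative only: a minimal sketch of the mobileDevices payload shape that
# _async_update_info expects (field names are taken from the parsing above;
# the values are invented for the example):
#
#     [
#         {
#             "id": 123456,
#             "name": "Jane's phone",
#             "location": {"atHome": True},
#         },
#     ]
#
# Only entries that carry a "location" object with "atHome" set to true end up
# in last_results; devices without geofencing data are skipped.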
|
from httpcore import ConnectError
from wolf_smartset.models import Device
from wolf_smartset.token_auth import InvalidAuth
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.wolflink.const import (
DEVICE_GATEWAY,
DEVICE_ID,
DEVICE_NAME,
DOMAIN,
)
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
CONFIG = {
DEVICE_NAME: "test-device",
DEVICE_ID: 1234,
DEVICE_GATEWAY: 5678,
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
}
INPUT_CONFIG = {
CONF_USERNAME: CONFIG[CONF_USERNAME],
CONF_PASSWORD: CONFIG[CONF_PASSWORD],
}
DEVICE = Device(CONFIG[DEVICE_ID], CONFIG[DEVICE_GATEWAY], CONFIG[DEVICE_NAME])
async def test_show_form(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_device_step_form(hass):
"""Test we get the second step of config."""
with patch(
"homeassistant.components.wolflink.config_flow.WolfClient.fetch_system_list",
return_value=[DEVICE],
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=INPUT_CONFIG
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "device"
async def test_create_entry(hass):
"""Test entity creation from device step."""
with patch(
"homeassistant.components.wolflink.config_flow.WolfClient.fetch_system_list",
return_value=[DEVICE],
), patch("homeassistant.components.wolflink.async_setup_entry", return_value=True):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=INPUT_CONFIG
)
result_create_entry = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"device_name": CONFIG[DEVICE_NAME]},
)
assert result_create_entry["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result_create_entry["title"] == CONFIG[DEVICE_NAME]
assert result_create_entry["data"] == CONFIG
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
with patch(
"homeassistant.components.wolflink.config_flow.WolfClient.fetch_system_list",
side_effect=InvalidAuth,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=INPUT_CONFIG
)
assert result["type"] == "form"
assert result["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
with patch(
"homeassistant.components.wolflink.config_flow.WolfClient.fetch_system_list",
side_effect=ConnectError,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=INPUT_CONFIG
)
assert result["type"] == "form"
assert result["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_exception(hass):
"""Test we handle cannot connect error."""
with patch(
"homeassistant.components.wolflink.config_flow.WolfClient.fetch_system_list",
side_effect=Exception,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=INPUT_CONFIG
)
assert result["type"] == "form"
assert result["errors"] == {"base": "unknown"}
async def test_already_configured_error(hass):
"""Test already configured while creating entry."""
with patch(
"homeassistant.components.wolflink.config_flow.WolfClient.fetch_system_list",
return_value=[DEVICE],
), patch("homeassistant.components.wolflink.async_setup_entry", return_value=True):
MockConfigEntry(
domain=DOMAIN, unique_id=CONFIG[DEVICE_ID], data=CONFIG
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=INPUT_CONFIG
)
result_create_entry = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"device_name": CONFIG[DEVICE_NAME]},
)
assert result_create_entry["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result_create_entry["reason"] == "already_configured"
|
import logging
from bluepy.btle import BTLEException # pylint: disable=import-error, no-name-in-module
import eq3bt as eq3 # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_BOOST,
PRESET_NONE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_DEVICES,
CONF_MAC,
PRECISION_HALVES,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
STATE_BOOST = "boost"
ATTR_STATE_WINDOW_OPEN = "window_open"
ATTR_STATE_VALVE = "valve"
ATTR_STATE_LOCKED = "is_locked"
ATTR_STATE_LOW_BAT = "low_battery"
ATTR_STATE_AWAY_END = "away_end"
EQ_TO_HA_HVAC = {
eq3.Mode.Open: HVAC_MODE_HEAT,
eq3.Mode.Closed: HVAC_MODE_OFF,
eq3.Mode.Auto: HVAC_MODE_AUTO,
eq3.Mode.Manual: HVAC_MODE_HEAT,
eq3.Mode.Boost: HVAC_MODE_AUTO,
eq3.Mode.Away: HVAC_MODE_HEAT,
}
HA_TO_EQ_HVAC = {
HVAC_MODE_HEAT: eq3.Mode.Manual,
HVAC_MODE_OFF: eq3.Mode.Closed,
HVAC_MODE_AUTO: eq3.Mode.Auto,
}
EQ_TO_HA_PRESET = {eq3.Mode.Boost: PRESET_BOOST, eq3.Mode.Away: PRESET_AWAY}
HA_TO_EQ_PRESET = {PRESET_BOOST: eq3.Mode.Boost, PRESET_AWAY: eq3.Mode.Away}
DEVICE_SCHEMA = vol.Schema({vol.Required(CONF_MAC): cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_DEVICES): vol.Schema({cv.string: DEVICE_SCHEMA})}
)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the eQ-3 BLE thermostats."""
devices = []
for name, device_cfg in config[CONF_DEVICES].items():
mac = device_cfg[CONF_MAC]
devices.append(EQ3BTSmartThermostat(mac, name))
add_entities(devices, True)
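# Illustrative only: a configuration layout accepted by PLATFORM_SCHEMA above
# (the device name and MAC address are invented; the platform key assumes the
# integration's usual "eq3btsmart" name).
#
#     climate:
#       - platform: eq3btsmart
#         devices:
#           bedroom:
#             mac: "00:11:22:33:44:55"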
class EQ3BTSmartThermostat(ClimateEntity):
"""Representation of an eQ-3 Bluetooth Smart thermostat."""
def __init__(self, _mac, _name):
"""Initialize the thermostat."""
# We want to avoid name clash with this module.
self._name = _name
self._thermostat = eq3.Thermostat(_mac)
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def available(self) -> bool:
"""Return if thermostat is available."""
return self._thermostat.mode >= 0
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement that is used."""
return TEMP_CELSIUS
@property
def precision(self):
"""Return eq3bt's precision 0.5."""
return PRECISION_HALVES
@property
def current_temperature(self):
"""Can not report temperature, so return target_temperature."""
return self.target_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._thermostat.target_temperature
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
self._thermostat.target_temperature = temperature
@property
def hvac_mode(self):
"""Return the current operation mode."""
if self._thermostat.mode < 0:
return HVAC_MODE_OFF
return EQ_TO_HA_HVAC[self._thermostat.mode]
@property
def hvac_modes(self):
"""Return the list of available operation modes."""
return list(HA_TO_EQ_HVAC)
def set_hvac_mode(self, hvac_mode):
"""Set operation mode."""
if self.preset_mode:
return
self._thermostat.mode = HA_TO_EQ_HVAC[hvac_mode]
@property
def min_temp(self):
"""Return the minimum temperature."""
return self._thermostat.min_temp
@property
def max_temp(self):
"""Return the maximum temperature."""
return self._thermostat.max_temp
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
dev_specific = {
ATTR_STATE_AWAY_END: self._thermostat.away_end,
ATTR_STATE_LOCKED: self._thermostat.locked,
ATTR_STATE_LOW_BAT: self._thermostat.low_battery,
ATTR_STATE_VALVE: self._thermostat.valve_state,
ATTR_STATE_WINDOW_OPEN: self._thermostat.window_open,
}
return dev_specific
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp.
Requires SUPPORT_PRESET_MODE.
"""
return EQ_TO_HA_PRESET.get(self._thermostat.mode)
@property
def preset_modes(self):
"""Return a list of available preset modes.
Requires SUPPORT_PRESET_MODE.
"""
return list(HA_TO_EQ_PRESET)
def set_preset_mode(self, preset_mode):
"""Set new preset mode."""
        if preset_mode == PRESET_NONE:
            # PRESET_NONE has no eQ-3 equivalent; fall back to plain heating
            # mode and stop here to avoid a KeyError on the lookup below.
            self.set_hvac_mode(HVAC_MODE_HEAT)
            return
        self._thermostat.mode = HA_TO_EQ_PRESET[preset_mode]
def update(self):
"""Update the data from the thermostat."""
try:
self._thermostat.update()
except BTLEException as ex:
_LOGGER.warning("Updating the state failed: %s", ex)
|
import math
from gi.repository import Gdk, Gtk
from meld.settings import get_meld_settings
from meld.style import get_common_theme
# Rounded rectangle corner radius for culled changes display
RADIUS = 3
class LinkMap(Gtk.DrawingArea):
__gtype_name__ = "LinkMap"
def __init__(self):
self.filediff = None
self.views = []
def associate(self, filediff, left_view, right_view):
self.filediff = filediff
self.views = [left_view, right_view]
if self.get_direction() == Gtk.TextDirection.RTL:
self.views.reverse()
self.view_indices = [filediff.textview.index(t) for t in self.views]
meld_settings = get_meld_settings()
self.on_setting_changed(meld_settings, 'style-scheme')
meld_settings.connect('changed', self.on_setting_changed)
def on_setting_changed(self, settings, key):
if key == 'style-scheme':
self.fill_colors, self.line_colors = get_common_theme()
def do_draw(self, context):
if not self.views:
return
pix_start = [t.get_visible_rect().y for t in self.views]
y_offset = [
t.translate_coordinates(self, 0, 0)[1] + 1 for t in self.views]
clip_y = min(y_offset) - 1
clip_height = max(t.get_visible_rect().height for t in self.views) + 2
allocation = self.get_allocation()
stylecontext = self.get_style_context()
Gtk.render_background(
stylecontext, context, 0, clip_y, allocation.width, clip_height)
context.set_line_width(1.0)
height = allocation.height
visible = [
self.views[0].get_line_num_for_y(pix_start[0]),
self.views[0].get_line_num_for_y(pix_start[0] + height),
self.views[1].get_line_num_for_y(pix_start[1]),
self.views[1].get_line_num_for_y(pix_start[1] + height),
]
# For bezier control points
x_steps = [-0.5, allocation.width / 2, allocation.width + 0.5]
q_rad = math.pi / 2
left, right = self.view_indices
def view_offset_line(view_idx, line_num):
line_start = self.views[view_idx].get_y_for_line_num(line_num)
return line_start - pix_start[view_idx] + y_offset[view_idx]
for c in self.filediff.linediffer.pair_changes(left, right, visible):
# f and t are short for "from" and "to"
f0, f1 = [view_offset_line(0, l) for l in c[1:3]]
t0, t1 = [view_offset_line(1, l) for l in c[3:5]]
# We want the last pixel of the previous line
f1 = f1 if f1 == f0 else f1 - 1
t1 = t1 if t1 == t0 else t1 - 1
# If either endpoint is completely off-screen, we cull for clarity
if (t0 < 0 and t1 < 0) or (t0 > height and t1 > height):
if f0 == f1:
continue
context.arc(
x_steps[0], f0 - 0.5 + RADIUS, RADIUS, q_rad * 3, 0)
context.arc(x_steps[0], f1 - 0.5 - RADIUS, RADIUS, 0, q_rad)
context.close_path()
elif (f0 < 0 and f1 < 0) or (f0 > height and f1 > height):
if t0 == t1:
continue
context.arc_negative(x_steps[2], t0 - 0.5 + RADIUS, RADIUS,
q_rad * 3, q_rad * 2)
context.arc_negative(x_steps[2], t1 - 0.5 - RADIUS, RADIUS,
q_rad * 2, q_rad)
context.close_path()
else:
context.move_to(x_steps[0], f0 - 0.5)
context.curve_to(x_steps[1], f0 - 0.5,
x_steps[1], t0 - 0.5,
x_steps[2], t0 - 0.5)
context.line_to(x_steps[2], t1 - 0.5)
context.curve_to(x_steps[1], t1 - 0.5,
x_steps[1], f1 - 0.5,
x_steps[0], f1 - 0.5)
context.close_path()
Gdk.cairo_set_source_rgba(context, self.fill_colors[c[0]])
context.fill_preserve()
chunk_idx = self.filediff.linediffer.locate_chunk(left, c[1])[0]
if chunk_idx == self.filediff.cursor.chunk:
highlight = self.fill_colors['current-chunk-highlight']
Gdk.cairo_set_source_rgba(context, highlight)
context.fill_preserve()
Gdk.cairo_set_source_rgba(context, self.line_colors[c[0]])
context.stroke()
LinkMap.set_css_name("link-map")
class ScrollLinkMap(Gtk.DrawingArea):
__gtype_name__ = "ScrollLinkMap"
|
import json
from homeassistant.components.accuweather.const import DOMAIN
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
async def init_integration(
hass, forecast=False, unsupported_icon=False
) -> MockConfigEntry:
"""Set up the AccuWeather integration in Home Assistant."""
options = {}
if forecast:
options["forecast"] = True
entry = MockConfigEntry(
domain=DOMAIN,
title="Home",
unique_id="0123456",
data={
"api_key": "32-character-string-1234567890qw",
"latitude": 55.55,
"longitude": 122.12,
"name": "Home",
},
options=options,
)
current = json.loads(load_fixture("accuweather/current_conditions_data.json"))
forecast = json.loads(load_fixture("accuweather/forecast_data.json"))
if unsupported_icon:
current["WeatherIcon"] = 999
with patch(
"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions",
return_value=current,
), patch(
"homeassistant.components.accuweather.AccuWeather.async_get_forecast",
return_value=forecast,
):
entry.add_to_hass(hass)
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
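# Illustrative only: how a test would typically use the helper above, assuming
# the standard "hass" fixture from the Home Assistant pytest harness.
#
#     entry = await init_integration(hass, forecast=True)
#     assert entry.data["name"] == "Home"
#     assert entry.options["forecast"] is True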
|
import numpy as np
class TestBaseTopicModel:
def test_print_topic(self):
topics = self.model.show_topics(formatted=True)
for topic_no, topic in topics:
self.assertTrue(isinstance(topic_no, int))
self.assertTrue(isinstance(topic, str))
def test_print_topics(self):
topics = self.model.print_topics()
for topic_no, topic in topics:
self.assertTrue(isinstance(topic_no, int))
self.assertTrue(isinstance(topic, str))
def test_show_topic(self):
topic = self.model.show_topic(1)
for k, v in topic:
self.assertTrue(isinstance(k, str))
self.assertTrue(isinstance(v, (np.floating, float)))
def test_show_topics(self):
topics = self.model.show_topics(formatted=False)
for topic_no, topic in topics:
self.assertTrue(isinstance(topic_no, int))
self.assertTrue(isinstance(topic, list))
for k, v in topic:
self.assertTrue(isinstance(k, str))
self.assertTrue(isinstance(v, (np.floating, float)))
def test_get_topics(self):
topics = self.model.get_topics()
vocab_size = len(self.model.id2word)
for topic in topics:
self.assertTrue(isinstance(topic, np.ndarray))
# Note: started moving to np.float32 as default
# self.assertEqual(topic.dtype, np.float64)
self.assertEqual(vocab_size, topic.shape[0])
self.assertAlmostEqual(np.sum(topic), 1.0, 5)
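# Illustrative only: this class is meant to be used as a mixin. A concrete test
# case combines it with unittest.TestCase and provides ``self.model`` (a trained
# topic model exposing show_topics/print_topics/get_topics and an ``id2word``
# mapping) before these methods run, e.g.:
#
#     class TestSomeTopicModel(TestBaseTopicModel, unittest.TestCase):
#         def setUp(self):
#             self.model = train_model_somehow()  # hypothetical helper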
|
import numpy as np
from scattertext import Corpus
from scattertext.TermDocMatrixFromScikit import TermDocMatrixFromScikit
class CorpusFromScikit(TermDocMatrixFromScikit):
'''
    Tie-in to incorporate scikit-learn's various vectorizers into Scattertext
>>> from sklearn.datasets import fetch_20newsgroups
>>> from sklearn.feature_extraction.text import CountVectorizer
>>> from scattertext.CorpusFromScikit import CorpusFromScikit
>>> newsgroups_train = fetch_20newsgroups(subset='train', remove=('headers', 'footers', 'quotes'))
>>> count_vectorizer = CountVectorizer()
>>> X_counts = count_vectorizer.fit_transform(newsgroups_train.data)
>>> corpus = CorpusFromScikit(
... X=X_counts,
... y=newsgroups_train.target,
... feature_vocabulary=count_vectorizer.vocabulary_,
... category_names=newsgroups_train.target_names,
... raw_texts=newsgroups_train.data
... ).build()
'''
def __init__(self,
X,
y,
feature_vocabulary,
category_names,
raw_texts,
unigram_frequency_path=None):
'''
Parameters
----------
X: sparse matrix integer, giving term-document-matrix counts
y: list, integer categories
feature_vocabulary: dict (feat_name -> idx)
category_names: list of category names (len of y)
raw_texts: array-like of raw texts
unigram_frequency_path: str (see TermDocMatrix)
'''
TermDocMatrixFromScikit.__init__(self, X, y, feature_vocabulary,
category_names, unigram_frequency_path)
self.raw_texts = raw_texts
def build(self):
'''
Returns
-------
Corpus
'''
constructor_kwargs = self._get_build_kwargs()
if type(self.raw_texts) == list:
constructor_kwargs['raw_texts'] = np.array(self.raw_texts)
else:
constructor_kwargs['raw_texts'] = self.raw_texts
return Corpus(**constructor_kwargs)
|
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_FRIENDLY_NAME,
CONF_DEVICE_CLASS,
CONF_ENTITY_PICTURE_TEMPLATE,
CONF_ICON_TEMPLATE,
CONF_SENSORS,
CONF_UNIQUE_ID,
CONF_VALUE_TEMPLATE,
)
from homeassistant.core import callback
from homeassistant.exceptions import TemplateError
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.helpers.event import async_call_later
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.template import result_as_boolean
from .const import CONF_AVAILABILITY_TEMPLATE, DOMAIN, PLATFORMS
from .template_entity import TemplateEntity
CONF_DELAY_ON = "delay_on"
CONF_DELAY_OFF = "delay_off"
CONF_ATTRIBUTE_TEMPLATES = "attribute_templates"
SENSOR_SCHEMA = vol.All(
cv.deprecated(ATTR_ENTITY_ID),
vol.Schema(
{
vol.Required(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_ICON_TEMPLATE): cv.template,
vol.Optional(CONF_ENTITY_PICTURE_TEMPLATE): cv.template,
vol.Optional(CONF_AVAILABILITY_TEMPLATE): cv.template,
vol.Optional(CONF_ATTRIBUTE_TEMPLATES): vol.Schema(
{cv.string: cv.template}
),
vol.Optional(ATTR_FRIENDLY_NAME): cv.string,
vol.Optional(ATTR_ENTITY_ID): cv.entity_ids,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
vol.Optional(CONF_DELAY_ON): cv.positive_time_period,
vol.Optional(CONF_DELAY_OFF): cv.positive_time_period,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
),
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_SENSORS): cv.schema_with_slug_keys(SENSOR_SCHEMA)}
)
async def _async_create_entities(hass, config):
"""Create the template binary sensors."""
sensors = []
for device, device_config in config[CONF_SENSORS].items():
value_template = device_config[CONF_VALUE_TEMPLATE]
icon_template = device_config.get(CONF_ICON_TEMPLATE)
entity_picture_template = device_config.get(CONF_ENTITY_PICTURE_TEMPLATE)
availability_template = device_config.get(CONF_AVAILABILITY_TEMPLATE)
attribute_templates = device_config.get(CONF_ATTRIBUTE_TEMPLATES, {})
friendly_name = device_config.get(ATTR_FRIENDLY_NAME, device)
device_class = device_config.get(CONF_DEVICE_CLASS)
delay_on = device_config.get(CONF_DELAY_ON)
delay_off = device_config.get(CONF_DELAY_OFF)
unique_id = device_config.get(CONF_UNIQUE_ID)
sensors.append(
BinarySensorTemplate(
hass,
device,
friendly_name,
device_class,
value_template,
icon_template,
entity_picture_template,
availability_template,
delay_on,
delay_off,
attribute_templates,
unique_id,
)
)
return sensors
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template binary sensors."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
async_add_entities(await _async_create_entities(hass, config))
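# Illustrative only: a configuration layout matching PLATFORM_SCHEMA above
# (the entity ids and the template are invented for the example).
#
#     binary_sensor:
#       - platform: template
#         sensors:
#           garage_open:
#             friendly_name: "Garage open"
#             device_class: opening
#             value_template: "{{ is_state('cover.garage_door', 'open') }}"
#             delay_off:
#               seconds: 5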
class BinarySensorTemplate(TemplateEntity, BinarySensorEntity):
"""A virtual binary sensor that triggers from another sensor."""
def __init__(
self,
hass,
device,
friendly_name,
device_class,
value_template,
icon_template,
entity_picture_template,
availability_template,
delay_on,
delay_off,
attribute_templates,
unique_id,
):
"""Initialize the Template binary sensor."""
super().__init__(
attribute_templates=attribute_templates,
availability_template=availability_template,
icon_template=icon_template,
entity_picture_template=entity_picture_template,
)
self.entity_id = async_generate_entity_id(ENTITY_ID_FORMAT, device, hass=hass)
self._name = friendly_name
self._device_class = device_class
self._template = value_template
self._state = None
self._delay_cancel = None
self._delay_on = delay_on
self._delay_off = delay_off
self._unique_id = unique_id
async def async_added_to_hass(self):
"""Register callbacks."""
self.add_template_attribute("_state", self._template, None, self._update_state)
await super().async_added_to_hass()
@callback
def _update_state(self, result):
super()._update_state(result)
if self._delay_cancel:
self._delay_cancel()
self._delay_cancel = None
state = None if isinstance(result, TemplateError) else result_as_boolean(result)
if state == self._state:
return
# state without delay
if (
state is None
or (state and not self._delay_on)
or (not state and not self._delay_off)
):
self._state = state
return
@callback
def _set_state(_):
"""Set state of template binary sensor."""
self._state = state
self.async_write_ha_state()
delay = (self._delay_on if state else self._delay_off).seconds
# state with delay. Cancelled if template result changes.
self._delay_cancel = async_call_later(self.hass, delay, _set_state)
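    # Illustrative only: with delay_on set to 5 seconds, a template result that
    # flips to True arms a 5 second timer via async_call_later; if the template
    # flips back to False before the timer fires, the pending callback is
    # cancelled on the next _update_state call and no "on" state is ever written.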
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return the unique id of this binary sensor."""
return self._unique_id
@property
def is_on(self):
"""Return true if sensor is on."""
return self._state
@property
def device_class(self):
"""Return the sensor class of the binary sensor."""
return self._device_class
|
from abc import ABC
import asyncio
from datetime import datetime, timedelta
import functools as ft
import logging
from timeit import default_timer as timer
from typing import Any, Awaitable, Dict, Iterable, List, Optional
from homeassistant.config import DATA_CUSTOMIZE
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_PICTURE,
ATTR_FRIENDLY_NAME,
ATTR_ICON,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_DEFAULT_NAME,
STATE_OFF,
STATE_ON,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import CALLBACK_TYPE, Context, HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError, NoEntitySpecifiedError
from homeassistant.helpers.entity_platform import EntityPlatform
from homeassistant.helpers.entity_registry import RegistryEntry
from homeassistant.helpers.event import Event, async_track_entity_registry_updated_event
from homeassistant.helpers.typing import StateType
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util, ensure_unique_string, slugify
_LOGGER = logging.getLogger(__name__)
SLOW_UPDATE_WARNING = 10
DATA_ENTITY_SOURCE = "entity_info"
SOURCE_CONFIG_ENTRY = "config_entry"
SOURCE_PLATFORM_CONFIG = "platform_config"
@callback
@bind_hass
def entity_sources(hass: HomeAssistant) -> Dict[str, Dict[str, str]]:
"""Get the entity sources."""
return hass.data.get(DATA_ENTITY_SOURCE, {})
def generate_entity_id(
entity_id_format: str,
name: Optional[str],
current_ids: Optional[List[str]] = None,
hass: Optional[HomeAssistant] = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
return async_generate_entity_id(entity_id_format, name, current_ids, hass)
@callback
def async_generate_entity_id(
entity_id_format: str,
name: Optional[str],
current_ids: Optional[Iterable[str]] = None,
hass: Optional[HomeAssistant] = None,
) -> str:
"""Generate a unique entity ID based on given entity IDs or used IDs."""
name = (name or DEVICE_DEFAULT_NAME).lower()
preferred_string = entity_id_format.format(slugify(name))
if current_ids is not None:
return ensure_unique_string(preferred_string, current_ids)
if hass is None:
raise ValueError("Missing required parameter current_ids or hass")
test_string = preferred_string
tries = 1
while not hass.states.async_available(test_string):
tries += 1
test_string = f"{preferred_string}_{tries}"
return test_string
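# Illustrative only: with entity_id_format "sensor.{}" and name "Living Room",
# the preferred id is "sensor.living_room"; if that id is already taken (either
# present in current_ids or unavailable in the state machine), the helpers fall
# back to "sensor.living_room_2", "sensor.living_room_3", and so on.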
class Entity(ABC):
"""An abstract class for Home Assistant entities."""
# SAFE TO OVERWRITE
# The properties and methods here are safe to overwrite when inheriting
# this class. These may be used to customize the behavior of the entity.
entity_id = None # type: str
# Owning hass instance. Will be set by EntityPlatform
hass: Optional[HomeAssistant] = None
# Owning platform instance. Will be set by EntityPlatform
platform: Optional[EntityPlatform] = None
# If we reported if this entity was slow
_slow_reported = False
# If we reported this entity is updated while disabled
_disabled_reported = False
# Protect for multiple updates
_update_staged = False
# Process updates in parallel
parallel_updates: Optional[asyncio.Semaphore] = None
# Entry in the entity registry
registry_entry: Optional[RegistryEntry] = None
# Hold list for functions to call on remove.
_on_remove: Optional[List[CALLBACK_TYPE]] = None
# Context
_context: Optional[Context] = None
_context_set: Optional[datetime] = None
# If entity is added to an entity platform
_added = False
@property
def should_poll(self) -> bool:
"""Return True if entity has to be polled for state.
False if entity pushes its state to HA.
"""
return True
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return None
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return None
@property
def state(self) -> StateType:
"""Return the state of the entity."""
return STATE_UNKNOWN
@property
def capability_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the capability attributes.
Attributes that explain the capabilities of an entity.
Implemented by component base class. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes.
Implemented by component base class. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return device specific state attributes.
Implemented by platform classes. Convention for attribute names
is lowercase snake_case.
"""
return None
@property
def device_info(self) -> Optional[Dict[str, Any]]:
"""Return device specific attributes.
Implemented by platform classes.
"""
return None
@property
def device_class(self) -> Optional[str]:
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
@property
def unit_of_measurement(self) -> Optional[str]:
"""Return the unit of measurement of this entity, if any."""
return None
@property
def icon(self) -> Optional[str]:
"""Return the icon to use in the frontend, if any."""
return None
@property
def entity_picture(self) -> Optional[str]:
"""Return the entity picture to use in the frontend, if any."""
return None
@property
def available(self) -> bool:
"""Return True if entity is available."""
return True
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return False
@property
def force_update(self) -> bool:
"""Return True if state updates should be forced.
If True, a state change will be triggered anytime the state property is
updated, not just when the value changes.
"""
return False
@property
def supported_features(self) -> Optional[int]:
"""Flag supported features."""
return None
@property
def context_recent_time(self) -> timedelta:
"""Time that a context is considered recent."""
return timedelta(seconds=5)
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return True
# DO NOT OVERWRITE
# These properties and methods are either managed by Home Assistant or they
# are used to perform a very specific function. Overwriting these may
# produce undesirable effects in the entity's operation.
@property
def enabled(self) -> bool:
"""Return if the entity is enabled in the entity registry.
If an entity is not part of the registry, it cannot be disabled
and will therefore always be enabled.
"""
return self.registry_entry is None or not self.registry_entry.disabled
@callback
def async_set_context(self, context: Context) -> None:
"""Set the context the entity currently operates under."""
self._context = context
self._context_set = dt_util.utcnow()
async def async_update_ha_state(self, force_refresh: bool = False) -> None:
"""Update Home Assistant with current state of entity.
If force_refresh == True will update entity before setting state.
This method must be run in the event loop.
"""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
# update entity data
if force_refresh:
try:
await self.async_device_update()
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Update for %s fails", self.entity_id)
return
self._async_write_ha_state()
@callback
def async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self.hass is None:
raise RuntimeError(f"Attribute hass is None for {self}")
if self.entity_id is None:
raise NoEntitySpecifiedError(
f"No entity id specified for entity {self.name}"
)
self._async_write_ha_state()
@callback
def _async_write_ha_state(self) -> None:
"""Write the state to the state machine."""
if self.registry_entry and self.registry_entry.disabled_by:
if not self._disabled_reported:
self._disabled_reported = True
assert self.platform is not None
_LOGGER.warning(
"Entity %s is incorrectly being triggered for updates while it is disabled. This is a bug in the %s integration",
self.entity_id,
self.platform.platform_name,
)
return
start = timer()
attr = self.capability_attributes
attr = dict(attr) if attr else {}
if not self.available:
state = STATE_UNAVAILABLE
else:
sstate = self.state
state = STATE_UNKNOWN if sstate is None else str(sstate)
attr.update(self.state_attributes or {})
attr.update(self.device_state_attributes or {})
unit_of_measurement = self.unit_of_measurement
if unit_of_measurement is not None:
attr[ATTR_UNIT_OF_MEASUREMENT] = unit_of_measurement
entry = self.registry_entry
# pylint: disable=consider-using-ternary
name = (entry and entry.name) or self.name
if name is not None:
attr[ATTR_FRIENDLY_NAME] = name
icon = (entry and entry.icon) or self.icon
if icon is not None:
attr[ATTR_ICON] = icon
entity_picture = self.entity_picture
if entity_picture is not None:
attr[ATTR_ENTITY_PICTURE] = entity_picture
assumed_state = self.assumed_state
if assumed_state:
attr[ATTR_ASSUMED_STATE] = assumed_state
supported_features = self.supported_features
if supported_features is not None:
attr[ATTR_SUPPORTED_FEATURES] = supported_features
device_class = self.device_class
if device_class is not None:
attr[ATTR_DEVICE_CLASS] = str(device_class)
end = timer()
if end - start > 0.4 and not self._slow_reported:
self._slow_reported = True
extra = ""
if "custom_components" in type(self).__module__:
extra = "Please report it to the custom component author."
else:
extra = (
"Please create a bug report at "
"https://github.com/home-assistant/core/issues?q=is%3Aopen+is%3Aissue"
)
if self.platform:
extra += (
f"+label%3A%22integration%3A+{self.platform.platform_name}%22"
)
_LOGGER.warning(
"Updating state for %s (%s) took %.3f seconds. %s",
self.entity_id,
type(self),
end - start,
extra,
)
# Overwrite properties that have been set in the config file.
assert self.hass is not None
if DATA_CUSTOMIZE in self.hass.data:
attr.update(self.hass.data[DATA_CUSTOMIZE].get(self.entity_id))
# Convert temperature if we detect one
try:
unit_of_measure = attr.get(ATTR_UNIT_OF_MEASUREMENT)
units = self.hass.config.units
if (
unit_of_measure in (TEMP_CELSIUS, TEMP_FAHRENHEIT)
and unit_of_measure != units.temperature_unit
):
prec = len(state) - state.index(".") - 1 if "." in state else 0
temp = units.temperature(float(state), unit_of_measure)
state = str(round(temp) if prec == 0 else round(temp, prec))
attr[ATTR_UNIT_OF_MEASUREMENT] = units.temperature_unit
except ValueError:
# Could not convert state to float
pass
if (
self._context_set is not None
and dt_util.utcnow() - self._context_set > self.context_recent_time
):
self._context = None
self._context_set = None
self.hass.states.async_set(
self.entity_id, state, attr, self.force_update, self._context
)
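    # Illustrative only: the unit conversion in _async_write_ha_state means a
    # state of "72.5" reported with °F on a Celsius-configured instance is
    # rewritten to "22.5" with ATTR_UNIT_OF_MEASUREMENT set to °C, keeping the
    # original number of decimal places.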
def schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
assert self.hass is not None
self.hass.add_job(self.async_update_ha_state(force_refresh)) # type: ignore
@callback
def async_schedule_update_ha_state(self, force_refresh: bool = False) -> None:
"""Schedule an update ha state change task.
This method must be run in the event loop.
Scheduling the update avoids executor deadlocks.
Entity state and attributes are read when the update ha state change
task is executed.
If state is changed more than once before the ha state change task has
been executed, the intermediate state transitions will be missed.
"""
if force_refresh:
assert self.hass is not None
self.hass.async_create_task(self.async_update_ha_state(force_refresh))
else:
self.async_write_ha_state()
async def async_device_update(self, warning: bool = True) -> None:
"""Process 'update' or 'async_update' from entity.
This method is a coroutine.
"""
if self._update_staged:
return
self._update_staged = True
# Process update sequential
if self.parallel_updates:
await self.parallel_updates.acquire()
try:
# pylint: disable=no-member
if hasattr(self, "async_update"):
task = self.hass.async_create_task(self.async_update()) # type: ignore
elif hasattr(self, "update"):
task = self.hass.async_add_executor_job(self.update) # type: ignore
else:
return
if not warning:
await task
return
finished, _ = await asyncio.wait([task], timeout=SLOW_UPDATE_WARNING)
for done in finished:
exc = done.exception()
if exc:
raise exc
return
_LOGGER.warning(
"Update of %s is taking over %s seconds",
self.entity_id,
SLOW_UPDATE_WARNING,
)
await task
finally:
self._update_staged = False
if self.parallel_updates:
self.parallel_updates.release()
@callback
def async_on_remove(self, func: CALLBACK_TYPE) -> None:
"""Add a function to call when entity removed."""
if self._on_remove is None:
self._on_remove = []
self._on_remove.append(func)
async def async_removed_from_registry(self) -> None:
"""Run when entity has been removed from entity registry.
To be extended by integrations.
"""
@callback
def add_to_platform_start(
self,
hass: HomeAssistant,
platform: EntityPlatform,
parallel_updates: Optional[asyncio.Semaphore],
) -> None:
"""Start adding an entity to a platform."""
if self._added:
raise HomeAssistantError(
f"Entity {self.entity_id} cannot be added a second time to an entity platform"
)
self.hass = hass
self.platform = platform
self.parallel_updates = parallel_updates
self._added = True
@callback
def add_to_platform_abort(self) -> None:
"""Abort adding an entity to a platform."""
self.hass = None
self.platform = None
self.parallel_updates = None
self._added = False
async def add_to_platform_finish(self) -> None:
"""Finish adding an entity to a platform."""
await self.async_internal_added_to_hass()
await self.async_added_to_hass()
self.async_write_ha_state()
async def async_remove(self) -> None:
"""Remove entity from Home Assistant."""
assert self.hass is not None
if self.platform and not self._added:
raise HomeAssistantError(
f"Entity {self.entity_id} async_remove called twice"
)
self._added = False
if self._on_remove is not None:
while self._on_remove:
self._on_remove.pop()()
await self.async_internal_will_remove_from_hass()
await self.async_will_remove_from_hass()
self.hass.states.async_remove(self.entity_id, context=self._context)
async def async_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
To be extended by integrations.
"""
async def async_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
To be extended by integrations.
"""
async def async_internal_added_to_hass(self) -> None:
"""Run when entity about to be added to hass.
Not to be extended by integrations.
"""
assert self.hass is not None
if self.platform:
info = {"domain": self.platform.platform_name}
if self.platform.config_entry:
info["source"] = SOURCE_CONFIG_ENTRY
info["config_entry"] = self.platform.config_entry.entry_id
else:
info["source"] = SOURCE_PLATFORM_CONFIG
self.hass.data.setdefault(DATA_ENTITY_SOURCE, {})[self.entity_id] = info
if self.registry_entry is not None:
# This is an assert as it should never happen, but helps in tests
assert (
not self.registry_entry.disabled_by
), f"Entity {self.entity_id} is being added while it's disabled"
self.async_on_remove(
async_track_entity_registry_updated_event(
self.hass, self.entity_id, self._async_registry_updated
)
)
async def async_internal_will_remove_from_hass(self) -> None:
"""Run when entity will be removed from hass.
Not to be extended by integrations.
"""
if self.platform:
assert self.hass is not None
self.hass.data[DATA_ENTITY_SOURCE].pop(self.entity_id)
async def _async_registry_updated(self, event: Event) -> None:
"""Handle entity registry update."""
data = event.data
if data["action"] == "remove":
await self.async_removed_from_registry()
await self.async_remove()
if data["action"] != "update":
return
assert self.hass is not None
ent_reg = await self.hass.helpers.entity_registry.async_get_registry()
old = self.registry_entry
self.registry_entry = ent_reg.async_get(data["entity_id"])
assert self.registry_entry is not None
if self.registry_entry.disabled_by is not None:
await self.async_remove()
return
assert old is not None
if self.registry_entry.entity_id == old.entity_id:
self.async_write_ha_state()
return
await self.async_remove()
assert self.platform is not None
self.entity_id = self.registry_entry.entity_id
await self.platform.async_add_entities([self])
def __eq__(self, other: Any) -> bool:
"""Return the comparison."""
if not isinstance(other, self.__class__):
return False
# Can only decide equality if both have a unique id
if self.unique_id is None or other.unique_id is None:
return False
# Ensure they belong to the same platform
if self.platform is not None or other.platform is not None:
if self.platform is None or other.platform is None:
return False
if self.platform.platform != other.platform.platform:
return False
return self.unique_id == other.unique_id
def __repr__(self) -> str:
"""Return the representation."""
return f"<Entity {self.name}: {self.state}>"
async def async_request_call(self, coro: Awaitable) -> None:
"""Process request batched."""
if self.parallel_updates:
await self.parallel_updates.acquire()
try:
await coro
finally:
if self.parallel_updates:
self.parallel_updates.release()
class ToggleEntity(Entity):
"""An abstract class for entities that can be turned on and off."""
@property
def state(self) -> str:
"""Return the state."""
return STATE_ON if self.is_on else STATE_OFF
@property
def is_on(self) -> bool:
"""Return True if entity is on."""
raise NotImplementedError()
def turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
raise NotImplementedError()
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn the entity on."""
assert self.hass is not None
await self.hass.async_add_executor_job(ft.partial(self.turn_on, **kwargs))
def turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
raise NotImplementedError()
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn the entity off."""
assert self.hass is not None
await self.hass.async_add_executor_job(ft.partial(self.turn_off, **kwargs))
def toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
self.turn_off(**kwargs)
else:
self.turn_on(**kwargs)
async def async_toggle(self, **kwargs: Any) -> None:
"""Toggle the entity."""
if self.is_on:
await self.async_turn_off(**kwargs)
else:
await self.async_turn_on(**kwargs)
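# Illustrative only: a minimal sketch of a concrete ToggleEntity as an
# integration would implement it (the class and attribute names are invented).
#
#     class DemoSwitch(ToggleEntity):
#         def __init__(self):
#             self._is_on = False
#
#         @property
#         def is_on(self) -> bool:
#             return self._is_on
#
#         def turn_on(self, **kwargs) -> None:
#             self._is_on = True
#             self.schedule_update_ha_state()
#
#         def turn_off(self, **kwargs) -> None:
#             self._is_on = False
#             self.schedule_update_ha_state()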
|