import logging
from homeassistant import config_entries
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN
@config_entries.HANDLERS.register(DOMAIN)
class SomfyFlowHandler(config_entry_oauth2_flow.AbstractOAuth2FlowHandler):
"""Config flow to handle Somfy OAuth2 authentication."""
DOMAIN = DOMAIN
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
async def async_step_user(self, user_input=None):
"""Handle a flow start."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason="single_instance_allowed")
return await super().async_step_user(user_input)
|
from ..cog_utils import CompositeMetaClass
from .audioset import AudioSetCommands
from .controller import PlayerControllerCommands
from .equalizer import EqualizerCommands
from .llset import LavalinkSetupCommands
from .localtracks import LocalTrackCommands
from .miscellaneous import MiscellaneousCommands
from .player import PlayerCommands
from .playlists import PlaylistCommands
from .queue import QueueCommands
class Commands(
AudioSetCommands,
PlayerControllerCommands,
EqualizerCommands,
LavalinkSetupCommands,
LocalTrackCommands,
MiscellaneousCommands,
PlayerCommands,
PlaylistCommands,
QueueCommands,
metaclass=CompositeMetaClass,
):
"""Class joining all command subclasses"""
|
import asyncio
import voluptuous as vol
from homeassistant.config import async_log_exception
from homeassistant.const import CONF_SEQUENCE
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.script import async_validate_action_config
from . import DOMAIN, SCRIPT_ENTRY_SCHEMA
async def async_validate_config_item(hass, config, full_config=None):
"""Validate config item."""
config = SCRIPT_ENTRY_SCHEMA(config)
config[CONF_SEQUENCE] = await asyncio.gather(
*[
async_validate_action_config(hass, action)
for action in config[CONF_SEQUENCE]
]
)
return config
async def _try_async_validate_config_item(hass, object_id, config, full_config=None):
"""Validate config item."""
try:
cv.slug(object_id)
config = await async_validate_config_item(hass, config, full_config)
except (vol.Invalid, HomeAssistantError) as ex:
async_log_exception(ex, DOMAIN, full_config or config, hass)
return None
return config
async def async_validate_config(hass, config):
"""Validate config."""
if DOMAIN in config:
validated_config = {}
for object_id, cfg in config[DOMAIN].items():
cfg = await _try_async_validate_config_item(hass, object_id, cfg, config)
if cfg is not None:
validated_config[object_id] = cfg
config[DOMAIN] = validated_config
return config
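# Hedged illustration (hypothetical YAML, not taken from the integration docs):
# async_validate_config expects config[DOMAIN] to map slug object_ids to script
# entries whose "sequence" actions are validated one by one, e.g.:
#
#     script:
#       example_script:          # object_id, checked with cv.slug
#         sequence:
#           - delay: "00:00:05"  # each action goes through async_validate_action_config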
|
from datetime import timedelta
from pyruckus.exceptions import AuthenticationError
from homeassistant import config_entries
from homeassistant.components.ruckus_unleashed.const import DOMAIN
from homeassistant.util import utcnow
from tests.async_mock import patch
from tests.common import async_fire_time_changed
from tests.components.ruckus_unleashed import CONFIG, DEFAULT_SYSTEM_INFO, DEFAULT_TITLE
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus.connect",
return_value=None,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.mesh_name",
return_value=DEFAULT_TITLE,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.system_info",
return_value=DEFAULT_SYSTEM_INFO,
), patch(
"homeassistant.components.ruckus_unleashed.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.ruckus_unleashed.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == DEFAULT_TITLE
assert result2["data"] == CONFIG
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_invalid_auth(hass):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus.connect",
side_effect=AuthenticationError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_form_cannot_connect(hass):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus.connect",
side_effect=ConnectionError,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_unknown_error(hass):
"""Test we handle unknown error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus.connect",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
async def test_form_cannot_connect_unknown_serial(hass):
"""Test we handle cannot connect error on invalid serial number."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus.connect",
return_value=None,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.mesh_name",
return_value=DEFAULT_TITLE,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.system_info",
return_value={},
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_duplicate_error(hass):
"""Test we handle duplicate error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.ruckus_unleashed.Ruckus.connect",
return_value=None,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.mesh_name",
return_value=DEFAULT_TITLE,
), patch(
"homeassistant.components.ruckus_unleashed.Ruckus.system_info",
return_value=DEFAULT_SYSTEM_INFO,
):
await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
future = utcnow() + timedelta(minutes=60)
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
CONFIG,
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
|
from osgeo import osr
from pyproj import Proj, transform
def lat_lon_to_pixel(raster_dataset, location):
"""From zacharybears.com/using-python-to-translate-latlon-locations-to-pixels-on-a-geotiff/."""
ds = raster_dataset
gt = ds.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
srs_lat_lon = srs.CloneGeogCS()
ct = osr.CoordinateTransformation(srs_lat_lon, srs)
new_location = [None, None]
# Change the point locations into the GeoTransform space
(new_location[1], new_location[0], holder) = ct.TransformPoint(location[1], location[0])
# Translate the x and y coordinates into pixel values
x = (new_location[1] - gt[0]) / gt[1]
y = (new_location[0] - gt[3]) / gt[5]
return (int(x), int(y))
def pixel_to_lat_lon(raster_dataset, col, row):
"""From zacharybears.com/using-python-to-translate-latlon-locations-to-pixels-on-a-geotiff/."""
ds = raster_dataset
gt = ds.GetGeoTransform()
srs = osr.SpatialReference()
srs.ImportFromWkt(ds.GetProjection())
srs_lat_lon = srs.CloneGeogCS()
ct = osr.CoordinateTransformation(srs, srs_lat_lon)
ulon = col * gt[1] + gt[0]
ulat = row * gt[5] + gt[3]
# Transform the point into the GeoTransform space
(lon, lat, holder) = ct.TransformPoint(ulon, ulat)
return (lat, lon)
def pixel_to_lat_lon_web_mercator(raster_dataset, col, row):
"""Convert a pixel on the raster_dataset to web mercator (epsg:3857)."""
spatial_reference = osr.SpatialReference()
spatial_reference.ImportFromWkt(raster_dataset.GetProjection())
ds_spatial_reference_proj_string = spatial_reference.ExportToProj4()
in_proj = Proj(ds_spatial_reference_proj_string)
out_proj = Proj(init='epsg:3857')
geo_transform = raster_dataset.GetGeoTransform()
ulon = col * geo_transform[1] + geo_transform[0]
ulat = row * geo_transform[5] + geo_transform[3]
x2, y2 = transform(in_proj, out_proj, ulon, ulat)
x2, y2 = out_proj(x2, y2, inverse=True)
return x2, y2
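# Hedged usage sketch (not part of the original module): 'example.tif' is a
# hypothetical GeoTIFF path and the (lat, lon) pair is illustrative only.
if __name__ == '__main__':
    from osgeo import gdal

    ds = gdal.Open('example.tif')  # assumed to exist for this sketch
    col, row = lat_lon_to_pixel(ds, (37.7749, -122.4194))  # (lat, lon) -> pixel
    print(pixel_to_lat_lon(ds, col, row))  # round-trip back to (lat, lon)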
|
from typing import Optional
from gi.repository import Gio, GLib, Gtk
from meld.conf import _
from meld.misc import get_modal_parent, modal_dialog
from meld.ui.filechooser import MeldFileChooserDialog
def trash_or_confirm(gfile: Gio.File) -> bool:
"""Trash or delete the given Gio.File
Files and folders will be moved to the system Trash location
without confirmation. If they can't be trashed, then the user is
prompted for an irreversible deletion.
:rtype: bool
:returns: whether the file was deleted
"""
try:
gfile.trash(None)
return True
except GLib.Error as e:
# Handle not-supported, as that's due to the trashing target
# being a (probably network) mount-point, not an underlying
# problem. We also have to handle the generic FAILED code
# because that's what we get with NFS mounts.
expected_error = (
e.code == Gio.IOErrorEnum.NOT_SUPPORTED or
e.code == Gio.IOErrorEnum.FAILED
)
if not expected_error:
raise RuntimeError(str(e))
file_type = gfile.query_file_type(
Gio.FileQueryInfoFlags.NONE, None)
if file_type == Gio.FileType.DIRECTORY:
raise RuntimeError(_("Deleting remote folders is not supported"))
elif file_type != Gio.FileType.REGULAR:
raise RuntimeError(_("Not a file or directory"))
delete_permanently = modal_dialog(
primary=_(
"“{}” can’t be put in the trash. Do you want to "
"delete it immediately?".format(
GLib.markup_escape_text(gfile.get_parse_name()))
),
secondary=_(
"This remote location does not support sending items "
"to the trash."
),
buttons=[
(_("_Cancel"), Gtk.ResponseType.CANCEL),
(_("_Delete Permanently"), Gtk.ResponseType.OK),
],
)
if delete_permanently != Gtk.ResponseType.OK:
return False
try:
gfile.delete(None)
# TODO: Deleting remote folders involves reimplementing
# shutil.rmtree for gio, and then calling
# self.recursively_update().
except Exception as e:
raise RuntimeError(str(e))
return True
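# Hedged usage sketch (not from the original module): '/tmp/example.txt' is a
# hypothetical path; trash_or_confirm() tries the Trash first and only prompts
# for permanent deletion when trashing isn't supported for that location.
#     gfile = Gio.File.new_for_path('/tmp/example.txt')
#     deleted = trash_or_confirm(gfile)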
def prompt_save_filename(
title: str, parent: Optional[Gtk.Widget] = None) -> Optional[Gio.File]:
dialog = MeldFileChooserDialog(
title,
transient_for=get_modal_parent(parent),
action=Gtk.FileChooserAction.SAVE,
)
dialog.set_default_response(Gtk.ResponseType.ACCEPT)
response = dialog.run()
gfile = dialog.get_file()
dialog.destroy()
if response != Gtk.ResponseType.ACCEPT or not gfile:
return None
try:
file_info = gfile.query_info(
'standard::name,standard::display-name',
Gio.FileQueryInfoFlags.NONE,
None,
)
except GLib.Error as err:
if err.code == Gio.IOErrorEnum.NOT_FOUND:
return gfile
raise
# The selected file exists, so we need to prompt for overwrite.
parent_folder = gfile.get_parent()
parent_name = parent_folder.get_parse_name() if parent_folder else ''
file_name = file_info.get_display_name()
replace = modal_dialog(
primary=_("Replace file “%s”?") % file_name,
secondary=_(
"A file with this name already exists in “%s”.\n"
"If you replace the existing file, its contents "
"will be lost.") % parent_name,
buttons=[
(_("_Cancel"), Gtk.ResponseType.CANCEL),
(_("_Replace"), Gtk.ResponseType.OK),
],
messagetype=Gtk.MessageType.WARNING,
)
if replace != Gtk.ResponseType.OK:
return None
return gfile
|
import asyncio
import logging
from aiohttp import ClientError
from griddypower.async_api import LOAD_ZONES, AsyncGriddy
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.helpers import aiohttp_client
from .const import CONF_LOADZONE
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({vol.Required(CONF_LOADZONE): vol.In(LOAD_ZONES)})
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
client_session = aiohttp_client.async_get_clientsession(hass)
try:
await AsyncGriddy(
client_session, settlement_point=data[CONF_LOADZONE]
).async_getnow()
except (asyncio.TimeoutError, ClientError) as err:
raise CannotConnect from err
# Return info that you want to store in the config entry.
return {"title": f"Load Zone {data[CONF_LOADZONE]}"}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Griddy Power."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
info = None
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(user_input[CONF_LOADZONE])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input):
"""Handle import."""
await self.async_set_unique_id(user_input[CONF_LOADZONE])
self._abort_if_unique_id_configured()
return await self.async_step_user(user_input)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
import asyncio
from collections import Counter
import itertools
from typing import Any, Callable, Iterator, List, Optional, Tuple, cast
import voluptuous as vol
from homeassistant.components import light
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_EFFECT_LIST,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_FLASH,
SUPPORT_TRANSITION,
SUPPORT_WHITE_VALUE,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_ENTITIES,
CONF_NAME,
STATE_ON,
STATE_UNAVAILABLE,
)
from homeassistant.core import CoreState, State
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import color as color_util
from . import GroupEntity
# mypy: allow-incomplete-defs, allow-untyped-calls, allow-untyped-defs
# mypy: no-check-untyped-defs
DEFAULT_NAME = "Light Group"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_ENTITIES): cv.entities_domain(light.DOMAIN),
}
)
SUPPORT_GROUP_LIGHT = (
SUPPORT_BRIGHTNESS
| SUPPORT_COLOR_TEMP
| SUPPORT_EFFECT
| SUPPORT_FLASH
| SUPPORT_COLOR
| SUPPORT_TRANSITION
| SUPPORT_WHITE_VALUE
)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
"""Initialize light.group platform."""
async_add_entities(
[LightGroup(cast(str, config.get(CONF_NAME)), config[CONF_ENTITIES])]
)
class LightGroup(GroupEntity, light.LightEntity):
"""Representation of a light group."""
def __init__(self, name: str, entity_ids: List[str]) -> None:
"""Initialize a light group."""
self._name = name
self._entity_ids = entity_ids
self._is_on = False
self._available = False
self._icon = "mdi:lightbulb-group"
self._brightness: Optional[int] = None
self._hs_color: Optional[Tuple[float, float]] = None
self._color_temp: Optional[int] = None
self._min_mireds: Optional[int] = 154
self._max_mireds: Optional[int] = 500
self._white_value: Optional[int] = None
self._effect_list: Optional[List[str]] = None
self._effect: Optional[str] = None
self._supported_features: int = 0
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
async def async_state_changed_listener(event):
"""Handle child updates."""
self.async_set_context(event.context)
await self.async_defer_or_update_ha_state()
assert self.hass
self.async_on_remove(
async_track_state_change_event(
self.hass, self._entity_ids, async_state_changed_listener
)
)
if self.hass.state == CoreState.running:
await self.async_update()
return
await super().async_added_to_hass()
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def is_on(self) -> bool:
"""Return the on/off state of the light group."""
return self._is_on
@property
def available(self) -> bool:
"""Return whether the light group is available."""
return self._available
@property
def icon(self):
"""Return the light group icon."""
return self._icon
@property
def brightness(self) -> Optional[int]:
"""Return the brightness of this light group between 0..255."""
return self._brightness
@property
def hs_color(self) -> Optional[Tuple[float, float]]:
"""Return the HS color value [float, float]."""
return self._hs_color
@property
def color_temp(self) -> Optional[int]:
"""Return the CT color value in mireds."""
return self._color_temp
@property
def min_mireds(self) -> Optional[int]:
"""Return the coldest color_temp that this light group supports."""
return self._min_mireds
@property
def max_mireds(self) -> Optional[int]:
"""Return the warmest color_temp that this light group supports."""
return self._max_mireds
@property
def white_value(self) -> Optional[int]:
"""Return the white value of this light group between 0..255."""
return self._white_value
@property
def effect_list(self) -> Optional[List[str]]:
"""Return the list of supported effects."""
return self._effect_list
@property
def effect(self) -> Optional[str]:
"""Return the current effect."""
return self._effect
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def should_poll(self) -> bool:
"""No polling needed for a light group."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes for the light group."""
return {ATTR_ENTITY_ID: self._entity_ids}
async def async_turn_on(self, **kwargs):
"""Forward the turn_on command to all lights in the light group."""
data = {ATTR_ENTITY_ID: self._entity_ids}
emulate_color_temp_entity_ids = []
if ATTR_BRIGHTNESS in kwargs:
data[ATTR_BRIGHTNESS] = kwargs[ATTR_BRIGHTNESS]
if ATTR_HS_COLOR in kwargs:
data[ATTR_HS_COLOR] = kwargs[ATTR_HS_COLOR]
if ATTR_COLOR_TEMP in kwargs:
data[ATTR_COLOR_TEMP] = kwargs[ATTR_COLOR_TEMP]
# Create a new entity list to mutate
updated_entities = list(self._entity_ids)
# Walk through initial entity ids, split entity lists by support
for entity_id in self._entity_ids:
state = self.hass.states.get(entity_id)
if not state:
continue
support = state.attributes.get(ATTR_SUPPORTED_FEATURES)
# Only pass color temperature to supported entity_ids
if bool(support & SUPPORT_COLOR) and not bool(
support & SUPPORT_COLOR_TEMP
):
emulate_color_temp_entity_ids.append(entity_id)
updated_entities.remove(entity_id)
data[ATTR_ENTITY_ID] = updated_entities
if ATTR_WHITE_VALUE in kwargs:
data[ATTR_WHITE_VALUE] = kwargs[ATTR_WHITE_VALUE]
if ATTR_EFFECT in kwargs:
data[ATTR_EFFECT] = kwargs[ATTR_EFFECT]
if ATTR_TRANSITION in kwargs:
data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
if ATTR_FLASH in kwargs:
data[ATTR_FLASH] = kwargs[ATTR_FLASH]
if not emulate_color_temp_entity_ids:
await self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_ON,
data,
blocking=True,
context=self._context,
)
return
emulate_color_temp_data = data.copy()
temp_k = color_util.color_temperature_mired_to_kelvin(
emulate_color_temp_data[ATTR_COLOR_TEMP]
)
hs_color = color_util.color_temperature_to_hs(temp_k)
emulate_color_temp_data[ATTR_HS_COLOR] = hs_color
del emulate_color_temp_data[ATTR_COLOR_TEMP]
emulate_color_temp_data[ATTR_ENTITY_ID] = emulate_color_temp_entity_ids
await asyncio.gather(
self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_ON,
data,
blocking=True,
context=self._context,
),
self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_ON,
emulate_color_temp_data,
blocking=True,
context=self._context,
),
)
async def async_turn_off(self, **kwargs):
"""Forward the turn_off command to all lights in the light group."""
data = {ATTR_ENTITY_ID: self._entity_ids}
if ATTR_TRANSITION in kwargs:
data[ATTR_TRANSITION] = kwargs[ATTR_TRANSITION]
await self.hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_OFF,
data,
blocking=True,
context=self._context,
)
async def async_update(self):
"""Query all members and determine the light group state."""
all_states = [self.hass.states.get(x) for x in self._entity_ids]
states: List[State] = list(filter(None, all_states))
on_states = [state for state in states if state.state == STATE_ON]
self._is_on = len(on_states) > 0
self._available = any(state.state != STATE_UNAVAILABLE for state in states)
self._brightness = _reduce_attribute(on_states, ATTR_BRIGHTNESS)
self._hs_color = _reduce_attribute(on_states, ATTR_HS_COLOR, reduce=_mean_tuple)
self._white_value = _reduce_attribute(on_states, ATTR_WHITE_VALUE)
self._color_temp = _reduce_attribute(on_states, ATTR_COLOR_TEMP)
self._min_mireds = _reduce_attribute(
states, ATTR_MIN_MIREDS, default=154, reduce=min
)
self._max_mireds = _reduce_attribute(
states, ATTR_MAX_MIREDS, default=500, reduce=max
)
self._effect_list = None
all_effect_lists = list(_find_state_attributes(states, ATTR_EFFECT_LIST))
if all_effect_lists:
# Merge all effects from all effect_lists with a union merge.
self._effect_list = list(set().union(*all_effect_lists))
self._effect = None
all_effects = list(_find_state_attributes(on_states, ATTR_EFFECT))
if all_effects:
# Report the most common effect.
effects_count = Counter(itertools.chain(all_effects))
self._effect = effects_count.most_common(1)[0][0]
self._supported_features = 0
for support in _find_state_attributes(states, ATTR_SUPPORTED_FEATURES):
# Merge supported features by emulating support for every feature
# we find.
self._supported_features |= support
# Bitwise-and the supported features with the GroupedLight's features
# so that we don't break in the future when a new feature is added.
self._supported_features &= SUPPORT_GROUP_LIGHT
def _find_state_attributes(states: List[State], key: str) -> Iterator[Any]:
"""Find attributes with matching key from states."""
for state in states:
value = state.attributes.get(key)
if value is not None:
yield value
def _mean_int(*args):
"""Return the mean of the supplied values."""
return int(sum(args) / len(args))
def _mean_tuple(*args):
"""Return the mean values along the columns of the supplied values."""
return tuple(sum(x) / len(x) for x in zip(*args))
def _reduce_attribute(
states: List[State],
key: str,
default: Optional[Any] = None,
reduce: Callable[..., Any] = _mean_int,
) -> Any:
"""Find the first attribute matching key from states.
If none are found, return default.
"""
attrs = list(_find_state_attributes(states, key))
if not attrs:
return default
if len(attrs) == 1:
return attrs[0]
return reduce(*attrs)
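# Hedged illustration (plain sample values, not real Home Assistant states):
# shows how the reduce helpers above combine member attributes.
if __name__ == "__main__":
    # Column-wise mean of two hs_color tuples.
    assert _mean_tuple((30.0, 40.0), (50.0, 60.0)) == (40.0, 50.0)
    # Mean of two brightness values, truncated to int.
    assert _mean_int(100, 201) == 150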
|
import argparse
import logging
import sys
from typing import Sequence
import service_configuration_lib
from kubernetes.client import V1beta1CustomResourceDefinition
from kubernetes.client.rest import ApiException
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import paasta_prefixed
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Creates/updates kubernetes CRDs.")
parser.add_argument(
"service_list",
nargs="+",
help="The list of services to create or update CRDs for",
metavar="SERVICE",
)
parser.add_argument(
"-c",
"--cluster",
dest="cluster",
metavar="CLUSTER",
default=None,
help="Kubernetes cluster name",
)
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
"-v", "--verbose", action="store_true", dest="verbose", default=False
)
args = parser.parse_args()
return args
def main() -> None:
args = parse_args()
soa_dir = args.soa_dir
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
if args.cluster:
cluster = args.cluster
else:
system_paasta_config = load_system_paasta_config()
cluster = system_paasta_config.get_cluster()
kube_client = KubeClient()
success = setup_kube_crd(
kube_client=kube_client,
cluster=cluster,
services=args.service_list,
soa_dir=soa_dir,
)
sys.exit(0 if success else 1)
def setup_kube_crd(
kube_client: KubeClient,
cluster: str,
services: Sequence[str],
soa_dir: str = DEFAULT_SOA_DIR,
) -> bool:
existing_crds = kube_client.apiextensions.list_custom_resource_definition(
label_selector=paasta_prefixed("service")
)
success = True
for service in services:
crd_config = service_configuration_lib.read_extra_service_information(
service, f"crd-{cluster}", soa_dir=soa_dir
)
if not crd_config:
log.info("nothing to deploy")
continue
metadata = crd_config.get("metadata", {})
if "labels" not in metadata:
metadata["labels"] = {}
metadata["labels"]["yelp.com/paasta_service"] = service
metadata["labels"][paasta_prefixed("service")] = service
desired_crd = V1beta1CustomResourceDefinition(
api_version=crd_config.get("apiVersion"),
kind=crd_config.get("kind"),
metadata=metadata,
spec=crd_config.get("spec"),
)
existing_crd = None
for crd in existing_crds.items:
if crd.metadata.name == desired_crd.metadata["name"]:
existing_crd = crd
break
try:
if existing_crd:
desired_crd.metadata[
"resourceVersion"
] = existing_crd.metadata.resource_version
kube_client.apiextensions.replace_custom_resource_definition(
name=desired_crd.metadata["name"], body=desired_crd
)
else:
try:
kube_client.apiextensions.create_custom_resource_definition(
body=desired_crd
)
except ValueError as err:
# TODO: kubernetes server will sometimes reply with conditions:null,
# figure out how to deal with this correctly, for more details:
# https://github.com/kubernetes/kubernetes/pull/64996
if "`conditions`, must not be `None`" in str(err):
pass
else:
raise err
log.info(f"deployed {desired_crd.metadata['name']} for {cluster}:{service}")
except ApiException as exc:
log.error(
f"error deploying crd for {cluster}:{service}, "
f"status: {exc.status}, reason: {exc.reason}"
)
log.debug(exc.body)
success = False
return success
if __name__ == "__main__":
main()
|
import typing
import numpy as np
from .unit import Unit
class FixedLength(Unit):
"""
FixedLengthUnit Class.
Process unit to get the fixed length text.
Examples:
>>> from matchzoo.preprocessors.units import FixedLength
>>> fixedlen = FixedLength(3)
>>> fixedlen.transform(list(range(1, 6))) == [3, 4, 5]
True
>>> fixedlen.transform(list(range(1, 3))) == [0, 1, 2]
True
"""
def __init__(
self,
text_length: int,
pad_value: typing.Union[int, str] = 0,
pad_mode: str = 'pre',
truncate_mode: str = 'pre'
):
"""
Class initialization.
:param text_length: fixed length of the text.
:param pad_value: if text length is smaller than :attr:`text_length`,
filling text with :attr:`pad_value`.
:param pad_mode: String, `pre` or `post`:
pad either before or after each sequence.
:param truncate_mode: String, `pre` or `post`:
remove values from sequences larger than :attr:`text_length`,
either at the beginning or at the end of the sequences.
"""
self._text_length = text_length
self._pad_value = pad_value
self._pad_mode = pad_mode
self._truncate_mode = truncate_mode
def transform(self, input_: list) -> list:
"""
Transform list of tokenized tokens into the fixed length text.
:param input_: list of tokenized tokens.
:return tokens: list of tokenized tokens in fixed length.
"""
# the padding process cannot handle an empty list as input
if len(input_) == 0:
input_ = [self._pad_value]
np_tokens = np.array(input_)
fixed_tokens = np.full([self._text_length], self._pad_value,
dtype=np_tokens.dtype)
if self._truncate_mode == 'pre':
trunc_tokens = input_[-self._text_length:]
elif self._truncate_mode == 'post':
trunc_tokens = input_[:self._text_length]
else:
raise ValueError('{} is not a valid '
'truncate mode.'.format(self._truncate_mode))
if self._pad_mode == 'post':
fixed_tokens[:len(trunc_tokens)] = trunc_tokens
elif self._pad_mode == 'pre':
fixed_tokens[-len(trunc_tokens):] = trunc_tokens
else:
raise ValueError('{} is not a valid '
'pad mode.'.format(self._pad_mode))
return fixed_tokens.tolist()
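# Hedged usage sketch (added for illustration, not part of the original unit):
# the 'post' modes truncate/pad at the end of the sequence instead of the front.
if __name__ == '__main__':
    fixedlen_post = FixedLength(3, pad_mode='post', truncate_mode='post')
    assert fixedlen_post.transform([1, 2, 3, 4, 5]) == [1, 2, 3]  # keeps the head
    assert fixedlen_post.transform([1, 2]) == [1, 2, 0]           # pads at the end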
|
from itertools import izip
try:
import pynvml
USE_PYTHON_BINDING = True
except ImportError:
USE_PYTHON_BINDING = False
import diamond.collector
class NvidiaGPUCollector(diamond.collector.ProcessCollector):
def get_default_config_help(self):
config_help = super(NvidiaGPUCollector, self).get_default_config_help()
config_help.update({
'bin': 'The path to the nvidia-smi binary',
'stats': 'A list of Nvidia GPU stats to collect. '
'Use `nvidia-smi --help-query-gpu` for more information'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(NvidiaGPUCollector, self).get_default_config()
config.update({
'path': 'nvidia',
'bin': '/usr/bin/nvidia-smi',
'stats': [
'index',
'memory.total',
'memory.used',
'memory.free',
'utilization.gpu',
'utilization.memory',
'temperature.gpu'
]
})
return config
def collect_via_nvidia_smi(self, stats_config):
"""
Use nvidia smi command line tool to collect metrics
:param stats_config:
:return:
"""
raw_output = self.run_command([
'--query-gpu={query_gpu}'.format(query_gpu=','.join(stats_config)),
'--format=csv,nounits,noheader'
])
if raw_output is None:
return
results = raw_output[0].strip().split("\n")
for result in results:
stats = result.strip().split(',')
assert len(stats) == len(stats_config)
index = stats[0]
for stat_name, metric in izip(stats_config[1:], stats[1:]):
metric_name = 'gpu_{index}.{stat_name}'.format(
index=str(index),
stat_name=stat_name
)
self.publish(metric_name, metric)
def collect_via_pynvml(self, stats_config):
"""
Use pynvml python binding to collect metrics
:param stats_config:
:return:
"""
try:
NVML_TEMPERATURE_GPU = 0
pynvml.nvmlInit()
device_count = pynvml.nvmlDeviceGetCount()
for device_index in xrange(device_count):
handle = pynvml.nvmlDeviceGetHandleByIndex(device_index)
memoryInfo = pynvml.nvmlDeviceGetMemoryInfo(handle)
utilizationRates = pynvml.nvmlDeviceGetUtilizationRates(handle)
metrics = {
'memory.total': memoryInfo.total / 1024 / 1024,
'memory.used': memoryInfo.used / 1024 / 1024,
'memory.free': memoryInfo.free / 1024 / 1024,
'utilization.gpu': utilizationRates.gpu,
'utilization.memory': utilizationRates.memory,
'temperature.gpu':
pynvml.nvmlDeviceGetTemperature(handle,
NVML_TEMPERATURE_GPU)
}
for stat_name in stats_config[1:]:
metric = metrics.get(stat_name)
if metric:
metric_name = 'gpu_{index}.{stat_name}'.format(
index=str(device_index),
stat_name=stat_name
)
self.publish(metric_name, metric)
finally:
pynvml.nvmlShutdown()
def collect(self):
"""
Collector GPU stats
"""
stats_config = self.config['stats']
if USE_PYTHON_BINDING:
collect_metrics = self.collect_via_pynvml
else:
collect_metrics = self.collect_via_nvidia_smi
collect_metrics(stats_config)
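# Hedged illustration (made-up numbers, not real nvidia-smi output): with the
# default stats list, collect_via_nvidia_smi expects one CSV row per GPU such as
#     "0, 11441, 500, 10941, 10, 5, 45"
# and would publish metrics named gpu_0.memory.total, gpu_0.memory.used, ...,
# gpu_0.temperature.gpu with the corresponding values.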
|
import logging
import pymitv
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import CONF_HOST, CONF_NAME, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
DEFAULT_NAME = "Xiaomi TV"
_LOGGER = logging.getLogger(__name__)
SUPPORT_XIAOMI_TV = SUPPORT_VOLUME_STEP | SUPPORT_TURN_ON | SUPPORT_TURN_OFF
# No host is needed for configuration, but it can be set.
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Xiaomi TV platform."""
# If a hostname is set, discovery is skipped.
host = config.get(CONF_HOST)
name = config.get(CONF_NAME)
if host is not None:
# Check if there's a valid TV at the IP address.
if not pymitv.Discover().check_ip(host):
_LOGGER.error("Could not find Xiaomi TV with specified IP: %s", host)
else:
# Register TV with Home Assistant.
add_entities([XiaomiTV(host, name)])
else:
# Otherwise, discover TVs on network.
add_entities(XiaomiTV(tv, DEFAULT_NAME) for tv in pymitv.Discover().scan())
class XiaomiTV(MediaPlayerEntity):
"""Represent the Xiaomi TV for Home Assistant."""
def __init__(self, ip, name):
"""Receive IP address and name to construct class."""
# Initialize the Xiaomi TV.
self._tv = pymitv.TV(ip)
# Default name value, only to be overridden by user.
self._name = name
self._state = STATE_OFF
@property
def name(self):
"""Return the display name of this TV."""
return self._name
@property
def state(self):
"""Return _state variable, containing the appropriate constant."""
return self._state
@property
def assumed_state(self):
"""Indicate that state is assumed."""
return True
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_XIAOMI_TV
def turn_off(self):
"""
Instruct the TV to go to sleep.
This is done instead of turning off,
because the TV won't accept any input when turned off. Thus, the user
would be unable to turn the TV back on, unless it's done manually.
"""
if self._state != STATE_OFF:
self._tv.sleep()
self._state = STATE_OFF
def turn_on(self):
"""Wake the TV back up from sleep."""
if self._state != STATE_ON:
self._tv.wake()
self._state = STATE_ON
def volume_up(self):
"""Increase volume by one."""
self._tv.volume_up()
def volume_down(self):
"""Decrease volume by one."""
self._tv.volume_down()
|
from perfkitbenchmarker import linux_packages
GIT_REPO = 'https://github.com/joyent/node.git'
GIT_TAG = 'v0.11.14'
NODE_DIR = '%s/node' % linux_packages.INSTALL_DIR
def _Install(vm):
"""Installs the node.js package on the VM."""
vm.Install('build_tools')
vm.RemoteCommand('git clone {0} {1}'.format(GIT_REPO, NODE_DIR))
vm.RemoteCommand('cd {0} && git checkout {1}'.format(NODE_DIR, GIT_TAG))
vm.RemoteCommand('cd {0} && ./configure --prefix=/usr'.format(NODE_DIR))
vm.RemoteCommand('cd {0} && make && sudo make install'.format(NODE_DIR))
def YumInstall(vm):
"""Installs the node.js package on the VM."""
_Install(vm)
def AptInstall(vm):
"""Installs the node.js package on the VM."""
_Install(vm)
def _Uninstall(vm):
"""Uninstalls the node.js package on the VM."""
vm.RemoteCommand('cd {0} && sudo make uninstall'.format(NODE_DIR))
def YumUninstall(vm):
"""Uninstalls the node.js package on the VM."""
_Uninstall(vm)
def AptUninstall(vm):
"""Uninstalls the node.js package on the VM."""
_Uninstall(vm)
|
from hashlib import md5
import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy.lib import auth_basic
from cherrypy.test import helper
class BasicAuthTest(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self):
return 'This is public.'
class BasicProtected:
@cherrypy.expose
def index(self):
return "Hello %s, you've been authorized." % (
cherrypy.request.login)
class BasicProtected2:
@cherrypy.expose
def index(self):
return "Hello %s, you've been authorized." % (
cherrypy.request.login)
class BasicProtected2_u:
@cherrypy.expose
def index(self):
return "Hello %s, you've been authorized." % (
cherrypy.request.login)
userpassdict = {'xuser': 'xpassword'}
userhashdict = {'xuser': md5(b'xpassword').hexdigest()}
userhashdict_u = {'xюзер': md5(ntob('їжа', 'utf-8')).hexdigest()}
def checkpasshash(realm, user, password):
p = userhashdict.get(user)
return p and p == md5(ntob(password)).hexdigest() or False
def checkpasshash_u(realm, user, password):
p = userhashdict_u.get(user)
return p and p == md5(ntob(password, 'utf-8')).hexdigest() or False
basic_checkpassword_dict = auth_basic.checkpassword_dict(userpassdict)
conf = {
'/basic': {
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'wonderland',
'tools.auth_basic.checkpassword': basic_checkpassword_dict
},
'/basic2': {
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'wonderland',
'tools.auth_basic.checkpassword': checkpasshash,
'tools.auth_basic.accept_charset': 'ISO-8859-1',
},
'/basic2_u': {
'tools.auth_basic.on': True,
'tools.auth_basic.realm': 'wonderland',
'tools.auth_basic.checkpassword': checkpasshash_u,
'tools.auth_basic.accept_charset': 'UTF-8',
},
}
root = Root()
root.basic = BasicProtected()
root.basic2 = BasicProtected2()
root.basic2_u = BasicProtected2_u()
cherrypy.tree.mount(root, config=conf)
def testPublic(self):
self.getPage('/')
self.assertStatus('200 OK')
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
self.assertBody('This is public.')
def testBasic(self):
self.getPage('/basic/')
self.assertStatus(401)
self.assertHeader(
'WWW-Authenticate',
'Basic realm="wonderland", charset="UTF-8"'
)
self.getPage('/basic/',
[('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
self.assertStatus(401)
self.getPage('/basic/',
[('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
self.assertStatus('200 OK')
self.assertBody("Hello xuser, you've been authorized.")
def testBasic2(self):
self.getPage('/basic2/')
self.assertStatus(401)
self.assertHeader('WWW-Authenticate', 'Basic realm="wonderland"')
self.getPage('/basic2/',
[('Authorization', 'Basic eHVzZXI6eHBhc3N3b3JX')])
self.assertStatus(401)
self.getPage('/basic2/',
[('Authorization', 'Basic eHVzZXI6eHBhc3N3b3Jk')])
self.assertStatus('200 OK')
self.assertBody("Hello xuser, you've been authorized.")
def testBasic2_u(self):
self.getPage('/basic2_u/')
self.assertStatus(401)
self.assertHeader(
'WWW-Authenticate',
'Basic realm="wonderland", charset="UTF-8"'
)
self.getPage('/basic2_u/',
[('Authorization', 'Basic eNGO0LfQtdGAOtGX0LbRgw==')])
self.assertStatus(401)
self.getPage('/basic2_u/',
[('Authorization', 'Basic eNGO0LfQtdGAOtGX0LbQsA==')])
self.assertStatus('200 OK')
self.assertBody("Hello xюзер, you've been authorized.")
|
import redis
import sys
from flask import current_app
from lemur.extensions import sentry
from lemur.factory import create_app
if current_app:
flask_app = current_app
else:
flask_app = create_app()
class RedisHandler:
def __init__(self, host=flask_app.config.get('REDIS_HOST', 'localhost'),
port=flask_app.config.get('REDIS_PORT', 6379),
db=flask_app.config.get('REDIS_DB', 0)):
self.host = host
self.port = port
self.db = db
def redis(self, db=0):
# The decode_responses flag here directs the client to convert the responses from Redis into Python strings
# using the default encoding utf-8. This is client specific.
function = f"{__name__}.{sys._getframe().f_code.co_name}"
try:
red = redis.StrictRedis(host=self.host, port=self.port, db=self.db, encoding="utf-8", decode_responses=True)
red.set("test", 0)
except redis.ConnectionError:
log_data = {
"function": function,
"message": "Redis Connection error",
"host": self.host,
"port": self.port
}
current_app.logger.error(log_data)
sentry.captureException()
return red
def redis_get(key, default=None):
red = RedisHandler().redis()
try:
v = red.get(key)
except redis.exceptions.ConnectionError:
v = None
if not v:
return default
return v
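# Hedged usage sketch (hypothetical key and fallback value): redis_get returns
# `default` when the key is missing or Redis is unreachable.
if __name__ == "__main__":
    print(redis_get("my_key", default="fallback"))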
|
import argparse
import csv
FIELDS = {
'status': 0,
'transaction_id': 1,
'packet_num': (2, 4),
'proto_type': 4,
'data_size': 5,
'class': 6,
'command': 7,
'params': (8, 24)
}
def expand_payload(payload):
"""
Converts a payload string to a nice format
:param payload: String of hex digits
:type payload: str
:return: Dictionary
:rtype dict
"""
chunks = [payload[i:i + 2] for i in range(0, len(payload), 2)]
result = {}
for header, num_range in FIELDS.items():
if isinstance(num_range, tuple):
result[header] = ''.join(chunks[num_range[0]:num_range[1]])
else:
result[header] = chunks[num_range]
return result
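# Hedged worked example (made-up payload, not captured traffic): a 24-byte
# payload is split into 2-character hex chunks and mapped through FIELDS.
#     payload = '015a0001020010020300112233445566778899aabbccddee'
#     expand_payload(payload)['status']       # '01'
#     expand_payload(payload)['packet_num']   # '0001'  (chunks 2..3 joined)
#     expand_payload(payload)['params']       # chunks 8..23 joined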
def parse_args():
"""
Parses command line arguments
:return: Argparse arguments object
"""
parser = argparse.ArgumentParser(description="Extracts info from CSV")
parser.add_argument("file", metavar='FILE', type=str, help="CSV file")
return parser.parse_args()
def run():
"""
Main function
"""
args = parse_args()
data = []
with open(args.file, 'r', newline='') as csv_file:
csv_object = csv.DictReader(csv_file, delimiter=',', quotechar='"')
for line in csv_object:
if line['Leftover Capture Data'] == '':
continue
# Now we have data
data.append(expand_payload(line['Leftover Capture Data']))
format_string = "{0:<6} {1:<2} {2:<10} {3:<4} {4:<5} {5:<7} {6}"
print(format_string.format('Status', 'ID', 'Packet Num', 'Size', 'Class', 'Command', 'Params'))
for frame in data:
print(format_string.format(frame['status'], frame['transaction_id'], frame['packet_num'],
frame['data_size'], frame['class'], frame['command'], frame['params']))
print("")
if __name__ == '__main__':
run()
|
import os
import sys
from setuptools import find_packages, setup
from setuptools.command.test import test as TestCommand
class PyTest(TestCommand):
def finalize_options(self):
TestCommand.finalize_options(self)
self.test_args = ['-x', 'build/lib/pygal']
self.test_suite = True
def run_tests(self):
# import here, because outside the eggs aren't loaded
import pytest
errno = pytest.main(self.test_args)
sys.exit(errno)
ROOT = os.path.dirname(__file__)
# Explicitly specify the encoding of pygal/__init__.py if we're on py3.
kwargs = {}
if sys.version_info[0] == 3:
kwargs['encoding'] = 'utf-8'
cairosvg = 'cairosvg'
else:
cairosvg = 'cairosvg==0.5'
tests_requirements = [
"pyquery", "flask", cairosvg, 'lxml', 'pygal_maps_world', 'pygal_maps_fr',
'pygal_maps_ch', 'coveralls',
'pytest-runner', 'pytest-cov', 'pytest-flake8', 'pytest-isort',
'pytest'
]
about = {}
with open(os.path.join(
os.path.dirname(__file__), "pygal", "__about__.py")) as f:
exec(f.read(), about)
setup(
name=about['__title__'],
version=about['__version__'],
description=about['__summary__'],
url=about['__uri__'],
author=about['__author__'],
author_email=about['__email__'],
license=about['__license__'],
platforms="Any",
packages=find_packages(),
provides=['pygal'],
scripts=["pygal_gen.py"],
keywords=[
"svg", "chart", "graph", "diagram", "plot", "histogram", "kiviat"],
setup_requires=['pytest-runner'],
cmdclass={'test': PyTest},
package_data={'pygal': ['css/*', 'graph/maps/*.svg']},
extras_require={
'lxml': ['lxml'],
'docs': ['sphinx', 'sphinx_rtd_theme', 'pygal_sphinx_directives'],
'png': [cairosvg],
'test': tests_requirements
},
classifiers=[
"Development Status :: 4 - Beta",
"Environment :: Console",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: "
"GNU Lesser General Public License v3 or later (LGPLv3+)",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3",
"Topic :: Multimedia :: Graphics :: Presentation"])
|
import numpy as np
from ..io import BaseRaw
from ..utils import _validate_type, warn, logger, verbose
@verbose
def realign_raw(raw, other, t_raw, t_other, verbose=None):
"""Realign two simultaneous recordings.
Due to clock drift, recordings at a given same sample rate made by two
separate devices simultaneously can become out of sync over time. This
function uses event times captured by both acquisition devices to resample
``other`` to match ``raw``.
Parameters
----------
raw : instance of Raw
The first raw instance.
other : instance of Raw
The second raw instance. It will be resampled to match ``raw``.
t_raw : array-like, shape (n_events,)
The times of shared events in ``raw`` relative to ``raw.times[0]`` (0).
Typically these could be events on some TTL channel like
``find_events(raw)[:, 0] - raw.first_samp``.
t_other : array-like, shape (n_events,)
The times of shared events in ``other`` relative to ``other.times[0]``.
%(verbose)s
Notes
-----
This function operates inplace. It will:
1. Estimate the zero-order (start offset) and first-order (clock drift)
correction.
2. Crop the start of ``raw`` or ``other``, depending on which started
recording first.
3. Resample ``other`` to match ``raw`` based on the clock drift.
4. Crop the end of ``raw`` or ``other``, depending on which stopped
recording first (and the clock drift rate).
This function is primarily designed to work on recordings made at the same
sample rate, but it can also operate on recordings made at different
sample rates to resample and deal with clock drift simultaneously.
.. versionadded:: 0.22
"""
from scipy import stats
_validate_type(raw, BaseRaw, 'raw')
_validate_type(other, BaseRaw, 'other')
t_raw = np.array(t_raw, float)
t_other = np.array(t_other, float)
if t_raw.ndim != 1 or t_raw.shape != t_other.shape:
raise ValueError('t_raw and t_other must be 1D with the same shape, '
f'got shapes {t_raw.shape} and {t_other.shape}')
if len(t_raw) < 20:
warn('Fewer than 20 times passed, results may be unreliable')
# 1. Compute correction factors
coef = np.polyfit(t_other, t_raw, deg=1)
r, p = stats.pearsonr(t_other, t_raw)
msg = f'Linear correlation computed as R={r:0.3f} and p={p:0.2e}'
if p > 0.05 or r <= 0:
raise ValueError(msg + ', cannot resample safely')
if p > 1e-6:
warn(msg + ', results may be unreliable')
else:
logger.info(msg)
dr_ms_s = 1000 * abs(1 - coef[0])
logger.info(
f'Drift rate: {1000 * dr_ms_s:0.1f} μs/sec '
f'(total drift over {raw.times[-1]:0.1f} sec recording: '
f'{raw.times[-1] * dr_ms_s:0.1f} ms)')
# 2. Crop start of recordings to match using the zero-order term
msg = f'Cropping {coef[1]:0.3f} sec from the start of '
if coef[1] > 0: # need to crop start of raw to match other
logger.info(msg + 'raw')
raw.crop(coef[1], None)
t_raw -= coef[1]
else: # need to crop start of other to match raw
logger.info(msg + 'other')
other.crop(-coef[1], None)
t_other += coef[1]
# 3. Resample data using the first-order term
logger.info('Resampling other')
coef = coef[0]
sfreq_new = raw.info['sfreq'] * coef
other.load_data().resample(sfreq_new, verbose=True)
other.info['sfreq'] = raw.info['sfreq']
other._update_times()
# 4. Crop the end of one of the recordings if necessary
delta = raw.times[-1] - other.times[-1]
msg = f'Cropping {abs(delta):0.3f} sec from the end of '
if delta > 0:
logger.info(msg + 'raw')
raw.crop(0, other.times[-1])
elif delta < 0:
logger.info(msg + 'other')
other.crop(0, raw.times[-1])
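# Hedged illustration (synthetic numbers, not an MNE example): step 1 is just a
# first-order fit of the shared event times. If "other" started 0.5 s after
# "raw" and its clock drifts by ~100 ppm, e.g.
#     t_raw_demo = np.arange(30.) * 10.
#     t_other_demo = (t_raw_demo - 0.5) / 1.0001
# then np.polyfit(t_other_demo, t_raw_demo, deg=1) recovers roughly
# (slope ~ 1.0001, offset ~ 0.5), which drive the resampling and cropping above.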
|
import datetime
import pytest
import pytz
from arctic.date import mktz, datetime_to_ms, ms_to_datetime
def assert_roundtrip(tz):
ts = datetime.datetime(1982, 7, 1, 16, 5)
ts1 = ts.replace(tzinfo=tz)
ts2 = ms_to_datetime(datetime_to_ms(ts1.astimezone(mktz("UTC"))), tz)
assert(ts2.hour == ts1.hour)
assert ts2 == ts1
def get_tz():
tz = pytz.timezone("Europe/London")
tmp = ms_to_datetime(0, tz)
tz = tmp.tzinfo
return tz
def test_UTC_roundtrip():
tz = pytz.timezone("UTC")
assert_roundtrip(tz)
def test_weird_get_tz_local():
tz = get_tz()
assert_roundtrip(tz)
@pytest.mark.xfail
def test_pytz_London():
# Don't use pytz
tz = pytz.timezone("Europe/London")
assert_roundtrip(tz)
def test_mktz_London():
tz = mktz("Europe/London")
assert_roundtrip(tz)
def test_datetime_roundtrip_local_no_tz():
pdt = datetime.datetime(2012, 6, 12, 12, 12, 12, 123000)
pdt2 = ms_to_datetime(datetime_to_ms(pdt)).replace(tzinfo=None)
assert pdt2 == pdt
pdt = datetime.datetime(2012, 1, 12, 12, 12, 12, 123000)
pdt2 = ms_to_datetime(datetime_to_ms(pdt)).replace(tzinfo=None)
assert pdt2 == pdt
def test_datetime_roundtrip_local_tz():
pdt = datetime.datetime(2012, 6, 12, 12, 12, 12, 123000, tzinfo=mktz())
pdt2 = ms_to_datetime(datetime_to_ms(pdt))
assert pdt2 == pdt
pdt = datetime.datetime(2012, 1, 12, 12, 12, 12, 123000, tzinfo=mktz())
pdt2 = ms_to_datetime(datetime_to_ms(pdt))
assert pdt2 == pdt
def test_datetime_roundtrip_est_tz():
pdt = datetime.datetime(2012, 6, 12, 12, 12, 12, 123000, tzinfo=mktz('EST'))
pdt2 = ms_to_datetime(datetime_to_ms(pdt))
assert pdt2.replace(tzinfo=mktz()) == pdt
pdt = datetime.datetime(2012, 1, 12, 12, 12, 12, 123000, tzinfo=mktz('EST'))
pdt2 = ms_to_datetime(datetime_to_ms(pdt))
assert pdt2.replace(tzinfo=mktz()) == pdt
@pytest.mark.parametrize("microseconds,expected", [
(807000, 1074069004807),
(807243, 1074069004807),
(807675, 1074069004807)
])
def test_millisecond_conversion(microseconds, expected):
pdt = datetime.datetime(2004, 1, 14, 8, 30, 4, microseconds, tzinfo=pytz.utc)
pdt2 = datetime_to_ms(pdt)
assert pdt2 == expected
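# Hedged note (added for illustration): all three parametrized microsecond
# values map to the same expected timestamp because datetime_to_ms truncates
# sub-millisecond precision, e.g. 807243 us and 807675 us both contribute
# 807 ms on top of the epoch seconds for 2004-01-14 08:30:04 UTC (1074069004).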
|
import numpy as np
import tempfile
import unittest
from chainer import testing
from chainercv.utils import read_label
from chainercv.utils import write_image
@testing.parameterize(*testing.product({
'file_obj': [False, True],
'format': ['bmp', 'jpeg', 'png'],
'size': [(48, 32)],
'dtype': [np.float32, np.uint8, np.int32, bool],
}))
class TestReadLabel(unittest.TestCase):
def setUp(self):
if self.file_obj:
self.f = tempfile.TemporaryFile()
self.file = self.f
format = self.format
else:
if self.format == 'jpeg':
suffix = '.jpg'
else:
suffix = '.' + self.format
self.f = tempfile.NamedTemporaryFile(suffix=suffix, delete=False)
self.file = self.f.name
format = None
self.img = np.random.randint(
0, 255, size=self.size, dtype=np.uint8)
write_image(self.img[None], self.file, format=format)
if self.file_obj:
self.file.seek(0)
def test_read_label(self):
if self.dtype == np.int32:
img = read_label(self.file)
else:
img = read_label(self.file, dtype=self.dtype)
self.assertEqual(img.shape, self.size)
self.assertEqual(img.dtype, self.dtype)
if self.format in {'bmp', 'png'}:
np.testing.assert_equal(img, self.img.astype(self.dtype))
def test_read_label_mutable(self):
img = read_label(self.file, dtype=self.dtype)
img[:] = 0
np.testing.assert_equal(img, 0)
testing.run_module(__name__, __file__)
|
from datetime import timedelta
import logging
from pysuez import SuezClient
from pysuez.client import PySuezError
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, VOLUME_LITERS
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_COUNTER_ID = "counter_id"
SCAN_INTERVAL = timedelta(hours=12)
COMPONENT_ICON = "mdi:water-pump"
COMPONENT_NAME = "Suez Water Client"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_COUNTER_ID): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
username = config[CONF_USERNAME]
password = config[CONF_PASSWORD]
counter_id = config[CONF_COUNTER_ID]
try:
client = SuezClient(username, password, counter_id)
if not client.check_credentials():
_LOGGER.warning("Wrong username and/or password")
return
except PySuezError:
_LOGGER.warning("Unable to create Suez Client")
return
add_entities([SuezSensor(client)], True)
class SuezSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, client):
"""Initialize the data object."""
self._attributes = {}
self._state = None
self._available = None
self.client = client
@property
def name(self):
"""Return the name of the sensor."""
return COMPONENT_NAME
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return VOLUME_LITERS
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
@property
def icon(self):
"""Return the icon of the sensor."""
return COMPONENT_ICON
def _fetch_data(self):
"""Fetch latest data from Suez."""
try:
self.client.update()
# _state holds the volume of water consumed during the previous day
self._state = self.client.state
self._available = True
self._attributes["attribution"] = self.client.attributes["attribution"]
self._attributes["this_month_consumption"] = {}
for item in self.client.attributes["thisMonthConsumption"]:
self._attributes["this_month_consumption"][
item
] = self.client.attributes["thisMonthConsumption"][item]
self._attributes["previous_month_consumption"] = {}
for item in self.client.attributes["previousMonthConsumption"]:
self._attributes["previous_month_consumption"][
item
] = self.client.attributes["previousMonthConsumption"][item]
self._attributes["highest_monthly_consumption"] = self.client.attributes[
"highestMonthlyConsumption"
]
self._attributes["last_year_overall"] = self.client.attributes[
"lastYearOverAll"
]
self._attributes["this_year_overall"] = self.client.attributes[
"thisYearOverAll"
]
self._attributes["history"] = {}
for item in self.client.attributes["history"]:
self._attributes["history"][item] = self.client.attributes["history"][
item
]
except PySuezError:
self._available = False
_LOGGER.warning("Unable to fetch data")
def update(self):
"""Return the latest collected data from Linky."""
self._fetch_data()
_LOGGER.debug("Suez data state is: %s", self._state)
|
from flask_script import Manager
import arrow
from datetime import timedelta
from sqlalchemy import cast
from sqlalchemy_utils import ArrowType
from lemur import database
from lemur.extensions import metrics, sentry
from lemur.endpoints.models import Endpoint
manager = Manager(usage="Handles all endpoint related tasks.")
@manager.option(
"-ttl",
"--time-to-live",
type=int,
dest="ttl",
default=2,
help="Time in hours, which endpoint has not been refreshed to remove the endpoint.",
)
def expire(ttl):
"""
Remove all endpoints that have not been recently updated.
"""
print("[+] Staring expiration of old endpoints.")
try:
now = arrow.utcnow()
expiration = now - timedelta(hours=ttl)
endpoints = database.session_query(Endpoint).filter(
cast(Endpoint.last_updated, ArrowType) <= expiration
)
for endpoint in endpoints:
print(
"[!] Expiring endpoint: {name} Last Updated: {last_updated}".format(
name=endpoint.name, last_updated=endpoint.last_updated
)
)
database.delete(endpoint)
metrics.send("endpoint_expired", "counter", 1)
print("[+] Finished expiration.")
except Exception as e:
sentry.captureException()
|
import numpy as np
import pytest
from pytest import raises
from numpy.testing import assert_array_equal, assert_allclose
from os import path as op
import pickle
from itertools import product
import mne
from mne.channels import equalize_channels
from mne.utils import sum_squared, run_tests_if_main, _TempDir, requires_h5py
from mne.time_frequency import (csd_fourier, csd_multitaper,
csd_morlet, csd_array_fourier,
csd_array_multitaper, csd_array_morlet,
tfr_morlet,
CrossSpectralDensity, read_csd,
pick_channels_csd, psd_multitaper)
from mne.time_frequency.csd import _sym_mat_to_vector, _vector_to_sym_mat
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_fname = op.join(base_dir, 'test-eve.fif')
def _make_csd():
"""Make a simple CrossSpectralDensity object."""
frequencies = [1., 2., 3., 4.]
n_freqs = len(frequencies)
names = ['CH1', 'CH2', 'CH3']
tmin, tmax = (0., 1.)
data = np.arange(6. * n_freqs).reshape(n_freqs, 6).T
return CrossSpectralDensity(data, names, frequencies, 1, tmin, tmax)
def test_csd():
"""Test constructing a CrossSpectralDensity."""
csd = CrossSpectralDensity([1, 2, 3], ['CH1', 'CH2'], frequencies=1,
n_fft=1, tmin=0, tmax=1)
assert_array_equal(csd._data, [[1], [2], [3]]) # Conversion to 2D array
assert_array_equal(csd.frequencies, [1]) # Conversion to 1D array
# Channels don't match
raises(ValueError, CrossSpectralDensity, [1, 2, 3],
['CH1', 'CH2', 'Too many!'], tmin=0, tmax=1, frequencies=1, n_fft=1)
raises(ValueError, CrossSpectralDensity, [1, 2, 3], ['too little'],
tmin=0, tmax=1, frequencies=1, n_fft=1)
# Frequencies don't match
raises(ValueError, CrossSpectralDensity,
[[1, 2], [3, 4], [5, 6]], ['CH1', 'CH2'],
tmin=0, tmax=1, frequencies=1, n_fft=1)
# Invalid dims
raises(ValueError, CrossSpectralDensity, [[[1]]], ['CH1'], frequencies=1,
n_fft=1, tmin=0, tmax=1)
def test_csd_repr():
"""Test string representation of CrossSpectralDensity."""
csd = _make_csd()
assert str(csd) == ('<CrossSpectralDensity | n_channels=3, time=0.0 to '
'1.0 s, frequencies=1.0, 2.0, 3.0, 4.0 Hz.>')
assert str(csd.mean()) == ('<CrossSpectralDensity | n_channels=3, '
'time=0.0 to 1.0 s, frequencies=1.0-4.0 Hz.>')
csd_binned = csd.mean(fmin=[1, 3], fmax=[2, 4])
assert str(csd_binned) == ('<CrossSpectralDensity | n_channels=3, '
'time=0.0 to 1.0 s, frequencies=1.0-2.0, '
'3.0-4.0 Hz.>')
csd_binned = csd.mean(fmin=[1, 2], fmax=[1, 4])
assert str(csd_binned) == ('<CrossSpectralDensity | n_channels=3, '
'time=0.0 to 1.0 s, frequencies=1.0, 2.0-4.0 '
'Hz.>')
csd_no_time = csd.copy()
csd_no_time.tmin = None
csd_no_time.tmax = None
assert str(csd_no_time) == (
'<CrossSpectralDensity | n_channels=3, time=unknown, '
'frequencies=1.0, 2.0, 3.0, 4.0 Hz.>'
)
def test_csd_mean():
"""Test averaging frequency bins of CrossSpectralDensity."""
csd = _make_csd()
# Test different ways to average across all frequencies
avg = [[9], [10], [11], [12], [13], [14]]
assert_array_equal(csd.mean()._data, avg)
assert_array_equal(csd.mean(fmin=None, fmax=4)._data, avg)
assert_array_equal(csd.mean(fmin=1, fmax=None)._data, avg)
assert_array_equal(csd.mean(fmin=0, fmax=None)._data, avg)
assert_array_equal(csd.mean(fmin=1, fmax=4)._data, avg)
# Test averaging across frequency bins
csd_binned = csd.mean(fmin=[1, 3], fmax=[2, 4])
assert_array_equal(
csd_binned._data,
[[3, 15],
[4, 16],
[5, 17],
[6, 18],
[7, 19],
[8, 20]],
)
csd_binned = csd.mean(fmin=[1, 3], fmax=[1, 4])
assert_array_equal(
csd_binned._data,
[[0, 15],
[1, 16],
[2, 17],
[3, 18],
[4, 19],
[5, 20]],
)
# This flag should be set after averaging
assert csd.mean()._is_sum
    # Test construction of the .frequencies attribute
assert csd.mean().frequencies == [[1, 2, 3, 4]]
assert (csd.mean(fmin=[1, 3], fmax=[2, 4]).frequencies ==
[[1, 2], [3, 4]])
# Test invalid inputs
raises(ValueError, csd.mean, fmin=1, fmax=[2, 3])
raises(ValueError, csd.mean, fmin=[1, 2], fmax=[3])
raises(ValueError, csd.mean, fmin=[1, 2], fmax=[1, 1])
# Taking the mean twice should raise an error
raises(RuntimeError, csd.mean().mean)
def test_csd_get_frequency_index():
"""Test the _get_frequency_index method of CrossSpectralDensity."""
csd = _make_csd()
assert csd._get_frequency_index(1) == 0
assert csd._get_frequency_index(2) == 1
assert csd._get_frequency_index(4) == 3
assert csd._get_frequency_index(0.9) == 0
assert csd._get_frequency_index(2.1) == 1
assert csd._get_frequency_index(4.1) == 3
# Frequency can be off by a maximum of 1
raises(IndexError, csd._get_frequency_index, csd.frequencies[-1] + 1.0001)
def test_csd_pick_frequency():
"""Test the pick_frequency method of CrossSpectralDensity."""
csd = _make_csd()
csd2 = csd.pick_frequency(freq=2)
assert csd2.frequencies == [2]
assert_array_equal(
csd2.get_data(),
[[6, 7, 8],
[7, 9, 10],
[8, 10, 11]]
)
csd2 = csd.pick_frequency(index=1)
assert csd2.frequencies == [2]
assert_array_equal(
csd2.get_data(),
[[6, 7, 8],
[7, 9, 10],
[8, 10, 11]]
)
# Nonexistent frequency
raises(IndexError, csd.pick_frequency, -1)
# Nonexistent index
raises(IndexError, csd.pick_frequency, index=10)
# Invalid parameters
raises(ValueError, csd.pick_frequency)
raises(ValueError, csd.pick_frequency, freq=2, index=1)
def test_csd_get_data():
"""Test the get_data method of CrossSpectralDensity."""
csd = _make_csd()
# CSD matrix corresponding to 2 Hz.
assert_array_equal(
csd.get_data(frequency=2),
[[6, 7, 8],
[7, 9, 10],
[8, 10, 11]]
)
# Mean CSD matrix
assert_array_equal(
csd.mean().get_data(),
[[9, 10, 11],
[10, 12, 13],
[11, 13, 14]]
)
# Average across frequency bins, select bin
assert_array_equal(
csd.mean(fmin=[1, 3], fmax=[2, 4]).get_data(index=1),
[[15, 16, 17],
[16, 18, 19],
[17, 19, 20]]
)
# Invalid inputs
raises(ValueError, csd.get_data)
raises(ValueError, csd.get_data, frequency=1, index=1)
raises(IndexError, csd.get_data, frequency=15)
raises(ValueError, csd.mean().get_data, frequency=1)
raises(IndexError, csd.mean().get_data, index=15)
@requires_h5py
def test_csd_save():
"""Test saving and loading a CrossSpectralDensity."""
csd = _make_csd()
tempdir = _TempDir()
fname = op.join(tempdir, 'csd.h5')
csd.save(fname)
csd2 = read_csd(fname)
assert_array_equal(csd._data, csd2._data)
assert csd.tmin == csd2.tmin
assert csd.tmax == csd2.tmax
assert csd.ch_names == csd2.ch_names
assert csd.frequencies == csd2.frequencies
assert csd._is_sum == csd2._is_sum
def test_csd_pickle():
"""Test pickling and unpickling a CrossSpectralDensity."""
csd = _make_csd()
tempdir = _TempDir()
fname = op.join(tempdir, 'csd.dat')
with open(fname, 'wb') as f:
pickle.dump(csd, f)
with open(fname, 'rb') as f:
csd2 = pickle.load(f)
assert_array_equal(csd._data, csd2._data)
assert csd.tmin == csd2.tmin
assert csd.tmax == csd2.tmax
assert csd.ch_names == csd2.ch_names
assert csd.frequencies == csd2.frequencies
assert csd._is_sum == csd2._is_sum
def test_pick_channels_csd():
"""Test selecting channels from a CrossSpectralDensity."""
csd = _make_csd()
csd = pick_channels_csd(csd, ['CH1', 'CH3'])
assert csd.ch_names == ['CH1', 'CH3']
assert_array_equal(csd._data, [[0, 6, 12, 18],
[2, 8, 14, 20],
[5, 11, 17, 23]])
def test_sym_mat_to_vector():
"""Test converting between vectors and symmetric matrices."""
mat = np.array([[0, 1, 2, 3],
[1, 4, 5, 6],
[2, 5, 7, 8],
[3, 6, 8, 9]])
assert_array_equal(_sym_mat_to_vector(mat),
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
vec = np.arange(10)
assert_array_equal(_vector_to_sym_mat(vec),
[[0, 1, 2, 3],
[1, 4, 5, 6],
[2, 5, 7, 8],
[3, 6, 8, 9]])
    # Test complex values: mirrored off-diagonal entries become complex
    # conjugates and the diagonal is made real.
comp_vec = np.arange(3) + 1j
assert_array_equal(_vector_to_sym_mat(comp_vec),
[[0. + 0.j, 1. + 1.j],
[1. - 1.j, 2. + 0.j]])
# Test preservation of data type
assert _sym_mat_to_vector(mat.astype(np.int8)).dtype == np.int8
assert _vector_to_sym_mat(vec.astype(np.int8)).dtype == np.int8
assert _sym_mat_to_vector(mat.astype(np.float16)).dtype == np.float16
assert _vector_to_sym_mat(vec.astype(np.float16)).dtype == np.float16
def _generate_coherence_data():
"""Create an epochs object with coherence at 22Hz between channels 1 and 3.
A base 10 Hz sine wave is generated for all channels, but with different
phases, which means no actual coherence. A 22Hz sine wave is laid on top
for channels 1 and 3, with the same phase, so there is coherence between
these channels.
"""
ch_names = ['CH1', 'CH2', 'CH3']
sfreq = 50.
info = mne.create_info(ch_names, sfreq, 'eeg')
tstep = 1. / sfreq
n_samples = int(10 * sfreq) # 10 seconds of data
times = np.arange(n_samples) * tstep
events = np.array([[0, 1, 1]]) # one event
# Phases for the signals
phases = np.arange(info['nchan']) * 0.3 * np.pi
# Generate 10 Hz sine waves with different phases
signal = np.vstack([np.sin(times * 2 * np.pi * 10 + phase)
for phase in phases])
data = np.zeros((1, info['nchan'], n_samples))
data[0, :, :] = signal
# Generate 22Hz sine wave at the first and last electrodes with the same
# phase.
signal = np.sin(times * 2 * np.pi * 22)
data[0, [0, -1], :] += signal
return mne.EpochsArray(data, info, events, baseline=(0, times[-1]))
def _test_csd_matrix(csd):
"""Perform a suite of tests on a CSD matrix."""
# Check shape of the CSD matrix
n_chan = len(csd.ch_names)
assert n_chan == 3
assert csd.ch_names == ['CH1', 'CH2', 'CH3']
n_freqs = len(csd.frequencies)
assert n_freqs == 3
assert csd._data.shape == (6, 3) # Only upper triangle of CSD matrix
# Extract CSD ndarrays. Diagonals are PSDs.
csd_10 = csd.get_data(index=0)
csd_22 = csd.get_data(index=2)
power_10 = np.diag(csd_10)
power_22 = np.diag(csd_22)
# Check if the CSD matrices are hermitian
assert np.all(np.tril(csd_10).T.conj() == np.triu(csd_10))
assert np.all(np.tril(csd_22).T.conj() == np.triu(csd_22))
# Off-diagonals show phase difference
assert np.abs(csd_10[0, 1].imag) > 0.4
assert np.abs(csd_10[0, 2].imag) > 0.4
assert np.abs(csd_10[1, 2].imag) > 0.4
# No phase differences at 22 Hz
assert np.all(np.abs(csd_22[0, 2].imag) < 1E-3)
    # Test CSD between the two channels that have a 22 Hz signal and the one
    # that has only a 10 Hz signal
assert np.abs(csd_22[0, 2]) > np.abs(csd_22[0, 1])
assert np.abs(csd_22[0, 2]) > np.abs(csd_22[1, 2])
# Check that electrodes/frequency combinations with signal have more
# power than frequencies without signal.
power_15 = np.diag(csd.get_data(index=1))
assert np.all(power_10 > power_15)
assert np.all(power_22[[0, -1]] > power_15[[0, -1]])
def _test_fourier_multitaper_parameters(epochs, csd_epochs, csd_array):
"""Parameter tests for csd_*_fourier and csd_*_multitaper."""
raises(ValueError, csd_epochs, epochs, fmin=20, fmax=10)
raises(ValueError, csd_array, epochs._data, epochs.info['sfreq'],
epochs.tmin, fmin=20, fmax=10)
raises(ValueError, csd_epochs, epochs, fmin=20, fmax=20.1)
raises(ValueError, csd_array, epochs._data, epochs.info['sfreq'],
epochs.tmin, fmin=20, fmax=20.1)
raises(ValueError, csd_epochs, epochs, tmin=0.15, tmax=0.1)
raises(ValueError, csd_array, epochs._data, epochs.info['sfreq'],
epochs.tmin, tmin=0.15, tmax=0.1)
raises(ValueError, csd_epochs, epochs, tmin=-1, tmax=10)
raises(ValueError, csd_array, epochs._data, epochs.info['sfreq'],
epochs.tmin, tmin=-1, tmax=10)
raises(ValueError, csd_epochs, epochs, tmin=10, tmax=11)
raises(ValueError, csd_array, epochs._data, epochs.info['sfreq'],
epochs.tmin, tmin=10, tmax=11)
# Test checks for data types and sizes
diff_types = [np.random.randn(3, 5), "error"]
err_data = [np.random.randn(3, 5), np.random.randn(2, 4)]
raises(ValueError, csd_array, err_data, sfreq=1)
raises(ValueError, csd_array, diff_types, sfreq=1)
raises(ValueError, csd_array, np.random.randn(3), sfreq=1)
def test_csd_fourier():
"""Test computing cross-spectral density using short-term Fourier."""
epochs = _generate_coherence_data()
sfreq = epochs.info['sfreq']
_test_fourier_multitaper_parameters(epochs, csd_fourier, csd_array_fourier)
# Compute CSDs using various parameters
times = [(None, None), (1, 9)]
as_arrays = [False, True]
parameters = product(times, as_arrays)
for (tmin, tmax), as_array in parameters:
if as_array:
csd = csd_array_fourier(epochs.get_data(), sfreq, epochs.tmin,
fmin=9, fmax=23, tmin=tmin, tmax=tmax,
ch_names=epochs.ch_names)
else:
csd = csd_fourier(epochs, fmin=9, fmax=23, tmin=tmin, tmax=tmax)
if tmin is None and tmax is None:
assert csd.tmin == 0 and csd.tmax == 9.98
else:
assert csd.tmin == tmin and csd.tmax == tmax
csd = csd.mean([9.9, 14.9, 21.9], [10.1, 15.1, 22.1])
_test_csd_matrix(csd)
# For the next test, generate a simple sine wave with a known power
times = np.arange(20 * sfreq) / sfreq # 20 seconds of signal
signal = np.sin(2 * np.pi * 10 * times)[None, None, :] # 10 Hz wave
signal_power_per_sample = sum_squared(signal) / len(times)
# Power per sample should not depend on time window length
for tmax in [12, 18]:
t_mask = (times <= tmax)
n_samples = sum(t_mask)
# Power per sample should not depend on number of FFT points
for add_n_fft in [0, 30]:
n_fft = n_samples + add_n_fft
csd = csd_array_fourier(signal, sfreq, tmax=tmax,
n_fft=n_fft).sum().get_data()
first_samp = csd[0, 0]
fourier_power_per_sample = np.abs(first_samp) * sfreq / n_fft
assert abs(signal_power_per_sample -
fourier_power_per_sample) < 0.001
def test_csd_multitaper():
"""Test computing cross-spectral density using multitapers."""
epochs = _generate_coherence_data()
sfreq = epochs.info['sfreq']
_test_fourier_multitaper_parameters(epochs, csd_multitaper,
csd_array_multitaper)
# Compute CSDs using various parameters
times = [(None, None), (1, 9)]
as_arrays = [False, True]
adaptives = [False, True]
parameters = product(times, as_arrays, adaptives)
for (tmin, tmax), as_array, adaptive in parameters:
if as_array:
csd = csd_array_multitaper(epochs.get_data(), sfreq, epochs.tmin,
adaptive=adaptive, fmin=9, fmax=23,
tmin=tmin, tmax=tmax,
ch_names=epochs.ch_names)
else:
csd = csd_multitaper(epochs, adaptive=adaptive, fmin=9, fmax=23,
tmin=tmin, tmax=tmax)
if tmin is None and tmax is None:
assert csd.tmin == 0 and csd.tmax == 9.98
else:
assert csd.tmin == tmin and csd.tmax == tmax
csd = csd.mean([9.9, 14.9, 21.9], [10.1, 15.1, 22.1])
_test_csd_matrix(csd)
# Test equivalence with PSD
psd, psd_freqs = psd_multitaper(epochs, fmin=1e-3,
normalization='full') # omit DC
csd = csd_multitaper(epochs)
assert_allclose(psd_freqs, csd.frequencies)
csd = np.array([np.diag(csd.get_data(index=ii))
for ii in range(len(csd))]).T
assert_allclose(psd[0], csd)
# For the next test, generate a simple sine wave with a known power
times = np.arange(20 * sfreq) / sfreq # 20 seconds of signal
signal = np.sin(2 * np.pi * 10 * times)[None, None, :] # 10 Hz wave
signal_power_per_sample = sum_squared(signal) / len(times)
# Power per sample should not depend on time window length
for tmax in [12, 18]:
t_mask = (times <= tmax)
n_samples = sum(t_mask)
n_fft = len(times)
# Power per sample should not depend on number of tapers
for n_tapers in [1, 2, 5]:
bandwidth = sfreq / float(n_samples) * (n_tapers + 1)
csd_mt = csd_array_multitaper(signal, sfreq, tmax=tmax,
bandwidth=bandwidth,
n_fft=n_fft).sum().get_data()
mt_power_per_sample = np.abs(csd_mt[0, 0]) * sfreq / n_fft
assert abs(signal_power_per_sample - mt_power_per_sample) < 0.001
def test_csd_morlet():
"""Test computing cross-spectral density using Morlet wavelets."""
epochs = _generate_coherence_data()
sfreq = epochs.info['sfreq']
# Compute CSDs by a variety of methods
freqs = [10, 15, 22]
n_cycles = [20, 30, 44]
times = [(None, None), (1, 9)]
as_arrays = [False, True]
parameters = product(times, as_arrays)
for (tmin, tmax), as_array in parameters:
if as_array:
csd = csd_array_morlet(epochs.get_data(), sfreq, freqs,
t0=epochs.tmin, n_cycles=n_cycles,
tmin=tmin, tmax=tmax,
ch_names=epochs.ch_names)
else:
csd = csd_morlet(epochs, frequencies=freqs, n_cycles=n_cycles,
tmin=tmin, tmax=tmax)
if tmin is None and tmax is None:
assert csd.tmin == 0 and csd.tmax == 9.98
else:
assert csd.tmin == tmin and csd.tmax == tmax
_test_csd_matrix(csd)
# CSD diagonals should contain PSD
tfr = tfr_morlet(epochs, freqs, n_cycles, return_itc=False)
power = np.mean(tfr.data, 2)
csd = csd_morlet(epochs, frequencies=freqs, n_cycles=n_cycles)
assert_allclose(csd._data[[0, 3, 5]] * sfreq, power)
# Test using plain convolution instead of FFT
csd = csd_morlet(epochs, frequencies=freqs, n_cycles=n_cycles,
use_fft=False)
assert_allclose(csd._data[[0, 3, 5]] * sfreq, power)
# Test baselining warning
epochs_nobase = epochs.copy()
epochs_nobase.baseline = None
epochs_nobase.info['highpass'] = 0
with pytest.warns(RuntimeWarning, match='baseline'):
csd = csd_morlet(epochs_nobase, frequencies=[10], decim=20)
def test_equalize_channels():
"""Test equalization of channels for instances of CrossSpectralDensity."""
csd1 = _make_csd()
csd2 = csd1.copy().pick_channels(['CH2', 'CH1'], ordered=True)
csd1, csd2 = equalize_channels([csd1, csd2])
assert csd1.ch_names == ['CH1', 'CH2']
assert csd2.ch_names == ['CH1', 'CH2']
run_tests_if_main()
|
from aiohue.sensors import (
TYPE_ZLL_LIGHTLEVEL,
TYPE_ZLL_ROTARY,
TYPE_ZLL_SWITCH,
TYPE_ZLL_TEMPERATURE,
)
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
LIGHT_LUX,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.helpers.entity import Entity
from .const import DOMAIN as HUE_DOMAIN
from .sensor_base import SENSOR_CONFIG_MAP, GenericHueSensor, GenericZLLSensor
LIGHT_LEVEL_NAME_FORMAT = "{} light level"
REMOTE_NAME_FORMAT = "{} battery level"
TEMPERATURE_NAME_FORMAT = "{} temperature"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Defer sensor setup to the shared sensor module."""
await hass.data[HUE_DOMAIN][
config_entry.entry_id
].sensor_manager.async_register_component("sensor", async_add_entities)
class GenericHueGaugeSensorEntity(GenericZLLSensor, Entity):
"""Parent class for all 'gauge' Hue device sensors."""
    async def _async_update_ha_state(self, *args, **kwargs):
        await self.async_update_ha_state(*args, **kwargs)
class HueLightLevel(GenericHueGaugeSensorEntity):
"""The light level sensor entity for a Hue motion sensor device."""
device_class = DEVICE_CLASS_ILLUMINANCE
unit_of_measurement = LIGHT_LUX
@property
def state(self):
"""Return the state of the device."""
if self.sensor.lightlevel is None:
return None
# https://developers.meethue.com/develop/hue-api/supported-devices/#clip_zll_lightlevel
# Light level in 10000 log10 (lux) +1 measured by sensor. Logarithm
# scale used because the human eye adjusts to light levels and small
# changes at low lux levels are more noticeable than at high lux
# levels.
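        # For example, a raw lightlevel of 25001 maps to
        # 10 ** ((25001 - 1) / 10000) = 10 ** 2.5, i.e. roughly 316.23 lux
        # (illustrative value).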
return round(float(10 ** ((self.sensor.lightlevel - 1) / 10000)), 2)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = super().device_state_attributes
attributes.update(
{
"lightlevel": self.sensor.lightlevel,
"daylight": self.sensor.daylight,
"dark": self.sensor.dark,
"threshold_dark": self.sensor.tholddark,
"threshold_offset": self.sensor.tholdoffset,
}
)
return attributes
class HueTemperature(GenericHueGaugeSensorEntity):
"""The temperature sensor entity for a Hue motion sensor device."""
device_class = DEVICE_CLASS_TEMPERATURE
unit_of_measurement = TEMP_CELSIUS
@property
def state(self):
"""Return the state of the device."""
if self.sensor.temperature is None:
return None
return self.sensor.temperature / 100
class HueBattery(GenericHueSensor):
"""Battery class for when a batt-powered device is only represented as an event."""
@property
def unique_id(self):
"""Return a unique identifier for this device."""
return f"{self.sensor.uniqueid}-battery"
@property
def state(self):
"""Return the state of the battery."""
return self.sensor.battery
@property
def device_class(self):
"""Return the class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return PERCENTAGE
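# Map each supported ZLL sensor type to the entity class and name format used to create it.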
SENSOR_CONFIG_MAP.update(
{
TYPE_ZLL_LIGHTLEVEL: {
"platform": "sensor",
"name_format": LIGHT_LEVEL_NAME_FORMAT,
"class": HueLightLevel,
},
TYPE_ZLL_TEMPERATURE: {
"platform": "sensor",
"name_format": TEMPERATURE_NAME_FORMAT,
"class": HueTemperature,
},
TYPE_ZLL_SWITCH: {
"platform": "sensor",
"name_format": REMOTE_NAME_FORMAT,
"class": HueBattery,
},
TYPE_ZLL_ROTARY: {
"platform": "sensor",
"name_format": REMOTE_NAME_FORMAT,
"class": HueBattery,
},
}
)
|
import unittest
from urwid.compat import B
from urwid.escape import str_util
class DecodeOneTest(unittest.TestCase):
def gwt(self, ch, exp_ord, exp_pos):
ch = B(ch)
        o, pos = str_util.decode_one(ch, 0)
        assert o == exp_ord, " got:%r expected:%r" % (o, exp_ord)
        assert pos == exp_pos, " got:%r expected:%r" % (pos, exp_pos)
def test1byte(self):
self.gwt("ab", ord("a"), 1)
self.gwt("\xc0a", ord("?"), 1) # error
def test2byte(self):
self.gwt("\xc2", ord("?"), 1) # error
self.gwt("\xc0\x80", ord("?"), 1) # error
self.gwt("\xc2\x80", 0x80, 2)
self.gwt("\xdf\xbf", 0x7ff, 2)
def test3byte(self):
self.gwt("\xe0", ord("?"), 1) # error
self.gwt("\xe0\xa0", ord("?"), 1) # error
self.gwt("\xe0\x90\x80", ord("?"), 1) # error
self.gwt("\xe0\xa0\x80", 0x800, 3)
self.gwt("\xef\xbf\xbf", 0xffff, 3)
def test4byte(self):
self.gwt("\xf0", ord("?"), 1) # error
self.gwt("\xf0\x90", ord("?"), 1) # error
self.gwt("\xf0\x90\x80", ord("?"), 1) # error
self.gwt("\xf0\x80\x80\x80", ord("?"), 1) # error
self.gwt("\xf0\x90\x80\x80", 0x10000, 4)
self.gwt("\xf3\xbf\xbf\xbf", 0xfffff, 4)
|
import pytest
import voluptuous as vol
from homeassistant import data_entry_flow
from homeassistant.util.decorator import Registry
from tests.common import async_capture_events
@pytest.fixture
def manager():
"""Return a flow manager."""
handlers = Registry()
entries = []
class FlowManager(data_entry_flow.FlowManager):
"""Test flow manager."""
async def async_create_flow(self, handler_key, *, context, data):
"""Test create flow."""
handler = handlers.get(handler_key)
if handler is None:
raise data_entry_flow.UnknownHandler
flow = handler()
flow.init_step = context.get("init_step", "init")
return flow
async def async_finish_flow(self, flow, result):
"""Test finish flow."""
if result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
result["source"] = flow.context.get("source")
entries.append(result)
return result
mgr = FlowManager(None)
mgr.mock_created_entries = entries
mgr.mock_reg_handler = handlers.register
return mgr
async def test_configure_reuses_handler_instance(manager):
"""Test that we reuse instances."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
handle_count = 0
async def async_step_init(self, user_input=None):
self.handle_count += 1
return self.async_show_form(
errors={"base": str(self.handle_count)}, step_id="init"
)
form = await manager.async_init("test")
assert form["errors"]["base"] == "1"
form = await manager.async_configure(form["flow_id"])
assert form["errors"]["base"] == "2"
assert manager.async_progress() == [
{
"flow_id": form["flow_id"],
"handler": "test",
"step_id": "init",
"context": {},
}
]
assert len(manager.mock_created_entries) == 0
async def test_configure_two_steps(manager):
"""Test that we reuse instances."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 1
async def async_step_first(self, user_input=None):
if user_input is not None:
self.init_data = user_input
return await self.async_step_second()
return self.async_show_form(step_id="first", data_schema=vol.Schema([str]))
async def async_step_second(self, user_input=None):
if user_input is not None:
return self.async_create_entry(
title="Test Entry", data=self.init_data + user_input
)
return self.async_show_form(step_id="second", data_schema=vol.Schema([str]))
form = await manager.async_init("test", context={"init_step": "first"})
with pytest.raises(vol.Invalid):
form = await manager.async_configure(form["flow_id"], "INCORRECT-DATA")
form = await manager.async_configure(form["flow_id"], ["INIT-DATA"])
form = await manager.async_configure(form["flow_id"], ["SECOND-DATA"])
assert form["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 1
result = manager.mock_created_entries[0]
assert result["handler"] == "test"
assert result["data"] == ["INIT-DATA", "SECOND-DATA"]
async def test_show_form(manager):
"""Test that we can show a form."""
schema = vol.Schema({vol.Required("username"): str, vol.Required("password"): str})
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
async def async_step_init(self, user_input=None):
return self.async_show_form(
step_id="init",
data_schema=schema,
errors={"username": "Should be unique."},
)
form = await manager.async_init("test")
assert form["type"] == "form"
assert form["data_schema"] is schema
assert form["errors"] == {"username": "Should be unique."}
async def test_abort_removes_instance(manager):
"""Test that abort removes the flow from progress."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
is_new = True
async def async_step_init(self, user_input=None):
old = self.is_new
self.is_new = False
return self.async_abort(reason=str(old))
form = await manager.async_init("test")
assert form["reason"] == "True"
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 0
form = await manager.async_init("test")
assert form["reason"] == "True"
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 0
async def test_create_saves_data(manager):
"""Test creating a config entry."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
async def async_step_init(self, user_input=None):
return self.async_create_entry(title="Test Title", data="Test Data")
await manager.async_init("test")
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 1
entry = manager.mock_created_entries[0]
assert entry["version"] == 5
assert entry["handler"] == "test"
assert entry["title"] == "Test Title"
assert entry["data"] == "Test Data"
assert entry["source"] is None
async def test_discovery_init_flow(manager):
"""Test a flow initialized by discovery."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
async def async_step_init(self, info):
return self.async_create_entry(title=info["id"], data=info)
data = {"id": "hello", "token": "secret"}
await manager.async_init("test", context={"source": "discovery"}, data=data)
assert len(manager.async_progress()) == 0
assert len(manager.mock_created_entries) == 1
entry = manager.mock_created_entries[0]
assert entry["version"] == 5
assert entry["handler"] == "test"
assert entry["title"] == "hello"
assert entry["data"] == data
assert entry["source"] == "discovery"
async def test_finish_callback_change_result_type(hass):
"""Test finish callback can change result type."""
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 1
async def async_step_init(self, input):
"""Return init form with one input field 'count'."""
if input is not None:
return self.async_create_entry(title="init", data=input)
return self.async_show_form(
step_id="init", data_schema=vol.Schema({"count": int})
)
class FlowManager(data_entry_flow.FlowManager):
async def async_create_flow(self, handler_name, *, context, data):
"""Create a test flow."""
return TestFlow()
async def async_finish_flow(self, flow, result):
"""Redirect to init form if count <= 1."""
if result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY:
if result["data"] is None or result["data"].get("count", 0) <= 1:
return flow.async_show_form(
step_id="init", data_schema=vol.Schema({"count": int})
)
else:
result["result"] = result["data"]["count"]
return result
manager = FlowManager(hass)
result = await manager.async_init("test")
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await manager.async_configure(result["flow_id"], {"count": 0})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
assert "result" not in result
result = await manager.async_configure(result["flow_id"], {"count": 2})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"] == 2
async def test_external_step(hass, manager):
"""Test external step logic."""
manager.hass = hass
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
VERSION = 5
data = None
async def async_step_init(self, user_input=None):
if not user_input:
return self.async_external_step(
step_id="init", url="https://example.com"
)
self.data = user_input
return self.async_external_step_done(next_step_id="finish")
async def async_step_finish(self, user_input=None):
return self.async_create_entry(title=self.data["title"], data=self.data)
events = async_capture_events(
hass, data_entry_flow.EVENT_DATA_ENTRY_FLOW_PROGRESSED
)
result = await manager.async_init("test")
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert len(manager.async_progress()) == 1
# Mimic external step
# Called by integrations: `hass.config_entries.flow.async_configure(…)`
result = await manager.async_configure(result["flow_id"], {"title": "Hello"})
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP_DONE
await hass.async_block_till_done()
assert len(events) == 1
assert events[0].data == {
"handler": "test",
"flow_id": result["flow_id"],
"refresh": True,
}
    # Frontend refreshes the flow
result = await manager.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "Hello"
async def test_abort_flow_exception(manager):
"""Test that the AbortFlow exception works."""
@manager.mock_reg_handler("test")
class TestFlow(data_entry_flow.FlowHandler):
async def async_step_init(self, user_input=None):
raise data_entry_flow.AbortFlow("mock-reason", {"placeholder": "yo"})
form = await manager.async_init("test")
assert form["type"] == "abort"
assert form["reason"] == "mock-reason"
assert form["description_placeholders"] == {"placeholder": "yo"}
|
from datetime import timedelta
import homeassistant.components.ring as ring
from homeassistant.setup import async_setup_component
from tests.common import load_fixture
ATTRIBUTION = "Data provided by Ring.com"
VALID_CONFIG = {
"ring": {"username": "foo", "password": "bar", "scan_interval": timedelta(10)}
}
async def test_setup(hass, requests_mock):
"""Test the setup."""
await async_setup_component(hass, ring.DOMAIN, {})
requests_mock.post(
"https://oauth.ring.com/oauth/token", text=load_fixture("ring_oauth.json")
)
requests_mock.post(
"https://api.ring.com/clients_api/session",
text=load_fixture("ring_session.json"),
)
requests_mock.get(
"https://api.ring.com/clients_api/ring_devices",
text=load_fixture("ring_devices.json"),
)
requests_mock.get(
"https://api.ring.com/clients_api/chimes/999999/health",
text=load_fixture("ring_chime_health_attrs.json"),
)
requests_mock.get(
"https://api.ring.com/clients_api/doorbots/987652/health",
text=load_fixture("ring_doorboot_health_attrs.json"),
)
assert await ring.async_setup(hass, VALID_CONFIG)
|
from perfkitbenchmarker.memcache_service import MemcacheService as BaseMemcacheService
from perfkitbenchmarker.providers import gcp
class MemcacheService(BaseMemcacheService):
CLOUD = gcp.CLOUD
def __init__(self):
pass
def Create(self):
raise NotImplementedError
def Destroy(self):
raise NotImplementedError
def Flush(self):
raise NotImplementedError
def GetHosts(self):
raise NotImplementedError
def GetMetadata(self):
raise NotImplementedError
|
import pytest
from yandextank.stepper.instance_plan import LoadPlanBuilder, create
from yandextank.stepper.util import take
class TestCreate(object):
@pytest.mark.parametrize(
'n, loadplan, expected',
[(
7, LoadPlanBuilder().ramp(5, 4000).create(),
[0, 1000, 2000, 3000, 4000, 0, 0]
), (
7, create(['ramp(5, 4s)']),
[0, 1000, 2000, 3000, 4000, 0, 0]
), (
12, create(['ramp(5, 4s)', 'wait(5s)', 'ramp(5,4s)']),
[0, 1000, 2000, 3000, 4000, 9000, 10000, 11000, 12000, 13000, 0, 0]
), (
7, create(['wait(5s)', 'ramp(5, 0)']),
[5000, 5000, 5000, 5000, 5000, 0, 0]
), (
7, create([]),
[0, 0, 0, 0, 0, 0, 0]
), (
12, create(['line(1, 9, 4s)']),
[0, 500, 1000, 1500, 2000, 2500, 3000, 3500, 4000, 0, 0, 0]
), (
12, create(['const(3, 5s)', 'line(7, 11, 2s)']),
[0, 0, 0, 5000, 5000, 5000, 5000, 5500, 6000, 6500, 7000, 0]
), (
12, create(['step(2, 10, 2, 3s)']),
[0, 0, 3000, 3000, 6000, 6000, 9000, 9000, 12000, 12000, 0, 0]
), (
12, LoadPlanBuilder().const(3, 1000).line(5, 10, 5000).steps,
[(3, 1), (5, 1), (6, 1), (7, 1), (8, 1), (9, 1), (10, 1)]
), (
12, LoadPlanBuilder().stairway(100, 950, 100, 30000).steps,
[
(100, 30), (200, 30), (300, 30), (400, 30), (500, 30),
(600, 30), (700, 30), (800, 30), (900, 30), (950, 30)]
)]) # yapf:disable
def test_steps(self, n, loadplan, expected):
assert take(n, loadplan) == expected
@pytest.mark.parametrize(
'loadplan, expected',
[(LoadPlanBuilder().stairway(100, 950, 100, 30000), 950),
(LoadPlanBuilder().const(3, 1000).line(5, 10, 5000), 10),
(LoadPlanBuilder().line(1, 100, 60000), 100)])
def test_instances(self, loadplan, expected):
assert loadplan.instances == expected
|
from typing import cast
import os.path
from PyQt5.QtNetwork import QNetworkDiskCache
from qutebrowser.config import config
from qutebrowser.utils import utils, standarddir
diskcache = cast('DiskCache', None)
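# Module-global cache instance, set by init() during application startup.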
class DiskCache(QNetworkDiskCache):
"""Disk cache which sets correct cache dir and size."""
def __init__(self, cache_dir, parent=None):
super().__init__(parent)
self.setCacheDirectory(os.path.join(cache_dir, 'http'))
self._set_cache_size()
config.instance.changed.connect(self._set_cache_size)
def __repr__(self):
return utils.get_repr(self, size=self.cacheSize(),
maxsize=self.maximumCacheSize(),
path=self.cacheDirectory())
@config.change_filter('content.cache.size')
def _set_cache_size(self):
"""Set the cache size based on the config."""
size = config.val.content.cache.size
if size is None:
size = 1024 * 1024 * 50 # default from QNetworkDiskCachePrivate
self.setMaximumCacheSize(size)
def init(parent):
"""Initialize the global cache."""
global diskcache
diskcache = DiskCache(standarddir.cache(), parent=parent)
|
import logging
log = logging.getLogger(__name__)
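# Scenario class for yandextank's bfg gun (illustrative note): bfg constructs
# LoadTest with a gun object, calls setup()/teardown() around the test and
# dispatches each missile to the case method named by its marker, which
# reports its timing via gun.measure().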
class LoadTest(object):
def __init__(self, gun):
self.gun = gun
def case1(self, missile):
with self.gun.measure("case1"):
log.info("Shoot case 1: %s", missile)
def case2(self, missile):
with self.gun.measure("case2"):
log.info("Shoot case 2: %s", missile)
def setup(self):
log.info("Setting up LoadTest")
def teardown(self):
log.info("Tearing down LoadTest")
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from compare_gan.architectures import arch_ops
from compare_gan.architectures import resnet_cifar
from six.moves import zip
import tensorflow as tf
class ResNetNormTest(tf.test.TestCase):
def testDefaultGenerator(self):
with tf.Graph().as_default():
# batch size 8, 32x32x3 images, 10 classes.
z = tf.zeros((8, 128))
y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10)
generator = resnet_cifar.Generator(image_shape=(32, 32, 3))
fake_images = generator(z, y=y, is_training=True, reuse=False)
self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3])
expected_variables = [
# Name and shape.
("generator/fc_noise/kernel:0", [128, 4096]),
("generator/fc_noise/bias:0", [4096]),
("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv_shortcut/bias:0", [256]),
("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv1/bias:0", [256]),
("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B1/same_conv2/bias:0", [256]),
("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv_shortcut/bias:0", [256]),
("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv1/bias:0", [256]),
("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B2/same_conv2/bias:0", [256]),
("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv_shortcut/bias:0", [256]),
("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv1/bias:0", [256]),
("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B3/same_conv2/bias:0", [256]),
("generator/final_conv/kernel:0", [3, 3, 256, 3]),
("generator/final_conv/bias:0", [3]),
]
actual_variables = [(v.name, v.shape.as_list())
for v in tf.trainable_variables()]
for a, e in zip(actual_variables, expected_variables):
logging.info("actual: %s, expected: %s", a, e)
self.assertEqual(a, e)
self.assertEqual(len(actual_variables), len(expected_variables))
def testDefaultDiscriminator(self):
with tf.Graph().as_default():
# batch size 8, 32x32x3 images, 10 classes.
x = tf.zeros((8, 32, 32, 3))
y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10)
discriminator = resnet_cifar.Discriminator()
_ = discriminator(x, y=y, is_training=True, reuse=False)
expected_variables = [
# Name and shape.
("discriminator/B1/down_conv_shortcut/kernel:0", [3, 3, 3, 128]),
("discriminator/B1/down_conv_shortcut/bias:0", [128]),
("discriminator/B1/same_conv1/kernel:0", [3, 3, 3, 128]),
("discriminator/B1/same_conv1/bias:0", [128]),
("discriminator/B1/down_conv2/kernel:0", [3, 3, 128, 128]),
("discriminator/B1/down_conv2/bias:0", [128]),
("discriminator/B2/down_conv_shortcut/kernel:0", [3, 3, 128, 128]),
("discriminator/B2/down_conv_shortcut/bias:0", [128]),
("discriminator/B2/same_conv1/kernel:0", [3, 3, 128, 128]),
("discriminator/B2/same_conv1/bias:0", [128]),
("discriminator/B2/down_conv2/kernel:0", [3, 3, 128, 128]),
("discriminator/B2/down_conv2/bias:0", [128]),
("discriminator/B3/same_conv_shortcut/kernel:0", [3, 3, 128, 128]),
("discriminator/B3/same_conv_shortcut/bias:0", [128]),
("discriminator/B3/same_conv1/kernel:0", [3, 3, 128, 128]),
("discriminator/B3/same_conv1/bias:0", [128]),
("discriminator/B3/same_conv2/kernel:0", [3, 3, 128, 128]),
("discriminator/B3/same_conv2/bias:0", [128]),
("discriminator/B4/same_conv_shortcut/kernel:0", [3, 3, 128, 128]),
("discriminator/B4/same_conv_shortcut/bias:0", [128]),
("discriminator/B4/same_conv1/kernel:0", [3, 3, 128, 128]),
("discriminator/B4/same_conv1/bias:0", [128]),
("discriminator/B4/same_conv2/kernel:0", [3, 3, 128, 128]),
("discriminator/B4/same_conv2/bias:0", [128]),
("discriminator/disc_final_fc/kernel:0", [128, 1]),
("discriminator/disc_final_fc/bias:0", [1]),
]
actual_variables = [(v.name, v.shape.as_list())
for v in tf.trainable_variables()]
for a, e in zip(actual_variables, expected_variables):
logging.info("actual: %s, expected: %s", a, e)
self.assertEqual(a, e)
self.assertEqual(len(actual_variables), len(expected_variables))
def testDefaultGeneratorWithBatchNorm(self):
with tf.Graph().as_default():
# batch size 8, 32x32x3 images, 10 classes.
z = tf.zeros((8, 128))
y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10)
generator = resnet_cifar.Generator(
image_shape=(32, 32, 3),
batch_norm_fn=arch_ops.batch_norm)
fake_images = generator(z, y=y, is_training=True, reuse=False)
self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3])
expected_variables = [
# Name and shape.
("generator/fc_noise/kernel:0", [128, 4096]),
("generator/fc_noise/bias:0", [4096]),
("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv_shortcut/bias:0", [256]),
("generator/B1/bn1/gamma:0", [256]),
("generator/B1/bn1/beta:0", [256]),
("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv1/bias:0", [256]),
("generator/B1/bn2/gamma:0", [256]),
("generator/B1/bn2/beta:0", [256]),
("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B1/same_conv2/bias:0", [256]),
("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv_shortcut/bias:0", [256]),
("generator/B2/bn1/gamma:0", [256]),
("generator/B2/bn1/beta:0", [256]),
("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv1/bias:0", [256]),
("generator/B2/bn2/gamma:0", [256]),
("generator/B2/bn2/beta:0", [256]),
("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B2/same_conv2/bias:0", [256]),
("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv_shortcut/bias:0", [256]),
("generator/B3/bn1/gamma:0", [256]),
("generator/B3/bn1/beta:0", [256]),
("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv1/bias:0", [256]),
("generator/B3/bn2/gamma:0", [256]),
("generator/B3/bn2/beta:0", [256]),
("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B3/same_conv2/bias:0", [256]),
("generator/final_norm/gamma:0", [256]),
("generator/final_norm/beta:0", [256]),
("generator/final_conv/kernel:0", [3, 3, 256, 3]),
("generator/final_conv/bias:0", [3]),
]
actual_variables = [(v.name, v.shape.as_list())
for v in tf.trainable_variables()]
for a in actual_variables:
logging.info(a)
for a, e in zip(actual_variables, expected_variables):
logging.info("actual: %s, expected: %s", a, e)
self.assertEqual(a, e)
self.assertEqual(len(actual_variables), len(expected_variables))
def testDefaultGeneratorWithConditionalBatchNorm(self):
with tf.Graph().as_default():
# Batch size 8, 32x32x3 images, 10 classes.
z = tf.zeros((8, 128))
y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10)
generator = resnet_cifar.Generator(
image_shape=(32, 32, 3),
batch_norm_fn=arch_ops.conditional_batch_norm)
fake_images = generator(z, y=y, is_training=True, reuse=False)
self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3])
expected_variables = [
# Name and shape.
("generator/fc_noise/kernel:0", [128, 4096]),
("generator/fc_noise/bias:0", [4096]),
("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv_shortcut/bias:0", [256]),
("generator/B1/bn1/condition/gamma/kernel:0", [10, 256]),
("generator/B1/bn1/condition/beta/kernel:0", [10, 256]),
("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv1/bias:0", [256]),
("generator/B1/bn2/condition/gamma/kernel:0", [10, 256]),
("generator/B1/bn2/condition/beta/kernel:0", [10, 256]),
("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B1/same_conv2/bias:0", [256]),
("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv_shortcut/bias:0", [256]),
("generator/B2/bn1/condition/gamma/kernel:0", [10, 256]),
("generator/B2/bn1/condition/beta/kernel:0", [10, 256]),
("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv1/bias:0", [256]),
("generator/B2/bn2/condition/gamma/kernel:0", [10, 256]),
("generator/B2/bn2/condition/beta/kernel:0", [10, 256]),
("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B2/same_conv2/bias:0", [256]),
("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv_shortcut/bias:0", [256]),
("generator/B3/bn1/condition/gamma/kernel:0", [10, 256]),
("generator/B3/bn1/condition/beta/kernel:0", [10, 256]),
("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv1/bias:0", [256]),
("generator/B3/bn2/condition/gamma/kernel:0", [10, 256]),
("generator/B3/bn2/condition/beta/kernel:0", [10, 256]),
("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B3/same_conv2/bias:0", [256]),
("generator/final_norm/condition/gamma/kernel:0", [10, 256]),
("generator/final_norm/condition/beta/kernel:0", [10, 256]),
("generator/final_conv/kernel:0", [3, 3, 256, 3]),
("generator/final_conv/bias:0", [3]),
]
actual_variables = [(v.name, v.shape.as_list())
for v in tf.trainable_variables()]
for a in actual_variables:
logging.info(a)
for a, e in zip(actual_variables, expected_variables):
logging.info("actual: %s, expected: %s", a, e)
self.assertEqual(a, e)
self.assertEqual(len(actual_variables), len(expected_variables))
def testDefaultGeneratorWithSelfModulatedBatchNorm(self):
with tf.Graph().as_default():
# Batch size 8, 32x32x3 images, 10 classes.
z = tf.zeros((8, 128))
y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10)
generator = resnet_cifar.Generator(
image_shape=(32, 32, 3),
batch_norm_fn=arch_ops.self_modulated_batch_norm)
fake_images = generator(z, y=y, is_training=True, reuse=False)
self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3])
expected_variables = [
# Name and shape.
("generator/fc_noise/kernel:0", [128, 4096]),
("generator/fc_noise/bias:0", [4096]),
("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv_shortcut/bias:0", [256]),
("generator/B1/bn1/sbn/hidden/kernel:0", [128, 32]),
("generator/B1/bn1/sbn/hidden/bias:0", [32]),
("generator/B1/bn1/sbn/gamma/kernel:0", [32, 256]),
("generator/B1/bn1/sbn/gamma/bias:0", [256]),
("generator/B1/bn1/sbn/beta/kernel:0", [32, 256]),
("generator/B1/bn1/sbn/beta/bias:0", [256]),
("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv1/bias:0", [256]),
("generator/B1/bn2/sbn/hidden/kernel:0", [128, 32]),
("generator/B1/bn2/sbn/hidden/bias:0", [32]),
("generator/B1/bn2/sbn/gamma/kernel:0", [32, 256]),
("generator/B1/bn2/sbn/gamma/bias:0", [256]),
("generator/B1/bn2/sbn/beta/kernel:0", [32, 256]),
("generator/B1/bn2/sbn/beta/bias:0", [256]),
("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B1/same_conv2/bias:0", [256]),
("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv_shortcut/bias:0", [256]),
("generator/B2/bn1/sbn/hidden/kernel:0", [128, 32]),
("generator/B2/bn1/sbn/hidden/bias:0", [32]),
("generator/B2/bn1/sbn/gamma/kernel:0", [32, 256]),
("generator/B2/bn1/sbn/gamma/bias:0", [256]),
("generator/B2/bn1/sbn/beta/kernel:0", [32, 256]),
("generator/B2/bn1/sbn/beta/bias:0", [256]),
("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv1/bias:0", [256]),
("generator/B2/bn2/sbn/hidden/kernel:0", [128, 32]),
("generator/B2/bn2/sbn/hidden/bias:0", [32]),
("generator/B2/bn2/sbn/gamma/kernel:0", [32, 256]),
("generator/B2/bn2/sbn/gamma/bias:0", [256]),
("generator/B2/bn2/sbn/beta/kernel:0", [32, 256]),
("generator/B2/bn2/sbn/beta/bias:0", [256]),
("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B2/same_conv2/bias:0", [256]),
("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv_shortcut/bias:0", [256]),
("generator/B3/bn1/sbn/hidden/kernel:0", [128, 32]),
("generator/B3/bn1/sbn/hidden/bias:0", [32]),
("generator/B3/bn1/sbn/gamma/kernel:0", [32, 256]),
("generator/B3/bn1/sbn/gamma/bias:0", [256]),
("generator/B3/bn1/sbn/beta/kernel:0", [32, 256]),
("generator/B3/bn1/sbn/beta/bias:0", [256]),
("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv1/bias:0", [256]),
("generator/B3/bn2/sbn/hidden/kernel:0", [128, 32]),
("generator/B3/bn2/sbn/hidden/bias:0", [32]),
("generator/B3/bn2/sbn/gamma/kernel:0", [32, 256]),
("generator/B3/bn2/sbn/gamma/bias:0", [256]),
("generator/B3/bn2/sbn/beta/kernel:0", [32, 256]),
("generator/B3/bn2/sbn/beta/bias:0", [256]),
("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B3/same_conv2/bias:0", [256]),
("generator/final_norm/sbn/hidden/kernel:0", [128, 32]),
("generator/final_norm/sbn/hidden/bias:0", [32]),
("generator/final_norm/sbn/gamma/kernel:0", [32, 256]),
("generator/final_norm/sbn/gamma/bias:0", [256]),
("generator/final_norm/sbn/beta/kernel:0", [32, 256]),
("generator/final_norm/sbn/beta/bias:0", [256]),
("generator/final_conv/kernel:0", [3, 3, 256, 3]),
("generator/final_conv/bias:0", [3]),
]
actual_variables = [(v.name, v.shape.as_list())
for v in tf.trainable_variables()]
for a in actual_variables:
logging.info(a)
for a, e in zip(actual_variables, expected_variables):
logging.info("actual: %s, expected: %s", a, e)
self.assertEqual(a, e)
self.assertEqual(len(actual_variables), len(expected_variables))
def testDefaultGeneratorWithSpectralNorm(self):
with tf.Graph().as_default():
# Batch size 8, 32x32x3 images, 10 classes.
z = tf.zeros((8, 128))
y = tf.one_hot(tf.ones((8,), dtype=tf.int32), 10)
generator = resnet_cifar.Generator(
image_shape=(32, 32, 3),
spectral_norm=True)
fake_images = generator(z, y=y, is_training=True, reuse=False)
self.assertEqual(fake_images.shape.as_list(), [8, 32, 32, 3])
expected_variables = [
# Name and shape.
("generator/fc_noise/kernel:0", [128, 4096]),
("generator/fc_noise/kernel/u_var:0", [128, 1]),
("generator/fc_noise/bias:0", [4096]),
("generator/B1/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv_shortcut/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/B1/up_conv_shortcut/bias:0", [256]),
("generator/B1/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B1/up_conv1/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/B1/up_conv1/bias:0", [256]),
("generator/B1/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B1/same_conv2/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/B1/same_conv2/bias:0", [256]),
("generator/B2/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv_shortcut/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/B2/up_conv_shortcut/bias:0", [256]),
("generator/B2/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B2/up_conv1/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/B2/up_conv1/bias:0", [256]),
("generator/B2/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B2/same_conv2/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/B2/same_conv2/bias:0", [256]),
("generator/B3/up_conv_shortcut/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv_shortcut/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/B3/up_conv_shortcut/bias:0", [256]),
("generator/B3/up_conv1/kernel:0", [3, 3, 256, 256]),
("generator/B3/up_conv1/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/B3/up_conv1/bias:0", [256]),
("generator/B3/same_conv2/kernel:0", [3, 3, 256, 256]),
("generator/B3/same_conv2/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/B3/same_conv2/bias:0", [256]),
("generator/final_conv/kernel:0", [3, 3, 256, 3]),
("generator/final_conv/kernel/u_var:0", [3 * 3 * 256, 1]),
("generator/final_conv/bias:0", [3]),
]
actual_variables = [(v.name, v.shape.as_list())
for v in tf.global_variables()]
for a in actual_variables:
logging.info(a)
for a, e in zip(actual_variables, expected_variables):
logging.info("actual: %s, expected: %s", a, e)
self.assertEqual(a, e)
self.assertEqual(len(actual_variables), len(expected_variables))
if __name__ == "__main__":
tf.test.main()
|
from asyncio import iscoroutinefunction
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_DOMAIN
from homeassistant.core import CALLBACK_TYPE, callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_call_later
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_TXT = "txt"
DOMAIN = "duckdns"
INTERVAL = timedelta(minutes=5)
SERVICE_SET_TXT = "set_txt"
UPDATE_URL = "https://www.duckdns.org/update"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_DOMAIN): cv.string,
vol.Required(CONF_ACCESS_TOKEN): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
SERVICE_TXT_SCHEMA = vol.Schema({vol.Required(ATTR_TXT): vol.Any(None, cv.string)})
async def async_setup(hass, config):
"""Initialize the DuckDNS component."""
domain = config[DOMAIN][CONF_DOMAIN]
token = config[DOMAIN][CONF_ACCESS_TOKEN]
session = async_get_clientsession(hass)
async def update_domain_interval(_now):
"""Update the DuckDNS entry."""
return await _update_duckdns(session, domain, token)
intervals = (
INTERVAL,
timedelta(minutes=1),
timedelta(minutes=5),
timedelta(minutes=15),
timedelta(minutes=30),
)
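    # On success the update repeats every INTERVAL; each consecutive failure
    # advances one step along this tuple, capping at 30 minutes.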
async_track_time_interval_backoff(hass, update_domain_interval, intervals)
async def update_domain_service(call):
"""Update the DuckDNS entry."""
await _update_duckdns(session, domain, token, txt=call.data[ATTR_TXT])
hass.services.async_register(
DOMAIN, SERVICE_SET_TXT, update_domain_service, schema=SERVICE_TXT_SCHEMA
)
return True
_SENTINEL = object()
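# Sentinel distinguishing "txt not passed at all" from an explicit None (which clears the record).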
async def _update_duckdns(session, domain, token, *, txt=_SENTINEL, clear=False):
"""Update DuckDNS."""
params = {"domains": domain, "token": token}
if txt is not _SENTINEL:
if txt is None:
            # Pass an empty txt value to indicate that the txt record is being cleared
params["txt"] = ""
clear = True
else:
params["txt"] = txt
if clear:
params["clear"] = "true"
resp = await session.get(UPDATE_URL, params=params)
body = await resp.text()
if body != "OK":
_LOGGER.warning("Updating DuckDNS domain failed: %s", domain)
return False
return True
@callback
@bind_hass
def async_track_time_interval_backoff(hass, action, intervals) -> CALLBACK_TYPE:
"""Add a listener that fires repetitively at every timedelta interval."""
    if not iscoroutinefunction(action):
        _LOGGER.error("action needs to be a coroutine and return True/False")
        return
if not isinstance(intervals, (list, tuple)):
intervals = (intervals,)
remove = None
failed = 0
async def interval_listener(now):
"""Handle elapsed intervals with backoff."""
nonlocal failed, remove
try:
failed += 1
if await action(now):
failed = 0
finally:
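            # Pick the next interval after each consecutive failure; stay at
            # the last interval once the sequence is exhausted.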
delay = intervals[failed] if failed < len(intervals) else intervals[-1]
remove = async_call_later(hass, delay.total_seconds(), interval_listener)
hass.async_run_job(interval_listener, dt_util.utcnow())
def remove_listener():
"""Remove interval listener."""
if remove:
remove() # pylint: disable=not-callable
return remove_listener
|
__all__ = ["fetch", "available", "Base"]
import functools
import logging
import pkgutil
import urllib
import docker_registry.drivers
from .compat import json
from .exceptions import NotImplementedError
logger = logging.getLogger(__name__)
def check(value):
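    """Return a URL-quoted version of a path component, with '.' and '..' escaped first."""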
value = str(value)
if value == '..':
value = '%2E%2E'
if value == '.':
value = '%2E'
return urllib.quote_plus(value)
def filter_args(f):
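    """Decorate a path helper so every argument after self and every keyword value is URL-quoted via check()."""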
@functools.wraps(f)
def wrapper(*args, **kwargs):
args = list(args)
ref = args.pop(0)
args = [check(arg) for arg in args]
args.insert(0, ref)
for key, value in kwargs.iteritems():
kwargs[key] = check(value)
return f(*args, **kwargs)
return wrapper
class Base(object):
"""Storage is a convenience class...
... that describes methods that must be implemented by any backend.
You should inherit (or duck type) this if you are implementing your own.
:param host: host name
:type host: unicode
:param port: port number
:type port: int
:param basepath: base path (will be prepended to actual requests)
:type basepath: unicode
"""
# Useful if we want to change those locations later without rewriting
# the code which uses Storage
repositories = 'repositories'
images = 'images'
def _repository_path(self, namespace, repository):
return '{0}/{1}/{2}'.format(
self.repositories, namespace, repository)
# Set the IO buffer to 128kB
buffer_size = 128 * 1024
# By default no storage plugin supports it
supports_bytes_range = False
def __init__(self, path=None, config=None):
pass
    # FIXME(samalba): Move all path resolvers into each module (out of the base)
@filter_args
def images_list_path(self, namespace, repository):
repository_path = self._repository_path(
namespace=namespace, repository=repository)
return '{0}/_images_list'.format(repository_path)
@filter_args
def image_json_path(self, image_id):
return '{0}/{1}/json'.format(self.images, image_id)
@filter_args
def image_mark_path(self, image_id):
return '{0}/{1}/_inprogress'.format(self.images, image_id)
@filter_args
def image_checksum_path(self, image_id):
return '{0}/{1}/_checksum'.format(self.images, image_id)
@filter_args
def image_layer_path(self, image_id):
return '{0}/{1}/layer'.format(self.images, image_id)
@filter_args
def image_ancestry_path(self, image_id):
return '{0}/{1}/ancestry'.format(self.images, image_id)
@filter_args
def image_files_path(self, image_id):
return '{0}/{1}/_files'.format(self.images, image_id)
@filter_args
def image_diff_path(self, image_id):
return '{0}/{1}/_diff'.format(self.images, image_id)
@filter_args
def repository_path(self, namespace, repository):
return '{0}/{1}/{2}'.format(
self.repositories, namespace, repository)
@filter_args
def tag_path(self, namespace, repository, tagname=None):
repository_path = self._repository_path(
namespace=namespace, repository=repository)
if not tagname:
return repository_path
return '{0}/tag_{1}'.format(repository_path, tagname)
@filter_args
def repository_json_path(self, namespace, repository):
repository_path = self._repository_path(
namespace=namespace, repository=repository)
return '{0}/json'.format(repository_path)
@filter_args
def repository_tag_json_path(self, namespace, repository, tag):
repository_path = self._repository_path(
namespace=namespace, repository=repository)
return '{0}/tag{1}_json'.format(repository_path, tag)
@filter_args
def index_images_path(self, namespace, repository):
repository_path = self._repository_path(
namespace=namespace, repository=repository)
return '{0}/_index_images'.format(repository_path)
@filter_args
def private_flag_path(self, namespace, repository):
repository_path = self._repository_path(
namespace=namespace, repository=repository)
return '{0}/_private'.format(repository_path)
def is_private(self, namespace, repository):
return self.exists(self.private_flag_path(namespace, repository))
def content_redirect_url(self, path):
"""Get a URL for content at path
Get a URL to which client can be redirected to get the content from
the path. Return None if not supported by this engine.
Note, this feature will only be used if the `storage_redirect`
configuration key is set to `True`.
"""
return None
def get_json(self, path):
return json.loads(self.get_unicode(path))
def put_json(self, path, content):
return self.put_unicode(path, json.dumps(content))
def get_unicode(self, path):
return self.get_bytes(path).decode('utf8')
def put_unicode(self, path, content):
return self.put_bytes(path, content.encode('utf8'))
def get_bytes(self, path):
return self.get_content(path)
def put_bytes(self, path, content):
return self.put_content(path, content)
def get_content(self, path):
"""Method to get content."""
raise NotImplementedError(
"You must implement get_content(self, path) on your storage %s" %
self.__class__.__name__)
def put_content(self, path, content):
"""Method to put content."""
raise NotImplementedError(
"You must implement put_content(self, path, content) on %s" %
self.__class__.__name__)
def stream_read(self, path, bytes_range=None):
"""Method to stream read."""
raise NotImplementedError(
"You must implement stream_read(self, path, , bytes_range=None) " +
"on your storage %s" %
self.__class__.__name__)
def stream_write(self, path, fp):
"""Method to stream write."""
raise NotImplementedError(
"You must implement stream_write(self, path, fp) " +
"on your storage %s" %
self.__class__.__name__)
def list_directory(self, path=None):
"""Method to list directory."""
raise NotImplementedError(
"You must implement list_directory(self, path=None) " +
"on your storage %s" %
self.__class__.__name__)
def exists(self, path):
"""Method to test exists."""
raise NotImplementedError(
"You must implement exists(self, path) on your storage %s" %
self.__class__.__name__)
def remove(self, path):
"""Method to remove."""
raise NotImplementedError(
"You must implement remove(self, path) on your storage %s" %
self.__class__.__name__)
def get_size(self, path):
"""Method to get the size."""
raise NotImplementedError(
"You must implement get_size(self, path) on your storage %s" %
self.__class__.__name__)
def fetch(name):
try:
# XXX The noqa below is because of hacking being non-sensical on this
module = __import__('docker_registry.drivers.%s' % name, globals(),
locals(), ['Storage'], 0) # noqa
logger.debug("Will return docker-registry.drivers.%s.Storage" % name)
except ImportError as e:
logger.warn("Got exception: %s" % e)
raise NotImplementedError(
"""You requested storage driver docker_registry.drivers.%s
which is not installed. Try `pip install docker-registry-driver-%s`
or check your configuration. The following are currently
available on your system: %s. Exception was: %s"""
% (name, name, available(), e)
)
module.Storage.scheme = name
return module.Storage
def available():
return [modname for importer, modname, ispkg
in pkgutil.iter_modules(docker_registry.drivers.__path__)]
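# Illustrative sketch (not part of the original module): a minimal in-memory
# driver showing the contract the abstract methods above define.  It assumes
# the base class defined above is named `Storage` (the attribute fetch()
# looks up on driver modules); a real driver lives in
# docker_registry.drivers.<name> and also implements the streaming methods.
class _ExampleMemoryStorage(Storage):
    def __init__(self):
        self._store = {}
    def get_content(self, path):
        # Missing paths surface as an error, mirroring a file-not-found case.
        if path not in self._store:
            raise KeyError(path)
        return self._store[path]
    def put_content(self, path, content):
        self._store[path] = content
        return path
    def exists(self, path):
        return path in self._store
    def remove(self, path):
        self._store.pop(path, None)
    def get_size(self, path):
        return len(self.get_content(path))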
|
import logging
from openhomedevice.Device import Device
import voluptuous as vol
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import STATE_IDLE, STATE_OFF, STATE_PAUSED, STATE_PLAYING
from homeassistant.helpers import config_validation as cv, entity_platform
from .const import ATTR_PIN_INDEX, DATA_OPENHOME, SERVICE_INVOKE_PIN
SUPPORT_OPENHOME = SUPPORT_SELECT_SOURCE | SUPPORT_TURN_OFF | SUPPORT_TURN_ON
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Openhome platform."""
if not discovery_info:
return
openhome_data = hass.data.setdefault(DATA_OPENHOME, set())
name = discovery_info.get("name")
description = discovery_info.get("ssdp_description")
_LOGGER.info("Openhome device found: %s", name)
device = await hass.async_add_executor_job(Device, description)
# if device has already been discovered
if device.Uuid() in openhome_data:
return True
entity = OpenhomeDevice(hass, device)
async_add_entities([entity])
openhome_data.add(device.Uuid())
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_INVOKE_PIN,
{vol.Required(ATTR_PIN_INDEX): cv.positive_int},
"invoke_pin",
)
class OpenhomeDevice(MediaPlayerEntity):
"""Representation of an Openhome device."""
def __init__(self, hass, device):
"""Initialise the Openhome device."""
self.hass = hass
self._device = device
self._track_information = {}
self._in_standby = None
self._transport_state = None
self._volume_level = None
self._volume_muted = None
self._supported_features = SUPPORT_OPENHOME
self._source_names = []
self._source_index = {}
self._source = {}
self._name = None
self._state = STATE_PLAYING
def update(self):
"""Update state of device."""
self._in_standby = self._device.IsInStandby()
self._transport_state = self._device.TransportState()
self._track_information = self._device.TrackInfo()
self._source = self._device.Source()
self._name = self._device.Room().decode("utf-8")
self._supported_features = SUPPORT_OPENHOME
source_index = {}
source_names = []
if self._device.VolumeEnabled():
self._supported_features |= (
SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET
)
self._volume_level = self._device.VolumeLevel() / 100.0
self._volume_muted = self._device.IsMuted()
for source in self._device.Sources():
source_names.append(source["name"])
source_index[source["name"]] = source["index"]
self._source_index = source_index
self._source_names = source_names
if self._source["type"] == "Radio":
self._supported_features |= SUPPORT_STOP | SUPPORT_PLAY | SUPPORT_PLAY_MEDIA
if self._source["type"] in ("Playlist", "Spotify"):
self._supported_features |= (
SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
)
if self._in_standby:
self._state = STATE_OFF
elif self._transport_state == "Paused":
self._state = STATE_PAUSED
elif self._transport_state in ("Playing", "Buffering"):
self._state = STATE_PLAYING
elif self._transport_state == "Stopped":
self._state = STATE_IDLE
else:
# Device is playing an external source with no transport controls
self._state = STATE_PLAYING
def turn_on(self):
"""Bring device out of standby."""
self._device.SetStandby(False)
def turn_off(self):
"""Put device in standby."""
self._device.SetStandby(True)
def play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player."""
        if media_type != MEDIA_TYPE_MUSIC:
_LOGGER.error(
"Invalid media type %s. Only %s is supported",
media_type,
MEDIA_TYPE_MUSIC,
)
return
track_details = {"title": "Home Assistant", "uri": media_id}
self._device.PlayMedia(track_details)
def media_pause(self):
"""Send pause command."""
self._device.Pause()
def media_stop(self):
"""Send stop command."""
self._device.Stop()
def media_play(self):
"""Send play command."""
self._device.Play()
def media_next_track(self):
"""Send next track command."""
self._device.Skip(1)
def media_previous_track(self):
"""Send previous track command."""
self._device.Skip(-1)
def select_source(self, source):
"""Select input source."""
self._device.SetSource(self._source_index[source])
def invoke_pin(self, pin):
"""Invoke pin."""
self._device.InvokePin(pin)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def supported_features(self):
"""Flag of features commands that are supported."""
return self._supported_features
@property
def unique_id(self):
"""Return a unique ID."""
return self._device.Uuid()
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def source_list(self):
"""List of available input sources."""
return self._source_names
@property
def media_image_url(self):
"""Image url of current playing media."""
return self._track_information.get("albumArtwork")
@property
def media_artist(self):
"""Artist of current playing media, music track only."""
artists = self._track_information.get("artist")
if artists:
return artists[0]
@property
def media_album_name(self):
"""Album name of current playing media, music track only."""
return self._track_information.get("albumTitle")
@property
def media_title(self):
"""Title of current playing media."""
return self._track_information.get("title")
@property
def source(self):
"""Name of the current input source."""
return self._source.get("name")
@property
def volume_level(self):
"""Volume level of the media player (0..1)."""
return self._volume_level
@property
def is_volume_muted(self):
"""Return true if volume is muted."""
return self._volume_muted
def volume_up(self):
"""Volume up media player."""
self._device.IncreaseVolume()
def volume_down(self):
"""Volume down media player."""
self._device.DecreaseVolume()
def set_volume_level(self, volume):
"""Set volume level, range 0..1."""
self._device.SetVolumeLevel(int(volume * 100))
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
self._device.SetMute(mute)
|
import logging
import psutil
import socket
from datetime import datetime, timedelta
import uuid
import locale
from flask import render_template, request, session, jsonify, Response, Blueprint, current_app, g
from werkzeug.local import LocalProxy
from psdash.helpers import socket_families, socket_types
logger = logging.getLogger('psdash.web')
webapp = Blueprint('psdash', __name__, static_folder='static')
def get_current_node():
return current_app.psdash.get_node(g.node)
def get_current_service():
return get_current_node().get_service()
current_node = LocalProxy(get_current_node)
current_service = LocalProxy(get_current_service)
def fromtimestamp(value, dateformat='%Y-%m-%d %H:%M:%S'):
dt = datetime.fromtimestamp(int(value))
return dt.strftime(dateformat)
@webapp.context_processor
def inject_nodes():
return {"current_node": current_node, "nodes": current_app.psdash.get_nodes()}
@webapp.context_processor
def inject_header_data():
sysinfo = current_service.get_sysinfo()
uptime = timedelta(seconds=sysinfo['uptime'])
uptime = str(uptime).split('.')[0]
return {
'os': sysinfo['os'].decode('utf-8'),
'hostname': sysinfo['hostname'].decode('utf-8'),
'uptime': uptime
}
@webapp.url_defaults
def add_node(endpoint, values):
values.setdefault('node', g.node)
@webapp.before_request
def add_node():
g.node = request.args.get('node', current_app.psdash.LOCAL_NODE)
@webapp.before_request
def check_access():
if not current_node:
return 'Unknown psdash node specified', 404
allowed_remote_addrs = current_app.config.get('PSDASH_ALLOWED_REMOTE_ADDRESSES')
if allowed_remote_addrs:
if request.remote_addr not in allowed_remote_addrs:
current_app.logger.info(
'Returning 401 for client %s as address is not in allowed addresses.',
request.remote_addr
)
current_app.logger.debug('Allowed addresses: %s', allowed_remote_addrs)
return 'Access denied', 401
username = current_app.config.get('PSDASH_AUTH_USERNAME')
password = current_app.config.get('PSDASH_AUTH_PASSWORD')
if username and password:
auth = request.authorization
if not auth or auth.username != username or auth.password != password:
return Response(
                'Access denied',
401,
{'WWW-Authenticate': 'Basic realm="psDash login required"'}
)
@webapp.before_request
def setup_client_id():
if 'client_id' not in session:
client_id = uuid.uuid4()
current_app.logger.debug('Creating id for client: %s', client_id)
session['client_id'] = client_id
@webapp.errorhandler(psutil.AccessDenied)
def access_denied(e):
errmsg = 'Access denied to %s (pid %d).' % (e.name, e.pid)
return render_template('error.html', error=errmsg), 401
@webapp.errorhandler(psutil.NoSuchProcess)
def process_not_found(e):
errmsg = 'No process with pid %d was found.' % e.pid
return render_template('error.html', error=errmsg), 404
@webapp.route('/')
def index():
sysinfo = current_service.get_sysinfo()
netifs = current_service.get_network_interfaces().values()
netifs.sort(key=lambda x: x.get('bytes_sent'), reverse=True)
data = {
'load_avg': sysinfo['load_avg'],
'num_cpus': sysinfo['num_cpus'],
'memory': current_service.get_memory(),
'swap': current_service.get_swap_space(),
'disks': current_service.get_disks(),
'cpu': current_service.get_cpu(),
'users': current_service.get_users(),
'net_interfaces': netifs,
'page': 'overview',
'is_xhr': request.is_xhr
}
return render_template('index.html', **data)
@webapp.route('/processes', defaults={'sort': 'cpu_percent', 'order': 'desc', 'filter': 'user'})
@webapp.route('/processes/<string:sort>')
@webapp.route('/processes/<string:sort>/<string:order>')
@webapp.route('/processes/<string:sort>/<string:order>/<string:filter>')
def processes(sort='pid', order='asc', filter='user'):
procs = current_service.get_process_list()
num_procs = len(procs)
user_procs = [p for p in procs if p['user'] != 'root']
num_user_procs = len(user_procs)
if filter == 'user':
procs = user_procs
procs.sort(
key=lambda x: x.get(sort),
reverse=True if order != 'asc' else False
)
return render_template(
'processes.html',
processes=procs,
sort=sort,
order=order,
filter=filter,
num_procs=num_procs,
num_user_procs=num_user_procs,
page='processes',
is_xhr=request.is_xhr
)
@webapp.route('/process/<int:pid>', defaults={'section': 'overview'})
@webapp.route('/process/<int:pid>/<string:section>')
def process(pid, section):
valid_sections = [
'overview',
'threads',
'files',
'connections',
'memory',
'environment',
'children',
'limits'
]
if section not in valid_sections:
errmsg = 'Invalid subsection when trying to view process %d' % pid
return render_template('error.html', error=errmsg), 404
context = {
'process': current_service.get_process(pid),
'section': section,
'page': 'processes',
'is_xhr': request.is_xhr
}
if section == 'environment':
penviron = current_service.get_process_environment(pid)
whitelist = current_app.config.get('PSDASH_ENVIRON_WHITELIST')
if whitelist:
penviron = dict((k, v if k in whitelist else '*hidden by whitelist*')
for k, v in penviron.iteritems())
context['process_environ'] = penviron
elif section == 'threads':
context['threads'] = current_service.get_process_threads(pid)
elif section == 'files':
context['files'] = current_service.get_process_open_files(pid)
elif section == 'connections':
context['connections'] = current_service.get_process_connections(pid)
elif section == 'memory':
context['memory_maps'] = current_service.get_process_memory_maps(pid)
elif section == 'children':
context['children'] = current_service.get_process_children(pid)
elif section == 'limits':
context['limits'] = current_service.get_process_limits(pid)
return render_template(
'process/%s.html' % section,
**context
)
@webapp.route('/network')
def view_networks():
netifs = current_service.get_network_interfaces().values()
netifs.sort(key=lambda x: x.get('bytes_sent'), reverse=True)
    # {'key': 'default_value'}
# An empty string means that no filtering will take place on that key
form_keys = {
'pid': '',
'family': socket_families[socket.AF_INET],
'type': socket_types[socket.SOCK_STREAM],
'state': 'LISTEN'
}
form_values = dict((k, request.args.get(k, default_val)) for k, default_val in form_keys.iteritems())
for k in ('local_addr', 'remote_addr'):
val = request.args.get(k, '')
if ':' in val:
host, port = val.rsplit(':', 1)
form_values[k + '_host'] = host
form_values[k + '_port'] = int(port)
elif val:
form_values[k + '_host'] = val
conns = current_service.get_connections(form_values)
conns.sort(key=lambda x: x['state'])
states = [
'ESTABLISHED', 'SYN_SENT', 'SYN_RECV',
'FIN_WAIT1', 'FIN_WAIT2', 'TIME_WAIT',
'CLOSE', 'CLOSE_WAIT', 'LAST_ACK',
'LISTEN', 'CLOSING', 'NONE'
]
return render_template(
'network.html',
page='network',
network_interfaces=netifs,
connections=conns,
socket_families=socket_families,
socket_types=socket_types,
states=states,
is_xhr=request.is_xhr,
num_conns=len(conns),
**form_values
)
@webapp.route('/disks')
def view_disks():
disks = current_service.get_disks(all_partitions=True)
io_counters = current_service.get_disks_counters().items()
io_counters.sort(key=lambda x: x[1]['read_count'], reverse=True)
return render_template(
'disks.html',
page='disks',
disks=disks,
io_counters=io_counters,
is_xhr=request.is_xhr
)
@webapp.route('/logs')
def view_logs():
available_logs = current_service.get_logs()
available_logs.sort(cmp=lambda x1, x2: locale.strcoll(x1['path'], x2['path']))
return render_template(
'logs.html',
page='logs',
logs=available_logs,
is_xhr=request.is_xhr
)
@webapp.route('/log')
def view_log():
filename = request.args['filename']
seek_tail = request.args.get('seek_tail', '1') != '0'
session_key = session.get('client_id')
try:
content = current_service.read_log(filename, session_key=session_key, seek_tail=seek_tail)
except KeyError:
error_msg = 'File not found. Only files passed through args are allowed.'
if request.is_xhr:
return error_msg
return render_template('error.html', error=error_msg), 404
if request.is_xhr:
return content
return render_template('log.html', content=content, filename=filename)
@webapp.route('/log/search')
def search_log():
filename = request.args['filename']
query_text = request.args['text']
session_key = session.get('client_id')
try:
data = current_service.search_log(filename, query_text, session_key=session_key)
return jsonify(data)
except KeyError:
return 'Could not find log file with given filename', 404
@webapp.route('/register')
def register_node():
name = request.args['name']
port = request.args['port']
host = request.remote_addr
current_app.psdash.register_node(name, host, port)
return jsonify({'status': 'OK'})
|
import asyncio
import argparse
import txaio
txaio.use_asyncio()
import autobahn
from autobahn.websocket.util import parse_url
from autobahn.asyncio.websocket import WebSocketClientProtocol, \
WebSocketClientFactory
from autobahn.websocket.compress import PerMessageDeflateOffer, \
PerMessageDeflateResponse, PerMessageDeflateResponseAccept
class TesteeClientProtocol(WebSocketClientProtocol):
def onOpen(self):
if self.factory.endCaseId is None:
self.log.info("Getting case count ..")
elif self.factory.currentCaseId <= self.factory.endCaseId:
self.log.info("Running test case {case_id}/{last_case_id} as user agent {agent} on peer {peer}",
case_id=self.factory.currentCaseId,
last_case_id=self.factory.endCaseId,
agent=self.factory.agent,
peer=self.peer)
def onMessage(self, msg, binary):
if self.factory.endCaseId is None:
self.factory.endCaseId = int(msg)
self.log.info("Ok, will run {case_count} cases", case_count=self.factory.endCaseId)
else:
self.sendMessage(msg, binary)
def onClose(self, wasClean, code, reason):
txaio.resolve(self.factory._done, None)
class TesteeClientFactory(WebSocketClientFactory):
protocol = TesteeClientProtocol
def __init__(self, url, agent):
self.agent = agent
WebSocketClientFactory.__init__(self, url, useragent=agent)
self.setProtocolOptions(failByDrop=False) # spec conformance
# enable permessage-deflate WebSocket protocol extension
offers = [PerMessageDeflateOffer()]
self.setProtocolOptions(perMessageCompressionOffers=offers)
def accept(response):
if isinstance(response, PerMessageDeflateResponse):
return PerMessageDeflateResponseAccept(response)
self.setProtocolOptions(perMessageCompressionAccept=accept)
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Autobahn Testee Client (asyncio)')
parser.add_argument('--url', dest='url', type=str, default='ws://127.0.0.1:9001', help='The WebSocket fuzzing server URL.')
parser.add_argument('--loglevel', dest='loglevel', type=str, default='info', help='Log level, eg "info" or "debug".')
options = parser.parse_args()
txaio.start_logging(level=options.loglevel)
factory = TesteeClientFactory(options.url, autobahn.asyncio.__ident__)
_, host, port, _, _, _ = parse_url(options.url)
loop = asyncio.get_event_loop()
factory.resource = '/getCaseCount'
factory.endCaseId = None
factory.currentCaseId = 0
factory.updateReports = True
while True:
factory._done = txaio.create_future()
coro = loop.create_connection(factory, host, port)
loop.run_until_complete(coro)
loop.run_until_complete(factory._done)
factory.currentCaseId += 1
if factory.currentCaseId <= factory.endCaseId:
factory.resource = "/runCase?case={}&agent={}".format(factory.currentCaseId, factory.agent)
elif factory.updateReports:
factory.resource = "/updateReports?agent={}".format(factory.agent)
factory.updateReports = False
else:
break
loop.close()
|
import pytest
from homeassistant.components.cloud import GACTIONS_SCHEMA
from homeassistant.components.cloud.google_config import CloudGoogleConfig
from homeassistant.components.google_assistant import helpers as ga_helpers
from homeassistant.const import EVENT_HOMEASSISTANT_STARTED, HTTP_NOT_FOUND
from homeassistant.core import CoreState, State
from homeassistant.helpers.entity_registry import EVENT_ENTITY_REGISTRY_UPDATED
from homeassistant.util.dt import utcnow
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import async_fire_time_changed
@pytest.fixture
def mock_conf(hass, cloud_prefs):
"""Mock Google conf."""
return CloudGoogleConfig(
hass,
GACTIONS_SCHEMA({}),
"mock-user-id",
cloud_prefs,
Mock(claims={"cognito:username": "abcdefghjkl"}),
)
async def test_google_update_report_state(mock_conf, hass, cloud_prefs):
"""Test Google config responds to updating preference."""
await mock_conf.async_initialize()
await mock_conf.async_connect_agent_user("mock-user-id")
with patch.object(mock_conf, "async_sync_entities") as mock_sync, patch(
"homeassistant.components.google_assistant.report_state.async_enable_report_state"
) as mock_report_state:
await cloud_prefs.async_update(google_report_state=True)
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 1
assert len(mock_report_state.mock_calls) == 1
async def test_sync_entities(aioclient_mock, hass, cloud_prefs):
"""Test sync devices."""
config = CloudGoogleConfig(
hass,
GACTIONS_SCHEMA({}),
"mock-user-id",
cloud_prefs,
Mock(auth=Mock(async_check_token=AsyncMock())),
)
with patch(
"hass_nabucasa.cloud_api.async_google_actions_request_sync",
return_value=Mock(status=HTTP_NOT_FOUND),
) as mock_request_sync:
assert await config.async_sync_entities("user") == HTTP_NOT_FOUND
assert len(mock_request_sync.mock_calls) == 1
async def test_google_update_expose_trigger_sync(
hass, legacy_patchable_time, cloud_prefs
):
"""Test Google config responds to updating exposed entities."""
config = CloudGoogleConfig(
hass,
GACTIONS_SCHEMA({}),
"mock-user-id",
cloud_prefs,
Mock(claims={"cognito:username": "abcdefghjkl"}),
)
await config.async_initialize()
await config.async_connect_agent_user("mock-user-id")
with patch.object(config, "async_sync_entities") as mock_sync, patch.object(
ga_helpers, "SYNC_DELAY", 0
):
await cloud_prefs.async_update_google_entity_config(
entity_id="light.kitchen", should_expose=True
)
await hass.async_block_till_done()
async_fire_time_changed(hass, utcnow())
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 1
with patch.object(config, "async_sync_entities") as mock_sync, patch.object(
ga_helpers, "SYNC_DELAY", 0
):
await cloud_prefs.async_update_google_entity_config(
entity_id="light.kitchen", should_expose=False
)
await cloud_prefs.async_update_google_entity_config(
entity_id="binary_sensor.door", should_expose=True
)
await cloud_prefs.async_update_google_entity_config(
entity_id="sensor.temp", should_expose=True
)
await hass.async_block_till_done()
async_fire_time_changed(hass, utcnow())
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 1
async def test_google_entity_registry_sync(hass, mock_cloud_login, cloud_prefs):
"""Test Google config responds to entity registry."""
config = CloudGoogleConfig(
hass, GACTIONS_SCHEMA({}), "mock-user-id", cloud_prefs, hass.data["cloud"]
)
await config.async_initialize()
await config.async_connect_agent_user("mock-user-id")
with patch.object(
config, "async_schedule_google_sync_all"
) as mock_sync, patch.object(ga_helpers, "SYNC_DELAY", 0):
# Created entity
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{"action": "create", "entity_id": "light.kitchen"},
)
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 1
# Removed entity
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{"action": "remove", "entity_id": "light.kitchen"},
)
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 2
# Entity registry updated with relevant changes
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{
"action": "update",
"entity_id": "light.kitchen",
"changes": ["entity_id"],
},
)
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 3
# Entity registry updated with non-relevant changes
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{"action": "update", "entity_id": "light.kitchen", "changes": ["icon"]},
)
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 3
# When hass is not started yet we wait till started
hass.state = CoreState.starting
hass.bus.async_fire(
EVENT_ENTITY_REGISTRY_UPDATED,
{"action": "create", "entity_id": "light.kitchen"},
)
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 3
with patch.object(config, "async_sync_entities_all") as mock_sync:
hass.bus.async_fire(EVENT_HOMEASSISTANT_STARTED)
await hass.async_block_till_done()
assert len(mock_sync.mock_calls) == 1
async def test_google_config_expose_entity_prefs(mock_conf, cloud_prefs):
"""Test Google config should expose using prefs."""
entity_conf = {"should_expose": False}
await cloud_prefs.async_update(
google_entity_configs={"light.kitchen": entity_conf},
google_default_expose=["light"],
)
state = State("light.kitchen", "on")
assert not mock_conf.should_expose(state)
entity_conf["should_expose"] = True
assert mock_conf.should_expose(state)
entity_conf["should_expose"] = None
assert mock_conf.should_expose(state)
await cloud_prefs.async_update(
google_default_expose=["sensor"],
)
assert not mock_conf.should_expose(state)
|
from test import CollectorTestCase
from test import get_collector_config
from mock import Mock
from mock import patch
from diamond.collector import Collector
from dseopscenter import DseOpsCenterCollector
##########################################################################
class TestDseOpsCenterCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('DseOpsCenterCollector',
{'cluster_id': 'MyTestCluster'})
self.collector = DseOpsCenterCollector(config, None)
def test_import(self):
self.assertTrue(DseOpsCenterCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
urlopen_mock1 = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: self.getFixture('keyspaces.json')))
urlopen_mock1.start()
self.collector._get_schema()
urlopen_mock1.stop()
urlopen_mock2 = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: self.getFixture('new-metrics.json')))
urlopen_mock2.start()
self.collector.collect()
urlopen_mock2.stop()
metrics = {
'cf-bf-false-positives.dse_system.leases': 0,
'key-cache-requests': 38.28847822050253,
'key-cache-hits': 9.114316945274672,
'nonheap-max': 136314880,
'nonheap-used': 48491696.666666664,
'read-ops': 55.91526222229004,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
|
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from . import ATTR_VERSION, DATA_UPDATED, DOMAIN as IPERF3_DOMAIN, SENSOR_TYPES
ATTRIBUTION = "Data retrieved using Iperf3"
ICON = "mdi:speedometer"
ATTR_PROTOCOL = "Protocol"
ATTR_REMOTE_HOST = "Remote Server"
ATTR_REMOTE_PORT = "Remote Port"
async def async_setup_platform(hass, config, async_add_entities, discovery_info):
"""Set up the Iperf3 sensor."""
sensors = []
for iperf3_host in hass.data[IPERF3_DOMAIN].values():
sensors.extend([Iperf3Sensor(iperf3_host, sensor) for sensor in discovery_info])
async_add_entities(sensors, True)
class Iperf3Sensor(RestoreEntity):
"""A Iperf3 sensor implementation."""
def __init__(self, iperf3_data, sensor_type):
"""Initialize the sensor."""
self._name = f"{SENSOR_TYPES[sensor_type][0]} {iperf3_data.host}"
self._state = None
self._sensor_type = sensor_type
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
self._iperf3_data = iperf3_data
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Return icon."""
return ICON
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_PROTOCOL: self._iperf3_data.protocol,
ATTR_REMOTE_HOST: self._iperf3_data.host,
ATTR_REMOTE_PORT: self._iperf3_data.port,
ATTR_VERSION: self._iperf3_data.data[ATTR_VERSION],
}
@property
def should_poll(self):
"""Return the polling requirement for this sensor."""
return False
async def async_added_to_hass(self):
"""Handle entity which will be added."""
await super().async_added_to_hass()
self.async_on_remove(
async_dispatcher_connect(
self.hass, DATA_UPDATED, self._schedule_immediate_update
)
)
state = await self.async_get_last_state()
if not state:
return
self._state = state.state
def update(self):
"""Get the latest data and update the states."""
data = self._iperf3_data.data.get(self._sensor_type)
if data is not None:
self._state = round(data, 2)
@callback
def _schedule_immediate_update(self, host):
if host == self._iperf3_data.host:
self.async_schedule_update_ha_state(True)
|
import asyncio
from datetime import timedelta
import logging
import async_timeout
from pywemo.ouimeaux_device.api.service import ActionException
import voluptuous as vol
from homeassistant.components.fan import (
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.const import ATTR_ENTITY_ID
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
DOMAIN as WEMO_DOMAIN,
SERVICE_RESET_FILTER_LIFE,
SERVICE_SET_HUMIDITY,
)
SCAN_INTERVAL = timedelta(seconds=10)
PARALLEL_UPDATES = 0
_LOGGER = logging.getLogger(__name__)
ATTR_CURRENT_HUMIDITY = "current_humidity"
ATTR_TARGET_HUMIDITY = "target_humidity"
ATTR_FAN_MODE = "fan_mode"
ATTR_FILTER_LIFE = "filter_life"
ATTR_FILTER_EXPIRED = "filter_expired"
ATTR_WATER_LEVEL = "water_level"
# The WEMO_ constants below come from pywemo itself
WEMO_ON = 1
WEMO_OFF = 0
WEMO_HUMIDITY_45 = 0
WEMO_HUMIDITY_50 = 1
WEMO_HUMIDITY_55 = 2
WEMO_HUMIDITY_60 = 3
WEMO_HUMIDITY_100 = 4
WEMO_FAN_OFF = 0
WEMO_FAN_MINIMUM = 1
WEMO_FAN_LOW = 2 # Not used due to limitations of the base fan implementation
WEMO_FAN_MEDIUM = 3
WEMO_FAN_HIGH = 4 # Not used due to limitations of the base fan implementation
WEMO_FAN_MAXIMUM = 5
WEMO_WATER_EMPTY = 0
WEMO_WATER_LOW = 1
WEMO_WATER_GOOD = 2
SUPPORTED_SPEEDS = [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
SUPPORTED_FEATURES = SUPPORT_SET_SPEED
# Since the base fan object supports a set list of fan speeds,
# we have to reuse some of them when mapping to the 5 WeMo speeds
WEMO_FAN_SPEED_TO_HASS = {
WEMO_FAN_OFF: SPEED_OFF,
WEMO_FAN_MINIMUM: SPEED_LOW,
WEMO_FAN_LOW: SPEED_LOW, # Reusing SPEED_LOW
WEMO_FAN_MEDIUM: SPEED_MEDIUM,
WEMO_FAN_HIGH: SPEED_HIGH, # Reusing SPEED_HIGH
WEMO_FAN_MAXIMUM: SPEED_HIGH,
}
# Because we reused mappings in the previous dict, we have to filter them
# back out in this dict, or else we would have duplicate keys
HASS_FAN_SPEED_TO_WEMO = {
v: k
for (k, v) in WEMO_FAN_SPEED_TO_HASS.items()
if k not in [WEMO_FAN_LOW, WEMO_FAN_HIGH]
}
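# Illustrative note (not part of the original module): with WEMO_FAN_LOW and
# WEMO_FAN_HIGH filtered out above, the reverse lookup resolves the reused
# Home Assistant speeds to the outer WeMo modes:
#   HASS_FAN_SPEED_TO_WEMO[SPEED_OFF]    == WEMO_FAN_OFF      (0)
#   HASS_FAN_SPEED_TO_WEMO[SPEED_LOW]    == WEMO_FAN_MINIMUM  (1)
#   HASS_FAN_SPEED_TO_WEMO[SPEED_MEDIUM] == WEMO_FAN_MEDIUM   (3)
#   HASS_FAN_SPEED_TO_WEMO[SPEED_HIGH]   == WEMO_FAN_MAXIMUM  (5)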
SET_HUMIDITY_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_TARGET_HUMIDITY): vol.All(
vol.Coerce(float), vol.Range(min=0, max=100)
),
}
)
RESET_FILTER_LIFE_SCHEMA = vol.Schema({vol.Required(ATTR_ENTITY_ID): cv.entity_ids})
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up WeMo binary sensors."""
entities = []
async def _discovered_wemo(device):
"""Handle a discovered Wemo device."""
entity = WemoHumidifier(device)
entities.append(entity)
async_add_entities([entity])
async_dispatcher_connect(hass, f"{WEMO_DOMAIN}.fan", _discovered_wemo)
await asyncio.gather(
*[
_discovered_wemo(device)
for device in hass.data[WEMO_DOMAIN]["pending"].pop("fan")
]
)
def service_handle(service):
"""Handle the WeMo humidifier services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
humidifiers = [entity for entity in entities if entity.entity_id in entity_ids]
if service.service == SERVICE_SET_HUMIDITY:
target_humidity = service.data.get(ATTR_TARGET_HUMIDITY)
for humidifier in humidifiers:
humidifier.set_humidity(target_humidity)
elif service.service == SERVICE_RESET_FILTER_LIFE:
for humidifier in humidifiers:
humidifier.reset_filter_life()
# Register service(s)
hass.services.async_register(
WEMO_DOMAIN,
SERVICE_SET_HUMIDITY,
service_handle,
schema=SET_HUMIDITY_SCHEMA,
)
hass.services.async_register(
WEMO_DOMAIN,
SERVICE_RESET_FILTER_LIFE,
service_handle,
schema=RESET_FILTER_LIFE_SCHEMA,
)
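# Illustrative note (not part of the original module): assuming the const
# module maps WEMO_DOMAIN to "wemo" and SERVICE_SET_HUMIDITY to
# "set_humidity", the handler above would be reached by a service call like:
#   service: wemo.set_humidity
#   data:
#     entity_id: fan.wemo_humidifier   # hypothetical entity id
#     target_humidity: 55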
class WemoHumidifier(FanEntity):
"""Representation of a WeMo humidifier."""
def __init__(self, device):
"""Initialize the WeMo switch."""
self.wemo = device
self._state = None
self._available = True
self._update_lock = None
self._fan_mode = None
self._target_humidity = None
self._current_humidity = None
self._water_level = None
self._filter_life = None
self._filter_expired = None
self._last_fan_on_mode = WEMO_FAN_MEDIUM
self._model_name = self.wemo.model_name
self._name = self.wemo.name
self._serialnumber = self.wemo.serialnumber
def _subscription_callback(self, _device, _type, _params):
"""Update the state by the Wemo device."""
_LOGGER.info("Subscription update for %s", self.name)
updated = self.wemo.subscription_update(_type, _params)
self.hass.add_job(self._async_locked_subscription_callback(not updated))
async def _async_locked_subscription_callback(self, force_update):
"""Handle an update from a subscription."""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
await self._async_locked_update(force_update)
self.async_write_ha_state()
@property
def unique_id(self):
"""Return the ID of this WeMo humidifier."""
return self._serialnumber
@property
def name(self):
"""Return the name of the humidifier if any."""
return self._name
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self._state
@property
def available(self):
"""Return true if switch is available."""
return self._available
@property
def device_info(self):
"""Return the device info."""
return {
"name": self._name,
"identifiers": {(WEMO_DOMAIN, self._serialnumber)},
"model": self._model_name,
"manufacturer": "Belkin",
}
@property
def icon(self):
"""Return the icon of device based on its type."""
return "mdi:water-percent"
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_CURRENT_HUMIDITY: self._current_humidity,
ATTR_TARGET_HUMIDITY: self._target_humidity,
ATTR_FAN_MODE: self._fan_mode,
ATTR_WATER_LEVEL: self._water_level,
ATTR_FILTER_LIFE: self._filter_life,
ATTR_FILTER_EXPIRED: self._filter_expired,
}
@property
def speed(self) -> str:
"""Return the current speed."""
return WEMO_FAN_SPEED_TO_HASS.get(self._fan_mode)
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return SUPPORTED_SPEEDS
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORTED_FEATURES
async def async_added_to_hass(self):
"""Wemo humidifier added to Home Assistant."""
# Define inside async context so we know our event loop
self._update_lock = asyncio.Lock()
registry = self.hass.data[WEMO_DOMAIN]["registry"]
await self.hass.async_add_executor_job(registry.register, self.wemo)
registry.on(self.wemo, None, self._subscription_callback)
async def async_update(self):
"""Update WeMo state.
Wemo has an aggressive retry logic that sometimes can take over a
minute to return. If we don't get a state after 5 seconds, assume the
Wemo humidifier is unreachable. If update goes through, it will be made
available again.
"""
# If an update is in progress, we don't do anything
if self._update_lock.locked():
return
try:
with async_timeout.timeout(5):
await asyncio.shield(self._async_locked_update(True))
except asyncio.TimeoutError:
_LOGGER.warning("Lost connection to %s", self.name)
self._available = False
async def _async_locked_update(self, force_update):
"""Try updating within an async lock."""
async with self._update_lock:
await self.hass.async_add_executor_job(self._update, force_update)
def _update(self, force_update=True):
"""Update the device state."""
try:
self._state = self.wemo.get_state(force_update)
self._fan_mode = self.wemo.fan_mode_string
self._target_humidity = self.wemo.desired_humidity_percent
self._current_humidity = self.wemo.current_humidity_percent
self._water_level = self.wemo.water_level_string
self._filter_life = self.wemo.filter_life_percent
self._filter_expired = self.wemo.filter_expired
if self.wemo.fan_mode != WEMO_FAN_OFF:
self._last_fan_on_mode = self.wemo.fan_mode
if not self._available:
_LOGGER.info("Reconnected to %s", self.name)
self._available = True
except (AttributeError, ActionException) as err:
_LOGGER.warning("Could not update status for %s (%s)", self.name, err)
self._available = False
self.wemo.reconnect_with_device()
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn the switch on."""
if speed is None:
try:
self.wemo.set_state(self._last_fan_on_mode)
except ActionException as err:
_LOGGER.warning("Error while turning on device %s (%s)", self.name, err)
self._available = False
else:
self.set_speed(speed)
self.schedule_update_ha_state()
def turn_off(self, **kwargs) -> None:
"""Turn the switch off."""
try:
self.wemo.set_state(WEMO_FAN_OFF)
except ActionException as err:
_LOGGER.warning("Error while turning off device %s (%s)", self.name, err)
self._available = False
self.schedule_update_ha_state()
def set_speed(self, speed: str) -> None:
"""Set the fan_mode of the Humidifier."""
try:
self.wemo.set_state(HASS_FAN_SPEED_TO_WEMO.get(speed))
except ActionException as err:
_LOGGER.warning(
"Error while setting speed of device %s (%s)", self.name, err
)
self._available = False
self.schedule_update_ha_state()
def set_humidity(self, humidity: float) -> None:
"""Set the target humidity level for the Humidifier."""
if humidity < 50:
target_humidity = WEMO_HUMIDITY_45
elif 50 <= humidity < 55:
target_humidity = WEMO_HUMIDITY_50
elif 55 <= humidity < 60:
target_humidity = WEMO_HUMIDITY_55
elif 60 <= humidity < 100:
target_humidity = WEMO_HUMIDITY_60
elif humidity >= 100:
target_humidity = WEMO_HUMIDITY_100
try:
self.wemo.set_humidity(target_humidity)
except ActionException as err:
_LOGGER.warning(
"Error while setting humidity of device: %s (%s)", self.name, err
)
self._available = False
self.schedule_update_ha_state()
def reset_filter_life(self) -> None:
"""Reset the filter life to 100%."""
try:
self.wemo.reset_filter_life()
except ActionException as err:
_LOGGER.warning(
"Error while resetting filter life on device: %s (%s)", self.name, err
)
self._available = False
self.schedule_update_ha_state()
|
import unittest
from trashcli.trash import Parser
from mock import MagicMock, call
class TestParser(unittest.TestCase):
def setUp(self):
self.invalid_option_callback = MagicMock()
self.on_raw = MagicMock()
self.on_help = MagicMock()
self.on_option = MagicMock()
self.parser = Parser()
self.parser.on_invalid_option(self.invalid_option_callback)
self.parser.add_option('raw', self.on_raw)
self.parser.add_option('opt=', self.on_option)
self.parser.on_help(self.on_help)
def test_argument_option_called_without_argument(self):
self.parser(['trash-list', '--opt'])
assert [] == self.on_option.mock_calls
self.invalid_option_callback.assert_called_with('trash-list', 'opt')
    def test_argument_option_called_with_empty_argument(self):
self.parser(['trash-list', '--opt=', 'arg'])
assert [call('')] == self.on_option.mock_calls
    def test_argument_option_called_with_attached_argument(self):
self.parser(['trash-list', '--opt=arg'])
assert [call('arg')] == self.on_option.mock_calls
    def test_argument_option_called_with_detached_argument(self):
self.parser(['trash-list', '--opt', 'arg'])
assert [call('arg')] == self.on_option.mock_calls
def test_it_calls_help(self):
self.parser(['trash-list', '--help'])
self.on_help.assert_called_with('trash-list')
def test_it_calls_the_actions_passing_the_program_name(self):
self.parser(['trash-list', '--raw'])
self.on_raw.assert_called_with('')
def test_how_getopt_works_with_an_invalid_option(self):
self.parser(['command-name', '-x'])
self.invalid_option_callback.assert_called_with('command-name', 'x')
|
import os
from M2Crypto import BIO
from M2Crypto import RSA
import yaml
from docker_registry.core import compat
from docker_registry.core import exceptions
class Config(object):
"""A simple config class
* gives properties access through either items or attributes
* enforce types (thanks to yaml)
* interpolate from ENV
"""
def __init__(self, config=None):
if config is None:
config = {}
if isinstance(config, compat.basestring):
try:
self._config = yaml.load(config)
except Exception as e:
# Failed yaml loading? Stop here!
raise exceptions.ConfigError(
'Config is not valid yaml (%s): \n%s' % (e, config))
else:
# Config is kept as-is...
self._config = config
def __repr__(self):
return repr(self._config)
def __dir__(self):
return self._config.keys()
def keys(self):
return self._config.keys()
# Python 2.6 and below need this
@property
def __members__(self):
return self._config.keys()
@property
def __methods__(self):
return []
def __getattr__(self, key):
# Unset keys return None
if key not in self._config:
return None
# raise exceptions.ConfigError("No such attribute: %s" % key)
result = self._config[key]
# Strings starting with `_env:' get evaluated
if isinstance(
result, compat.basestring) and result.startswith('_env:'):
result = result.split(':', 2)
varname = result[1]
vardefault = '' if len(result) < 3 else result[2]
try:
result = yaml.load(os.environ.get(varname, vardefault))
except Exception as e:
raise exceptions.ConfigError(
'Config `%s` (value: `%s`) is not valid: %s' % (
                        varname, result, e))
# Dicts are rewrapped inside a Config object
if isinstance(result, dict):
result = Config(result)
return result
def __getitem__(self, key):
return getattr(self, key)
def __contains__(self, key):
return key in self._config
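# Illustrative sketch (not part of the original module): how the `_env:`
# interpolation above behaves.  The variable names and values are
# hypothetical; because results go through yaml, they keep their native type.
def _example_env_interpolation():
    os.environ['EXAMPLE_PORT'] = '5000'
    conf = Config('port: _env:EXAMPLE_PORT:5001\n'
                  'debug: _env:EXAMPLE_DEBUG:false\n')
    # EXAMPLE_PORT is set, so its yaml-parsed value 5000 (an int) wins over
    # the default; EXAMPLE_DEBUG is unset, so the default parses to False.
    return conf.port, conf['debug']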
def _init():
flavor = os.environ.get('SETTINGS_FLAVOR', 'dev')
config_path = os.environ.get('DOCKER_REGISTRY_CONFIG', 'config.yml')
if not os.path.isabs(config_path):
config_path = os.path.join(os.path.dirname(__file__), '../../',
'config', config_path)
try:
f = open(config_path)
except Exception:
raise exceptions.FileNotFoundError(
'Heads-up! File is missing: %s' % config_path)
conf = Config(f.read())
if flavor:
if flavor not in conf:
raise exceptions.ConfigError(
'The specified flavor (%s) is missing in your config file (%s)'
% (flavor, config_path))
conf = conf[flavor]
conf.flavor = flavor
if conf.privileged_key:
try:
f = open(conf.privileged_key)
except Exception:
raise exceptions.FileNotFoundError(
'Heads-up! File is missing: %s' % conf.privileged_key)
try:
pk = f.read().split('\n')
pk = 'MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A' + ''.join(pk[1:-2])
pk = [pk[i: i + 64] for i in range(0, len(pk), 64)]
pk = ('-----BEGIN PUBLIC KEY-----\n' + '\n'.join(pk) +
'\n-----END PUBLIC KEY-----')
bio = BIO.MemoryBuffer(pk)
conf.privileged_key = RSA.load_pub_key_bio(bio)
except Exception:
raise exceptions.ConfigError(
'Key at %s is not a valid RSA key' % conf.privileged_key)
f.close()
if conf.index_endpoint:
conf.index_endpoint = conf.index_endpoint.strip('/')
return conf
_config = None
def load():
global _config
if not _config:
_config = _init()
return _config
|
import contextlib
import itertools
import numpy as np
import os
import six
import sys
try:
import pycocotools.coco
import pycocotools.cocoeval
_available = True
except ImportError:
_available = False
def eval_detection_coco(pred_bboxes, pred_labels, pred_scores, gt_bboxes,
gt_labels, gt_areas=None, gt_crowdeds=None):
"""Evaluate detections based on evaluation code of MS COCO.
This function evaluates predicted bounding boxes obtained from a dataset
by using average precision for each class.
The code is based on the evaluation code used in MS COCO.
.. _`evaluation page`: http://cocodataset.org/#detections-eval
Args:
pred_bboxes (iterable of numpy.ndarray): See the table below.
pred_labels (iterable of numpy.ndarray): See the table below.
pred_scores (iterable of numpy.ndarray): See the table below.
gt_bboxes (iterable of numpy.ndarray): See the table below.
gt_labels (iterable of numpy.ndarray): See the table below.
gt_areas (iterable of numpy.ndarray): See the table below. If
:obj:`None`, some scores are not returned.
gt_crowdeds (iterable of numpy.ndarray): See the table below.
.. csv-table::
:header: name, shape, dtype, format
:obj:`pred_bboxes`, ":math:`[(R, 4)]`", :obj:`float32`, \
":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
:obj:`pred_labels`, ":math:`[(R,)]`", :obj:`int32`, \
":math:`[0, \#fg\_class - 1]`"
:obj:`pred_scores`, ":math:`[(R,)]`", :obj:`float32`, \
--
:obj:`gt_bboxes`, ":math:`[(R, 4)]`", :obj:`float32`, \
":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
:obj:`gt_labels`, ":math:`[(R,)]`", :obj:`int32`, \
":math:`[0, \#fg\_class - 1]`"
:obj:`gt_areas`, ":math:`[(R,)]`", \
:obj:`float32`, --
:obj:`gt_crowdeds`, ":math:`[(R,)]`", :obj:`bool`, --
All inputs should have the same length. For more detailed explanation
of the inputs, please refer to
:class:`chainercv.datasets.COCOBboxDataset`.
.. seealso::
:class:`chainercv.datasets.COCOBboxDataset`.
Returns:
dict:
The keys, value-types and the description of the values are listed
below. The APs and ARs calculated with different iou
thresholds, sizes of objects, and numbers of detections
per image. For more details on the 12 patterns of evaluation metrics,
please refer to COCO's official `evaluation page`_.
.. csv-table::
:header: key, type, description
ap/iou=0.50:0.95/area=all/max_dets=100, *numpy.ndarray*, \
[#coco_det_eval_1]_
ap/iou=0.50/area=all/max_dets=100, *numpy.ndarray*, \
[#coco_det_eval_1]_
ap/iou=0.75/area=all/max_dets=100, *numpy.ndarray*, \
[#coco_det_eval_1]_
ap/iou=0.50:0.95/area=small/max_dets=100, *numpy.ndarray*, \
[#coco_det_eval_1]_ [#coco_det_eval_5]_
ap/iou=0.50:0.95/area=medium/max_dets=100, *numpy.ndarray*, \
[#coco_det_eval_1]_ [#coco_det_eval_5]_
ap/iou=0.50:0.95/area=large/max_dets=100, *numpy.ndarray*, \
[#coco_det_eval_1]_ [#coco_det_eval_5]_
ar/iou=0.50:0.95/area=all/max_dets=1, *numpy.ndarray*, \
[#coco_det_eval_2]_
        ar/iou=0.50:0.95/area=all/max_dets=10, *numpy.ndarray*, \
            [#coco_det_eval_2]_
        ar/iou=0.50:0.95/area=all/max_dets=100, *numpy.ndarray*, \
            [#coco_det_eval_2]_
ar/iou=0.50:0.95/area=small/max_dets=100, *numpy.ndarray*, \
[#coco_det_eval_2]_ [#coco_det_eval_5]_
ar/iou=0.50:0.95/area=medium/max_dets=100, *numpy.ndarray*, \
[#coco_det_eval_2]_ [#coco_det_eval_5]_
ar/iou=0.50:0.95/area=large/max_dets=100, *numpy.ndarray*, \
[#coco_det_eval_2]_ [#coco_det_eval_5]_
map/iou=0.50:0.95/area=all/max_dets=100, *float*, \
[#coco_det_eval_3]_
map/iou=0.50/area=all/max_dets=100, *float*, \
[#coco_det_eval_3]_
map/iou=0.75/area=all/max_dets=100, *float*, \
[#coco_det_eval_3]_
map/iou=0.50:0.95/area=small/max_dets=100, *float*, \
[#coco_det_eval_3]_ [#coco_det_eval_5]_
map/iou=0.50:0.95/area=medium/max_dets=100, *float*, \
[#coco_det_eval_3]_ [#coco_det_eval_5]_
map/iou=0.50:0.95/area=large/max_dets=100, *float*, \
[#coco_det_eval_3]_ [#coco_det_eval_5]_
mar/iou=0.50:0.95/area=all/max_dets=1, *float*, \
[#coco_det_eval_4]_
        mar/iou=0.50:0.95/area=all/max_dets=10, *float*, \
            [#coco_det_eval_4]_
        mar/iou=0.50:0.95/area=all/max_dets=100, *float*, \
            [#coco_det_eval_4]_
mar/iou=0.50:0.95/area=small/max_dets=100, *float*, \
[#coco_det_eval_4]_ [#coco_det_eval_5]_
mar/iou=0.50:0.95/area=medium/max_dets=100, *float*, \
[#coco_det_eval_4]_ [#coco_det_eval_5]_
mar/iou=0.50:0.95/area=large/max_dets=100, *float*, \
[#coco_det_eval_4]_ [#coco_det_eval_5]_
coco_eval, *pycocotools.cocoeval.COCOeval*, \
result from :obj:`pycocotools`
existent_labels, *numpy.ndarray*, \
used labels \
.. [#coco_det_eval_1] An array of average precisions. \
The :math:`l`-th value corresponds to the average precision \
for class :math:`l`. If class :math:`l` does not exist in \
either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
value is set to :obj:`numpy.nan`.
.. [#coco_det_eval_2] An array of average recalls. \
        The :math:`l`-th value corresponds to the average recall \
for class :math:`l`. If class :math:`l` does not exist in \
either :obj:`pred_labels` or :obj:`gt_labels`, the corresponding \
value is set to :obj:`numpy.nan`.
.. [#coco_det_eval_3] The average of average precisions over classes.
.. [#coco_det_eval_4] The average of average recalls over classes.
.. [#coco_det_eval_5] Skip if :obj:`gt_areas` is :obj:`None`.
"""
if not _available:
raise ValueError(
'Please install pycocotools \n'
'pip install -e \'git+https://github.com/cocodataset/coco.git'
'#egg=pycocotools&subdirectory=PythonAPI\'')
gt_coco = pycocotools.coco.COCO()
pred_coco = pycocotools.coco.COCO()
pred_bboxes = iter(pred_bboxes)
pred_labels = iter(pred_labels)
pred_scores = iter(pred_scores)
gt_bboxes = iter(gt_bboxes)
gt_labels = iter(gt_labels)
if gt_areas is None:
compute_area_dependent_metrics = False
gt_areas = itertools.repeat(None)
else:
compute_area_dependent_metrics = True
gt_areas = iter(gt_areas)
gt_crowdeds = (iter(gt_crowdeds) if gt_crowdeds is not None
else itertools.repeat(None))
ids = []
pred_annos = []
gt_annos = []
existent_labels = {}
for i, (pred_bbox, pred_label, pred_score, gt_bbox, gt_label,
gt_area, gt_crowded) in enumerate(six.moves.zip(
pred_bboxes, pred_labels, pred_scores,
gt_bboxes, gt_labels, gt_areas, gt_crowdeds)):
if gt_area is None:
gt_area = itertools.repeat(None)
if gt_crowded is None:
gt_crowded = itertools.repeat(None)
# Starting ids from 1 is important when using COCO.
img_id = i + 1
for pred_bb, pred_lb, pred_sc in zip(pred_bbox, pred_label,
pred_score):
pred_annos.append(
_create_anno(pred_bb, pred_lb, pred_sc,
img_id=img_id, anno_id=len(pred_annos) + 1,
crw=0, ar=None))
existent_labels[pred_lb] = True
for gt_bb, gt_lb, gt_ar, gt_crw in zip(
gt_bbox, gt_label, gt_area, gt_crowded):
gt_annos.append(
_create_anno(gt_bb, gt_lb, None,
img_id=img_id, anno_id=len(gt_annos) + 1,
ar=gt_ar, crw=gt_crw))
existent_labels[gt_lb] = True
ids.append({'id': img_id})
existent_labels = sorted(existent_labels.keys())
pred_coco.dataset['categories'] = [{'id': i} for i in existent_labels]
gt_coco.dataset['categories'] = [{'id': i} for i in existent_labels]
pred_coco.dataset['annotations'] = pred_annos
gt_coco.dataset['annotations'] = gt_annos
pred_coco.dataset['images'] = ids
gt_coco.dataset['images'] = ids
with _redirect_stdout(open(os.devnull, 'w')):
pred_coco.createIndex()
gt_coco.createIndex()
coco_eval = pycocotools.cocoeval.COCOeval(gt_coco, pred_coco, 'bbox')
coco_eval.evaluate()
coco_eval.accumulate()
results = {'coco_eval': coco_eval}
p = coco_eval.params
common_kwargs = {
'prec': coco_eval.eval['precision'],
'rec': coco_eval.eval['recall'],
'iou_threshs': p.iouThrs,
'area_ranges': p.areaRngLbl,
'max_detection_list': p.maxDets}
all_kwargs = {
'ap/iou=0.50:0.95/area=all/max_dets=100': {
'ap': True, 'iou_thresh': None, 'area_range': 'all',
'max_detection': 100},
'ap/iou=0.50/area=all/max_dets=100': {
'ap': True, 'iou_thresh': 0.5, 'area_range': 'all',
'max_detection': 100},
'ap/iou=0.75/area=all/max_dets=100': {
'ap': True, 'iou_thresh': 0.75, 'area_range': 'all',
'max_detection': 100},
'ar/iou=0.50:0.95/area=all/max_dets=1': {
'ap': False, 'iou_thresh': None, 'area_range': 'all',
'max_detection': 1},
'ar/iou=0.50:0.95/area=all/max_dets=10': {
'ap': False, 'iou_thresh': None, 'area_range': 'all',
'max_detection': 10},
'ar/iou=0.50:0.95/area=all/max_dets=100': {
'ap': False, 'iou_thresh': None, 'area_range': 'all',
'max_detection': 100},
}
if compute_area_dependent_metrics:
all_kwargs.update({
'ap/iou=0.50:0.95/area=small/max_dets=100': {
'ap': True, 'iou_thresh': None, 'area_range': 'small',
'max_detection': 100},
'ap/iou=0.50:0.95/area=medium/max_dets=100': {
'ap': True, 'iou_thresh': None, 'area_range': 'medium',
'max_detection': 100},
'ap/iou=0.50:0.95/area=large/max_dets=100': {
'ap': True, 'iou_thresh': None, 'area_range': 'large',
'max_detection': 100},
'ar/iou=0.50:0.95/area=small/max_dets=100': {
'ap': False, 'iou_thresh': None, 'area_range': 'small',
'max_detection': 100},
'ar/iou=0.50:0.95/area=medium/max_dets=100': {
'ap': False, 'iou_thresh': None, 'area_range': 'medium',
'max_detection': 100},
'ar/iou=0.50:0.95/area=large/max_dets=100': {
'ap': False, 'iou_thresh': None, 'area_range': 'large',
'max_detection': 100},
})
for key, kwargs in all_kwargs.items():
kwargs.update(common_kwargs)
metrics, mean_metric = _summarize(**kwargs)
# pycocotools ignores classes that are not included in
# either gt or prediction, but lies between 0 and
# the maximum label id.
# We set values for these classes to np.nan.
results[key] = np.nan * np.ones(np.max(existent_labels) + 1)
results[key][existent_labels] = metrics
results['m' + key] = mean_metric
results['existent_labels'] = existent_labels
return results
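# Illustrative sketch (not part of the original module): a single-image call
# with hypothetical boxes in the documented (y_min, x_min, y_max, x_max)
# order.  Wrapped in a helper so nothing runs at import time; it requires
# pycocotools to be installed.
def _example_eval_detection_coco():
    pred_bboxes = [np.array([[0., 0., 10., 10.]], dtype=np.float32)]
    pred_labels = [np.array([0], dtype=np.int32)]
    pred_scores = [np.array([0.9], dtype=np.float32)]
    gt_bboxes = [np.array([[0., 0., 10., 10.]], dtype=np.float32)]
    gt_labels = [np.array([0], dtype=np.int32)]
    result = eval_detection_coco(pred_bboxes, pred_labels, pred_scores,
                                 gt_bboxes, gt_labels)
    # Mean AP over classes; per-class arrays sit under the 'ap/...' keys.
    return result['map/iou=0.50:0.95/area=all/max_dets=100']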
def _create_anno(bb, lb, sc, img_id, anno_id, ar=None, crw=None):
y_min = bb[0]
x_min = bb[1]
y_max = bb[2]
x_max = bb[3]
height = y_max - y_min
width = x_max - x_min
if ar is None:
# We compute dummy area to pass to pycocotools.
# Note that area dependent scores are ignored afterwards.
ar = height * width
if crw is None:
crw = False
# Rounding is done to make the result consistent with COCO.
anno = {
'image_id': img_id, 'category_id': lb,
'bbox': [np.round(x_min, 2), np.round(y_min, 2),
np.round(width, 2), np.round(height, 2)],
'segmentation': [x_min, y_min, x_min, y_max,
x_max, y_max, x_max, y_min],
'area': ar,
'id': anno_id,
'iscrowd': crw}
if sc is not None:
anno.update({'score': sc})
return anno
def _summarize(
prec, rec, iou_threshs, area_ranges,
max_detection_list,
ap=True, iou_thresh=None, area_range='all',
max_detection=100):
a_idx = area_ranges.index(area_range)
m_idx = max_detection_list.index(max_detection)
if ap:
val_value = prec.copy() # (T, R, K, A, M)
if iou_thresh is not None:
val_value = val_value[iou_thresh == iou_threshs]
val_value = val_value[:, :, :, a_idx, m_idx]
else:
val_value = rec.copy() # (T, K, A, M)
if iou_thresh is not None:
val_value = val_value[iou_thresh == iou_threshs]
val_value = val_value[:, :, a_idx, m_idx]
val_value[val_value == -1] = np.nan
val_value = val_value.reshape((-1, val_value.shape[-1]))
valid_classes = np.any(np.logical_not(np.isnan(val_value)), axis=0)
cls_val_value = np.nan * np.ones(len(valid_classes), dtype=np.float32)
cls_val_value[valid_classes] = np.nanmean(
val_value[:, valid_classes], axis=0)
if not np.any(valid_classes):
mean_val_value = np.nan
else:
mean_val_value = np.nanmean(cls_val_value)
return cls_val_value, mean_val_value
@contextlib.contextmanager
def _redirect_stdout(target):
    original = sys.stdout
    sys.stdout = target
    try:
        yield
    finally:
        sys.stdout = original
|
from typing import Any, Dict, Optional
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN
from .coordinator import ToonDataUpdateCoordinator
class ToonEntity(CoordinatorEntity):
"""Defines a base Toon entity."""
def __init__(
self,
coordinator: ToonDataUpdateCoordinator,
*,
name: str,
icon: str,
enabled_default: bool = True,
) -> None:
"""Initialize the Toon entity."""
super().__init__(coordinator)
self._enabled_default = enabled_default
self._icon = icon
self._name = name
self._state = None
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def icon(self) -> Optional[str]:
"""Return the mdi icon of the entity."""
return self._icon
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self._enabled_default
class ToonDisplayDeviceEntity(ToonEntity):
"""Defines a Toon display device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this thermostat."""
agreement = self.coordinator.data.agreement
model = agreement.display_hardware_version.rpartition("/")[0]
sw_version = agreement.display_software_version.rpartition("/")[-1]
return {
"identifiers": {(DOMAIN, agreement.agreement_id)},
"name": "Toon Display",
"manufacturer": "Eneco",
"model": model,
"sw_version": sw_version,
}
class ToonElectricityMeterDeviceEntity(ToonEntity):
"""Defines a Electricity Meter device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
agreement_id = self.coordinator.data.agreement.agreement_id
return {
"name": "Electricity Meter",
"identifiers": {(DOMAIN, agreement_id, "electricity")},
"via_device": (DOMAIN, agreement_id, "meter_adapter"),
}
class ToonGasMeterDeviceEntity(ToonEntity):
"""Defines a Gas Meter device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
agreement_id = self.coordinator.data.agreement.agreement_id
return {
"name": "Gas Meter",
"identifiers": {(DOMAIN, agreement_id, "gas")},
"via_device": (DOMAIN, agreement_id, "electricity"),
}
class ToonWaterMeterDeviceEntity(ToonEntity):
"""Defines a Water Meter device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
agreement_id = self.coordinator.data.agreement.agreement_id
return {
"name": "Water Meter",
"identifiers": {(DOMAIN, agreement_id, "water")},
"via_device": (DOMAIN, agreement_id, "electricity"),
}
class ToonSolarDeviceEntity(ToonEntity):
"""Defines a Solar Device device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
agreement_id = self.coordinator.data.agreement.agreement_id
return {
"name": "Solar Panels",
"identifiers": {(DOMAIN, agreement_id, "solar")},
"via_device": (DOMAIN, agreement_id, "meter_adapter"),
}
class ToonBoilerModuleDeviceEntity(ToonEntity):
"""Defines a Boiler Module device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
agreement_id = self.coordinator.data.agreement.agreement_id
return {
"name": "Boiler Module",
"manufacturer": "Eneco",
"identifiers": {(DOMAIN, agreement_id, "boiler_module")},
"via_device": (DOMAIN, agreement_id),
}
class ToonBoilerDeviceEntity(ToonEntity):
"""Defines a Boiler device entity."""
@property
def device_info(self) -> Dict[str, Any]:
"""Return device information about this entity."""
agreement_id = self.coordinator.data.agreement.agreement_id
return {
"name": "Boiler",
"identifiers": {(DOMAIN, agreement_id, "boiler")},
"via_device": (DOMAIN, agreement_id, "boiler_module"),
}
|
from qutebrowser.config import configdata, configexc
from qutebrowser.completion.models import completionmodel, listcategory, util
from qutebrowser.commands import runners, cmdexc
from qutebrowser.keyinput import keyutils
def option(*, info):
"""A CompletionModel filled with settings and their descriptions."""
return _option(info, "Options", lambda opt: not opt.no_autoconfig)
def customized_option(*, info):
"""A CompletionModel filled with set settings and their descriptions."""
model = completionmodel.CompletionModel(column_widths=(20, 70, 10))
options = ((values.opt.name, values.opt.description,
info.config.get_str(values.opt.name))
for values in info.config
if values)
model.add_category(listcategory.ListCategory("Customized options",
options))
return model
def list_option(*, info):
"""A CompletionModel filled with settings whose values are lists."""
predicate = lambda opt: (isinstance(info.config.get_obj(opt.name),
list) and not opt.no_autoconfig)
return _option(info, "List options", predicate)
def dict_option(*, info):
"""A CompletionModel filled with settings whose values are dicts."""
predicate = lambda opt: (isinstance(info.config.get_obj(opt.name),
dict) and not opt.no_autoconfig)
return _option(info, "Dict options", predicate)
def _option(info, title, predicate):
"""A CompletionModel that is generated for several option sets.
Args:
info: The config info that can be passed through.
title: The title of the options.
predicate: The function for filtering out the options. Takes a single
argument.
"""
model = completionmodel.CompletionModel(column_widths=(20, 70, 10))
options = ((opt.name, opt.description, info.config.get_str(opt.name))
for opt in configdata.DATA.values()
if predicate(opt))
model.add_category(listcategory.ListCategory(title, options))
return model
def value(optname, *values, info):
"""A CompletionModel filled with setting values.
Args:
optname: The name of the config option this model shows.
values: The values already provided on the command line.
info: A CompletionInfo instance.
"""
model = completionmodel.CompletionModel(column_widths=(30, 70, 0))
try:
current = info.config.get_str(optname)
except configexc.NoOptionError:
return None
opt = info.config.get_opt(optname)
default = opt.typ.to_str(opt.default)
cur_def = []
if current not in values:
cur_def.append((current, "Current value"))
if default not in values:
cur_def.append((default, "Default value"))
if cur_def:
cur_cat = listcategory.ListCategory("Current/Default", cur_def)
model.add_category(cur_cat)
vals = opt.typ.complete() or []
vals = [x for x in vals if x[0] not in values]
if vals:
model.add_category(listcategory.ListCategory("Completions", vals))
return model
def _bind_current_default(key, info):
"""Get current/default data for the given key."""
data = []
try:
seq = keyutils.KeySequence.parse(key)
except keyutils.KeyParseError as e:
data.append(('', str(e), key))
return data
cmd_text = info.keyconf.get_command(seq, 'normal')
if cmd_text:
parser = runners.CommandParser()
try:
cmd = parser.parse(cmd_text).cmd
except cmdexc.NoSuchCommandError:
data.append((cmd_text, '(Current) Invalid command!', key))
else:
data.append((cmd_text, '(Current) {}'.format(cmd.desc), key))
cmd_text = info.keyconf.get_command(seq, 'normal', default=True)
if cmd_text:
parser = runners.CommandParser()
cmd = parser.parse(cmd_text).cmd
data.append((cmd_text, '(Default) {}'.format(cmd.desc), key))
return data
def bind(key, *, info):
"""A CompletionModel filled with all bindable commands and descriptions.
Args:
key: the key being bound.
"""
model = completionmodel.CompletionModel(column_widths=(20, 60, 20))
data = _bind_current_default(key, info)
if data:
model.add_category(listcategory.ListCategory("Current/Default", data))
cmdlist = util.get_cmd_completions(info, include_hidden=True,
include_aliases=True)
model.add_category(listcategory.ListCategory("Commands", cmdlist))
return model
|
import asyncio
import pyotgw
from pyotgw import vars as gw_vars
from serial import SerialException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_DEVICE,
CONF_ID,
CONF_NAME,
PRECISION_HALVES,
PRECISION_TENTHS,
PRECISION_WHOLE,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import DOMAIN
from .const import CONF_FLOOR_TEMP, CONF_PRECISION
class OpenThermGwConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""OpenTherm Gateway Config Flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OpenThermGwOptionsFlow(config_entry)
async def async_step_init(self, info=None):
"""Handle config flow initiation."""
if info:
name = info[CONF_NAME]
device = info[CONF_DEVICE]
gw_id = cv.slugify(info.get(CONF_ID, name))
entries = [e.data for e in self.hass.config_entries.async_entries(DOMAIN)]
if gw_id in [e[CONF_ID] for e in entries]:
return self._show_form({"base": "id_exists"})
if device in [e[CONF_DEVICE] for e in entries]:
return self._show_form({"base": "already_configured"})
async def test_connection():
"""Try to connect to the OpenTherm Gateway."""
otgw = pyotgw.pyotgw()
status = await otgw.connect(self.hass.loop, device)
await otgw.disconnect()
return status.get(gw_vars.OTGW_ABOUT)
try:
res = await asyncio.wait_for(test_connection(), timeout=10)
except (asyncio.TimeoutError, SerialException):
return self._show_form({"base": "cannot_connect"})
if res:
return self._create_entry(gw_id, name, device)
return self._show_form()
async def async_step_user(self, user_input=None):
"""Handle manual initiation of the config flow."""
return await self.async_step_init(user_input)
async def async_step_import(self, import_config):
"""
Import an OpenTherm Gateway device as a config entry.
This flow is triggered by `async_setup` for configured devices.
"""
formatted_config = {
CONF_NAME: import_config.get(CONF_NAME, import_config[CONF_ID]),
CONF_DEVICE: import_config[CONF_DEVICE],
CONF_ID: import_config[CONF_ID],
}
return await self.async_step_init(info=formatted_config)
def _show_form(self, errors=None):
"""Show the config flow form with possible errors."""
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required(CONF_NAME): str,
vol.Required(CONF_DEVICE): str,
vol.Optional(CONF_ID): str,
}
),
errors=errors or {},
)
def _create_entry(self, gw_id, name, device):
"""Create entry for the OpenTherm Gateway device."""
return self.async_create_entry(
title=name, data={CONF_ID: gw_id, CONF_DEVICE: device, CONF_NAME: name}
)
class OpenThermGwOptionsFlow(config_entries.OptionsFlow):
"""Handle opentherm_gw options."""
def __init__(self, config_entry):
"""Initialize the options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the opentherm_gw options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_PRECISION,
default=self.config_entry.options.get(CONF_PRECISION, 0),
): vol.All(
vol.Coerce(float),
vol.In(
[0, PRECISION_TENTHS, PRECISION_HALVES, PRECISION_WHOLE]
),
),
vol.Optional(
CONF_FLOOR_TEMP,
default=self.config_entry.options.get(CONF_FLOOR_TEMP, False),
): bool,
}
),
)
|
import logging
from homeassistant.components.fan import FanEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
DOMAIN,
METRIC_KEY_MODE,
METRIC_KEY_PROFILE_FAN_SPEED_AWAY,
METRIC_KEY_PROFILE_FAN_SPEED_BOOST,
METRIC_KEY_PROFILE_FAN_SPEED_HOME,
SIGNAL_VALLOX_STATE_UPDATE,
)
_LOGGER = logging.getLogger(__name__)
# Device attributes
ATTR_PROFILE_FAN_SPEED_HOME = {
"description": "fan_speed_home",
"metric_key": METRIC_KEY_PROFILE_FAN_SPEED_HOME,
}
ATTR_PROFILE_FAN_SPEED_AWAY = {
"description": "fan_speed_away",
"metric_key": METRIC_KEY_PROFILE_FAN_SPEED_AWAY,
}
ATTR_PROFILE_FAN_SPEED_BOOST = {
"description": "fan_speed_boost",
"metric_key": METRIC_KEY_PROFILE_FAN_SPEED_BOOST,
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the fan device."""
if discovery_info is None:
return
client = hass.data[DOMAIN]["client"]
client.set_settable_address(METRIC_KEY_MODE, int)
device = ValloxFan(
hass.data[DOMAIN]["name"], client, hass.data[DOMAIN]["state_proxy"]
)
async_add_entities([device], update_before_add=False)
class ValloxFan(FanEntity):
"""Representation of the fan."""
def __init__(self, name, client, state_proxy):
"""Initialize the fan."""
self._name = name
self._client = client
self._state_proxy = state_proxy
self._available = False
self._state = None
self._fan_speed_home = None
self._fan_speed_away = None
self._fan_speed_boost = None
@property
def should_poll(self):
"""Do not poll the device."""
return False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def available(self):
"""Return if state is known."""
return self._available
@property
def is_on(self):
"""Return if device is on."""
return self._state
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
ATTR_PROFILE_FAN_SPEED_HOME["description"]: self._fan_speed_home,
ATTR_PROFILE_FAN_SPEED_AWAY["description"]: self._fan_speed_away,
ATTR_PROFILE_FAN_SPEED_BOOST["description"]: self._fan_speed_boost,
}
async def async_added_to_hass(self):
"""Call to update."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_VALLOX_STATE_UPDATE, self._update_callback
)
)
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
async def async_update(self):
"""Fetch state from the device."""
try:
# Fetch if the whole device is in regular operation state.
mode = self._state_proxy.fetch_metric(METRIC_KEY_MODE)
if mode == 0:
self._state = True
else:
self._state = False
# Fetch the profile fan speeds.
self._fan_speed_home = int(
self._state_proxy.fetch_metric(
ATTR_PROFILE_FAN_SPEED_HOME["metric_key"]
)
)
self._fan_speed_away = int(
self._state_proxy.fetch_metric(
ATTR_PROFILE_FAN_SPEED_AWAY["metric_key"]
)
)
self._fan_speed_boost = int(
self._state_proxy.fetch_metric(
ATTR_PROFILE_FAN_SPEED_BOOST["metric_key"]
)
)
self._available = True
except (OSError, KeyError) as err:
self._available = False
_LOGGER.error("Error updating fan: %s", err)
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn the device on."""
_LOGGER.debug("Turn on: %s", speed)
        # Only a call without an explicit speed (speed is None) corresponds to
        # the GUI toggle switch being activated; explicit speeds are ignored.
if speed is not None:
return
if self._state is False:
try:
await self._client.set_values({METRIC_KEY_MODE: 0})
# This state change affects other entities like sensors. Force
# an immediate update that can be observed by all parties
# involved.
await self._state_proxy.async_update(None)
except OSError as err:
self._available = False
_LOGGER.error("Error turning on: %s", err)
else:
_LOGGER.error("Already on")
async def async_turn_off(self, **kwargs) -> None:
"""Turn the device off."""
if self._state is True:
try:
await self._client.set_values({METRIC_KEY_MODE: 5})
# Same as for turn_on method.
await self._state_proxy.async_update(None)
except OSError as err:
self._available = False
_LOGGER.error("Error turning off: %s", err)
else:
_LOGGER.error("Already off")
|
import random
from urllib.request import urlopen
from flexx import flx
# Define names of standard images
image_names = ['clock.png', 'page.png', 'camera.png', 'coins.png',
'hubble_deep_field.png', 'text.png', 'chelsea.png',
'coffee.png', 'horse.png', 'wikkie.png', 'moon.png',
'astronaut.png', 'immunohistochemistry.png']
def get_img_blob(name):
""" Given an image name, download the raw bytes from imageio's repository
of standard images.
"""
url_root = 'https://github.com/imageio/imageio-binaries/raw/master/images/'
return urlopen(url_root + name, timeout=2.0).read()
# Randomly select a shared image at server start
link1 = flx.assets.add_shared_data('image.png',
get_img_blob(random.choice(image_names)))
class Example(flx.PyComponent):
def init(self):
# Randomly select image - different between sessions
link2 = self.session.add_data('image.png',
get_img_blob(random.choice(image_names)))
# Create widget to show images
View(link1, link2)
class View(flx.Label):
def init(self, link1, link2):
html = '<p>Hit F5 to reload the page (i.e. create a new session'
html += ', unless this is an exported app)</p>'
html += '<p>This is session "%s"</p>' % self.session.id
html += '<img src="%s" />' % link1
html += '<img src="%s" />' % link2
self.set_html(html)
if __name__ == '__main__':
# Launch the app twice to show how different sessions have different data
a = flx.App(Example)
m1 = a.launch('browser')
m2 = a.launch('browser')
flx.run()
|
import os
from tempfile import TemporaryDirectory
from radicale import pathutils, storage
class CollectionDeleteMixin:
def delete(self, href=None):
if href is None:
# Delete the collection
parent_dir = os.path.dirname(self._filesystem_path)
try:
os.rmdir(self._filesystem_path)
except OSError:
with TemporaryDirectory(
prefix=".Radicale.tmp-", dir=parent_dir) as tmp:
os.rename(self._filesystem_path, os.path.join(
tmp, os.path.basename(self._filesystem_path)))
self._storage._sync_directory(parent_dir)
else:
self._storage._sync_directory(parent_dir)
else:
# Delete an item
if not pathutils.is_safe_filesystem_path_component(href):
raise pathutils.UnsafePathError(href)
path = pathutils.path_to_filesystem(self._filesystem_path, href)
if not os.path.isfile(path):
raise storage.ComponentNotFoundError(href)
os.remove(path)
self._storage._sync_directory(os.path.dirname(path))
# Track the change
self._update_history_etag(href, None)
self._clean_history()
|
from importlib import import_module
from django.conf import settings
from django.urls import reverse
from django.utils.http import url_has_allowed_host_and_scheme
from social_django.strategy import DjangoStrategy
from weblate.utils.site import get_site_url
def create_session(*args):
engine = import_module(settings.SESSION_ENGINE)
return engine.SessionStore(*args)
class WeblateStrategy(DjangoStrategy):
def __init__(self, storage, request=None, tpl=None):
"""Restore session data based on passed ID."""
super().__init__(storage, request, tpl)
if request and "verification_code" in request.GET and "id" in request.GET:
self.session = create_session(request.GET["id"])
def request_data(self, merge=True):
if not self.request:
return {}
if merge:
data = self.request.GET.copy()
data.update(self.request.POST)
elif self.request.method == "POST":
data = self.request.POST.copy()
else:
data = self.request.GET.copy()
        # This is mostly a fix for the missing "next" URL validation in
        # Python Social Auth:
# - https://github.com/python-social-auth/social-core/pull/92
# - https://github.com/python-social-auth/social-core/issues/62
if "next" in data and not url_has_allowed_host_and_scheme(
data["next"], allowed_hosts=None
):
data["next"] = "{}#account".format(reverse("profile"))
return data
def build_absolute_uri(self, path=None):
if self.request:
self.request.__dict__["_current_scheme_host"] = get_site_url()
return super().build_absolute_uri(path)
def clean_partial_pipeline(self, token):
# The cleanup somehow breaks our partial pipelines, simply skip
# it for now
# See https://github.com/python-social-auth/social-core/issues/287
return
def really_clean_partial_pipeline(self, token):
super().clean_partial_pipeline(token)
|
import errno
import hashlib
import inspect
import locale
import os
import os.path
import random
import re
import socket
import sys
import types
from coverage import env
from coverage.backward import to_bytes, unicode_class
ISOLATED_MODULES = {}
def isolate_module(mod):
"""Copy a module so that we are isolated from aggressive mocking.
If a test suite mocks os.path.exists (for example), and then we need to use
it during the test, everything will get tangled up if we use their mock.
Making a copy of the module when we import it will isolate coverage.py from
those complications.
"""
if mod not in ISOLATED_MODULES:
new_mod = types.ModuleType(mod.__name__)
ISOLATED_MODULES[mod] = new_mod
for name in dir(mod):
value = getattr(mod, name)
if isinstance(value, types.ModuleType):
value = isolate_module(value)
setattr(new_mod, name, value)
return ISOLATED_MODULES[mod]
os = isolate_module(os)
def dummy_decorator_with_args(*args_unused, **kwargs_unused):
"""Dummy no-op implementation of a decorator with arguments."""
def _decorator(func):
return func
return _decorator
# Environment COVERAGE_NO_CONTRACTS=1 can turn off contracts while debugging
# tests to remove noise from stack traces.
# $set_env.py: COVERAGE_NO_CONTRACTS - Disable PyContracts to simplify stack traces.
USE_CONTRACTS = env.TESTING and not bool(int(os.environ.get("COVERAGE_NO_CONTRACTS", 0)))
# Use PyContracts for assertion testing on parameters and returns, but only if
# we are running our own test suite.
if USE_CONTRACTS:
from contracts import contract # pylint: disable=unused-import
from contracts import new_contract as raw_new_contract
def new_contract(*args, **kwargs):
"""A proxy for contracts.new_contract that doesn't mind happening twice."""
try:
return raw_new_contract(*args, **kwargs)
except ValueError:
# During meta-coverage, this module is imported twice, and
# PyContracts doesn't like redefining contracts. It's OK.
pass
# Define contract words that PyContract doesn't have.
new_contract('bytes', lambda v: isinstance(v, bytes))
if env.PY3:
new_contract('unicode', lambda v: isinstance(v, unicode_class))
def one_of(argnames):
"""Ensure that only one of the argnames is non-None."""
def _decorator(func):
argnameset = set(name.strip() for name in argnames.split(","))
def _wrapper(*args, **kwargs):
vals = [kwargs.get(name) for name in argnameset]
assert sum(val is not None for val in vals) == 1
return func(*args, **kwargs)
return _wrapper
return _decorator
else: # pragma: not testing
# We aren't using real PyContracts, so just define our decorators as
# stunt-double no-ops.
contract = dummy_decorator_with_args
one_of = dummy_decorator_with_args
def new_contract(*args_unused, **kwargs_unused):
"""Dummy no-op implementation of `new_contract`."""
pass
def nice_pair(pair):
"""Make a nice string representation of a pair of numbers.
If the numbers are equal, just return the number, otherwise return the pair
with a dash between them, indicating the range.
"""
start, end = pair
if start == end:
return "%d" % start
else:
return "%d-%d" % (start, end)
def expensive(fn):
"""A decorator to indicate that a method shouldn't be called more than once.
Normally, this does nothing. During testing, this raises an exception if
called more than once.
"""
if env.TESTING:
attr = "_once_" + fn.__name__
def _wrapper(self):
if hasattr(self, attr):
raise AssertionError("Shouldn't have called %s more than once" % fn.__name__)
setattr(self, attr, True)
return fn(self)
return _wrapper
else:
return fn # pragma: not testing
def bool_or_none(b):
"""Return bool(b), but preserve None."""
if b is None:
return None
else:
return bool(b)
def join_regex(regexes):
"""Combine a list of regexes into one that matches any of them."""
return "|".join("(?:%s)" % r for r in regexes)
def file_be_gone(path):
"""Remove a file, and don't get annoyed if it doesn't exist."""
try:
os.remove(path)
except OSError as e:
if e.errno != errno.ENOENT:
raise
def ensure_dir(directory):
"""Make sure the directory exists.
If `directory` is None or empty, do nothing.
"""
if directory and not os.path.isdir(directory):
os.makedirs(directory)
def ensure_dir_for_file(path):
"""Make sure the directory for the path exists."""
ensure_dir(os.path.dirname(path))
def output_encoding(outfile=None):
"""Determine the encoding to use for output written to `outfile` or stdout."""
if outfile is None:
outfile = sys.stdout
encoding = (
getattr(outfile, "encoding", None) or
getattr(sys.__stdout__, "encoding", None) or
locale.getpreferredencoding()
)
return encoding
def filename_suffix(suffix):
"""Compute a filename suffix for a data file.
If `suffix` is a string or None, simply return it. If `suffix` is True,
then build a suffix incorporating the hostname, process id, and a random
number.
Returns a string or None.
"""
if suffix is True:
# If data_suffix was a simple true value, then make a suffix with
# plenty of distinguishing information. We do this here in
# `save()` at the last minute so that the pid will be correct even
# if the process forks.
dice = random.Random(os.urandom(8)).randint(0, 999999)
suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
return suffix
class Hasher(object):
"""Hashes Python data into md5."""
def __init__(self):
self.md5 = hashlib.md5()
def update(self, v):
"""Add `v` to the hash, recursively if needed."""
self.md5.update(to_bytes(str(type(v))))
if isinstance(v, unicode_class):
self.md5.update(v.encode('utf8'))
elif isinstance(v, bytes):
self.md5.update(v)
elif v is None:
pass
elif isinstance(v, (int, float)):
self.md5.update(to_bytes(str(v)))
elif isinstance(v, (tuple, list)):
for e in v:
self.update(e)
elif isinstance(v, dict):
keys = v.keys()
for k in sorted(keys):
self.update(k)
self.update(v[k])
else:
for k in dir(v):
if k.startswith('__'):
continue
a = getattr(v, k)
if inspect.isroutine(a):
continue
self.update(k)
self.update(a)
self.md5.update(b'.')
def hexdigest(self):
"""Retrieve the hex digest of the hash."""
return self.md5.hexdigest()
def _needs_to_implement(that, func_name):
"""Helper to raise NotImplementedError in interface stubs."""
if hasattr(that, "_coverage_plugin_name"):
thing = "Plugin"
name = that._coverage_plugin_name
else:
thing = "Class"
klass = that.__class__
name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
raise NotImplementedError(
"{thing} {name!r} needs to implement {func_name}()".format(
thing=thing, name=name, func_name=func_name
)
)
class DefaultValue(object):
"""A sentinel object to use for unusual default-value needs.
Construct with a string that will be used as the repr, for display in help
and Sphinx output.
"""
def __init__(self, display_as):
self.display_as = display_as
def __repr__(self):
return self.display_as
def substitute_variables(text, variables):
"""Substitute ``${VAR}`` variables in `text` with their values.
Variables in the text can take a number of shell-inspired forms::
$VAR
${VAR}
${VAR?} strict: an error if VAR isn't defined.
${VAR-missing} defaulted: "missing" if VAR isn't defined.
$$ just a dollar sign.
`variables` is a dictionary of variable values.
Returns the resulting text with values substituted.
"""
dollar_pattern = r"""(?x) # Use extended regex syntax
\$ # A dollar sign,
(?: # then
(?P<dollar>\$) | # a dollar sign, or
(?P<word1>\w+) | # a plain word, or
{ # a {-wrapped
(?P<word2>\w+) # word,
(?:
(?P<strict>\?) | # with a strict marker
-(?P<defval>[^}]*) # or a default value
)? # maybe.
}
)
"""
def dollar_replace(match):
"""Called for each $replacement."""
# Only one of the groups will have matched, just get its text.
word = next(g for g in match.group('dollar', 'word1', 'word2') if g)
if word == "$":
return "$"
elif word in variables:
return variables[word]
elif match.group('strict'):
msg = "Variable {} is undefined: {!r}".format(word, text)
raise CoverageException(msg)
else:
return match.group('defval')
text = re.sub(dollar_pattern, dollar_replace, text)
return text
class BaseCoverageException(Exception):
"""The base of all Coverage exceptions."""
pass
class CoverageException(BaseCoverageException):
"""An exception raised by a coverage.py function."""
pass
class NoSource(CoverageException):
"""We couldn't find the source for a module."""
pass
class NoCode(NoSource):
"""We couldn't find any code at all."""
pass
class NotPython(CoverageException):
"""A source file turned out not to be parsable Python."""
pass
class ExceptionDuringRun(CoverageException):
"""An exception happened while running customer code.
Construct it with three arguments, the values from `sys.exc_info`.
"""
pass
class StopEverything(BaseCoverageException):
"""An exception that means everything should stop.
The CoverageTest class converts these to SkipTest, so that when running
tests, raising this exception will automatically skip the test.
"""
pass
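# --- Illustrative sketch, not part of the original module ---
# A quick, self-contained demonstration of the substitution forms handled by
# substitute_variables() above; the variable names and values here are made up.
if __name__ == "__main__":  # pragma: no cover
    _demo_vars = {"NAME": "world"}
    assert substitute_variables("hello $NAME", _demo_vars) == "hello world"
    assert substitute_variables("hello ${NAME}", _demo_vars) == "hello world"
    assert substitute_variables("${MISSING-none}", _demo_vars) == "none"
    assert substitute_variables("cost: $$5", _demo_vars) == "cost: $5"
    try:
        substitute_variables("${MISSING?}", _demo_vars)
    except CoverageException as exc:
        print("strict form raised:", exc)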
|
import logging
from . import initialize_bot
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config):
"""Set up the Telegram broadcast platform."""
bot = initialize_bot(config)
bot_config = await hass.async_add_executor_job(bot.getMe)
_LOGGER.debug(
"Telegram broadcast platform setup with bot %s", bot_config["username"]
)
return True
|
from aiohomekit.model.characteristics import CharacteristicsTypes
from homeassistant.components.lock import LockEntity
from homeassistant.const import ATTR_BATTERY_LEVEL, STATE_LOCKED, STATE_UNLOCKED
from homeassistant.core import callback
from . import KNOWN_DEVICES, HomeKitEntity
STATE_JAMMED = "jammed"
CURRENT_STATE_MAP = {0: STATE_UNLOCKED, 1: STATE_LOCKED, 2: STATE_JAMMED, 3: None}
TARGET_STATE_MAP = {STATE_UNLOCKED: 0, STATE_LOCKED: 1}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Homekit lock."""
hkid = config_entry.data["AccessoryPairingID"]
conn = hass.data[KNOWN_DEVICES][hkid]
@callback
def async_add_service(aid, service):
if service["stype"] != "lock-mechanism":
return False
info = {"aid": aid, "iid": service["iid"]}
async_add_entities([HomeKitLock(conn, info)], True)
return True
conn.add_listener(async_add_service)
class HomeKitLock(HomeKitEntity, LockEntity):
"""Representation of a HomeKit Controller Lock."""
def get_characteristic_types(self):
"""Define the homekit characteristics the entity cares about."""
return [
CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE,
CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE,
CharacteristicsTypes.BATTERY_LEVEL,
]
@property
def is_locked(self):
"""Return true if device is locked."""
value = self.service.value(CharacteristicsTypes.LOCK_MECHANISM_CURRENT_STATE)
return CURRENT_STATE_MAP[value] == STATE_LOCKED
async def async_lock(self, **kwargs):
"""Lock the device."""
await self._set_lock_state(STATE_LOCKED)
async def async_unlock(self, **kwargs):
"""Unlock the device."""
await self._set_lock_state(STATE_UNLOCKED)
async def _set_lock_state(self, state):
"""Send state command."""
await self.async_put_characteristics(
{CharacteristicsTypes.LOCK_MECHANISM_TARGET_STATE: TARGET_STATE_MAP[state]}
)
@property
def device_state_attributes(self):
"""Return the optional state attributes."""
attributes = {}
battery_level = self.service.value(CharacteristicsTypes.BATTERY_LEVEL)
if battery_level:
attributes[ATTR_BATTERY_LEVEL] = battery_level
return attributes
|
import logging
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_CURTAIN,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPENING,
CoverEntity,
)
from homeassistant.const import ATTR_ID
from .const import API, DEFAULT_OFFSET, DOMAIN, SLIDES
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up cover(s) for Slide platform."""
if discovery_info is None:
return
entities = []
for slide in hass.data[DOMAIN][SLIDES].values():
_LOGGER.debug("Setting up Slide entity: %s", slide)
entities.append(SlideCover(hass.data[DOMAIN][API], slide))
async_add_entities(entities)
class SlideCover(CoverEntity):
"""Representation of a Slide cover."""
def __init__(self, api, slide):
"""Initialize the cover."""
self._api = api
self._slide = slide
self._id = slide["id"]
self._unique_id = slide["mac"]
self._name = slide["name"]
self._invert = slide["invert"]
@property
def unique_id(self):
"""Return the device unique id."""
return self._unique_id
@property
def name(self):
"""Return the device name."""
return self._name
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {ATTR_ID: self._id}
@property
def is_opening(self):
"""Return if the cover is opening or not."""
return self._slide["state"] == STATE_OPENING
@property
def is_closing(self):
"""Return if the cover is closing or not."""
return self._slide["state"] == STATE_CLOSING
@property
def is_closed(self):
"""Return None if status is unknown, True if closed, else False."""
if self._slide["state"] is None:
return None
return self._slide["state"] == STATE_CLOSED
@property
def available(self):
"""Return False if state is not available."""
return self._slide["online"]
@property
def assumed_state(self):
"""Let HA know the integration is assumed state."""
return True
@property
def device_class(self):
"""Return the device class of the cover."""
return DEVICE_CLASS_CURTAIN
@property
def current_cover_position(self):
"""Return the current position of cover shutter."""
pos = self._slide["pos"]
if pos is not None:
if (1 - pos) <= DEFAULT_OFFSET or pos <= DEFAULT_OFFSET:
pos = round(pos)
if not self._invert:
pos = 1 - pos
pos = int(pos * 100)
return pos
async def async_open_cover(self, **kwargs):
"""Open the cover."""
self._slide["state"] = STATE_OPENING
await self._api.slide_open(self._id)
async def async_close_cover(self, **kwargs):
"""Close the cover."""
self._slide["state"] = STATE_CLOSING
await self._api.slide_close(self._id)
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
await self._api.slide_stop(self._id)
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
position = kwargs[ATTR_POSITION] / 100
if not self._invert:
position = 1 - position
if self._slide["pos"] is not None:
if position > self._slide["pos"]:
self._slide["state"] = STATE_CLOSING
else:
self._slide["state"] = STATE_OPENING
await self._api.slide_set_position(self._id, position)
|
from django.db import migrations
CREATE = "CREATE UNIQUE INDEX weblate_auth_user_{0}_ci ON weblate_auth_user(UPPER({0}))"
DROP = "DROP INDEX weblate_auth_user_{0}_ci"
def create_index(apps, schema_editor):
if schema_editor.connection.vendor == "postgresql":
schema_editor.execute(CREATE.format("username"))
schema_editor.execute(CREATE.format("email"))
def drop_index(apps, schema_editor):
if schema_editor.connection.vendor == "postgresql":
schema_editor.execute(DROP.format("username"))
schema_editor.execute(DROP.format("email"))
class Migration(migrations.Migration):
dependencies = [
("weblate_auth", "0010_migrate_componentlist"),
]
operations = [
migrations.RunPython(create_index, drop_index, elidable=False, atomic=False)
]
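# --- Illustrative sketch, not part of the original migration ---
# The SQL statements the RunPython callbacks above emit on PostgreSQL, with the
# column-name template filled in.
if __name__ == "__main__":
    print(CREATE.format("username"))
    # -> CREATE UNIQUE INDEX weblate_auth_user_username_ci ON weblate_auth_user(UPPER(username))
    print(DROP.format("email"))
    # -> DROP INDEX weblate_auth_user_email_ci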
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hostname(host):
assert 'delegated-instance-docker' == host.check_output('hostname -s')
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
f = host.file('/etc/molecule/delegated-instance-docker')
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
def sort_by_system_instance_health(instances):
return sorted(
instances,
key=lambda i: (
i.instance_status["SystemStatus"]["Status"] != "ok"
or i.instance_status["InstanceStatus"]["Status"] != "ok"
),
)
def sort_by_upcoming_events(instances):
return sorted(instances, key=lambda i: len(i.instance_status.get("Events", [])))
def sort_by_total_tasks(instances):
return sorted(instances, key=lambda i: i.task_counts.count, reverse=True)
def sort_by_running_batch_count(instances):
return sorted(instances, key=lambda i: i.task_counts.batch_count, reverse=True)
def sort_by_ec2_fitness(instances):
"""
Sort a list according to their fitness. This will return the list of instances
in order of 'fitness': that is, that which is least desirable to kill is first in
the list.
Fitness is judged according to the following rules:
- any instance considered to have a non 'ok' system or instance status is always
considered to be least healthy
- next, instances are ranked according to whether they have events planned. an event
planned marks against your fitness.
- next, instances are sorted according to the number of batch tasks running on them.
we can't drain batch tasks, so make an effort to avoid disrupting them.
- finally, instances are sorted according to the number of total tasks they have. those with
the hightest total task are considered fittest, because it's painful to drain them.
"""
return sort_by_system_instance_health(
sort_by_upcoming_events(
sort_by_running_batch_count(sort_by_total_tasks(instances))
)
)
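# --- Illustrative sketch, not part of the original module ---
# A hypothetical, minimal stand-in for the instance objects sorted above,
# showing how the chained stable sorts compose: the healthy instance running
# the most tasks ends up first (fittest), the unhealthy one ends up last.
if __name__ == "__main__":
    from collections import namedtuple
    TaskCounts = namedtuple("TaskCounts", ["count", "batch_count"])
    FakeInstance = namedtuple("FakeInstance", ["instance_status", "task_counts"])
    def _make(status, events, tasks, batch_tasks):
        return FakeInstance(
            instance_status={
                "SystemStatus": {"Status": status},
                "InstanceStatus": {"Status": status},
                "Events": events,
            },
            task_counts=TaskCounts(count=tasks, batch_count=batch_tasks),
        )
    unhealthy = _make("impaired", [], 10, 0)
    idle = _make("ok", [], 0, 0)
    busy = _make("ok", [], 20, 0)
    ranked = sort_by_ec2_fitness([busy, idle, unhealthy])
    assert ranked[0] is busy  # fittest: healthy and running the most tasks
    assert ranked[-1] is unhealthy  # least fit: non-'ok' status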
|
from abc import ABC, abstractmethod
from typing import List, Tuple, Optional, Dict
from datetime import datetime
import discord
from redbot.core import Config, commands
from redbot.core.bot import Red
class MixinMeta(ABC):
"""
Base class for well behaved type hint detection with composite class.
Basically, to keep developers sane when not all attributes are defined in each mixin.
"""
def __init__(self, *_args):
self.config: Config
self.bot: Red
self._mutes_cache: Dict[int, Dict[int, Optional[datetime]]]
@staticmethod
@abstractmethod
async def _voice_perm_check(
ctx: commands.Context, user_voice_state: Optional[discord.VoiceState], **perms: bool
) -> bool:
raise NotImplementedError()
|
import inspect
import enum
import pytest
from PyQt5.QtCore import QUrl
from qutebrowser.commands import argparser, cmdexc
class Enum(enum.Enum):
foo = enum.auto()
foo_bar = enum.auto()
class TestArgumentParser:
@pytest.fixture
def parser(self):
return argparser.ArgumentParser('foo')
def test_name(self, parser):
assert parser.name == 'foo'
def test_exit(self, parser):
parser.add_argument('--help', action='help')
with pytest.raises(argparser.ArgumentParserExit) as excinfo:
parser.parse_args(['--help'])
assert excinfo.value.status == 0
def test_error(self, parser):
with pytest.raises(argparser.ArgumentParserError,
match="Unrecognized arguments: --foo"):
parser.parse_args(['--foo'])
def test_help(self, parser, tabbed_browser_stubs):
parser.add_argument('--help', action=argparser.HelpAction, nargs=0)
with pytest.raises(argparser.ArgumentParserExit):
parser.parse_args(['--help'])
expected_url = QUrl('qute://help/commands.html#foo')
assert tabbed_browser_stubs[1].loaded_url == expected_url
@pytest.mark.parametrize('types, value, expected', [
([Enum], 'foo', Enum.foo),
([Enum], 'foo-bar', Enum.foo_bar),
([int], '2', 2),
([int, str], 'foo', 'foo'),
])
@pytest.mark.parametrize('multi', [True, False])
def test_type_conv_valid(types, value, expected, multi):
param = inspect.Parameter('foo', inspect.Parameter.POSITIONAL_ONLY)
if multi:
assert argparser.multitype_conv(param, types, value) == expected
elif len(types) == 1:
assert argparser.type_conv(param, types[0], value) == expected
@pytest.mark.parametrize('typ, value', [
(Enum, 'blubb'),
(Enum, 'foo_bar'),
(int, '2.5'),
(int, 'foo'),
])
@pytest.mark.parametrize('multi', [True, False])
def test_type_conv_invalid(typ, value, multi):
param = inspect.Parameter('foo', inspect.Parameter.POSITIONAL_ONLY)
if multi:
msg = 'foo: Invalid value {}'.format(value)
elif typ is Enum:
msg = ('foo: Invalid value {} - expected one of: foo, '
'foo-bar'.format(value))
else:
msg = 'foo: Invalid {} value {}'.format(typ.__name__, value)
with pytest.raises(cmdexc.ArgumentTypeError, match=msg):
if multi:
argparser.multitype_conv(param, [typ], value)
else:
argparser.type_conv(param, typ, value)
def test_multitype_conv_invalid_type():
"""Test using an invalid type with a multitype converter."""
param = inspect.Parameter('foo', inspect.Parameter.POSITIONAL_ONLY)
with pytest.raises(ValueError, match="foo: Unknown type None!"):
argparser.multitype_conv(param, [None], '')
@pytest.mark.parametrize('value, typ', [(None, None), (42, int)])
def test_conv_default_param(value, typ):
"""The default value should always be a valid choice."""
def func(foo=value):
pass
param = inspect.signature(func).parameters['foo']
assert argparser.type_conv(param, typ, value, str_choices=['val']) == value
def test_conv_str_type():
"""Using a str literal as type used to mean exactly that's a valid value.
This got replaced by @cmdutils.argument(..., choices=...), so we make sure
no string annotations are there anymore.
"""
param = inspect.Parameter('foo', inspect.Parameter.POSITIONAL_ONLY)
with pytest.raises(TypeError, match='foo: Legacy string type!'):
argparser.type_conv(param, 'val', None)
def test_conv_str_choices_valid():
"""Calling str type with str_choices and valid value."""
param = inspect.Parameter('foo', inspect.Parameter.POSITIONAL_ONLY)
converted = argparser.type_conv(param, str, 'val1',
str_choices=['val1', 'val2'])
assert converted == 'val1'
def test_conv_str_choices_invalid():
"""Calling str type with str_choices and invalid value."""
param = inspect.Parameter('foo', inspect.Parameter.POSITIONAL_ONLY)
with pytest.raises(cmdexc.ArgumentTypeError, match='foo: Invalid value '
'val3 - expected one of: val1, val2'):
argparser.type_conv(param, str, 'val3', str_choices=['val1', 'val2'])
|
import pytest
from voluptuous.error import MultipleInvalid
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.rflink import (
CONF_RECONNECT_INTERVAL,
DATA_ENTITY_LOOKUP,
EVENT_KEY_COMMAND,
EVENT_KEY_SENSOR,
SERVICE_SEND_COMMAND,
TMP_ENTITY,
RflinkCommand,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_STOP_COVER, SERVICE_TURN_OFF
from tests.async_mock import Mock
async def mock_rflink(
hass, config, domain, monkeypatch, failures=None, failcommand=False
):
"""Create mock RFLink asyncio protocol, test component setup."""
transport, protocol = (Mock(), Mock())
async def send_command_ack(*command):
return not failcommand
protocol.send_command_ack = Mock(wraps=send_command_ack)
def send_command(*command):
return not failcommand
protocol.send_command = Mock(wraps=send_command)
async def create_rflink_connection(*args, **kwargs):
"""Return mocked transport and protocol."""
        # failures can be a list of booleans indicating in which sequence
        # creating a connection should succeed or fail
if failures:
fail = failures.pop()
else:
fail = False
if fail:
raise ConnectionRefusedError
else:
return transport, protocol
mock_create = Mock(wraps=create_rflink_connection)
monkeypatch.setattr(
"homeassistant.components.rflink.create_rflink_connection", mock_create
)
await async_setup_component(hass, "rflink", config)
await async_setup_component(hass, domain, config)
await hass.async_block_till_done()
# hook into mock config for injecting events
event_callback = mock_create.call_args_list[0][1]["event_callback"]
assert event_callback
disconnect_callback = mock_create.call_args_list[0][1]["disconnect_callback"]
return event_callback, mock_create, protocol, disconnect_callback
async def test_version_banner(hass, monkeypatch):
"""Test sending unknown commands doesn't cause issues."""
# use sensor domain during testing main platform
domain = "sensor"
config = {
"rflink": {"port": "/dev/ttyABC0"},
domain: {
"platform": "rflink",
"devices": {"test": {"name": "test", "sensor_type": "temperature"}},
},
}
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, domain, monkeypatch)
event_callback(
{
"hardware": "Nodo RadioFrequencyLink",
"firmware": "RFLink Gateway",
"version": "1.1",
"revision": "45",
}
)
async def test_send_no_wait(hass, monkeypatch):
"""Test command sending without ack."""
domain = "switch"
config = {
"rflink": {"port": "/dev/ttyABC0", "wait_for_ack": False},
domain: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "aliases": ["test_alias_0_0"]}
},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
hass.async_create_task(
hass.services.async_call(
domain, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "switch.test"}
)
)
await hass.async_block_till_done()
assert protocol.send_command.call_args_list[0][0][0] == "protocol_0_0"
assert protocol.send_command.call_args_list[0][0][1] == "off"
async def test_cover_send_no_wait(hass, monkeypatch):
"""Test command sending to a cover device without ack."""
domain = "cover"
config = {
"rflink": {"port": "/dev/ttyABC0", "wait_for_ack": False},
domain: {
"platform": "rflink",
"devices": {
"RTS_0100F2_0": {"name": "test", "aliases": ["test_alias_0_0"]}
},
},
}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
hass.async_create_task(
hass.services.async_call(
domain, SERVICE_STOP_COVER, {ATTR_ENTITY_ID: "cover.test"}
)
)
await hass.async_block_till_done()
assert protocol.send_command.call_args_list[0][0][0] == "RTS_0100F2_0"
assert protocol.send_command.call_args_list[0][0][1] == "STOP"
async def test_send_command(hass, monkeypatch):
"""Test send_command service."""
domain = "rflink"
config = {"rflink": {"port": "/dev/ttyABC0"}}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
hass.async_create_task(
hass.services.async_call(
domain,
SERVICE_SEND_COMMAND,
{"device_id": "newkaku_0000c6c2_1", "command": "on"},
)
)
await hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list[0][0][0] == "newkaku_0000c6c2_1"
assert protocol.send_command_ack.call_args_list[0][0][1] == "on"
async def test_send_command_invalid_arguments(hass, monkeypatch):
"""Test send_command service."""
domain = "rflink"
config = {"rflink": {"port": "/dev/ttyABC0"}}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(hass, config, domain, monkeypatch)
# one argument missing
with pytest.raises(MultipleInvalid):
await hass.services.async_call(domain, SERVICE_SEND_COMMAND, {"command": "on"})
with pytest.raises(MultipleInvalid):
await hass.services.async_call(
domain, SERVICE_SEND_COMMAND, {"device_id": "newkaku_0000c6c2_1"}
)
# no arguments
with pytest.raises(MultipleInvalid):
await hass.services.async_call(domain, SERVICE_SEND_COMMAND, {})
await hass.async_block_till_done()
assert protocol.send_command_ack.call_args_list == []
# bad command (no_command)
success = await hass.services.async_call(
domain,
SERVICE_SEND_COMMAND,
{"device_id": "newkaku_0000c6c2_1", "command": "no_command"},
)
assert not success, "send command should not succeed for unknown command"
async def test_reconnecting_after_disconnect(hass, monkeypatch):
"""An unexpected disconnect should cause a reconnect."""
domain = "sensor"
config = {
"rflink": {"port": "/dev/ttyABC0", CONF_RECONNECT_INTERVAL: 0},
domain: {"platform": "rflink"},
}
# setup mocking rflink module
_, mock_create, _, disconnect_callback = await mock_rflink(
hass, config, domain, monkeypatch
)
assert disconnect_callback, "disconnect callback not passed to rflink"
# rflink initiated disconnect
disconnect_callback(None)
await hass.async_block_till_done()
    # we expect 2 calls, the initial connect and the reconnect
assert mock_create.call_count == 2
async def test_reconnecting_after_failure(hass, monkeypatch):
"""A failure to reconnect should be retried."""
domain = "sensor"
config = {
"rflink": {"port": "/dev/ttyABC0", CONF_RECONNECT_INTERVAL: 0},
domain: {"platform": "rflink"},
}
# success first time but fail second
failures = [False, True, False]
# setup mocking rflink module
_, mock_create, _, disconnect_callback = await mock_rflink(
hass, config, domain, monkeypatch, failures=failures
)
# rflink initiated disconnect
disconnect_callback(None)
# wait for reconnects to have happened
await hass.async_block_till_done()
await hass.async_block_till_done()
# we expect 3 calls, the initial and 2 reconnects
assert mock_create.call_count == 3
async def test_error_when_not_connected(hass, monkeypatch):
"""Sending command should error when not connected."""
domain = "switch"
config = {
"rflink": {"port": "/dev/ttyABC0", CONF_RECONNECT_INTERVAL: 0},
domain: {
"platform": "rflink",
"devices": {
"protocol_0_0": {"name": "test", "aliases": ["test_alias_0_0"]}
},
},
}
# success first time but fail second
failures = [False, True, False]
# setup mocking rflink module
_, _, _, disconnect_callback = await mock_rflink(
hass, config, domain, monkeypatch, failures=failures
)
# rflink initiated disconnect
disconnect_callback(None)
success = await hass.services.async_call(
domain, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "switch.test"}
)
assert not success, "changing state should not succeed when disconnected"
async def test_async_send_command_error(hass, monkeypatch):
"""Sending command should error when protocol fails."""
domain = "rflink"
config = {"rflink": {"port": "/dev/ttyABC0"}}
# setup mocking rflink module
_, _, protocol, _ = await mock_rflink(
hass, config, domain, monkeypatch, failcommand=True
)
success = await hass.services.async_call(
domain,
SERVICE_SEND_COMMAND,
{"device_id": "newkaku_0000c6c2_1", "command": SERVICE_TURN_OFF},
)
await hass.async_block_till_done()
assert not success, "send command should not succeed if failcommand=True"
assert protocol.send_command_ack.call_args_list[0][0][0] == "newkaku_0000c6c2_1"
assert protocol.send_command_ack.call_args_list[0][0][1] == SERVICE_TURN_OFF
async def test_race_condition(hass, monkeypatch):
"""Test race condition for unknown components."""
domain = "light"
config = {"rflink": {"port": "/dev/ttyABC0"}, domain: {"platform": "rflink"}}
tmp_entity = TMP_ENTITY.format("test3")
# setup mocking rflink module
event_callback, _, _, _ = await mock_rflink(hass, config, domain, monkeypatch)
# test event for new unconfigured sensor
event_callback({"id": "test3", "command": "off"})
event_callback({"id": "test3", "command": "on"})
# tmp_entity added to EVENT_KEY_COMMAND
assert tmp_entity in hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND]["test3"]
    # tmp_entity must not be added to EVENT_KEY_SENSOR
assert tmp_entity not in hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR]["test3"]
await hass.async_block_till_done()
# test state of new sensor
new_sensor = hass.states.get(f"{domain}.test3")
assert new_sensor
assert new_sensor.state == "off"
event_callback({"id": "test3", "command": "on"})
await hass.async_block_till_done()
# tmp_entity must be deleted from EVENT_KEY_COMMAND
assert tmp_entity not in hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_COMMAND]["test3"]
# test state of new sensor
new_sensor = hass.states.get(f"{domain}.test3")
assert new_sensor
assert new_sensor.state == "on"
async def test_not_connected(hass, monkeypatch):
"""Test Error when sending commands to a disconnected device."""
import pytest
from homeassistant.core import HomeAssistantError
test_device = RflinkCommand("DUMMY_DEVICE")
RflinkCommand.set_rflink_protocol(None)
with pytest.raises(HomeAssistantError):
await test_device._async_handle_command("turn_on")
|
import os
import sys
import logging
import argparse
import gensim
from gensim import utils
logger = logging.getLogger(__name__)
def word2vec2tensor(word2vec_model_path, tensor_filename, binary=False):
"""Convert file in Word2Vec format and writes two files 2D tensor TSV file.
File "tensor_filename"_tensor.tsv contains word-vectors, "tensor_filename"_metadata.tsv contains words.
Parameters
----------
word2vec_model_path : str
Path to file in Word2Vec format.
tensor_filename : str
Prefix for output files.
binary : bool, optional
True if input file in binary format.
"""
model = gensim.models.KeyedVectors.load_word2vec_format(word2vec_model_path, binary=binary)
outfiletsv = tensor_filename + '_tensor.tsv'
outfiletsvmeta = tensor_filename + '_metadata.tsv'
with utils.open(outfiletsv, 'wb') as file_vector, utils.open(outfiletsvmeta, 'wb') as file_metadata:
for word in model.index_to_key:
file_metadata.write(gensim.utils.to_utf8(word) + gensim.utils.to_utf8('\n'))
vector_row = '\t'.join(str(x) for x in model[word])
file_vector.write(gensim.utils.to_utf8(vector_row) + gensim.utils.to_utf8('\n'))
logger.info("2D tensor file saved to %s", outfiletsv)
logger.info("Tensor metadata file saved to %s", outfiletsvmeta)
if __name__ == "__main__":
logging.basicConfig(format='%(asctime)s - %(module)s - %(levelname)s - %(message)s', level=logging.INFO)
parser = argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter, description=__doc__[:-138])
parser.add_argument("-i", "--input", required=True, help="Path to input file in word2vec format")
parser.add_argument("-o", "--output", required=True, help="Prefix path for output files")
parser.add_argument(
"-b", "--binary", action='store_const', const=True, default=False,
help="Set this flag if word2vec model in binary format (default: %(default)s)"
)
args = parser.parse_args()
logger.info("running %s", ' '.join(sys.argv))
word2vec2tensor(args.input, args.output, args.binary)
logger.info("finished running %s", os.path.basename(sys.argv[0]))
|
import logging
from bimmer_connected.state import LockState
from homeassistant.components.lock import LockEntity
from homeassistant.const import ATTR_ATTRIBUTION, STATE_LOCKED, STATE_UNLOCKED
from . import DOMAIN as BMW_DOMAIN
from .const import ATTRIBUTION
DOOR_LOCK_STATE = "door_lock_state"
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BMW Connected Drive lock."""
accounts = hass.data[BMW_DOMAIN]
_LOGGER.debug("Found BMW accounts: %s", ", ".join([a.name for a in accounts]))
devices = []
for account in accounts:
if not account.read_only:
for vehicle in account.account.vehicles:
device = BMWLock(account, vehicle, "lock", "BMW lock")
devices.append(device)
add_entities(devices, True)
class BMWLock(LockEntity):
"""Representation of a BMW vehicle lock."""
def __init__(self, account, vehicle, attribute: str, sensor_name):
"""Initialize the lock."""
self._account = account
self._vehicle = vehicle
self._attribute = attribute
self._name = f"{self._vehicle.name} {self._attribute}"
self._unique_id = f"{self._vehicle.vin}-{self._attribute}"
self._sensor_name = sensor_name
self._state = None
self.door_lock_state_available = (
DOOR_LOCK_STATE in self._vehicle.available_attributes
)
@property
def should_poll(self):
"""Do not poll this class.
Updates are triggered from BMWConnectedDriveAccount.
"""
return False
@property
def unique_id(self):
"""Return the unique ID of the lock."""
return self._unique_id
@property
def name(self):
"""Return the name of the lock."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of the lock."""
vehicle_state = self._vehicle.state
result = {
"car": self._vehicle.name,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
if self.door_lock_state_available:
result["door_lock_state"] = vehicle_state.door_lock_state.value
result["last_update_reason"] = vehicle_state.last_update_reason
return result
@property
def is_locked(self):
"""Return true if lock is locked."""
return self._state == STATE_LOCKED
def lock(self, **kwargs):
"""Lock the car."""
_LOGGER.debug("%s: locking doors", self._vehicle.name)
# Optimistic state set here because it takes some time before the
# update callback response
self._state = STATE_LOCKED
self.schedule_update_ha_state()
self._vehicle.remote_services.trigger_remote_door_lock()
def unlock(self, **kwargs):
"""Unlock the car."""
_LOGGER.debug("%s: unlocking doors", self._vehicle.name)
# Optimistic state set here because it takes some time before the
# update callback response
self._state = STATE_UNLOCKED
self.schedule_update_ha_state()
self._vehicle.remote_services.trigger_remote_door_unlock()
def update(self):
"""Update state of the lock."""
_LOGGER.debug("%s: updating data for %s", self._vehicle.name, self._attribute)
vehicle_state = self._vehicle.state
# Possible values: LOCKED, SECURED, SELECTIVE_LOCKED, UNLOCKED
self._state = (
STATE_LOCKED
if vehicle_state.door_lock_state in [LockState.LOCKED, LockState.SECURED]
else STATE_UNLOCKED
)
def update_callback(self):
"""Schedule a state update."""
self.schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add callback after being added to hass.
Show latest data after startup.
"""
self._account.add_update_listener(self.update_callback)
|
import asyncio
from datetime import timedelta
import logging
import aiohttp
from foobot_async import FoobotClient
import voluptuous as vol
from homeassistant.const import (
ATTR_TEMPERATURE,
ATTR_TIME,
CONCENTRATION_MICROGRAMS_PER_CUBIC_METER,
CONCENTRATION_PARTS_PER_BILLION,
CONCENTRATION_PARTS_PER_MILLION,
CONF_TOKEN,
CONF_USERNAME,
PERCENTAGE,
TEMP_CELSIUS,
TIME_SECONDS,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import PLATFORM_SCHEMA
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_HUMIDITY = "humidity"
ATTR_PM2_5 = "PM2.5"
ATTR_CARBON_DIOXIDE = "CO2"
ATTR_VOLATILE_ORGANIC_COMPOUNDS = "VOC"
ATTR_FOOBOT_INDEX = "index"
SENSOR_TYPES = {
"time": [ATTR_TIME, TIME_SECONDS],
"pm": [ATTR_PM2_5, CONCENTRATION_MICROGRAMS_PER_CUBIC_METER, "mdi:cloud"],
"tmp": [ATTR_TEMPERATURE, TEMP_CELSIUS, "mdi:thermometer"],
"hum": [ATTR_HUMIDITY, PERCENTAGE, "mdi:water-percent"],
"co2": [ATTR_CARBON_DIOXIDE, CONCENTRATION_PARTS_PER_MILLION, "mdi:molecule-co2"],
"voc": [
ATTR_VOLATILE_ORGANIC_COMPOUNDS,
CONCENTRATION_PARTS_PER_BILLION,
"mdi:cloud",
],
"allpollu": [ATTR_FOOBOT_INDEX, PERCENTAGE, "mdi:percent"],
}
SCAN_INTERVAL = timedelta(minutes=10)
PARALLEL_UPDATES = 1
TIMEOUT = 10
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_TOKEN): cv.string, vol.Required(CONF_USERNAME): cv.string}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the devices associated with the account."""
token = config.get(CONF_TOKEN)
username = config.get(CONF_USERNAME)
client = FoobotClient(
token, username, async_get_clientsession(hass), timeout=TIMEOUT
)
dev = []
try:
devices = await client.get_devices()
_LOGGER.debug("The following devices were found: %s", devices)
for device in devices:
foobot_data = FoobotData(client, device["uuid"])
for sensor_type in SENSOR_TYPES:
if sensor_type == "time":
continue
foobot_sensor = FoobotSensor(foobot_data, device, sensor_type)
dev.append(foobot_sensor)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
FoobotClient.TooManyRequests,
FoobotClient.InternalError,
) as err:
_LOGGER.exception("Failed to connect to foobot servers")
raise PlatformNotReady from err
except FoobotClient.ClientError:
_LOGGER.error("Failed to fetch data from foobot servers")
return
async_add_entities(dev, True)
class FoobotSensor(Entity):
"""Implementation of a Foobot sensor."""
def __init__(self, data, device, sensor_type):
"""Initialize the sensor."""
self._uuid = device["uuid"]
self.foobot_data = data
self._name = f"Foobot {device['name']} {SENSOR_TYPES[sensor_type][0]}"
self.type = sensor_type
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend."""
return SENSOR_TYPES[self.type][2]
@property
def state(self):
"""Return the state of the device."""
try:
data = self.foobot_data.data[self.type]
except (KeyError, TypeError):
data = None
return data
@property
def unique_id(self):
"""Return the unique id of this entity."""
return f"{self._uuid}_{self.type}"
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return self._unit_of_measurement
async def async_update(self):
"""Get the latest data."""
await self.foobot_data.async_update()
class FoobotData(Entity):
"""Get data from Foobot API."""
def __init__(self, client, uuid):
"""Initialize the data object."""
self._client = client
self._uuid = uuid
self.data = {}
@Throttle(SCAN_INTERVAL)
async def async_update(self):
"""Get the data from Foobot API."""
interval = SCAN_INTERVAL.total_seconds()
try:
response = await self._client.get_last_data(
self._uuid, interval, interval + 1
)
except (
aiohttp.client_exceptions.ClientConnectorError,
asyncio.TimeoutError,
self._client.TooManyRequests,
self._client.InternalError,
):
_LOGGER.debug("Couldn't fetch data")
return False
_LOGGER.debug("The data response is: %s", response)
self.data = {k: round(v, 1) for k, v in response[0].items()}
return True
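# The helper below is a minimal, self-contained sketch (not part of the
# integration) of the data shaping done in FoobotData.async_update above: the
# most recent reading is rounded to one decimal place before it is exposed
# through FoobotSensor.state. The response payload here is illustrative only.
def _demo_round_response():
    response = [{"time": 1589034120.0, "pm": 12.345, "tmp": 21.678, "hum": 45.21}]
    return {k: round(v, 1) for k, v in response[0].items()}
    # -> {'time': 1589034120.0, 'pm': 12.3, 'tmp': 21.7, 'hum': 45.2}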
|
import pytest
from homeassistant.components import geo_location
from homeassistant.components.geo_location import GeolocationEvent
from homeassistant.setup import async_setup_component
async def test_setup_component(hass):
"""Simple test setup of component."""
result = await async_setup_component(hass, geo_location.DOMAIN, {})
assert result
async def test_event(hass):
"""Simple test of the geolocation event class."""
entity = GeolocationEvent()
assert entity.state is None
assert entity.distance is None
assert entity.latitude is None
assert entity.longitude is None
with pytest.raises(NotImplementedError):
assert entity.source is None
|
import asyncio
import pytest
from homeassistant.core import callback
from homeassistant.helpers import area_registry
import tests.async_mock
from tests.common import flush_store, mock_area_registry
@pytest.fixture
def registry(hass):
"""Return an empty, loaded, registry."""
return mock_area_registry(hass)
@pytest.fixture
def update_events(hass):
"""Capture update events."""
events = []
@callback
def async_capture(event):
events.append(event.data)
hass.bus.async_listen(area_registry.EVENT_AREA_REGISTRY_UPDATED, async_capture)
return events
async def test_list_areas(registry):
"""Make sure that we can read areas."""
registry.async_create("mock")
areas = registry.async_list_areas()
assert len(areas) == len(registry.areas)
async def test_create_area(hass, registry, update_events):
"""Make sure that we can create an area."""
area = registry.async_create("mock")
assert area.name == "mock"
assert len(registry.areas) == 1
await hass.async_block_till_done()
assert len(update_events) == 1
assert update_events[0]["action"] == "create"
assert update_events[0]["area_id"] == area.id
async def test_create_area_with_name_already_in_use(hass, registry, update_events):
"""Make sure that we can't create an area with a name already in use."""
area1 = registry.async_create("mock")
    with pytest.raises(ValueError) as e_info:
        registry.async_create("mock")
    assert str(e_info.value) == "Name is already in use"
await hass.async_block_till_done()
assert len(registry.areas) == 1
assert len(update_events) == 1
async def test_delete_area(hass, registry, update_events):
"""Make sure that we can delete an area."""
area = registry.async_create("mock")
await registry.async_delete(area.id)
assert not registry.areas
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["area_id"] == area.id
assert update_events[1]["action"] == "remove"
assert update_events[1]["area_id"] == area.id
async def test_delete_non_existing_area(registry):
"""Make sure that we can't delete an area that doesn't exist."""
registry.async_create("mock")
with pytest.raises(KeyError):
await registry.async_delete("")
assert len(registry.areas) == 1
async def test_update_area(hass, registry, update_events):
"""Make sure that we can read areas."""
area = registry.async_create("mock")
updated_area = registry.async_update(area.id, name="mock1")
assert updated_area != area
assert updated_area.name == "mock1"
assert len(registry.areas) == 1
await hass.async_block_till_done()
assert len(update_events) == 2
assert update_events[0]["action"] == "create"
assert update_events[0]["area_id"] == area.id
assert update_events[1]["action"] == "update"
assert update_events[1]["area_id"] == area.id
async def test_update_area_with_same_name(registry):
"""Make sure that we can reapply the same name to the area."""
area = registry.async_create("mock")
updated_area = registry.async_update(area.id, name="mock")
assert updated_area == area
assert len(registry.areas) == 1
async def test_update_area_with_name_already_in_use(registry):
"""Make sure that we can't update an area with a name already in use."""
area1 = registry.async_create("mock1")
area2 = registry.async_create("mock2")
with pytest.raises(ValueError) as e_info:
registry.async_update(area1.id, name="mock2")
assert e_info == "Name is already in use"
assert area1.name == "mock1"
assert area2.name == "mock2"
assert len(registry.areas) == 2
async def test_load_area(hass, registry):
"""Make sure that we can load/save data correctly."""
registry.async_create("mock1")
registry.async_create("mock2")
assert len(registry.areas) == 2
registry2 = area_registry.AreaRegistry(hass)
await flush_store(registry._store)
await registry2.async_load()
assert list(registry.areas) == list(registry2.areas)
async def test_loading_area_from_storage(hass, hass_storage):
"""Test loading stored areas on start."""
hass_storage[area_registry.STORAGE_KEY] = {
"version": area_registry.STORAGE_VERSION,
"data": {"areas": [{"id": "12345A", "name": "mock"}]},
}
registry = await area_registry.async_get_registry(hass)
assert len(registry.areas) == 1
async def test_loading_race_condition(hass):
"""Test only one storage load called when concurrent loading occurred ."""
with tests.async_mock.patch(
"homeassistant.helpers.area_registry.AreaRegistry.async_load"
) as mock_load:
results = await asyncio.gather(
area_registry.async_get_registry(hass),
area_registry.async_get_registry(hass),
)
mock_load.assert_called_once_with()
assert results[0] == results[1]
|
from unittest.mock import Mock
import pandas as pd
import pytest
import pytz
from qstrader.execution.order import Order
from qstrader.portcon.pcm import PortfolioConstructionModel
SENTINEL_DT = pd.Timestamp('2019-01-01 15:00:00', tz=pytz.utc)
@pytest.mark.parametrize(
'description,port_dict,uni_assets,expected',
[
(
'empty on both sides',
{}, [], []
),
(
'partially intersecting set of assets',
{
'EQ:ABC': 100,
'EQ:DEF': 250,
'EQ:GHI': 38
},
['EQ:123', 'EQ:GHI', 'EQ:ABC', 'EQ:567'],
['EQ:123', 'EQ:567', 'EQ:ABC', 'EQ:DEF', 'EQ:GHI']
),
(
'non-intersecting set of assets',
{'EQ:ABC': 450, 'EQ:DEF': 210},
['EQ:567', 'EQ:123'],
['EQ:123', 'EQ:567', 'EQ:ABC', 'EQ:DEF']
)
]
)
def test_obtain_full_asset_list(description, port_dict, uni_assets, expected):
"""
Tests the _obtain_full_asset_list method of the
PortfolioConstructionModel base class.
"""
port_id = '1234'
broker = Mock()
broker.get_portfolio_as_dict.return_value = port_dict
universe = Mock()
universe.get_assets.return_value = uni_assets
order_sizer = Mock()
optimiser = Mock()
pcm = PortfolioConstructionModel(
broker, port_id, universe, order_sizer, optimiser
)
result = pcm._obtain_full_asset_list(SENTINEL_DT)
assert result == expected
@pytest.mark.parametrize(
'description,full_assets,expected',
[
(
'empty assets',
[],
{}
),
(
'non-empty assets',
['EQ:ABC', 'EQ:123', 'EQ:A1B2'],
{'EQ:ABC': 0.0, 'EQ:123': 0.0, 'EQ:A1B2': 0.0}
)
]
)
def test_create_zero_target_weight_vector(description, full_assets, expected):
"""
Tests the _create_zero_target_weight_vector method of the
PortfolioConstructionModel base class.
"""
port_id = '1234'
broker = Mock()
universe = Mock()
order_sizer = Mock()
optimiser = Mock()
pcm = PortfolioConstructionModel(
broker, port_id, universe, order_sizer, optimiser
)
result = pcm._create_zero_target_weight_vector(full_assets)
assert result == expected
@pytest.mark.parametrize(
'description,zero_weights,optimised_weights,expected',
[
(
'empty weights on both sides',
{},
{},
{}
),
(
'non-intersecting weights',
{'EQ:ABC': 0.0, 'EQ:DEF': 0.0},
{'EQ:123': 0.5, 'EQ:567': 0.5},
{'EQ:ABC': 0.0, 'EQ:DEF': 0.0, 'EQ:123': 0.5, 'EQ:567': 0.5},
),
(
'partially-intersecting weights',
{'EQ:ABC': 0.0, 'EQ:DEF': 0.0, 'EQ:123': 0.0},
{'EQ:123': 0.25, 'EQ:567': 0.25, 'EQ:890': 0.5},
{'EQ:ABC': 0.0, 'EQ:DEF': 0.0, 'EQ:123': 0.25, 'EQ:567': 0.25, 'EQ:890': 0.5},
),
(
'fully-intersecting weights',
{'EQ:ABC': 0.0, 'EQ:DEF': 0.0, 'EQ:123': 0.0},
{'EQ:ABC': 0.25, 'EQ:DEF': 0.25, 'EQ:123': 0.5},
{'EQ:ABC': 0.25, 'EQ:DEF': 0.25, 'EQ:123': 0.5},
)
]
)
def test_create_full_asset_weight_vector(
description, zero_weights, optimised_weights, expected
):
"""
Tests the _create_full_asset_weight_vector method of the
PortfolioConstructionModel base class.
"""
port_id = '1234'
broker = Mock()
universe = Mock()
order_sizer = Mock()
optimiser = Mock()
pcm = PortfolioConstructionModel(
broker, port_id, universe, order_sizer, optimiser
)
result = pcm._create_full_asset_weight_vector(zero_weights, optimised_weights)
assert result == expected
@pytest.mark.parametrize(
'description,target_portfolio,current_portfolio,expected',
[
(
'empty portfolios on both sides',
{},
{},
[]
),
(
'non-empty equal portfolios on both sides - no orders',
{'EQ:ABC': {'quantity': 100}, 'EQ:DEF': {'quantity': 250}},
{'EQ:ABC': {'quantity': 100}, 'EQ:DEF': {'quantity': 250}},
[]
),
(
'non-empty target portfolio with empty current portfolio',
{'EQ:ABC': {'quantity': 100}, 'EQ:DEF': {'quantity': 250}},
{},
[
Order(SENTINEL_DT, 'EQ:ABC', 100),
Order(SENTINEL_DT, 'EQ:DEF', 250)
]
),
(
'empty target portfolio with non-empty current portfolio',
{},
{'EQ:ABC': {'quantity': 345}, 'EQ:DEF': {'quantity': 223}},
[
Order(SENTINEL_DT, 'EQ:ABC', -345),
                Order(SENTINEL_DT, 'EQ:DEF', -223)
]
),
(
'non-empty portfolios, non-intersecting symbols',
{'EQ:ABC': {'quantity': 123}, 'EQ:DEF': {'quantity': 456}},
{'EQ:GHI': {'quantity': 217}, 'EQ:JKL': {'quantity': 48}},
[
Order(SENTINEL_DT, 'EQ:ABC', 123),
Order(SENTINEL_DT, 'EQ:DEF', 456),
Order(SENTINEL_DT, 'EQ:GHI', -217),
Order(SENTINEL_DT, 'EQ:JKL', -48)
]
),
(
'non-empty portfolios, partially-intersecting symbols',
{'EQ:ABC': {'quantity': 123}, 'EQ:DEF': {'quantity': 456}},
{'EQ:DEF': {'quantity': 217}, 'EQ:GHI': {'quantity': 48}},
[
Order(SENTINEL_DT, 'EQ:ABC', 123),
Order(SENTINEL_DT, 'EQ:DEF', 239),
Order(SENTINEL_DT, 'EQ:GHI', -48)
]
),
(
'non-empty portfolios, fully-intersecting symbols',
{'EQ:ABC': {'quantity': 123}, 'EQ:DEF': {'quantity': 456}},
{'EQ:ABC': {'quantity': 217}, 'EQ:DEF': {'quantity': 48}},
[
Order(SENTINEL_DT, 'EQ:ABC', -94),
Order(SENTINEL_DT, 'EQ:DEF', 408)
]
)
]
)
def test_generate_rebalance_orders(
helpers, description, target_portfolio, current_portfolio, expected
):
"""
Tests the _generate_rebalance_orders method of the
PortfolioConstructionModel base class.
"""
port_id = '1234'
broker = Mock()
universe = Mock()
order_sizer = Mock()
optimiser = Mock()
pcm = PortfolioConstructionModel(
broker, port_id, universe, order_sizer, optimiser
)
result = pcm._generate_rebalance_orders(SENTINEL_DT, target_portfolio, current_portfolio)
helpers.assert_order_lists_equal(result, expected)
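# Worked example (illustrative only) of the arithmetic the parametrized cases
# above encode: each rebalance order quantity is the target quantity minus the
# current quantity, so positive values buy and negative values sell.
def _demo_rebalance_quantities():
    target = {'EQ:ABC': 123, 'EQ:DEF': 456}
    current = {'EQ:DEF': 217, 'EQ:GHI': 48}
    assets = sorted(set(target) | set(current))
    return {a: target.get(a, 0) - current.get(a, 0) for a in assets}
    # -> {'EQ:ABC': 123, 'EQ:DEF': 239, 'EQ:GHI': -48}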
|
from trashcli.put import TrashDirectoryForPut
from trashcli.put import AbsolutePaths, TopDirRelativePaths
from mock import Mock
class TestHowOriginalLocationIsStored:
def test_for_absolute_paths(self):
fs = Mock()
self.dir = TrashDirectoryForPut('/volume/.Trash', '/volume', fs)
self.dir.path_maker = AbsolutePaths(None)
self.assert_path_for_trashinfo_is('/file' , '/file')
self.assert_path_for_trashinfo_is('/file' , '/dir/../file')
self.assert_path_for_trashinfo_is('/outside/file' , '/outside/file')
self.assert_path_for_trashinfo_is('/volume/file' , '/volume/file')
self.assert_path_for_trashinfo_is('/volume/dir/file' , '/volume/dir/file')
def test_for_relative_paths(self):
self.dir = TrashDirectoryForPut('/volume/.Trash', '/volume', Mock())
self.dir.path_maker = TopDirRelativePaths('/volume')
self.assert_path_for_trashinfo_is('/file' , '/file')
self.assert_path_for_trashinfo_is('/file' , '/dir/../file')
self.assert_path_for_trashinfo_is('/outside/file' , '/outside/file')
self.assert_path_for_trashinfo_is('file' , '/volume/file')
self.assert_path_for_trashinfo_is('dir/file' , '/volume/dir/file')
def assert_path_for_trashinfo_is(self, expected_value, file_to_be_trashed):
result = self.dir.path_for_trash_info_for_file(file_to_be_trashed)
assert expected_value == result
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.lock import DOMAIN
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a lock."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "locked",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "unlocked",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set("lock.entity", STATE_UNLOCKED)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "lock.entity",
"type": "locked",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"locked - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "lock.entity",
"type": "unlocked",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"unlocked - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
]
},
)
    # Fake that the entity is locked.
hass.states.async_set("lock.entity", STATE_LOCKED)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data[
"some"
] == "locked - device - {} - unlocked - locked - None".format("lock.entity")
    # Fake that the entity is unlocked.
hass.states.async_set("lock.entity", STATE_UNLOCKED)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data[
"some"
] == "unlocked - device - {} - locked - unlocked - None".format("lock.entity")
|
from homeassistant.components.broadlink.const import DOMAIN
from tests.async_mock import MagicMock, patch
from tests.common import MockConfigEntry
# Do not edit/remove. Adding is ok.
BROADLINK_DEVICES = {
"Entrance": (
"192.168.0.11",
"34ea34befc25",
"RM mini 3",
"Broadlink",
"RM2",
0x2737,
57,
8,
),
"Living Room": (
"192.168.0.12",
"34ea34b43b5a",
"RM mini 3",
"Broadlink",
"RM4",
0x5F36,
44017,
10,
),
"Office": (
"192.168.0.13",
"34ea34b43d22",
"RM pro",
"Broadlink",
"RM2",
0x2787,
20025,
7,
),
"Garage": (
"192.168.0.14",
"34ea34c43f31",
"RM4 pro",
"Broadlink",
"RM4",
0x6026,
52,
4,
),
"Bedroom": (
"192.168.0.15",
"34ea34b45d2c",
"e-Sensor",
"Broadlink",
"A1",
0x2714,
20025,
5,
),
"Kitchen": ( # Not supported.
"192.168.0.64",
"34ea34b61d2c",
"LB1",
"Broadlink",
"SmartBulb",
0x504E,
57,
5,
),
}
class BroadlinkDevice:
"""Representation of a Broadlink device."""
def __init__(
self, name, host, mac, model, manufacturer, type_, devtype, fwversion, timeout
):
"""Initialize the device."""
self.name: str = name
self.host: str = host
self.mac: str = mac
self.model: str = model
self.manufacturer: str = manufacturer
self.type: str = type_
self.devtype: int = devtype
self.timeout: int = timeout
self.fwversion: int = fwversion
async def setup_entry(self, hass, mock_api=None, mock_entry=None):
"""Set up the device."""
mock_api = mock_api or self.get_mock_api()
mock_entry = mock_entry or self.get_mock_entry()
mock_entry.add_to_hass(hass)
with patch(
"homeassistant.components.broadlink.device.blk.gendevice",
return_value=mock_api,
), patch(
"homeassistant.components.broadlink.updater.blk.discover",
return_value=[mock_api],
):
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
return mock_api, mock_entry
def get_mock_api(self):
"""Return a mock device (API)."""
mock_api = MagicMock()
mock_api.name = self.name
mock_api.host = (self.host, 80)
mock_api.mac = bytes.fromhex(self.mac)
mock_api.model = self.model
mock_api.manufacturer = self.manufacturer
mock_api.type = self.type
mock_api.devtype = self.devtype
mock_api.timeout = self.timeout
mock_api.is_locked = False
mock_api.auth.return_value = True
mock_api.get_fwversion.return_value = self.fwversion
return mock_api
def get_mock_entry(self):
"""Return a mock config entry."""
return MockConfigEntry(
domain=DOMAIN,
unique_id=self.mac,
title=self.name,
data=self.get_entry_data(),
)
def get_entry_data(self):
"""Return entry data."""
return {
"host": self.host,
"mac": self.mac,
"type": self.devtype,
"timeout": self.timeout,
}
def get_device(name):
"""Get a device by name."""
return BroadlinkDevice(name, *BROADLINK_DEVICES[name])
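# Illustrative usage sketch of the test helpers above (fixtures only, not part
# of the Home Assistant API): build the mocked API for one of the fake devices
# and check a couple of its attributes.
def _demo_mock_api():
    device = get_device("Living Room")
    api = device.get_mock_api()
    assert api.model == "RM mini 3"
    assert api.devtype == 0x5F36
    return api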
|
import cherrypy
from cherrypy.lib import auth_digest
from cherrypy._cpcompat import ntob
from cherrypy.test import helper
def _fetch_users():
return {'test': 'test', '☃йюзер': 'їпароль'}
get_ha1 = cherrypy.lib.auth_digest.get_ha1_dict_plain(_fetch_users())
class DigestAuthTest(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self):
return 'This is public.'
class DigestProtected:
@cherrypy.expose
def index(self, *args, **kwargs):
return "Hello %s, you've been authorized." % (
cherrypy.request.login)
conf = {'/digest': {'tools.auth_digest.on': True,
'tools.auth_digest.realm': 'localhost',
'tools.auth_digest.get_ha1': get_ha1,
'tools.auth_digest.key': 'a565c27146791cfb',
'tools.auth_digest.debug': True,
'tools.auth_digest.accept_charset': 'UTF-8'}}
root = Root()
root.digest = DigestProtected()
cherrypy.tree.mount(root, config=conf)
def testPublic(self):
self.getPage('/')
assert self.status == '200 OK'
self.assertHeader('Content-Type', 'text/html;charset=utf-8')
assert self.body == b'This is public.'
def _test_parametric_digest(self, username, realm):
test_uri = '/digest/?@/=%2F%40&%f0%9f%99%88=path'
self.getPage(test_uri)
assert self.status_code == 401
        msg = 'Digest authentication scheme was not found'
www_auth_digest = tuple(filter(
lambda kv: kv[0].lower() == 'www-authenticate'
and kv[1].startswith('Digest '),
self.headers,
))
assert len(www_auth_digest) == 1, msg
items = www_auth_digest[0][-1][7:].split(', ')
tokens = {}
for item in items:
key, value = item.split('=')
tokens[key.lower()] = value
assert tokens['realm'] == '"localhost"'
assert tokens['algorithm'] == '"MD5"'
assert tokens['qop'] == '"auth"'
assert tokens['charset'] == '"UTF-8"'
nonce = tokens['nonce'].strip('"')
# Test user agent response with a wrong value for 'realm'
base_auth = ('Digest username="%s", '
'realm="%s", '
'nonce="%s", '
'uri="%s", '
'algorithm=MD5, '
'response="%s", '
'qop=auth, '
'nc=%s, '
'cnonce="1522e61005789929"')
encoded_user = username
encoded_user = encoded_user.encode('utf-8')
encoded_user = encoded_user.decode('latin1')
auth_header = base_auth % (
encoded_user, realm, nonce, test_uri,
'11111111111111111111111111111111', '00000001',
)
auth = auth_digest.HttpDigestAuthorization(auth_header, 'GET')
# calculate the response digest
ha1 = get_ha1(auth.realm, auth.username)
response = auth.request_digest(ha1)
auth_header = base_auth % (
encoded_user, realm, nonce, test_uri,
response, '00000001',
)
self.getPage(test_uri, [('Authorization', auth_header)])
def test_wrong_realm(self):
# send response with correct response digest, but wrong realm
self._test_parametric_digest(username='test', realm='wrong realm')
assert self.status_code == 401
def test_ascii_user(self):
self._test_parametric_digest(username='test', realm='localhost')
assert self.status == '200 OK'
assert self.body == b"Hello test, you've been authorized."
def test_unicode_user(self):
self._test_parametric_digest(username='☃йюзер', realm='localhost')
assert self.status == '200 OK'
assert self.body == ntob(
"Hello ☃йюзер, you've been authorized.", 'utf-8',
)
def test_wrong_scheme(self):
basic_auth = {
'Authorization': 'Basic foo:bar',
}
self.getPage('/digest/', headers=list(basic_auth.items()))
assert self.status_code == 401
|
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import docker
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cloudsuite_data_analytics'
BENCHMARK_CONFIG = """
cloudsuite_data_analytics:
description: >
Run Cloudsuite data analytics benchmark. Specify the number of slave VMs
with --num_vms.
vm_groups:
master:
vm_spec: *default_single_core
vm_count: 1
slaves:
vm_spec: *default_single_core
"""
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['num_vms'].present:
config['vm_groups']['slaves']['vm_count'] = FLAGS.num_vms
return config
def Prepare(benchmark_spec):
"""Install docker. Pull images. Start the master and slaves.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
master = benchmark_spec.vm_groups['master'][0]
slaves = benchmark_spec.vm_groups['slaves']
def PrepareCommon(vm):
if not docker.IsInstalled(vm):
vm.Install('docker')
def PrepareMaster(vm):
PrepareCommon(vm)
vm.RemoteCommand('sudo docker pull cloudsuite/data-analytics')
vm.RemoteCommand('sudo docker run -d --name master --net host '
'cloudsuite/data-analytics master')
def PrepareSlave(vm):
PrepareCommon(vm)
vm.RemoteCommand('sudo docker pull cloudsuite/hadoop')
vm.RemoteCommand('sudo docker run -d --name slave --net host '
'cloudsuite/hadoop slave %s' % master.internal_ip)
target_arg_tuples = ([(PrepareSlave, [vm], {}) for vm in slaves] +
[(PrepareMaster, [master], {})])
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
def Run(benchmark_spec):
"""Run the data analytics benchmark.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
master = benchmark_spec.vm_groups['master'][0]
results = []
stdout, _ = master.RemoteCommand('sudo docker exec master benchmark 2>&1',
should_log=True)
matches = re.findall(r'^Benchmark time: (\d+)ms$', stdout, re.MULTILINE)
if len(matches) != 1:
raise errors.Benchmark.RunError('Expected to find benchmark runtime')
results.append(sample.Sample('Benchmark runtime', float(matches[0]) / 1000,
'seconds'))
return results
def Cleanup(benchmark_spec):
"""Stop and remove docker containers. Remove images.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
master = benchmark_spec.vm_groups['master'][0]
slaves = benchmark_spec.vm_groups['slaves']
def CleanupMaster(vm):
vm.RemoteCommand('sudo docker stop master')
vm.RemoteCommand('sudo docker rm master')
vm.RemoteCommand('sudo docker rmi cloudsuite/data-analytics')
def CleanupSlave(vm):
vm.RemoteCommand('sudo docker stop slave')
vm.RemoteCommand('sudo docker rm slave')
vm.RemoteCommand('sudo docker rmi cloudsuite/hadoop')
target_arg_tuples = ([(CleanupSlave, [vm], {}) for vm in slaves] +
[(CleanupMaster, [master], {})])
vm_util.RunParallelThreads(target_arg_tuples, len(target_arg_tuples))
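# Minimal, self-contained sketch of the parsing performed in Run() above: the
# benchmark runtime is reported in milliseconds on its own line and converted
# to seconds for the sample. The stdout text here is illustrative only.
def _DemoParseRuntime():
  stdout = 'Setting up workers...\nBenchmark time: 95230ms\n'
  matches = re.findall(r'^Benchmark time: (\d+)ms$', stdout, re.MULTILINE)
  return float(matches[0]) / 1000  # 95.23 seconds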
|
import mock
from pytest import raises
from paasta_tools.delete_kubernetes_deployments import get_deployment_names_from_list
from paasta_tools.delete_kubernetes_deployments import main
from paasta_tools.utils import InvalidJobNameError
def test_main():
with mock.patch(
"paasta_tools.delete_kubernetes_deployments.get_deployment_names_from_list",
autospec=True,
) as mock_get_deployment_names_from_list, mock.patch(
"paasta_tools.delete_kubernetes_deployments.delete_deployment", autospec=True,
) as mock_delete_deployment, mock.patch(
"paasta_tools.delete_kubernetes_deployments.KubeClient", autospec=True
) as mock_kube_client, mock.patch(
"paasta_tools.delete_kubernetes_deployments.ensure_namespace", autospec=True,
) as mock_ensure_namespace:
# Test main() success
mock_get_deployment_names_from_list.return_value = ["fake_pcm_deployment"]
with raises(SystemExit) as e:
main()
assert e.value.code == 0
assert mock_ensure_namespace.called
mock_delete_deployment.assert_called_with(
kube_client=mock_kube_client.return_value,
deployment_name="fake_pcm_deployment",
)
# Test main() failed
mock_delete_deployment.side_effect = Exception("Delete Error")
with raises(SystemExit) as e:
main()
assert e.value.code == 1
def test_get_deployment_names_from_list():
with mock.patch(
"paasta_tools.delete_kubernetes_deployments.decompose_job_id", autospec=True
) as mock_decompose_job_id:
# Test get_deployment_names_from_list() success
mock_decompose_job_id.return_value = (
"fake-service",
"fake_instance",
"fake_hash",
"fake_hash",
)
output = get_deployment_names_from_list(["fake-service.fake_instance"])
assert output[0] == "fake-service-fake--instance"
# Test get_deployment_names_from_list() failed
mock_decompose_job_id.side_effect = InvalidJobNameError()
with raises(SystemExit) as e:
get_deployment_names_from_list(["fake-service.fake_instance"])
assert e.value.code == 1
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tensorflow as tf
from datasets import dataset_utils
slim = tf.contrib.slim
_FILE_PATTERN = 'mnist_%s.tfrecord'
_SPLITS_TO_SIZES = {'train': 60000, 'test': 10000}
_NUM_CLASSES = 10
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A [28 x 28 x 1] grayscale image.',
'label': 'A single integer between 0 and 9',
}
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading MNIST.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature((), tf.string, default_value=''),
'image/format': tf.FixedLenFeature((), tf.string, default_value='raw'),
'image/class/label': tf.FixedLenFeature(
[1], tf.int64, default_value=tf.zeros([1], dtype=tf.int64)),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image(shape=[28, 28, 1], channels=1),
'label': slim.tfexample_decoder.Tensor('image/class/label', shape=[]),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
num_classes=_NUM_CLASSES,
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
labels_to_names=labels_to_names)
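# Minimal usage sketch (illustrative only; the directory path is a placeholder
# and the TFRecord files are assumed to live there when the dataset is read).
def _demo_get_split(dataset_dir='/tmp/mnist'):
  dataset = get_split('train', dataset_dir)
  # slim.dataset.Dataset exposes its keyword arguments as attributes.
  return dataset.num_samples  # 60000 for the 'train' split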
|
import argparse
from django.core.management.base import CommandError
from django.http.request import HttpRequest
from weblate.trans.management.commands import WeblateTranslationCommand
class Command(WeblateTranslationCommand):
"""Command for mass importing suggestions."""
help = "imports suggestions"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--author",
default="[email protected]",
help=("Email address of author (has to be registered in Weblate)"),
)
parser.add_argument("file", type=argparse.FileType("rb"), help="File to import")
def handle(self, *args, **options):
# Get translation object
translation = self.get_translation(**options)
# Create fake request object
request = HttpRequest()
request.user = None
# Process import
try:
translation.merge_upload(
request,
options["file"],
False,
method="suggest",
author_email=options["author"],
)
except OSError as err:
raise CommandError(f"Failed to import translation file: {err}")
finally:
options["file"].close()
|
import functools
import itertools
import time
def merge(obj, *keys):
return itertools.chain(*[obj[k] for k in keys])
class CachedProperty:
def __init__(self, ttl=300):
self.ttl = ttl
def __call__(self, fget, doc=None):
self.fget = fget
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
return self
def __get__(self, inst, owner):
try:
value, last_update = inst._cache[self.__name__]
if self.ttl > 0 and time.time() - last_update > self.ttl:
raise AttributeError
except (KeyError, AttributeError):
value = self.fget(inst)
try:
cache = inst._cache
except AttributeError:
cache = inst._cache = {}
cache[self.__name__] = (value, time.time())
return value
def memoize(obj):
cache = obj.cache = {}
@functools.wraps(obj)
def memoizer(*args, **kwargs):
key = str(args) + str(kwargs)
if key not in cache:
cache[key] = obj(*args, **kwargs)
return cache[key]
return memoizer
def humanize_bytes(b):
abbrevs = ((1 << 30, "GB"), (1 << 20, "MB"), (1 << 10, "kB"), (1, "B"))
for factor, suffix in abbrevs:
if b >= factor:
break
return "%.*f %s" % (2, b / float(factor), suffix)
|
from django.core.management.base import CommandError
from django.db.models import Q
from weblate.auth.models import User
from weblate.utils.backup import make_password
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
help = "setups admin user with random password"
def add_arguments(self, parser):
parser.add_argument(
"--password",
default=None,
help="Password to set, random is generated if not specified",
)
parser.add_argument(
"--no-password",
action="store_true",
default=False,
help="Do not set password at all (useful with --update)",
)
parser.add_argument(
"--username", default="admin", help='Admin username, defaults to "admin"'
)
parser.add_argument(
"--email",
default="[email protected]",
help='Admin email, defaults to "[email protected]"',
)
parser.add_argument(
"--name",
default="Weblate Admin",
help='Admin name, defaults to "Weblate Admin"',
)
parser.add_argument(
"--update",
action="store_true",
default=False,
help="Change password for this account if exists",
)
def handle(self, *args, **options):
"""Create admin account with admin password.
        This is mostly useful for setup inside appliances, when the user wants to be
        able to log in remotely and change the password afterwards.
"""
try:
user = User.objects.filter(
Q(username=options["username"]) | Q(email=options["email"])
).get()
except User.DoesNotExist:
user = None
except User.MultipleObjectsReturned:
raise CommandError("Multiple users matched given parameters!")
if user and not options["update"]:
raise CommandError("User exists, specify --update to update existing")
if options["no_password"]:
password = None
elif options["password"]:
password = options["password"]
else:
password = make_password(13)
self.stdout.write(f"Using generated password: {password}")
if user and options["update"]:
self.stdout.write(f"Updating user {user.username}")
user.email = options["email"]
if password is not None and not user.check_password(password):
user.set_password(password)
else:
self.stdout.write("Creating user {}".format(options["username"]))
user = User.objects.create_user(
options["username"], options["email"], password
)
user.full_name = options["name"]
user.is_superuser = True
user.is_active = True
user.save()
|
import os
from django.apps import AppConfig
from django.core.checks import Warning, register
from filelock import FileLock
from weblate.utils.checks import weblate_check
from weblate.utils.data import data_dir
from weblate.vcs.base import RepositoryException
from weblate.vcs.git import GitRepository
from weblate.vcs.gpg import check_gpg
GIT_ERRORS = []
def check_vcs(app_configs, **kwargs):
from weblate.vcs.models import VCS_REGISTRY
message = "Failure in loading VCS module for {}: {}"
return [
weblate_check(
f"weblate.W033.{key}", message.format(key, value.strip()), Warning
)
for key, value in VCS_REGISTRY.errors.items()
]
def check_git(app_configs, **kwargs):
template = "Failure in configuring Git: {}"
return [
weblate_check("weblate.C035", template.format(message))
for message in GIT_ERRORS
]
class VCSConfig(AppConfig):
name = "weblate.vcs"
label = "vcs"
verbose_name = "VCS"
def ready(self):
super().ready()
register(check_vcs)
register(check_git, deploy=True)
register(check_gpg, deploy=True)
home = data_dir("home")
if not os.path.exists(home):
os.makedirs(home)
# Configure merge driver for Gettext PO
# We need to do this behind lock to avoid errors when servers
# start in parallel
lockfile = FileLock(os.path.join(home, "gitlock"))
with lockfile:
try:
GitRepository.global_setup()
except RepositoryException as error:
GIT_ERRORS.append(str(error))
# Use it for *.po by default
configdir = os.path.join(home, ".config", "git")
configfile = os.path.join(configdir, "attributes")
if not os.path.exists(configfile):
if not os.path.exists(configdir):
os.makedirs(configdir)
with open(configfile, "w") as handle:
handle.write("*.po merge=weblate-merge-gettext-po\n")
|
import pytest
from homeassistant.components.arcam_fmj.const import DOMAIN
import homeassistant.components.automation as automation
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a arcam_fmj."""
config_entry = MockConfigEntry(domain=DOMAIN, data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={(DOMAIN, "host", 1234)},
)
entity_reg.async_get_or_create(
"media_player", DOMAIN, "5678", device_id=device_entry.id
)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "media_player.arcam_fmj_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_turn_on_request(hass, calls, player_setup, state):
"""Test for turn_on and turn_off triggers firing."""
state.get_power.return_value = None
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": player_setup,
"type": "turn_on",
},
"action": {
"service": "test.automation",
"data_template": {"some": "{{ trigger.entity_id }}"},
},
}
]
},
)
await hass.services.async_call(
"media_player",
"turn_on",
{"entity_id": player_setup},
blocking=True,
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == player_setup
|
import os
from integration_tests.files import (make_parent_for, make_file,
make_unreadable_file)
class FakeTrashDir:
def __init__(self, path):
self.info_path = os.path.join(path, 'info')
self.files_path = os.path.join(path, 'files')
self.number = 1
def add_unreadable_trashinfo(self, basename):
path = self.a_trashinfo(basename)
make_unreadable_file(path)
def add_trashed_file(self, basename, path, content):
trashinfo_path = self.a_trashinfo(basename)
file_path = self.file_path(basename)
make_file(trashinfo_path, a_trashinfo(path))
make_file(file_path, content)
def a_trashinfo(self, basename):
return '%s/%s.trashinfo' % (self.info_path, basename)
def file_path(self, basename):
return '%s/%s' % (self.files_path, basename)
def add_trashinfo(self, contents, base_name = None):
if not base_name:
base_name = str(self.number)
self.number += 1
path = '%(info_dir)s/%(name)s.trashinfo' % {'info_dir': self.info_path,
'name': base_name}
make_parent_for(path)
make_file(path, contents)
self.path_of_last_file_added = path
def add_trashinfo2(self, escaped_path_entry, formatted_deletion_date):
self.add_trashinfo(a_trashinfo(escaped_path_entry, formatted_deletion_date))
def a_trashinfo(escaped_path_entry,
formatted_deletion_date = '2000-01-01T00:00:01'):
return ("[Trash Info]\n" +
"Path=%s\n" % escaped_path_entry +
"DeletionDate=%s\n" % formatted_deletion_date)
def a_trashinfo_without_date():
return ("[Trash Info]\n"
"Path=/path\n")
def a_trashinfo_with_invalid_date():
return ("[Trash Info]\n"
"Path=/path\n"
"DeletionDate=Wrong Date")
def a_trashinfo_without_path():
return ("[Trash Info]\n"
"DeletionDate='2000-01-01T00:00:00'\n")
def a_trashinfo_with_date(date):
return ("[Trash Info]\n"
"DeletionDate=%s\n" % date)
def a_trashinfo_with_path(path):
return ("[Trash Info]\n"
"Path=%s\n" % path)
|
from homeassistant.const import ATTR_UNIT_OF_MEASUREMENT, PERCENTAGE, STATE_UNAVAILABLE
from tests.components.august.mocks import (
_create_august_with_devices,
_mock_activities_from_fixture,
_mock_doorbell_from_fixture,
_mock_doorsense_enabled_august_lock_detail,
_mock_lock_from_fixture,
)
async def test_create_doorbell(hass):
"""Test creation of a doorbell."""
doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.json")
await _create_august_with_devices(hass, [doorbell_one])
sensor_k98gidt45gul_name_battery = hass.states.get(
"sensor.k98gidt45gul_name_battery"
)
assert sensor_k98gidt45gul_name_battery.state == "96"
assert (
sensor_k98gidt45gul_name_battery.attributes["unit_of_measurement"] == PERCENTAGE
)
async def test_create_doorbell_offline(hass):
"""Test creation of a doorbell that is offline."""
doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.offline.json")
await _create_august_with_devices(hass, [doorbell_one])
entity_registry = await hass.helpers.entity_registry.async_get_registry()
sensor_tmt100_name_battery = hass.states.get("sensor.tmt100_name_battery")
assert sensor_tmt100_name_battery.state == "81"
assert sensor_tmt100_name_battery.attributes["unit_of_measurement"] == PERCENTAGE
entry = entity_registry.async_get("sensor.tmt100_name_battery")
assert entry
assert entry.unique_id == "tmt100_device_battery"
async def test_create_doorbell_hardwired(hass):
"""Test creation of a doorbell that is hardwired without a battery."""
doorbell_one = await _mock_doorbell_from_fixture(
hass, "get_doorbell.nobattery.json"
)
await _create_august_with_devices(hass, [doorbell_one])
sensor_tmt100_name_battery = hass.states.get("sensor.tmt100_name_battery")
assert sensor_tmt100_name_battery is None
async def test_create_lock_with_linked_keypad(hass):
"""Test creation of a lock with a linked keypad that both have a battery."""
lock_one = await _mock_lock_from_fixture(hass, "get_lock.doorsense_init.json")
await _create_august_with_devices(hass, [lock_one])
entity_registry = await hass.helpers.entity_registry.async_get_registry()
sensor_a6697750d607098bae8d6baa11ef8063_name_battery = hass.states.get(
"sensor.a6697750d607098bae8d6baa11ef8063_name_battery"
)
assert sensor_a6697750d607098bae8d6baa11ef8063_name_battery.state == "88"
assert (
sensor_a6697750d607098bae8d6baa11ef8063_name_battery.attributes[
"unit_of_measurement"
]
== PERCENTAGE
)
entry = entity_registry.async_get(
"sensor.a6697750d607098bae8d6baa11ef8063_name_battery"
)
assert entry
assert entry.unique_id == "A6697750D607098BAE8D6BAA11EF8063_device_battery"
state = hass.states.get("sensor.front_door_lock_keypad_battery")
assert state.state == "60"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE
entry = entity_registry.async_get("sensor.front_door_lock_keypad_battery")
assert entry
assert entry.unique_id == "5bc65c24e6ef2a263e1450a8_linked_keypad_battery"
async def test_create_lock_with_low_battery_linked_keypad(hass):
"""Test creation of a lock with a linked keypad that both have a battery."""
lock_one = await _mock_lock_from_fixture(hass, "get_lock.low_keypad_battery.json")
await _create_august_with_devices(hass, [lock_one])
entity_registry = await hass.helpers.entity_registry.async_get_registry()
sensor_a6697750d607098bae8d6baa11ef8063_name_battery = hass.states.get(
"sensor.a6697750d607098bae8d6baa11ef8063_name_battery"
)
assert sensor_a6697750d607098bae8d6baa11ef8063_name_battery.state == "88"
assert (
sensor_a6697750d607098bae8d6baa11ef8063_name_battery.attributes[
"unit_of_measurement"
]
== PERCENTAGE
)
entry = entity_registry.async_get(
"sensor.a6697750d607098bae8d6baa11ef8063_name_battery"
)
assert entry
assert entry.unique_id == "A6697750D607098BAE8D6BAA11EF8063_device_battery"
state = hass.states.get("sensor.front_door_lock_keypad_battery")
assert state.state == "10"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == PERCENTAGE
entry = entity_registry.async_get("sensor.front_door_lock_keypad_battery")
assert entry
assert entry.unique_id == "5bc65c24e6ef2a263e1450a8_linked_keypad_battery"
# No activity means it will be unavailable until someone unlocks/locks it
lock_operator_sensor = entity_registry.async_get(
"sensor.a6697750d607098bae8d6baa11ef8063_name_operator"
)
assert (
lock_operator_sensor.unique_id
== "A6697750D607098BAE8D6BAA11EF8063_lock_operator"
)
assert (
hass.states.get("sensor.a6697750d607098bae8d6baa11ef8063_name_operator").state
== STATE_UNAVAILABLE
)
async def test_lock_operator_bluetooth(hass):
"""Test operation of a lock with doorsense and bridge."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(
hass, "get_activity.lock_from_bluetooth.json"
)
await _create_august_with_devices(hass, [lock_one], activities=activities)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
lock_operator_sensor = entity_registry.async_get(
"sensor.online_with_doorsense_name_operator"
)
assert lock_operator_sensor
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").state
== "Your favorite elven princess"
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"remote"
]
is False
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"keypad"
]
is False
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"autorelock"
]
is False
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"method"
]
== "mobile"
)
async def test_lock_operator_keypad(hass):
"""Test operation of a lock with doorsense and bridge."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(
hass, "get_activity.lock_from_keypad.json"
)
await _create_august_with_devices(hass, [lock_one], activities=activities)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
lock_operator_sensor = entity_registry.async_get(
"sensor.online_with_doorsense_name_operator"
)
assert lock_operator_sensor
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").state
== "Your favorite elven princess"
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"remote"
]
is False
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"keypad"
]
is True
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"autorelock"
]
is False
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"method"
]
== "keypad"
)
async def test_lock_operator_remote(hass):
"""Test operation of a lock with doorsense and bridge."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(hass, "get_activity.lock.json")
await _create_august_with_devices(hass, [lock_one], activities=activities)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
lock_operator_sensor = entity_registry.async_get(
"sensor.online_with_doorsense_name_operator"
)
assert lock_operator_sensor
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").state
== "Your favorite elven princess"
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"remote"
]
is True
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"keypad"
]
is False
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"autorelock"
]
is False
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"method"
]
== "remote"
)
async def test_lock_operator_autorelock(hass):
"""Test operation of a lock with doorsense and bridge."""
lock_one = await _mock_doorsense_enabled_august_lock_detail(hass)
activities = await _mock_activities_from_fixture(
hass, "get_activity.lock_from_autorelock.json"
)
await _create_august_with_devices(hass, [lock_one], activities=activities)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
lock_operator_sensor = entity_registry.async_get(
"sensor.online_with_doorsense_name_operator"
)
assert lock_operator_sensor
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").state
== "Auto Relock"
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"remote"
]
is False
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"keypad"
]
is False
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"autorelock"
]
is True
)
assert (
hass.states.get("sensor.online_with_doorsense_name_operator").attributes[
"method"
]
== "autorelock"
)
|
import numpy as np
from scipy import linalg
from .epochs import Epochs
from .utils import check_fname, logger, verbose, _check_option
from .io.open import fiff_open
from .io.pick import pick_types, pick_types_forward
from .io.proj import (Projection, _has_eeg_average_ref_proj, _read_proj,
make_projector, make_eeg_average_ref_proj, _write_proj)
from .io.write import start_file, end_file
from .event import make_fixed_length_events
from .parallel import parallel_func
from .cov import _check_n_samples
from .forward import (is_fixed_orient, _subject_from_forward,
convert_forward_solution)
from .source_estimate import _make_stc
@verbose
def read_proj(fname, verbose=None):
"""Read projections from a FIF file.
Parameters
----------
fname : str
        The name of the file containing the projection vectors. It should end
        with -proj.fif or -proj.fif.gz.
%(verbose)s
Returns
-------
projs : list
The list of projection vectors.
See Also
--------
write_proj
"""
check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz',
'_proj.fif', '_proj.fif.gz'))
ff, tree, _ = fiff_open(fname)
with ff as fid:
projs = _read_proj(fid, tree)
return projs
def write_proj(fname, projs):
"""Write projections to a FIF file.
Parameters
----------
fname : str
        The name of the file containing the projection vectors. It should end
        with -proj.fif or -proj.fif.gz.
projs : list
The list of projection vectors.
See Also
--------
read_proj
"""
check_fname(fname, 'projection', ('-proj.fif', '-proj.fif.gz',
'_proj.fif', '_proj.fif.gz'))
with start_file(fname) as fid:
_write_proj(fid, projs)
end_file(fid)
@verbose
def _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix,
meg='separate', verbose=None):
grad_ind = pick_types(info, meg='grad', ref_meg=False, exclude='bads')
mag_ind = pick_types(info, meg='mag', ref_meg=False, exclude='bads')
eeg_ind = pick_types(info, meg=False, eeg=True, ref_meg=False,
exclude='bads')
_check_option('meg', meg, ['separate', 'combined'])
if meg == 'combined':
if n_grad != n_mag:
            raise ValueError('n_grad (%d) must be equal to n_mag (%d) when '
                             'using meg="combined"' % (n_grad, n_mag))
kinds = ['meg', '', 'eeg']
n_mag = 0
grad_ind = pick_types(info, meg=True, ref_meg=False, exclude='bads')
if (n_grad > 0) and len(grad_ind) == 0:
logger.info("No MEG channels found for joint estimation. "
"Forcing n_grad=n_mag=0")
n_grad = 0
else:
kinds = ['planar', 'axial', 'eeg']
if (n_grad > 0) and len(grad_ind) == 0:
logger.info("No gradiometers found. Forcing n_grad to 0")
n_grad = 0
if (n_mag > 0) and len(mag_ind) == 0:
logger.info("No magnetometers found. Forcing n_mag to 0")
n_mag = 0
if (n_eeg > 0) and len(eeg_ind) == 0:
logger.info("No EEG channels found. Forcing n_eeg to 0")
n_eeg = 0
ch_names = info['ch_names']
grad_names, mag_names, eeg_names = ([ch_names[k] for k in ind]
for ind in [grad_ind, mag_ind,
eeg_ind])
projs = []
for n, ind, names, desc in zip([n_grad, n_mag, n_eeg],
[grad_ind, mag_ind, eeg_ind],
[grad_names, mag_names, eeg_names],
kinds):
if n == 0:
continue
data_ind = data[ind][:, ind]
# data is the covariance matrix: U * S**2 * Ut
U, Sexp2, _ = linalg.svd(data_ind, full_matrices=False,
overwrite_a=True)
U = U[:, :n]
exp_var = Sexp2 / Sexp2.sum()
exp_var = exp_var[:n]
for k, (u, var) in enumerate(zip(U.T, exp_var)):
proj_data = dict(col_names=names, row_names=None,
data=u[np.newaxis, :], nrow=1, ncol=u.size)
this_desc = "%s-%s-PCA-%02d" % (desc, desc_prefix, k + 1)
logger.info("Adding projection: %s" % this_desc)
proj = Projection(active=False, data=proj_data,
desc=this_desc, kind=1, explained_var=var)
projs.append(proj)
return projs
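# The helper below is a minimal, self-contained sketch (not part of the MNE
# API) of the idea used in _compute_proj above: since the covariance matrix
# factors as data = U * S**2 * U.T, its leading left singular vectors give the
# PCA components that become the projection vectors.
def _demo_pca_projectors(cov, n_components):
    """Return the first n_components principal directions of a covariance."""
    U, _, _ = linalg.svd(cov, full_matrices=False)
    return U[:, :n_components].T  # one projection vector per row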
@verbose
def compute_proj_epochs(epochs, n_grad=2, n_mag=2, n_eeg=2, n_jobs=1,
desc_prefix=None, meg='separate', verbose=None):
"""Compute SSP (signal-space projection) vectors on epoched data.
%(compute_ssp)s
Parameters
----------
epochs : instance of Epochs
The epochs containing the artifact.
n_grad : int
Number of vectors for gradiometers.
n_mag : int
Number of vectors for magnetometers.
n_eeg : int
Number of vectors for EEG channels.
%(n_jobs)s
Number of jobs to use to compute covariance.
desc_prefix : str | None
The description prefix to use. If None, one will be created based on
the event_id, tmin, and tmax.
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
projs: list
List of projection vectors.
See Also
--------
compute_proj_raw, compute_proj_evoked
"""
# compute data covariance
data = _compute_cov_epochs(epochs, n_jobs)
event_id = epochs.event_id
if event_id is None or len(list(event_id.keys())) == 0:
event_id = '0'
elif len(event_id.keys()) == 1:
event_id = str(list(event_id.values())[0])
else:
event_id = 'Multiple-events'
if desc_prefix is None:
desc_prefix = "%s-%-.3f-%-.3f" % (event_id, epochs.tmin, epochs.tmax)
return _compute_proj(data, epochs.info, n_grad, n_mag, n_eeg, desc_prefix,
meg=meg)
def _compute_cov_epochs(epochs, n_jobs):
"""Compute epochs covariance."""
parallel, p_fun, _ = parallel_func(np.dot, n_jobs)
data = parallel(p_fun(e, e.T) for e in epochs)
n_epochs = len(data)
if n_epochs == 0:
raise RuntimeError('No good epochs found')
n_chan, n_samples = epochs.info['nchan'], len(epochs.times)
_check_n_samples(n_samples * n_epochs, n_chan)
data = sum(data)
return data
@verbose
def compute_proj_evoked(evoked, n_grad=2, n_mag=2, n_eeg=2, desc_prefix=None,
meg='separate', verbose=None):
"""Compute SSP (signal-space projection) vectors on evoked data.
%(compute_ssp)s
Parameters
----------
evoked : instance of Evoked
The Evoked obtained by averaging the artifact.
n_grad : int
Number of vectors for gradiometers.
n_mag : int
Number of vectors for magnetometers.
n_eeg : int
Number of vectors for EEG channels.
desc_prefix : str | None
The description prefix to use. If None, one will be created based on
tmin and tmax.
.. versionadded:: 0.17
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
projs : list
List of projection vectors.
See Also
--------
compute_proj_raw, compute_proj_epochs
"""
data = np.dot(evoked.data, evoked.data.T) # compute data covariance
if desc_prefix is None:
desc_prefix = "%-.3f-%-.3f" % (evoked.times[0], evoked.times[-1])
return _compute_proj(data, evoked.info, n_grad, n_mag, n_eeg, desc_prefix,
meg=meg)
@verbose
def compute_proj_raw(raw, start=0, stop=None, duration=1, n_grad=2, n_mag=2,
n_eeg=0, reject=None, flat=None, n_jobs=1, meg='separate',
verbose=None):
"""Compute SSP (signal-space projection) vectors on continuous data.
%(compute_ssp)s
Parameters
----------
raw : instance of Raw
A raw object to use the data from.
start : float
Time (in sec) to start computing SSP.
stop : float
Time (in sec) to stop computing SSP.
None will go to the end of the file.
duration : float
        Duration (in sec) to chunk data into for SSP.
If duration is None, data will not be chunked.
n_grad : int
Number of vectors for gradiometers.
n_mag : int
Number of vectors for magnetometers.
n_eeg : int
Number of vectors for EEG channels.
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
%(n_jobs)s
Number of jobs to use to compute covariance.
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
projs: list
List of projection vectors.
See Also
--------
compute_proj_epochs, compute_proj_evoked
"""
if duration is not None:
duration = np.round(duration * raw.info['sfreq']) / raw.info['sfreq']
events = make_fixed_length_events(raw, 999, start, stop, duration)
picks = pick_types(raw.info, meg=True, eeg=True, eog=True, ecg=True,
emg=True, exclude='bads')
epochs = Epochs(raw, events, None, tmin=0.,
tmax=duration - 1. / raw.info['sfreq'],
picks=picks, reject=reject, flat=flat,
baseline=None, proj=False)
data = _compute_cov_epochs(epochs, n_jobs)
info = epochs.info
if not stop:
stop = raw.n_times / raw.info['sfreq']
else:
# convert to sample indices
start = max(raw.time_as_index(start)[0], 0)
stop = raw.time_as_index(stop)[0] if stop else raw.n_times
stop = min(stop, raw.n_times)
data, times = raw[:, start:stop]
_check_n_samples(stop - start, data.shape[0])
data = np.dot(data, data.T) # compute data covariance
info = raw.info
# convert back to times
start = start / raw.info['sfreq']
stop = stop / raw.info['sfreq']
desc_prefix = "Raw-%-.3f-%-.3f" % (start, stop)
projs = _compute_proj(data, info, n_grad, n_mag, n_eeg, desc_prefix,
meg=meg)
return projs
@verbose
def sensitivity_map(fwd, projs=None, ch_type='grad', mode='fixed', exclude=[],
verbose=None):
"""Compute sensitivity map.
Such maps are used to know how much sources are visible by a type
of sensor, and how much projections shadow some sources.
Parameters
----------
fwd : Forward
The forward operator.
projs : list
List of projection vectors.
ch_type : 'grad' | 'mag' | 'eeg'
The type of sensors to use.
mode : str
The type of sensitivity map computed. See manual. Should be 'free',
'fixed', 'ratio', 'radiality', 'angle', 'remaining', or 'dampening'
corresponding to the argument --map 1, 2, 3, 4, 5, 6 and 7 of the
command mne_sensitivity_map.
exclude : list of str | str
List of channels to exclude. If empty do not exclude any (default).
If 'bads', exclude channels in fwd['info']['bads'].
%(verbose)s
Returns
-------
stc : SourceEstimate | VolSourceEstimate
The sensitivity map as a SourceEstimate or VolSourceEstimate instance
for visualization.
"""
# check strings
_check_option('ch_type', ch_type, ['eeg', 'grad', 'mag'])
_check_option('mode', mode, ['free', 'fixed', 'ratio', 'radiality',
'angle', 'remaining', 'dampening'])
# check forward
if is_fixed_orient(fwd, orig=True):
        raise ValueError('fwd should be computed with free orientation')
# limit forward (this will make a copy of the data for us)
if ch_type == 'eeg':
fwd = pick_types_forward(fwd, meg=False, eeg=True, exclude=exclude)
else:
fwd = pick_types_forward(fwd, meg=ch_type, eeg=False, exclude=exclude)
convert_forward_solution(fwd, surf_ori=True, force_fixed=False,
copy=False, verbose=False)
if not fwd['surf_ori'] or is_fixed_orient(fwd):
raise RuntimeError('Error converting solution, please notify '
'mne-python developers')
gain = fwd['sol']['data']
# Make sure EEG has average
if ch_type == 'eeg':
if projs is None or not _has_eeg_average_ref_proj(projs):
eeg_ave = [make_eeg_average_ref_proj(fwd['info'])]
else:
eeg_ave = []
projs = eeg_ave if projs is None else projs + eeg_ave
# Construct the projector
residual_types = ['angle', 'remaining', 'dampening']
if projs is not None:
proj, ncomp, U = make_projector(projs, fwd['sol']['row_names'],
include_active=True)
# do projection for most types
if mode not in residual_types:
gain = np.dot(proj, gain)
elif ncomp == 0:
raise RuntimeError('No valid projectors found for channel type '
'%s, cannot compute %s' % (ch_type, mode))
# can only run the last couple methods if there are projectors
elif mode in residual_types:
raise ValueError('No projectors used, cannot compute %s' % mode)
n_sensors, n_dipoles = gain.shape
n_locations = n_dipoles // 3
sensitivity_map = np.empty(n_locations)
for k in range(n_locations):
gg = gain[:, 3 * k:3 * (k + 1)]
if mode != 'fixed':
s = linalg.svd(gg, full_matrices=False, compute_uv=False)
if mode == 'free':
sensitivity_map[k] = s[0]
else:
gz = linalg.norm(gg[:, 2]) # the normal component
if mode == 'fixed':
sensitivity_map[k] = gz
elif mode == 'ratio':
sensitivity_map[k] = gz / s[0]
elif mode == 'radiality':
sensitivity_map[k] = 1. - (gz / s[0])
else:
if mode == 'angle':
co = linalg.norm(np.dot(gg[:, 2], U))
sensitivity_map[k] = co / gz
else:
p = linalg.norm(np.dot(proj, gg[:, 2]))
if mode == 'remaining':
sensitivity_map[k] = p / gz
elif mode == 'dampening':
sensitivity_map[k] = 1. - p / gz
else:
raise ValueError('Unknown mode type (got %s)' % mode)
# only normalize fixed and free methods
if mode in ['fixed', 'free']:
sensitivity_map /= np.max(sensitivity_map)
subject = _subject_from_forward(fwd)
vertices = [s['vertno'] for s in fwd['src']]
return _make_stc(sensitivity_map[:, np.newaxis], vertices, fwd['src'].kind,
tmin=0., tstep=1., subject=subject)
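# A minimal usage sketch (illustrative only; the forward-solution file name is
# hypothetical, ``import mne`` is assumed, and the forward operator must have
# been computed with free source orientation):
#
#     >>> fwd = mne.read_forward_solution('sample-fwd.fif')
#     >>> stc = sensitivity_map(fwd, ch_type='grad', mode='fixed')
#     >>> stc.data.max()   # 'fixed' and 'free' maps are normalized to 1.0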
|
from homeassistant.components.surepetcare.const import DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
HOUSEHOLD_ID = "household-id"
HUB_ID = "hub-id"
MOCK_HUB = {
"id": HUB_ID,
"product_id": 1,
"household_id": HOUSEHOLD_ID,
"name": "Hub",
"status": {"online": True, "led_mode": 0, "pairing_mode": 0},
}
MOCK_FEEDER = {
"id": 12345,
"product_id": 4,
"household_id": HOUSEHOLD_ID,
"name": "Feeder",
"parent": {"product_id": 1, "id": HUB_ID},
"status": {
"battery": 6.4,
"locking": {"mode": 0},
"learn_mode": 0,
"signal": {"device_rssi": 60, "hub_rssi": 65},
},
}
MOCK_CAT_FLAP = {
"id": 13579,
"product_id": 6,
"household_id": HOUSEHOLD_ID,
"name": "Cat Flap",
"parent": {"product_id": 1, "id": HUB_ID},
"status": {
"battery": 6.4,
"locking": {"mode": 0},
"learn_mode": 0,
"signal": {"device_rssi": 65, "hub_rssi": 64},
},
}
MOCK_PET_FLAP = {
"id": 13576,
"product_id": 3,
"household_id": HOUSEHOLD_ID,
"name": "Pet Flap",
"parent": {"product_id": 1, "id": HUB_ID},
"status": {
"battery": 6.4,
"locking": {"mode": 0},
"learn_mode": 0,
"signal": {"device_rssi": 70, "hub_rssi": 65},
},
}
MOCK_PET = {
"id": 24680,
"household_id": HOUSEHOLD_ID,
"name": "Pet",
"position": {"since": "2020-08-23T23:10:50", "where": 1},
"status": {},
}
MOCK_API_DATA = {
"devices": [MOCK_HUB, MOCK_CAT_FLAP, MOCK_PET_FLAP, MOCK_FEEDER],
"pets": [MOCK_PET],
}
MOCK_CONFIG = {
DOMAIN: {
CONF_USERNAME: "test-username",
CONF_PASSWORD: "test-password",
"feeders": [12345],
"flaps": [13579, 13576],
"pets": [24680],
},
}
def _patch_sensor_setup():
return patch(
"homeassistant.components.surepetcare.sensor.async_setup_platform",
return_value=True,
)
|
from mock import patch
from test import unittest
import configobj
from diamond.collector import Collector
class BaseCollectorTest(unittest.TestCase):
def test_SetCustomHostname(self):
config = configobj.ConfigObj()
config['server'] = {}
config['server']['collectors_config_path'] = ''
config['collectors'] = {}
config['collectors']['default'] = {
'hostname': 'custom.localhost',
}
c = Collector(config, [])
        self.assertEqual('custom.localhost', c.get_hostname())
def test_SetHostnameViaShellCmd(self):
config = configobj.ConfigObj()
config['server'] = {}
config['server']['collectors_config_path'] = ''
config['collectors'] = {}
config['collectors']['default'] = {
'hostname': 'echo custom.localhost',
'hostname_method': 'shell',
}
c = Collector(config, [])
        self.assertEqual('custom.localhost', c.get_hostname())
@patch('diamond.collector.get_hostname')
def test_get_metric_path_no_prefix(self, get_hostname_mock):
config = configobj.ConfigObj()
config['collectors'] = {}
config['collectors']['default'] = {}
config['collectors']['default']['path_prefix'] = ''
config['collectors']['default']['path'] = 'bar'
get_hostname_mock.return_value = None
result = Collector(config, []).get_metric_path('foo')
self.assertEqual('bar.foo', result)
@patch('diamond.collector.get_hostname')
def test_get_metric_path_no_prefix_no_path(self, get_hostname_mock):
config = configobj.ConfigObj()
config['collectors'] = {}
config['collectors']['default'] = {}
config['collectors']['default']['path_prefix'] = ''
config['collectors']['default']['path'] = ''
get_hostname_mock.return_value = None
result = Collector(config, []).get_metric_path('foo')
self.assertEqual('foo', result)
@patch('diamond.collector.get_hostname')
def test_get_metric_path_no_path(self, get_hostname_mock):
config = configobj.ConfigObj()
config['collectors'] = {}
config['collectors']['default'] = {}
config['collectors']['default']['path_prefix'] = 'bar'
config['collectors']['default']['path'] = ''
get_hostname_mock.return_value = None
result = Collector(config, []).get_metric_path('foo')
self.assertEqual('bar.foo', result)
@patch('diamond.collector.get_hostname')
def test_get_metric_path_dot_path(self, get_hostname_mock):
config = configobj.ConfigObj()
config['collectors'] = {}
config['collectors']['default'] = {}
config['collectors']['default']['path_prefix'] = 'bar'
config['collectors']['default']['path'] = '.'
get_hostname_mock.return_value = None
result = Collector(config, []).get_metric_path('foo')
self.assertEqual('bar.foo', result)
@patch('diamond.collector.get_hostname')
def test_get_metric_path(self, get_hostname_mock):
config = configobj.ConfigObj()
config['collectors'] = {}
config['collectors']['default'] = {}
config['collectors']['default']['path_prefix'] = 'poof'
config['collectors']['default']['path'] = 'xyz'
get_hostname_mock.return_value = 'bar'
result = Collector(config, []).get_metric_path('foo')
self.assertEqual('poof.bar.xyz.foo', result)
|
import warnings
import numpy as np
from typing import Any, Sequence, List, Optional, Union, Text, Tuple, Dict, Set
from tensornetwork import tensor as tn_tensor
from tensornetwork.backend_contextmanager import get_default_backend
from tensornetwork.backends import backend_factory
from tensornetwork.backends.abstract_backend import AbstractBackend
import time
Tensor = Any
_CACHED_JITTED_NCONS = {}
def _get_cont_out_labels(
network_structure: Sequence[Sequence[Union[int, str]]]) -> Any:
"""
  Compute the contracted and free labels of `network_structure`,
using the following rules:
* Any negative number-type element and any hyphen-prepended str-type
element are considered output labels.
* Any positive number-type element and any non-hyphen-prepended
str-type element are considered contracted labels.
* Any negative number-type element appearing more than once, any
hyphen-prepended str-type element appearing more than once,
any positive element appearing exactly once and
any element appearing more than twice are considered batch labels.
Computed lists are ordered according to int and ASCII ordering
for integer and string values, with first entries in each list
being ordered integer labels followed by ASCII ordered string
labels.
Returns:
int_cont_labels: The number-type contracted labels
str_cont_labels: The str-type contracted labels
int_out_labels: The number-type output labels
str_out_labels: The str-type output labels
"""
flat_labels = [l for sublist in network_structure for l in sublist]
int_labels = {o for o in flat_labels if not isinstance(o, str)}
str_labels = {o for o in flat_labels if isinstance(o, str)}
int_out_labels = sorted([l for l in int_labels if l < 0], reverse=True)
int_cont_labels = sorted([label for label in int_labels if label >= 0])
# pylint: disable=unnecessary-lambda
str_out_labels = sorted([label for label in str_labels if label[0] == '-'],
key=lambda x: str(x))
# pylint: disable=unnecessary-lambda
str_cont_labels = sorted([label for label in str_labels if label[0] != '-'],
key=lambda x: str(x))
# pylint: disable=line-too-long
return int_cont_labels, str_cont_labels, int_out_labels, str_out_labels
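# Worked example (labels reused from the `ncon` docstring further below): for
# [[-1, 1, '-rick', '2', -2], [-2, '2', 1, '-morty']] this returns
# int_cont_labels = [1], str_cont_labels = ['2'],
# int_out_labels = [-1, -2], str_out_labels = ['-morty', '-rick'].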
def _canonicalize_network_structure(
network_structure: Sequence[Sequence[Union[int, str]]]
) -> Tuple[List[List], Dict]:
"""
Map `network_structure` to a canonical form.
The elements in `network_structure` are replaced
by integers according to the following rules:
1. All negative numbers are sorted in decreasing order and mapped to
to decreasing integers, starting with -1.
E.g., the numbers [-4,-10,-1] are mapped to
-1 -> -1, -4 -> -2, -10 -> -3
2. All strings prepended with a hyphen '-' are ordered increasingly
using ASCII ordering, and mapped to decreasing negative integers
starting with the next value following the last integer under
point 1. above. E.g. [-4,-10,-1,'-303','-a','-33']
is mapped to
-1 -> -1, -4 -> -2, -10 -> -3, '-303' -> -4, '-33' -> -5, '-a' -> -6
3. All positive numbers are sorted increasingly and mapped to
increasing integers, starting at 1
4. All strings without a prepended hyphen are sorted increasingly
using ASCII order and mapped to positive integers, starting
with the next integer value following the last used value under
point 3.
"""
flat_labels = [l for sublist in network_structure for l in sublist]
neg_int_labels = sorted(
list({l for l in flat_labels if not isinstance(l, str) and l < 0}))
pos_int_labels = sorted(
list({l for l in flat_labels if not isinstance(l, str) and l > 0}))
neg_str_labels = sorted(
{l for l in flat_labels if isinstance(l, str) and l[0] == '-'},
reverse=True)
pos_str_labels = sorted(
list({l for l in flat_labels if isinstance(l, str) and l[0] != '-'}))
neg_mapping = dict(
zip(neg_str_labels + neg_int_labels,
np.arange(-len(neg_int_labels + neg_str_labels), 0)))
pos_mapping = dict(
zip(pos_int_labels + pos_str_labels,
np.arange(1, 1 + len(pos_int_labels + pos_str_labels))))
neg_mapping.update(pos_mapping)
mapped_network_structure = [
[neg_mapping[label] for label in labels] for labels in network_structure
]
return mapped_network_structure, neg_mapping
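# Worked example of the rules above (labels chosen for demonstration):
#
#     >>> mapped, mapping = _canonicalize_network_structure(
#     ...     [[-4, 1, '-a'], ['b', 1, -1]])
#     >>> mapped    # [[-2, 1, -3], [2, 1, -1]]
#
# i.e. -1 -> -1, -4 -> -2, '-a' -> -3, 1 -> 1 and 'b' -> 2.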
def _check_network(network_structure: Sequence[Sequence[Union[int, str]]],
tensor_dimensions: List[Tuple[int]],
con_order: Optional[List[Union[int, str]]] = None,
out_order: Optional[List[Union[int, str]]] = None) -> None:
"""
Perform checks on `network_structure`.
"""
# check if number of tensors matches the number of lists
# in network_structure
if len(network_structure) != len(tensor_dimensions):
raise ValueError("number of tensors does not match the"
" number of network connections.")
# check number of labels of each element in network_structure
# matches the tensor order
for n, dims in enumerate(tensor_dimensions):
if len(dims) != len(network_structure[n]):
raise ValueError(f"number of indices does not match"
f" number of labels on tensor {n}.")
flat_labels = [l for sublist in network_structure for l in sublist]
# pylint: disable=line-too-long
int_cont_labels, str_cont_labels, int_out_labels, str_out_labels = _get_cont_out_labels(
network_structure)
out_labels = int_out_labels + str_out_labels
cont_labels = int_cont_labels + str_cont_labels
str_labels = str_cont_labels + [l[1:] for l in str_out_labels]
mask = [l.isalnum() for l in str_labels]
if not np.all(mask):
raise ValueError(
f"only alphanumeric values allowed for string labels, "
f"found {[l for n, l in enumerate(str_labels) if not mask[n]]}")
# make sure no value 0 is used as a label (legacy behaviour)
if int_cont_labels.count(0) > 0:
raise ValueError("only nonzero values are allowed to "
"specify network structure.")
if con_order is not None:
#check that all integer elements in `con_order` are positive
int_cons = [o for o in con_order if not isinstance(o, str)]
labels = [o for o in int_cons if o < 0]
if len(labels) > 0:
raise ValueError(f"all number type labels in `con_order` have "
f"to be positive, found {labels}")
str_cons = [o for o in con_order if isinstance(o, str)]
labels = [o for o in str_cons if o[0] == '-']
#check that all string type elements in `con_order` have no hyphens
if len(labels) > 0:
raise ValueError(f"all string type labels in `con_order` "
f"must be unhyphenized, found {labels}")
# check that elements in `con_order` appear only once
labels = []
for l in con_order:
if (con_order.count(l) != 1) and (l not in labels):
labels.append(l)
if len(labels) > 0:
raise ValueError(f"labels {labels} appear more than once in `con_order`.")
# check if passed `con_order` makes sense
if len(con_order) != len(cont_labels):
raise ValueError(f"`con_order = {con_order} is not "
f"a valid contraction order for contracted "
f"labels {cont_labels}")
# check if all labels in `con_order` appear in `network_structure`
labels = [o for o in con_order if o not in flat_labels]
if len(labels) != 0:
raise ValueError(f"labels {labels} in `con_order` do not appear as "
f"contracted labels in `network_structure`.")
if out_order is not None:
#check that all integer elements in `out_order` are negative
int_outs = [o for o in out_order if not isinstance(o, str)]
labels = [o for o in int_outs if o > 0]
if len(labels) > 0:
raise ValueError(f"all number type labels in `out_order` have "
f"to be negative, found {labels}")
#check that all string type elements in `out_order` have hyphens
str_outs = [o for o in out_order if isinstance(o, str)]
labels = [o for o in str_outs if o[0] != '-']
if len(labels) > 0:
raise ValueError(f"all string type labels in `out_order` "
f"have to be hyphenized, found {labels}")
# check that all elements in `out_order` appear exactly once
labels = []
for l in out_order:
if (out_order.count(l) != 1) and (l not in labels):
labels.append(l)
if len(labels) > 0:
raise ValueError(f"labels {labels} appear more than once in `out_order`.")
# check if `out_order` has right length
if len(out_order) != len(out_labels):
raise ValueError(f"`out_order` = {out_order} is not "
f"a valid output order for open "
f"labels {out_labels}")
# check if all labels in `out_order` appear in `network_structure`
labels = [o for o in out_order if o not in flat_labels]
if len(labels) != 0:
raise ValueError(f"labels {labels} in `out_order` do not "
f"appear in `network_structure`.")
# check if contracted dimensions are matching
mismatched_labels = []
for l in cont_labels:
dims = {
tensor_dimensions[m][n]
for m, labels in enumerate(network_structure)
for n, l1 in enumerate(labels)
if l1 == l
}
if len(dims) > 1:
mismatched_labels.append(l)
if len(mismatched_labels) > 0:
raise ValueError(
f"tensor dimensions for labels {mismatched_labels} are mismatching")
def _partial_trace(
tensor: Tensor, labels: List,
backend_obj: AbstractBackend) -> Tuple[Tensor, List, List]:
"""
Perform the partial trace of `tensor`.
All labels appearing twice in `labels` are traced out.
  Args:
    tensor: A tensor.
    labels: The ncon-style labels of `tensor`.
  Returns:
    Tensor: The result of the tracing.
    List: The remaining labels after tracing.
    List: The traced-out labels.
"""
trace_labels = [l for l in labels if labels.count(l) == 2]
if len(trace_labels) > 0:
num_cont = len(trace_labels) // 2
unique_trace_labels = sorted(trace_labels)[0:-1:2]
trace_label_positions = [[
n for n, label in enumerate(labels) if label == trace_label
] for trace_label in unique_trace_labels]
contracted_indices = [l[0] for l in trace_label_positions
] + [l[1] for l in trace_label_positions]
free_indices = [
n for n in range(len(labels)) if n not in contracted_indices
]
shape = backend_obj.shape_tuple(tensor)
contracted_dimension = np.prod(
[shape[d] for d in contracted_indices[:num_cont]])
temp_shape = tuple([shape[pos] for pos in free_indices] +
[contracted_dimension, contracted_dimension])
result = backend_obj.trace(
backend_obj.reshape(
backend_obj.transpose(tensor,
tuple(free_indices + contracted_indices)),
temp_shape))
new_labels = [l for l in labels if l not in unique_trace_labels]
return result, new_labels, unique_trace_labels
return tensor, labels, []
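# Worked example (shapes chosen for demonstration): with labels [1, -1, 1] the
# repeated label 1 is traced out, so a (2, 3, 2) tensor is reduced to shape
# (3,) with remaining labels [-1] and traced labels [1]:
#
#     >>> backend_obj = backend_factory.get_backend('numpy')
#     >>> res, new_labels, traced = _partial_trace(
#     ...     np.ones((2, 3, 2)), [1, -1, 1], backend_obj)
#     >>> res.shape, new_labels, traced   # ((3,), [-1], [1])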
def _batch_cont(
t1: Tensor, t2: Tensor, tensors: List[Tensor],
network_structure: List[List], con_order: List, common_batch_labels: Set,
labels_t1: List, labels_t2: List, backend_obj: AbstractBackend
) -> Tuple[Tensor, List[List], List]:
"""
Subroutine for performing a batched contraction of tensors `t1` and `t2`.
Args:
t1: A Tensor.
t2: A Tensor.
tensors: List of Tensor objects.
network_structure: The canonical labels of the networks.
con_order: Array of contracted labels.
common_batch_labels: The common batch labels of `t1` and `t2`.
labels_t1: The labels of `t1`
labels_t2: The labels of `t2`
backend_obj: A backend object.
Returns:
List[Tensor]: Updated list of tensors.
List[List]: Updated `network_structure`.
List: Updated `con_order` (contraction order).
"""
common_batch_labels = list(common_batch_labels)
#find positions of common batch labels
t1_batch_pos = [labels_t1.index(l) for l in common_batch_labels]
t2_batch_pos = [labels_t2.index(l) for l in common_batch_labels]
#find positions of contracted non-batch labels
non_batch_labels_t1 = {l for l in labels_t1 if l not in common_batch_labels}
non_batch_labels_t2 = {l for l in labels_t2 if l not in common_batch_labels}
common_contracted_labels = list(
non_batch_labels_t1.intersection(non_batch_labels_t2))
t1_cont = [labels_t1.index(l) for l in common_contracted_labels]
t2_cont = [labels_t2.index(l) for l in common_contracted_labels]
free_labels_t1 = set(labels_t1) - set(common_contracted_labels) - set(
common_batch_labels)
free_labels_t2 = set(labels_t2) - set(common_contracted_labels) - set(
common_batch_labels)
# find positions of uncontracted non-batch labels
free_pos_t1 = [n for n, l in enumerate(labels_t1) if l in free_labels_t1]
free_pos_t2 = [n for n, l in enumerate(labels_t2) if l in free_labels_t2]
t1_shape = np.array(backend_obj.shape_tuple(t1))
t2_shape = np.array(backend_obj.shape_tuple(t2))
newshape_t1 = (np.prod(t1_shape[t1_batch_pos]),
np.prod(t1_shape[free_pos_t1]), np.prod(t1_shape[t1_cont]))
newshape_t2 = (np.prod(t2_shape[t2_batch_pos]), np.prod(t2_shape[t2_cont]),
np.prod(t2_shape[free_pos_t2]))
#bring batch labels to the front
order_t1 = tuple(t1_batch_pos + free_pos_t1 + t1_cont)
order_t2 = tuple(t2_batch_pos + t2_cont + free_pos_t2)
mat1 = backend_obj.reshape(backend_obj.transpose(t1, order_t1), newshape_t1)
mat2 = backend_obj.reshape(backend_obj.transpose(t2, order_t2), newshape_t2)
result = backend_obj.matmul(mat1, mat2)
final_shape = tuple(
np.concatenate([
t1_shape[t1_batch_pos], t1_shape[free_pos_t1], t2_shape[free_pos_t2]
]))
result = backend_obj.reshape(result, final_shape)
# update labels, tensors, network_structure and con_order
new_labels = [labels_t1[i] for i in t1_batch_pos] + [
labels_t1[i] for i in free_pos_t1
] + [labels_t2[i] for i in free_pos_t2]
network_structure.append(new_labels)
tensors.append(result)
con_order = [c for c in con_order if c not in common_contracted_labels]
return tensors, network_structure, con_order
def label_intersection(labels1, labels2):
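  """
  Return the labels shared by `labels1` and `labels2` together with their
  positions in each list. The order of `common_labels` is not guaranteed,
  since it comes from a set intersection.
  """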
common_labels = list(set(labels1).intersection(labels2))
idx_1 = [labels1.index(l) for l in common_labels]
idx_2 = [labels2.index(l) for l in common_labels]
return common_labels, idx_1, idx_2
def _jittable_ncon(tensors: List[Tensor], flat_labels: Tuple[int],
sizes: Tuple[int], con_order: Tuple[int],
out_order: Tuple[int],
backend_obj: AbstractBackend) -> Tensor:
"""
Jittable Ncon function. Performs the contraction of `tensors`.
Args:
tensors: List of tensors.
flat_labels: A Tuple of integers.
sizes: Tuple of int used to reconstruct `network_structure` from
`flat_labels`.
con_order: Order of the contraction.
out_order: Order of the final axis order.
backend_obj: A backend object.
Returns:
The final tensor after contraction.
"""
# some jax-juggling to avoid retracing ...
flat_labels = list(flat_labels)
slices = np.append(0, np.cumsum(sizes))
network_structure = [
flat_labels[slices[n]:slices[n + 1]] for n in range(len(slices) - 1)
]
out_order = list(out_order)
con_order = list(con_order)
# pylint: disable=unnecessary-comprehension
init_con_order = [c for c in con_order]
init_network_structure = [c for c in network_structure]
# partial trace
for n, tensor in enumerate(tensors):
tensors[n], network_structure[n], contracted_labels = _partial_trace(
tensor, network_structure[n], backend_obj)
if len(contracted_labels) > 0:
con_order = [c for c in con_order if c not in contracted_labels]
flat_labels = [l for sublist in network_structure for l in sublist]
  # contract all positive labels appearing only once in `network_structure`
contractable_labels = [
l for l in flat_labels if (flat_labels.count(l) == 1) and (l > 0)
]
# update con_order
if len(contractable_labels) > 0:
con_order = [o for o in con_order if o not in contractable_labels]
# collapse axes of single-labelled tensors
locs = []
for n, labels in enumerate(network_structure):
if len(set(labels).intersection(contractable_labels)) > 0:
locs.append(n)
for loc in locs:
labels = network_structure[loc]
contractable_inds = [labels.index(l) for l in contractable_labels]
network_structure[loc] = [l for l in labels if l not in contractable_labels]
tensors[loc] = backend_obj.sum(tensors[loc], tuple(contractable_inds))
# perform binary and batch contractions
skip_counter = 0
batch_labels = []
batch_cnts = []
for l in set(flat_labels):
cnt = flat_labels.count(l)
if (cnt > 2) or (cnt == 2 and l < 0):
batch_labels.append(l)
batch_cnts.append(cnt)
while len(con_order) > 0:
# the next index to be contracted
cont_ind = con_order[0]
if cont_ind in batch_labels:
      # if it's still a batch index then do it later
con_order.append(con_order.pop(0))
skip_counter += 1
# avoid being stuck in an infinite loop
if skip_counter > len(con_order):
raise ValueError(f"ncon seems stuck in an infinite loop. \n"
f"Please check if `con_order` = {init_con_order} is "
f"a valid contraction order for \n"
f"`network_structure` = {init_network_structure}")
continue
# find locations of `cont_ind` in `network_structure`
locs = [
n for n, labels in enumerate(network_structure) if cont_ind in labels
]
t2 = tensors.pop(locs[1])
t1 = tensors.pop(locs[0])
labels_t2 = network_structure.pop(locs[1])
labels_t1 = network_structure.pop(locs[0])
common_labels, t1_cont, t2_cont = label_intersection(labels_t1, labels_t2)
# check if there are batch labels (i.e. labels appearing more than twice
# in `network_structure`).
common_batch_labels = set(batch_labels).intersection(common_labels)
if len(common_batch_labels) > 0:
# case1: both tensors have one or more common batch indices -> use matmul
ix = np.nonzero(
np.array(batch_labels)[:, None] == np.array(
list(common_batch_labels))[None, :])[0]
# reduce the counts of these labels in `batch_cnts` by 1
delete = []
for i in ix:
batch_cnts[i] -= 1
if (batch_labels[i] > 0) and (batch_cnts[i] <= 2):
delete.append(i)
elif (batch_labels[i] < 0) and (batch_cnts[i] < 2):
delete.append(i)
for i in sorted(delete, reverse=True):
del batch_cnts[i]
del batch_labels[i]
tensors, network_structure, con_order = _batch_cont(
t1, t2, tensors, network_structure, con_order, common_batch_labels,
labels_t1, labels_t2, backend_obj)
# in all other cases do a regular tensordot
else:
# for len(t1_cont)~<20 this is faster than np.argsort
ind_sort = [t1_cont.index(l) for l in sorted(t1_cont)]
tensors.append(
backend_obj.tensordot(
t1,
t2,
axes=(tuple([t1_cont[i] for i in ind_sort]),
tuple([t2_cont[i] for i in ind_sort]))))
new_labels = [l for l in labels_t1 if l not in common_labels
] + [l for l in labels_t2 if l not in common_labels]
network_structure.append(new_labels)
# remove contracted labels from con_order
con_order = [c for c in con_order if c not in common_labels]
# perform outer products and remaining batch contractions
while len(tensors) > 1:
t2 = tensors.pop()
t1 = tensors.pop()
labels_t2 = network_structure.pop()
labels_t1 = network_structure.pop()
# check if there are negative batch indices left
# (have to be collapsed to a single one)
common_labels, t1_cont, t2_cont = label_intersection(labels_t1, labels_t2)
common_batch_labels = set(batch_labels).intersection(common_labels)
if len(common_batch_labels) > 0:
# collapse all negative batch indices
tensors, network_structure, con_order = _batch_cont(
t1, t2, tensors, network_structure, con_order, common_batch_labels,
labels_t1, labels_t2, backend_obj)
else:
tensors.append(backend_obj.outer_product(t1, t2))
network_structure.append(labels_t1 + labels_t2)
# if necessary do a final permutation
if len(network_structure[0]) > 1:
labels = network_structure[0]
final_order = tuple([labels.index(l) for l in out_order])
return backend_obj.transpose(tensors[0], final_order)
return tensors[0]
def ncon(
tensors: Sequence[Union[tn_tensor.Tensor, Tensor]],
network_structure: Sequence[Sequence[Union[str, int]]],
con_order: Optional[Sequence] = None,
out_order: Optional[Sequence] = None,
check_network: bool = True,
backend: Optional[Union[Text, AbstractBackend]] = None
) -> Union[tn_tensor.Tensor, Tensor]:
r"""Contracts a list of backend-tensors or `Tensor`s
according to a tensor network
specification.
The network is provided as a list of lists, one for each
tensor, specifying the labels for the edges connected to that tensor.
Labels can be any numbers or strings. Negative number-type labels
and string-type labels with a prepended hyphen ('-') are open labels
and remain uncontracted.
Positive number-type labels and string-type labels with no prepended
hyphen ('-') are closed labels and are contracted.
Any open label appearing more than once is treated as an open
batch label. Any closed label appearing more than once is treated as
a closed batch label.
Upon finishing the contraction, all open batch labels will have been
collapsed into a single dimension, and all closed batch labels will
have been summed over.
If `out_order = None`, output labels are ordered according to descending
number ordering and ascending ASCII ordering, with number labels always
appearing before string labels. Example:
network_structure = [[-1, 1, '-rick', '2',-2], [-2, '2', 1, '-morty']]
results in an output order of [-1, -2, '-morty', '-rick'].
If `out_order` is given, the indices of the resulting tensor will be
transposed into this order.
If `con_order = None`, `ncon` will first contract all number labels
in ascending order followed by all string labels in ascending ASCII
order.
If `con_order` is given, `ncon` will contract according to this order.
For example, matrix multiplication:
.. code-block:: python
A = np.array([[1.0, 2.0], [3.0, 4.0]])
B = np.array([[1.0, 1.0], [0.0, 1.0]])
ncon([A,B], [(-1, 1), (1, -2)])
Matrix trace:
.. code-block:: python
A = np.array([[1.0, 2.0], [3.0, 4.0]])
ncon([A], [(1, 1)]) # 5.0
Note:
Disallowing `0` as an edge label is legacy behaviour, see
`original NCON implementation`_.
.. _original NCON implementation:
https://arxiv.org/abs/1402.0939
Args:
tensors: List of backend-tensors or `Tensor`s.
network_structure: List of lists specifying the tensor network structure.
con_order: List of edge labels specifying the contraction order.
out_order: List of edge labels specifying the output order.
check_network: Boolean flag. If `True` check the network.
backend: String specifying the backend to use. Defaults to
`tensornetwork.backend_contextmanager.get_default_backend`.
Returns:
The result of the contraction:
* A backend-tensor: If all elements of `tensors` are backend-tensors.
* A `Tensor`: If all elements of `tensors` are `Tensor` objects.
"""
# TODO (mganahl): for certain cases np.einsum is still faster than ncon:
# - contractions containing batched outer products with small dimensions
# This should eventually be fixed, but it's not a priority.
if backend is None:
backend = get_default_backend()
if isinstance(backend, AbstractBackend):
backend_obj = backend
else:
backend_obj = backend_factory.get_backend(backend)
if out_order == []: #allow empty list as input
out_order = None
if con_order == []: #allow empty list as input
con_order = None
are_tensors = [isinstance(t, tn_tensor.Tensor) for t in tensors]
tensors_set = {t for t in tensors if isinstance(t, tn_tensor.Tensor)}
if not all([n.backend.name == backend_obj.name for n in tensors_set]):
raise ValueError("Some tensors have backends different from '{}'".format(
backend_obj.name))
_tensors = []
for t in tensors:
if isinstance(t, tn_tensor.Tensor):
_tensors.append(t.array)
else:
_tensors.append(t)
_tensors = [backend_obj.convert_to_tensor(t) for t in _tensors]
if check_network:
_check_network(network_structure, [t.shape for t in _tensors], con_order,
out_order)
network_structure, mapping = _canonicalize_network_structure(
network_structure)
flat_labels = [l for sublist in network_structure for l in sublist]
unique_flat_labels = list(set(flat_labels))
if out_order is None:
# negative batch labels (negative labels appearing more than once)
# are subject to the same output ordering as regular output labels
out_order = sorted([l for l in unique_flat_labels if l < 0], reverse=True)
else:
out_order = [mapping[o] for o in out_order]
if con_order is None:
# canonicalization of network structure takes care of appropriate
# contraction ordering (i.e. use ASCII ordering for str and
# regular ordering for int)
# all positive labels appearing are considered proper contraction labels.
con_order = sorted([l for l in unique_flat_labels if l > 0])
else:
con_order = [mapping[o] for o in con_order]
if backend not in _CACHED_JITTED_NCONS:
_CACHED_JITTED_NCONS[backend] = backend_obj.jit(
_jittable_ncon, static_argnums=(1, 2, 3, 4, 5))
sizes = tuple([len(l) for l in network_structure])
res_tensor = _CACHED_JITTED_NCONS[backend](_tensors, tuple(flat_labels),
sizes, tuple(con_order),
tuple(out_order), backend_obj)
if all(are_tensors):
return tn_tensor.Tensor(res_tensor, backend=backend_obj)
return res_tensor
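# Illustrative batched matrix multiplication with the default NumPy backend
# (shapes chosen for demonstration): the open label -1 appears twice, so it is
# treated as a batch dimension shared by both tensors instead of being
# contracted.
#
#     >>> A = np.random.rand(10, 3, 4)
#     >>> B = np.random.rand(10, 4, 5)
#     >>> ncon([A, B], [[-1, -2, 1], [-1, 1, -3]]).shape   # (10, 3, 5)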
def finalize(ncon_builder: tn_tensor.NconBuilder) -> tn_tensor.Tensor:
return ncon(
ncon_builder.tensors,
ncon_builder.axes,
backend=ncon_builder.tensors[0].backend)
|
import os
import tempfile
from io import open
from stash.tests.stashtest import StashTestCase
class CopyPasteTests(StashTestCase):
"""
Test class for the 'pbcopy' and 'pbpaste' commands.
"""
def test_pbcopy_help(self):
"""
test 'pbcopy --help'.
"""
output_1 = self.run_command("pbcopy -h", exitcode=0)
output_2 = self.run_command("pbcopy --help", exitcode=0)
self.assertEqual(output_1, output_2)
self.assertIn("-h", output_1)
self.assertIn("--help", output_1)
self.assertIn("file", output_1)
self.assertIn("pbcopy", output_1)
self.assertIn("...", output_1)
def test_pbpaste_help(self):
"""
test 'pbpaste --help'.
"""
output_1 = self.run_command("pbpaste -h", exitcode=0)
output_2 = self.run_command("pbpaste --help", exitcode=0)
self.assertEqual(output_1, output_2)
self.assertIn("-h", output_1)
self.assertIn("--help", output_1)
self.assertIn("file", output_1)
self.assertIn("pbpaste", output_1)
def test_copy_paste_stdin(self):
"""
Test copy of stdin & paste
"""
self.run_command("echo teststring | pbcopy", exitcode=0)
output = self.run_command("pbpaste", exitcode=0)
self.assertEqual("teststring\n", output)
def test_copy_paste_file(self):
"""
Test copy of a file & paste
"""
p = os.path.join(self.get_data_path(), "testfile.txt")
self.run_command("pbcopy " + p, exitcode=0)
output = self.run_command("pbpaste", exitcode=0)
with open(p, "r", encoding="utf-8") as fin:
content = fin.read()
self.assertEqual(output, content)
def test_paste_into_file(self):
"""
Test copy of a file & paste into a file.
        Comparison is done using 'md5sum'.
"""
pin = os.path.join(self.get_data_path(), "testfile.txt")
pout = os.path.join(tempfile.gettempdir(), "testpastefile.txt")
if os.path.exists(pout):
os.remove(pout)
self.run_command("pbcopy " + pin, exitcode=0)
self.run_command("pbpaste " + pout, exitcode=0)
org_hash = self.run_command("md5sum " + pin, exitcode=0).split()[0]
paste_hash = self.run_command("md5sum " + pout, exitcode=0).split()[0]
self.assertEqual(org_hash, paste_hash)
|
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_NAME
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from .const import ATTR_TARIFF, DEFAULT_NAME, DEFAULT_TARIFF, DOMAIN, PLATFORM, TARIFFS
UI_CONFIG_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME, default=DEFAULT_NAME): str,
vol.Required(ATTR_TARIFF, default=DEFAULT_TARIFF): vol.In(TARIFFS),
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.ensure_list(UI_CONFIG_SCHEMA)}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass: HomeAssistant, config: dict):
"""
Set up the electricity price sensor from configuration.yaml.
```yaml
pvpc_hourly_pricing:
- name: PVPC manual ve
tariff: electric_car
- name: PVPC manual nocturna
tariff: discrimination
timeout: 3
```
"""
for conf in config.get(DOMAIN, []):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, data=conf, context={"source": config_entries.SOURCE_IMPORT}
)
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry):
"""Set up pvpc hourly pricing from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, PLATFORM)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: config_entries.ConfigEntry):
"""Unload a config entry."""
return await hass.config_entries.async_forward_entry_unload(entry, PLATFORM)
|
from datetime import timedelta
import logging
import pysnmp.hlapi.asyncio as hlapi
from pysnmp.hlapi.asyncio import (
CommunityData,
ContextData,
ObjectIdentity,
ObjectType,
SnmpEngine,
UdpTransportTarget,
UsmUserData,
getCmd,
)
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
CONF_VALUE_TEMPLATE,
STATE_UNKNOWN,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from .const import (
CONF_ACCEPT_ERRORS,
CONF_AUTH_KEY,
CONF_AUTH_PROTOCOL,
CONF_BASEOID,
CONF_COMMUNITY,
CONF_DEFAULT_VALUE,
CONF_PRIV_KEY,
CONF_PRIV_PROTOCOL,
CONF_VERSION,
DEFAULT_AUTH_PROTOCOL,
DEFAULT_COMMUNITY,
DEFAULT_HOST,
DEFAULT_NAME,
DEFAULT_PORT,
DEFAULT_PRIV_PROTOCOL,
DEFAULT_VERSION,
MAP_AUTH_PROTOCOLS,
MAP_PRIV_PROTOCOLS,
SNMP_VERSIONS,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=10)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_BASEOID): cv.string,
vol.Optional(CONF_ACCEPT_ERRORS, default=False): cv.boolean,
vol.Optional(CONF_COMMUNITY, default=DEFAULT_COMMUNITY): cv.string,
vol.Optional(CONF_DEFAULT_VALUE): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_VERSION, default=DEFAULT_VERSION): vol.In(SNMP_VERSIONS),
vol.Optional(CONF_USERNAME): cv.string,
vol.Optional(CONF_AUTH_KEY): cv.string,
vol.Optional(CONF_AUTH_PROTOCOL, default=DEFAULT_AUTH_PROTOCOL): vol.In(
MAP_AUTH_PROTOCOLS
),
vol.Optional(CONF_PRIV_KEY): cv.string,
vol.Optional(CONF_PRIV_PROTOCOL, default=DEFAULT_PRIV_PROTOCOL): vol.In(
MAP_PRIV_PROTOCOLS
),
}
)
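# A minimal sketch of a configuration.yaml entry accepted by the schema above
# (host and OID values are placeholders; omitted keys fall back to defaults):
#
#   sensor:
#     - platform: snmp
#       host: 192.168.1.32
#       baseoid: 1.3.6.1.4.1.2021.10.1.3.1
#       name: Load average
#       unit_of_measurement: load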
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the SNMP sensor."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
community = config.get(CONF_COMMUNITY)
baseoid = config.get(CONF_BASEOID)
unit = config.get(CONF_UNIT_OF_MEASUREMENT)
version = config.get(CONF_VERSION)
username = config.get(CONF_USERNAME)
authkey = config.get(CONF_AUTH_KEY)
authproto = config.get(CONF_AUTH_PROTOCOL)
privkey = config.get(CONF_PRIV_KEY)
privproto = config.get(CONF_PRIV_PROTOCOL)
accept_errors = config.get(CONF_ACCEPT_ERRORS)
default_value = config.get(CONF_DEFAULT_VALUE)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
if version == "3":
if not authkey:
authproto = "none"
if not privkey:
privproto = "none"
request_args = [
SnmpEngine(),
UsmUserData(
username,
authKey=authkey or None,
privKey=privkey or None,
authProtocol=getattr(hlapi, MAP_AUTH_PROTOCOLS[authproto]),
privProtocol=getattr(hlapi, MAP_PRIV_PROTOCOLS[privproto]),
),
UdpTransportTarget((host, port)),
ContextData(),
]
else:
request_args = [
SnmpEngine(),
CommunityData(community, mpModel=SNMP_VERSIONS[version]),
UdpTransportTarget((host, port)),
ContextData(),
]
errindication, _, _, _ = await getCmd(
*request_args, ObjectType(ObjectIdentity(baseoid))
)
if errindication and not accept_errors:
_LOGGER.error("Please check the details in the configuration file")
return
data = SnmpData(request_args, baseoid, accept_errors, default_value)
async_add_entities([SnmpSensor(data, name, unit, value_template)], True)
class SnmpSensor(Entity):
"""Representation of a SNMP sensor."""
def __init__(self, data, name, unit_of_measurement, value_template):
"""Initialize the sensor."""
self.data = data
self._name = name
self._state = None
self._unit_of_measurement = unit_of_measurement
self._value_template = value_template
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
async def async_update(self):
"""Get the latest data and updates the states."""
await self.data.async_update()
value = self.data.value
if value is None:
value = STATE_UNKNOWN
elif self._value_template is not None:
value = self._value_template.async_render_with_possible_json_value(
value, STATE_UNKNOWN
)
self._state = value
class SnmpData:
"""Get the latest data and update the states."""
def __init__(self, request_args, baseoid, accept_errors, default_value):
"""Initialize the data object."""
self._request_args = request_args
self._baseoid = baseoid
self._accept_errors = accept_errors
self._default_value = default_value
self.value = None
async def async_update(self):
"""Get the latest data from the remote SNMP capable host."""
errindication, errstatus, errindex, restable = await getCmd(
*self._request_args, ObjectType(ObjectIdentity(self._baseoid))
)
if errindication and not self._accept_errors:
_LOGGER.error("SNMP error: %s", errindication)
elif errstatus and not self._accept_errors:
_LOGGER.error(
"SNMP error: %s at %s",
errstatus.prettyPrint(),
errindex and restable[-1][int(errindex) - 1] or "?",
)
elif (errindication or errstatus) and self._accept_errors:
self.value = self._default_value
else:
for resrow in restable:
self.value = str(resrow[-1])
|
from unittest import TestCase
import numpy as np
from scattertext.termscoring.CredTFIDF import CredTFIDF
from scattertext.test.test_termDocMatrixFactory import build_hamlet_jz_corpus
class TestCredTFIDF(TestCase):
def test_get_score_df(self):
corpus = build_hamlet_jz_corpus()
self.assertEqual(
set(CredTFIDF(corpus).set_categories('hamlet').get_score_df().columns),
set(['pos_cred_tfidf', 'neg_cred_tfidf', 'delta_cred_tf_idf'])
)
#print(CredTFIDF(corpus).set_categories('hamlet').get_score_df(bootstrap=True, num_bootstraps=2))
|
import os
import socket
import platform
import logging
import configobj
import time
import re
import subprocess
from diamond.metric import Metric
from diamond.utils.config import load_config
from error import DiamondException
# Detect the architecture of the system and set the counters for MAX_VALUES
# appropriately. Otherwise, rolling over counters will cause incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
MAX_COUNTER = (2 ** 64) - 1
else:
MAX_COUNTER = (2 ** 32) - 1
def get_hostname(config, method=None):
"""
Returns a hostname as configured by the user
"""
method = method or config.get('hostname_method', 'smart')
# case insensitive method
method = method.lower()
if 'hostname' in config and method != 'shell':
return config['hostname']
if method in get_hostname.cached_results:
return get_hostname.cached_results[method]
if method == 'shell':
if 'hostname' not in config:
raise DiamondException(
"hostname must be set to a shell command for"
" hostname_method=shell")
else:
proc = subprocess.Popen(config['hostname'],
shell=True,
stdout=subprocess.PIPE)
hostname = proc.communicate()[0].strip()
if proc.returncode != 0:
raise subprocess.CalledProcessError(proc.returncode,
config['hostname'])
get_hostname.cached_results[method] = hostname
return hostname
if method == 'smart':
hostname = get_hostname(config, 'fqdn_short')
if hostname != 'localhost':
get_hostname.cached_results[method] = hostname
return hostname
hostname = get_hostname(config, 'hostname_short')
get_hostname.cached_results[method] = hostname
return hostname
if method == 'fqdn_short':
hostname = socket.getfqdn().split('.')[0]
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'fqdn':
hostname = socket.getfqdn().replace('.', '_')
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'fqdn_rev':
hostname = socket.getfqdn().split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'uname_short':
hostname = os.uname()[1].split('.')[0]
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'uname_rev':
hostname = os.uname()[1].split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'hostname':
hostname = socket.gethostname()
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'hostname_short':
hostname = socket.gethostname().split('.')[0]
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'hostname_rev':
hostname = socket.gethostname().split('.')
hostname.reverse()
hostname = '.'.join(hostname)
get_hostname.cached_results[method] = hostname
if hostname == '':
raise DiamondException('Hostname is empty?!')
return hostname
if method == 'none':
get_hostname.cached_results[method] = None
return None
raise NotImplementedError(config['hostname_method'])
get_hostname.cached_results = {}
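# Illustrative behaviour (hostnames are placeholders):
#
#     >>> get_hostname({'hostname': 'echo web01.example.com',
#     ...               'hostname_method': 'shell'})
#     'web01.example.com'
#     >>> get_hostname({'hostname_method': 'none'}) is None
#     True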
def str_to_bool(value):
"""
    Converts truthy/falsey strings to a bool
Empty strings are false
"""
if isinstance(value, basestring):
value = value.strip().lower()
if value in ['true', 't', 'yes', 'y']:
return True
elif value in ['false', 'f', 'no', 'n', '']:
return False
else:
raise NotImplementedError("Unknown bool %s" % value)
return value
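# Illustrative behaviour:
#
#     >>> str_to_bool('Yes'), str_to_bool(''), str_to_bool(True)
#     (True, False, True)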
class Collector(object):
"""
The Collector class is a base class for all metric collectors.
"""
def __init__(self, config=None, handlers=[], name=None, configfile=None):
"""
Create a new instance of the Collector class
"""
# Initialize Logger
self.log = logging.getLogger('diamond')
# Initialize Members
if name is None:
self.name = self.__class__.__name__
else:
self.name = name
self.handlers = handlers
self.last_values = {}
self.configfile = None
self.load_config(configfile, config)
def load_config(self, configfile=None, override_config=None):
"""
Process a configfile, or reload if previously given one.
"""
self.config = configobj.ConfigObj()
# Load in the collector's defaults
if self.get_default_config() is not None:
self.config.merge(self.get_default_config())
if configfile is not None:
self.configfile = os.path.abspath(configfile)
if self.configfile is not None:
config = load_config(self.configfile)
if 'collectors' in config:
if 'default' in config['collectors']:
self.config.merge(config['collectors']['default'])
if self.name in config['collectors']:
self.config.merge(config['collectors'][self.name])
if override_config is not None:
if 'collectors' in override_config:
if 'default' in override_config['collectors']:
self.config.merge(override_config['collectors']['default'])
if self.name in override_config['collectors']:
self.config.merge(override_config['collectors'][self.name])
self.process_config()
def process_config(self):
"""
Intended to put any code that should be run after any config reload
event
"""
if 'byte_unit' in self.config:
if isinstance(self.config['byte_unit'], basestring):
self.config['byte_unit'] = self.config['byte_unit'].split()
if 'enabled' in self.config:
self.config['enabled'] = str_to_bool(self.config['enabled'])
if 'measure_collector_time' in self.config:
self.config['measure_collector_time'] = str_to_bool(
self.config['measure_collector_time'])
# Raise an error if both whitelist and blacklist are specified
if ((self.config.get('metrics_whitelist', None) and
self.config.get('metrics_blacklist', None))):
raise DiamondException(
'Both metrics_whitelist and metrics_blacklist specified ' +
'in file %s' % self.configfile)
if self.config.get('metrics_whitelist', None):
self.config['metrics_whitelist'] = re.compile(
self.config['metrics_whitelist'])
elif self.config.get('metrics_blacklist', None):
self.config['metrics_blacklist'] = re.compile(
self.config['metrics_blacklist'])
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this collector
"""
return {
'enabled': 'Enable collecting these metrics',
'byte_unit': 'Default numeric output(s)',
'measure_collector_time': 'Collect the collector run time in ms',
'metrics_whitelist': 'Regex to match metrics to transmit. ' +
'Mutually exclusive with metrics_blacklist',
'metrics_blacklist': 'Regex to match metrics to block. ' +
'Mutually exclusive with metrics_whitelist',
}
def get_default_config(self):
"""
Return the default config for the collector
"""
return {
# Defaults options for all Collectors
# Uncomment and set to hardcode a hostname for the collector path
            # Keep in mind, periods are separators in graphite
# 'hostname': 'my_custom_hostname',
            # If you prefer to use a different way of calculating the
            # hostname
            # Uncomment and set this to one of these values:
            # fqdn_short = Default. Similar to hostname -s
            # fqdn = Fully qualified domain name with dots replaced by
            #        underscores
            # fqdn_rev = Fully qualified domain name in reverse (com.example.www)
            # uname_short = Similar to uname -n, but only the first part
            # uname_rev = uname -n in reverse (com.example.www)
# 'hostname_method': 'fqdn_short',
# All collectors are disabled by default
'enabled': False,
# Path Prefix
'path_prefix': 'servers',
# Path Prefix for Virtual Machine metrics
'instance_prefix': 'instances',
# Path Suffix
'path_suffix': '',
# Default Poll Interval (seconds)
'interval': 300,
# Default Event TTL (interval multiplier)
'ttl_multiplier': 2,
# Default numeric output
'byte_unit': 'byte',
# Collect the collector run time in ms
'measure_collector_time': False,
# Whitelist of metrics to let through
'metrics_whitelist': None,
            # Blacklist of metrics to block
'metrics_blacklist': None,
}
def get_metric_path(self, name, instance=None):
"""
Get metric path.
Instance indicates that this is a metric for a
virtual machine and should have a different
root prefix.
"""
if 'path' in self.config:
path = self.config['path']
else:
path = self.__class__.__name__
if instance is not None:
if 'instance_prefix' in self.config:
prefix = self.config['instance_prefix']
else:
prefix = 'instances'
if path == '.':
return '.'.join([prefix, instance, name])
else:
return '.'.join([prefix, instance, path, name])
if 'path_prefix' in self.config:
prefix = self.config['path_prefix']
else:
prefix = 'systems'
if 'path_suffix' in self.config:
suffix = self.config['path_suffix']
else:
suffix = None
hostname = get_hostname(self.config)
if hostname is not None:
if prefix:
prefix = ".".join((prefix, hostname))
else:
prefix = hostname
# if there is a suffix, add after the hostname
if suffix:
prefix = '.'.join((prefix, suffix))
is_path_invalid = path == '.' or not path
if is_path_invalid and prefix:
return '.'.join([prefix, name])
elif prefix:
return '.'.join([prefix, path, name])
elif is_path_invalid:
return name
else:
return '.'.join([path, name])
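    # Worked example (values are illustrative): with path_prefix='servers',
    # path='cpu', no path_suffix and a resolved hostname of 'web01',
    # get_metric_path('idle') yields 'servers.web01.cpu.idle'; with
    # instance='vm1' and the default instance_prefix it yields
    # 'instances.vm1.cpu.idle'.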
def get_hostname(self):
return get_hostname(self.config)
def collect(self):
"""
Default collector method
"""
raise NotImplementedError()
def publish(self, name, value, raw_value=None, precision=0,
metric_type='GAUGE', instance=None):
"""
Publish a metric with the given name
"""
# Check whitelist/blacklist
if self.config['metrics_whitelist']:
if not self.config['metrics_whitelist'].match(name):
return
elif self.config['metrics_blacklist']:
if self.config['metrics_blacklist'].match(name):
return
# Get metric Path
path = self.get_metric_path(name, instance=instance)
# Get metric TTL
ttl = float(self.config['interval']) * float(
self.config['ttl_multiplier'])
# Create Metric
try:
metric = Metric(path, value, raw_value=raw_value, timestamp=None,
precision=precision, host=self.get_hostname(),
metric_type=metric_type, ttl=ttl)
except DiamondException:
self.log.error(('Error when creating new Metric: path=%r, '
'value=%r'), path, value)
raise
# Publish Metric
self.publish_metric(metric)
def publish_metric(self, metric):
"""
Publish a Metric object
"""
# Process Metric
for handler in self.handlers:
handler._process(metric)
def publish_gauge(self, name, value, precision=0, instance=None):
return self.publish(name, value, precision=precision,
metric_type='GAUGE', instance=instance)
def publish_counter(self, name, value, precision=0, max_value=0,
time_delta=True, interval=None, allow_negative=False,
instance=None):
raw_value = value
value = self.derivative(name, value, max_value=max_value,
time_delta=time_delta, interval=interval,
allow_negative=allow_negative,
instance=instance)
return self.publish(name, value, raw_value=raw_value,
precision=precision, metric_type='COUNTER',
instance=instance)
def derivative(self, name, new, max_value=0,
time_delta=True, interval=None,
allow_negative=False, instance=None):
"""
Calculate the derivative of the metric.
"""
# Format Metric Path
path = self.get_metric_path(name, instance=instance)
if path in self.last_values:
old = self.last_values[path]
# Check for rollover
if new < old:
old = old - max_value
# Get Change in X (value)
derivative_x = new - old
            # If an interval was passed in, use it rather than the configured one
if interval is None:
interval = float(self.config['interval'])
# Get Change in Y (time)
if time_delta:
derivative_y = interval
else:
derivative_y = 1
result = float(derivative_x) / float(derivative_y)
if result < 0 and not allow_negative:
result = 0
else:
result = 0
# Store Old Value
self.last_values[path] = new
# Return result
return result
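    # Worked arithmetic example (numbers are illustrative): if the last value
    # recorded for a path was 1000 and the new value is 1600, with interval=60
    # passed in and time_delta=True, derivative() returns
    # (1600 - 1000) / 60.0 = 10.0. The first call for a path returns 0 and
    # only records the value.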
def _run(self):
"""
Run the collector unless it's already running
"""
try:
start_time = time.time()
# Collect Data
self.collect()
end_time = time.time()
collector_time = int((end_time - start_time) * 1000)
self.log.debug('Collection took %s ms', collector_time)
if 'measure_collector_time' in self.config:
if self.config['measure_collector_time']:
metric_name = 'collector_time_ms'
metric_value = collector_time
self.publish(metric_name, metric_value)
finally:
# After collector run, invoke a flush
# method on each handler.
for handler in self.handlers:
handler._flush()
def find_binary(self, binary):
"""
Scan and return the first path to a binary that we can find
"""
if os.path.exists(binary):
return binary
# Extract out the filename if we were given a full path
binary_name = os.path.basename(binary)
# Gather $PATH
search_paths = os.environ['PATH'].split(':')
# Extra paths to scan...
        default_paths = [
            '/usr/bin',
            '/bin',
            '/usr/local/bin',
            '/usr/sbin',
            '/sbin',
            '/usr/local/sbin',
        ]
for path in default_paths:
if path not in search_paths:
search_paths.append(path)
for path in search_paths:
if os.path.isdir(path):
filename = os.path.join(path, binary_name)
if os.path.exists(filename):
return filename
return binary
class ProcessCollector(Collector):
"""
Collector with helpers for handling running commands with/without sudo
"""
def get_default_config_help(self):
config_help = super(ProcessCollector, self).get_default_config_help()
config_help.update({
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ProcessCollector, self).get_default_config()
config.update({
'use_sudo': False,
'sudo_cmd': self.find_binary('/usr/bin/sudo'),
})
return config
def run_command(self, args):
if 'bin' not in self.config:
raise Exception('config does not have any binary configured')
if not os.access(self.config['bin'], os.X_OK):
raise Exception('%s is not executable' % self.config['bin'])
try:
command = args
command.insert(0, self.config['bin'])
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
return subprocess.Popen(command,
stdout=subprocess.PIPE).communicate()
except OSError:
self.log.exception("Unable to run %s", command)
return None
|
from homeassistant.components.group import GroupIntegrationRegistry
from homeassistant.const import (
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
@callback
def async_describe_on_off_states(
hass: HomeAssistantType, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
registry.on_off_states(
{STATE_ON, STATE_PAUSED, STATE_PLAYING, STATE_IDLE}, STATE_OFF
)
|
from homeassistant.components.remote import RemoteEntity
from homeassistant.const import STATE_OFF, STATE_ON
from tests.common import MockToggleEntity
ENTITIES = []
def init(empty=False):
"""Initialize the platform with entities."""
global ENTITIES
ENTITIES = (
[]
if empty
else [
MockRemote("TV", STATE_ON),
MockRemote("DVD", STATE_OFF),
MockRemote(None, STATE_OFF),
]
)
async def async_setup_platform(
hass, config, async_add_entities_callback, discovery_info=None
):
"""Return mock entities."""
async_add_entities_callback(ENTITIES)
class MockRemote(MockToggleEntity, RemoteEntity):
"""Mock remote class."""
supported_features = 0
|
import numpy as np
import pandas as pd
from scattertext.termranking import AbsoluteFrequencyRanker
from scattertext.termscoring.RankDifference import RankDifference
class EmptyNeutralCategoriesError(Exception): pass
'''
!!! Need to properly segregate interfaces
'''
class SemioticSquareBase(object):
def get_labels(self):
raise NotImplementedError()
def get_axes(self, **kwargs):
raise NotImplementedError()
def get_lexicons(self, num_terms=10):
raise NotImplementedError()
class SemioticSquare(SemioticSquareBase):
'''
Create a visualization of a semiotic square. Requires Corpus to have
at least three categories.
>>> newsgroups_train = fetch_20newsgroups(subset='train',
... remove=('headers', 'footers', 'quotes'))
>>> vectorizer = CountVectorizer()
>>> X = vectorizer.fit_transform(newsgroups_train.data)
>>> corpus = st.CorpusFromScikit(
... X=X,
... y=newsgroups_train.target,
... feature_vocabulary=vectorizer.vocabulary_,
... category_names=newsgroups_train.target_names,
... raw_texts=newsgroups_train.data
... ).build()
>>> semseq = SemioticSquare(corpus,
... category_a = 'alt.atheism',
... category_b = 'soc.religion.christian',
... neutral_categories = ['talk.religion.misc']
... )
>>> # A simple HTML table
>>> html = SemioticSquareViz(semseq).to_html()
>>> # The table with an interactive scatterplot below it
    >>> html = st.produce_semiotic_square_explorer(semseq,
... x_label='More Atheism, Less Xtnity',
... y_label='General Religious Talk')
'''
def __init__(self,
term_doc_matrix,
category_a,
category_b,
neutral_categories,
labels=None,
term_ranker=AbsoluteFrequencyRanker,
scorer=None):
'''
Parameters
----------
term_doc_matrix : TermDocMatrix
TermDocMatrix (or descendant) which will be used in constructing square.
category_a : str
Category name for term A
category_b : str
Category name for term B (in opposition to A)
neutral_categories : list[str]
List of category names that A and B will be contrasted to. Should be in same domain.
labels : dict
None by default. Labels are dictionary of {'a_and_b': 'A and B', ...} to be shown
above each category.
term_ranker : TermRanker
Class for returning a term-frequency convention_df
scorer : termscoring class, optional
Term scoring class for lexicon mining. Default: `scattertext.termscoring.ScaledFScore`
'''
assert category_a in term_doc_matrix.get_categories()
assert category_b in term_doc_matrix.get_categories()
for category in neutral_categories:
assert category in term_doc_matrix.get_categories()
if len(neutral_categories) == 0:
raise EmptyNeutralCategoriesError()
self.category_a_ = category_a
self.category_b_ = category_b
self.neutral_categories_ = neutral_categories
self._build_square(term_doc_matrix, term_ranker, labels, scorer)
def _build_square(self, term_doc_matrix, term_ranker, labels, scorer):
self.term_doc_matrix_ = term_doc_matrix
self.term_ranker = term_ranker(term_doc_matrix)
self.scorer = RankDifference() \
if scorer is None else scorer
self.axes = self._build_axes(scorer)
self.lexicons = self._build_lexicons()
self._labels = labels
def get_axes(self, scorer=None):
'''
Returns
-------
pd.DataFrame
'''
if scorer:
return self._build_axes(scorer)
return self.axes
def get_lexicons(self, num_terms=10):
'''
Parameters
----------
        num_terms : int
Returns
-------
dict
'''
return {k: v.index[:num_terms]
for k, v in self.lexicons.items()}
def get_labels(self):
a = self._get_default_a_label()
b = self._get_default_b_label()
default_labels = {'a': a,
'not_a': 'Not ' + a,
'b': b,
'not_b': 'Not ' + b,
'a_and_b': a + ' + ' + b,
'not_a_and_not_b': 'Not ' + a + ' + Not ' + b,
'a_and_not_b': a + ' + Not ' + b,
'b_and_not_a': 'Not ' + a + ' + ' + b}
labels = self._labels
if labels is None:
labels = {}
return {name + '_label': labels.get(name, default_labels[name])
for name in default_labels}
def _get_default_b_label(self):
return self.category_b_
def _get_default_a_label(self):
return self.category_a_
def _build_axes(self, scorer):
if scorer is None:
scorer = self.scorer
tdf = self._get_term_doc_count_df()
counts = tdf.sum(axis=1)
tdf['x'] = self._get_x_axis(scorer, tdf)
        tdf['x'] = tdf['x'].fillna(scorer.get_default_score())
        tdf['y'] = self._get_y_axis(scorer, tdf)
        tdf['y'] = tdf['y'].fillna(scorer.get_default_score())
tdf['counts'] = counts
return tdf[['x', 'y', 'counts']]
def _get_x_axis(self, scorer, tdf):
return scorer.get_scores(
tdf[self.category_a_ + ' freq'],
tdf[self.category_b_ + ' freq']
)
def _get_y_axis(self, scorer, tdf):
return scorer.get_scores(
tdf[[t + ' freq' for t in [self.category_a_, self.category_b_]]].sum(axis=1),
tdf[[t + ' freq' for t in self.neutral_categories_]].sum(axis=1)
)
def _get_term_doc_count_df(self):
return (self.term_ranker.get_ranks()
[[t + ' freq' for t in self._get_all_categories()]])
def _get_all_categories(self):
return [self.category_a_, self.category_b_] + self.neutral_categories_
def _build_lexicons(self):
self.lexicons = {}
ax = self.axes
x_max = ax['x'].max()
y_max = ax['y'].max()
x_min = ax['x'].min()
y_min = ax['y'].min()
x_baseline = self._get_x_baseline()
y_baseline = self._get_y_baseline()
def dist(candidates, x_bound, y_bound):
return ((x_bound - candidates['x']) ** 2 + (y_bound - candidates['y']) ** 2).sort_values()
self.lexicons['a'] = dist(ax[(ax['x'] > x_baseline) & (ax['y'] > y_baseline)], x_max, y_max)
self.lexicons['not_a'] = dist(ax[(ax['x'] < x_baseline) & (ax['y'] < y_baseline)], x_min, y_min)
self.lexicons['b'] = dist(ax[(ax['x'] < x_baseline) & (ax['y'] > y_baseline)], x_min, y_max)
self.lexicons['not_b'] = dist(ax[(ax['x'] > x_baseline) & (ax['y'] < y_baseline)], x_max, y_min)
self.lexicons['a_and_b'] = dist(ax[(ax['y'] > y_baseline)], x_baseline, y_max)
self.lexicons['not_a_and_not_b'] = dist(ax[(ax['y'] < y_baseline)], x_baseline, y_min)
self.lexicons['a_and_not_b'] = dist(ax[(ax['x'] > x_baseline)], x_max, y_baseline)
self.lexicons['b_and_not_a'] = dist(ax[(ax['x'] < x_baseline)], x_min, y_baseline)
return self.lexicons
def _get_y_baseline(self):
return self.scorer.get_default_score()
def _get_x_baseline(self):
return self.scorer.get_default_score()
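# Illustrative usage sketch (not part of the original module). Assumes `semseq`
# was constructed as in the class docstring above; the calls show the expected
# shapes of the results rather than verified output.
#
#   >>> semseq.get_labels()['a_label']          # display label for category A
#   'alt.atheism'
#   >>> semseq.get_lexicons(num_terms=5)['a']   # five terms closest to the A corner
#   >>> semseq.get_axes().head()                # per-term x, y scores and counts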
|
import itertools
import os
import re
import sys
def get_characters():
"""Find every Unicode character that is valid in a Python `identifier`_ but
is not matched by the regex ``\\w`` group.
``\\w`` matches some characters that aren't valid in identifiers, but
:meth:`str.isidentifier` will catch that later in lexing.
All start characters are valid continue characters, so we only test for
continue characters.
    .. _identifier: https://docs.python.org/3/reference/lexical_analysis.html#identifiers
"""
for cp in range(sys.maxunicode + 1):
s = chr(cp)
if ("a" + s).isidentifier() and not re.match(r"\w", s):
yield s
def collapse_ranges(data):
"""Given a sorted list of unique characters, generate ranges representing
sequential code points.
Source: https://stackoverflow.com/a/4629241/400617
"""
for _, b in itertools.groupby(enumerate(data), lambda x: ord(x[1]) - x[0]):
b = list(b)
yield b[0][1], b[-1][1]
def build_pattern(ranges):
"""Output the regex pattern for ranges of characters.
One and two character ranges output the individual characters.
"""
out = []
for a, b in ranges:
if a == b: # single char
out.append(a)
elif ord(b) - ord(a) == 1: # two chars, range is redundant
out.append(a)
out.append(b)
else:
out.append(f"{a}-{b}")
return "".join(out)
def main():
"""Build the regex pattern and write it to
``jinja2/_identifier.py``.
"""
pattern = build_pattern(collapse_ranges(get_characters()))
filename = os.path.abspath(
os.path.join(os.path.dirname(__file__), "..", "src", "jinja2", "_identifier.py")
)
with open(filename, "w", encoding="utf8") as f:
f.write("import re\n\n")
f.write("# generated by scripts/generate_identifier_pattern.py\n")
f.write("pattern = re.compile(\n")
f.write(f' r"[\\w{pattern}]+" # noqa: B950\n')
f.write(")\n")
if __name__ == "__main__":
main()
|
import urllib2
import diamond.collector
class ResqueWebCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(ResqueWebCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ResqueWebCollector, self).get_default_config()
config.update({
'host': 'localhost',
'port': 5678,
'path': 'resqueweb',
})
return config
def collect(self):
try:
response = urllib2.urlopen("http://%s:%s/stats.txt" % (
self.config['host'], int(self.config['port'])))
except Exception as e:
            self.log.error("Couldn't connect to resque-web: %s", e)
return {}
for data in response.read().split("\n"):
if data == "":
continue
item, count = data.strip().split("=")
try:
count = int(count)
(item, queue) = item.split(".")
if item == "resque":
if queue[-1] == "+":
self.publish("%s.total" %
queue.replace("+", ""), count)
else:
self.publish("%s.current" % queue, count)
else:
self.publish("queue.%s.current" % queue, count)
except Exception as e:
                self.log.error("Couldn't parse the queue: %s", e)
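# Illustrative note on the expected stats format (an assumption, not taken from
# the original file). Lines in stats.txt are "key=value" pairs; collect() maps
# them to metrics roughly as follows:
#
#   resque.pending=5        -> publish("pending.current", 5)
#   resque.processed+=100   -> publish("processed.total", 100)
#   queue.default=10        -> publish("queue.default.current", 10)
#
# Keys that do not split into exactly two dot-separated parts are logged and
# skipped.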
|
import asyncio
import logging
import aiohttp
import attr
import tp_connected
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, discovery
from homeassistant.helpers.aiohttp_client import async_create_clientsession
_LOGGER = logging.getLogger(__name__)
DOMAIN = "tplink_lte"
DATA_KEY = "tplink_lte"
CONF_NOTIFY = "notify"
_NOTIFY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_RECIPIENT): vol.All(cv.ensure_list, [cv.string]),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NOTIFY): vol.All(
cv.ensure_list, [_NOTIFY_SCHEMA]
),
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
@attr.s
class ModemData:
"""Class for modem state."""
host = attr.ib()
modem = attr.ib()
connected = attr.ib(init=False, default=True)
@attr.s
class LTEData:
"""Shared state."""
websession = attr.ib()
modem_data = attr.ib(init=False, factory=dict)
def get_modem_data(self, config):
"""Get the requested or the only modem_data value."""
if CONF_HOST in config:
return self.modem_data.get(config[CONF_HOST])
if len(self.modem_data) == 1:
return next(iter(self.modem_data.values()))
return None
async def async_setup(hass, config):
"""Set up TP-Link LTE component."""
if DATA_KEY not in hass.data:
websession = async_create_clientsession(
hass, cookie_jar=aiohttp.CookieJar(unsafe=True)
)
hass.data[DATA_KEY] = LTEData(websession)
domain_config = config.get(DOMAIN, [])
tasks = [_setup_lte(hass, conf) for conf in domain_config]
if tasks:
await asyncio.wait(tasks)
for conf in domain_config:
for notify_conf in conf.get(CONF_NOTIFY, []):
hass.async_create_task(
discovery.async_load_platform(
hass, "notify", DOMAIN, notify_conf, config
)
)
return True
async def _setup_lte(hass, lte_config, delay=0):
"""Set up a TP-Link LTE modem."""
host = lte_config[CONF_HOST]
password = lte_config[CONF_PASSWORD]
websession = hass.data[DATA_KEY].websession
modem = tp_connected.Modem(hostname=host, websession=websession)
modem_data = ModemData(host, modem)
try:
await _login(hass, modem_data, password)
except tp_connected.Error:
retry_task = hass.loop.create_task(_retry_login(hass, modem_data, password))
@callback
def cleanup_retry(event):
"""Clean up retry task resources."""
if not retry_task.done():
retry_task.cancel()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup_retry)
async def _login(hass, modem_data, password):
"""Log in and complete setup."""
await modem_data.modem.login(password=password)
modem_data.connected = True
hass.data[DATA_KEY].modem_data[modem_data.host] = modem_data
async def cleanup(event):
"""Clean up resources."""
await modem_data.modem.logout()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, cleanup)
async def _retry_login(hass, modem_data, password):
"""Sleep and retry setup."""
_LOGGER.warning("Could not connect to %s. Will keep trying", modem_data.host)
modem_data.connected = False
delay = 15
while not modem_data.connected:
await asyncio.sleep(delay)
try:
await _login(hass, modem_data, password)
_LOGGER.warning("Connected to %s", modem_data.host)
except tp_connected.Error:
delay = min(2 * delay, 300)
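# Illustrative configuration.yaml sketch (an assumption derived from
# CONFIG_SCHEMA above, not part of the original module). Host, password, and
# recipient values are placeholders.
#
#   tplink_lte:
#     - host: 192.168.1.1
#       password: YOUR_ADMIN_PASSWORD
#       notify:
#         - name: sms
#           recipient: "+15555550123"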
|
from datetime import datetime, timedelta
from functools import partial
import json
import logging
import mimetypes
import os
from TwitterAPI import TwitterAPI
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_USERNAME, HTTP_OK
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_track_point_in_time
_LOGGER = logging.getLogger(__name__)
CONF_CONSUMER_KEY = "consumer_key"
CONF_CONSUMER_SECRET = "consumer_secret"
CONF_ACCESS_TOKEN_SECRET = "access_token_secret"
ATTR_MEDIA = "media"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ACCESS_TOKEN): cv.string,
vol.Required(CONF_ACCESS_TOKEN_SECRET): cv.string,
vol.Required(CONF_CONSUMER_KEY): cv.string,
vol.Required(CONF_CONSUMER_SECRET): cv.string,
vol.Optional(CONF_USERNAME): cv.string,
}
)
def get_service(hass, config, discovery_info=None):
"""Get the Twitter notification service."""
return TwitterNotificationService(
hass,
config[CONF_CONSUMER_KEY],
config[CONF_CONSUMER_SECRET],
config[CONF_ACCESS_TOKEN],
config[CONF_ACCESS_TOKEN_SECRET],
config.get(CONF_USERNAME),
)
class TwitterNotificationService(BaseNotificationService):
"""Implementation of a notification service for the Twitter service."""
def __init__(
self,
hass,
consumer_key,
consumer_secret,
access_token_key,
access_token_secret,
username,
):
"""Initialize the service."""
self.user = username
self.hass = hass
self.api = TwitterAPI(
consumer_key, consumer_secret, access_token_key, access_token_secret
)
def send_message(self, message="", **kwargs):
"""Tweet a message, optionally with media."""
data = kwargs.get(ATTR_DATA)
media = None
if data:
media = data.get(ATTR_MEDIA)
if not self.hass.config.is_allowed_path(media):
_LOGGER.warning("'%s' is not a whitelisted directory", media)
return
callback = partial(self.send_message_callback, message)
self.upload_media_then_callback(callback, media)
    def send_message_callback(self, message, media_id=None):
        """Tweet a message, optionally with media."""
        if self.user:
            user_resp = self.api.request("users/lookup", {"screen_name": self.user})
            if user_resp.status_code != HTTP_OK:
                self.log_error_resp(user_resp)
                return
            user_id = user_resp.json()[0]["id"]
            event = {
                "event": {
                    "type": "message_create",
                    "message_create": {
                        "target": {"recipient_id": user_id},
                        "message_data": {"text": message},
                    },
                }
            }
            resp = self.api.request("direct_messages/events/new", json.dumps(event))
        else:
            resp = self.api.request(
                "statuses/update", {"status": message, "media_ids": media_id}
            )
        if resp.status_code != HTTP_OK:
            self.log_error_resp(resp)
        else:
            _LOGGER.debug("Message posted: %s", resp.json())
def upload_media_then_callback(self, callback, media_path=None):
"""Upload media."""
if not media_path:
return callback()
with open(media_path, "rb") as file:
total_bytes = os.path.getsize(media_path)
(media_category, media_type) = self.media_info(media_path)
resp = self.upload_media_init(media_type, media_category, total_bytes)
            if not HTTP_OK <= resp.status_code < 300:
self.log_error_resp(resp)
return None
media_id = resp.json()["media_id"]
media_id = self.upload_media_chunked(file, total_bytes, media_id)
resp = self.upload_media_finalize(media_id)
            if not HTTP_OK <= resp.status_code < 300:
self.log_error_resp(resp)
return None
if resp.json().get("processing_info") is None:
return callback(media_id)
self.check_status_until_done(media_id, callback)
def media_info(self, media_path):
"""Determine mime type and Twitter media category for given media."""
(media_type, _) = mimetypes.guess_type(media_path)
media_category = self.media_category_for_type(media_type)
_LOGGER.debug(
"media %s is mime type %s and translates to %s",
media_path,
media_type,
media_category,
)
return media_category, media_type
def upload_media_init(self, media_type, media_category, total_bytes):
"""Upload media, INIT phase."""
return self.api.request(
"media/upload",
{
"command": "INIT",
"media_type": media_type,
"media_category": media_category,
"total_bytes": total_bytes,
},
)
def upload_media_chunked(self, file, total_bytes, media_id):
"""Upload media, chunked append."""
segment_id = 0
bytes_sent = 0
while bytes_sent < total_bytes:
chunk = file.read(4 * 1024 * 1024)
resp = self.upload_media_append(chunk, media_id, segment_id)
            if not HTTP_OK <= resp.status_code < 300:
self.log_error_resp_append(resp)
return None
segment_id = segment_id + 1
bytes_sent = file.tell()
self.log_bytes_sent(bytes_sent, total_bytes)
return media_id
def upload_media_append(self, chunk, media_id, segment_id):
"""Upload media, APPEND phase."""
return self.api.request(
"media/upload",
{"command": "APPEND", "media_id": media_id, "segment_index": segment_id},
{"media": chunk},
)
def upload_media_finalize(self, media_id):
"""Upload media, FINALIZE phase."""
return self.api.request(
"media/upload", {"command": "FINALIZE", "media_id": media_id}
)
def check_status_until_done(self, media_id, callback, *args):
"""Upload media, STATUS phase."""
resp = self.api.request(
"media/upload",
{"command": "STATUS", "media_id": media_id},
method_override="GET",
)
if resp.status_code != HTTP_OK:
_LOGGER.error("media processing error: %s", resp.json())
processing_info = resp.json()["processing_info"]
_LOGGER.debug("media processing %s status: %s", media_id, processing_info)
if processing_info["state"] in {"succeeded", "failed"}:
return callback(media_id)
check_after_secs = processing_info["check_after_secs"]
_LOGGER.debug(
"media processing waiting %s seconds to check status", str(check_after_secs)
)
when = datetime.now() + timedelta(seconds=check_after_secs)
myself = partial(self.check_status_until_done, media_id, callback)
async_track_point_in_time(self.hass, myself, when)
@staticmethod
def media_category_for_type(media_type):
"""Determine Twitter media category by mime type."""
if media_type is None:
return None
if media_type.startswith("image/gif"):
return "tweet_gif"
if media_type.startswith("video/"):
return "tweet_video"
if media_type.startswith("image/"):
return "tweet_image"
return None
@staticmethod
def log_bytes_sent(bytes_sent, total_bytes):
"""Log upload progress."""
_LOGGER.debug("%s of %s bytes uploaded", str(bytes_sent), str(total_bytes))
@staticmethod
def log_error_resp(resp):
"""Log error response."""
obj = json.loads(resp.text)
error_message = obj["errors"]
_LOGGER.error("Error %s: %s", resp.status_code, error_message)
@staticmethod
def log_error_resp_append(resp):
"""Log error response, during upload append phase."""
obj = json.loads(resp.text)
error_message = obj["errors"][0]["message"]
error_code = obj["errors"][0]["code"]
_LOGGER.error(
"Error %s: %s (Code %s)", resp.status_code, error_message, error_code
)
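# Illustrative configuration.yaml sketch (an assumption derived from
# PLATFORM_SCHEMA above, not part of the original module). All keys and secrets
# are placeholders; `username` is optional and switches the service from
# posting a tweet to sending a direct message to that user.
#
#   notify:
#     - platform: twitter
#       consumer_key: YOUR_CONSUMER_KEY
#       consumer_secret: YOUR_CONSUMER_SECRET
#       access_token: YOUR_ACCESS_TOKEN
#       access_token_secret: YOUR_ACCESS_TOKEN_SECRET
#       username: "@some_recipient"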
|
import asyncio
import discord
from datetime import datetime
from redbot.core.utils.chat_formatting import pagify
import io
import weakref
from typing import List, Optional
from .common_filters import filter_mass_mentions
_instances = weakref.WeakValueDictionary({})
class TunnelMeta(type):
"""
    Prevent multiple tunnels from being created for the same
    set of places.
"""
def __call__(cls, *args, **kwargs):
lockout_tuple = ((kwargs.get("sender"), kwargs.get("origin")), kwargs.get("recipient"))
if lockout_tuple in _instances:
return _instances[lockout_tuple]
# this is needed because weakvalue dicts can
# change size without warning if an object is discarded
# it can raise a runtime error, so ..
while True:
try:
if not (
any(lockout_tuple[0] == x[0] for x in _instances.keys())
or any(lockout_tuple[1] == x[1] for x in _instances.keys())
):
# if this isn't temporarily stored, the weakref dict
# will discard this before the return statement,
# causing a key error
temp = super(TunnelMeta, cls).__call__(*args, **kwargs)
_instances[lockout_tuple] = temp
return temp
            except RuntimeError:
                # the weakref dict changed size during iteration; try again
                continue
else:
return None
class Tunnel(metaclass=TunnelMeta):
"""
A tunnel interface for messages
This will return None on init if the destination
or source + origin pair is already in use, or the
existing tunnel object if one exists for the designated
parameters
Attributes
----------
sender: `discord.Member`
The person who opened the tunnel
origin: `discord.TextChannel`
The channel in which it was opened
recipient: `discord.User`
The user on the other end of the tunnel
"""
def __init__(
self, *, sender: discord.Member, origin: discord.TextChannel, recipient: discord.User
):
self.sender = sender
self.origin = origin
self.recipient = recipient
self.last_interaction = datetime.utcnow()
async def react_close(self, *, uid: int, message: str = ""):
send_to = self.recipient if uid == self.sender.id else self.origin
closer = next(filter(lambda x: x.id == uid, (self.sender, self.recipient)), None)
await send_to.send(filter_mass_mentions(message.format(closer=closer)))
@property
def members(self):
return self.sender, self.recipient
@property
def minutes_since(self):
        return int((datetime.utcnow() - self.last_interaction).seconds / 60)
@staticmethod
async def message_forwarder(
*,
destination: discord.abc.Messageable,
content: str = None,
embed=None,
files: Optional[List[discord.File]] = None,
) -> List[discord.Message]:
"""
        This does the actual sending. Use this instead of a full tunnel
        if you are using command-initiated reactions instead of persistent
        event-based ones.
Parameters
----------
destination: discord.abc.Messageable
Where to send
content: str
The message content
embed: discord.Embed
The embed to send
files: Optional[List[discord.File]]
A list of files to send.
Returns
-------
List[discord.Message]
The messages sent as a result.
Raises
------
discord.Forbidden
see `discord.abc.Messageable.send`
discord.HTTPException
see `discord.abc.Messageable.send`
"""
rets = []
if content:
for page in pagify(content):
rets.append(await destination.send(page, files=files, embed=embed))
if files:
del files
if embed:
del embed
elif embed or files:
rets.append(await destination.send(files=files, embed=embed))
return rets
@staticmethod
async def files_from_attach(
m: discord.Message, *, use_cached: bool = False, images_only: bool = False
) -> List[discord.File]:
"""
makes a list of file objects from a message
returns an empty list if none, or if the sum of file sizes
is too large for the bot to send
Parameters
---------
m: `discord.Message`
A message to get attachments from
use_cached: `bool`
Whether to use ``proxy_url`` rather than ``url`` when downloading the attachment
images_only: `bool`
Whether only image attachments should be added to returned list
Returns
-------
list of `discord.File`
A list of `discord.File` objects
"""
files = []
max_size = 8 * 1000 * 1000
if m.attachments and sum(a.size for a in m.attachments) <= max_size:
for a in m.attachments:
if images_only and a.height is None:
# if this is None, it's not an image
continue
_fp = io.BytesIO()
try:
await a.save(_fp, use_cached=use_cached)
except discord.HTTPException as e:
# this is required, because animated webp files aren't cached
if not (e.status == 415 and images_only and use_cached):
raise
files.append(discord.File(_fp, filename=a.filename))
return files
# Backwards-compatible typo fix (GH-2496)
files_from_attatch = files_from_attach
async def close_because_disabled(self, close_message: str):
"""
        Sends a message to both ends of the tunnel that the tunnel is now closed.
Parameters
----------
close_message: str
The message to send to both ends of the tunnel.
"""
tasks = [destination.send(close_message) for destination in (self.recipient, self.origin)]
await asyncio.gather(*tasks, return_exceptions=True)
async def communicate(
self, *, message: discord.Message, topic: str = None, skip_message_content: bool = False
):
"""
Forwards a message.
Parameters
----------
message : `discord.Message`
The message to forward
topic : `str`
A string to prepend
skip_message_content : `bool`
If this flag is set, only the topic will be sent
Returns
-------
`int`, `int`
a pair of ints matching the ids of the
message which was forwarded
and the last message the bot sent to do that.
            Useful if waiting for reactions.
Raises
------
discord.Forbidden
            This should only happen if the user's DMs are disabled,
            the bot can't upload at the origin channel,
            or the bot can't add reactions there.
"""
if message.channel == self.origin and message.author == self.sender:
send_to = self.recipient
elif message.author == self.recipient and isinstance(message.channel, discord.DMChannel):
send_to = self.origin
else:
return None
if not skip_message_content:
content = "\n".join((topic, message.content)) if topic else message.content
else:
content = topic
if message.attachments:
attach = await self.files_from_attach(message)
if not attach:
await message.channel.send(
"Could not forward attachments. "
"Total size of attachments in a single "
"message must be less than 8MB."
)
else:
attach = []
rets = await self.message_forwarder(destination=send_to, content=content, files=attach)
await message.add_reaction("\N{WHITE HEAVY CHECK MARK}")
await message.add_reaction("\N{NEGATIVE SQUARED CROSS MARK}")
self.last_interaction = datetime.utcnow()
await rets[-1].add_reaction("\N{NEGATIVE SQUARED CROSS MARK}")
return [rets[-1].id, message.id]
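# Illustrative usage sketch (not part of the original module). Assumes a
# discord.py command context `ctx` and a `user` object; the topic string is a
# placeholder.
#
#   tunnel = Tunnel(sender=ctx.author, origin=ctx.channel, recipient=user)
#   if tunnel is None:
#       ...  # an overlapping tunnel already exists for this sender/origin/recipient
#   else:
#       await tunnel.communicate(message=ctx.message, topic="Mod-mail opened")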
|
import pytest
from marshmallow.exceptions import ValidationError
from lemur.tests.factories import RoleFactory
def test_get_object_attribute():
from lemur.schemas import get_object_attribute
with pytest.raises(ValidationError):
get_object_attribute({})
with pytest.raises(ValidationError):
get_object_attribute([{}], many=True)
with pytest.raises(ValidationError):
get_object_attribute([{}, {"id": 1}], many=True)
with pytest.raises(ValidationError):
get_object_attribute([{}, {"name": "test"}], many=True)
assert get_object_attribute({"name": "test"}) == "name"
assert get_object_attribute({"id": 1}) == "id"
assert get_object_attribute([{"name": "test"}], many=True) == "name"
assert get_object_attribute([{"id": 1}], many=True) == "id"
def test_fetch_objects(session):
from lemur.roles.models import Role
from lemur.schemas import fetch_objects
role = RoleFactory()
role1 = RoleFactory()
session.commit()
data = {"id": role.id}
found_role = fetch_objects(Role, data)
assert found_role == role
data = {"name": role.name}
found_role = fetch_objects(Role, data)
assert found_role == role
data = [{"id": role.id}, {"id": role1.id}]
found_roles = fetch_objects(Role, data, many=True)
assert found_roles == [role, role1]
data = [{"name": role.name}, {"name": role1.name}]
found_roles = fetch_objects(Role, data, many=True)
assert found_roles == [role, role1]
with pytest.raises(ValidationError):
data = [{"name": "blah"}, {"name": role1.name}]
fetch_objects(Role, data, many=True)
with pytest.raises(ValidationError):
data = {"name": "nah"}
fetch_objects(Role, data)
|
import email.parser
import sys
import pkg_resources
from django.conf import settings
from django.core.cache import cache
from django.core.exceptions import ImproperlyConfigured
from django.db import connection
import weblate
from weblate.utils.errors import report_error
from weblate.vcs.git import GitRepository, GitWithGerritRepository, SubversionRepository
from weblate.vcs.mercurial import HgRepository
REQUIRES = [
"Django",
"siphashc",
"Whoosh",
"translate-toolkit",
"lxml",
"Pillow",
"bleach",
"python-dateutil",
"social-auth-core",
"social-auth-app-django",
"django-crispy-forms",
"oauthlib",
"django-compressor",
"djangorestframework",
"django-filter",
"django-appconf",
"user-agents",
"filelock",
"setuptools",
"jellyfish",
"openpyxl",
"celery",
"kombu",
"translation-finder",
"weblate-language-data",
"html2text",
"pycairo",
"pygobject",
"diff-match-patch",
"requests",
"django-redis",
"hiredis",
"sentry_sdk",
"Cython",
"misaka",
"GitPython",
"borgbackup",
"pyparsing",
]
OPTIONAL = [
"psycopg2",
"psycopg2-binary",
"phply",
"chardet",
"ruamel.yaml",
"tesserocr",
"akismet",
"boto3",
"zeep",
"aeidon",
"iniparse",
"mysqlclient",
]
def get_version_module(name, optional=False):
"""Return module object.
On error raises verbose exception with name and URL.
"""
try:
dist = pkg_resources.get_distribution(name)
metadata = email.parser.Parser().parsestr(dist.get_metadata(dist.PKG_INFO))
return (
name,
metadata.get("Home-page"),
pkg_resources.get_distribution(name).version,
)
except pkg_resources.DistributionNotFound:
if optional:
return None
raise ImproperlyConfigured(
"Missing dependency {0}, please install using: pip install {0}".format(name)
)
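# Illustrative note (an assumption, not part of the original module):
# get_version_module("Django") returns a tuple of
# ("Django", "<Home-page from package metadata>", "<installed version>"),
# and raises ImproperlyConfigured when the distribution is missing and
# optional is False.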
def get_optional_versions():
"""Return versions of optional modules."""
result = []
for name in OPTIONAL:
module = get_version_module(name, True)
if module is not None:
result.append(module)
if HgRepository.is_supported():
result.append(
("Mercurial", "https://www.mercurial-scm.org/", HgRepository.get_version())
)
if SubversionRepository.is_supported():
result.append(
(
"git-svn",
"https://git-scm.com/docs/git-svn",
SubversionRepository.get_version(),
)
)
if GitWithGerritRepository.is_supported():
result.append(
(
"git-review",
"https://pypi.org/project/git-review/",
GitWithGerritRepository.get_version(),
)
)
return result
def get_versions():
"""Return list of used versions."""
result = [get_version_module(name) for name in REQUIRES]
result.append(("Python", "https://www.python.org/", sys.version.split()[0]))
try:
result.append(("Git", "https://git-scm.com/", GitRepository.get_version()))
except OSError:
raise ImproperlyConfigured("Failed to run git, please install it.")
return result
def get_db_version():
if connection.vendor == "postgresql":
try:
with connection.cursor() as cursor:
cursor.execute("SHOW server_version")
version = cursor.fetchone()
except RuntimeError:
report_error(cause="PostgreSQL version check")
return None
return (
"PostgreSQL server",
"https://www.postgresql.org/",
version[0].split(" ")[0],
)
try:
with connection.cursor() as cursor:
version = cursor.connection.get_server_info()
except RuntimeError:
report_error(cause="MySQL version check")
return None
return (
f"{connection.display_name} sever",
"https://mariadb.org/"
if connection.mysql_is_mariadb
else "https://www.mysql.com/",
version.split("-", 1)[0],
)
def get_cache_version():
if settings.CACHES["default"]["BACKEND"] == "django_redis.cache.RedisCache":
try:
version = cache.client.get_client().info()["redis_version"]
except RuntimeError:
report_error(cause="Redis version check")
return None
return ("Redis server", "https://redis.io/", version)
return None
def get_db_cache_version():
"""Returns the list of all the Database and Cache version."""
result = []
cache_version = get_cache_version()
if cache_version:
result.append(cache_version)
db_version = get_db_version()
if db_version:
result.append(db_version)
return result
def get_versions_list():
"""Return list with version information summary."""
return (
[("Weblate", "https://weblate.org/", weblate.GIT_VERSION)]
+ get_versions()
+ get_optional_versions()
+ get_db_cache_version()
)
|