from homeassistant.components.binary_sensor import (
DEVICE_CLASS_DOOR,
DEVICE_CLASS_LOCK,
DEVICE_CLASS_POWER,
DEVICE_CLASS_PROBLEM,
BinarySensorEntity,
)
from .account import StarlineAccount, StarlineDevice
from .const import DOMAIN
from .entity import StarlineEntity
SENSOR_TYPES = {
"hbrake": ["Hand Brake", DEVICE_CLASS_POWER],
"hood": ["Hood", DEVICE_CLASS_DOOR],
"trunk": ["Trunk", DEVICE_CLASS_DOOR],
"alarm": ["Alarm", DEVICE_CLASS_PROBLEM],
"door": ["Doors", DEVICE_CLASS_LOCK],
}
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the StarLine sensors."""
account: StarlineAccount = hass.data[DOMAIN][entry.entry_id]
entities = []
for device in account.api.devices.values():
for key, value in SENSOR_TYPES.items():
if key in device.car_state:
sensor = StarlineSensor(account, device, key, *value)
if sensor.is_on is not None:
entities.append(sensor)
async_add_entities(entities)
class StarlineSensor(StarlineEntity, BinarySensorEntity):
"""Representation of a StarLine binary sensor."""
def __init__(
self,
account: StarlineAccount,
device: StarlineDevice,
key: str,
name: str,
device_class: str,
):
"""Initialize sensor."""
super().__init__(account, device, key, name)
self._device_class = device_class
@property
def device_class(self):
"""Return the class of the binary sensor."""
return self._device_class
@property
def is_on(self):
"""Return the state of the binary sensor."""
return self._device.car_state.get(self._key)
|
def is_iterator(obj):
"""Detect if the object provided implements the iterator protocol.
(i.e. like a generator).
This will return False for objects which are iterable,
but not iterators themselves.
"""
from types import GeneratorType
if isinstance(obj, GeneratorType):
return True
elif not hasattr(obj, '__iter__'):
return False
else:
# Types which implement the protocol must return themselves when
# invoking 'iter' upon them.
return iter(obj) is obj
def is_closable_iterator(obj):
"""Detect if the given object is both closable and iterator."""
# Not an iterator.
if not is_iterator(obj):
return False
# A generator - the easiest thing to deal with.
import inspect
if inspect.isgenerator(obj):
return True
# A custom iterator. Look for a close method...
if not (hasattr(obj, 'close') and callable(obj.close)):
return False
# ... which doesn't require any arguments.
try:
inspect.getcallargs(obj.close)
except TypeError:
return False
else:
return True
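# Illustrative usage sketch (not part of the original module): generators satisfy
# both checks, while plain iterables satisfy neither.
#   is_iterator(iter([1, 2]))                  # True  - iter() returns the object itself
#   is_iterator([1, 2])                        # False - iterable, but not an iterator
#   is_closable_iterator(x for x in range(3))  # True  - generators expose a no-arg close()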
class file_generator(object):
"""Yield the given input (a file object) in chunks (default 64k).
(Core)
"""
def __init__(self, input, chunkSize=65536):
"""Initialize file_generator with file ``input`` for chunked access."""
self.input = input
self.chunkSize = chunkSize
def __iter__(self):
"""Return iterator."""
return self
def __next__(self):
"""Return next chunk of file."""
chunk = self.input.read(self.chunkSize)
if chunk:
return chunk
else:
if hasattr(self.input, 'close'):
self.input.close()
raise StopIteration()
next = __next__
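# Usage sketch (illustrative; 'payload.bin' is a hypothetical file): stream a file
# in 8 KiB chunks; the generator closes the file once it is exhausted.
#   for chunk in file_generator(open('payload.bin', 'rb'), chunkSize=8192):
#       pass  # e.g. write each chunk to the response body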
def file_generator_limited(fileobj, count, chunk_size=65536):
"""Yield the given file object in chunks.
    Stops after `count` bytes have been emitted.
Default chunk size is 64kB. (Core)
"""
remaining = count
while remaining > 0:
chunk = fileobj.read(min(chunk_size, remaining))
chunklen = len(chunk)
if chunklen == 0:
return
remaining -= chunklen
yield chunk
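# Usage sketch (illustrative): collect at most the first kilobyte of a file.
#   first_kb = b''.join(file_generator_limited(open('payload.bin', 'rb'), count=1024))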
def set_vary_header(response, header_name):
"""Add a Vary header to a response."""
varies = response.headers.get('Vary', '')
varies = [x.strip() for x in varies.split(',') if x.strip()]
if header_name not in varies:
varies.append(header_name)
response.headers['Vary'] = ', '.join(varies)
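# Behaviour sketch (illustrative): on a response already sending "Vary: Accept-Encoding",
# set_vary_header(response, 'Cookie') produces "Vary: Accept-Encoding, Cookie"; calling it
# again with 'Cookie' leaves the header unchanged.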
|
import asyncio
from datetime import timedelta
import logging
from typing import Any, Dict, List
from aiohttp import ClientConnectionError
from async_timeout import timeout
from pymelcloud import Device, get_devices
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_TOKEN, CONF_USERNAME
from homeassistant.exceptions import ConfigEntryNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.util import Throttle
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
PLATFORMS = ["climate", "sensor", "water_heater"]
CONF_LANGUAGE = "language"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_TOKEN): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Establish connection with MELCloud."""
if DOMAIN not in config:
return True
username = config[DOMAIN][CONF_USERNAME]
token = config[DOMAIN][CONF_TOKEN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={CONF_USERNAME: username, CONF_TOKEN: token},
)
)
return True
async def async_setup_entry(hass: HomeAssistantType, entry: ConfigEntry):
"""Establish connection with MELClooud."""
conf = entry.data
mel_devices = await mel_devices_setup(hass, conf[CONF_TOKEN])
hass.data.setdefault(DOMAIN, {}).update({entry.entry_id: mel_devices})
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, platform)
for platform in PLATFORMS
]
)
hass.data[DOMAIN].pop(config_entry.entry_id)
if not hass.data[DOMAIN]:
hass.data.pop(DOMAIN)
return True
class MelCloudDevice:
"""MELCloud Device instance."""
def __init__(self, device: Device):
"""Construct a device wrapper."""
self.device = device
self.name = device.name
self._available = True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self, **kwargs):
"""Pull the latest data from MELCloud."""
try:
await self.device.update()
self._available = True
except ClientConnectionError:
_LOGGER.warning("Connection failed for %s", self.name)
self._available = False
async def async_set(self, properties: Dict[str, Any]):
"""Write state changes to the MELCloud API."""
try:
await self.device.set(properties)
self._available = True
except ClientConnectionError:
_LOGGER.warning("Connection failed for %s", self.name)
self._available = False
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def device_id(self):
"""Return device ID."""
return self.device.device_id
@property
def building_id(self):
"""Return building ID of the device."""
return self.device.building_id
@property
def device_info(self):
"""Return a device description for device registry."""
_device_info = {
"connections": {(CONNECTION_NETWORK_MAC, self.device.mac)},
"identifiers": {(DOMAIN, f"{self.device.mac}-{self.device.serial}")},
"manufacturer": "Mitsubishi Electric",
"name": self.name,
}
unit_infos = self.device.units
if unit_infos is not None:
_device_info["model"] = ", ".join(
[x["model"] for x in unit_infos if x["model"]]
)
return _device_info
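# Return-shape sketch for mel_devices_setup below (assumption: pymelcloud keys its
# get_devices() result by device-type strings such as "ata" and "atw"):
#   {"ata": [MelCloudDevice(...), ...], "atw": [MelCloudDevice(...), ...]}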
async def mel_devices_setup(hass, token) -> Dict[str, List[MelCloudDevice]]:
"""Query connected devices from MELCloud."""
session = hass.helpers.aiohttp_client.async_get_clientsession()
try:
with timeout(10):
all_devices = await get_devices(
token,
session,
conf_update_interval=timedelta(minutes=5),
device_set_debounce=timedelta(seconds=1),
)
except (asyncio.TimeoutError, ClientConnectionError) as ex:
raise ConfigEntryNotReady() from ex
wrapped_devices = {}
for device_type, devices in all_devices.items():
wrapped_devices[device_type] = [MelCloudDevice(device) for device in devices]
return wrapped_devices
|
import asyncio
from pyairvisual import CloudAPI, NodeSamba
from pyairvisual.errors import InvalidKeyError, NodeProError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_API_KEY,
CONF_IP_ADDRESS,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_PASSWORD,
CONF_SHOW_ON_MAP,
)
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
from . import async_get_geography_id
from .const import ( # pylint: disable=unused-import
CONF_GEOGRAPHIES,
CONF_INTEGRATION_TYPE,
DOMAIN,
INTEGRATION_TYPE_GEOGRAPHY,
INTEGRATION_TYPE_NODE_PRO,
LOGGER,
)
class AirVisualFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle an AirVisual config flow."""
VERSION = 2
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize the config flow."""
self._geo_id = None
self._latitude = None
self._longitude = None
self.api_key_data_schema = vol.Schema({vol.Required(CONF_API_KEY): str})
@property
def geography_schema(self):
"""Return the data schema for the cloud API."""
return self.api_key_data_schema.extend(
{
vol.Required(
CONF_LATITUDE, default=self.hass.config.latitude
): cv.latitude,
vol.Required(
CONF_LONGITUDE, default=self.hass.config.longitude
): cv.longitude,
}
)
@property
def pick_integration_type_schema(self):
"""Return the data schema for picking the integration type."""
return vol.Schema(
{
vol.Required("type"): vol.In(
[INTEGRATION_TYPE_GEOGRAPHY, INTEGRATION_TYPE_NODE_PRO]
)
}
)
@property
def node_pro_schema(self):
"""Return the data schema for a Node/Pro."""
return vol.Schema(
{vol.Required(CONF_IP_ADDRESS): str, vol.Required(CONF_PASSWORD): str}
)
async def _async_set_unique_id(self, unique_id):
"""Set the unique ID of the config flow and abort if it already exists."""
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Define the config flow to handle options."""
return AirVisualOptionsFlowHandler(config_entry)
async def async_step_geography(self, user_input=None):
"""Handle the initialization of the integration via the cloud API."""
if not user_input:
return self.async_show_form(
step_id="geography", data_schema=self.geography_schema
)
self._geo_id = async_get_geography_id(user_input)
await self._async_set_unique_id(self._geo_id)
self._abort_if_unique_id_configured()
# Find older config entries without unique ID:
for entry in self._async_current_entries():
if entry.version != 1:
continue
if any(
self._geo_id == async_get_geography_id(geography)
for geography in entry.data[CONF_GEOGRAPHIES]
):
return self.async_abort(reason="already_configured")
return await self.async_step_geography_finish(
user_input, "geography", self.geography_schema
)
async def async_step_geography_finish(self, user_input, error_step, error_schema):
"""Validate a Cloud API key."""
websession = aiohttp_client.async_get_clientsession(self.hass)
cloud_api = CloudAPI(user_input[CONF_API_KEY], session=websession)
# If this is the first (and only the first) time we've seen this API key, check
# that it's valid:
valid_keys = self.hass.data.setdefault("airvisual_checked_api_keys", set())
valid_keys_lock = self.hass.data.setdefault(
"airvisual_checked_api_keys_lock", asyncio.Lock()
)
async with valid_keys_lock:
if user_input[CONF_API_KEY] not in valid_keys:
try:
await cloud_api.air_quality.nearest_city()
except InvalidKeyError:
return self.async_show_form(
step_id=error_step,
data_schema=error_schema,
errors={CONF_API_KEY: "invalid_api_key"},
)
valid_keys.add(user_input[CONF_API_KEY])
existing_entry = await self.async_set_unique_id(self._geo_id)
if existing_entry:
self.hass.config_entries.async_update_entry(existing_entry, data=user_input)
return self.async_abort(reason="reauth_successful")
return self.async_create_entry(
title=f"Cloud API ({self._geo_id})",
data={**user_input, CONF_INTEGRATION_TYPE: INTEGRATION_TYPE_GEOGRAPHY},
)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_geography(import_config)
async def async_step_node_pro(self, user_input=None):
"""Handle the initialization of the integration with a Node/Pro."""
if not user_input:
return self.async_show_form(
step_id="node_pro", data_schema=self.node_pro_schema
)
await self._async_set_unique_id(user_input[CONF_IP_ADDRESS])
node = NodeSamba(user_input[CONF_IP_ADDRESS], user_input[CONF_PASSWORD])
try:
await node.async_connect()
except NodeProError as err:
LOGGER.error("Error connecting to Node/Pro unit: %s", err)
return self.async_show_form(
step_id="node_pro",
data_schema=self.node_pro_schema,
errors={CONF_IP_ADDRESS: "cannot_connect"},
)
await node.async_disconnect()
return self.async_create_entry(
title=f"Node/Pro ({user_input[CONF_IP_ADDRESS]})",
data={**user_input, CONF_INTEGRATION_TYPE: INTEGRATION_TYPE_NODE_PRO},
)
async def async_step_reauth(self, data):
"""Handle configuration by re-auth."""
self._geo_id = async_get_geography_id(data)
self._latitude = data[CONF_LATITUDE]
self._longitude = data[CONF_LONGITUDE]
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(self, user_input=None):
"""Handle re-auth completion."""
if not user_input:
return self.async_show_form(
step_id="reauth_confirm", data_schema=self.api_key_data_schema
)
conf = {
CONF_API_KEY: user_input[CONF_API_KEY],
CONF_LATITUDE: self._latitude,
CONF_LONGITUDE: self._longitude,
CONF_INTEGRATION_TYPE: INTEGRATION_TYPE_GEOGRAPHY,
}
return await self.async_step_geography_finish(
conf, "reauth_confirm", self.api_key_data_schema
)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return self.async_show_form(
step_id="user", data_schema=self.pick_integration_type_schema
)
if user_input["type"] == INTEGRATION_TYPE_GEOGRAPHY:
return await self.async_step_geography()
return await self.async_step_node_pro()
class AirVisualOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle an AirVisual options flow."""
def __init__(self, config_entry):
"""Initialize."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Required(
CONF_SHOW_ON_MAP,
default=self.config_entry.options.get(CONF_SHOW_ON_MAP),
): bool
}
),
)
|
from .base import FieldType
from dedupe import predicates
from simplecosine.cosine import CosineSetSimilarity
class SetType(FieldType):
type = "Set"
_predicate_functions = (predicates.wholeSetPredicate,
predicates.commonSetElementPredicate,
predicates.lastSetElementPredicate,
predicates.commonTwoElementsPredicate,
predicates.commonThreeElementsPredicate,
predicates.magnitudeOfCardinality,
predicates.firstSetElementPredicate)
_index_predicates = (predicates.TfidfSetSearchPredicate,
predicates.TfidfSetCanopyPredicate)
_index_thresholds = (0.2, 0.4, 0.6, 0.8)
def __init__(self, definition):
super(SetType, self).__init__(definition)
if 'corpus' not in definition:
definition['corpus'] = []
self.comparator = CosineSetSimilarity(definition['corpus'])
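# Usage sketch (illustrative; the field name is hypothetical): a dedupe variable
# definition of type "Set"; when no 'corpus' is supplied, __init__ above falls back
# to an empty corpus for the cosine comparator.
#   SetType({'field': 'keywords', 'type': 'Set'})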
|
import os
from radicale import pathutils
class StorageMoveMixin:
def move(self, item, to_collection, to_href):
if not pathutils.is_safe_filesystem_path_component(to_href):
raise pathutils.UnsafePathError(to_href)
os.replace(
pathutils.path_to_filesystem(
item.collection._filesystem_path, item.href),
pathutils.path_to_filesystem(
to_collection._filesystem_path, to_href))
self._sync_directory(to_collection._filesystem_path)
if item.collection._filesystem_path != to_collection._filesystem_path:
self._sync_directory(item.collection._filesystem_path)
# Move the item cache entry
cache_folder = os.path.join(item.collection._filesystem_path,
".Radicale.cache", "item")
to_cache_folder = os.path.join(to_collection._filesystem_path,
".Radicale.cache", "item")
self._makedirs_synced(to_cache_folder)
try:
os.replace(os.path.join(cache_folder, item.href),
os.path.join(to_cache_folder, to_href))
except FileNotFoundError:
pass
else:
self._makedirs_synced(to_cache_folder)
if cache_folder != to_cache_folder:
self._makedirs_synced(cache_folder)
# Track the change
to_collection._update_history_etag(to_href, item)
item.collection._update_history_etag(item.href, None)
to_collection._clean_history()
if item.collection._filesystem_path != to_collection._filesystem_path:
item.collection._clean_history()
|
from __future__ import print_function
#by Siddharth Dushantha
#31 July 2017
from sys import argv
import json
file = '{"alias":"Define or print aliases","cat":"Print contents of file","cd":"Change current directory","clear":"Clear console","cp":"Copy file","crypt":"File encryption using AES in CBC mode","cowsay":"Generates ASCII picture of a cow with a message","curl":"Transfer from an URL","cut":"Cut out selection portions of each line of a file","du":"Summarize disk usage of the set of FILEs, recursively for directories","echo":"Output text to console","edit":"Open any text type files in Pythonista editor","find":"Powerful file searching tool","fg":"Bring a background job to foreground","ftpserver":"A simple FTP server","gci":"Interface to Pythons built-in garbage collector","git":"Git client","grep":"search contents of file(s)","head":"Display first lines of a file","httpserver":"A simple HTTP server with upload function","jobs":"List all jobs that are currently running","kill":"Terminate a running job","ls":"List files","mail":"Send emails with optional file attachment","man":"Show help message (docstring) of a given command","mc":"Easily work with multiple filesystems (e.g. local and FTP) synchronously","md5sum":"Print or check MD5 checksums","mkdir":"Create directory","monkeylord":"Manage monkey patches with the goal to make Pythonista more viable","mv":"Move file","openin":"Show the open in dialog to open a file in external apps","pbcopy":"Copy to iOS clipboard","pbpaste":"Paste from iOS clipboard","pip":"Search, download, install, update and uninstall pure Python packages from PyPI","printenv":"List environment variables","printhex":"Print hexadecimal dump of the given file","pwd":"Print current directory","python":"Run python scripts or modules","quicklook":"iOS quick look for files of known types","rm":"delete (remove) file","scp":"Copy files from/to remote servers","selfupdate":"Update StaSh from its GitHub repo","sha1sum":"Print of check SHA1 checksums","sha256sum":"Print of check SHA256 checksums","sort":"Sort a list, also see unique","source":"Evaluate a script in the current environment","ssh":"SSH client to either execute a command or spawn an interactive session on remote servers","ssh-keygen":"Generate RSA/DSA SSH Keys","stashconf":"Change StaSh configuration on the fly","tail":"Print last lines of a FILE","tar":"Manipulate archive files","touch":"Update timestamp of the given file or create it if not exist","uniq":"Remove duplicates from list, also see sort","unzip":"Unzip file","version":"Show StaSh installation and version information","wc":"Line, word, character counting","wget":"Get data from the net","whatis":"Search manual page databases","which":"Find the exact path to a command script","wol":"Wake on LAN using MAC address for launching a sleeping system","xargs":"Command constructing and executing utility","zip":"Zip file"}'
def main():
data = json.loads(file)
try:
print(command + ' - ' + data[command])
except KeyError:
print('whatis: nothing appropriate')
command = argv[1]
main()
|
import numpy as np
from six.moves import zip_longest
import unittest
import chainer
from chainer.iterators import SerialIterator
from chainer import testing
from chainermn import create_communicator
from chainercv.utils import apply_to_iterator
from chainercv.utils.testing import attr
@testing.parameterize(*testing.product({
'multi_in_values': [False, True],
'multi_out_values': [False, True],
'with_rest_values': [False, True],
'with_hook': [False, True],
}))
class TestApplyToIterator(unittest.TestCase):
def setUp(self):
if self.multi_in_values:
self.n_input = 2
else:
self.n_input = 1
in_values_expect = []
for _ in range(self.n_input):
in_value = []
for _ in range(5):
H, W = np.random.randint(8, 16, size=2)
in_value.append(np.random.randint(0, 256, size=(3, H, W)))
in_values_expect.append(in_value)
self.in_values_expect = tuple(in_values_expect)
if self.multi_out_values:
def func(*in_values):
n_sample = len(in_values[0])
return (
[np.random.uniform(size=(10, 4)) for _ in range(n_sample)],
[np.random.uniform(size=10) for _ in range(n_sample)],
[np.random.uniform(size=10) for _ in range(n_sample)])
self.n_output = 3
else:
def func(*in_values):
n_sample = len(in_values[0])
return [np.random.uniform(size=(48, 64))
for _ in range(n_sample)]
self.n_output = 1
self.func = func
if self.with_rest_values:
strs = ['a', 'bc', 'def', 'ghij', 'klmno']
nums = [0, 1, 2, 3, 4]
arrays = [np.random.uniform(size=10) for _ in range(5)]
self.rest_values_expect = (strs, nums, arrays)
self.n_rest = 3
self.dataset = chainer.datasets.TupleDataset(
*(self.in_values_expect + self.rest_values_expect))
else:
self.rest_values_expect = ()
self.n_rest = 0
            self.dataset = list(zip(*self.in_values_expect))
self.iterator = SerialIterator(
self.dataset, 2, repeat=False, shuffle=False)
if self.with_hook:
def hook(in_values, out_values, rest_values):
n_sample = len(in_values[0])
self.assertEqual(len(in_values), self.n_input)
for in_vals in in_values:
self.assertEqual(len(in_vals), n_sample)
self.assertEqual(len(out_values), self.n_output)
for out_vals in out_values:
self.assertEqual(len(out_vals), n_sample)
self.assertEqual(len(rest_values), self.n_rest)
for rest_vals in rest_values:
self.assertEqual(len(rest_vals), n_sample)
self.hook = hook
else:
self.hook = None
def _check_apply_to_iterator(self, comm=None):
values = apply_to_iterator(
self.func, self.iterator, n_input=self.n_input,
hook=self.hook, comm=comm)
if comm is not None and not comm.rank == 0:
self.assertEqual(values, None)
return
in_values, out_values, rest_values = values
self.assertEqual(len(in_values), self.n_input)
for in_vals, in_vals_expect in \
zip_longest(in_values, self.in_values_expect):
for in_val, in_val_expect in zip_longest(in_vals, in_vals_expect):
np.testing.assert_equal(in_val, in_val_expect)
self.assertEqual(len(out_values), self.n_output)
for out_vals in out_values:
self.assertEqual(len(list(out_vals)), len(self.dataset))
self.assertEqual(len(rest_values), self.n_rest)
for rest_vals, rest_vals_expect in \
zip_longest(rest_values, self.rest_values_expect):
for rest_val, rest_val_expect in \
zip_longest(rest_vals, rest_vals_expect):
if isinstance(rest_val_expect, np.ndarray):
np.testing.assert_equal(rest_val, rest_val_expect)
else:
self.assertEqual(rest_val, rest_val_expect)
def test_apply_to_iterator(self):
self._check_apply_to_iterator()
@attr.mpi
def test_apply_to_iterator_with_comm(self):
comm = create_communicator('naive')
self._check_apply_to_iterator(comm)
class TestApplyToIteratorWithInfiniteIterator(unittest.TestCase):
def test_apply_to_iterator_with_infinite_iterator(self):
def func(*in_values):
n_sample = len(in_values[0])
return [np.random.uniform(size=(48, 64)) for _ in range(n_sample)]
dataset = []
for _ in range(5):
H, W = np.random.randint(8, 16, size=2)
dataset.append(np.random.randint(0, 256, size=(3, H, W)))
iterator = SerialIterator(dataset, 2)
in_values, out_values, rest_values = apply_to_iterator(func, iterator)
for _ in range(10):
next(in_values[0])
for _ in range(10):
next(out_values[0])
testing.run_module(__name__, __file__)
|
import pytest
from molecule.model import schema_v2
@pytest.fixture
def _model_scenario_section_data():
return {
'scenario': {
'name': 'foo',
'check_sequence': [
'foo',
],
'converge_sequence': [
'foo',
],
'create_sequence': [
'foo',
],
'destroy_sequence': [
'foo',
],
'test_sequence': [
'foo',
],
}
}
@pytest.mark.parametrize(
'_config', ['_model_scenario_section_data'], indirect=True)
def test_scenario(_config):
assert {} == schema_v2.validate(_config)
@pytest.fixture
def _model_scenario_errors_section_data():
return {
'scenario': {
'name': int(),
'check_sequence': [
int(),
],
'converge_sequence': [
int(),
],
'create_sequence': [
int(),
],
'destroy_sequence': [
int(),
],
'test_sequence': [
int(),
],
}
}
@pytest.mark.parametrize(
'_config', ['_model_scenario_errors_section_data'], indirect=True)
def test_scenario_has_errors(_config):
x = {
'scenario': [{
'converge_sequence': [{
0: ['must be of string type'],
}],
'name': ['must be of string type'],
'check_sequence': [{
0: ['must be of string type'],
}],
'create_sequence': [{
0: ['must be of string type'],
}],
'destroy_sequence': [{
0: ['must be of string type'],
}],
'test_sequence': [{
0: ['must be of string type'],
}]
}]
}
assert x == schema_v2.validate(_config)
|
import pytest
from unittest.mock import patch
from case import mock
from kombu import Connection
class test_get_manager:
@mock.mask_modules('pyrabbit')
def test_without_pyrabbit(self):
with pytest.raises(ImportError):
Connection('amqp://').get_manager()
@mock.module_exists('pyrabbit')
def test_with_pyrabbit(self):
with patch('pyrabbit.Client', create=True) as Client:
manager = Connection('amqp://').get_manager()
assert manager is not None
Client.assert_called_with(
'localhost:15672', 'guest', 'guest',
)
@mock.module_exists('pyrabbit')
def test_transport_options(self):
with patch('pyrabbit.Client', create=True) as Client:
manager = Connection('amqp://', transport_options={
'manager_hostname': 'admin.mq.vandelay.com',
'manager_port': 808,
'manager_userid': 'george',
'manager_password': 'bosco',
}).get_manager()
assert manager is not None
Client.assert_called_with(
'admin.mq.vandelay.com:808', 'george', 'bosco',
)
|
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.cloud.prefs import STORAGE_KEY, CloudPreferences
from tests.async_mock import patch
async def test_set_username(hass):
"""Test we clear config if we set different username."""
prefs = CloudPreferences(hass)
await prefs.async_initialize()
assert prefs.google_enabled
await prefs.async_update(google_enabled=False)
assert not prefs.google_enabled
await prefs.async_set_username("new-username")
assert prefs.google_enabled
async def test_set_username_migration(hass):
"""Test we not clear config if we had no username."""
prefs = CloudPreferences(hass)
with patch.object(prefs, "_empty_config", return_value=prefs._empty_config(None)):
await prefs.async_initialize()
assert prefs.google_enabled
await prefs.async_update(google_enabled=False)
assert not prefs.google_enabled
await prefs.async_set_username("new-username")
assert not prefs.google_enabled
async def test_load_invalid_cloud_user(hass, hass_storage):
"""Test loading cloud user with invalid storage."""
hass_storage[STORAGE_KEY] = {"version": 1, "data": {"cloud_user": "non-existing"}}
prefs = CloudPreferences(hass)
await prefs.async_initialize()
cloud_user_id = await prefs.get_cloud_user()
assert cloud_user_id != "non-existing"
cloud_user = await hass.auth.async_get_user(
hass_storage[STORAGE_KEY]["data"]["cloud_user"]
)
assert cloud_user
assert cloud_user.groups[0].id == GROUP_ID_ADMIN
async def test_setup_remove_cloud_user(hass, hass_storage):
"""Test creating and removing cloud user."""
hass_storage[STORAGE_KEY] = {"version": 1, "data": {"cloud_user": None}}
prefs = CloudPreferences(hass)
await prefs.async_initialize()
await prefs.async_set_username("user1")
cloud_user = await hass.auth.async_get_user(await prefs.get_cloud_user())
assert cloud_user
assert cloud_user.groups[0].id == GROUP_ID_ADMIN
await prefs.async_set_username("user2")
cloud_user2 = await hass.auth.async_get_user(await prefs.get_cloud_user())
assert cloud_user2
assert cloud_user2.groups[0].id == GROUP_ID_ADMIN
assert cloud_user2.id != cloud_user.id
|
import pkgutil
from scattertext.Common import DEFAULT_D3_URL, DEFAULT_D3_SCALE_CHROMATIC, \
DEFAULT_HTML_VIZ_FILE_NAME, AUTOCOMPLETE_CSS_FILE_NAME, SEARCH_FORM_FILE_NAME
from scattertext.viz.ScatterplotStructure import InvalidProtocolException
class ExternalJSUtilts:
@staticmethod
def ensure_valid_protocol(protocol):
if protocol not in ('https', 'http'):
raise InvalidProtocolException(
"Invalid protocol: %s. Protocol must be either http or https." % (protocol))
class D3URLs:
def __init__(self, d3_url=None, d3_scale_chromatic_url=None):
self.d3_url = d3_url
self.d3_scale_chromatic_url = d3_scale_chromatic_url
def get_d3_url(self):
return DEFAULT_D3_URL if self.d3_url is None else self.d3_url
def get_d3_scale_chromatic_url(self):
return DEFAULT_D3_SCALE_CHROMATIC if self.d3_scale_chromatic_url is None else self.d3_scale_chromatic_url
class PackedDataUtils:
@staticmethod
def full_content_of_default_html_template():
return PackedDataUtils.get_packaged_html_template_content(DEFAULT_HTML_VIZ_FILE_NAME)
@staticmethod
def full_content_of_default_autocomplete_css():
return PackedDataUtils.get_packaged_html_template_content(AUTOCOMPLETE_CSS_FILE_NAME)
@staticmethod
def full_content_of_default_search_form(input_id):
return PackedDataUtils.get_packaged_html_template_content(SEARCH_FORM_FILE_NAME).replace('{{id}}', input_id)
@staticmethod
def full_content_of_javascript_files():
return PackedDataUtils._load_script_file_names([
'rectangle-holder.js', # 'range-tree.js',
'main.js',
'autocomplete_definition.js'
])
@staticmethod
def _load_script_file_names(script_names):
return '; \n \n '.join([PackedDataUtils.get_packaged_script_content(script_name)
for script_name in script_names])
@staticmethod
def javascript_post_build_viz(input_id, plot_interface_name):
return (PackedDataUtils
._load_script_file_names(['autocomplete_call.js'])
.replace('{{id}}', input_id)
.replace('__plotInterface__', plot_interface_name))
@staticmethod
def get_packaged_script_content(file_name):
return pkgutil.get_data('scattertext',
'data/viz/scripts/' + file_name).decode('utf-8')
@staticmethod
def get_packaged_html_template_content(file_name):
return pkgutil.get_data('scattertext',
'data/viz/' + file_name).decode('utf-8')
class BasicHTMLFromScatterplotStructure(object):
def __init__(self, scatterplot_structure):
'''
:param scatterplot_structure: ScatterplotStructure
'''
self.scatterplot_structure = scatterplot_structure
def to_html(self,
protocol='http',
d3_url=None,
d3_scale_chromatic_url=None,
html_base=None,
search_input_id='searchInput'):
'''
Parameters
----------
protocol : str
'http' or 'https' for including external urls
        d3_url : str
            None by default. The url (or path) of
            d3, to be inserted into <script src="..."/>
            By default, this is `DEFAULT_D3_URL` declared in `scattertext.Common`.
        d3_scale_chromatic_url : str
            None by default.
            URL of d3_scale_chromatic_url, to be inserted into <script src="..."/>
            By default, this is `DEFAULT_D3_SCALE_CHROMATIC` declared in `scattertext.Common`.
html_base : str
None by default. HTML of semiotic square to be inserted above plot.
search_input_id : str
Id of search input. Default is 'searchInput'.
Returns
-------
str, the html file representation
'''
d3_url_struct = D3URLs(d3_url, d3_scale_chromatic_url)
ExternalJSUtilts.ensure_valid_protocol(protocol)
javascript_to_insert = '\n'.join([
PackedDataUtils.full_content_of_javascript_files(),
self.scatterplot_structure._visualization_data.to_javascript(),
self.scatterplot_structure.get_js_to_call_build_scatterplot(),
PackedDataUtils.javascript_post_build_viz(search_input_id, 'plotInterface'),
])
html_template = (PackedDataUtils.full_content_of_default_html_template()
if html_base is None
else self._format_html_base(html_base))
html_content = (
html_template
.replace('<!-- INSERT SCRIPT -->', javascript_to_insert, 1)
.replace('<!-- INSERT SEARCH FORM -->',
PackedDataUtils.full_content_of_default_search_form(search_input_id), 1)
.replace('<!--D3URL-->', d3_url_struct.get_d3_url(), 1)
.replace('<!--D3SCALECHROMATIC-->', d3_url_struct.get_d3_scale_chromatic_url())
# .replace('<!-- INSERT D3 -->', self._get_packaged_file_content('d3.min.js'), 1)
)
'''
if html_base is not None:
html_file = html_file.replace('<!-- INSERT SEMIOTIC SQUARE -->',
html_base)
'''
extra_libs = ''
if self.scatterplot_structure._save_svg_button:
# extra_libs = '<script src="https://cdn.rawgit.com/edeno/d3-save-svg/gh-pages/assets/d3-save-svg.min.js" charset="utf-8"></script>'
extra_libs = ''
autocomplete_css = PackedDataUtils.full_content_of_default_autocomplete_css()
html_content = (html_content
.replace('/***AUTOCOMPLETE CSS***/', autocomplete_css, 1)
.replace('<!-- EXTRA LIBS -->', extra_libs, 1)
.replace('http://', protocol + '://'))
return html_content
def _format_html_base(self, html_base):
return (html_base.replace('{width}', str(self.scatterplot_structure._width_in_pixels))
.replace('{height}', str(self.scatterplot_structure._height_in_pixels)))
|
from homeassistant.components.air_quality import AirQualityEntity
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Air Quality."""
async_add_entities(
[DemoAirQuality("Home", 14, 23, 100), DemoAirQuality("Office", 4, 16, None)]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoAirQuality(AirQualityEntity):
"""Representation of Air Quality data."""
def __init__(self, name, pm_2_5, pm_10, n2o):
"""Initialize the Demo Air Quality."""
self._name = name
self._pm_2_5 = pm_2_5
self._pm_10 = pm_10
self._n2o = n2o
@property
def name(self):
"""Return the name of the sensor."""
return f"Demo Air Quality {self._name}"
@property
def should_poll(self):
"""No polling needed for Demo Air Quality."""
return False
@property
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self._pm_2_5
@property
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self._pm_10
@property
def nitrogen_oxide(self):
"""Return the nitrogen oxide (N2O) level."""
return self._n2o
@property
def attribution(self):
"""Return the attribution."""
return "Powered by Home Assistant"
|
import pytest
from homeassistant.components.tag import DOMAIN, TAGS, async_scan_tag
from homeassistant.helpers import collection
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.async_mock import patch
@pytest.fixture
def storage_setup(hass, hass_storage):
"""Storage setup."""
async def _storage(items=None):
if items is None:
hass_storage[DOMAIN] = {
"key": DOMAIN,
"version": 1,
"data": {"items": [{"id": "test tag"}]},
}
else:
hass_storage[DOMAIN] = items
config = {DOMAIN: {}}
return await async_setup_component(hass, DOMAIN, config)
return _storage
async def test_ws_list(hass, hass_ws_client, storage_setup):
"""Test listing tags via WS."""
assert await storage_setup()
client = await hass_ws_client(hass)
await client.send_json({"id": 6, "type": f"{DOMAIN}/list"})
resp = await client.receive_json()
assert resp["success"]
result = {item["id"]: item for item in resp["result"]}
assert len(result) == 1
assert "test tag" in result
async def test_ws_update(hass, hass_ws_client, storage_setup):
"""Test listing tags via WS."""
assert await storage_setup()
await async_scan_tag(hass, "test tag", "some_scanner")
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 6,
"type": f"{DOMAIN}/update",
f"{DOMAIN}_id": "test tag",
"name": "New name",
}
)
resp = await client.receive_json()
assert resp["success"]
item = resp["result"]
assert item["id"] == "test tag"
assert item["name"] == "New name"
async def test_tag_scanned(hass, hass_ws_client, storage_setup):
"""Test scanning tags."""
assert await storage_setup()
client = await hass_ws_client(hass)
await client.send_json({"id": 6, "type": f"{DOMAIN}/list"})
resp = await client.receive_json()
assert resp["success"]
result = {item["id"]: item for item in resp["result"]}
assert len(result) == 1
assert "test tag" in result
now = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=now):
await async_scan_tag(hass, "new tag", "some_scanner")
await client.send_json({"id": 7, "type": f"{DOMAIN}/list"})
resp = await client.receive_json()
assert resp["success"]
result = {item["id"]: item for item in resp["result"]}
assert len(result) == 2
assert "test tag" in result
assert "new tag" in result
assert result["new tag"]["last_scanned"] == now.isoformat()
def track_changes(coll: collection.ObservableCollection):
"""Create helper to track changes in a collection."""
changes = []
async def listener(*args):
changes.append(args)
coll.async_add_listener(listener)
return changes
async def test_tag_id_exists(hass, hass_ws_client, storage_setup):
"""Test scanning tags."""
assert await storage_setup()
changes = track_changes(hass.data[DOMAIN][TAGS])
client = await hass_ws_client(hass)
await client.send_json({"id": 2, "type": f"{DOMAIN}/create", "tag_id": "test tag"})
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "unknown_error"
assert len(changes) == 0
|
from aioshelly import Block
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from . import ShellyDeviceWrapper
from .const import DATA_CONFIG_ENTRY, DOMAIN
from .entity import ShellyBlockEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up switches for device."""
wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id]
# In roller mode the relay blocks exist but do not contain required info
if (
wrapper.model in ["SHSW-21", "SHSW-25"]
and wrapper.device.settings["mode"] != "relay"
):
return
relay_blocks = [block for block in wrapper.device.blocks if block.type == "relay"]
if not relay_blocks:
return
async_add_entities(RelaySwitch(wrapper, block) for block in relay_blocks)
class RelaySwitch(ShellyBlockEntity, SwitchEntity):
"""Switch that controls a relay block on Shelly devices."""
def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:
"""Initialize relay switch."""
super().__init__(wrapper, block)
self.control_result = None
@property
def is_on(self) -> bool:
"""If switch is on."""
if self.control_result:
return self.control_result["ison"]
return self.block.output
async def async_turn_on(self, **kwargs):
"""Turn on relay."""
self.control_result = await self.block.set_state(turn="on")
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn off relay."""
self.control_result = await self.block.set_state(turn="off")
self.async_write_ha_state()
@callback
def _update_callback(self):
"""When device updates, clear control result that overrides state."""
self.control_result = None
super()._update_callback()
|
from requests import ConnectTimeout, HTTPError
from homeassistant.components.canary.const import (
CONF_FFMPEG_ARGUMENTS,
DEFAULT_FFMPEG_ARGUMENTS,
DEFAULT_TIMEOUT,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_TIMEOUT
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from homeassistant.setup import async_setup_component
from . import USER_INPUT, _patch_async_setup, _patch_async_setup_entry, init_integration
from tests.async_mock import patch
async def test_user_form(hass, canary_config_flow):
"""Test we get the user initiated form."""
await async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with _patch_async_setup() as mock_setup, _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "test-username"
assert result["data"] == {**USER_INPUT, CONF_TIMEOUT: DEFAULT_TIMEOUT}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_user_form_cannot_connect(hass, canary_config_flow):
"""Test we handle errors that should trigger the cannot connect error."""
canary_config_flow.side_effect = HTTPError()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
canary_config_flow.side_effect = ConnectTimeout()
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {"base": "cannot_connect"}
async def test_user_form_unexpected_exception(hass, canary_config_flow):
"""Test we handle unexpected exception."""
canary_config_flow.side_effect = Exception()
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
USER_INPUT,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
async def test_user_form_single_instance_allowed(hass, canary_config_flow):
"""Test that configuring more than one instance is rejected."""
await init_integration(hass, skip_entry_setup=True)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=USER_INPUT,
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_options_flow(hass, canary):
"""Test updating options."""
with patch("homeassistant.components.canary.PLATFORMS", []):
entry = await init_integration(hass)
assert entry.options[CONF_FFMPEG_ARGUMENTS] == DEFAULT_FFMPEG_ARGUMENTS
assert entry.options[CONF_TIMEOUT] == DEFAULT_TIMEOUT
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
with _patch_async_setup(), _patch_async_setup_entry():
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={CONF_FFMPEG_ARGUMENTS: "-v", CONF_TIMEOUT: 7},
)
await hass.async_block_till_done()
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_FFMPEG_ARGUMENTS] == "-v"
assert result["data"][CONF_TIMEOUT] == 7
|
from collections import namedtuple
from contextlib import contextmanager
from datetime import datetime
from homeassistant.components import jewish_calendar
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
_LatLng = namedtuple("_LatLng", ["lat", "lng"])
HDATE_DEFAULT_ALTITUDE = 754
NYC_LATLNG = _LatLng(40.7128, -74.0060)
JERUSALEM_LATLNG = _LatLng(31.778, 35.235)
ORIG_TIME_ZONE = dt_util.DEFAULT_TIME_ZONE
def teardown_module():
"""Reset time zone."""
dt_util.set_default_time_zone(ORIG_TIME_ZONE)
def make_nyc_test_params(dtime, results, havdalah_offset=0):
"""Make test params for NYC."""
if isinstance(results, dict):
time_zone = dt_util.get_time_zone("America/New_York")
results = {
key: time_zone.localize(value) if isinstance(value, datetime) else value
for key, value in results.items()
}
return (
dtime,
jewish_calendar.CANDLE_LIGHT_DEFAULT,
havdalah_offset,
True,
"America/New_York",
NYC_LATLNG.lat,
NYC_LATLNG.lng,
results,
)
def make_jerusalem_test_params(dtime, results, havdalah_offset=0):
"""Make test params for Jerusalem."""
if isinstance(results, dict):
time_zone = dt_util.get_time_zone("Asia/Jerusalem")
results = {
key: time_zone.localize(value) if isinstance(value, datetime) else value
for key, value in results.items()
}
return (
dtime,
jewish_calendar.CANDLE_LIGHT_DEFAULT,
havdalah_offset,
False,
"Asia/Jerusalem",
JERUSALEM_LATLNG.lat,
JERUSALEM_LATLNG.lng,
results,
)
@contextmanager
def alter_time(local_time):
"""Manage multiple time mocks."""
utc_time = dt_util.as_utc(local_time)
patch1 = patch("homeassistant.util.dt.utcnow", return_value=utc_time)
patch2 = patch("homeassistant.util.dt.now", return_value=local_time)
with patch1, patch2:
yield
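# Usage sketch (illustrative): freeze both dt_util.utcnow() and dt_util.now() during a test.
#   with alter_time(dt_util.now()):
#       ...  # code under test sees the patched time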
|
from pylatex import Document, Section
from pylatex.utils import escape_latex
def test():
doc = Document("utils_escape_latex")
section = Section('Escape LaTeX characters test')
text = escape_latex('''\
& (ampersand)
% (percent)
$ (dollar)
# (number)
_ (underscore)
{ (left curly brace)
} (right curly brace)
~ (tilde)
^ (caret)
\\ (backslash)
--- (three minuses)
a\xA0a (non breaking space)
[ (left bracket)
] (right bracket)
''')
section.append(text)
doc.append(section)
doc.generate_pdf()
if __name__ == '__main__':
test()
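# Expected behaviour sketch (illustrative input): escape_latex('100% & counting')
# returns the string 100\% \& counting, i.e. a literal backslash is placed before
# each special character so the characters listed above render verbatim in the PDF.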
|
import logging
from clx.xms.api import MtBatchTextSmsResult
from clx.xms.client import Client
from clx.xms.exceptions import (
ErrorResponseException,
NotFoundException,
UnauthorizedException,
UnexpectedResponseException,
)
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_API_KEY, CONF_SENDER
import homeassistant.helpers.config_validation as cv
DOMAIN = "sinch"
CONF_SERVICE_PLAN_ID = "service_plan_id"
CONF_DEFAULT_RECIPIENTS = "default_recipients"
ATTR_SENDER = CONF_SENDER
DEFAULT_SENDER = "Home Assistant"
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_SERVICE_PLAN_ID): cv.string,
vol.Optional(CONF_SENDER, default=DEFAULT_SENDER): cv.string,
vol.Optional(CONF_DEFAULT_RECIPIENTS, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
def get_service(hass, config, discovery_info=None):
"""Get the Sinch notification service."""
return SinchNotificationService(config)
class SinchNotificationService(BaseNotificationService):
"""Send Notifications to Sinch SMS recipients."""
def __init__(self, config):
"""Initialize the service."""
self.default_recipients = config[CONF_DEFAULT_RECIPIENTS]
self.sender = config[CONF_SENDER]
self.client = Client(config[CONF_SERVICE_PLAN_ID], config[CONF_API_KEY])
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
targets = kwargs.get(ATTR_TARGET, self.default_recipients)
data = kwargs.get(ATTR_DATA) or {}
clx_args = {ATTR_MESSAGE: message, ATTR_SENDER: self.sender}
if ATTR_SENDER in data:
clx_args[ATTR_SENDER] = data[ATTR_SENDER]
if not targets:
_LOGGER.error("At least 1 target is required")
return
try:
for target in targets:
result: MtBatchTextSmsResult = self.client.create_text_message(
clx_args[ATTR_SENDER], target, clx_args[ATTR_MESSAGE]
)
batch_id = result.batch_id
_LOGGER.debug(
'Successfully sent SMS to "%s" (batch_id: %s)', target, batch_id
)
except ErrorResponseException as ex:
_LOGGER.error(
"Caught ErrorResponseException. Response code: %s (%s)",
ex.error_code,
ex,
)
except NotFoundException as ex:
_LOGGER.error("Caught NotFoundException (request URL: %s)", ex.url)
except UnauthorizedException as ex:
_LOGGER.error(
"Caught UnauthorizedException (service plan: %s)", ex.service_plan_id
)
except UnexpectedResponseException as ex:
_LOGGER.error("Caught UnexpectedResponseException: %s", ex)
|
from celery.exceptions import SoftTimeLimitExceeded, TimeLimitExceeded
from urllib.parse import urlparse
from httpobs.conf import (RETRIEVER_CONNECT_TIMEOUT,
RETRIEVER_CORS_ORIGIN,
RETRIEVER_READ_TIMEOUT,
RETRIEVER_USER_AGENT)
from httpobs.scanner.utils import parse_http_equiv_headers
import logging
import requests
# Disable the requests InsecureRequestWarning -- we will track certificate errors manually when
# verification is disabled. Also disable requests errors at levels lower than CRITICAL, see:
# https://github.com/celery/celery/issues/3633 for crashy details
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
logging.getLogger('requests').setLevel(logging.CRITICAL)
# MIME types for HTML requests
HTML_TYPES = ('text/html', 'application/xhtml+xml')
# Maximum timeout for all GET requests for anything but the TLS Observatory
# The default ConnectionTimeout is something like 75 seconds, which means that things like
# tiles can take ~600s to time out, since they have 8 DNS entries. Setting it lower
# should hopefully keep requests from taking forever
TIMEOUT = (RETRIEVER_CONNECT_TIMEOUT, RETRIEVER_READ_TIMEOUT)
# Create a session, returning the session and the HTTP response in a dictionary
# Don't create the session if it can't connect and retrieve the root of the website
# TODO: Allow people to scan a subdirectory instead of using '/' as the default path?
def __create_session(url: str, **kwargs) -> dict:
s = requests.Session()
# Allow certificate verification to be disabled on the initial request, which means that sites won't get
# penalized on things like HSTS, even for self-signed certificates
s.verify = kwargs['verify']
# Add the headers to the session
if kwargs['headers']:
s.headers.update(kwargs['headers'])
# Set all the cookies and force them to be sent only over HTTPS; this might change in the future
if kwargs['cookies']:
s.cookies.update(kwargs['cookies'])
for cookie in s.cookies:
cookie.secure = True
# Override the User-Agent; some sites (like twitter) don't send the CSP header unless you have a modern
# user agent
s.headers.update({
'User-Agent': RETRIEVER_USER_AGENT,
})
try:
r = s.get(url, timeout=TIMEOUT)
# No tls errors
r.verified = True
# Let celery exceptions percolate upward
except (SoftTimeLimitExceeded, TimeLimitExceeded):
raise
# We can try again if there's an SSL error, making sure to note it in the session
except requests.exceptions.SSLError:
try:
r = s.get(url, timeout=TIMEOUT, verify=False)
r.verified = False
except (KeyboardInterrupt, SystemExit):
raise
except:
r = None
s = None
except (KeyboardInterrupt, SystemExit):
raise
except:
r = None
s = None
# Store the domain name and scheme in the session
if r is not None and s is not None:
s.url = urlparse(r.url)
return {'session': s, 'response': r}
def __get(session, relative_path='/', headers=None, cookies=None):
if not headers:
headers = {}
if not cookies:
cookies = {}
try:
# TODO: limit the maximum size of the response, to keep malicious site operators from killing us
# TODO: Perhaps we can naively do it for now by simply setting a timeout?
# TODO: catch TLS errors instead of just setting it to None?
return session.get(session.url.scheme + '://' + session.url.netloc + relative_path,
headers=headers,
cookies=cookies,
timeout=TIMEOUT)
# Let celery exceptions percolate upward
except (SoftTimeLimitExceeded, TimeLimitExceeded):
raise
except (KeyboardInterrupt, SystemExit):
raise
except:
return None
def __get_page_text(response: requests.Response, force: bool = False) -> str:
if response is None:
return None
elif response.status_code == 200 or force: # Some pages we want to get the page text even with non-200s
# A quick and dirty check to make sure that somebody's 404 page didn't actually return 200 with html
ext = (response.history[0].url if response.history else response.url).split('.')[-1]
if response.headers.get('Content-Type', '') in HTML_TYPES and ext in ('json', 'txt', 'xml'):
return None
return response.text
else:
return None
def retrieve_all(hostname, **kwargs):
kwargs['cookies'] = kwargs.get('cookies', {}) # HTTP cookies to send, instead of from the database
kwargs['headers'] = kwargs.get('headers', {}) # HTTP headers to send, instead of from the database
    # This way of doing it keeps the urls tidy even if it makes the code ugly
kwargs['http_port'] = ':' + str(kwargs.get('http_port', '')) if 'http_port' in kwargs else ''
kwargs['https_port'] = ':' + str(kwargs.get('https_port', '')) if 'https_port' in kwargs else ''
kwargs['path'] = kwargs.get('path', '/')
kwargs['verify'] = kwargs.get('verify', True)
retrievals = {
'hostname': hostname,
'resources': {
},
'responses': {
'auto': None, # whichever of 'http' or 'https' actually works, with 'https' as higher priority
'cors': None, # CORS preflight test
'http': None,
'https': None,
},
'session': None,
}
# The list of resources to get
resources = (
'/clientaccesspolicy.xml',
'/contribute.json',
'/crossdomain.xml',
'/robots.txt'
)
# Create some reusable sessions, one for HTTP and one for HTTPS
http_session = __create_session('http://' + hostname + kwargs['http_port'] + kwargs['path'], **kwargs)
https_session = __create_session('https://' + hostname + kwargs['https_port'] + kwargs['path'], **kwargs)
# If neither one works, then the site just can't be loaded
if http_session['session'] is None and https_session['session'] is None:
return retrievals
else:
# Store the HTTP only and HTTPS only responses (some things can only be retrieved over one or the other)
retrievals['responses']['http'] = http_session['response']
retrievals['responses']['https'] = https_session['response']
if https_session['session'] is not None:
retrievals['responses']['auto'] = https_session['response']
retrievals['session'] = https_session['session']
else:
retrievals['responses']['auto'] = http_session['response']
retrievals['session'] = http_session['session']
# Store the contents of the "base" page
retrievals['resources']['__path__'] = __get_page_text(retrievals['responses']['auto'], force=True)
# Do a CORS preflight request
retrievals['responses']['cors'] = __get(retrievals['session'],
kwargs['path'],
headers={'Origin': RETRIEVER_CORS_ORIGIN})
# Store all the files we retrieve
for resource in resources:
resp = __get(retrievals['session'], resource)
retrievals['resources'][resource] = __get_page_text(resp)
# Parse out the HTTP meta-equiv headers
if (retrievals['responses']['auto'].headers.get('Content-Type', '').split(';')[0] in HTML_TYPES and
retrievals['resources']['__path__']):
retrievals['responses']['auto'].http_equiv = parse_http_equiv_headers(retrievals['resources']['__path__'])
else:
retrievals['responses']['auto'].http_equiv = {}
return retrievals
|
import argparse
import asyncio
import txaio
txaio.use_asyncio()
import autobahn
from autobahn.websocket.util import parse_url
from autobahn.asyncio.websocket import WebSocketServerProtocol, \
WebSocketServerFactory
from autobahn.websocket.compress import PerMessageDeflateOffer, \
PerMessageDeflateOfferAccept
# FIXME: streaming mode API is currently incompatible with permessage-deflate!
USE_STREAMING_TESTEE = False
class TesteeServerProtocol(WebSocketServerProtocol):
"""
A message-based WebSocket echo server.
"""
def onMessage(self, payload, isBinary):
self.sendMessage(payload, isBinary)
class StreamingTesteeServerProtocol(WebSocketServerProtocol):
"""
A streaming WebSocket echo server.
"""
def onMessageBegin(self, isBinary):
WebSocketServerProtocol.onMessageBegin(self, isBinary)
self.beginMessage(isBinary)
def onMessageFrameBegin(self, length):
WebSocketServerProtocol.onMessageFrameBegin(self, length)
self.beginMessageFrame(length)
def onMessageFrameData(self, payload):
self.sendMessageFrameData(payload)
def onMessageFrameEnd(self):
pass
def onMessageEnd(self):
self.endMessage()
class TesteeServerFactory(WebSocketServerFactory):
log = txaio.make_logger()
if USE_STREAMING_TESTEE:
protocol = StreamingTesteeServerProtocol
else:
protocol = TesteeServerProtocol
def __init__(self, url):
testee_ident = autobahn.asyncio.__ident__
self.log.info("Testee identification: {testee_ident}", testee_ident=testee_ident)
WebSocketServerFactory.__init__(self, url, server=testee_ident)
self.setProtocolOptions(failByDrop=False) # spec conformance
# self.setProtocolOptions(utf8validateIncoming = False)
if USE_STREAMING_TESTEE:
self.setProtocolOptions(failByDrop=True) # needed for streaming mode
else:
# enable permessage-deflate WebSocket protocol extension
def accept(offers):
for offer in offers:
if isinstance(offer, PerMessageDeflateOffer):
return PerMessageDeflateOfferAccept(offer)
self.setProtocolOptions(perMessageCompressionAccept=accept)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Autobahn Testee Server (asyncio)')
parser.add_argument('--url', dest='url', type=str, default='ws://127.0.0.1:9001', help='The WebSocket fuzzing server URL.')
parser.add_argument('--loglevel', dest='loglevel', type=str, default='info', help='Log level, eg "info" or "debug".')
options = parser.parse_args()
txaio.start_logging(level=options.loglevel)
factory = TesteeServerFactory(options.url)
_, _, port, _, _, _ = parse_url(options.url)
loop = asyncio.get_event_loop()
coro = loop.create_server(factory, port=port)
server = loop.run_until_complete(coro)
try:
loop.run_forever()
except KeyboardInterrupt:
pass
finally:
server.close()
loop.close()
|
import diamond.collector
import re
class MdStatCollector(diamond.collector.Collector):
MDSTAT_PATH = '/proc/mdstat'
def get_default_config_help(self):
config_help = super(MdStatCollector, self).get_default_config_help()
return config_help
def get_default_config(self):
config = super(MdStatCollector, self).get_default_config()
config.update({
'path': 'mdstat',
})
return config
def process_config(self):
super(MdStatCollector, self).process_config()
def collect(self):
"""Publish all mdstat metrics."""
def traverse(d, metric_name=''):
"""
Traverse the given nested dict using depth-first search.
If a value is reached it will be published with a metric name
consisting of the hierarchically concatenated keys
of its branch.
"""
for key, value in d.iteritems():
if isinstance(value, dict):
if metric_name == '':
metric_name_next = key
else:
metric_name_next = metric_name + '.' + key
traverse(value, metric_name_next)
else:
metric_name_finished = metric_name + '.' + key
self.publish_gauge(
name=metric_name_finished,
value=value,
precision=1
)
md_state = self._parse_mdstat()
traverse(md_state, '')
def _parse_mdstat(self):
"""
Parse /proc/mdstat.
File format:
The first line is the "Personalities" line.
It won't get parsed since it contains only string metrics.
The second to second-last lines contain raid array information.
The last line contains the unused devices.
It won't get parsed since it contains only string metrics.
:return: Parsed information
:rtype: dict
"""
arrays = {}
mdstat_array_blocks = ''
try:
with open(self.MDSTAT_PATH, 'r') as f:
lines = f.readlines()
except IOError as err:
self.log.exception(
'Error opening {mdstat_path} for reading: {err}'.format(
mdstat_path=self.MDSTAT_PATH,
err=err
)
)
return arrays
# concatenate all lines except the first and last one
for line in lines[1:-1]:
mdstat_array_blocks += line
if mdstat_array_blocks == '':
# no md arrays found
return arrays
for block in mdstat_array_blocks.split('\n\n'):
md_device_name = self._parse_device_name(block)
if md_device_name:
# this block begins with a md device name
# 'member_count' and 'status' are mandatory keys
arrays[md_device_name] = {
'member_count': self._parse_array_member_state(block),
'status': self._parse_array_status(block),
}
# 'bitmap' and 'recovery' are optional keys
bitmap_status = self._parse_array_bitmap(block)
recovery_status = self._parse_array_recovery(block)
if bitmap_status:
arrays[md_device_name].update(
{'bitmap': bitmap_status}
)
if recovery_status:
arrays[md_device_name].update(
{'recovery': recovery_status}
)
return arrays
def _parse_device_name(self, block):
"""
Parse for a md device name.
>>> block = 'md0 : active raid1 sdd2[0] sdb2[2](S) sdc2[1]\n'
>>> ' 100171776 blocks super 1.2 [2/2] [UU]\n'
>>> ' bitmap: 1/1 pages [4KB], 65536KB chunk\n\n'
>>> print _parse_device_name(block)
md0
:return: parsed device name
:rtype: string
"""
return block.split('\n')[0].split(' : ')[0]
def _parse_array_member_state(self, block):
"""
        Parse the state of the md array members.
>>> block = 'md0 : active raid1 sdd2[0] sdb2[2](S) sdc2[1]\n'
>>> ' 100171776 blocks super 1.2 [2/2] [UU]\n'
>>> ' bitmap: 1/1 pages [4KB], 65536KB chunk\n\n'
>>> print _parse_array_member_state(block)
{
'active': 2,
'faulty': 0,
'spare': 1
}
:return: dictionary of states with according count
:rtype: dict
"""
members = block.split('\n')[0].split(' : ')[1].split(' ')[2:]
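        # member tokens look like 'sdd2[0]' or 'sdb2[2](S)'; a trailing
        # '(S)' marks a spare and '(F)' a faulty device, otherwise active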
device_regexp = re.compile(
'^(?P<member_name>.*)'
'\[(?P<member_role_number>\d*)\]'
'\(?(?P<member_state>[FS])?\)?$'
)
ret = {
'active': 0,
'faulty': 0,
'spare': 0
}
for member in members:
member_dict = device_regexp.match(member).groupdict()
if member_dict['member_state'] == 'S':
ret['spare'] += 1
elif member_dict['member_state'] == 'F':
ret['faulty'] += 1
else:
ret['active'] += 1
return ret
def _parse_array_status(self, block):
"""
Parse the status of the md array.
>>> block = 'md0 : active raid1 sdd2[0] sdb2[2](S) sdc2[1]\n'
>>> ' 100171776 blocks super 1.2 [2/2] [UU]\n'
>>> ' bitmap: 1/1 pages [4KB], 65536KB chunk\n\n'
>>> print _parse_array_status(block)
{
'total_members': '2',
'actual_members': '2',
'superblock_version': '1.2',
'blocks': '100171776'
}
:return: dictionary of status information
:rtype: dict
"""
array_status_regexp = re.compile(
'^ *(?P<blocks>\d*) blocks '
'(?:super (?P<superblock_version>\d\.\d) )?'
'(?:level (?P<raid_level>\d), '
'(?P<chunk_size>\d*)k chunk, '
'algorithm (?P<algorithm>\d) )?'
'(?:\[(?P<total_members>\d*)/(?P<actual_members>\d*)\])?'
'(?:(?P<rounding_factor>\d*)k rounding)?.*$'
)
array_status_dict = \
array_status_regexp.match(block.split('\n')[1]).groupdict()
array_status_dict_sanitizied = {}
# convert all non None values to float
for key, value in array_status_dict.iteritems():
if not value:
continue
if key == 'superblock_version':
array_status_dict_sanitizied[key] = float(value)
else:
array_status_dict_sanitizied[key] = int(value)
if 'chunk_size' in array_status_dict_sanitizied:
# convert chunk size from kBytes to Bytes
array_status_dict_sanitizied['chunk_size'] *= 1024
if 'rounding_factor' in array_status_dict_sanitizied:
# convert rounding_factor from kBytes to Bytes
array_status_dict_sanitizied['rounding_factor'] *= 1024
return array_status_dict_sanitizied
def _parse_array_bitmap(self, block):
"""
Parse the bitmap status of the md array.
>>> block = 'md0 : active raid1 sdd2[0] sdb2[2](S) sdc2[1]\n'
>>> ' 100171776 blocks super 1.2 [2/2] [UU]\n'
>>> ' bitmap: 1/1 pages [4KB], 65536KB chunk\n\n'
>>> print _parse_array_bitmap(block)
{
'total_pages': '1',
'allocated_pages': '1',
'page_size': 4096,
'chunk_size': 67108864
}
:return: dictionary of bitmap status information
:rtype: dict
"""
array_bitmap_regexp = re.compile(
'^ *bitmap: (?P<allocated_pages>[0-9]*)/'
'(?P<total_pages>[0-9]*) pages '
'\[(?P<page_size>[0-9]*)KB\], '
'(?P<chunk_size>[0-9]*)KB chunk.*$',
re.MULTILINE
)
regexp_res = array_bitmap_regexp.search(block)
        # bitmap information is optional in mdstat
if not regexp_res:
return None
array_bitmap_dict = regexp_res.groupdict()
array_bitmap_dict_sanitizied = {}
# convert all values to int
for key, value in array_bitmap_dict.iteritems():
if not value:
continue
array_bitmap_dict_sanitizied[key] = int(value)
# convert page_size to bytes
array_bitmap_dict_sanitizied['page_size'] *= 1024
# convert chunk_size to bytes
array_bitmap_dict_sanitizied['chunk_size'] *= 1024
        return array_bitmap_dict_sanitizied
def _parse_array_recovery(self, block):
"""
Parse the recovery progress of the md array.
>>> block = 'md0 : active raid1 sdd2[0] sdb2[2](S) sdc2[1]\n'
>>> ' 100171776 blocks super 1.2 [2/2] [UU]\n'
>>> ' [===================>.] recovery = 99.5% '
>>> '(102272/102272) finish=13.37min speed=102272K/sec\n'
>>> '\n'
>>> print _parse_array_recovery(block)
{
'percent': '99.5',
'speed': 104726528,
'remaining_time': 802199
}
:return: dictionary of recovery progress status information
:rtype: dict
"""
array_recovery_regexp = re.compile(
'^ *\[.*\] *recovery = (?P<percent>\d*\.?\d*)%'
' \(\d*/\d*\) finish=(?P<remaining_time>\d*\.?\d*)min '
'speed=(?P<speed>\d*)K/sec$',
re.MULTILINE
)
regexp_res = array_recovery_regexp.search(block)
        # recovery information is optional in mdstat
if not regexp_res:
return None
array_recovery_dict = regexp_res.groupdict()
array_recovery_dict['percent'] = \
float(array_recovery_dict['percent'])
        # convert speed from KiB/sec to bytes/sec
array_recovery_dict['speed'] = \
int(array_recovery_dict['speed']) * 1024
# convert minutes to milliseconds
array_recovery_dict['remaining_time'] = \
int(float(array_recovery_dict['remaining_time'])*60*1000)
return array_recovery_dict
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock, call
from mock import patch
from diamond.collector import Collector
from aerospike import AerospikeCollector
##########################################################################
class TestAerospike27Collector(CollectorTestCase):
def bootStrap(self, custom_config={}):
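        """Create an AerospikeCollector instance with the given config overrides."""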
config = get_collector_config('AerospikeCollector', custom_config)
self.collector = AerospikeCollector(config, None)
def test_import(self):
self.assertTrue(AerospikeCollector)
@patch.object(Collector, 'publish')
@patch.object(Collector, 'publish_gauge')
@patch.object(Collector, 'publish_counter')
def test_latency(self, publish_counter_mock,
publish_gauge_mock, publish_mock):
mockTelnet = Mock(**{
'read_until.side_effect':
[
"2.7.1\n",
self.getFixture('v2.7/latency').getvalue(),
]
})
patch_Telnet = patch('telnetlib.Telnet', Mock(return_value=mockTelnet))
patch_Telnet.start()
self.bootStrap(custom_config={
'latency': True,
'statistics': False,
'throughput': False,
'namespaces': False,
})
self.collector.collect()
patch_Telnet.stop()
mockTelnet.read_until.assert_any_call('\n', 1)
metrics = {
'latency.reads.1ms': 1.75,
'latency.reads.8ms': 0.67,
'latency.reads.64ms': 0.36,
'latency.reads.ops': 54839.0,
'latency.writes_master.1ms': 11.69,
'latency.writes_master.8ms': 2.54,
'latency.writes_master.64ms': 2.06,
'latency.writes_master.ops': 8620.1,
'latency.proxy.1ms': 1.35,
'latency.proxy.8ms': 6.88,
'latency.proxy.64ms': 1.37,
'latency.proxy.ops': 320.1,
'latency.udf.1ms': 1.47,
'latency.udf.8ms': 8.64,
'latency.udf.64ms': 4.11,
'latency.udf.ops': 140.33,
'latency.query.1ms': 3.44,
'latency.query.8ms': 2.74,
'latency.query.64ms': 1.04,
'latency.query.ops': 84.12,
}
self.assertPublishedMany(
[publish_mock,
publish_gauge_mock,
publish_counter_mock,
],
metrics,
)
@patch.object(Collector, 'publish')
@patch.object(Collector, 'publish_gauge')
@patch.object(Collector, 'publish_counter')
def test_statistics(self, publish_counter_mock,
publish_gauge_mock, publish_mock):
mockTelnet = Mock(**{
'read_until.side_effect':
[
"2.7.1\n",
self.getFixture('v2.7/statistics').getvalue()
]
})
patch_Telnet = patch('telnetlib.Telnet', Mock(return_value=mockTelnet))
patch_Telnet.start()
self.bootStrap(custom_config={
'latency': False,
'statistics': True,
'throughput': False,
'namespaces': False,
})
self.collector.collect()
patch_Telnet.stop()
mockTelnet.read_until.assert_any_call('\n', 1)
metrics = {
'statistics.total-bytes-memory': 345744867328,
'statistics.total-bytes-disk': 8801921007616,
'statistics.used-bytes-memory': 126136727552,
'statistics.used-bytes-disk': 2457236328960,
'statistics.free-pct-memory': 63,
'statistics.free-pct-disk': 72,
'statistics.data-used-bytes-memory': 0,
'statistics.index-used-bytes-memory': 126136727552,
'statistics.cluster_size': 13,
'statistics.objects': 1970886368,
'statistics.client_connections': 3014,
}
self.assertPublishedMany(
[publish_mock,
publish_gauge_mock,
publish_counter_mock,
],
metrics,
)
@patch.object(Collector, 'publish')
@patch.object(Collector, 'publish_gauge')
@patch.object(Collector, 'publish_counter')
def test_throughput(self, publish_counter_mock,
publish_gauge_mock, publish_mock):
mockTelnet = Mock(**{
'read_until.side_effect':
[
"2.7.1",
self.getFixture('v2.7/throughput').getvalue(),
]
})
patch_Telnet = patch('telnetlib.Telnet', Mock(return_value=mockTelnet))
patch_Telnet.start()
self.bootStrap(custom_config={
'latency': False,
'statistics': False,
'throughput': True,
'namespaces': False,
})
self.collector.collect()
patch_Telnet.stop()
mockTelnet.read_until.assert_any_call('\n', 1)
metrics = {
'throughput.reads': 54563.9,
'throughput.writes_master': 9031.0,
'throughput.proxy': 884.3,
'throughput.udf': 42.3,
'throughput.query': 64.3,
}
self.assertPublishedMany(
[publish_mock,
publish_gauge_mock,
publish_counter_mock,
],
metrics,
)
@patch.object(Collector, 'publish')
@patch.object(Collector, 'publish_gauge')
@patch.object(Collector, 'publish_counter')
def test_namespaces(self, publish_counter_mock,
publish_gauge_mock, publish_mock):
mockTelnet = Mock(**{
'read_until.side_effect':
[
"2.7.1\n",
self.getFixture('v2.7/namespaces').getvalue(),
self.getFixture('v2.7/namespace_foo').getvalue(),
self.getFixture('v2.7/namespace_bar').getvalue(),
],
})
patch_Telnet = patch('telnetlib.Telnet', Mock(return_value=mockTelnet))
patch_Telnet.start()
self.bootStrap(custom_config={
'latency': False,
'statistics': False,
'throughput': False,
'namespaces': True,
})
self.collector.collect()
patch_Telnet.stop()
mockTelnet.read_until.assert_any_call('\n', 1)
mockTelnet.write.assert_has_calls(
[
call('version\n'),
call('namespaces\n'),
call('namespace/foo\n'),
call('namespace/bar\n'),
],
)
metrics = {
'namespace.foo.objects': 1841012935,
'namespace.foo.evicted-objects': 0,
'namespace.foo.expired-objects': 167836937,
'namespace.foo.used-bytes-memory': 117824827840,
'namespace.foo.data-used-bytes-memory': 0,
'namespace.foo.index-used-bytes-memory': 117824827840,
'namespace.foo.used-bytes-disk': 2401223781248,
'namespace.foo.memory-size': 343597383680,
'namespace.foo.total-bytes-memory': 343597383680,
'namespace.foo.total-bytes-disk': 8801921007616,
'namespace.foo.migrate-tx-partitions-initial': 651,
'namespace.foo.migrate-tx-partitions-remaining': 651,
'namespace.foo.migrate-rx-partitions-initial': 651,
'namespace.foo.migrate-rx-partitions-remaining': 651,
'namespace.foo.available_pct': 60,
}
self.assertPublishedMany(
[publish_mock,
publish_gauge_mock,
publish_counter_mock,
],
metrics,
)
def test_namespace_whitelist(self):
mockTelnet = Mock(**{
'read_until.side_effect':
[
"2.7.1\n",
self.getFixture('v2.7/namespaces').getvalue(),
self.getFixture('v2.7/namespace_bar').getvalue(),
],
})
patch_Telnet = patch('telnetlib.Telnet', Mock(return_value=mockTelnet))
patch_Telnet.start()
self.bootStrap(custom_config={
'latency': False,
'statistics': False,
'throughput': False,
'namespaces': True,
'namespaces_whitelist': ['bar'],
})
self.collector.collect()
patch_Telnet.stop()
mockTelnet.read_until.assert_any_call('\n', 1)
mockTelnet.write.assert_has_calls(
[
call('version\n'),
call('namespaces\n'),
call('namespace/bar\n'),
],
)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from __future__ import print_function
import argparse
import os
import tarfile
def output_print(msg):
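    """Print msg only when --verbose was given (uses the module-level args)."""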
if args.verbose:
print(msg)
class MyFileObject(tarfile.ExFileObject):
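    """File object that reports each member's name once it has been fully read."""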
def read(self, size, *args):
if self.position == self.size:
output_print("Extracting: %s" % self.name)
return tarfile.ExFileObject.read(self, size, *args)
def extract_members(members, extract):
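    """Yield only tar members whose name equals or starts with a requested path."""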
for tarinfo in members:
for path in extract:
if tarinfo.name == path or tarinfo.name.startswith(path):
yield tarinfo
def extract_all(filename, members=None, directory=''):
if args.gzip:
output_print('Reading gzip file.')
tar = tarfile.open(filename, "r:gz")
elif args.bz2:
output_print('Reading bz2 file.')
tar = tarfile.open(filename, "r:bz2")
else:
output_print('Reading tar file.')
tar = tarfile.open(filename, "r:")
output_print('Extracting files.')
# check for specific file extraction
if members:
tar.extractall(path=directory, members=extract_members(tar, members))
else:
tar.extractall(path=directory)
tar.close()
print('Archive extracted.')
def create_tar(filename, files):
# Progress filter
def tar_filter(tarinfo):
output_print('Adding: %s' % tarinfo.name)
return tarinfo
if args.gzip:
output_print('Creating gzip file.')
tar = tarfile.open(filename, "w:gz")
elif args.bz2:
output_print('Creating bz2 file.')
tar = tarfile.open(filename, "w:bz2")
else:
output_print('Creating tar file.')
tar = tarfile.open(filename, "w")
for name in files:
output_print('Adding %s' % name)
tar.add(name, filter=tar_filter)
tar.close()
    print('Archive created.')
def list_tar(filename):
if args.gzip:
tar = tarfile.open(filename, "r:gz")
elif args.bz2:
tar = tarfile.open(filename, "r:bz2")
else:
tar = tarfile.open(filename, "r:")
tar.list()
tar.close()
if __name__ == '__main__':
ap = argparse.ArgumentParser()
ap.add_argument('-c', '--create', action='store_true', default=False, help='Creates a new archive')
    ap.add_argument('-v', '--verbose', action='store_true', default=False, help='Verbose output.')
    ap.add_argument('-t', '--list', action='store_true', default=False, help='List archive contents.')
ap.add_argument('-j', '--bz2', action='store_true', default=False, help='Compress as bz2 format')
ap.add_argument('-z', '--gzip', action='store_true', default=False, help='Compress as gzip format')
ap.add_argument('-x', '--extract', action='store_true', default=False, help='Extract an archive.')
ap.add_argument('-f', '--file', action='store', help='Archive filename.')
ap.add_argument(
'-C',
'--directory',
action='store',
default='',
help='Change to directory before processing remaining files'
)
ap.add_argument(
'files',
action='store',
default=[],
nargs='*',
help='Create: Files/Dirs to add to archive.\nExtract: Specific Files/Dirs to extract, default: all',
)
args = ap.parse_args()
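    # Use the progress-reporting file object so extraction prints each member as it completes.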
tarfile.TarFile.fileobject = MyFileObject
if args.list:
list_tar(os.path.expanduser(args.file))
elif args.create:
create_tar(os.path.expanduser(args.file), args.files)
elif args.extract:
extract_all(os.path.expanduser(args.file), args.files, directory=args.directory)
|
import logging
from aioflo import async_get_api
from aioflo.errors import RequestError
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({"username": str, "password": str})
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
session = async_get_clientsession(hass)
try:
api = await async_get_api(
data[CONF_USERNAME], data[CONF_PASSWORD], session=session
)
except RequestError as request_error:
_LOGGER.error("Error connecting to the Flo API: %s", request_error)
raise CannotConnect from request_error
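    # Use the nickname of the account's first location as the config entry title.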
user_info = await api.user.get_info()
a_location_id = user_info["locations"][0]["id"]
location_info = await api.location.get_info(a_location_id)
return {"title": location_info["nickname"]}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for flo."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
try:
info = await validate_input(self.hass, user_input)
return self.async_create_entry(title=info["title"], data=user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
import unittest
from mock import MagicMock, patch
from pgmpy.factors.discrete import DiscreteFactor, TabularCPD, State
from pgmpy.models import BayesianModel, MarkovModel
from pgmpy.sampling import BayesianModelSampling, GibbsSampling
class TestBayesianModelSampling(unittest.TestCase):
def setUp(self):
# Bayesian Model without state names
self.bayesian_model = BayesianModel(
[("A", "J"), ("R", "J"), ("J", "Q"), ("J", "L"), ("G", "L")]
)
cpd_a = TabularCPD("A", 2, [[0.2], [0.8]])
cpd_r = TabularCPD("R", 2, [[0.4], [0.6]])
cpd_j = TabularCPD(
"J", 2, [[0.9, 0.6, 0.7, 0.1], [0.1, 0.4, 0.3, 0.9]], ["R", "A"], [2, 2]
)
cpd_q = TabularCPD("Q", 2, [[0.9, 0.2], [0.1, 0.8]], ["J"], [2])
cpd_l = TabularCPD(
"L", 2, [[0.9, 0.45, 0.8, 0.1], [0.1, 0.55, 0.2, 0.9]], ["G", "J"], [2, 2]
)
cpd_g = TabularCPD("G", 2, [[0.6], [0.4]])
self.bayesian_model.add_cpds(cpd_a, cpd_g, cpd_j, cpd_l, cpd_q, cpd_r)
self.sampling_inference = BayesianModelSampling(self.bayesian_model)
# Bayesian Model with state names
self.bayesian_model_names = BayesianModel(
[("A", "J"), ("R", "J"), ("J", "Q"), ("J", "L"), ("G", "L")]
)
cpd_a_names = TabularCPD(
"A", 2, [[0.2], [0.8]], state_names={"A": ["a0", "a1"]}
)
cpd_r_names = TabularCPD(
"R", 2, [[0.4], [0.6]], state_names={"R": ["r0", "r1"]}
)
cpd_j_names = TabularCPD(
"J",
2,
[[0.9, 0.6, 0.7, 0.1], [0.1, 0.4, 0.3, 0.9]],
["R", "A"],
[2, 2],
state_names={"J": ["j0", "j1"], "R": ["r0", "r1"], "A": ["a0", "a1"]},
)
cpd_q_names = TabularCPD(
"Q",
2,
[[0.9, 0.2], [0.1, 0.8]],
["J"],
[2],
state_names={"Q": ["q0", "q1"], "J": ["j0", "j1"]},
)
cpd_l_names = TabularCPD(
"L",
2,
[[0.9, 0.45, 0.8, 0.1], [0.1, 0.55, 0.2, 0.9]],
["G", "J"],
[2, 2],
state_names={"L": ["l0", "l1"], "G": ["g0", "g1"], "J": ["j0", "j1"]},
)
cpd_g_names = TabularCPD(
"G", 2, [[0.6], [0.4]], state_names={"G": ["g0", "g1"]}
)
self.bayesian_model_names.add_cpds(
cpd_a_names, cpd_g_names, cpd_j_names, cpd_l_names, cpd_q_names, cpd_r_names
)
self.sampling_inference_names = BayesianModelSampling(self.bayesian_model_names)
self.markov_model = MarkovModel()
def test_init(self):
with self.assertRaises(TypeError):
BayesianModelSampling(self.markov_model)
def test_forward_sample(self):
# Test without state names
sample = self.sampling_inference.forward_sample(25)
self.assertEquals(len(sample), 25)
self.assertEquals(len(sample.columns), 6)
self.assertIn("A", sample.columns)
self.assertIn("J", sample.columns)
self.assertIn("R", sample.columns)
self.assertIn("Q", sample.columns)
self.assertIn("G", sample.columns)
self.assertIn("L", sample.columns)
self.assertTrue(set(sample.A).issubset({0, 1}))
self.assertTrue(set(sample.J).issubset({0, 1}))
self.assertTrue(set(sample.R).issubset({0, 1}))
self.assertTrue(set(sample.Q).issubset({0, 1}))
self.assertTrue(set(sample.G).issubset({0, 1}))
self.assertTrue(set(sample.L).issubset({0, 1}))
# Test with state names
sample = self.sampling_inference_names.forward_sample(25)
self.assertEquals(len(sample), 25)
self.assertEquals(len(sample.columns), 6)
self.assertIn("A", sample.columns)
self.assertIn("J", sample.columns)
self.assertIn("R", sample.columns)
self.assertIn("Q", sample.columns)
self.assertIn("G", sample.columns)
self.assertIn("L", sample.columns)
self.assertTrue(set(sample.A).issubset({"a0", "a1"}))
self.assertTrue(set(sample.J).issubset({"j0", "j1"}))
self.assertTrue(set(sample.R).issubset({"r0", "r1"}))
self.assertTrue(set(sample.Q).issubset({"q0", "q1"}))
self.assertTrue(set(sample.G).issubset({"g0", "g1"}))
self.assertTrue(set(sample.L).issubset({"l0", "l1"}))
def test_rejection_sample_basic(self):
# Test without state names
sample = self.sampling_inference.rejection_sample()
sample = self.sampling_inference.rejection_sample(
[State("A", 1), State("J", 1), State("R", 1)], 25
)
self.assertEquals(len(sample), 25)
self.assertEquals(len(sample.columns), 6)
self.assertIn("A", sample.columns)
self.assertIn("J", sample.columns)
self.assertIn("R", sample.columns)
self.assertIn("Q", sample.columns)
self.assertIn("G", sample.columns)
self.assertIn("L", sample.columns)
self.assertTrue(set(sample.A).issubset({1}))
self.assertTrue(set(sample.J).issubset({1}))
self.assertTrue(set(sample.R).issubset({1}))
self.assertTrue(set(sample.Q).issubset({0, 1}))
self.assertTrue(set(sample.G).issubset({0, 1}))
self.assertTrue(set(sample.L).issubset({0, 1}))
# Test with state names
sample = self.sampling_inference_names.rejection_sample()
sample = self.sampling_inference_names.rejection_sample(
[State("A", "a1"), State("J", "j1"), State("R", "r1")], 25
)
self.assertEquals(len(sample), 25)
self.assertEquals(len(sample.columns), 6)
self.assertIn("A", sample.columns)
self.assertIn("J", sample.columns)
self.assertIn("R", sample.columns)
self.assertIn("Q", sample.columns)
self.assertIn("G", sample.columns)
self.assertIn("L", sample.columns)
self.assertTrue(set(sample.A).issubset({"a1"}))
self.assertTrue(set(sample.J).issubset({"j1"}))
self.assertTrue(set(sample.R).issubset({"r1"}))
self.assertTrue(set(sample.Q).issubset({"q0", "q1"}))
self.assertTrue(set(sample.G).issubset({"g0", "g1"}))
self.assertTrue(set(sample.L).issubset({"l0", "l1"}))
def test_likelihood_weighted_sample(self):
# Test without state names
sample = self.sampling_inference.likelihood_weighted_sample()
sample = self.sampling_inference.likelihood_weighted_sample(
[State("A", 0), State("J", 1), State("R", 0)], 25
)
self.assertEquals(len(sample), 25)
self.assertEquals(len(sample.columns), 7)
self.assertIn("A", sample.columns)
self.assertIn("J", sample.columns)
self.assertIn("R", sample.columns)
self.assertIn("Q", sample.columns)
self.assertIn("G", sample.columns)
self.assertIn("L", sample.columns)
self.assertIn("_weight", sample.columns)
self.assertTrue(set(sample.A).issubset({0}))
self.assertTrue(set(sample.J).issubset({1}))
self.assertTrue(set(sample.R).issubset({0}))
self.assertTrue(set(sample.Q).issubset({0, 1}))
self.assertTrue(set(sample.G).issubset({0, 1}))
self.assertTrue(set(sample.L).issubset({0, 1}))
# Test with state names
sample = self.sampling_inference_names.likelihood_weighted_sample()
sample = self.sampling_inference_names.likelihood_weighted_sample(
[State("A", "a0"), State("J", "j1"), State("R", "r0")], 25
)
self.assertEquals(len(sample), 25)
self.assertEquals(len(sample.columns), 7)
self.assertIn("A", sample.columns)
self.assertIn("J", sample.columns)
self.assertIn("R", sample.columns)
self.assertIn("Q", sample.columns)
self.assertIn("G", sample.columns)
self.assertIn("L", sample.columns)
self.assertIn("_weight", sample.columns)
self.assertTrue(set(sample.A).issubset({"a0"}))
self.assertTrue(set(sample.J).issubset({"j1"}))
self.assertTrue(set(sample.R).issubset({"r0"}))
self.assertTrue(set(sample.Q).issubset({"q0", "q1"}))
self.assertTrue(set(sample.G).issubset({"g0", "g1"}))
self.assertTrue(set(sample.L).issubset({"l0", "l1"}))
def tearDown(self):
del self.sampling_inference
del self.bayesian_model
del self.markov_model
class TestGibbsSampling(unittest.TestCase):
def setUp(self):
# A test Bayesian model
diff_cpd = TabularCPD("diff", 2, [[0.6], [0.4]])
intel_cpd = TabularCPD("intel", 2, [[0.7], [0.3]])
grade_cpd = TabularCPD(
"grade",
3,
[[0.3, 0.05, 0.9, 0.5], [0.4, 0.25, 0.08, 0.3], [0.3, 0.7, 0.02, 0.2]],
evidence=["diff", "intel"],
evidence_card=[2, 2],
)
self.bayesian_model = BayesianModel()
self.bayesian_model.add_nodes_from(["diff", "intel", "grade"])
self.bayesian_model.add_edges_from([("diff", "grade"), ("intel", "grade")])
self.bayesian_model.add_cpds(diff_cpd, intel_cpd, grade_cpd)
# A test Markov model
self.markov_model = MarkovModel([("A", "B"), ("C", "B"), ("B", "D")])
factor_ab = DiscreteFactor(["A", "B"], [2, 3], [1, 2, 3, 4, 5, 6])
factor_cb = DiscreteFactor(
["C", "B"], [4, 3], [3, 1, 4, 5, 7, 8, 1, 3, 10, 4, 5, 6]
)
factor_bd = DiscreteFactor(["B", "D"], [3, 2], [5, 7, 2, 1, 9, 3])
self.markov_model.add_factors(factor_ab, factor_cb, factor_bd)
self.gibbs = GibbsSampling(self.bayesian_model)
def tearDown(self):
del self.bayesian_model
del self.markov_model
@patch(
"pgmpy.sampling.GibbsSampling._get_kernel_from_bayesian_model", autospec=True
)
@patch("pgmpy.models.MarkovChain.__init__", autospec=True)
def test_init_bayesian_model(self, init, get_kernel):
model = MagicMock(spec_set=BayesianModel)
gibbs = GibbsSampling(model)
init.assert_called_once_with(gibbs)
get_kernel.assert_called_once_with(gibbs, model)
@patch("pgmpy.sampling.GibbsSampling._get_kernel_from_markov_model", autospec=True)
def test_init_markov_model(self, get_kernel):
model = MagicMock(spec_set=MarkovModel)
gibbs = GibbsSampling(model)
get_kernel.assert_called_once_with(gibbs, model)
def test_get_kernel_from_bayesian_model(self):
gibbs = GibbsSampling()
gibbs._get_kernel_from_bayesian_model(self.bayesian_model)
self.assertListEqual(list(gibbs.variables), list(self.bayesian_model.nodes()))
self.assertDictEqual(gibbs.cardinalities, {"diff": 2, "intel": 2, "grade": 3})
def test_get_kernel_from_markov_model(self):
gibbs = GibbsSampling()
gibbs._get_kernel_from_markov_model(self.markov_model)
self.assertListEqual(list(gibbs.variables), list(self.markov_model.nodes()))
self.assertDictEqual(gibbs.cardinalities, {"A": 2, "B": 3, "C": 4, "D": 2})
def test_sample(self):
start_state = [State("diff", 0), State("intel", 0), State("grade", 0)]
sample = self.gibbs.sample(start_state, 2)
self.assertEquals(len(sample), 2)
self.assertEquals(len(sample.columns), 3)
self.assertIn("diff", sample.columns)
self.assertIn("intel", sample.columns)
self.assertIn("grade", sample.columns)
self.assertTrue(set(sample["diff"]).issubset({0, 1}))
self.assertTrue(set(sample["intel"]).issubset({0, 1}))
self.assertTrue(set(sample["grade"]).issubset({0, 1, 2}))
@patch("pgmpy.sampling.GibbsSampling.random_state", autospec=True)
def test_sample_less_arg(self, random_state):
self.gibbs.state = None
random_state.return_value = [
State("diff", 0),
State("intel", 0),
State("grade", 0),
]
sample = self.gibbs.sample(size=2)
random_state.assert_called_once_with(self.gibbs)
self.assertEqual(len(sample), 2)
def test_generate_sample(self):
start_state = [State("diff", 0), State("intel", 0), State("grade", 0)]
gen = self.gibbs.generate_sample(start_state, 2)
samples = [sample for sample in gen]
self.assertEqual(len(samples), 2)
self.assertEqual(
{samples[0][0].var, samples[0][1].var, samples[0][2].var},
{"diff", "intel", "grade"},
)
self.assertEqual(
{samples[1][0].var, samples[1][1].var, samples[1][2].var},
{"diff", "intel", "grade"},
)
@patch("pgmpy.sampling.GibbsSampling.random_state", autospec=True)
def test_generate_sample_less_arg(self, random_state):
self.gibbs.state = None
gen = self.gibbs.generate_sample(size=2)
samples = [sample for sample in gen]
random_state.assert_called_once_with(self.gibbs)
self.assertEqual(len(samples), 2)
|
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
SUPPORT_CLOSE,
SUPPORT_OPEN,
CoverEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_utc_time_change
from . import DOMAIN
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Demo covers."""
async_add_entities(
[
DemoCover(hass, "cover_1", "Kitchen Window"),
DemoCover(hass, "cover_2", "Hall Window", 10),
DemoCover(hass, "cover_3", "Living Room Window", 70, 50),
DemoCover(
hass,
"cover_4",
"Garage Door",
device_class="garage",
supported_features=(SUPPORT_OPEN | SUPPORT_CLOSE),
),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
class DemoCover(CoverEntity):
"""Representation of a demo cover."""
def __init__(
self,
hass,
unique_id,
name,
position=None,
tilt_position=None,
device_class=None,
supported_features=None,
):
"""Initialize the cover."""
self.hass = hass
self._unique_id = unique_id
self._name = name
self._position = position
self._device_class = device_class
self._supported_features = supported_features
self._set_position = None
self._set_tilt_position = None
self._tilt_position = tilt_position
self._requested_closing = True
self._requested_closing_tilt = True
self._unsub_listener_cover = None
self._unsub_listener_cover_tilt = None
self._is_opening = False
self._is_closing = False
if position is None:
self._closed = True
else:
self._closed = self.current_cover_position <= 0
@property
def device_info(self):
"""Return device info."""
return {
"identifiers": {
# Serial numbers are unique identifiers within a specific domain
(DOMAIN, self.unique_id)
},
"name": self.name,
}
@property
def unique_id(self):
"""Return unique ID for cover."""
return self._unique_id
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def should_poll(self):
"""No polling needed for a demo cover."""
return False
@property
def current_cover_position(self):
"""Return the current position of the cover."""
return self._position
@property
def current_cover_tilt_position(self):
"""Return the current tilt position of the cover."""
return self._tilt_position
@property
def is_closed(self):
"""Return if the cover is closed."""
return self._closed
@property
def is_closing(self):
"""Return if the cover is closing."""
return self._is_closing
@property
def is_opening(self):
"""Return if the cover is opening."""
return self._is_opening
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return self._device_class
@property
def supported_features(self):
"""Flag supported features."""
if self._supported_features is not None:
return self._supported_features
return super().supported_features
async def async_close_cover(self, **kwargs):
"""Close the cover."""
if self._position == 0:
return
if self._position is None:
self._closed = True
self.async_write_ha_state()
return
self._is_closing = True
self._listen_cover()
self._requested_closing = True
self.async_write_ha_state()
async def async_close_cover_tilt(self, **kwargs):
"""Close the cover tilt."""
if self._tilt_position in (0, None):
return
self._listen_cover_tilt()
self._requested_closing_tilt = True
async def async_open_cover(self, **kwargs):
"""Open the cover."""
if self._position == 100:
return
if self._position is None:
self._closed = False
self.async_write_ha_state()
return
self._is_opening = True
self._listen_cover()
self._requested_closing = False
self.async_write_ha_state()
async def async_open_cover_tilt(self, **kwargs):
"""Open the cover tilt."""
if self._tilt_position in (100, None):
return
self._listen_cover_tilt()
self._requested_closing_tilt = False
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
position = kwargs.get(ATTR_POSITION)
self._set_position = round(position, -1)
if self._position == position:
return
self._listen_cover()
self._requested_closing = position < self._position
async def async_set_cover_tilt_position(self, **kwargs):
"""Move the cover til to a specific position."""
tilt_position = kwargs.get(ATTR_TILT_POSITION)
self._set_tilt_position = round(tilt_position, -1)
if self._tilt_position == tilt_position:
return
self._listen_cover_tilt()
self._requested_closing_tilt = tilt_position < self._tilt_position
async def async_stop_cover(self, **kwargs):
"""Stop the cover."""
self._is_closing = False
self._is_opening = False
if self._position is None:
return
if self._unsub_listener_cover is not None:
self._unsub_listener_cover()
self._unsub_listener_cover = None
self._set_position = None
async def async_stop_cover_tilt(self, **kwargs):
"""Stop the cover tilt."""
if self._tilt_position is None:
return
if self._unsub_listener_cover_tilt is not None:
self._unsub_listener_cover_tilt()
self._unsub_listener_cover_tilt = None
self._set_tilt_position = None
@callback
def _listen_cover(self):
"""Listen for changes in cover."""
if self._unsub_listener_cover is None:
self._unsub_listener_cover = async_track_utc_time_change(
self.hass, self._time_changed_cover
)
async def _time_changed_cover(self, now):
"""Track time changes."""
if self._requested_closing:
self._position -= 10
else:
self._position += 10
if self._position in (100, 0, self._set_position):
await self.async_stop_cover()
self._closed = self.current_cover_position <= 0
self.async_write_ha_state()
@callback
def _listen_cover_tilt(self):
"""Listen for changes in cover tilt."""
if self._unsub_listener_cover_tilt is None:
self._unsub_listener_cover_tilt = async_track_utc_time_change(
self.hass, self._time_changed_cover_tilt
)
async def _time_changed_cover_tilt(self, now):
"""Track time changes."""
if self._requested_closing_tilt:
self._tilt_position -= 10
else:
self._tilt_position += 10
if self._tilt_position in (100, 0, self._set_tilt_position):
await self.async_stop_cover_tilt()
self.async_write_ha_state()
|
from homeassistant.const import PERCENTAGE
from .util import async_init_integration
async def test_pr3000rt2u(hass):
"""Test creation of PR3000RT2U sensors."""
await async_init_integration(hass, "PR3000RT2U", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == "CPS_PR3000RT2U_PYVJO2000034_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_cp1350c(hass):
"""Test creation of CP1350C sensors."""
config_entry = await async_init_integration(hass, "CP1350C", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_5e850i(hass):
"""Test creation of 5E850I sensors."""
config_entry = await async_init_integration(hass, "5E850I", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_5e650i(hass):
"""Test creation of 5E650I sensors."""
config_entry = await async_init_integration(hass, "5E650I", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online Battery Charging",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_backupsses600m1(hass):
"""Test creation of BACKUPSES600M1 sensors."""
await async_init_integration(hass, "BACKUPSES600M1", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert (
entry.unique_id
== "American Power Conversion_Back-UPS ES 600M1_4B1713P32195 _battery.charge"
)
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_cp1500pfclcd(hass):
"""Test creation of CP1500PFCLCD sensors."""
config_entry = await async_init_integration(
hass, "CP1500PFCLCD", ["battery.charge"]
)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_dl650elcd(hass):
"""Test creation of DL650ELCD sensors."""
config_entry = await async_init_integration(hass, "DL650ELCD", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
async def test_blazer_usb(hass):
"""Test creation of blazer_usb sensors."""
config_entry = await async_init_integration(hass, "blazer_usb", ["battery.charge"])
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.ups1_battery_charge")
assert entry
assert entry.unique_id == f"{config_entry.entry_id}_battery.charge"
state = hass.states.get("sensor.ups1_battery_charge")
assert state.state == "100"
expected_attributes = {
"device_class": "battery",
"friendly_name": "Ups1 Battery Charge",
"state": "Online",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
|
from homeassistant.components.sleepiq import binary_sensor as sleepiq
from homeassistant.setup import async_setup_component
from tests.async_mock import MagicMock
from tests.components.sleepiq.test_init import mock_responses
CONFIG = {"username": "foo", "password": "bar"}
async def test_sensor_setup(hass, requests_mock):
"""Test for successfully setting up the SleepIQ platform."""
mock_responses(requests_mock)
await async_setup_component(hass, "sleepiq", {"sleepiq": CONFIG})
device_mock = MagicMock()
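    # Pass a mock in place of add_entities so we can inspect which devices get added.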
sleepiq.setup_platform(hass, CONFIG, device_mock, MagicMock())
devices = device_mock.call_args[0][0]
assert 2 == len(devices)
left_side = devices[1]
assert "SleepNumber ILE Test1 Is In Bed" == left_side.name
assert "on" == left_side.state
right_side = devices[0]
assert "SleepNumber ILE Test2 Is In Bed" == right_side.name
assert "off" == right_side.state
async def test_setup_single(hass, requests_mock):
"""Test for successfully setting up the SleepIQ platform."""
mock_responses(requests_mock, single=True)
await async_setup_component(hass, "sleepiq", {"sleepiq": CONFIG})
device_mock = MagicMock()
sleepiq.setup_platform(hass, CONFIG, device_mock, MagicMock())
devices = device_mock.call_args[0][0]
assert 1 == len(devices)
right_side = devices[0]
assert "SleepNumber ILE Test1 Is In Bed" == right_side.name
assert "on" == right_side.state
|
import json
import random
from flasgger import Swagger
from flasgger.utils import get_examples
def get_specs_data(mod):
"""
    Return a mapping of spec URL to parsed JSON spec for the given example app.
"""
# for each example app in /examples folder
client = mod.app.test_client()
    # init swag if not yet initialized (no-routes example)
specs_route = None
specs_data = {}
if getattr(mod.app, 'swag', None) is None:
_swag = Swagger()
_swag.config['endpoint'] = str(random.randint(1, 5000))
_swag.init_app(mod.app)
# get all the specs defined for the example app
else:
try:
flasgger_config = mod.swag.config
if flasgger_config.get('swagger_ui') is False:
return specs_data
specs_route = flasgger_config.get('specs_route', '/apidocs/')
except AttributeError:
pass
if specs_route is None:
specs_route = '/apidocs/'
apidocs = client.get('?'.join((specs_route, 'json=true')))
specs = json.loads(apidocs.data.decode("utf-8")).get('specs')
for spec in specs:
# for each spec get the spec url
url = spec['url']
response = client.get(url)
decoded = response.data.decode("utf-8")
specs_data[url] = json.loads(decoded)
return specs_data
def get_test_metadata(mod):
"""Create a dictionary of test metadata defined in an example
Every top-level constant prefixed with "_TEST_META_" is treated as
metadata which may control test behavior. The prefix is stripped and the
remaining text is lowercased to form the key in the metadata dictionary.
Example: '_TEST_META_SKIP_FULL_VALIDATION' -> 'skip_full_validation'
"""
test_metadata_prefix = '_TEST_META_'
return {key[len(test_metadata_prefix):].lower(): getattr(mod, key)
for key in mod.__dict__
if key.startswith(test_metadata_prefix)}
def pytest_generate_tests(metafunc):
"""
    Parametrize tests using get_examples() so that one test is
    generated for each app in the examples/ folder.
"""
if 'test_data' in metafunc.fixturenames:
test_data = [
(mod, mod.app.test_client(),
get_specs_data(mod), get_test_metadata(mod))
for mod in get_examples()
]
metafunc.parametrize(
'test_data',
test_data,
ids=lambda x: x[0].__name__
)
|
from homeassistant.components import gdacs
from homeassistant.components.gdacs import DEFAULT_SCAN_INTERVAL
from homeassistant.components.gdacs.sensor import (
ATTR_CREATED,
ATTR_LAST_UPDATE,
ATTR_LAST_UPDATE_SUCCESSFUL,
ATTR_REMOVED,
ATTR_STATUS,
ATTR_UPDATED,
)
from homeassistant.const import (
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONF_RADIUS,
EVENT_HOMEASSISTANT_START,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed
from tests.components.gdacs import _generate_mock_feed_entry
CONFIG = {gdacs.DOMAIN: {CONF_RADIUS: 200}}
async def test_setup(hass, legacy_patchable_time):
"""Test the general setup of the integration."""
# Set up some mock feed entries for this test.
mock_entry_1 = _generate_mock_feed_entry(
"1234",
"Title 1",
15.5,
(38.0, -3.0),
attribution="Attribution 1",
)
mock_entry_2 = _generate_mock_feed_entry(
"2345",
"Title 2",
20.5,
(38.1, -3.1),
)
mock_entry_3 = _generate_mock_feed_entry(
"3456",
"Title 3",
25.5,
(38.2, -3.2),
)
mock_entry_4 = _generate_mock_feed_entry("4567", "Title 4", 12.5, (38.3, -3.3))
# Patching 'utcnow' to gain more control over the timed update.
utcnow = dt_util.utcnow()
with patch("homeassistant.util.dt.utcnow", return_value=utcnow), patch(
"aio_georss_client.feed.GeoRssFeed.update"
) as mock_feed_update:
mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_2, mock_entry_3]
assert await async_setup_component(hass, gdacs.DOMAIN, CONFIG)
# Artificially trigger update and collect events.
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
all_states = hass.states.async_all()
# 3 geolocation and 1 sensor entities
assert len(all_states) == 4
state = hass.states.get("sensor.gdacs_32_87336_117_22743")
assert state is not None
assert int(state.state) == 3
assert state.name == "GDACS (32.87336, -117.22743)"
attributes = state.attributes
assert attributes[ATTR_STATUS] == "OK"
assert attributes[ATTR_CREATED] == 3
assert attributes[ATTR_LAST_UPDATE].tzinfo == dt_util.UTC
assert attributes[ATTR_LAST_UPDATE_SUCCESSFUL].tzinfo == dt_util.UTC
assert attributes[ATTR_LAST_UPDATE] == attributes[ATTR_LAST_UPDATE_SUCCESSFUL]
assert attributes[ATTR_UNIT_OF_MEASUREMENT] == "alerts"
assert attributes[ATTR_ICON] == "mdi:alert"
# Simulate an update - two existing, one new entry, one outdated entry
mock_feed_update.return_value = "OK", [mock_entry_1, mock_entry_4, mock_entry_3]
async_fire_time_changed(hass, utcnow + DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 4
state = hass.states.get("sensor.gdacs_32_87336_117_22743")
attributes = state.attributes
assert attributes[ATTR_CREATED] == 1
assert attributes[ATTR_UPDATED] == 2
assert attributes[ATTR_REMOVED] == 1
# Simulate an update - empty data, but successful update,
# so no changes to entities.
mock_feed_update.return_value = "OK_NO_DATA", None
async_fire_time_changed(hass, utcnow + 2 * DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 4
        # Simulate a failed update - removes all entities
mock_feed_update.return_value = "ERROR", None
async_fire_time_changed(hass, utcnow + 3 * DEFAULT_SCAN_INTERVAL)
await hass.async_block_till_done()
all_states = hass.states.async_all()
assert len(all_states) == 1
state = hass.states.get("sensor.gdacs_32_87336_117_22743")
attributes = state.attributes
assert attributes[ATTR_REMOVED] == 3
|
import logging
import sys
import io
import cheroot.server
import cherrypy
from cherrypy._cperror import format_exc, bare_error
from cherrypy.lib import httputil
from ._cpcompat import tonative
class NativeGateway(cheroot.server.Gateway):
"""Native gateway implementation allowing to bypass WSGI."""
recursive = False
def respond(self):
"""Obtain response from CherryPy machinery and then send it."""
req = self.req
try:
# Obtain a Request object from CherryPy
local = req.server.bind_addr # FIXME: handle UNIX sockets
local = tonative(local[0]), local[1]
local = httputil.Host(local[0], local[1], '')
remote = tonative(req.conn.remote_addr), req.conn.remote_port
remote = httputil.Host(remote[0], remote[1], '')
scheme = tonative(req.scheme)
sn = cherrypy.tree.script_name(tonative(req.uri or '/'))
if sn is None:
self.send_response('404 Not Found', [], [''])
else:
app = cherrypy.tree.apps[sn]
method = tonative(req.method)
path = tonative(req.path)
qs = tonative(req.qs or '')
headers = (
(tonative(h), tonative(v))
for h, v in req.inheaders.items()
)
rfile = req.rfile
prev = None
try:
redirections = []
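                    # Remember visited URLs so that, in non-recursive mode,
                    # InternalRedirect loops can be detected below.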
while True:
request, response = app.get_serving(
local, remote, scheme, 'HTTP/1.1')
request.multithread = True
request.multiprocess = False
request.app = app
request.prev = prev
# Run the CherryPy Request object and obtain the
# response
try:
request.run(
method, path, qs,
tonative(req.request_protocol),
headers, rfile,
)
break
except cherrypy.InternalRedirect:
ir = sys.exc_info()[1]
app.release_serving()
prev = request
if not self.recursive:
if ir.path in redirections:
raise RuntimeError(
'InternalRedirector visited the same '
'URL twice: %r' % ir.path)
else:
# Add the *previous* path_info + qs to
# redirections.
if qs:
qs = '?' + qs
redirections.append(sn + path + qs)
# Munge environment and try again.
method = 'GET'
path = ir.path
qs = ir.query_string
rfile = io.BytesIO()
self.send_response(
response.output_status, response.header_list,
response.body)
finally:
app.release_serving()
except Exception:
tb = format_exc()
# print tb
cherrypy.log(tb, 'NATIVE_ADAPTER', severity=logging.ERROR)
s, h, b = bare_error()
self.send_response(s, h, b)
def send_response(self, status, headers, body):
"""Send response to HTTP request."""
req = self.req
# Set response status
req.status = status or b'500 Server Error'
# Set response headers
for header, value in headers:
req.outheaders.append((header, value))
if (req.ready and not req.sent_headers):
req.sent_headers = True
req.send_headers()
# Set response body
for seg in body:
req.write(seg)
class CPHTTPServer(cheroot.server.HTTPServer):
"""Wrapper for cheroot.server.HTTPServer.
cheroot has been designed to not reference CherryPy in any way,
so that it can be used in other frameworks and applications.
Therefore, we wrap it here, so we can apply some attributes
from config -> cherrypy.server -> HTTPServer.
"""
def __init__(self, server_adapter=cherrypy.server):
"""Initialize CPHTTPServer."""
self.server_adapter = server_adapter
server_name = (self.server_adapter.socket_host or
self.server_adapter.socket_file or
None)
cheroot.server.HTTPServer.__init__(
self, server_adapter.bind_addr, NativeGateway,
minthreads=server_adapter.thread_pool,
maxthreads=server_adapter.thread_pool_max,
server_name=server_name)
self.max_request_header_size = (
self.server_adapter.max_request_header_size or 0)
self.max_request_body_size = (
self.server_adapter.max_request_body_size or 0)
self.request_queue_size = self.server_adapter.socket_queue_size
self.timeout = self.server_adapter.socket_timeout
self.shutdown_timeout = self.server_adapter.shutdown_timeout
self.protocol = self.server_adapter.protocol_version
self.nodelay = self.server_adapter.nodelay
ssl_module = self.server_adapter.ssl_module or 'pyopenssl'
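        # Prefer an explicitly configured SSL context; otherwise build an
        # adapter from the configured certificate files.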
if self.server_adapter.ssl_context:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
self.ssl_adapter.context = self.server_adapter.ssl_context
elif self.server_adapter.ssl_certificate:
adapter_class = cheroot.server.get_ssl_adapter_class(ssl_module)
self.ssl_adapter = adapter_class(
self.server_adapter.ssl_certificate,
self.server_adapter.ssl_private_key,
self.server_adapter.ssl_certificate_chain,
self.server_adapter.ssl_ciphers)
|
from twisted.internet.task import react
from twisted.internet.defer import inlineCallbacks as coroutine
from autobahn.twisted.wamp import Connection
@coroutine
def main(transport):
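    # Join the realm, call a remote procedure, then leave the session and close the transport.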
session = yield transport.join('myrealm1')
result = yield session.call('com.myapp.add2', 2, 3)
print("Result: {}".format(result))
yield session.leave()
yield transport.close()
if __name__ == '__main__':
connection = Connection(main)
react(connection.start)
|
from itertools import product
import os
import os.path as op
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose, assert_array_equal
from mne.channels import make_standard_montage
from mne.datasets import testing
from mne.io import read_raw_fif, read_raw_kit, read_raw_bti, read_info
from mne.io.constants import FIFF
from mne import (read_forward_solution, write_forward_solution,
make_forward_solution, convert_forward_solution,
setup_volume_source_space, read_source_spaces, create_info,
make_sphere_model, pick_types_forward, pick_info, pick_types,
read_evokeds, read_cov, read_dipole)
from mne.utils import (requires_mne, requires_nibabel,
run_tests_if_main, run_subprocess)
from mne.forward._make_forward import _create_meg_coils, make_forward_dipole
from mne.forward._compute_forward import _magnetic_dipole_field_vec
from mne.forward import Forward, _do_forward_solution
from mne.dipole import Dipole, fit_dipole
from mne.simulation import simulate_evoked
from mne.source_estimate import VolSourceEstimate
from mne.source_space import (get_volume_labels_from_aseg, write_source_spaces,
_compare_source_spaces, setup_source_space)
from mne.forward.tests.test_forward import assert_forward_allclose
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data',
'test_raw.fif')
fname_evo = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
fname_dip = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_set1.dip')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = os.path.join(data_path, 'subjects')
fname_src = op.join(subjects_dir, 'sample', 'bem', 'sample-oct-4-src.fif')
fname_bem = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-1280-1280-bem-sol.fif')
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
fname_bem_meg = op.join(subjects_dir, 'sample', 'bem',
'sample-1280-bem-sol.fif')
def _compare_forwards(fwd, fwd_py, n_sensors, n_src,
meg_rtol=1e-4, meg_atol=1e-9,
eeg_rtol=1e-3, eeg_atol=1e-3):
"""Test forwards."""
# check source spaces
assert_equal(len(fwd['src']), len(fwd_py['src']))
_compare_source_spaces(fwd['src'], fwd_py['src'], mode='approx')
for surf_ori, force_fixed in product([False, True], [False, True]):
# use copy here to leave our originals unmodified
fwd = convert_forward_solution(fwd, surf_ori, force_fixed, copy=True,
use_cps=True)
fwd_py = convert_forward_solution(fwd_py, surf_ori, force_fixed,
copy=True, use_cps=True)
check_src = n_src // 3 if force_fixed else n_src
for key in ('nchan', 'source_rr', 'source_ori',
'surf_ori', 'coord_frame', 'nsource'):
assert_allclose(fwd_py[key], fwd[key], rtol=1e-4, atol=1e-7,
err_msg=key)
# In surf_ori=True only Z matters for source_nn
if surf_ori and not force_fixed:
ori_sl = slice(2, None, 3)
else:
ori_sl = slice(None)
assert_allclose(fwd_py['source_nn'][ori_sl], fwd['source_nn'][ori_sl],
rtol=1e-4, atol=1e-6)
assert_allclose(fwd_py['mri_head_t']['trans'],
fwd['mri_head_t']['trans'], rtol=1e-5, atol=1e-8)
assert_equal(fwd_py['sol']['data'].shape, (n_sensors, check_src))
assert_equal(len(fwd['sol']['row_names']), n_sensors)
assert_equal(len(fwd_py['sol']['row_names']), n_sensors)
# check MEG
assert_allclose(fwd['sol']['data'][:306, ori_sl],
fwd_py['sol']['data'][:306, ori_sl],
rtol=meg_rtol, atol=meg_atol,
err_msg='MEG mismatch')
# check EEG
if fwd['sol']['data'].shape[0] > 306:
assert_allclose(fwd['sol']['data'][306:, ori_sl],
fwd_py['sol']['data'][306:, ori_sl],
rtol=eeg_rtol, atol=eeg_atol,
err_msg='EEG mismatch')
def test_magnetic_dipole():
"""Test basic magnetic dipole forward calculation."""
info = read_info(fname_raw)
picks = pick_types(info, meg=True, eeg=False, exclude=[])
info = pick_info(info, picks[:12])
coils = _create_meg_coils(info['chs'], 'normal', None)
# magnetic dipole far (meters!) from device origin
r0 = np.array([0., 13., -6.])
for ch, coil in zip(info['chs'], coils):
rr = (ch['loc'][:3] + r0) / 2. # get halfway closer
far_fwd = _magnetic_dipole_field_vec(r0[np.newaxis, :], [coil])
near_fwd = _magnetic_dipole_field_vec(rr[np.newaxis, :], [coil])
ratio = 8. if ch['ch_name'][-1] == '1' else 16. # grad vs mag
assert_allclose(np.median(near_fwd / far_fwd), ratio, atol=1e-1)
# degenerate case
r0 = coils[0]['rmag'][[0]]
with pytest.raises(RuntimeError, match='Coil too close'):
_magnetic_dipole_field_vec(r0, coils[:1])
with pytest.warns(RuntimeWarning, match='Coil too close'):
fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='warning')
assert not np.isfinite(fwd).any()
with np.errstate(invalid='ignore'):
fwd = _magnetic_dipole_field_vec(r0, coils[:1], too_close='info')
assert not np.isfinite(fwd).any()
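# Note added for clarity (not part of the original test): the 8x/16x ratios
# checked above follow from the expected falloff of a magnetic dipole field.
# Roughly, the field magnitude scales as 1/r**3, so halving the distance to a
# magnetometer multiplies the signal by about 2**3 = 8, while a planar
# gradiometer measures a spatial derivative scaling as roughly 1/r**4,
# giving the ~16x ratio.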
@pytest.mark.slowtest # slow-ish on Travis OSX
@pytest.mark.timeout(60) # can take longer than 30 sec on Travis
@testing.requires_testing_data
@requires_mne
def test_make_forward_solution_kit(tmpdir):
"""Test making fwd using KIT, BTI, and CTF (compensated) files."""
kit_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'kit',
'tests', 'data')
sqd_path = op.join(kit_dir, 'test.sqd')
mrk_path = op.join(kit_dir, 'test_mrk.sqd')
elp_path = op.join(kit_dir, 'test_elp.txt')
hsp_path = op.join(kit_dir, 'test_hsp.txt')
trans_path = op.join(kit_dir, 'trans-sample.fif')
fname_kit_raw = op.join(kit_dir, 'test_bin_raw.fif')
bti_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'bti',
'tests', 'data')
bti_pdf = op.join(bti_dir, 'test_pdf_linux')
bti_config = op.join(bti_dir, 'test_config_linux')
bti_hs = op.join(bti_dir, 'test_hs_linux')
fname_bti_raw = op.join(bti_dir, 'exported4D_linux_raw.fif')
fname_ctf_raw = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test_ctf_comp_raw.fif')
# first set up a small testing source space
fname_src_small = tmpdir.join('sample-oct-2-src.fif')
src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
add_dist=False)
write_source_spaces(fname_src_small, src) # to enable working with MNE-C
n_src = 108 # this is the resulting # of verts in fwd
# first use mne-C: convert file, make forward solution
fwd = _do_forward_solution('sample', fname_kit_raw, src=fname_src_small,
bem=fname_bem_meg, mri=trans_path,
eeg=False, meg=True, subjects_dir=subjects_dir)
assert (isinstance(fwd, Forward))
# now let's use python with the same raw file
fwd_py = make_forward_solution(fname_kit_raw, trans_path, src,
fname_bem_meg, eeg=False, meg=True)
_compare_forwards(fwd, fwd_py, 157, n_src)
assert (isinstance(fwd_py, Forward))
# now let's use mne-python all the way
raw_py = read_raw_kit(sqd_path, mrk_path, elp_path, hsp_path)
# without ignore_ref=True, this should throw an error:
with pytest.raises(NotImplementedError, match='Cannot.*KIT reference'):
make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
bem=fname_bem_meg, trans=trans_path)
# check that asking for eeg channels (even if they don't exist) is handled
meg_only_info = pick_info(raw_py.info, pick_types(raw_py.info, meg=True,
eeg=False))
fwd_py = make_forward_solution(meg_only_info, src=src, meg=True, eeg=True,
bem=fname_bem_meg, trans=trans_path,
ignore_ref=True)
_compare_forwards(fwd, fwd_py, 157, n_src,
meg_rtol=1e-3, meg_atol=1e-7)
# BTI python end-to-end versus C
fwd = _do_forward_solution('sample', fname_bti_raw, src=fname_src_small,
bem=fname_bem_meg, mri=trans_path,
eeg=False, meg=True, subjects_dir=subjects_dir)
raw_py = read_raw_bti(bti_pdf, bti_config, bti_hs, preload=False)
fwd_py = make_forward_solution(raw_py.info, src=src, eeg=False, meg=True,
bem=fname_bem_meg, trans=trans_path)
_compare_forwards(fwd, fwd_py, 248, n_src)
# now let's test CTF w/compensation
fwd_py = make_forward_solution(fname_ctf_raw, fname_trans, src,
fname_bem_meg, eeg=False, meg=True)
fwd = _do_forward_solution('sample', fname_ctf_raw, mri=fname_trans,
src=fname_src_small, bem=fname_bem_meg,
eeg=False, meg=True, subjects_dir=subjects_dir)
_compare_forwards(fwd, fwd_py, 274, n_src)
# CTF with compensation changed in python
ctf_raw = read_raw_fif(fname_ctf_raw)
ctf_raw.info['bads'] = ['MRO24-2908'] # test that it works with some bads
ctf_raw.apply_gradient_compensation(2)
fwd_py = make_forward_solution(ctf_raw.info, fname_trans, src,
fname_bem_meg, eeg=False, meg=True)
fwd = _do_forward_solution('sample', ctf_raw, mri=fname_trans,
src=fname_src_small, bem=fname_bem_meg,
eeg=False, meg=True,
subjects_dir=subjects_dir)
_compare_forwards(fwd, fwd_py, 274, n_src)
fname_temp = tmpdir.join('test-ctf-fwd.fif')
write_forward_solution(fname_temp, fwd_py)
fwd_py2 = read_forward_solution(fname_temp)
_compare_forwards(fwd_py, fwd_py2, 274, n_src)
repr(fwd_py)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_forward_solution():
"""Test making M-EEG forward solution from python."""
fwd_py = make_forward_solution(fname_raw, fname_trans, fname_src,
fname_bem, mindist=5.)
assert (isinstance(fwd_py, Forward))
fwd = read_forward_solution(fname_meeg)
assert (isinstance(fwd, Forward))
_compare_forwards(fwd, fwd_py, 366, 1494, meg_rtol=1e-3)
# Homogeneous model
with pytest.raises(RuntimeError, match='homogeneous.*1-layer.*EEG'):
make_forward_solution(fname_raw, fname_trans, fname_src,
fname_bem_meg)
@testing.requires_testing_data
def test_make_forward_solution_discrete(tmpdir):
"""Test making and converting a forward solution with discrete src."""
# smoke test for depth weighting and discrete source spaces
src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
add_dist=False)
src = src + setup_volume_source_space(
pos=dict(rr=src[0]['rr'][src[0]['vertno'][:3]].copy(),
nn=src[0]['nn'][src[0]['vertno'][:3]].copy()))
sphere = make_sphere_model()
fwd = make_forward_solution(fname_raw, fname_trans, src, sphere,
meg=True, eeg=False)
convert_forward_solution(fwd, surf_ori=True)
@testing.requires_testing_data
@requires_mne
@pytest.mark.timeout(90) # can take longer than 60 sec on Travis
def test_make_forward_solution_sphere(tmpdir):
"""Test making a forward solution with a sphere model."""
fname_src_small = tmpdir.join('sample-oct-2-src.fif')
src = setup_source_space('sample', 'oct2', subjects_dir=subjects_dir,
add_dist=False)
write_source_spaces(fname_src_small, src) # to enable working with MNE-C
out_name = tmpdir.join('tmp-fwd.fif')
run_subprocess(['mne_forward_solution', '--meg', '--eeg',
'--meas', fname_raw, '--src', fname_src_small,
'--mri', fname_trans, '--fwd', out_name])
fwd = read_forward_solution(out_name)
sphere = make_sphere_model(verbose=True)
fwd_py = make_forward_solution(fname_raw, fname_trans, src, sphere,
meg=True, eeg=True, verbose=True)
_compare_forwards(fwd, fwd_py, 366, 108,
meg_rtol=5e-1, meg_atol=1e-6,
eeg_rtol=5e-1, eeg_atol=5e-1)
# Since the above is pretty lax, let's check a different way
for meg, eeg in zip([True, False], [False, True]):
fwd_ = pick_types_forward(fwd, meg=meg, eeg=eeg)
        fwd_py_ = pick_types_forward(fwd_py, meg=meg, eeg=eeg)
assert_allclose(np.corrcoef(fwd_['sol']['data'].ravel(),
fwd_py_['sol']['data'].ravel())[0, 1],
1.0, rtol=1e-3)
# Number of layers in the sphere model doesn't matter for MEG
# (as long as no sources are omitted due to distance)
assert len(sphere['layers']) == 4
fwd = make_forward_solution(fname_raw, fname_trans, src, sphere,
meg=True, eeg=False)
sphere_1 = make_sphere_model(head_radius=None)
assert len(sphere_1['layers']) == 0
assert_array_equal(sphere['r0'], sphere_1['r0'])
    fwd_1 = make_forward_solution(fname_raw, fname_trans, src, sphere_1,
                                  meg=True, eeg=False)
_compare_forwards(fwd, fwd_1, 306, 108, meg_rtol=1e-12, meg_atol=1e-12)
# Homogeneous model
sphere = make_sphere_model(head_radius=None)
with pytest.raises(RuntimeError, match='zero shells.*EEG'):
make_forward_solution(fname_raw, fname_trans, src, sphere)
@pytest.mark.slowtest
@testing.requires_testing_data
@requires_nibabel()
def test_forward_mixed_source_space(tmpdir):
"""Test making the forward solution for a mixed source space."""
# get the surface source space
rng = np.random.RandomState(0)
surf = read_source_spaces(fname_src)
# setup two volume source spaces
label_names = get_volume_labels_from_aseg(fname_aseg)
vol_labels = rng.choice(label_names, 2)
vol1 = setup_volume_source_space('sample', pos=20., mri=fname_aseg,
volume_label=vol_labels[0],
add_interpolator=False)
vol2 = setup_volume_source_space('sample', pos=20., mri=fname_aseg,
volume_label=vol_labels[1],
add_interpolator=False)
# merge surfaces and volume
src = surf + vol1 + vol2
# calculate forward solution
fwd = make_forward_solution(fname_raw, fname_trans, src, fname_bem)
assert (repr(fwd))
# extract source spaces
src_from_fwd = fwd['src']
# get the coordinate frame of each source space
coord_frames = np.array([s['coord_frame'] for s in src_from_fwd])
# assert that all source spaces are in head coordinates
assert ((coord_frames == FIFF.FIFFV_COORD_HEAD).all())
# run tests for SourceSpaces.export_volume
fname_img = tmpdir.join('temp-image.mgz')
    # head coordinates and mri_resolution, but no trans file
with pytest.raises(ValueError, match='trans containing mri to head'):
src_from_fwd.export_volume(fname_img, mri_resolution=True, trans=None)
# head coordinates and mri_resolution, but wrong trans file
vox_mri_t = vol1[0]['vox_mri_t']
with pytest.raises(ValueError, match='head<->mri, got mri_voxel->mri'):
src_from_fwd.export_volume(fname_img, mri_resolution=True,
trans=vox_mri_t)
@pytest.mark.slowtest
@testing.requires_testing_data
def test_make_forward_dipole(tmpdir):
"""Test forward-projecting dipoles."""
rng = np.random.RandomState(0)
evoked = read_evokeds(fname_evo)[0]
cov = read_cov(fname_cov)
cov['projs'] = [] # avoid proj warning
dip_c = read_dipole(fname_dip)
# Only use magnetometers for speed!
picks = pick_types(evoked.info, meg='mag', eeg=False)[::8]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
info = evoked.info
# Make new Dipole object with n_test_dipoles picked from the dipoles
# in the test dataset.
n_test_dipoles = 3 # minimum 3 needed to get uneven sampling in time
dipsel = np.sort(rng.permutation(np.arange(len(dip_c)))[:n_test_dipoles])
dip_test = Dipole(times=dip_c.times[dipsel],
pos=dip_c.pos[dipsel],
amplitude=dip_c.amplitude[dipsel],
ori=dip_c.ori[dipsel],
gof=dip_c.gof[dipsel])
sphere = make_sphere_model(head_radius=0.1)
# Warning emitted due to uneven sampling in time
with pytest.warns(RuntimeWarning, match='unevenly spaced'):
fwd, stc = make_forward_dipole(dip_test, sphere, info,
trans=fname_trans)
# stc is list of VolSourceEstimate's
assert isinstance(stc, list)
for n_dip in range(n_test_dipoles):
assert isinstance(stc[n_dip], VolSourceEstimate)
# Now simulate evoked responses for each of the test dipoles,
# and fit dipoles to them (sphere model, MEG and EEG)
times, pos, amplitude, ori, gof = [], [], [], [], []
nave = 200 # add a tiny amount of noise to the simulated evokeds
for s in stc:
evo_test = simulate_evoked(fwd, s, info, cov,
nave=nave, random_state=rng)
# evo_test.add_proj(make_eeg_average_ref_proj(evo_test.info))
dfit, resid = fit_dipole(evo_test, cov, sphere, None)
times += dfit.times.tolist()
pos += dfit.pos.tolist()
amplitude += dfit.amplitude.tolist()
ori += dfit.ori.tolist()
gof += dfit.gof.tolist()
# Create a new Dipole object with the dipole fits
dip_fit = Dipole(times, pos, amplitude, ori, gof)
# check that true (test) dipoles and fits are "close"
# cf. mne/tests/test_dipole.py
diff = dip_test.pos - dip_fit.pos
corr = np.corrcoef(dip_test.pos.ravel(), dip_fit.pos.ravel())[0, 1]
dist = np.sqrt(np.mean(np.sum(diff * diff, axis=1)))
gc_dist = 180 / np.pi * \
np.mean(np.arccos(np.sum(dip_test.ori * dip_fit.ori, axis=1)))
amp_err = np.sqrt(np.mean((dip_test.amplitude - dip_fit.amplitude) ** 2))
# Make sure each coordinate is close to reference
# NB tolerance should be set relative to snr of simulated evoked!
assert_allclose(dip_fit.pos, dip_test.pos, rtol=0, atol=1e-2,
err_msg='position mismatch')
assert dist < 1e-2 # within 1 cm
assert corr > 0.985
assert gc_dist < 20 # less than 20 degrees
assert amp_err < 10e-9 # within 10 nAm
# Make sure rejection works with BEM: one dipole at z=1m
# NB _make_forward.py:_prepare_for_forward will raise a RuntimeError
# if no points are left after min_dist exclusions, hence 2 dips here!
dip_outside = Dipole(times=[0., 0.001],
pos=[[0., 0., 1.0], [0., 0., 0.040]],
amplitude=[100e-9, 100e-9],
ori=[[1., 0., 0.], [1., 0., 0.]], gof=1)
with pytest.raises(ValueError, match='outside the inner skull'):
make_forward_dipole(dip_outside, fname_bem, info, fname_trans)
# if we get this far, can safely assume the code works with BEMs too
# -> use sphere again below for speed
# Now make an evenly sampled set of dipoles, some simultaneous,
# should return a VolSourceEstimate regardless
times = [0., 0., 0., 0.001, 0.001, 0.002]
    pos = rng.rand(6, 3) * 0.020 + \
        np.array([0., 0., 0.040])[np.newaxis, :]
    amplitude = rng.rand(6) * 100e-9
ori = np.eye(6, 3) + np.eye(6, 3, -3)
gof = np.arange(len(times)) / len(times) # arbitrary
dip_even_samp = Dipole(times, pos, amplitude, ori, gof)
# I/O round-trip
fname = str(tmpdir.join('test-fwd.fif'))
with pytest.warns(RuntimeWarning, match='free orientation'):
write_forward_solution(fname, fwd)
fwd_read = convert_forward_solution(
read_forward_solution(fname), force_fixed=True)
assert_forward_allclose(fwd, fwd_read, rtol=1e-6)
fwd, stc = make_forward_dipole(dip_even_samp, sphere, info,
trans=fname_trans)
assert isinstance(stc, VolSourceEstimate)
assert_allclose(stc.times, np.arange(0., 0.003, 0.001))
@testing.requires_testing_data
def test_make_forward_no_meg(tmpdir):
"""Test that we can make and I/O forward solution with no MEG channels."""
pos = dict(rr=[[0.05, 0, 0]], nn=[[0, 0, 1.]])
src = setup_volume_source_space(pos=pos)
bem = make_sphere_model()
trans = None
montage = make_standard_montage('standard_1020')
info = create_info(['Cz'], 1000., 'eeg').set_montage(montage)
fwd = make_forward_solution(info, trans, src, bem)
fname = tmpdir.join('test-fwd.fif')
write_forward_solution(fname, fwd)
fwd_read = read_forward_solution(fname)
assert_allclose(fwd['sol']['data'], fwd_read['sol']['data'])
run_tests_if_main()
|
import asyncio
from datetime import datetime, timedelta
import logging
from typing import Optional
from aio_geojson_geonetnz_volcano import GeonetnzVolcanoFeedManager
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
CONF_UNIT_SYSTEM,
CONF_UNIT_SYSTEM_IMPERIAL,
LENGTH_MILES,
)
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
from homeassistant.util.unit_system import METRIC_SYSTEM
from .config_flow import configured_instances
from .const import DEFAULT_RADIUS, DEFAULT_SCAN_INTERVAL, DOMAIN, FEED
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS): vol.Coerce(float),
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the GeoNet NZ Volcano component."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
latitude = conf.get(CONF_LATITUDE, hass.config.latitude)
longitude = conf.get(CONF_LONGITUDE, hass.config.longitude)
scan_interval = conf[CONF_SCAN_INTERVAL]
identifier = f"{latitude}, {longitude}"
if identifier in configured_instances(hass):
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_LATITUDE: latitude,
CONF_LONGITUDE: longitude,
CONF_RADIUS: conf[CONF_RADIUS],
CONF_SCAN_INTERVAL: scan_interval,
},
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up the GeoNet NZ Volcano component as config entry."""
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN].setdefault(FEED, {})
radius = config_entry.data[CONF_RADIUS]
unit_system = config_entry.data[CONF_UNIT_SYSTEM]
if unit_system == CONF_UNIT_SYSTEM_IMPERIAL:
radius = METRIC_SYSTEM.length(radius, LENGTH_MILES)
# Create feed entity manager for all platforms.
manager = GeonetnzVolcanoFeedEntityManager(hass, config_entry, radius, unit_system)
hass.data[DOMAIN][FEED][config_entry.entry_id] = manager
_LOGGER.debug("Feed entity manager added for %s", config_entry.entry_id)
await manager.async_init()
return True
async def async_unload_entry(hass, config_entry):
"""Unload an GeoNet NZ Volcano component config entry."""
manager = hass.data[DOMAIN][FEED].pop(config_entry.entry_id)
await manager.async_stop()
await asyncio.wait(
[hass.config_entries.async_forward_entry_unload(config_entry, "sensor")]
)
return True
class GeonetnzVolcanoFeedEntityManager:
"""Feed Entity Manager for GeoNet NZ Volcano feed."""
def __init__(self, hass, config_entry, radius_in_km, unit_system):
"""Initialize the Feed Entity Manager."""
self._hass = hass
self._config_entry = config_entry
coordinates = (
config_entry.data[CONF_LATITUDE],
config_entry.data[CONF_LONGITUDE],
)
websession = aiohttp_client.async_get_clientsession(hass)
self._feed_manager = GeonetnzVolcanoFeedManager(
websession,
self._generate_entity,
self._update_entity,
self._remove_entity,
coordinates,
filter_radius=radius_in_km,
)
self._config_entry_id = config_entry.entry_id
self._scan_interval = timedelta(seconds=config_entry.data[CONF_SCAN_INTERVAL])
self._unit_system = unit_system
self._track_time_remove_callback = None
self.listeners = []
async def async_init(self):
"""Schedule initial and regular updates based on configured time interval."""
self._hass.async_create_task(
self._hass.config_entries.async_forward_entry_setup(
self._config_entry, "sensor"
)
)
async def update(event_time):
"""Update."""
await self.async_update()
# Trigger updates at regular intervals.
self._track_time_remove_callback = async_track_time_interval(
self._hass, update, self._scan_interval
)
_LOGGER.debug("Feed entity manager initialized")
async def async_update(self):
"""Refresh data."""
await self._feed_manager.update()
_LOGGER.debug("Feed entity manager updated")
async def async_stop(self):
"""Stop this feed entity manager from refreshing."""
for unsub_dispatcher in self.listeners:
unsub_dispatcher()
self.listeners = []
if self._track_time_remove_callback:
self._track_time_remove_callback()
_LOGGER.debug("Feed entity manager stopped")
@callback
def async_event_new_entity(self):
"""Return manager specific event to signal new entity."""
return f"geonetnz_volcano_new_sensor_{self._config_entry_id}"
def get_entry(self, external_id):
"""Get feed entry by external id."""
return self._feed_manager.feed_entries.get(external_id)
def last_update(self) -> Optional[datetime]:
"""Return the last update of this feed."""
return self._feed_manager.last_update
def last_update_successful(self) -> Optional[datetime]:
"""Return the last successful update of this feed."""
return self._feed_manager.last_update_successful
async def _generate_entity(self, external_id):
"""Generate new entity."""
async_dispatcher_send(
self._hass,
self.async_event_new_entity(),
self,
external_id,
self._unit_system,
)
async def _update_entity(self, external_id):
"""Update entity."""
async_dispatcher_send(self._hass, f"geonetnz_volcano_update_{external_id}")
async def _remove_entity(self, external_id):
"""Ignore removing entity."""
|
import logging
import os.path
from defusedxml import ElementTree
from ihcsdk.ihccontroller import IHCController
import voluptuous as vol
from homeassistant.components.binary_sensor import DEVICE_CLASSES_SCHEMA
from homeassistant.config import load_yaml_config_file
from homeassistant.const import (
CONF_ID,
CONF_NAME,
CONF_PASSWORD,
CONF_TYPE,
CONF_UNIT_OF_MEASUREMENT,
CONF_URL,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
ATTR_IHC_ID,
ATTR_VALUE,
CONF_AUTOSETUP,
CONF_BINARY_SENSOR,
CONF_DIMMABLE,
CONF_INFO,
CONF_INVERTING,
CONF_LIGHT,
CONF_NODE,
CONF_NOTE,
CONF_OFF_ID,
CONF_ON_ID,
CONF_POSITION,
CONF_SENSOR,
CONF_SWITCH,
CONF_XPATH,
SERVICE_PULSE,
SERVICE_SET_RUNTIME_VALUE_BOOL,
SERVICE_SET_RUNTIME_VALUE_FLOAT,
SERVICE_SET_RUNTIME_VALUE_INT,
)
from .util import async_pulse
_LOGGER = logging.getLogger(__name__)
AUTO_SETUP_YAML = "ihc_auto_setup.yaml"
DOMAIN = "ihc"
IHC_CONTROLLER = "controller"
IHC_INFO = "info"
IHC_PLATFORMS = ("binary_sensor", "light", "sensor", "switch")
def validate_name(config):
"""Validate the device name."""
if CONF_NAME in config:
return config
ihcid = config[CONF_ID]
name = f"ihc_{ihcid}"
config[CONF_NAME] = name
return config
DEVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_ID): cv.positive_int,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_NOTE): cv.string,
vol.Optional(CONF_POSITION): cv.string,
}
)
SWITCH_SCHEMA = DEVICE_SCHEMA.extend(
{
vol.Optional(CONF_OFF_ID, default=0): cv.positive_int,
vol.Optional(CONF_ON_ID, default=0): cv.positive_int,
}
)
BINARY_SENSOR_SCHEMA = DEVICE_SCHEMA.extend(
{
vol.Optional(CONF_INVERTING, default=False): cv.boolean,
vol.Optional(CONF_TYPE): DEVICE_CLASSES_SCHEMA,
}
)
LIGHT_SCHEMA = DEVICE_SCHEMA.extend(
{
vol.Optional(CONF_DIMMABLE, default=False): cv.boolean,
vol.Optional(CONF_OFF_ID, default=0): cv.positive_int,
vol.Optional(CONF_ON_ID, default=0): cv.positive_int,
}
)
SENSOR_SCHEMA = DEVICE_SCHEMA.extend(
{vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS): cv.string}
)
IHC_SCHEMA = vol.Schema(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_URL): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_AUTOSETUP, default=True): cv.boolean,
vol.Optional(CONF_BINARY_SENSOR, default=[]): vol.All(
cv.ensure_list, [vol.All(BINARY_SENSOR_SCHEMA, validate_name)]
),
vol.Optional(CONF_INFO, default=True): cv.boolean,
vol.Optional(CONF_LIGHT, default=[]): vol.All(
cv.ensure_list, [vol.All(LIGHT_SCHEMA, validate_name)]
),
vol.Optional(CONF_SENSOR, default=[]): vol.All(
cv.ensure_list, [vol.All(SENSOR_SCHEMA, validate_name)]
),
vol.Optional(CONF_SWITCH, default=[]): vol.All(
cv.ensure_list, [vol.All(SWITCH_SCHEMA, validate_name)]
),
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema(vol.All(cv.ensure_list, [IHC_SCHEMA]))}, extra=vol.ALLOW_EXTRA
)
AUTO_SETUP_SCHEMA = vol.Schema(
{
vol.Optional(CONF_BINARY_SENSOR, default=[]): vol.All(
cv.ensure_list,
[
vol.All(
{
vol.Required(CONF_NODE): cv.string,
vol.Required(CONF_XPATH): cv.string,
vol.Optional(CONF_INVERTING, default=False): cv.boolean,
vol.Optional(CONF_TYPE): cv.string,
}
)
],
),
vol.Optional(CONF_LIGHT, default=[]): vol.All(
cv.ensure_list,
[
vol.All(
{
vol.Required(CONF_NODE): cv.string,
vol.Required(CONF_XPATH): cv.string,
vol.Optional(CONF_DIMMABLE, default=False): cv.boolean,
}
)
],
),
vol.Optional(CONF_SENSOR, default=[]): vol.All(
cv.ensure_list,
[
vol.All(
{
vol.Required(CONF_NODE): cv.string,
vol.Required(CONF_XPATH): cv.string,
vol.Optional(
CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS
): cv.string,
}
)
],
),
vol.Optional(CONF_SWITCH, default=[]): vol.All(
cv.ensure_list,
[
vol.All(
{
vol.Required(CONF_NODE): cv.string,
vol.Required(CONF_XPATH): cv.string,
}
)
],
),
}
)
SET_RUNTIME_VALUE_BOOL_SCHEMA = vol.Schema(
{vol.Required(ATTR_IHC_ID): cv.positive_int, vol.Required(ATTR_VALUE): cv.boolean}
)
SET_RUNTIME_VALUE_INT_SCHEMA = vol.Schema(
{
vol.Required(ATTR_IHC_ID): cv.positive_int,
vol.Required(ATTR_VALUE): vol.Coerce(int),
}
)
SET_RUNTIME_VALUE_FLOAT_SCHEMA = vol.Schema(
{
vol.Required(ATTR_IHC_ID): cv.positive_int,
vol.Required(ATTR_VALUE): vol.Coerce(float),
}
)
PULSE_SCHEMA = vol.Schema({vol.Required(ATTR_IHC_ID): cv.positive_int})
def setup(hass, config):
"""Set up the IHC platform."""
conf = config.get(DOMAIN)
for index, controller_conf in enumerate(conf):
if not ihc_setup(hass, config, controller_conf, index):
return False
return True
def ihc_setup(hass, config, conf, controller_id):
"""Set up the IHC component."""
url = conf[CONF_URL]
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
ihc_controller = IHCController(url, username, password)
if not ihc_controller.authenticate():
_LOGGER.error("Unable to authenticate on IHC controller")
return False
if conf[CONF_AUTOSETUP] and not autosetup_ihc_products(
hass, config, ihc_controller, controller_id
):
return False
# Manual configuration
get_manual_configuration(hass, config, conf, ihc_controller, controller_id)
# Store controller configuration
ihc_key = f"ihc{controller_id}"
hass.data[ihc_key] = {IHC_CONTROLLER: ihc_controller, IHC_INFO: conf[CONF_INFO]}
setup_service_functions(hass, ihc_controller)
return True
def get_manual_configuration(hass, config, conf, ihc_controller, controller_id):
"""Get manual configuration for IHC devices."""
for component in IHC_PLATFORMS:
discovery_info = {}
if component in conf:
component_setup = conf.get(component)
for sensor_cfg in component_setup:
name = sensor_cfg[CONF_NAME]
device = {
"ihc_id": sensor_cfg[CONF_ID],
"ctrl_id": controller_id,
"product": {
"name": name,
"note": sensor_cfg.get(CONF_NOTE) or "",
"position": sensor_cfg.get(CONF_POSITION) or "",
},
"product_cfg": {
"type": sensor_cfg.get(CONF_TYPE),
"inverting": sensor_cfg.get(CONF_INVERTING),
"off_id": sensor_cfg.get(CONF_OFF_ID),
"on_id": sensor_cfg.get(CONF_ON_ID),
"dimmable": sensor_cfg.get(CONF_DIMMABLE),
"unit_of_measurement": sensor_cfg.get(CONF_UNIT_OF_MEASUREMENT),
},
}
discovery_info[name] = device
if discovery_info:
discovery.load_platform(hass, component, DOMAIN, discovery_info, config)
def autosetup_ihc_products(
hass: HomeAssistantType, config, ihc_controller, controller_id
):
"""Auto setup of IHC products from the IHC project file."""
project_xml = ihc_controller.get_project()
if not project_xml:
_LOGGER.error("Unable to read project from IHC controller")
return False
project = ElementTree.fromstring(project_xml)
    # If an auto setup file exists in the configuration directory, it will
    # override the default one bundled with the integration
yaml_path = hass.config.path(AUTO_SETUP_YAML)
if not os.path.isfile(yaml_path):
yaml_path = os.path.join(os.path.dirname(__file__), AUTO_SETUP_YAML)
yaml = load_yaml_config_file(yaml_path)
try:
auto_setup_conf = AUTO_SETUP_SCHEMA(yaml)
except vol.Invalid as exception:
_LOGGER.error("Invalid IHC auto setup data: %s", exception)
return False
groups = project.findall(".//group")
for component in IHC_PLATFORMS:
component_setup = auto_setup_conf[component]
discovery_info = get_discovery_info(component_setup, groups, controller_id)
if discovery_info:
discovery.load_platform(hass, component, DOMAIN, discovery_info, config)
return True
def get_discovery_info(component_setup, groups, controller_id):
"""Get discovery info for specified IHC component."""
discovery_data = {}
for group in groups:
groupname = group.attrib["name"]
for product_cfg in component_setup:
products = group.findall(product_cfg[CONF_XPATH])
for product in products:
nodes = product.findall(product_cfg[CONF_NODE])
for node in nodes:
if "setting" in node.attrib and node.attrib["setting"] == "yes":
continue
ihc_id = int(node.attrib["id"].strip("_"), 0)
name = f"{groupname}_{ihc_id}"
device = {
"ihc_id": ihc_id,
"ctrl_id": controller_id,
"product": {
"name": product.get("name") or "",
"note": product.get("note") or "",
"position": product.get("position") or "",
},
"product_cfg": product_cfg,
}
discovery_data[name] = device
return discovery_data
def setup_service_functions(hass: HomeAssistantType, ihc_controller):
"""Set up the IHC service functions."""
def set_runtime_value_bool(call):
"""Set a IHC runtime bool value service function."""
ihc_id = call.data[ATTR_IHC_ID]
value = call.data[ATTR_VALUE]
ihc_controller.set_runtime_value_bool(ihc_id, value)
def set_runtime_value_int(call):
"""Set a IHC runtime integer value service function."""
ihc_id = call.data[ATTR_IHC_ID]
value = call.data[ATTR_VALUE]
ihc_controller.set_runtime_value_int(ihc_id, value)
def set_runtime_value_float(call):
"""Set a IHC runtime float value service function."""
ihc_id = call.data[ATTR_IHC_ID]
value = call.data[ATTR_VALUE]
ihc_controller.set_runtime_value_float(ihc_id, value)
async def async_pulse_runtime_input(call):
"""Pulse a IHC controller input function."""
ihc_id = call.data[ATTR_IHC_ID]
await async_pulse(hass, ihc_controller, ihc_id)
hass.services.register(
DOMAIN,
SERVICE_SET_RUNTIME_VALUE_BOOL,
set_runtime_value_bool,
schema=SET_RUNTIME_VALUE_BOOL_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_SET_RUNTIME_VALUE_INT,
set_runtime_value_int,
schema=SET_RUNTIME_VALUE_INT_SCHEMA,
)
hass.services.register(
DOMAIN,
SERVICE_SET_RUNTIME_VALUE_FLOAT,
set_runtime_value_float,
schema=SET_RUNTIME_VALUE_FLOAT_SCHEMA,
)
hass.services.register(
DOMAIN, SERVICE_PULSE, async_pulse_runtime_input, schema=PULSE_SCHEMA
)
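# Illustrative usage sketch (assumption, not part of this module): once the
# services above are registered, they can be invoked like any other
# Home Assistant service, using the constants defined in this file:
#
#     await hass.services.async_call(
#         DOMAIN,
#         SERVICE_SET_RUNTIME_VALUE_BOOL,
#         {ATTR_IHC_ID: 12345, ATTR_VALUE: True},
#         blocking=True,
#     )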
|
import http.client as httplib
import multiprocessing
import pytest
from xmlrpc.client import ServerProxy
from xmlrpc.server import SimpleXMLRPCServer
requests = pytest.importorskip("requests")
import vcr # NOQA
def test_domain_redirect():
"""Ensure that redirects across domains are considered unique"""
# In this example, seomoz.org redirects to moz.com, and if those
# requests are considered identical, then we'll be stuck in a redirect
# loop.
url = "http://seomoz.org/"
with vcr.use_cassette("tests/fixtures/wild/domain_redirect.yaml") as cass:
requests.get(url, headers={"User-Agent": "vcrpy-test"})
# Ensure that we've now served two responses. One for the original
# redirect, and a second for the actual fetch
assert len(cass) == 2
def test_flickr_multipart_upload(httpbin, tmpdir):
"""
The python-flickr-api project does a multipart
upload that confuses vcrpy
"""
def _pretend_to_be_flickr_library():
content_type, body = "text/plain", "HELLO WORLD"
h = httplib.HTTPConnection(httpbin.host, httpbin.port)
headers = {"Content-Type": content_type, "content-length": str(len(body))}
h.request("POST", "/post/", headers=headers)
h.send(body)
r = h.getresponse()
data = r.read()
h.close()
return data
testfile = str(tmpdir.join("flickr.yml"))
with vcr.use_cassette(testfile) as cass:
_pretend_to_be_flickr_library()
assert len(cass) == 1
with vcr.use_cassette(testfile) as cass:
assert len(cass) == 1
_pretend_to_be_flickr_library()
assert cass.play_count == 1
def test_flickr_should_respond_with_200(tmpdir):
testfile = str(tmpdir.join("flickr.yml"))
with vcr.use_cassette(testfile):
r = requests.post("https://api.flickr.com/services/upload", verify=False)
assert r.status_code == 200
def test_cookies(tmpdir, httpbin):
testfile = str(tmpdir.join("cookies.yml"))
with vcr.use_cassette(testfile):
s = requests.Session()
s.get(httpbin.url + "/cookies/set?k1=v1&k2=v2")
r2 = s.get(httpbin.url + "/cookies")
assert len(r2.json()["cookies"]) == 2
def test_amazon_doctype(tmpdir):
# amazon gzips its homepage. For some reason, in requests 2.7, it's not
# getting gunzipped.
with vcr.use_cassette(str(tmpdir.join("amz.yml"))):
r = requests.get("http://www.amazon.com", verify=False)
assert "html" in r.text
def start_rpc_server(q):
httpd = SimpleXMLRPCServer(("127.0.0.1", 0))
httpd.register_function(pow)
q.put("http://{}:{}".format(*httpd.server_address))
httpd.serve_forever()
@pytest.yield_fixture(scope="session")
def rpc_server():
q = multiprocessing.Queue()
proxy_process = multiprocessing.Process(target=start_rpc_server, args=(q,))
try:
proxy_process.start()
yield q.get()
finally:
proxy_process.terminate()
def test_xmlrpclib(tmpdir, rpc_server):
with vcr.use_cassette(str(tmpdir.join("xmlrpcvideo.yaml"))):
roundup_server = ServerProxy(rpc_server, allow_none=True)
original_schema = roundup_server.pow(2, 4)
with vcr.use_cassette(str(tmpdir.join("xmlrpcvideo.yaml"))):
roundup_server = ServerProxy(rpc_server, allow_none=True)
second_schema = roundup_server.pow(2, 4)
assert original_schema == second_schema
|
from __future__ import absolute_import
from __future__ import unicode_literals
import socket
import struct
BROADCAST_IP = '255.255.255.255'
DEFAULT_PORT = 9
def create_magic_packet(macaddress):
"""
Create a magic packet which can be used for wake on lan using the
mac address given as a parameter.
Keyword arguments:
:arg macaddress: the mac address that should be parsed into a magic
packet.
"""
if len(macaddress) == 12:
pass
elif len(macaddress) == 17:
sep = macaddress[2]
macaddress = macaddress.replace(sep, '')
else:
raise ValueError('Incorrect MAC address format')
# Pad the synchronization stream
data = b'FFFFFFFFFFFF' + (macaddress * 20).encode()
send_data = b''
# Split up the hex values in pack
for i in range(0, len(data), 2):
send_data += struct.pack(b'B', int(data[i:i + 2], 16))
return send_data
def send_magic_packet(*macs, **kwargs):
"""
Wakes the computer with the given mac address if wake on lan is
enabled on that host.
Keyword arguments:
    :arg macs: One or more MAC addresses of machines to wake.
:key ip_address: the ip address of the host to send the magic packet
to (default "255.255.255.255")
:key port: the port of the host to send the magic packet to
(default 9)
"""
packets = []
ip = kwargs.pop('ip_address', BROADCAST_IP)
port = kwargs.pop('port', DEFAULT_PORT)
for k in kwargs:
        raise TypeError('send_magic_packet() got an unexpected keyword argument {!r}'.format(k))
for mac in macs:
packet = create_magic_packet(mac)
packets.append(packet)
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
sock.connect((ip, port))
for packet in packets:
sock.send(packet)
sock.close()
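# Minimal usage sketch (added for illustration; not part of the original
# module). It only builds a packet locally and checks its layout: 6 sync
# bytes of 0xFF followed by repetitions of the MAC address (20 here).
if __name__ == '__main__':
    _pkt = create_magic_packet('ff:ee:dd:cc:bb:aa')
    assert _pkt[:6] == b'\xff' * 6
    assert len(_pkt) == 6 + 20 * 6
    # To actually wake a machine, one would call e.g.:
    # send_magic_packet('ff:ee:dd:cc:bb:aa', ip_address='192.168.0.255')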
|
import os
from invoke import task
from ._config import ROOT_DIR, NAME
def trim_py_files(*directories):
"""Remove trailing whitespace on all .py files in the given directories.
"""
nchanged = 0
for directory in directories:
for root, dirs, files in os.walk(directory):
for fname in files:
filename = os.path.join(root, fname)
if fname.endswith('.py'):
with open(filename, 'rb') as f:
code1 = f.read().decode()
lines = [line.rstrip() for line in code1.splitlines()]
while lines and not lines[-1]:
lines.pop(-1)
lines.append('') # always end with a newline
code2 = '\n'.join(lines)
if code1 != code2:
nchanged += 1
print(' Removing trailing whitespace on', filename)
with open(filename, 'wb') as f:
f.write(code2.encode())
print('Removed trailing whitespace on {} files.'.format(nchanged))
@task
def ws(ctx):
""" Remove trailing whitespace from all py files.
"""
trim_py_files(os.path.join(ROOT_DIR, 'flexx'),
os.path.join(ROOT_DIR, 'flexxamples'),
os.path.join(ROOT_DIR, 'tasks'),
)
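# Usage note (assumption based on standard Invoke conventions, not part of the
# original file): with Invoke installed, this task is run from the repository
# root as `invoke ws`, rewriting the listed packages in place.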
|
import logging
import nuheat
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import (
CONF_PASSWORD,
CONF_USERNAME,
HTTP_BAD_REQUEST,
HTTP_INTERNAL_SERVER_ERROR,
)
from .const import CONF_SERIAL_NUMBER
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Required(CONF_SERIAL_NUMBER): str,
}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
api = nuheat.NuHeat(data[CONF_USERNAME], data[CONF_PASSWORD])
try:
await hass.async_add_executor_job(api.authenticate)
except requests.exceptions.Timeout as ex:
raise CannotConnect from ex
except requests.exceptions.HTTPError as ex:
if (
ex.response.status_code > HTTP_BAD_REQUEST
and ex.response.status_code < HTTP_INTERNAL_SERVER_ERROR
):
raise InvalidAuth from ex
raise CannotConnect from ex
#
# The underlying module throws a generic exception on login failure
#
except Exception as ex:
raise InvalidAuth from ex
try:
thermostat = await hass.async_add_executor_job(
api.get_thermostat, data[CONF_SERIAL_NUMBER]
)
except requests.exceptions.HTTPError as ex:
raise InvalidThermostat from ex
return {"title": thermostat.room, "serial_number": thermostat.serial_number}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for NuHeat."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except InvalidThermostat:
errors["base"] = "invalid_thermostat"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(info["serial_number"])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input):
"""Handle import."""
await self.async_set_unique_id(user_input[CONF_SERIAL_NUMBER])
self._abort_if_unique_id_configured()
return await self.async_step_user(user_input)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
class InvalidThermostat(exceptions.HomeAssistantError):
"""Error to indicate there is invalid thermostat."""
|
import datetime
import os
import random
import sys
sys.path = [os.path.abspath(os.path.dirname(__file__))] + sys.path
from auto_ml import Predictor
from auto_ml.utils_models import load_ml_model
import dill
import numpy as np
import pandas as pd
from nose.tools import assert_equal, assert_not_equal, with_setup
from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
import utils_testing as utils
def optimize_final_model_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
# We just want to make sure these run, not necessarily make sure that they're super accurate (which takes more time, and is dataset dependent)
df_titanic_train = df_titanic_train.sample(frac=0.5)
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, optimize_final_model=True, model_names=model_name)
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
# Small sample sizes mean there's a fair bit of noise here
lower_bound = -0.18
if model_name == 'DeepLearningClassifier':
lower_bound = -0.255
if model_name == 'LGBMClassifier':
lower_bound = -0.221
if model_name == 'GradientBoostingClassifier':
lower_bound = -0.225
if model_name == 'CatBoostClassifier':
lower_bound = -0.221
assert lower_bound < test_score < -0.135
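# Note added for clarity (assumption about the scoring convention): the bounds
# asserted in these tests are negative because the score appears to be a
# negated Brier-style loss, so values closer to zero mean better predictions
# and the assertions check that the model is good but not implausibly good.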
def categorical_ensembling_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train_categorical_ensemble(df_titanic_train, model_names=model_name, categorical_column='embarked')
test_score = ml_predictor.score(df_titanic_test, df_titanic_test.survived)
print('test_score')
print(test_score)
lower_bound = -0.18
upper_bound = -0.145
if model_name == 'DeepLearningClassifier':
lower_bound = -0.215
# CatBoost is super inconsistent
if model_name == 'CatBoostClassifier':
upper_bound = -0.137
assert lower_bound < test_score < upper_bound
def getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_titanic_train, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
    except OSError:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.18
upper_bound = -0.135
if model_name == 'DeepLearningClassifier':
lower_bound = -0.195
if model_name == 'CatBoostClassifier':
lower_bound = -0.215
upper_bound = -0.128
assert lower_bound < first_score < upper_bound
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < upper_bound
def getting_single_predictions_multilabel_classification(model_name=None):
# auto_ml does not support multilabel classification for deep learning at the moment
if model_name == 'DeepLearningClassifier' or model_name == 'CatBoostClassifier':
return
np.random.seed(0)
df_twitter_train, df_twitter_test = utils.get_twitter_sentiment_multilabel_classification_dataset()
column_descriptions = {
'airline_sentiment': 'output'
, 'airline': 'categorical'
, 'text': 'ignore'
, 'tweet_location': 'categorical'
, 'user_timezone': 'categorical'
, 'tweet_created': 'date'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
ml_predictor.train(df_twitter_train, model_names=model_name)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
    except OSError:
pass
df_twitter_test_dictionaries = df_twitter_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
first_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = 0.67
    # LGBM is super finicky here: sometimes it's fine, but sometimes it does pretty terribly.
if model_name == 'LGBMClassifier':
lower_bound = 0.6
assert lower_bound < first_score < 0.79
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_twitter_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_twitter_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_twitter_test_dictionaries:
predictions.append(saved_ml_pipeline.predict(row))
print('predictions')
print(predictions)
print('df_twitter_test_dictionaries')
print(df_twitter_test_dictionaries)
second_score = accuracy_score(df_twitter_test.airline_sentiment, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < 0.79
def feature_learning_getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
    # NOTE: it is bad practice to pass our training set in as the fl_data set, but we don't have enough data to do it any other way
df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)
ml_predictor.train(df_titanic_train, model_names=model_name, feature_learning=True, fl_data=fl_data)
file_name = ml_predictor.save(str(random.random()))
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
    except OSError:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.16
if model_name == 'DeepLearningClassifier':
lower_bound = -0.187
assert lower_bound < first_score < -0.133
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -0.133
def feature_learning_categorical_ensembling_getting_single_predictions_classification(model_name=None):
np.random.seed(0)
df_titanic_train, df_titanic_test = utils.get_titanic_binary_classification_dataset()
column_descriptions = {
'survived': 'output'
, 'sex': 'categorical'
, 'embarked': 'categorical'
, 'pclass': 'categorical'
}
ml_predictor = Predictor(type_of_estimator='classifier', column_descriptions=column_descriptions)
    # NOTE: it is bad practice to pass our training set in as the fl_data set, but we don't have enough data to do it any other way
df_titanic_train, fl_data = train_test_split(df_titanic_train, test_size=0.2)
ml_predictor.train_categorical_ensemble(df_titanic_train, model_names=model_name, feature_learning=True, fl_data=fl_data, categorical_column='embarked')
file_name = ml_predictor.save(str(random.random()))
from auto_ml.utils_models import load_ml_model
saved_ml_pipeline = load_ml_model(file_name)
os.remove(file_name)
try:
keras_file_name = file_name[:-5] + '_keras_deep_learning_model.h5'
os.remove(keras_file_name)
    except OSError:
pass
df_titanic_test_dictionaries = df_titanic_test.to_dict('records')
# 1. make sure the accuracy is the same
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
first_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('first_score')
print(first_score)
# Make sure our score is good, but not unreasonably good
lower_bound = -0.17
if model_name == 'DeepLearningClassifier':
lower_bound = -0.245
if model_name == 'CatBoostClassifier':
lower_bound = -0.265
assert lower_bound < first_score < -0.147
# 2. make sure the speed is reasonable (do it a few extra times)
data_length = len(df_titanic_test_dictionaries)
start_time = datetime.datetime.now()
for idx in range(1000):
row_num = idx % data_length
saved_ml_pipeline.predict(df_titanic_test_dictionaries[row_num])
end_time = datetime.datetime.now()
duration = end_time - start_time
print('duration.total_seconds()')
print(duration.total_seconds())
# It's very difficult to set a benchmark for speed that will work across all machines.
# On my 2013 bottom of the line 15" MacBook Pro, this runs in about 0.8 seconds for 1000 predictions
# That's about 1 millisecond per prediction
# Assuming we might be running on a test box that's pretty weak, multiply by 3
# Also make sure we're not running unreasonably quickly
assert 0.2 < duration.total_seconds() < 60
# 3. make sure we're not modifying the dictionaries (the score is the same after running a few experiments as it is the first time)
predictions = []
for row in df_titanic_test_dictionaries:
predictions.append(saved_ml_pipeline.predict_proba(row)[1])
print('predictions')
print(predictions)
print('df_titanic_test_dictionaries')
print(df_titanic_test_dictionaries)
second_score = utils.calculate_brier_score_loss(df_titanic_test.survived, predictions)
print('second_score')
print(second_score)
# Make sure our score is good, but not unreasonably good
assert lower_bound < second_score < -0.147
|
from flexx import flx
class Drawing(flx.CanvasWidget):
CSS = """
.flx-Drawing {background: #fff; border: 5px solid #000;}
"""
def init(self):
super().init()
self.ctx = self.node.getContext('2d')
self._last_pos = {}
# Set mouse capturing mode
self.set_capture_mouse(1)
# Label to show info about the event
self.label = flx.Label()
def show_event(self, ev):
if -1 in ev.touches: # Mouse
t = 'mouse pos: {:.0f} {:.0f} buttons: {}'
self.label.set_text(t.format(ev.pos[0], ev.pos[1], ev.buttons))
else: # Touch
self.label.set_text('Touch ids: {}'.format(ev.touches.keys()))
@flx.reaction('pointer_move')
def on_move(self, *events):
for ev in events:
self.show_event(ev)
# Effective way to only draw if mouse is down, but disabled for
# sake of example. Not necessary if capture_mouse == 1.
# if 1 not in ev.buttons:
# return
# One can simply use ev.pos, but let's support multi-touch here!
# Mouse events also have touches, with a touch_id of -1.
for touch_id in ev.touches:
x, y, force = ev.touches[touch_id]
self.ctx.beginPath()
self.ctx.strokeStyle = '#080'
self.ctx.lineWidth = 3
self.ctx.lineCap = 'round'
self.ctx.moveTo(*self._last_pos[touch_id])
self.ctx.lineTo(x, y)
self.ctx.stroke()
self._last_pos[touch_id] = x, y
@flx.reaction('pointer_down')
def on_down(self, *events):
for ev in events:
self.show_event(ev)
for touch_id in ev.touches:
x, y, force = ev.touches[touch_id]
self.ctx.beginPath()
self.ctx.fillStyle = '#f00'
self.ctx.arc(x, y, 3, 0, 6.2831)
self.ctx.fill()
self._last_pos[touch_id] = x, y
@flx.reaction('pointer_up')
def on_up(self, *events):
for ev in events:
self.show_event(ev)
for touch_id in ev.touches:
x, y, force = ev.touches[touch_id]
self.ctx.beginPath()
self.ctx.fillStyle = '#00f'
self.ctx.arc(x, y, 3, 0, 6.2831)
self.ctx.fill()
class Main(flx.Widget):
""" Embed in larger widget to test offset.
"""
CSS = """
.flx-Main {background: #eee;}
"""
def init(self):
with flx.VFix():
flx.Widget(flex=1)
with flx.HFix(flex=2):
flx.Widget(flex=1)
Drawing(flex=2)
flx.Widget(flex=1)
flx.Widget(flex=1)
if __name__ == '__main__':
a = flx.App(Main)
m = a.launch('firefox-browser')
flx.start()
|
from rebulk.loose import ensure_list
from .score import get_equivalent_release_groups, score_keys
from .video import Episode, Movie
from .utils import sanitize, sanitize_release_group
def series_matches(video, title=None, **kwargs):
"""Whether the `video` matches the series title.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
:param str title: the series name.
:return: whether there's a match
:rtype: bool
"""
if isinstance(video, Episode):
return video.series and sanitize(title) in (
sanitize(name) for name in [video.series] + video.alternative_series
)
def title_matches(video, title=None, episode_title=None, **kwargs):
"""Whether the movie matches the movie `title` or the series matches the `episode_title`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
:param str title: the movie title.
:param str episode_title: the series episode title.
:return: whether there's a match
:rtype: bool
"""
if isinstance(video, Episode):
return video.title and sanitize(episode_title) == sanitize(video.title)
if isinstance(video, Movie):
return video.title and sanitize(title) == sanitize(video.title)
def season_matches(video, season=None, **kwargs):
"""Whether the episode matches the `season`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
:param int season: the episode season.
:return: whether there's a match
:rtype: bool
"""
if isinstance(video, Episode):
return video.season and season == video.season
def episode_matches(video, episode=None, **kwargs):
"""Whether the episode matches the `episode`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
    :param episode: the episode number(s).
:type: list of int or int
:return: whether there's a match
:rtype: bool
"""
if isinstance(video, Episode):
return video.episodes and ensure_list(episode) == video.episodes
def year_matches(video, year=None, partial=False, **kwargs):
"""Whether the video matches the `year`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
:param int year: the video year.
:param bool partial: whether or not the guess is partial.
:return: whether there's a match
:rtype: bool
"""
if video.year and year == video.year:
return True
if isinstance(video, Episode):
# count "no year" as an information
return not partial and video.original_series and not year
def country_matches(video, country=None, partial=False, **kwargs):
"""Whether the video matches the `country`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
:param country: the video country.
:type country: :class:`~babelfish.country.Country`
:param bool partial: whether or not the guess is partial.
:return: whether there's a match
:rtype: bool
"""
if video.country and country == video.country:
return True
if isinstance(video, Episode):
# count "no country" as an information
return not partial and video.original_series and not country
if isinstance(video, Movie):
# count "no country" as an information
return not video.country and not country
def release_group_matches(video, release_group=None, **kwargs):
"""Whether the video matches the `release_group`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
:param str release_group: the video release group.
:return: whether there's a match
:rtype: bool
"""
return (video.release_group and release_group and
any(r in sanitize_release_group(release_group)
for r in get_equivalent_release_groups(sanitize_release_group(video.release_group))))
def streaming_service_matches(video, streaming_service=None, **kwargs):
"""Whether the video matches the `streaming_service`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
    :param str streaming_service: the video streaming service.
:return: whether there's a match
:rtype: bool
"""
return video.streaming_service and streaming_service == video.streaming_service
def resolution_matches(video, screen_size=None, **kwargs):
"""Whether the video matches the `resolution`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
    :param str screen_size: the video resolution.
:return: whether there's a match
:rtype: bool
"""
return video.resolution and screen_size == video.resolution
def source_matches(video, source=None, **kwargs):
"""Whether the video matches the `source`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
    :param str source: the video source.
:return: whether there's a match
:rtype: bool
"""
return video.source and source == video.source
def video_codec_matches(video, video_codec=None, **kwargs):
"""Whether the video matches the `video_codec`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
    :param str video_codec: the video codec.
:return: whether there's a match
:rtype: bool
"""
return video.video_codec and video_codec == video.video_codec
def audio_codec_matches(video, audio_codec=None, **kwargs):
"""Whether the video matches the `audio_codec`.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
    :param str audio_codec: the video audio codec.
:return: whether there's a match
:rtype: bool
"""
return video.audio_codec and audio_codec == video.audio_codec
#: Available matches functions
matches_manager = {
'series': series_matches,
'title': title_matches,
'season': season_matches,
'episode': episode_matches,
'year': year_matches,
'country': country_matches,
'release_group': release_group_matches,
'streaming_service': streaming_service_matches,
'resolution': resolution_matches,
'source': source_matches,
'video_codec': video_codec_matches,
'audio_codec': audio_codec_matches
}
def guess_matches(video, guess, partial=False):
"""Get matches between a `video` and a `guess`.
If a guess is `partial`, the absence information won't be counted as a match.
:param video: the video.
:type video: :class:`~subliminal.video.Video`
:param guess: the guess.
:type guess: dict
:param bool partial: whether or not the guess is partial.
:return: matches between the `video` and the `guess`.
:rtype: set
"""
matches = set()
for key in score_keys:
if key in matches_manager and matches_manager[key](video, partial=partial, **guess):
matches.add(key)
return matches
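# Illustrative usage sketch (not part of the original module). The Movie
# constructor keyword arguments below are assumptions about
# subliminal.video.Movie; adjust them if the real signature differs.
if __name__ == '__main__':
    movie = Movie(
        'Man.of.Steel.2013.720p.BluRay.x264-Felony.mkv',
        'Man of Steel',
        year=2013,
        resolution='720p',
    )
    guess = {'title': 'Man of Steel', 'year': 2013, 'screen_size': '720p'}
    # Only keys present in both `score_keys` and `matches_manager` can match,
    # so this should print something like {'title', 'year', 'resolution'}.
    print(guess_matches(movie, guess))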
|
import pandas as pd
from scattertext.Corpus import Corpus
class CorpusDF(Corpus):
def __init__(self,
df,
X,
mX,
y,
text_col,
term_idx_store,
category_idx_store,
metadata_idx_store,
unigram_frequency_path=None):
'''
Parameters
----------
X : csr_matrix
term document matrix
mX : csr_matrix
metadata-document matrix
y : np.array
category index array
term_idx_store : IndexStore
Term indices
category_idx_store : IndexStore
            Category indices
metadata_idx_store : IndexStore
Document metadata indices
text_col: np.array or pd.Series
Raw texts
unigram_frequency_path : str or None
Path to term frequency file.
'''
self._df = df
self._text_col = text_col
Corpus.__init__(self,
X,
mX,
y,
term_idx_store,
category_idx_store,
metadata_idx_store,
df[text_col],
unigram_frequency_path)
def get_texts(self):
'''
Returns
-------
pd.Series, all raw documents
'''
return self._df[self._text_col]
def get_df(self):
return self._df
def search(self, ngram):
'''
Parameters
----------
ngram, str or unicode, string to search for
Returns
-------
pd.DataFrame, {self._parsed_col: <matching texts>, self._category_col: <corresponding categories>, ...}
'''
mask = self._document_index_mask(ngram)
return self._df[mask]
def _make_new_term_doc_matrix(self,
new_X=None,
new_mX=None,
new_y=None,
new_term_idx_store=None,
new_category_idx_store=None,
new_metadata_idx_store=None,
new_y_mask=None):
return CorpusDF(
df=self._df[new_y_mask] if new_y_mask is not None else self._df,
X=new_X if new_X is not None else self._X,
mX=new_mX if new_mX is not None else self._mX,
y=new_y if new_y is not None else self._y,
term_idx_store=new_term_idx_store if new_term_idx_store is not None else self._term_idx_store,
category_idx_store=new_category_idx_store if new_category_idx_store is not None else self._category_idx_store,
metadata_idx_store=new_metadata_idx_store if new_metadata_idx_store is not None else self._metadata_idx_store,
text_col=self._text_col,
unigram_frequency_path=self._unigram_frequency_path
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import functools
import logging
import operator
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import tomcat
from perfkitbenchmarker.linux_packages import wrk
import six
import six.moves.urllib.parse
flags.DEFINE_integer('tomcat_wrk_test_length', 120,
'Length of time, in seconds, to run wrk for each '
                     'connection count', lower_bound=1)
flags.DEFINE_integer('tomcat_wrk_max_connections', 128,
'Maximum number of simultaneous connections to attempt',
lower_bound=1)
flags.DEFINE_boolean('tomcat_wrk_report_all_samples', False,
'If true, report throughput/latency at all connection '
'counts. If false (the default), report only the '
'connection counts with lowest p50 latency and highest '
'throughput.')
# Stop when >= 1% of requests have errors
MAX_ERROR_RATE = 0.01
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'tomcat_wrk'
BENCHMARK_CONFIG = """
tomcat_wrk:
description: Run wrk against tomcat.
vm_groups:
server:
vm_spec: *default_single_core
client:
vm_spec: *default_single_core
"""
MAX_OPEN_FILES = 65536
WARM_UP_DURATION = 30
# Target: simple sample page that generates an SVG.
SAMPLE_PAGE_PATH = 'examples/jsp/jsp2/jspx/textRotate.jspx?name=JSPX'
NOFILE_LIMIT_CONF = '/etc/security/limits.d/pkb-tomcat.conf'
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _IncreaseMaxOpenFiles(vm):
vm.RemoteCommand(('echo "{0} soft nofile {1}\n{0} hard nofile {1}" | '
'sudo tee {2}').format(vm.user_name, MAX_OPEN_FILES,
NOFILE_LIMIT_CONF))
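# For reference, the limits file written above ends up with two lines of the
# form "<vm user> soft nofile 65536" and "<vm user> hard nofile 65536".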
def _RemoveOpenFileLimit(vm):
vm.RemoteCommand('sudo rm -f {0}'.format(NOFILE_LIMIT_CONF))
def _PrepareServer(vm):
"""Installs tomcat on the server."""
vm.Install('tomcat')
_IncreaseMaxOpenFiles(vm)
tomcat.Start(vm)
def _PrepareClient(vm):
"""Install wrk on the client VM."""
_IncreaseMaxOpenFiles(vm)
vm.Install('curl')
vm.Install('wrk')
def Prepare(benchmark_spec):
"""Install tomcat on one VM and wrk on another.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
tomcat_vm = benchmark_spec.vm_groups['server'][0]
wrk_vm = benchmark_spec.vm_groups['client'][0]
tomcat_vm.AllowPort(tomcat.TOMCAT_HTTP_PORT)
vm_util.RunThreaded((lambda f: f()),
[functools.partial(_PrepareServer, tomcat_vm),
functools.partial(_PrepareClient, wrk_vm)])
def Run(benchmark_spec):
"""Run wrk against tomcat.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
tomcat_vm = benchmark_spec.vm_groups['server'][0]
wrk_vm = benchmark_spec.vm_groups['client'][0]
samples = []
errors = 0
connections = 1
duration = FLAGS.tomcat_wrk_test_length
max_connections = FLAGS.tomcat_wrk_max_connections
target = six.moves.urllib.parse.urljoin(
'http://{0}:{1}'.format(tomcat_vm.ip_address,
tomcat.TOMCAT_HTTP_PORT),
SAMPLE_PAGE_PATH)
logging.info('Warming up for %ds', WARM_UP_DURATION)
list(wrk.Run(wrk_vm, connections=1, target=target, duration=WARM_UP_DURATION))
all_by_metric = []
while connections <= max_connections:
run_samples = list(wrk.Run(wrk_vm, connections=connections, target=target,
duration=duration))
by_metric = {i.metric: i for i in run_samples}
errors = by_metric['errors'].value
requests = by_metric['requests'].value
throughput = by_metric['throughput'].value
if requests < 1:
logging.warn('No requests issued for %d connections.',
connections)
error_rate = 1.0
else:
error_rate = float(errors) / requests
if error_rate <= MAX_ERROR_RATE:
all_by_metric.append(by_metric)
else:
logging.warn('Error rate exceeded maximum (%g > %g)', error_rate,
MAX_ERROR_RATE)
logging.info('Ran with %d connections; %.2f%% errors, %.2f req/s',
connections, error_rate, throughput)
# Retry with double the connections
connections *= 2
if not all_by_metric:
raise ValueError('No requests succeeded.')
# Annotate the sample with the best throughput
max_throughput = max(all_by_metric, key=lambda x: x['throughput'].value)
for sample in six.itervalues(max_throughput):
sample.metadata.update(best_throughput=True)
# ...and best 50th percentile latency
min_p50 = min(all_by_metric, key=lambda x: x['p50 latency'].value)
for sample in six.itervalues(min_p50):
sample.metadata.update(best_p50=True)
sort_key = operator.attrgetter('metric')
if FLAGS.tomcat_wrk_report_all_samples:
samples = [sample for d in all_by_metric
for sample in sorted(six.itervalues(d), key=sort_key)]
else:
samples = (sorted(six.itervalues(min_p50), key=sort_key) +
sorted(six.itervalues(max_throughput), key=sort_key))
for sample in samples:
sample.metadata.update(ip_type='external', runtime_in_seconds=duration)
return samples
def Cleanup(benchmark_spec):
"""Remove tomcat and wrk.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
tomcat_vm = benchmark_spec.vm_groups['server'][0]
tomcat.Stop(tomcat_vm)
vm_util.RunThreaded(_RemoveOpenFileLimit, benchmark_spec.vms)
|
from __future__ import absolute_import
import argparse
import os
import io
import json
import logging
import sys
import errno
import hashlib
import math
import shutil
import tempfile
from functools import partial
if sys.version_info[0] == 2:
import urllib
from urllib2 import urlopen
else:
import urllib.request as urllib
from urllib.request import urlopen
_DEFAULT_BASE_DIR = os.path.expanduser('~/gensim-data')
BASE_DIR = os.environ.get('GENSIM_DATA_DIR', _DEFAULT_BASE_DIR)
"""The default location to store downloaded data.
You may override this with the GENSIM_DATA_DIR environment variable.
"""
_PARENT_DIR = os.path.abspath(os.path.join(BASE_DIR, '..'))
base_dir = BASE_DIR # for backward compatibility with some of our test data
logger = logging.getLogger(__name__)
DATA_LIST_URL = "https://raw.githubusercontent.com/RaRe-Technologies/gensim-data/master/list.json"
DOWNLOAD_BASE_URL = "https://github.com/RaRe-Technologies/gensim-data/releases/download"
def _progress(chunks_downloaded, chunk_size, total_size, part=1, total_parts=1):
"""Reporthook for :func:`urllib.urlretrieve`, code from [1]_.
Parameters
----------
chunks_downloaded : int
Number of chunks of data that have been downloaded.
chunk_size : int
Size of each chunk of data.
total_size : int
Total size of the dataset/model.
part : int, optional
        Number of the current part; used only when `total_parts` > 1.
total_parts : int, optional
Total number of parts.
References
----------
[1] https://gist.github.com/vladignatyev/06860ec2040cb497f0f3
"""
bar_len = 50
size_downloaded = float(chunks_downloaded * chunk_size)
filled_len = int(math.floor((bar_len * size_downloaded) / total_size))
percent_downloaded = round(((size_downloaded * 100) / total_size), 1)
bar = '=' * filled_len + '-' * (bar_len - filled_len)
if total_parts == 1:
sys.stdout.write(
'\r[%s] %s%s %s/%sMB downloaded' % (
bar, percent_downloaded, "%",
round(size_downloaded / (1024 * 1024), 1),
round(float(total_size) / (1024 * 1024), 1))
)
sys.stdout.flush()
else:
sys.stdout.write(
'\r Part %s/%s [%s] %s%s %s/%sMB downloaded' % (
part + 1, total_parts, bar, percent_downloaded, "%",
round(size_downloaded / (1024 * 1024), 1),
round(float(total_size) / (1024 * 1024), 1))
)
sys.stdout.flush()
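# Illustrative call (single-part download): _progress(50, 1024, 102400) redraws
# the current line with a half-filled 50-character bar and "50.0%" downloaded.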
def _create_base_dir():
"""Create the gensim-data directory in home directory, if it has not been already created.
Raises
------
Exception
An exception is raised when read/write permissions are not available or a file named gensim-data
already exists in the home directory.
"""
if not os.path.isdir(BASE_DIR):
try:
logger.info("Creating %s", BASE_DIR)
os.makedirs(BASE_DIR)
except OSError as e:
if e.errno == errno.EEXIST:
raise Exception(
"Not able to create folder gensim-data in {}. File gensim-data "
"exists in the directory already.".format(_PARENT_DIR)
)
else:
raise Exception(
"Can't create {}. Make sure you have the read/write permissions "
"to the directory or you can try creating the folder manually"
.format(BASE_DIR)
)
def _calculate_md5_checksum(fname):
"""Calculate the checksum of the file, exactly same as md5-sum linux util.
Parameters
----------
fname : str
Path to the file.
Returns
-------
str
        MD5 hash of the file named `fname`.
"""
hash_md5 = hashlib.md5()
with open(fname, "rb") as f:
for chunk in iter(lambda: f.read(4096), b""):
hash_md5.update(chunk)
return hash_md5.hexdigest()
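# Sanity check (illustrative, not part of the original module): hashing a file
# whose contents are exactly b"hello" yields
# '5d41402abc4b2a76b9719d911017c592', the same digest `md5sum` reports.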
def _load_info(url=DATA_LIST_URL, encoding='utf-8'):
"""Load dataset information from the network.
If the network access fails, fall back to a local cache. This cache gets
updated each time a network request _succeeds_.
"""
cache_path = os.path.join(BASE_DIR, 'information.json')
_create_base_dir()
try:
info_bytes = urlopen(url).read()
except (OSError, IOError):
#
# The exception raised by urlopen differs between Py2 and Py3.
#
# https://docs.python.org/3/library/urllib.error.html
# https://docs.python.org/2/library/urllib.html
#
logger.exception(
'caught non-fatal exception while trying to update gensim-data cache from %r; '
'using local cache at %r instead', url, cache_path
)
else:
with open(cache_path, 'wb') as fout:
fout.write(info_bytes)
try:
#
# We need io.open here because Py2 open doesn't support encoding keyword
#
with io.open(cache_path, 'r', encoding=encoding) as fin:
return json.load(fin)
except IOError:
raise ValueError(
'unable to read local cache %r during fallback, '
'connect to the Internet and retry' % cache_path
)
def info(name=None, show_only_latest=True, name_only=False):
"""Provide the information related to model/dataset.
Parameters
----------
name : str, optional
Name of model/dataset. If not set - shows all available data.
show_only_latest : bool, optional
        If the storage contains several versions of one dataset/model, this flag hides the outdated versions.
        Only applies when `name` is None.
name_only : bool, optional
If True, will return only the names of available models and corpora.
Returns
-------
dict
Detailed information about one or all models/datasets.
        If `name` is specified, return full information about that dataset/model;
        otherwise, return information about all available datasets/models.
Raises
------
    ValueError
        If the `name` passed is incorrect.
Examples
--------
.. sourcecode:: pycon
>>> import gensim.downloader as api
>>> api.info("text8") # retrieve information about text8 dataset
{u'checksum': u'68799af40b6bda07dfa47a32612e5364',
u'description': u'Cleaned small sample from wikipedia',
u'file_name': u'text8.gz',
u'parts': 1,
u'source': u'http://mattmahoney.net/dc/text8.zip'}
>>>
>>> api.info() # retrieve information about all available datasets and models
"""
information = _load_info()
if name is not None:
corpora = information['corpora']
models = information['models']
if name in corpora:
return information['corpora'][name]
elif name in models:
return information['models'][name]
else:
raise ValueError("Incorrect model/corpus name")
if not show_only_latest:
return information
if name_only:
return {"corpora": list(information['corpora'].keys()), "models": list(information['models'])}
return {
"corpora": {name: data for (name, data) in information['corpora'].items() if data.get("latest", True)},
"models": {name: data for (name, data) in information['models'].items() if data.get("latest", True)}
}
def _get_checksum(name, part=None):
"""Retrieve the checksum of the model/dataset from gensim-data repository.
Parameters
----------
name : str
Dataset/model name.
part : int, optional
Number of part (for multipart data only).
Returns
-------
str
Retrieved checksum of dataset/model.
"""
information = info()
corpora = information['corpora']
models = information['models']
if part is None:
if name in corpora:
return information['corpora'][name]["checksum"]
elif name in models:
return information['models'][name]["checksum"]
else:
if name in corpora:
return information['corpora'][name]["checksum-{}".format(part)]
elif name in models:
return information['models'][name]["checksum-{}".format(part)]
def _get_parts(name):
"""Retrieve the number of parts in which dataset/model has been split.
Parameters
----------
name: str
Dataset/model name.
Returns
-------
int
Number of parts in which dataset/model has been split.
"""
information = info()
corpora = information['corpora']
models = information['models']
if name in corpora:
return information['corpora'][name]["parts"]
elif name in models:
return information['models'][name]["parts"]
def _download(name):
"""Download and extract the dataset/model.
Parameters
----------
name: str
Dataset/model name which has to be downloaded.
Raises
------
Exception
If md5sum on client and in repo are different.
"""
url_load_file = "{base}/{fname}/__init__.py".format(base=DOWNLOAD_BASE_URL, fname=name)
data_folder_dir = os.path.join(BASE_DIR, name)
data_folder_dir_tmp = data_folder_dir + '_tmp'
tmp_dir = tempfile.mkdtemp()
init_path = os.path.join(tmp_dir, "__init__.py")
urllib.urlretrieve(url_load_file, init_path)
total_parts = _get_parts(name)
if total_parts > 1:
concatenated_folder_name = "{fname}.gz".format(fname=name)
concatenated_folder_dir = os.path.join(tmp_dir, concatenated_folder_name)
for part in range(0, total_parts):
url_data = "{base}/{fname}/{fname}.gz_0{part}".format(base=DOWNLOAD_BASE_URL, fname=name, part=part)
fname = "{f}.gz_0{p}".format(f=name, p=part)
dst_path = os.path.join(tmp_dir, fname)
urllib.urlretrieve(
url_data, dst_path,
reporthook=partial(_progress, part=part, total_parts=total_parts)
)
if _calculate_md5_checksum(dst_path) == _get_checksum(name, part):
sys.stdout.write("\n")
sys.stdout.flush()
logger.info("Part %s/%s downloaded", part + 1, total_parts)
else:
shutil.rmtree(tmp_dir)
raise Exception("Checksum comparison failed, try again")
with open(concatenated_folder_dir, 'wb') as wfp:
for part in range(0, total_parts):
part_path = os.path.join(tmp_dir, "{fname}.gz_0{part}".format(fname=name, part=part))
with open(part_path, "rb") as rfp:
shutil.copyfileobj(rfp, wfp)
os.remove(part_path)
else:
url_data = "{base}/{fname}/{fname}.gz".format(base=DOWNLOAD_BASE_URL, fname=name)
fname = "{fname}.gz".format(fname=name)
dst_path = os.path.join(tmp_dir, fname)
urllib.urlretrieve(url_data, dst_path, reporthook=_progress)
if _calculate_md5_checksum(dst_path) == _get_checksum(name):
sys.stdout.write("\n")
sys.stdout.flush()
logger.info("%s downloaded", name)
else:
shutil.rmtree(tmp_dir)
raise Exception("Checksum comparison failed, try again")
if os.path.exists(data_folder_dir_tmp):
os.remove(data_folder_dir_tmp)
shutil.move(tmp_dir, data_folder_dir_tmp)
os.rename(data_folder_dir_tmp, data_folder_dir)
def _get_filename(name):
"""Retrieve the filename of the dataset/model.
Parameters
----------
name: str
Name of dataset/model.
Returns
-------
str:
Filename of the dataset/model.
"""
information = info()
corpora = information['corpora']
models = information['models']
if name in corpora:
return information['corpora'][name]["file_name"]
elif name in models:
return information['models'][name]["file_name"]
def load(name, return_path=False):
"""Download (if needed) dataset/model and load it to memory (unless `return_path` is set).
Parameters
----------
name: str
Name of the model/dataset.
return_path: bool, optional
If True, return full path to file, otherwise, return loaded model / iterable dataset.
Returns
-------
Model
Requested model, if `name` is model and `return_path` == False.
Dataset (iterable)
Requested dataset, if `name` is dataset and `return_path` == False.
str
Path to file with dataset / model, only when `return_path` == True.
Raises
------
Exception
Raised if `name` is incorrect.
Examples
--------
Model example:
.. sourcecode:: pycon
>>> import gensim.downloader as api
>>>
>>> model = api.load("glove-twitter-25") # load glove vectors
>>> model.most_similar("cat") # show words that similar to word 'cat'
Dataset example:
.. sourcecode:: pycon
>>> import gensim.downloader as api
>>>
>>> wiki = api.load("wiki-en") # load extracted Wikipedia dump, around 6 Gb
        >>> for article in wiki:  # iterate over all wiki articles
>>> pass
Download only example:
.. sourcecode:: pycon
>>> import gensim.downloader as api
>>>
>>> print(api.load("wiki-en", return_path=True)) # output: /home/user/gensim-data/wiki-en/wiki-en.gz
"""
_create_base_dir()
file_name = _get_filename(name)
if file_name is None:
raise ValueError("Incorrect model/corpus name")
folder_dir = os.path.join(BASE_DIR, name)
path = os.path.join(folder_dir, file_name)
if not os.path.exists(folder_dir):
_download(name)
if return_path:
return path
else:
sys.path.insert(0, BASE_DIR)
module = __import__(name)
return module.load_data()
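# Note: the `__init__.py` imported above is the file fetched by _download() for
# each dataset/model, so every downloaded package is expected to expose a
# load_data() callable returning the model or an iterable over the dataset.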
if __name__ == '__main__':
logging.basicConfig(
format='%(asctime)s : %(name)s : %(levelname)s : %(message)s', stream=sys.stdout, level=logging.INFO
)
parser = argparse.ArgumentParser(
description="Gensim console API",
usage="python -m gensim.api.downloader [-h] [-d data_name | -i data_name]"
)
group = parser.add_mutually_exclusive_group()
group.add_argument(
"-d", "--download", metavar="data_name", nargs=1,
help="To download a corpus/model : python -m gensim.downloader -d <dataname>"
)
full_information = 1
group.add_argument(
"-i", "--info", metavar="data_name", nargs='?', const=full_information,
help="To get information about a corpus/model : python -m gensim.downloader -i <dataname>"
)
args = parser.parse_args()
if args.download is not None:
data_path = load(args.download[0], return_path=True)
logger.info("Data has been installed and data path is %s", data_path)
elif args.info is not None:
if args.info == 'name':
print(json.dumps(info(name_only=True), indent=4))
else:
output = info() if (args.info == full_information) else info(name=args.info)
print(json.dumps(output, indent=4))
|
import unittest
import mock
from Tests.utils.utils import get_test_path
from kalliope import ResourcesManager
from kalliope.core.Models import Resources
from kalliope.core.Models.Dna import Dna
class TestResourcesmanager(unittest.TestCase):
def setUp(self):
pass
def tearDown(self):
pass
def test_is_settings_ok(self):
# -----------------
# valid resource
# -----------------
# valid neuron
valid_resource = Resources()
valid_resource.neuron_folder = "/path"
dna = Dna()
dna.module_type = "neuron"
self.assertTrue(ResourcesManager.is_settings_ok(valid_resource, dna))
# valid stt
valid_resource = Resources()
valid_resource.stt_folder = "/path"
dna = Dna()
dna.module_type = "stt"
self.assertTrue(ResourcesManager.is_settings_ok(valid_resource, dna))
# valid tts
valid_resource = Resources()
valid_resource.tts_folder = "/path"
dna = Dna()
dna.module_type = "tss"
self.assertTrue(ResourcesManager.is_settings_ok(valid_resource, dna))
# valid trigger
valid_resource = Resources()
valid_resource.trigger_folder = "/path"
dna = Dna()
dna.module_type = "trigger"
self.assertTrue(ResourcesManager.is_settings_ok(valid_resource, dna))
# valid signal
valid_resource = Resources()
valid_resource.signal_folder = "/path"
dna = Dna()
dna.module_type = "signal"
self.assertTrue(ResourcesManager.is_settings_ok(valid_resource, dna))
# -----------------
# invalid resource
# -----------------
        # invalid neuron
valid_resource = Resources()
valid_resource.neuron_folder = None
dna = Dna()
dna.module_type = "neuron"
self.assertFalse(ResourcesManager.is_settings_ok(valid_resource, dna))
        # invalid stt
valid_resource = Resources()
valid_resource.stt_folder = None
dna = Dna()
dna.module_type = "stt"
self.assertFalse(ResourcesManager.is_settings_ok(valid_resource, dna))
        # invalid tts
valid_resource = Resources()
valid_resource.tts_folder = None
dna = Dna()
dna.module_type = "tts"
self.assertFalse(ResourcesManager.is_settings_ok(valid_resource, dna))
        # invalid trigger
valid_resource = Resources()
valid_resource.trigger_folder = None
dna = Dna()
dna.module_type = "trigger"
self.assertFalse(ResourcesManager.is_settings_ok(valid_resource, dna))
        # invalid signal
valid_resource = Resources()
valid_resource.signal_folder = None
dna = Dna()
dna.module_type = "signal"
self.assertFalse(ResourcesManager.is_settings_ok(valid_resource, dna))
def test_is_repo_ok(self):
# valid repo
dna_file_path = get_test_path("modules/dna.yml")
install_file_path = get_test_path("modules/install.yml")
self.assertTrue(ResourcesManager.is_repo_ok(dna_file_path=dna_file_path, install_file_path=install_file_path))
# missing dna
dna_file_path = ""
install_file_path = get_test_path("modules/install.yml")
self.assertFalse(ResourcesManager.is_repo_ok(dna_file_path=dna_file_path, install_file_path=install_file_path))
# missing install
dna_file_path = get_test_path("modules/dna.yml")
install_file_path = ""
self.assertFalse(ResourcesManager.is_repo_ok(dna_file_path=dna_file_path, install_file_path=install_file_path))
def test_get_target_folder(self):
# test get neuron folder
resources = Resources()
resources.neuron_folder = '/var/tmp/test/resources'
self.assertEqual(ResourcesManager._get_target_folder(resources, "neuron"), "/var/tmp/test/resources")
# test get stt folder
resources = Resources()
resources.stt_folder = '/var/tmp/test/resources'
self.assertEqual(ResourcesManager._get_target_folder(resources, "stt"), "/var/tmp/test/resources")
# test get tts folder
resources = Resources()
resources.tts_folder = '/var/tmp/test/resources'
self.assertEqual(ResourcesManager._get_target_folder(resources, "tts"), "/var/tmp/test/resources")
# test get trigger folder
resources = Resources()
resources.trigger_folder = '/var/tmp/test/resources'
self.assertEqual(ResourcesManager._get_target_folder(resources, "trigger"), "/var/tmp/test/resources")
# test get signal folder
resources = Resources()
resources.signal_folder = '/var/tmp/test/resources'
self.assertEqual(ResourcesManager._get_target_folder(resources, "signal"), "/var/tmp/test/resources")
# test get non existing resource
resources = Resources()
self.assertIsNone(ResourcesManager._get_target_folder(resources, "not_existing"))
def test_check_supported_version(self):
# version ok
current_version = '0.4.0'
supported_version = ['0.4', '0.3', '0.2']
self.assertTrue(ResourcesManager._check_supported_version(current_version=current_version,
supported_versions=supported_version))
# version ok
current_version = '11.23.0'
supported_version = ['11.23', '12.3', '2.23']
self.assertTrue(ResourcesManager._check_supported_version(current_version=current_version,
supported_versions=supported_version))
        # version not supported, the user is asked whether to continue
# Testing with integer values instead of string
current_version = '0.4.0'
supported_version = [0.3, 0.2]
with mock.patch('kalliope.Utils.query_yes_no', return_value=True):
self.assertTrue(ResourcesManager._check_supported_version(current_version=current_version,
supported_versions=supported_version))
with mock.patch('kalliope.Utils.query_yes_no', return_value=False):
self.assertFalse(ResourcesManager._check_supported_version(current_version=current_version,
supported_versions=supported_version))
|
import asyncio
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .parent_device import WiLightParent
# List the platforms that you want to support.
PLATFORMS = ["light"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the WiLight with Config Flow component."""
hass.data[DOMAIN] = {}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up a wilight config entry."""
parent = WiLightParent(hass, entry)
if not await parent.async_setup():
raise ConfigEntryNotReady
hass.data[DOMAIN][entry.entry_id] = parent
# Set up all platforms for this device/entry.
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload WiLight config entry."""
# Unload entities for this entry/device.
await asyncio.gather(
*(
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
)
)
# Cleanup
parent = hass.data[DOMAIN][entry.entry_id]
await parent.async_reset()
del hass.data[DOMAIN][entry.entry_id]
return True
class WiLightDevice(Entity):
"""Representation of a WiLight device.
Contains the common logic for WiLight entities.
"""
def __init__(self, api_device, index, item_name):
"""Initialize the device."""
# WiLight specific attributes for every component type
self._device_id = api_device.device_id
self._sw_version = api_device.swversion
self._client = api_device.client
self._model = api_device.model
self._name = item_name
self._index = index
self._unique_id = f"{self._device_id}_{self._index}"
self._status = {}
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def name(self):
"""Return a name for this WiLight item."""
return self._name
@property
def unique_id(self):
"""Return the unique ID for this WiLight item."""
return self._unique_id
@property
def device_info(self):
"""Return the device info."""
return {
"name": self._name,
"identifiers": {(DOMAIN, self._unique_id)},
"model": self._model,
"manufacturer": "WiLight",
"sw_version": self._sw_version,
"via_device": (DOMAIN, self._device_id),
}
@property
def available(self):
"""Return True if entity is available."""
return bool(self._client.is_connected)
@callback
def handle_event_callback(self, states):
"""Propagate changes through ha."""
self._status = states
self.async_write_ha_state()
async def async_update(self):
"""Synchronize state with api_device."""
await self._client.status(self._index)
async def async_added_to_hass(self):
"""Register update callback."""
self._client.register_status_callback(self.handle_event_callback, self._index)
await self._client.status(self._index)
|
from flask import current_app
from lemur.exceptions import InvalidConfiguration
# inspired by https://github.com/getsentry/sentry
class InstanceManager(object):
def __init__(self, class_list=None, instances=True):
if class_list is None:
class_list = []
self.instances = instances
self.update(class_list)
def get_class_list(self):
return self.class_list
def add(self, class_path):
self.cache = None
if class_path not in self.class_list:
self.class_list.append(class_path)
def remove(self, class_path):
self.cache = None
self.class_list.remove(class_path)
def update(self, class_list):
"""
Updates the class list and wipes the cache.
"""
self.cache = None
self.class_list = class_list
def all(self):
"""
Returns a list of cached instances.
"""
class_list = list(self.get_class_list())
if not class_list:
self.cache = []
return []
if self.cache is not None:
return self.cache
results = []
for cls_path in class_list:
module_name, class_name = cls_path.rsplit(".", 1)
try:
module = __import__(module_name, {}, {}, class_name)
cls = getattr(module, class_name)
if self.instances:
results.append(cls())
else:
results.append(cls)
except InvalidConfiguration as e:
current_app.logger.warning(
"Plugin '{0}' may not work correctly. {1}".format(class_name, e)
)
except Exception as e:
current_app.logger.exception(
"Unable to import {0}. Reason: {1}".format(cls_path, e)
)
continue
self.cache = results
return results
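# Illustrative usage sketch (the dotted path below is hypothetical; import and
# configuration failures are logged through flask's current_app, so this is
# normally exercised inside an application context):
#
#   manager = InstanceManager(["myapp.plugins.MyPlugin"])  # hypothetical path
#   for plugin in manager.all():  # instances, because instances=True by default
#       ...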
|
revision = "412b22cb656a"
down_revision = "4c50b903d1ae"
from alembic import op
import sqlalchemy as sa
from sqlalchemy.sql import text
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.create_table(
"roles_authorities",
sa.Column("authority_id", sa.Integer(), nullable=True),
sa.Column("role_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["authority_id"], ["authorities.id"]),
sa.ForeignKeyConstraint(["role_id"], ["roles.id"]),
)
op.create_index(
"roles_authorities_ix",
"roles_authorities",
["authority_id", "role_id"],
unique=True,
)
op.create_table(
"roles_certificates",
sa.Column("certificate_id", sa.Integer(), nullable=True),
sa.Column("role_id", sa.Integer(), nullable=True),
sa.ForeignKeyConstraint(["certificate_id"], ["certificates.id"]),
sa.ForeignKeyConstraint(["role_id"], ["roles.id"]),
)
op.create_index(
"roles_certificates_ix",
"roles_certificates",
["certificate_id", "role_id"],
unique=True,
)
op.create_index(
"certificate_associations_ix",
"certificate_associations",
["domain_id", "certificate_id"],
unique=True,
)
op.create_index(
"certificate_destination_associations_ix",
"certificate_destination_associations",
["destination_id", "certificate_id"],
unique=True,
)
op.create_index(
"certificate_notification_associations_ix",
"certificate_notification_associations",
["notification_id", "certificate_id"],
unique=True,
)
op.create_index(
"certificate_replacement_associations_ix",
"certificate_replacement_associations",
["certificate_id", "certificate_id"],
unique=True,
)
op.create_index(
"certificate_source_associations_ix",
"certificate_source_associations",
["source_id", "certificate_id"],
unique=True,
)
op.create_index(
"roles_users_ix", "roles_users", ["user_id", "role_id"], unique=True
)
### end Alembic commands ###
# migrate existing authority_id relationship to many_to_many
conn = op.get_bind()
for id, authority_id in conn.execute(
text("select id, authority_id from roles where authority_id is not null")
):
stmt = text(
"insert into roles_authoritties (role_id, authority_id) values (:role_id, :authority_id)"
)
stmt = stmt.bindparams(role_id=id, authority_id=authority_id)
op.execute(stmt)
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_index("roles_users_ix", table_name="roles_users")
op.drop_index(
"certificate_source_associations_ix",
table_name="certificate_source_associations",
)
op.drop_index(
"certificate_replacement_associations_ix",
table_name="certificate_replacement_associations",
)
op.drop_index(
"certificate_notification_associations_ix",
table_name="certificate_notification_associations",
)
op.drop_index(
"certificate_destination_associations_ix",
table_name="certificate_destination_associations",
)
op.drop_index("certificate_associations_ix", table_name="certificate_associations")
op.drop_index("roles_certificates_ix", table_name="roles_certificates")
op.drop_table("roles_certificates")
op.drop_index("roles_authorities_ix", table_name="roles_authorities")
op.drop_table("roles_authorities")
### end Alembic commands ###
|
import os
import pytest
import sh
from molecule import config
from molecule.dependency import ansible_galaxy
@pytest.fixture
def _patched_ansible_galaxy_has_requirements_file(mocker):
m = mocker.patch(('molecule.dependency.ansible_galaxy.'
'AnsibleGalaxy._has_requirements_file'))
m.return_value = True
return m
@pytest.fixture
def _dependency_section_data():
return {
'dependency': {
'name': 'galaxy',
'options': {
'foo': 'bar',
'v': True,
},
'env': {
'FOO': 'bar',
}
}
}
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(_dependency_section_data, patched_config_validate,
config_instance):
return ansible_galaxy.AnsibleGalaxy(config_instance)
@pytest.fixture
def role_file(_instance):
return os.path.join(_instance._config.scenario.directory,
'requirements.yml')
@pytest.fixture
def roles_path(_instance):
return os.path.join(_instance._config.scenario.ephemeral_directory,
'roles')
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance, role_file, roles_path):
x = {'role-file': role_file, 'roles-path': roles_path, 'force': True}
assert x == _instance.default_options
def test_default_env_property(_instance):
env = _instance.default_env
assert 'MOLECULE_FILE' in env
assert 'MOLECULE_INVENTORY_FILE' in env
assert 'MOLECULE_SCENARIO_DIRECTORY' in env
assert 'MOLECULE_INSTANCE_CONFIG' in env
def test_name_property(_instance):
assert 'galaxy' == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_options_property(_instance, role_file, roles_path):
x = {
'force': True,
'role-file': role_file,
'roles-path': roles_path,
'foo': 'bar',
'v': True,
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_options_property_handles_cli_args(role_file, roles_path, _instance):
_instance._config.args = {'debug': True}
x = {
'force': True,
'role-file': role_file,
'roles-path': roles_path,
'foo': 'bar',
'vvv': True,
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_env_property(_instance):
assert 'bar' == _instance.env['FOO']
@pytest.mark.parametrize(
'config_instance', ['_dependency_section_data'], indirect=True)
def test_bake(_instance, role_file, roles_path):
_instance.bake()
x = [
str(sh.ansible_galaxy), 'install', '--role-file={}'.format(role_file),
'--roles-path={}'.format(roles_path), '--force', '--foo=bar', '-v'
]
result = str(_instance._sh_command).split()
assert sorted(x) == sorted(result)
def test_execute(patched_run_command,
_patched_ansible_galaxy_has_requirements_file,
patched_logger_success, _instance):
_instance._sh_command = 'patched-command'
_instance.execute()
role_directory = os.path.join(_instance._config.scenario.directory,
_instance.options['roles-path'])
assert os.path.isdir(role_directory)
patched_run_command.assert_called_once_with('patched-command', debug=False)
msg = 'Dependency completed successfully.'
patched_logger_success.assert_called_once_with(msg)
def test_execute_does_not_execute_when_disabled(
patched_run_command, patched_logger_warn, _instance):
_instance._config.config['dependency']['enabled'] = False
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, dependency is disabled.'
patched_logger_warn.assert_called_once_with(msg)
def test_execute_does_not_execute_when_no_requirements_file(
patched_run_command, _patched_ansible_galaxy_has_requirements_file,
patched_logger_warn, _instance):
_patched_ansible_galaxy_has_requirements_file.return_value = False
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, missing the requirements file.'
patched_logger_warn.assert_called_once_with(msg)
def test_execute_bakes(patched_run_command, _instance, role_file,
_patched_ansible_galaxy_has_requirements_file,
roles_path):
_instance.execute()
assert _instance._sh_command is not None
assert 1 == patched_run_command.call_count
def test_executes_catches_and_exits_return_code(
patched_run_command, _patched_ansible_galaxy_has_requirements_file,
_instance):
patched_run_command.side_effect = sh.ErrorReturnCode_1(
sh.ansible_galaxy, b'', b'')
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
def test_setup(_instance):
role_directory = os.path.join(_instance._config.scenario.directory,
_instance.options['roles-path'])
assert not os.path.isdir(role_directory)
_instance._setup()
assert os.path.isdir(role_directory)
def test_role_file(role_file, _instance):
assert role_file == _instance._role_file()
def test_has_requirements_file(_instance):
assert not _instance._has_requirements_file()
|
from datetime import datetime
from datetime import timedelta
from zinnia.settings import ALLOW_EMPTY
from zinnia.settings import ALLOW_FUTURE
from zinnia.settings import PAGINATION
class ArchiveMixin(object):
"""
Mixin centralizing the configuration of the archives views.
"""
paginate_by = PAGINATION
allow_empty = ALLOW_EMPTY
allow_future = ALLOW_FUTURE
date_field = 'publication_date'
month_format = '%m'
week_format = '%W'
class PreviousNextPublishedMixin(object):
"""
Mixin for correcting the previous/next
    context variables to return dates with published entries.
"""
def get_previous_next_published(self, date):
"""
Returns a dict of the next and previous date periods
with published entries.
"""
previous_next = getattr(self, 'previous_next', None)
if previous_next is None:
date_year = datetime(date.year, 1, 1)
date_month = datetime(date.year, date.month, 1)
date_day = datetime(date.year, date.month, date.day)
date_next_week = date_day + timedelta(weeks=1)
previous_next = {'year': [None, None],
'week': [None, None],
'month': [None, None],
'day': [None, None]}
dates = self.get_queryset().datetimes(
'publication_date', 'day', order='ASC')
for d in dates:
d_year = datetime(d.year, 1, 1)
d_month = datetime(d.year, d.month, 1)
d_day = datetime(d.year, d.month, d.day)
if d_year < date_year:
previous_next['year'][0] = d_year.date()
elif d_year > date_year and not previous_next['year'][1]:
previous_next['year'][1] = d_year.date()
if d_month < date_month:
previous_next['month'][0] = d_month.date()
elif d_month > date_month and not previous_next['month'][1]:
previous_next['month'][1] = d_month.date()
if d_day < date_day:
previous_next['day'][0] = d_day.date()
previous_next['week'][0] = d_day.date() - timedelta(
days=d_day.weekday())
elif d_day > date_day and not previous_next['day'][1]:
previous_next['day'][1] = d_day.date()
if d_day > date_next_week and not previous_next['week'][1]:
previous_next['week'][1] = d_day.date() - timedelta(
days=d_day.weekday())
setattr(self, 'previous_next', previous_next)
return previous_next
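    # The cached mapping built above has the shape
    # {'year': [previous, next], 'month': [...], 'week': [...], 'day': [...]},
    # where each slot is a datetime.date, or None when there is no published
    # entry on that side of the requested date.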
def get_next_year(self, date):
"""
Get the next year with published entries.
"""
return self.get_previous_next_published(date)['year'][1]
def get_previous_year(self, date):
"""
Get the previous year with published entries.
"""
return self.get_previous_next_published(date)['year'][0]
def get_next_week(self, date):
"""
Get the next week with published entries.
"""
return self.get_previous_next_published(date)['week'][1]
def get_previous_week(self, date):
"""
        Get the previous week with published entries.
"""
return self.get_previous_next_published(date)['week'][0]
def get_next_month(self, date):
"""
Get the next month with published entries.
"""
return self.get_previous_next_published(date)['month'][1]
def get_previous_month(self, date):
"""
Get the previous month with published entries.
"""
return self.get_previous_next_published(date)['month'][0]
def get_next_day(self, date):
"""
Get the next day with published entries.
"""
return self.get_previous_next_published(date)['day'][1]
def get_previous_day(self, date):
"""
Get the previous day with published entries.
"""
return self.get_previous_next_published(date)['day'][0]
|
import unittest
from homeassistant.components.tcp import binary_sensor as bin_tcp
import homeassistant.components.tcp.sensor as tcp
from homeassistant.setup import setup_component
from tests.async_mock import Mock, patch
from tests.common import assert_setup_component, get_test_home_assistant
import tests.components.tcp.test_sensor as test_tcp
class TestTCPBinarySensor(unittest.TestCase):
"""Test the TCP Binary Sensor."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop down everything that was started."""
self.hass.stop()
def test_setup_platform_valid_config(self):
"""Check a valid configuration."""
with assert_setup_component(0, "binary_sensor"):
assert setup_component(self.hass, "binary_sensor", test_tcp.TEST_CONFIG)
def test_setup_platform_invalid_config(self):
"""Check the invalid configuration."""
with assert_setup_component(0):
assert setup_component(
self.hass,
"binary_sensor",
{"binary_sensor": {"platform": "tcp", "porrt": 1234}},
)
@patch("homeassistant.components.tcp.sensor.TcpSensor.update")
def test_setup_platform_devices(self, mock_update):
"""Check the supplied config and call add_entities with sensor."""
add_entities = Mock()
ret = bin_tcp.setup_platform(None, test_tcp.TEST_CONFIG, add_entities)
assert ret is None
assert add_entities.called
assert isinstance(add_entities.call_args[0][0][0], bin_tcp.TcpBinarySensor)
@patch("homeassistant.components.tcp.sensor.TcpSensor.update")
def test_is_on_true(self, mock_update):
"""Check the return that _state is value_on."""
sensor = bin_tcp.TcpBinarySensor(self.hass, test_tcp.TEST_CONFIG["sensor"])
sensor._state = test_tcp.TEST_CONFIG["sensor"][tcp.CONF_VALUE_ON]
print(sensor._state)
assert sensor.is_on
@patch("homeassistant.components.tcp.sensor.TcpSensor.update")
def test_is_on_false(self, mock_update):
"""Check the return that _state is not the same as value_on."""
sensor = bin_tcp.TcpBinarySensor(self.hass, test_tcp.TEST_CONFIG["sensor"])
sensor._state = "{} abc".format(
test_tcp.TEST_CONFIG["sensor"][tcp.CONF_VALUE_ON]
)
assert not sensor.is_on
|
import logging
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.util.distance import LENGTH_KILOMETERS, LENGTH_MILES
from homeassistant.util.unit_system import IMPERIAL_SYSTEM, METRIC_SYSTEM
from . import (
DATA_BATTERY,
DATA_CHARGING,
DATA_LEAF,
DATA_RANGE_AC,
DATA_RANGE_AC_OFF,
LeafEntity,
)
_LOGGER = logging.getLogger(__name__)
ICON_RANGE = "mdi:speedometer"
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Sensors setup."""
if discovery_info is None:
return
devices = []
for vin, datastore in hass.data[DATA_LEAF].items():
_LOGGER.debug("Adding sensors for vin=%s", vin)
devices.append(LeafBatterySensor(datastore))
devices.append(LeafRangeSensor(datastore, True))
devices.append(LeafRangeSensor(datastore, False))
add_devices(devices, True)
class LeafBatterySensor(LeafEntity):
"""Nissan Leaf Battery Sensor."""
@property
def name(self):
"""Sensor Name."""
return f"{self.car.leaf.nickname} Charge"
@property
def device_class(self):
"""Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def state(self):
"""Battery state percentage."""
return round(self.car.data[DATA_BATTERY])
@property
def unit_of_measurement(self):
"""Battery state measured in percentage."""
return PERCENTAGE
@property
def icon(self):
"""Battery state icon handling."""
chargestate = self.car.data[DATA_CHARGING]
return icon_for_battery_level(battery_level=self.state, charging=chargestate)
class LeafRangeSensor(LeafEntity):
"""Nissan Leaf Range Sensor."""
def __init__(self, car, ac_on):
"""Set up range sensor. Store if AC on."""
self._ac_on = ac_on
super().__init__(car)
@property
def name(self):
"""Update sensor name depending on AC."""
if self._ac_on is True:
return f"{self.car.leaf.nickname} Range (AC)"
return f"{self.car.leaf.nickname} Range"
def log_registration(self):
"""Log registration."""
_LOGGER.debug(
"Registered LeafRangeSensor integration with Home Assistant for VIN %s",
self.car.leaf.vin,
)
@property
def state(self):
"""Battery range in miles or kms."""
if self._ac_on:
ret = self.car.data[DATA_RANGE_AC]
else:
ret = self.car.data[DATA_RANGE_AC_OFF]
if not self.car.hass.config.units.is_metric or self.car.force_miles:
ret = IMPERIAL_SYSTEM.length(ret, METRIC_SYSTEM.length_unit)
return round(ret)
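    # For example, a 100 km range reported by the API is returned as
    # round(62.14) == 62 when the unit system is imperial or force_miles is set.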
@property
def unit_of_measurement(self):
"""Battery range unit."""
if not self.car.hass.config.units.is_metric or self.car.force_miles:
return LENGTH_MILES
return LENGTH_KILOMETERS
@property
def icon(self):
"""Nice icon for range."""
return ICON_RANGE
|
import copy
import os
import RFXtrx as rfxtrxmod
import serial
import serial.tools.list_ports
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_DEVICE,
CONF_DEVICE_ID,
CONF_DEVICES,
CONF_HOST,
CONF_PORT,
CONF_TYPE,
)
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.device_registry import (
async_entries_for_config_entry,
async_get_registry as async_get_device_registry,
)
from homeassistant.helpers.entity_registry import (
async_entries_for_device,
async_get_registry as async_get_entity_registry,
)
from . import DOMAIN, get_device_id, get_rfx_object
from .binary_sensor import supported as binary_supported
from .const import (
CONF_AUTOMATIC_ADD,
CONF_DATA_BITS,
CONF_FIRE_EVENT,
CONF_OFF_DELAY,
CONF_REMOVE_DEVICE,
CONF_REPLACE_DEVICE,
CONF_SIGNAL_REPETITIONS,
DEVICE_PACKET_TYPE_LIGHTING4,
)
from .cover import supported as cover_supported
from .light import supported as light_supported
from .switch import supported as switch_supported
CONF_EVENT_CODE = "event_code"
CONF_MANUAL_PATH = "Enter Manually"
def none_or_int(value, base):
"""Check if strin is one otherwise convert to int."""
if value is None:
return None
return int(value, base)
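# Examples: none_or_int(None, 16) -> None, none_or_int("0x10", 16) -> 16;
# invalid input such as none_or_int("xyz", 16) raises ValueError, which the
# option-flow steps below catch and turn into form errors.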
class OptionsFlow(config_entries.OptionsFlow):
"""Handle Rfxtrx options."""
def __init__(self, config_entry: ConfigEntry) -> None:
"""Initialize rfxtrx options flow."""
self._config_entry = config_entry
self._global_options = None
self._selected_device = None
self._selected_device_entry_id = None
self._selected_device_event_code = None
self._selected_device_object = None
self._device_entries = None
self._device_registry = None
async def async_step_init(self, user_input=None):
"""Manage the options."""
return await self.async_step_prompt_options()
async def async_step_prompt_options(self, user_input=None):
"""Prompt for options."""
errors = {}
if user_input is not None:
self._global_options = {
CONF_AUTOMATIC_ADD: user_input[CONF_AUTOMATIC_ADD],
}
if CONF_DEVICE in user_input:
entry_id = user_input[CONF_DEVICE]
device_data = self._get_device_data(entry_id)
self._selected_device_entry_id = entry_id
event_code = device_data[CONF_EVENT_CODE]
self._selected_device_event_code = event_code
self._selected_device = self._config_entry.data[CONF_DEVICES][
event_code
]
self._selected_device_object = get_rfx_object(event_code)
return await self.async_step_set_device_options()
if CONF_REMOVE_DEVICE in user_input:
remove_devices = user_input[CONF_REMOVE_DEVICE]
devices = {}
for entry_id in remove_devices:
device_data = self._get_device_data(entry_id)
event_code = device_data[CONF_EVENT_CODE]
device_id = device_data[CONF_DEVICE_ID]
self.hass.helpers.dispatcher.async_dispatcher_send(
f"{DOMAIN}_{CONF_REMOVE_DEVICE}_{device_id}"
)
self._device_registry.async_remove_device(entry_id)
if event_code is not None:
devices[event_code] = None
self.update_config_data(
global_options=self._global_options, devices=devices
)
return self.async_create_entry(title="", data={})
if CONF_EVENT_CODE in user_input:
self._selected_device_event_code = user_input[CONF_EVENT_CODE]
self._selected_device = {}
selected_device_object = get_rfx_object(
self._selected_device_event_code
)
if selected_device_object is None:
errors[CONF_EVENT_CODE] = "invalid_event_code"
elif not self._can_add_device(selected_device_object):
errors[CONF_EVENT_CODE] = "already_configured_device"
else:
self._selected_device_object = selected_device_object
return await self.async_step_set_device_options()
if not errors:
self.update_config_data(global_options=self._global_options)
return self.async_create_entry(title="", data={})
device_registry = await async_get_device_registry(self.hass)
device_entries = async_entries_for_config_entry(
device_registry, self._config_entry.entry_id
)
self._device_registry = device_registry
self._device_entries = device_entries
remove_devices = {
entry.id: entry.name_by_user if entry.name_by_user else entry.name
for entry in device_entries
}
configure_devices = {
entry.id: entry.name_by_user if entry.name_by_user else entry.name
for entry in device_entries
if self._get_device_event_code(entry.id) is not None
}
options = {
vol.Optional(
CONF_AUTOMATIC_ADD,
default=self._config_entry.data[CONF_AUTOMATIC_ADD],
): bool,
vol.Optional(CONF_EVENT_CODE): str,
vol.Optional(CONF_DEVICE): vol.In(configure_devices),
vol.Optional(CONF_REMOVE_DEVICE): cv.multi_select(remove_devices),
}
return self.async_show_form(
step_id="prompt_options", data_schema=vol.Schema(options), errors=errors
)
async def async_step_set_device_options(self, user_input=None):
"""Manage device options."""
errors = {}
if user_input is not None:
device_id = get_device_id(
self._selected_device_object.device,
data_bits=user_input.get(CONF_DATA_BITS),
)
if CONF_REPLACE_DEVICE in user_input:
await self._async_replace_device(user_input[CONF_REPLACE_DEVICE])
devices = {self._selected_device_event_code: None}
self.update_config_data(
global_options=self._global_options, devices=devices
)
return self.async_create_entry(title="", data={})
try:
command_on = none_or_int(user_input.get(CONF_COMMAND_ON), 16)
except ValueError:
errors[CONF_COMMAND_ON] = "invalid_input_2262_on"
try:
command_off = none_or_int(user_input.get(CONF_COMMAND_OFF), 16)
except ValueError:
errors[CONF_COMMAND_OFF] = "invalid_input_2262_off"
try:
off_delay = none_or_int(user_input.get(CONF_OFF_DELAY), 10)
except ValueError:
errors[CONF_OFF_DELAY] = "invalid_input_off_delay"
if not errors:
devices = {}
device = {
CONF_DEVICE_ID: device_id,
CONF_FIRE_EVENT: user_input.get(CONF_FIRE_EVENT, False),
CONF_SIGNAL_REPETITIONS: user_input.get(CONF_SIGNAL_REPETITIONS, 1),
}
devices[self._selected_device_event_code] = device
if off_delay:
device[CONF_OFF_DELAY] = off_delay
if user_input.get(CONF_DATA_BITS):
device[CONF_DATA_BITS] = user_input[CONF_DATA_BITS]
if command_on:
device[CONF_COMMAND_ON] = command_on
if command_off:
device[CONF_COMMAND_OFF] = command_off
self.update_config_data(
global_options=self._global_options, devices=devices
)
return self.async_create_entry(title="", data={})
device_data = self._selected_device
data_schema = {
vol.Optional(
CONF_FIRE_EVENT, default=device_data.get(CONF_FIRE_EVENT, False)
): bool,
}
if binary_supported(self._selected_device_object):
if device_data.get(CONF_OFF_DELAY):
off_delay_schema = {
vol.Optional(
CONF_OFF_DELAY,
description={"suggested_value": device_data[CONF_OFF_DELAY]},
): str,
}
else:
off_delay_schema = {
vol.Optional(CONF_OFF_DELAY): str,
}
data_schema.update(off_delay_schema)
if (
binary_supported(self._selected_device_object)
or cover_supported(self._selected_device_object)
or light_supported(self._selected_device_object)
or switch_supported(self._selected_device_object)
):
data_schema.update(
{
vol.Optional(
CONF_SIGNAL_REPETITIONS,
default=device_data.get(CONF_SIGNAL_REPETITIONS, 1),
): int,
}
)
if (
self._selected_device_object.device.packettype
== DEVICE_PACKET_TYPE_LIGHTING4
):
data_schema.update(
{
vol.Optional(
CONF_DATA_BITS, default=device_data.get(CONF_DATA_BITS, 0)
): int,
vol.Optional(
CONF_COMMAND_ON,
default=hex(device_data.get(CONF_COMMAND_ON, 0)),
): str,
vol.Optional(
CONF_COMMAND_OFF,
default=hex(device_data.get(CONF_COMMAND_OFF, 0)),
): str,
}
)
devices = {
entry.id: entry.name_by_user if entry.name_by_user else entry.name
for entry in self._device_entries
if self._can_replace_device(entry.id)
}
if devices:
data_schema.update(
{
vol.Optional(CONF_REPLACE_DEVICE): vol.In(devices),
}
)
return self.async_show_form(
step_id="set_device_options",
data_schema=vol.Schema(data_schema),
errors=errors,
)
async def _async_replace_device(self, replace_device):
"""Migrate properties of a device into another."""
device_registry = self._device_registry
old_device = self._selected_device_entry_id
old_entry = device_registry.async_get(old_device)
device_registry.async_update_device(
replace_device,
area_id=old_entry.area_id,
name_by_user=old_entry.name_by_user,
)
old_device_data = self._get_device_data(old_device)
new_device_data = self._get_device_data(replace_device)
old_device_id = "_".join(x for x in old_device_data[CONF_DEVICE_ID])
new_device_id = "_".join(x for x in new_device_data[CONF_DEVICE_ID])
entity_registry = await async_get_entity_registry(self.hass)
entity_entries = async_entries_for_device(entity_registry, old_device)
entity_migration_map = {}
for entry in entity_entries:
unique_id = entry.unique_id
new_unique_id = unique_id.replace(old_device_id, new_device_id)
new_entity_id = entity_registry.async_get_entity_id(
entry.domain, entry.platform, new_unique_id
)
if new_entity_id is not None:
entity_migration_map[new_entity_id] = entry
for entry in entity_migration_map.values():
entity_registry.async_remove(entry.entity_id)
for entity_id, entry in entity_migration_map.items():
entity_registry.async_update_entity(
entity_id,
new_entity_id=entry.entity_id,
name=entry.name,
icon=entry.icon,
)
device_registry.async_remove_device(old_device)
def _can_add_device(self, new_rfx_obj):
"""Check if device does not already exist."""
new_device_id = get_device_id(new_rfx_obj.device)
for packet_id, entity_info in self._config_entry.data[CONF_DEVICES].items():
rfx_obj = get_rfx_object(packet_id)
device_id = get_device_id(rfx_obj.device, entity_info.get(CONF_DATA_BITS))
if new_device_id == device_id:
return False
return True
def _can_replace_device(self, entry_id):
"""Check if device can be replaced with selected device."""
device_data = self._get_device_data(entry_id)
event_code = device_data[CONF_EVENT_CODE]
rfx_obj = get_rfx_object(event_code)
if (
rfx_obj.device.packettype == self._selected_device_object.device.packettype
and rfx_obj.device.subtype == self._selected_device_object.device.subtype
and self._selected_device_event_code != event_code
):
return True
return False
    def _get_device_event_code(self, entry_id):
        """Get the event code for a device registry entry."""
        data = self._get_device_data(entry_id)
        return data[CONF_EVENT_CODE]
    def _get_device_data(self, entry_id):
        """Get the event code and device id for a device registry entry."""
event_code = None
device_id = None
entry = self._device_registry.async_get(entry_id)
device_id = next(iter(entry.identifiers))[1:]
for packet_id, entity_info in self._config_entry.data[CONF_DEVICES].items():
if tuple(entity_info.get(CONF_DEVICE_ID)) == device_id:
event_code = packet_id
break
data = {CONF_EVENT_CODE: event_code, CONF_DEVICE_ID: device_id}
return data
@callback
def update_config_data(self, global_options=None, devices=None):
"""Update data in ConfigEntry."""
entry_data = self._config_entry.data.copy()
entry_data[CONF_DEVICES] = copy.deepcopy(self._config_entry.data[CONF_DEVICES])
if global_options:
entry_data.update(global_options)
if devices:
for event_code, options in devices.items():
if options is None:
entry_data[CONF_DEVICES].pop(event_code)
else:
entry_data[CONF_DEVICES][event_code] = options
self.hass.config_entries.async_update_entry(self._config_entry, data=entry_data)
self.hass.async_create_task(
self.hass.config_entries.async_reload(self._config_entry.entry_id)
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for RFXCOM RFXtrx."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
async def async_step_user(self, user_input=None):
"""Step when user initializes a integration."""
await self.async_set_unique_id(DOMAIN)
self._abort_if_unique_id_configured()
errors = {}
if user_input is not None:
user_selection = user_input[CONF_TYPE]
if user_selection == "Serial":
return await self.async_step_setup_serial()
return await self.async_step_setup_network()
list_of_types = ["Serial", "Network"]
schema = vol.Schema({vol.Required(CONF_TYPE): vol.In(list_of_types)})
return self.async_show_form(step_id="user", data_schema=schema, errors=errors)
async def async_step_setup_network(self, user_input=None):
"""Step when setting up network configuration."""
errors = {}
if user_input is not None:
host = user_input[CONF_HOST]
port = user_input[CONF_PORT]
try:
data = await self.async_validate_rfx(host=host, port=port)
except CannotConnect:
errors["base"] = "cannot_connect"
if not errors:
return self.async_create_entry(title="RFXTRX", data=data)
schema = vol.Schema(
{vol.Required(CONF_HOST): str, vol.Required(CONF_PORT): int}
)
return self.async_show_form(
step_id="setup_network",
data_schema=schema,
errors=errors,
)
async def async_step_setup_serial(self, user_input=None):
"""Step when setting up serial configuration."""
errors = {}
if user_input is not None:
user_selection = user_input[CONF_DEVICE]
if user_selection == CONF_MANUAL_PATH:
return await self.async_step_setup_serial_manual_path()
dev_path = await self.hass.async_add_executor_job(
get_serial_by_id, user_selection
)
try:
data = await self.async_validate_rfx(device=dev_path)
except CannotConnect:
errors["base"] = "cannot_connect"
if not errors:
return self.async_create_entry(title="RFXTRX", data=data)
ports = await self.hass.async_add_executor_job(serial.tools.list_ports.comports)
list_of_ports = {}
for port in ports:
list_of_ports[
port.device
] = f"{port}, s/n: {port.serial_number or 'n/a'}" + (
f" - {port.manufacturer}" if port.manufacturer else ""
)
list_of_ports[CONF_MANUAL_PATH] = CONF_MANUAL_PATH
schema = vol.Schema({vol.Required(CONF_DEVICE): vol.In(list_of_ports)})
return self.async_show_form(
step_id="setup_serial",
data_schema=schema,
errors=errors,
)
async def async_step_setup_serial_manual_path(self, user_input=None):
"""Select path manually."""
errors = {}
if user_input is not None:
device = user_input[CONF_DEVICE]
try:
data = await self.async_validate_rfx(device=device)
except CannotConnect:
errors["base"] = "cannot_connect"
if not errors:
return self.async_create_entry(title="RFXTRX", data=data)
schema = vol.Schema({vol.Required(CONF_DEVICE): str})
return self.async_show_form(
step_id="setup_serial_manual_path",
data_schema=schema,
errors=errors,
)
async def async_step_import(self, import_config=None):
"""Handle the initial step."""
entry = await self.async_set_unique_id(DOMAIN)
if entry:
if CONF_DEVICES not in entry.data:
# In version 0.113, devices key was not written to config entry. Update the entry with import data
self._abort_if_unique_id_configured(import_config)
else:
self._abort_if_unique_id_configured()
host = import_config[CONF_HOST]
port = import_config[CONF_PORT]
device = import_config[CONF_DEVICE]
try:
if host is not None:
await self.async_validate_rfx(host=host, port=port)
else:
await self.async_validate_rfx(device=device)
except CannotConnect:
return self.async_abort(reason="cannot_connect")
return self.async_create_entry(title="RFXTRX", data=import_config)
async def async_validate_rfx(self, host=None, port=None, device=None):
"""Create data for rfxtrx entry."""
success = await self.hass.async_add_executor_job(
_test_transport, host, port, device
)
if not success:
raise CannotConnect
data = {
CONF_HOST: host,
CONF_PORT: port,
CONF_DEVICE: device,
CONF_AUTOMATIC_ADD: False,
CONF_DEVICES: {},
}
return data
@staticmethod
@callback
def async_get_options_flow(config_entry: ConfigEntry) -> OptionsFlow:
"""Get the options flow for this handler."""
return OptionsFlow(config_entry)
def _test_transport(host, port, device):
"""Construct a rfx object based on config."""
if port is not None:
try:
conn = rfxtrxmod.PyNetworkTransport((host, port))
except OSError:
return False
conn.close()
else:
try:
conn = rfxtrxmod.PySerialTransport(device)
except serial.serialutil.SerialException:
return False
if conn.serial is None:
return False
conn.close()
return True
def get_serial_by_id(dev_path: str) -> str:
"""Return a /dev/serial/by-id match for given device if available."""
by_id = "/dev/serial/by-id"
if not os.path.isdir(by_id):
return dev_path
for path in (entry.path for entry in os.scandir(by_id) if entry.is_symlink()):
if os.path.realpath(path) == dev_path:
return path
return dev_path
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
import pytest
from homeassistant.components.rfxtrx import DOMAIN
from homeassistant.components.rfxtrx.const import ATTR_EVENT
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_CELSIUS,
)
from homeassistant.core import State
from tests.common import MockConfigEntry, mock_restore_cache
from tests.components.rfxtrx.conftest import create_rfx_test_cfg
async def test_default_config(hass, rfxtrx):
"""Test with 0 sensor."""
entry_data = create_rfx_test_cfg(devices={})
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 0
async def test_one_sensor(hass, rfxtrx):
"""Test with 1 sensor."""
entry_data = create_rfx_test_cfg(devices={"0a52080705020095220269": {}})
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_05_02_temperature")
assert state
assert state.state == "unknown"
assert (
state.attributes.get("friendly_name")
== "WT260,WT260H,WT440H,WT450,WT450H 05:02 Temperature"
)
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
@pytest.mark.parametrize(
"state,event",
[["18.4", "0a520801070100b81b0279"], ["17.9", "0a52085e070100b31b0279"]],
)
async def test_state_restore(hass, rfxtrx, state, event):
"""State restoration."""
entity_id = "sensor.wt260_wt260h_wt440h_wt450_wt450h_07_01_temperature"
mock_restore_cache(hass, [State(entity_id, state, attributes={ATTR_EVENT: event})])
entry_data = create_rfx_test_cfg(devices={"0a520801070100b81b0279": {}})
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == state
async def test_one_sensor_no_datatype(hass, rfxtrx):
"""Test with 1 sensor."""
entry_data = create_rfx_test_cfg(devices={"0a52080705020095220269": {}})
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
base_id = "sensor.wt260_wt260h_wt440h_wt450_wt450h_05_02"
base_name = "WT260,WT260H,WT440H,WT450,WT450H 05:02"
state = hass.states.get(f"{base_id}_temperature")
assert state
assert state.state == "unknown"
assert state.attributes.get("friendly_name") == f"{base_name} Temperature"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
state = hass.states.get(f"{base_id}_humidity")
assert state
assert state.state == "unknown"
assert state.attributes.get("friendly_name") == f"{base_name} Humidity"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
state = hass.states.get(f"{base_id}_humidity_status")
assert state
assert state.state == "unknown"
assert state.attributes.get("friendly_name") == f"{base_name} Humidity status"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
state = hass.states.get(f"{base_id}_rssi_numeric")
assert state
assert state.state == "unknown"
assert state.attributes.get("friendly_name") == f"{base_name} Rssi numeric"
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== SIGNAL_STRENGTH_DECIBELS_MILLIWATT
)
state = hass.states.get(f"{base_id}_battery_numeric")
assert state
assert state.state == "unknown"
assert state.attributes.get("friendly_name") == f"{base_name} Battery numeric"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
async def test_several_sensors(hass, rfxtrx):
"""Test with 3 sensors."""
entry_data = create_rfx_test_cfg(
devices={
"0a52080705020095220269": {},
"0a520802060100ff0e0269": {},
}
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_05_02_temperature")
assert state
assert state.state == "unknown"
assert (
state.attributes.get("friendly_name")
== "WT260,WT260H,WT440H,WT450,WT450H 05:02 Temperature"
)
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_06_01_temperature")
assert state
assert state.state == "unknown"
assert (
state.attributes.get("friendly_name")
== "WT260,WT260H,WT440H,WT450,WT450H 06:01 Temperature"
)
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_06_01_humidity")
assert state
assert state.state == "unknown"
assert (
state.attributes.get("friendly_name")
== "WT260,WT260H,WT440H,WT450,WT450H 06:01 Humidity"
)
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
async def test_discover_sensor(hass, rfxtrx_automatic):
"""Test with discovery of sensor."""
rfxtrx = rfxtrx_automatic
# 1
await rfxtrx.signal("0a520801070100b81b0279")
base_id = "sensor.wt260_wt260h_wt440h_wt450_wt450h_07_01"
state = hass.states.get(f"{base_id}_humidity")
assert state
assert state.state == "27"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
state = hass.states.get(f"{base_id}_humidity_status")
assert state
assert state.state == "normal"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
state = hass.states.get(f"{base_id}_rssi_numeric")
assert state
assert state.state == "-64"
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== SIGNAL_STRENGTH_DECIBELS_MILLIWATT
)
state = hass.states.get(f"{base_id}_temperature")
assert state
assert state.state == "18.4"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
state = hass.states.get(f"{base_id}_battery_numeric")
assert state
assert state.state == "100"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
# 2
await rfxtrx.signal("0a52080405020095240279")
base_id = "sensor.wt260_wt260h_wt440h_wt450_wt450h_05_02"
state = hass.states.get(f"{base_id}_humidity")
assert state
assert state.state == "36"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
state = hass.states.get(f"{base_id}_humidity_status")
assert state
assert state.state == "normal"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
state = hass.states.get(f"{base_id}_rssi_numeric")
assert state
assert state.state == "-64"
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== SIGNAL_STRENGTH_DECIBELS_MILLIWATT
)
state = hass.states.get(f"{base_id}_temperature")
assert state
assert state.state == "14.9"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
state = hass.states.get(f"{base_id}_battery_numeric")
assert state
assert state.state == "100"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
# 1 Update
await rfxtrx.signal("0a52085e070100b31b0279")
base_id = "sensor.wt260_wt260h_wt440h_wt450_wt450h_07_01"
state = hass.states.get(f"{base_id}_humidity")
assert state
assert state.state == "27"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
state = hass.states.get(f"{base_id}_humidity_status")
assert state
assert state.state == "normal"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) is None
state = hass.states.get(f"{base_id}_rssi_numeric")
assert state
assert state.state == "-64"
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== SIGNAL_STRENGTH_DECIBELS_MILLIWATT
)
state = hass.states.get(f"{base_id}_temperature")
assert state
assert state.state == "17.9"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
state = hass.states.get(f"{base_id}_battery_numeric")
assert state
assert state.state == "100"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert len(hass.states.async_all()) == 10
async def test_update_of_sensors(hass, rfxtrx):
"""Test with 3 sensors."""
entry_data = create_rfx_test_cfg(
devices={
"0a52080705020095220269": {},
"0a520802060100ff0e0269": {},
}
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_05_02_temperature")
assert state
assert state.state == "unknown"
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_06_01_temperature")
assert state
assert state.state == "unknown"
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_06_01_humidity")
assert state
assert state.state == "unknown"
await rfxtrx.signal("0a520802060101ff0f0269")
await rfxtrx.signal("0a52080705020085220269")
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_05_02_temperature")
assert state
assert state.state == "13.3"
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_06_01_temperature")
assert state
assert state.state == "51.1"
state = hass.states.get("sensor.wt260_wt260h_wt440h_wt450_wt450h_06_01_humidity")
assert state
assert state.state == "15"
async def test_rssi_sensor(hass, rfxtrx):
"""Test with 1 sensor."""
entry_data = create_rfx_test_cfg(
devices={
"0913000022670e013b70": {
"data_bits": 4,
"command_on": 0xE,
"command_off": 0x7,
},
"0b1100cd0213c7f230010f71": {},
}
)
mock_entry = MockConfigEntry(domain="rfxtrx", unique_id=DOMAIN, data=entry_data)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
await hass.async_start()
state = hass.states.get("sensor.pt2262_22670e_rssi_numeric")
assert state
assert state.state == "unknown"
assert state.attributes.get("friendly_name") == "PT2262 22670e Rssi numeric"
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== SIGNAL_STRENGTH_DECIBELS_MILLIWATT
)
state = hass.states.get("sensor.ac_213c7f2_48_rssi_numeric")
assert state
assert state.state == "unknown"
assert state.attributes.get("friendly_name") == "AC 213c7f2:48 Rssi numeric"
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== SIGNAL_STRENGTH_DECIBELS_MILLIWATT
)
await rfxtrx.signal("0913000022670e013b70")
await rfxtrx.signal("0b1100cd0213c7f230010f71")
state = hass.states.get("sensor.pt2262_22670e_rssi_numeric")
assert state
assert state.state == "-64"
state = hass.states.get("sensor.ac_213c7f2_48_rssi_numeric")
assert state
assert state.state == "-64"
await rfxtrx.signal("0913000022670e013b60")
await rfxtrx.signal("0b1100cd0213c7f230010f61")
state = hass.states.get("sensor.pt2262_22670e_rssi_numeric")
assert state
assert state.state == "-72"
state = hass.states.get("sensor.ac_213c7f2_48_rssi_numeric")
assert state
assert state.state == "-72"
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from conntrack import ConnTrackCollector
##########################################################################
class TestConnTrackCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('ConnTrackCollector', {
'interval': 10,
'bin': 'true',
'dir': self.getFixtureDirPath(),
})
self.collector = ConnTrackCollector(config, None)
def test_import(self):
self.assertTrue(ConnTrackCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_synthetic_data(self, publish_mock):
self.collector.collect()
metrics = {
'ip_conntrack_count': 33.0,
'ip_conntrack_max': 36.0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(
return_value=(
'sysctl: cannot stat /proc/sys/net/net' +
'filter/nf_conntrack_count: ' +
'No such file or directory', '')))
patch_communicate.start()
self.collector.collect()
patch_communicate.stop()
self.assertPublishedMany(publish_mock, {})
@patch('os.access', Mock(return_value=False))
@patch.object(Collector, 'publish')
def test_should_fail_gracefully_2(self, publish_mock):
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
try:
from thread import get_ident as _get_ident
except ImportError:
try:
from dummy_thread import get_ident as _get_ident
except ImportError:
from threading import get_ident as _get_ident
try:
from _abcoll import KeysView, ValuesView, ItemsView
except ImportError:
pass
class OrderedDict(dict):
'Dictionary that remembers insertion order'
# An inherited dict maps keys to values.
# The inherited dict provides __getitem__, __len__, __contains__, and get.
# The remaining methods are order-aware.
# Big-O running times for all methods are the same as for regular dictionaries.
# The internal self.__map dictionary maps keys to links in a doubly linked list.
# The circular doubly linked list starts and ends with a sentinel element.
# The sentinel element never gets deleted (this simplifies the algorithm).
# Each link is stored as a list of length three: [PREV, NEXT, KEY].
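    # Example (added illustration): after od['a'] = 1 and od['b'] = 2 the layout is
    #   root <-> link_a <-> link_b <-> root   (circular),
    # where link_a == [root, link_b, 'a'] and link_b == [link_a, root, 'b'].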
def __init__(self, *args, **kwds):
'''Initialize an ordered dictionary. Signature is the same as for
regular dictionaries, but keyword arguments are not recommended
because their insertion order is arbitrary.
'''
if len(args) > 1:
raise TypeError('expected at most 1 arguments, got %d' % len(args))
try:
self.__root
except AttributeError:
self.__root = root = [] # sentinel node
root[:] = [root, root, None]
self.__map = {}
self.__update(*args, **kwds)
def __setitem__(self, key, value, dict_setitem=dict.__setitem__):
'od.__setitem__(i, y) <==> od[i]=y'
# Setting a new item creates a new link which goes at the end of the linked
# list, and the inherited dictionary is updated with the new key/value pair.
if key not in self:
root = self.__root
last = root[0]
last[1] = root[0] = self.__map[key] = [last, root, key]
dict_setitem(self, key, value)
def __delitem__(self, key, dict_delitem=dict.__delitem__):
'od.__delitem__(y) <==> del od[y]'
# Deleting an existing item uses self.__map to find the link which is
# then removed by updating the links in the predecessor and successor nodes.
dict_delitem(self, key)
link_prev, link_next, key = self.__map.pop(key)
link_prev[1] = link_next
link_next[0] = link_prev
def __iter__(self):
'od.__iter__() <==> iter(od)'
root = self.__root
curr = root[1]
while curr is not root:
yield curr[2]
curr = curr[1]
def __reversed__(self):
'od.__reversed__() <==> reversed(od)'
root = self.__root
curr = root[0]
while curr is not root:
yield curr[2]
curr = curr[0]
def clear(self):
'od.clear() -> None. Remove all items from od.'
try:
for node in self.__map.values():
del node[:]
root = self.__root
root[:] = [root, root, None]
self.__map.clear()
except AttributeError:
pass
dict.clear(self)
def popitem(self, last=True):
'''od.popitem() -> (k, v), return and remove a (key, value) pair.
Pairs are returned in LIFO order if last is true or FIFO order if false.
'''
if not self:
raise KeyError('dictionary is empty')
root = self.__root
if last:
link = root[0]
link_prev = link[0]
link_prev[1] = root
root[0] = link_prev
else:
link = root[1]
link_next = link[1]
root[1] = link_next
link_next[0] = root
key = link[2]
del self.__map[key]
value = dict.pop(self, key)
return key, value
# -- the following methods do not depend on the internal structure --
def keys(self):
'od.keys() -> list of keys in od'
return list(self)
def values(self):
'od.values() -> list of values in od'
return [self[key] for key in self]
def items(self):
'od.items() -> list of (key, value) pairs in od'
return [(key, self[key]) for key in self]
def iterkeys(self):
'od.iterkeys() -> an iterator over the keys in od'
return iter(self)
def itervalues(self):
        'od.itervalues() -> an iterator over the values in od'
for k in self:
yield self[k]
def iteritems(self):
        'od.iteritems() -> an iterator over the (key, value) items in od'
for k in self:
yield (k, self[k])
def update(*args, **kwds):
'''od.update(E, **F) -> None. Update od from dict/iterable E and F.
If E is a dict instance, does: for k in E: od[k] = E[k]
If E has a .keys() method, does: for k in E.keys(): od[k] = E[k]
Or if E is an iterable of items, does: for k, v in E: od[k] = v
In either case, this is followed by: for k, v in F.items(): od[k] = v
'''
if len(args) > 2:
raise TypeError('update() takes at most 2 positional '
'arguments (%d given)' % (len(args),))
elif not args:
raise TypeError('update() takes at least 1 argument (0 given)')
self = args[0]
# Make progressively weaker assumptions about "other"
other = ()
if len(args) == 2:
other = args[1]
if isinstance(other, dict):
for key in other:
self[key] = other[key]
elif hasattr(other, 'keys'):
for key in other.keys():
self[key] = other[key]
else:
for key, value in other:
self[key] = value
for key, value in kwds.items():
self[key] = value
__update = update # let subclasses override update without breaking __init__
__marker = object()
def pop(self, key, default=__marker):
'''od.pop(k[,d]) -> v, remove specified key and return the corresponding value.
If key is not found, d is returned if given, otherwise KeyError is raised.
'''
if key in self:
result = self[key]
del self[key]
return result
if default is self.__marker:
raise KeyError(key)
return default
def setdefault(self, key, default=None):
'od.setdefault(k[,d]) -> od.get(k,d), also set od[k]=d if k not in od'
if key in self:
return self[key]
self[key] = default
return default
def __repr__(self, _repr_running={}):
'od.__repr__() <==> repr(od)'
call_key = id(self), _get_ident()
if call_key in _repr_running:
return '...'
_repr_running[call_key] = 1
try:
if not self:
return '%s()' % (self.__class__.__name__,)
return '%s(%r)' % (self.__class__.__name__, self.items())
finally:
del _repr_running[call_key]
def __reduce__(self):
'Return state information for pickling'
items = [[k, self[k]] for k in self]
inst_dict = vars(self).copy()
for k in vars(OrderedDict()):
inst_dict.pop(k, None)
if inst_dict:
return (self.__class__, (items,), inst_dict)
return self.__class__, (items,)
def copy(self):
'od.copy() -> a shallow copy of od'
return self.__class__(self)
@classmethod
def fromkeys(cls, iterable, value=None):
'''OD.fromkeys(S[, v]) -> New ordered dictionary with keys from S
and values equal to v (which defaults to None).
'''
d = cls()
for key in iterable:
d[key] = value
return d
def __eq__(self, other):
'''od.__eq__(y) <==> od==y. Comparison to another OD is order-sensitive
while comparison to a regular mapping is order-insensitive.
'''
if isinstance(other, OrderedDict):
return len(self)==len(other) and self.items() == other.items()
return dict.__eq__(self, other)
def __ne__(self, other):
return not self == other
# -- the following methods are only used in Python 2.7 --
def viewkeys(self):
"od.viewkeys() -> a set-like object providing a view on od's keys"
return KeysView(self)
def viewvalues(self):
"od.viewvalues() -> an object providing a view on od's values"
return ValuesView(self)
def viewitems(self):
"od.viewitems() -> a set-like object providing a view on od's items"
return ItemsView(self)
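# Illustrative usage sketch (not part of the original backport): demonstrates the
# documented behaviour that insertion order is preserved and that popitem() pops
# in LIFO order by default and in FIFO order with last=False.
if __name__ == '__main__':
    od = OrderedDict()
    od['a'] = 1
    od['b'] = 2
    od['c'] = 3
    assert list(od.keys()) == ['a', 'b', 'c']   # insertion order kept
    assert od.popitem() == ('c', 3)             # LIFO
    assert od.popitem(last=False) == ('a', 1)   # FIFO
    print(od)                                   # OrderedDict([('b', 2)])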
|
from datetime import datetime
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OCCUPANCY,
BinarySensorEntity,
)
from homeassistant.core import callback
from . import DOMAIN
from .entity import RingEntityMixin
# Sensor types: Name, category, device_class
SENSOR_TYPES = {
"ding": ["Ding", ["doorbots", "authorized_doorbots"], DEVICE_CLASS_OCCUPANCY],
"motion": [
"Motion",
["doorbots", "authorized_doorbots", "stickup_cams"],
DEVICE_CLASS_MOTION,
],
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Ring binary sensors from a config entry."""
ring = hass.data[DOMAIN][config_entry.entry_id]["api"]
devices = hass.data[DOMAIN][config_entry.entry_id]["devices"]
sensors = []
for device_type in ("doorbots", "authorized_doorbots", "stickup_cams"):
for sensor_type in SENSOR_TYPES:
if device_type not in SENSOR_TYPES[sensor_type][1]:
continue
for device in devices[device_type]:
sensors.append(
RingBinarySensor(config_entry.entry_id, ring, device, sensor_type)
)
async_add_entities(sensors)
class RingBinarySensor(RingEntityMixin, BinarySensorEntity):
"""A binary sensor implementation for Ring device."""
_active_alert = None
def __init__(self, config_entry_id, ring, device, sensor_type):
"""Initialize a sensor for Ring device."""
super().__init__(config_entry_id, device)
self._ring = ring
self._sensor_type = sensor_type
self._name = "{} {}".format(self._device.name, SENSOR_TYPES.get(sensor_type)[0])
self._device_class = SENSOR_TYPES.get(sensor_type)[2]
self._state = None
self._unique_id = f"{device.id}-{sensor_type}"
self._update_alert()
async def async_added_to_hass(self):
"""Register callbacks."""
await super().async_added_to_hass()
self.ring_objects["dings_data"].async_add_listener(self._dings_update_callback)
self._dings_update_callback()
async def async_will_remove_from_hass(self):
"""Disconnect callbacks."""
await super().async_will_remove_from_hass()
self.ring_objects["dings_data"].async_remove_listener(
self._dings_update_callback
)
@callback
def _dings_update_callback(self):
"""Call update method."""
self._update_alert()
self.async_write_ha_state()
@callback
def _update_alert(self):
"""Update active alert."""
self._active_alert = next(
(
alert
for alert in self._ring.active_alerts()
if alert["kind"] == self._sensor_type
and alert["doorbot_id"] == self._device.id
),
None,
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def is_on(self):
"""Return True if the binary sensor is on."""
return self._active_alert is not None
@property
def device_class(self):
"""Return the class of the binary sensor."""
return self._device_class
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = super().device_state_attributes
if self._active_alert is None:
return attrs
attrs["state"] = self._active_alert["state"]
attrs["expires_at"] = datetime.fromtimestamp(
self._active_alert.get("now") + self._active_alert.get("expires_in")
).isoformat()
return attrs
|
import sys
from flexx.util.testing import run_tests_if_main, skipif, skip, raises
from flexx.event.both_tester import run_in_both, this_is_js
from flexx import event
loop = event.loop
class MyObject(event.Component):
foo = event.Property(0)
@event.action
def set_foo(self, v):
self._mutate_foo(v)
@event.action
def set_foo_add(self, *args):
self._mutate_foo(sum(args))
@event.action
def increase_foo(self):
self.set_foo(self.foo + 1) # mutation will be applied *now*
self.set_foo(self.foo + 1) # ... so we increase by 2
@event.action
def do_silly(self):
return 1 # not allowed
@run_in_both(MyObject)
def test_action_simple():
"""
True
True
hi
hi
43
43
12
? should not return a value
"""
m = MyObject()
print(m.foo == 0)
m.set_foo("hi")
print(m.foo == 0)
loop.iter()
print(m.foo)
m.set_foo(42)
m.set_foo(43)
print(m.foo)
loop.iter()
print(m.foo)
m.set_foo_add(3, 4, 5)
print(m.foo)
loop.iter()
print(m.foo)
m.do_silly()
loop.iter()
@run_in_both(MyObject)
def test_action_chained_calling():
"""
0
2
8
"""
m = MyObject()
loop.iter()
print(m.foo)
m.increase_foo()
loop.iter()
print(m.foo)
m.increase_foo().increase_foo().increase_foo()
loop.iter()
print(m.foo)
@run_in_both(MyObject)
def test_action_one_by_one():
"""
0
hi
there
42
42
xx
bar
0
"""
m = MyObject()
print(m.foo)
m.set_foo("hi")
m.set_foo("there")
m.set_foo(42)
loop._process_actions(1) # process one
print(m.foo)
loop._process_actions(1) # process one
print(m.foo)
loop._process_actions(1) # process one
print(m.foo)
loop._process_actions(1) # process one
print(m.foo)
print('xx')
m.set_foo("foo")
m.set_foo("bar")
m.set_foo(0)
loop._process_actions(2) # process two
print(m.foo)
loop._process_actions(2) # process two
print(m.foo)
@run_in_both(MyObject)
def test_action_init():
"""
9
9
12
42
"""
m = MyObject(foo=9)
print(m.foo)
loop.iter()
print(m.foo)
m = MyObject(foo=12)
print(m.foo)
m.set_foo(42)
loop.iter()
print(m.foo)
class MyObject_autoaction(event.Component):
foo = event.Property(0, settable=True)
@run_in_both(MyObject_autoaction)
def test_action_auto():
"""
True
True
hi
"""
m = MyObject_autoaction()
print(m.foo == 0) # None is represented as "null" in JS
m.set_foo("hi")
print(m.foo == 0)
loop.iter()
print(m.foo)
class MyObject_actionclash1(event.Component): # explicit method gets preference
foo = event.Property(0, settable=True)
@event.action
def set_foo(self, v):
print('Custom one')
self._mutate_foo(v)
class MyObject_actionclash2(MyObject_autoaction):
@event.action
def set_foo(self, v):
print('Custom one')
self._mutate_foo(v)
@run_in_both(MyObject_actionclash1, MyObject_actionclash2)
def test_action_clash():
"""
Custom one
Custom one
hi
Custom one
Custom one
hi
"""
m = MyObject_actionclash1()
m.set_foo("hi")
loop.iter()
print(m.foo)
m = MyObject_actionclash2()
m.set_foo("hi")
loop.iter()
print(m.foo)
class MyObject2(MyObject):
@event.action
def set_foo(self, v):
super().set_foo(v + 1)
class MyObject3(MyObject_autoaction): # base class has autogenerated set_foo
@event.action
def set_foo(self, v):
super().set_foo(v + 1)
@run_in_both(MyObject2, MyObject3)
def test_action_inheritance():
"""
1
5
1
5
"""
m = MyObject2()
m.set_foo(4)
print(m.foo)
loop.iter()
print(m.foo) # one iter handles action and supercall in one go
m = MyObject3()
m.set_foo(4)
print(m.foo)
loop.iter()
print(m.foo)
@run_in_both(MyObject)
def test_action_subaction():
"""
0
2
"""
m = MyObject()
m.set_foo(0)
loop.iter()
print(m.foo)
m.increase_foo()
loop.iter()
print(m.foo)
class MyObject4(event.Component):
@event.action
def emitit(self):
self.emit('boe', dict(value=42))
@event.reaction('!boe')
def on_boe(self, *events):
print([ev.value for ev in events])
@run_in_both(MyObject4)
def test_action_can_emit():
"""
[42]
[42, 42]
"""
m = MyObject4()
with loop:
m.emitit()
with loop:
m.emitit()
m.emitit()
class RecursiveActions(event.Component):
p1 = event.IntProp()
p2 = event.IntProp()
@event.action
def set1(self, v):
self.set2(v + 1)
self._mutate_p1(v)
@event.action
def set2(self, v):
self.set1(v - 1)
self._mutate_p2(v)
@run_in_both(RecursiveActions)
def test_property_recursive():
"""
0 0
? maximum
0 0
? maximum
0 0
"""
# What we really test is that we don't do anything to prevent action recursion :)
m = RecursiveActions()
loop.iter()
print(m.p1, m.p2)
try:
m.set1(7)
loop.iter()
except Exception as err:
print(err)
print(m.p1, m.p2)
try:
m.set2(18)
loop.iter()
except Exception as err:
print(err)
print(m.p1, m.p2)
class MyProp(event.IntProp):
def _validate(self, value, name, data):
super()._validate(value, name, data)
print('mutating', name)
class MyObject9(event.Component):
foo1 = MyProp(settable=True)
foo2 = MyProp(settable=True)
foo3 = MyProp(settable=False)
@event.action
def set_foo2(self, v):
print('setting foo2')
self._mutate_foo2(v)
@run_in_both(MyObject9, MyProp)
def test_property_setters_get_called():
"""
mutating foo1
mutating foo2
mutating foo3
setting foo2
mutating foo2
"""
# This validates that all properties are first set via their mutators,
# which cannot require other properties, and that then the properties
# that have a custom setter (but only those) have these setters called.
m = MyObject9()
## Meta-ish tests that are similar for property/emitter/action/reaction
@run_in_both(MyObject)
def test_action_not_settable():
"""
fail AttributeError
"""
m = MyObject()
try:
m.set_foo = 3
except AttributeError:
print('fail AttributeError')
# We cannot prevent deletion in JS, otherwise we cannot overload
def test_action_python_only():
m = MyObject()
# Action decorator needs proper callable
with raises(TypeError):
event.action(3)
if '__pypy__' in sys.builtin_module_names:
pass # skip
else:
with raises(TypeError):
event.action(isinstance)
# Check type of the instance attribute
assert isinstance(m.set_foo, event._action.Action)
# Cannot set or delete an action
with raises(AttributeError):
m.set_foo = 3
with raises(AttributeError):
del m.set_foo
# Repr and docs
assert 'action' in repr(m.__class__.set_foo).lower()
assert 'action' in repr(m.set_foo).lower()
assert 'foo' in repr(m.set_foo)
    # Also for autogenerated action
m = MyObject_autoaction()
assert 'action' in repr(m.__class__.set_foo).lower()
assert 'action' in repr(m.set_foo).lower()
assert 'foo' in repr(m.set_foo)
run_tests_if_main()
|
from flask.globals import request, session
# get / post data
def get_parameter(key, default=None):
'''
    info: get a request parameter from GET or POST data; other request methods just return the default.
'''
    # POST parameters
if request.method == 'POST':
param = request.form.get(key, default)
    # GET parameters
elif request.method == 'GET':
param = request.args.get(key, default)
else:
return default
return param
# get the logged-in user from the session
def get_login_user():
return session.get('u_id', {})
# store the logged-in user in the session
def login_user(user):
session['u_id'] = user
# log out the user by popping the session keys
def logout():
session.pop('oauth_token', None)
session.pop('u_id', None)
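# Illustrative usage (hypothetical route, not part of the original module); assumes
# an existing Flask app with a configured secret_key so sessions work:
#
#     @app.route('/login', methods=['GET', 'POST'])
#     def login_view():
#         user_id = get_parameter('u_id')      # works for both GET and POST
#         if user_id:
#             login_user(user_id)              # stores u_id in the session
#         return str(get_login_user())         # {} when nobody is logged in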
|
import argparse
import asyncio
import importlib
import logging
import os
import sys
from typing import List, Optional, Sequence, Text
from homeassistant import runner
from homeassistant.bootstrap import async_mount_local_lib_path
from homeassistant.config import get_default_config_dir
from homeassistant.requirements import pip_kwargs
from homeassistant.util.package import install_package, is_installed, is_virtual_env
# mypy: allow-untyped-defs, no-warn-return-any
def run(args: List) -> int:
"""Run a script."""
scripts = []
path = os.path.dirname(__file__)
for fil in os.listdir(path):
if fil == "__pycache__":
continue
if os.path.isdir(os.path.join(path, fil)):
scripts.append(fil)
elif fil != "__init__.py" and fil.endswith(".py"):
scripts.append(fil[:-3])
if not args:
print("Please specify a script to run.")
print("Available scripts:", ", ".join(scripts))
return 1
if args[0] not in scripts:
print("Invalid script specified.")
print("Available scripts:", ", ".join(scripts))
return 1
script = importlib.import_module(f"homeassistant.scripts.{args[0]}")
config_dir = extract_config_dir()
loop = asyncio.get_event_loop()
if not is_virtual_env():
loop.run_until_complete(async_mount_local_lib_path(config_dir))
_pip_kwargs = pip_kwargs(config_dir)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
for req in getattr(script, "REQUIREMENTS", []):
if is_installed(req):
continue
if not install_package(req, **_pip_kwargs):
print("Aborting script, could not install dependency", req)
return 1
asyncio.set_event_loop_policy(runner.HassEventLoopPolicy(False))
return script.run(args[1:]) # type: ignore
def extract_config_dir(args: Optional[Sequence[Text]] = None) -> str:
"""Extract the config dir from the arguments or get the default."""
parser = argparse.ArgumentParser(add_help=False)
parser.add_argument("-c", "--config", default=None)
parsed_args = parser.parse_known_args(args)[0]
return (
os.path.join(os.getcwd(), parsed_args.config)
if parsed_args.config
else get_default_config_dir()
)
|
import unittest
import smart_open.utils
class ClampTest(unittest.TestCase):
def test_low(self):
self.assertEqual(smart_open.utils.clamp(5, 0, 10), 5)
def test_high(self):
self.assertEqual(smart_open.utils.clamp(11, 0, 10), 10)
def test_out_of_range(self):
self.assertEqual(smart_open.utils.clamp(-1, 0, 10), 0)
|
import pytest
from molecule.command import create
@pytest.fixture
def _patched_create_setup(mocker):
return mocker.patch('molecule.command.create.Create._setup')
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
def test_execute(mocker, patched_logger_info, command_patched_ansible_create,
patched_config_validate, config_instance):
c = create.Create(config_instance)
c.execute()
x = [
mocker.call("Scenario: 'default'"),
mocker.call("Action: 'create'"),
]
assert x == patched_logger_info.mock_calls
assert 'docker' == config_instance.state.driver
command_patched_ansible_create.assert_called_once_with()
assert config_instance.state.created
@pytest.mark.parametrize(
'config_instance', ['command_driver_delegated_section_data'],
indirect=True)
def test_execute_skips_when_delegated_driver(
_patched_create_setup, patched_logger_warn,
command_patched_ansible_create, config_instance):
c = create.Create(config_instance)
c.execute()
msg = 'Skipping, instances are delegated.'
patched_logger_warn.assert_called_once_with(msg)
assert not command_patched_ansible_create.called
def test_execute_skips_when_instances_already_created(
patched_logger_warn, command_patched_ansible_create, config_instance):
config_instance.state.change_state('created', True)
c = create.Create(config_instance)
c.execute()
msg = 'Skipping, instances already created.'
patched_logger_warn.assert_called_once_with(msg)
assert not command_patched_ansible_create.called
|
def get_plugin_option(name, options):
"""
    Retrieve the value of the named option from an options list.
    :param name: name of the option to look up
    :param options: list of option dicts
    :return: the option's value, or its default when no value is set
"""
for o in options:
if o.get("name") == name:
return o.get("value", o.get("default"))
def set_plugin_option(name, value, options):
"""
    Set the value of the named option in an options list.
    :param name: name of the option to set
    :param value: new value for the option
    :param options: list of option dicts
"""
for o in options:
if o.get("name") == name:
o.update({"value": value})
|
from homeassistant.components.device_tracker import DeviceScanner
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.components.device_tracker.const import SOURCE_TYPE_ROUTER
def get_scanner(hass, config):
"""Return a mock scanner."""
return SCANNER
class MockScannerEntity(ScannerEntity):
"""Test implementation of a ScannerEntity."""
def __init__(self):
"""Init."""
self.connected = False
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
return SOURCE_TYPE_ROUTER
@property
def battery_level(self):
"""Return the battery level of the device.
Percentage from 0-100.
"""
return 100
@property
def is_connected(self):
"""Return true if the device is connected to the network."""
return self.connected
def set_connected(self):
"""Set connected to True."""
self.connected = True
self.async_schedule_update_ha_state()
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the config entry."""
entity = MockScannerEntity()
async_add_entities([entity])
class MockScanner(DeviceScanner):
"""Mock device scanner."""
def __init__(self):
"""Initialize the MockScanner."""
self.devices_home = []
def come_home(self, device):
"""Make a device come home."""
self.devices_home.append(device)
def leave_home(self, device):
"""Make a device leave the house."""
self.devices_home.remove(device)
def reset(self):
"""Reset which devices are home."""
self.devices_home = []
def scan_devices(self):
"""Return a list of fake devices."""
return list(self.devices_home)
def get_device_name(self, device):
"""Return a name for a mock device.
Return None for dev1 for testing.
"""
return None if device == "DEV1" else device.lower()
SCANNER = MockScanner()
|
from paasta_tools.kubernetes.application.controller_wrappers import (
get_application_wrapper,
)
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.paasta_service_config_loader import PaastaServiceConfigLoader
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import load_system_paasta_config
def main() -> None:
system_paasta_config = load_system_paasta_config()
kube_client = KubeClient()
services = {
service
for service, instance in get_services_for_cluster(
cluster=system_paasta_config.get_cluster(), instance_type="kubernetes"
)
}
for service in services:
pscl = PaastaServiceConfigLoader(service=service, load_deployments=False)
for instance_config in pscl.instance_configs(
cluster=system_paasta_config.get_cluster(),
instance_type_class=KubernetesDeploymentConfig,
):
max_instances = instance_config.get_max_instances()
if max_instances is not None:
formatted_application = instance_config.format_kubernetes_app()
formatted_application.spec.replicas = max_instances
wrapper = get_application_wrapper(formatted_application)
wrapper.soa_config = instance_config
print(f"Scaling up {service}.{instance_config.instance}")
wrapper.update(kube_client)
if __name__ == "__main__":
main()
|
import pandas as pd
import numpy as np
from scipy.stats import norm, mannwhitneyu, ranksums
from scattertext.termscoring.CorpusBasedTermScorer import CorpusBasedTermScorer
class MannWhitneyU(CorpusBasedTermScorer):
'''
term_scorer = (MannWhitneyU(corpus).set_categories('Positive', ['Negative'], ['Plot']))
html = st.produce_frequency_explorer(
corpus,
category='Positive',
not_categories=['Negative'],
neutral_categories=['Plot'],
term_scorer=term_scorer,
metadata=rdf['movie_name'],
grey_threshold=0,
show_neutral=True
)
file_name = 'rotten_fresh_mwu.html'
open(file_name, 'wb').write(html.encode('utf-8'))
IFrame(src=file_name, width=1300, height=700)
'''
def _set_scorer_args(self, **kwargs):
pass
def get_scores(self, *args):
return self.get_score_df()['mwu_z']
def get_score_df(self, correction_method=None):
'''
        Compute Mann-Whitney U statistics with z-values and p-values. Falls back to a normal approximation when mannwhitneyu reaches its numerical limits.
:param correction_method: str or None, correction method from statsmodels.stats.multitest.multipletests
'fdr_bh' is recommended.
:return: pd.DataFrame
'''
X = self._get_X().astype(np.float64)
X = X / X.sum(axis=1)
cat_X, ncat_X = self._get_cat_and_ncat(X)
def normal_apx(u, x, y):
# from https://stats.stackexchange.com/questions/116315/problem-with-mann-whitney-u-test-in-scipy
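            # Added note: with n1 = len(x) and n2 = len(y), the U statistic is
            # approximately normal under H0 with mean m_U = n1*n2/2 and
            # sigma_U = sqrt(n1*n2*(n1+n2+1)/12); this returns 2*Phi(z) for
            # z = (U - m_U)/sigma_U.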
m_u = len(x) * len(y) / 2
sigma_u = np.sqrt(len(x) * len(y) * (len(x) + len(y) + 1) / 12)
z = (u - m_u) / sigma_u
return 2*norm.cdf(z)
scores = []
for i in range(cat_X.shape[1]):
cat_list = cat_X.T[i].A1
ncat_list = ncat_X.T[i].A1
            try:
                if cat_list.mean() > ncat_list.mean():
                    mw = mannwhitneyu(cat_list, ncat_list, alternative='greater')
                    pvalue = mw.pvalue
                    if pvalue in (0, 1):
                        # fall back to the normal approximation at numerical limits
                        pvalue = normal_apx(mw.statistic, cat_list, ncat_list)
                    scores.append({'mwu': mw.statistic, 'mwu_p': pvalue,
                                   'mwu_z': norm.isf(float(pvalue)), 'valid': True})
                else:
                    mw = mannwhitneyu(ncat_list, cat_list, alternative='greater')
                    pvalue = mw.pvalue
                    if pvalue in (0, 1):
                        pvalue = normal_apx(mw.statistic, ncat_list, cat_list)
                    scores.append({'mwu': -mw.statistic, 'mwu_p': 1 - pvalue,
                                   'mwu_z': 1. - norm.isf(float(pvalue)), 'valid': True})
            except Exception:
                scores.append({'mwu': 0, 'mwu_p': 0, 'mwu_z': 0, 'valid': False})
score_df = pd.DataFrame(scores, index=self.corpus_.get_terms()).fillna(0)
if correction_method is not None:
from statsmodels.stats.multitest import multipletests
for method in ['mwu']:
valid_pvals = score_df[score_df.valid].mwu_p
valid_pvals_abs = np.min([valid_pvals, 1-valid_pvals], axis=0)
valid_pvals_abs_corr = multipletests(valid_pvals_abs, method=correction_method)[1]
score_df[method + '_p_corr'] = 0.5
valid_pvals_abs_corr[valid_pvals > 0.5] = 1. - valid_pvals_abs_corr[valid_pvals > 0.5]
valid_pvals_abs_corr[valid_pvals < 0.5] = valid_pvals_abs_corr[valid_pvals < 0.5]
score_df.loc[score_df.valid, method + '_p_corr'] = valid_pvals_abs_corr
score_df[method + '_z'] = -norm.ppf(score_df[method + '_p_corr'])
return score_df
def get_name(self):
return "Mann Whitney Z"
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy.random as random
import numpy as np
import matplotlib.pyplot as plt
from filterpy.kalman import FadingKalmanFilter
from pytest import approx
from scipy.spatial.distance import mahalanobis as scipy_mahalanobis
DO_PLOT = False
def test_noisy_1d():
f = FadingKalmanFilter(3., dim_x=2, dim_z=1)
f.x = np.array([[2.],
[0.]]) # initial state (location and velocity)
f.F = np.array([[1.,1.],
[0.,1.]]) # state transition matrix
f.H = np.array([[1.,0.]]) # Measurement function
f.P *= 1000. # covariance matrix
f.R = 5.**2 # state uncertainty
f.Q = np.array([[0, 0],
[0, 0.0001]]) # process uncertainty
measurements = []
results = []
zs = []
    for t in range(100):
# create measurement = t plus white noise
z = t + random.randn() * np.sqrt(f.R)
zs.append(z)
# perform kalman filtering
f.update(z)
f.predict()
# save data
results.append(f.x[0, 0])
measurements.append(z)
# test mahalanobis
a = np.zeros(f.y.shape)
maha = scipy_mahalanobis(a, f.y, f.SI)
assert f.mahalanobis == approx(maha)
print(z, maha, f.y, f.S)
assert maha < 4
# now do a batch run with the stored z values so we can test that
# it is working the same as the recursive implementation.
# give slightly different P so result is slightly different
    f.x = np.array([[2., 0]]).T
f.P = np.eye(2)*100.
    m, c, _, _ = f.batch_filter(zs, update_first=False)
# plot data
if DO_PLOT:
p1, = plt.plot(measurements,'r', alpha=0.5)
p2, = plt.plot (results,'b')
p4, = plt.plot(m[:,0], 'm')
p3, = plt.plot ([0, 100],[0, 100], 'g') # perfect result
plt.legend([p1,p2, p3, p4],
["noisy measurement", "KF output", "ideal", "batch"], loc=4)
plt.show()
if __name__ == "__main__":
DO_PLOT = True
test_noisy_1d()
|
import pytest
from kubernetes.client import V2beta2ExternalMetricSource
from kubernetes.client import V2beta2ExternalMetricStatus
from kubernetes.client import V2beta2MetricIdentifier
from kubernetes.client import V2beta2MetricSpec
from kubernetes.client import V2beta2MetricStatus
from kubernetes.client import V2beta2MetricTarget
from kubernetes.client import V2beta2MetricValueStatus
from kubernetes.client import V2beta2PodsMetricSource
from kubernetes.client import V2beta2PodsMetricStatus
from kubernetes.client import V2beta2ResourceMetricSource
from kubernetes.client import V2beta2ResourceMetricStatus
from paasta_tools.instance.hpa_metrics_parser import HPAMetricsParser
@pytest.fixture
def parser():
return HPAMetricsParser(hpa=None)
def test_parse_target_external_metric_value(parser):
metric_spec = V2beta2MetricSpec(
type="External",
external=V2beta2ExternalMetricSource(
metric=V2beta2MetricIdentifier(name="foo"),
target=V2beta2MetricTarget(type="Value", average_value=12,),
),
)
status = parser.parse_target(metric_spec)
assert status["name"] == "foo"
assert status["target_value"] == "12"
def test_parse_target_external_metric_average_value(parser):
# The parser handles this case, but it's not currently
# used in kubernetes_tools
metric_spec = V2beta2MetricSpec(
type="External",
external=V2beta2ExternalMetricSource(
metric=V2beta2MetricIdentifier(name="foo"),
target=V2beta2MetricTarget(type="AverageValue", average_value=0.5,),
),
)
status = parser.parse_target(metric_spec)
assert status["name"] == "foo"
assert status["target_value"] == "0.5"
def test_parse_target_pod_metric(parser):
metric_spec = V2beta2MetricSpec(
type="Pods",
pods=V2beta2PodsMetricSource(
metric=V2beta2MetricIdentifier(name="foo"),
target=V2beta2MetricTarget(type="AverageValue", average_value=0.5,),
),
)
status = parser.parse_target(metric_spec)
assert status["name"] == "foo"
assert status["target_value"] == "0.5"
def test_parse_target_resource_metric(parser):
metric_spec = V2beta2MetricSpec(
type="Resource",
resource=V2beta2ResourceMetricSource(
name="cpu",
target=V2beta2MetricTarget(type="Utilization", average_utilization=0.5,),
),
)
status = parser.parse_target(metric_spec)
assert status["name"] == "cpu"
assert status["target_value"] == "0.5"
def test_parse_current_external_metric_value(parser):
metric_status = V2beta2MetricStatus(
type="External",
external=V2beta2ExternalMetricStatus(
current=V2beta2MetricValueStatus(value=4,),
metric=V2beta2MetricIdentifier(name="foo"),
),
)
status = parser.parse_current(metric_status)
assert status["name"] == "foo"
assert status["current_value"] == "4"
def test_parse_current_external_metric_average_value(parser):
# The parser handles this case, but it's not currently
# used in kubernetes_tools
metric_status = V2beta2MetricStatus(
type="External",
external=V2beta2ExternalMetricStatus(
current=V2beta2MetricValueStatus(average_value=0.4,),
metric=V2beta2MetricIdentifier(name="foo"),
),
)
status = parser.parse_current(metric_status)
assert status["name"] == "foo"
assert status["current_value"] == "0.4"
def test_parse_current_pod_metric(parser):
metric_status = V2beta2MetricStatus(
type="Pods",
pods=V2beta2PodsMetricStatus(
current=V2beta2MetricValueStatus(average_value=0.4,),
metric=V2beta2MetricIdentifier(name="foo"),
),
)
status = parser.parse_current(metric_status)
assert status["name"] == "foo"
assert status["current_value"] == "0.4"
def test_parse_current_resource_metric(parser):
metric_status = V2beta2MetricStatus(
type="Resource",
resource=V2beta2ResourceMetricStatus(
current=V2beta2MetricValueStatus(average_utilization=0.4,), name="cpu",
),
)
status = parser.parse_current(metric_status)
assert status["name"] == "cpu"
assert status["current_value"] == "0.4"
|
import requests
from nikola.plugin_categories import Command
from nikola import __version__
URL = 'https://pypi.org/pypi/Nikola/json'
class CommandVersion(Command):
"""Print Nikola version."""
name = "version"
doc_usage = "[--check]"
needs_config = False
doc_purpose = "print the Nikola version number"
cmd_options = [
{
'name': 'check',
'long': 'check',
'short': '',
'default': False,
'type': bool,
'help': "Check for new versions.",
}
]
def _execute(self, options={}, args=None):
"""Print the version number."""
print("Nikola v" + __version__)
if options.get('check'):
data = requests.get(URL).json()
pypi_version = data['info']['version']
if pypi_version == __version__:
print("Nikola is up-to-date")
else:
print("The latest version of Nikola is v{0}. Please upgrade "
"using `pip install --upgrade Nikola=={0}` or your "
"system package manager.".format(pypi_version))
|
from collections import defaultdict
from concurrent.futures import ThreadPoolExecutor
from datetime import datetime
import io
import itertools
import logging
import operator
import os
from babelfish import Language, LanguageReverseError
from guessit import guessit
from rarfile import BadRarFile, NotRarFile, RarCannotExec, RarFile, Error, is_rarfile
from zipfile import BadZipfile
from .extensions import provider_manager, default_providers, refiner_manager
from .score import compute_score as default_compute_score
from .subtitle import SUBTITLE_EXTENSIONS
from .utils import handle_exception
from .video import VIDEO_EXTENSIONS, Episode, Movie, Video
#: Supported archive extensions
ARCHIVE_EXTENSIONS = ('.rar',)
logger = logging.getLogger(__name__)
class ProviderPool(object):
"""A pool of providers with the same API as a single :class:`~subliminal.providers.Provider`.
It has a few extra features:
* Lazy loads providers when needed and supports the `with` statement to :meth:`terminate`
the providers on exit.
* Automatically discard providers on failure.
:param list providers: name of providers to use, if not all.
:param dict provider_configs: provider configuration as keyword arguments per provider name to pass when
instantiating the :class:`~subliminal.providers.Provider`.
"""
def __init__(self, providers=None, provider_configs=None):
#: Name of providers to use
self.providers = providers or default_providers
#: Provider configuration
self.provider_configs = provider_configs or {}
#: Initialized providers
self.initialized_providers = {}
#: Discarded providers
self.discarded_providers = set()
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, traceback):
self.terminate()
def __getitem__(self, name):
if name not in self.providers:
raise KeyError
if name not in self.initialized_providers:
logger.info('Initializing provider %s', name)
provider = provider_manager[name].plugin(**self.provider_configs.get(name, {}))
provider.initialize()
self.initialized_providers[name] = provider
return self.initialized_providers[name]
def __delitem__(self, name):
if name not in self.initialized_providers:
raise KeyError(name)
try:
logger.info('Terminating provider %s', name)
self.initialized_providers[name].terminate()
except Exception as e:
handle_exception(e, 'Provider {} improperly terminated'.format(name))
del self.initialized_providers[name]
def __iter__(self):
return iter(self.initialized_providers)
def list_subtitles_provider(self, provider, video, languages):
"""List subtitles with a single provider.
The video and languages are checked against the provider.
:param str provider: name of the provider.
:param video: video to list subtitles for.
:type video: :class:`~subliminal.video.Video`
:param languages: languages to search for.
:type languages: set of :class:`~babelfish.language.Language`
:return: found subtitles.
:rtype: list of :class:`~subliminal.subtitle.Subtitle` or None
"""
# check video validity
if not provider_manager[provider].plugin.check(video):
logger.info('Skipping provider %r: not a valid video', provider)
return []
# check supported languages
provider_languages = provider_manager[provider].plugin.check_languages(languages)
if not provider_languages:
logger.info('Skipping provider %r: no language to search for', provider)
return []
# list subtitles
logger.info('Listing subtitles with provider %r and languages %r', provider, provider_languages)
try:
return self[provider].list_subtitles(video, provider_languages)
except Exception as e:
handle_exception(e, 'Provider {}'.format(provider))
def list_subtitles(self, video, languages):
"""List subtitles.
:param video: video to list subtitles for.
:type video: :class:`~subliminal.video.Video`
:param languages: languages to search for.
:type languages: set of :class:`~babelfish.language.Language`
:return: found subtitles.
:rtype: list of :class:`~subliminal.subtitle.Subtitle`
"""
subtitles = []
for name in self.providers:
# check discarded providers
if name in self.discarded_providers:
logger.debug('Skipping discarded provider %r', name)
continue
# list subtitles
provider_subtitles = self.list_subtitles_provider(name, video, languages)
if provider_subtitles is None:
logger.info('Discarding provider %s', name)
self.discarded_providers.add(name)
continue
# add the subtitles
subtitles.extend(provider_subtitles)
return subtitles
def download_subtitle(self, subtitle):
"""Download `subtitle`'s :attr:`~subliminal.subtitle.Subtitle.content`.
:param subtitle: subtitle to download.
:type subtitle: :class:`~subliminal.subtitle.Subtitle`
:return: `True` if the subtitle has been successfully downloaded, `False` otherwise.
:rtype: bool
"""
# check discarded providers
if subtitle.provider_name in self.discarded_providers:
logger.warning('Provider %r is discarded', subtitle.provider_name)
return False
logger.info('Downloading subtitle %r', subtitle)
try:
self[subtitle.provider_name].download_subtitle(subtitle)
except (BadZipfile, BadRarFile):
logger.error('Bad archive for subtitle %r', subtitle)
except Exception as e:
handle_exception(e, 'Discarding provider {}'.format(subtitle.provider_name))
self.discarded_providers.add(subtitle.provider_name)
# check subtitle validity
if not subtitle.is_valid():
logger.error('Invalid subtitle')
return False
return True
def download_best_subtitles(self, subtitles, video, languages, min_score=0, hearing_impaired=False, only_one=False,
compute_score=None):
"""Download the best matching subtitles.
:param subtitles: the subtitles to use.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
:param video: video to download subtitles for.
:type video: :class:`~subliminal.video.Video`
:param languages: languages to download.
:type languages: set of :class:`~babelfish.language.Language`
:param int min_score: minimum score for a subtitle to be downloaded.
:param bool hearing_impaired: hearing impaired preference.
:param bool only_one: download only one subtitle, not one per language.
:param compute_score: function that takes `subtitle` and `video` as positional arguments,
`hearing_impaired` as keyword argument and returns the score.
:return: downloaded subtitles.
:rtype: list of :class:`~subliminal.subtitle.Subtitle`
"""
compute_score = compute_score or default_compute_score
# sort subtitles by score
scored_subtitles = sorted([(s, compute_score(s, video, hearing_impaired=hearing_impaired))
for s in subtitles], key=operator.itemgetter(1), reverse=True)
# download best subtitles, falling back on the next on error
downloaded_subtitles = []
for subtitle, score in scored_subtitles:
# check score
if score < min_score:
logger.info('Score %d is below min_score (%d)', score, min_score)
break
# check downloaded languages
if subtitle.language in set(s.language for s in downloaded_subtitles):
logger.debug('Skipping subtitle: %r already downloaded', subtitle.language)
continue
# download
if self.download_subtitle(subtitle):
downloaded_subtitles.append(subtitle)
# stop when all languages are downloaded
if set(s.language for s in downloaded_subtitles) == languages:
logger.debug('All languages downloaded')
break
# stop if only one subtitle is requested
if only_one:
logger.debug('Only one subtitle downloaded')
break
return downloaded_subtitles
def terminate(self):
"""Terminate all the :attr:`initialized_providers`."""
logger.debug('Terminating initialized providers')
for name in list(self.initialized_providers):
del self[name]
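# Illustrative usage sketch (editor's addition): the `with` support documented in
# the class docstring lazily initializes providers on first use and terminates
# them on exit. The provider name and the `video` object below are assumptions
# for demonstration only:
#
#     with ProviderPool(providers=['podnapisi']) as pool:
#         found = pool.list_subtitles(video, {Language('eng')})
#         for subtitle in found:
#             pool.download_subtitle(subtitle)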
class AsyncProviderPool(ProviderPool):
"""Subclass of :class:`ProviderPool` with asynchronous support for :meth:`~ProviderPool.list_subtitles`.
:param int max_workers: maximum number of threads to use. If `None`, :attr:`max_workers` will be set
to the number of :attr:`~ProviderPool.providers`.
"""
def __init__(self, max_workers=None, *args, **kwargs):
super(AsyncProviderPool, self).__init__(*args, **kwargs)
#: Maximum number of threads to use
self.max_workers = max_workers or len(self.providers)
def list_subtitles_provider(self, provider, video, languages):
return provider, super(AsyncProviderPool, self).list_subtitles_provider(provider, video, languages)
def list_subtitles(self, video, languages):
subtitles = []
with ThreadPoolExecutor(self.max_workers) as executor:
for provider, provider_subtitles in executor.map(self.list_subtitles_provider, self.providers,
itertools.repeat(video, len(self.providers)),
itertools.repeat(languages, len(self.providers))):
# discard provider that failed
if provider_subtitles is None:
logger.info('Discarding provider %s', provider)
self.discarded_providers.add(provider)
continue
# add subtitles
subtitles.extend(provider_subtitles)
return subtitles
def check_video(video, languages=None, age=None, undefined=False):
"""Perform some checks on the `video`.
All the checks are optional. Return `False` if any of these checks fails:
* `languages` already exist in `video`'s :attr:`~subliminal.video.Video.subtitle_languages`.
* `video` is older than `age`.
* `video` has an `undefined` language in :attr:`~subliminal.video.Video.subtitle_languages`.
:param video: video to check.
:type video: :class:`~subliminal.video.Video`
:param languages: desired languages.
:type languages: set of :class:`~babelfish.language.Language`
:param datetime.timedelta age: maximum age of the video.
:param bool undefined: fail on existing undefined language.
:return: `True` if the video passes the checks, `False` otherwise.
:rtype: bool
"""
# language test
if languages and not (languages - video.subtitle_languages):
logger.debug('All languages %r exist', languages)
return False
# age test
if age and video.age > age:
logger.debug('Video is older than %r', age)
return False
# undefined test
if undefined and Language('und') in video.subtitle_languages:
logger.debug('Undefined language found')
return False
return True
def search_external_subtitles(path, directory=None):
"""Search for external subtitles from a video `path` and their associated language.
Unless `directory` is provided, search will be made in the same directory as the video file.
:param str path: path to the video.
:param str directory: directory to search for subtitles.
:return: found subtitles with their languages.
:rtype: dict
"""
# split path
dirpath, filename = os.path.split(path)
dirpath = dirpath or '.'
fileroot, fileext = os.path.splitext(filename)
# search for subtitles
subtitles = {}
for p in os.listdir(directory or dirpath):
# keep only valid subtitle filenames
if not p.startswith(fileroot) or not p.lower().endswith(SUBTITLE_EXTENSIONS):
continue
# extract the potential language code
language = Language('und')
language_code = p[len(fileroot):-len(os.path.splitext(p)[1])].replace(fileext, '').replace('_', '-')[1:]
if language_code:
try:
language = Language.fromietf(language_code)
except (ValueError, LanguageReverseError):
logger.error('Cannot parse language code %r', language_code)
subtitles[p] = language
logger.debug('Found subtitles %r', subtitles)
return subtitles
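# Worked example (editor's addition; the file names are assumptions): for a video
# at '/media/Movie.2019.mkv', a sibling file 'Movie.2019.en.srt' is reported with
# Language('eng'), while 'Movie.2019.srt' (no language suffix) falls back to the
# undefined language Language('und').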
def scan_video(path):
"""Scan a video from a `path`.
:param str path: existing path to the video.
:return: the scanned video.
:rtype: :class:`~subliminal.video.Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check video extension
if not path.lower().endswith(VIDEO_EXTENSIONS):
raise ValueError('%r is not a valid video extension' % os.path.splitext(path)[1])
dirpath, filename = os.path.split(path)
logger.info('Scanning video %r in %r', filename, dirpath)
# guess
video = Video.fromguess(path, guessit(path))
# size
video.size = os.path.getsize(path)
logger.debug('Size is %d', video.size)
return video
def scan_archive(path):
"""Scan an archive from a `path`.
:param str path: existing path to the archive.
:return: the scanned video.
:rtype: :class:`~subliminal.video.Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
if not is_rarfile(path):
raise ValueError("'{0}' is not a valid archive".format(os.path.splitext(path)[1]))
dir_path, filename = os.path.split(path)
logger.info('Scanning archive %r in %r', filename, dir_path)
# Get filename and file size from RAR
rar = RarFile(path)
# check that the rar doesn't need a password
if rar.needs_password():
raise ValueError('Rar requires a password')
# raise an exception if the rar file is broken
# must be called to avoid a potential deadlock with some broken rars
rar.testrar()
file_info = [f for f in rar.infolist() if not f.isdir() and f.filename.endswith(VIDEO_EXTENSIONS)]
# sort by file size descending, the largest video in the archive is the one we want, there may be samples or intros
file_info.sort(key=operator.attrgetter('file_size'), reverse=True)
# no video found
if not file_info:
raise ValueError('No video in archive')
# Free the information about irrelevant files before guessing
file_info = file_info[0]
# guess
video_filename = file_info.filename
video_path = os.path.join(dir_path, video_filename)
video = Video.fromguess(video_path, guessit(video_path))
# size
video.size = file_info.file_size
return video
def scan_videos(path, age=None, archives=True):
"""Scan `path` for videos and their subtitles.
See :func:`refine` to find additional information for the video.
:param str path: existing directory path to scan.
:param datetime.timedelta age: maximum age of the video or archive.
:param bool archives: scan videos in archives.
:return: the scanned videos.
:rtype: list of :class:`~subliminal.video.Video`
"""
# check for non-existing path
if not os.path.exists(path):
raise ValueError('Path does not exist')
# check for non-directory path
if not os.path.isdir(path):
raise ValueError('Path is not a directory')
# walk the path
videos = []
for dirpath, dirnames, filenames in os.walk(path):
logger.debug('Walking directory %r', dirpath)
# remove badly encoded and hidden dirnames
for dirname in list(dirnames):
if dirname.startswith('.'):
logger.debug('Skipping hidden dirname %r in %r', dirname, dirpath)
dirnames.remove(dirname)
# Skip Sample folder
if dirname.lower() == 'sample':
logger.debug('Skipping sample dirname %r in %r', dirname, dirpath)
dirnames.remove(dirname)
# scan for videos
for filename in filenames:
# filter on videos and archives
if not (filename.lower().endswith(VIDEO_EXTENSIONS) or
archives and filename.lower().endswith(ARCHIVE_EXTENSIONS)):
continue
# skip hidden files
if filename.startswith('.'):
logger.debug('Skipping hidden filename %r in %r', filename, dirpath)
continue
# skip 'sample' media files
if os.path.splitext(filename)[0].lower() == 'sample':
logger.debug('Skipping sample filename %r in %r', filename, dirpath)
continue
# reconstruct the file path
filepath = os.path.join(dirpath, filename)
# skip links
if os.path.islink(filepath):
logger.debug('Skipping link %r in %r', filename, dirpath)
continue
# skip old files
try:
file_age = datetime.utcfromtimestamp(os.path.getmtime(filepath))
except ValueError:
logger.warning('Could not get age of file %r in %r', filename, dirpath)
continue
else:
if age and datetime.utcnow() - file_age > age:
logger.debug('Skipping old file %r in %r', filename, dirpath)
continue
# scan
if filename.lower().endswith(VIDEO_EXTENSIONS): # video
try:
video = scan_video(filepath)
except ValueError: # pragma: no cover
logger.exception('Error scanning video')
continue
elif archives and filename.lower().endswith(ARCHIVE_EXTENSIONS): # archive
try:
video = scan_archive(filepath)
except (Error, NotRarFile, RarCannotExec, ValueError): # pragma: no cover
logger.exception('Error scanning archive')
continue
else: # pragma: no cover
raise ValueError('Unsupported file %r' % filename)
videos.append(video)
return videos
def refine(video, episode_refiners=None, movie_refiners=None, refiner_configs=None, **kwargs):
"""Refine a video using :ref:`refiners`.
.. note::
Exceptions raised in refiners are silently passed and logged.
:param video: the video to refine.
:type video: :class:`~subliminal.video.Video`
:param tuple episode_refiners: refiners to use for episodes.
:param tuple movie_refiners: refiners to use for movies.
:param dict refiner_configs: refiner configuration as keyword arguments per refiner name to pass when
calling the refine method
:param \*\*kwargs: additional parameters for the :func:`~subliminal.refiners.refine` functions.
"""
refiners = ()
if isinstance(video, Episode):
refiners = episode_refiners or ('metadata', 'tvdb', 'omdb')
elif isinstance(video, Movie):
refiners = movie_refiners or ('metadata', 'omdb')
for refiner in ('hash', ) + refiners:
logger.info('Refining video with %s', refiner)
try:
refiner_manager[refiner].plugin(video, **dict((refiner_configs or {}).get(refiner, {}), **kwargs))
except Exception as e:
handle_exception(e, 'Failed to refine video {0!r}'.format(video.name))
def list_subtitles(videos, languages, pool_class=ProviderPool, **kwargs):
"""List subtitles.
The `videos` must pass the `languages` check of :func:`check_video`.
:param videos: videos to list subtitles for.
:type videos: set of :class:`~subliminal.video.Video`
:param languages: languages to search for.
:type languages: set of :class:`~babelfish.language.Language`
:param pool_class: class to use as provider pool.
:type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar
:param \*\*kwargs: additional parameters for the provided `pool_class` constructor.
:return: found subtitles per video.
:rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle`
"""
listed_subtitles = defaultdict(list)
# check videos
checked_videos = []
for video in videos:
if not check_video(video, languages=languages):
logger.info('Skipping video %r', video)
continue
checked_videos.append(video)
# return immediately if no video passed the checks
if not checked_videos:
return listed_subtitles
# list subtitles
with pool_class(**kwargs) as pool:
for video in checked_videos:
logger.info('Listing subtitles for %r', video)
subtitles = pool.list_subtitles(video, languages - video.subtitle_languages)
listed_subtitles[video].extend(subtitles)
logger.info('Found %d subtitle(s)', len(subtitles))
return listed_subtitles
def download_subtitles(subtitles, pool_class=ProviderPool, **kwargs):
"""Download :attr:`~subliminal.subtitle.Subtitle.content` of `subtitles`.
:param subtitles: subtitles to download.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
:param pool_class: class to use as provider pool.
:type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar
:param \*\*kwargs: additional parameters for the provided `pool_class` constructor.
"""
with pool_class(**kwargs) as pool:
for subtitle in subtitles:
logger.info('Downloading subtitle %r', subtitle)
pool.download_subtitle(subtitle)
def download_best_subtitles(videos, languages, min_score=0, hearing_impaired=False, only_one=False, compute_score=None,
pool_class=ProviderPool, **kwargs):
"""List and download the best matching subtitles.
The `videos` must pass the `languages` and `undefined` (`only_one`) checks of :func:`check_video`.
:param videos: videos to download subtitles for.
:type videos: set of :class:`~subliminal.video.Video`
:param languages: languages to download.
:type languages: set of :class:`~babelfish.language.Language`
:param int min_score: minimum score for a subtitle to be downloaded.
:param bool hearing_impaired: hearing impaired preference.
:param bool only_one: download only one subtitle, not one per language.
:param compute_score: function that takes `subtitle` and `video` as positional arguments,
`hearing_impaired` as keyword argument and returns the score.
:param pool_class: class to use as provider pool.
:type pool_class: :class:`ProviderPool`, :class:`AsyncProviderPool` or similar
:param \*\*kwargs: additional parameters for the provided `pool_class` constructor.
:return: downloaded subtitles per video.
:rtype: dict of :class:`~subliminal.video.Video` to list of :class:`~subliminal.subtitle.Subtitle`
"""
downloaded_subtitles = defaultdict(list)
# check videos
checked_videos = []
for video in videos:
if not check_video(video, languages=languages, undefined=only_one):
logger.info('Skipping video %r', video)
continue
checked_videos.append(video)
# return immediately if no video passed the checks
if not checked_videos:
return downloaded_subtitles
# download best subtitles
with pool_class(**kwargs) as pool:
for video in checked_videos:
logger.info('Downloading best subtitles for %r', video)
subtitles = pool.download_best_subtitles(pool.list_subtitles(video, languages - video.subtitle_languages),
video, languages, min_score=min_score,
hearing_impaired=hearing_impaired, only_one=only_one,
compute_score=compute_score)
logger.info('Downloaded %d subtitle(s)', len(subtitles))
downloaded_subtitles[video].extend(subtitles)
return downloaded_subtitles
def save_subtitles(video, subtitles, single=False, directory=None, encoding=None):
"""Save subtitles on filesystem.
Subtitles are saved in the order of the list. If a subtitle with a language has already been saved, other subtitles
with the same language are silently ignored.
The extension used is `.lang.srt` by default or `.srt` if `single` is `True`, with `lang` being the IETF code for
the :attr:`~subliminal.subtitle.Subtitle.language` of the subtitle.
:param video: video of the subtitles.
:type video: :class:`~subliminal.video.Video`
:param subtitles: subtitles to save.
:type subtitles: list of :class:`~subliminal.subtitle.Subtitle`
:param bool single: save a single subtitle, default is to save one subtitle per language.
:param str directory: path to directory where to save the subtitles, default is next to the video.
:param str encoding: encoding in which to save the subtitles, default is to keep original encoding.
:return: the saved subtitles
:rtype: list of :class:`~subliminal.subtitle.Subtitle`
"""
saved_subtitles = []
for subtitle in subtitles:
# check content
if subtitle.content is None:
logger.error('Skipping subtitle %r: no content', subtitle)
continue
# check language
if subtitle.language in set(s.language for s in saved_subtitles):
logger.debug('Skipping subtitle %r: language already saved', subtitle)
continue
# create subtitle path
subtitle_path = subtitle.get_path(video, single=single)
if directory is not None:
subtitle_path = os.path.join(directory, os.path.split(subtitle_path)[1])
# save content as is or in the specified encoding
logger.info('Saving %r to %r', subtitle, subtitle_path)
if encoding is None:
with io.open(subtitle_path, 'wb') as f:
f.write(subtitle.content)
else:
with io.open(subtitle_path, 'w', encoding=encoding) as f:
f.write(subtitle.text)
saved_subtitles.append(subtitle)
# check single
if single:
break
return saved_subtitles
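# --- Illustrative end-to-end sketch (editor's addition, not part of the library) ---
# Ties together scan_videos(), refine(), download_best_subtitles() and
# save_subtitles() as described in the docstrings above. The directory path,
# age limit and language set are assumptions for demonstration purposes only.
if __name__ == '__main__':  # pragma: no cover
    from datetime import timedelta

    # scan a directory (including .rar archives) for videos newer than two weeks
    found_videos = scan_videos('/path/to/videos', age=timedelta(weeks=2))

    # enrich each video with metadata from the configured refiners
    for found_video in found_videos:
        refine(found_video)

    # list and download the best English subtitle per video, using one thread per provider
    best = download_best_subtitles(set(found_videos), {Language('eng')},
                                   pool_class=AsyncProviderPool)

    # save each downloaded subtitle next to its video
    for found_video, found_subtitles in best.items():
        save_subtitles(found_video, found_subtitles)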
|
from abc import abstractmethod
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import (
ATTR_CODE,
ATTR_CODE_FORMAT,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
make_entity_service_schema,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from .const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
SUPPORT_ALARM_TRIGGER,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "alarm_control_panel"
SCAN_INTERVAL = timedelta(seconds=30)
ATTR_CHANGED_BY = "changed_by"
FORMAT_TEXT = "text"
FORMAT_NUMBER = "number"
ATTR_CODE_ARM_REQUIRED = "code_arm_required"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
ALARM_SERVICE_SCHEMA = make_entity_service_schema({vol.Optional(ATTR_CODE): cv.string})
async def async_setup(hass, config):
"""Track states and offer events for sensors."""
component = hass.data[DOMAIN] = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(
SERVICE_ALARM_DISARM, ALARM_SERVICE_SCHEMA, "async_alarm_disarm"
)
component.async_register_entity_service(
SERVICE_ALARM_ARM_HOME,
ALARM_SERVICE_SCHEMA,
"async_alarm_arm_home",
[SUPPORT_ALARM_ARM_HOME],
)
component.async_register_entity_service(
SERVICE_ALARM_ARM_AWAY,
ALARM_SERVICE_SCHEMA,
"async_alarm_arm_away",
[SUPPORT_ALARM_ARM_AWAY],
)
component.async_register_entity_service(
SERVICE_ALARM_ARM_NIGHT,
ALARM_SERVICE_SCHEMA,
"async_alarm_arm_night",
[SUPPORT_ALARM_ARM_NIGHT],
)
component.async_register_entity_service(
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
ALARM_SERVICE_SCHEMA,
"async_alarm_arm_custom_bypass",
[SUPPORT_ALARM_ARM_CUSTOM_BYPASS],
)
component.async_register_entity_service(
SERVICE_ALARM_TRIGGER,
ALARM_SERVICE_SCHEMA,
"async_alarm_trigger",
[SUPPORT_ALARM_TRIGGER],
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class AlarmControlPanelEntity(Entity):
"""An abstract class for alarm control entities."""
@property
def code_format(self):
"""Regex for code format or None if no code is required."""
return None
@property
def changed_by(self):
"""Last change triggered by."""
return None
@property
def code_arm_required(self):
"""Whether the code is required for arm actions."""
return True
def alarm_disarm(self, code=None):
"""Send disarm command."""
raise NotImplementedError()
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
await self.hass.async_add_executor_job(self.alarm_disarm, code)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
raise NotImplementedError()
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
await self.hass.async_add_executor_job(self.alarm_arm_home, code)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
raise NotImplementedError()
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
await self.hass.async_add_executor_job(self.alarm_arm_away, code)
def alarm_arm_night(self, code=None):
"""Send arm night command."""
raise NotImplementedError()
async def async_alarm_arm_night(self, code=None):
"""Send arm night command."""
await self.hass.async_add_executor_job(self.alarm_arm_night, code)
def alarm_trigger(self, code=None):
"""Send alarm trigger command."""
raise NotImplementedError()
async def async_alarm_trigger(self, code=None):
"""Send alarm trigger command."""
await self.hass.async_add_executor_job(self.alarm_trigger, code)
def alarm_arm_custom_bypass(self, code=None):
"""Send arm custom bypass command."""
raise NotImplementedError()
async def async_alarm_arm_custom_bypass(self, code=None):
"""Send arm custom bypass command."""
await self.hass.async_add_executor_job(self.alarm_arm_custom_bypass, code)
@property
@abstractmethod
def supported_features(self) -> int:
"""Return the list of supported features."""
@property
def state_attributes(self):
"""Return the state attributes."""
return {
ATTR_CODE_FORMAT: self.code_format,
ATTR_CHANGED_BY: self.changed_by,
ATTR_CODE_ARM_REQUIRED: self.code_arm_required,
}
class AlarmControlPanel(AlarmControlPanelEntity):
"""An abstract class for alarm control entities (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"AlarmControlPanel is deprecated, modify %s to extend AlarmControlPanelEntity",
cls.__name__,
)
|
from datetime import timedelta
import logging
from urllib.error import HTTPError
from coinmarketcap import Market
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_DISPLAY_CURRENCY
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_VOLUME_24H = "volume_24h"
ATTR_AVAILABLE_SUPPLY = "available_supply"
ATTR_CIRCULATING_SUPPLY = "circulating_supply"
ATTR_MARKET_CAP = "market_cap"
ATTR_PERCENT_CHANGE_24H = "percent_change_24h"
ATTR_PERCENT_CHANGE_7D = "percent_change_7d"
ATTR_PERCENT_CHANGE_1H = "percent_change_1h"
ATTR_PRICE = "price"
ATTR_RANK = "rank"
ATTR_SYMBOL = "symbol"
ATTR_TOTAL_SUPPLY = "total_supply"
ATTRIBUTION = "Data provided by CoinMarketCap"
CONF_CURRENCY_ID = "currency_id"
CONF_DISPLAY_CURRENCY_DECIMALS = "display_currency_decimals"
DEFAULT_CURRENCY_ID = 1
DEFAULT_DISPLAY_CURRENCY = "USD"
DEFAULT_DISPLAY_CURRENCY_DECIMALS = 2
ICON = "mdi:currency-usd"
SCAN_INTERVAL = timedelta(minutes=15)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_CURRENCY_ID, default=DEFAULT_CURRENCY_ID): cv.positive_int,
vol.Optional(CONF_DISPLAY_CURRENCY, default=DEFAULT_DISPLAY_CURRENCY): vol.All(
cv.string, vol.Upper
),
vol.Optional(
CONF_DISPLAY_CURRENCY_DECIMALS, default=DEFAULT_DISPLAY_CURRENCY_DECIMALS
): vol.All(vol.Coerce(int), vol.Range(min=1)),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the CoinMarketCap sensor."""
currency_id = config[CONF_CURRENCY_ID]
display_currency = config[CONF_DISPLAY_CURRENCY]
display_currency_decimals = config[CONF_DISPLAY_CURRENCY_DECIMALS]
try:
CoinMarketCapData(currency_id, display_currency).update()
except HTTPError:
_LOGGER.warning(
"Currency ID %s or display currency %s "
"is not available. Using 1 (bitcoin) "
"and USD",
currency_id,
display_currency,
)
currency_id = DEFAULT_CURRENCY_ID
display_currency = DEFAULT_DISPLAY_CURRENCY
add_entities(
[
CoinMarketCapSensor(
CoinMarketCapData(currency_id, display_currency),
display_currency_decimals,
)
],
True,
)
class CoinMarketCapSensor(Entity):
"""Representation of a CoinMarketCap sensor."""
def __init__(self, data, display_currency_decimals):
"""Initialize the sensor."""
self.data = data
self.display_currency_decimals = display_currency_decimals
self._ticker = None
self._unit_of_measurement = self.data.display_currency
@property
def name(self):
"""Return the name of the sensor."""
return self._ticker.get("name")
@property
def state(self):
"""Return the state of the sensor."""
return round(
float(
self._ticker.get("quotes").get(self.data.display_currency).get("price")
),
self.display_currency_decimals,
)
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
ATTR_VOLUME_24H: self._ticker.get("quotes")
.get(self.data.display_currency)
.get("volume_24h"),
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_CIRCULATING_SUPPLY: self._ticker.get("circulating_supply"),
ATTR_MARKET_CAP: self._ticker.get("quotes")
.get(self.data.display_currency)
.get("market_cap"),
ATTR_PERCENT_CHANGE_24H: self._ticker.get("quotes")
.get(self.data.display_currency)
.get("percent_change_24h"),
ATTR_PERCENT_CHANGE_7D: self._ticker.get("quotes")
.get(self.data.display_currency)
.get("percent_change_7d"),
ATTR_PERCENT_CHANGE_1H: self._ticker.get("quotes")
.get(self.data.display_currency)
.get("percent_change_1h"),
ATTR_RANK: self._ticker.get("rank"),
ATTR_SYMBOL: self._ticker.get("symbol"),
ATTR_TOTAL_SUPPLY: self._ticker.get("total_supply"),
}
def update(self):
"""Get the latest data and updates the states."""
self.data.update()
self._ticker = self.data.ticker.get("data")
class CoinMarketCapData:
"""Get the latest data and update the states."""
def __init__(self, currency_id, display_currency):
"""Initialize the data object."""
self.currency_id = currency_id
self.display_currency = display_currency
self.ticker = None
def update(self):
"""Get the latest data from coinmarketcap.com."""
self.ticker = Market().ticker(self.currency_id, convert=self.display_currency)
|
from datetime import timedelta
from os import path
from homeassistant import config as hass_config, setup
from homeassistant.components.trend import DOMAIN
from homeassistant.const import SERVICE_RELOAD
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import assert_setup_component, get_test_home_assistant
class TestTrendBinarySensor:
"""Test the Trend sensor."""
hass = None
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
def test_up(self):
"""Test up trend."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {"entity_id": "sensor.test_state"}
},
}
},
)
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "1")
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "2")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "on"
def test_up_using_trendline(self):
"""Test up trend using multiple samples and trendline calculation."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {
"entity_id": "sensor.test_state",
"sample_duration": 10000,
"min_gradient": 1,
"max_samples": 25,
}
},
}
},
)
self.hass.block_till_done()
now = dt_util.utcnow()
for val in [10, 0, 20, 30]:
with patch("homeassistant.util.dt.utcnow", return_value=now):
self.hass.states.set("sensor.test_state", val)
self.hass.block_till_done()
now += timedelta(seconds=2)
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "on"
# have to change the state value, otherwise the sample will be lost
for val in [0, 30, 1, 0]:
with patch("homeassistant.util.dt.utcnow", return_value=now):
self.hass.states.set("sensor.test_state", val)
self.hass.block_till_done()
now += timedelta(seconds=2)
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "off"
def test_down_using_trendline(self):
"""Test down trend using multiple samples and trendline calculation."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {
"entity_id": "sensor.test_state",
"sample_duration": 10000,
"min_gradient": 1,
"max_samples": 25,
"invert": "Yes",
}
},
}
},
)
self.hass.block_till_done()
now = dt_util.utcnow()
for val in [30, 20, 30, 10]:
with patch("homeassistant.util.dt.utcnow", return_value=now):
self.hass.states.set("sensor.test_state", val)
self.hass.block_till_done()
now += timedelta(seconds=2)
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "on"
for val in [30, 0, 45, 50]:
with patch("homeassistant.util.dt.utcnow", return_value=now):
self.hass.states.set("sensor.test_state", val)
self.hass.block_till_done()
now += timedelta(seconds=2)
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "off"
def test_down(self):
"""Test down trend."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {"entity_id": "sensor.test_state"}
},
}
},
)
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "2")
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "1")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "off"
def test_invert_up(self):
"""Test up trend with custom message."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {
"entity_id": "sensor.test_state",
"invert": "Yes",
}
},
}
},
)
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "1")
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "2")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "off"
def test_invert_down(self):
"""Test down trend with custom message."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {
"entity_id": "sensor.test_state",
"invert": "Yes",
}
},
}
},
)
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "2")
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "1")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "on"
def test_attribute_up(self):
"""Test attribute up trend."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {
"entity_id": "sensor.test_state",
"attribute": "attr",
}
},
}
},
)
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "State", {"attr": "1"})
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "State", {"attr": "2"})
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "on"
def test_attribute_down(self):
"""Test attribute down trend."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {
"entity_id": "sensor.test_state",
"attribute": "attr",
}
},
}
},
)
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "State", {"attr": "2"})
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "State", {"attr": "1"})
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "off"
def test_max_samples(self):
"""Test that sample count is limited correctly."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {
"entity_id": "sensor.test_state",
"max_samples": 3,
"min_gradient": -1,
}
},
}
},
)
self.hass.block_till_done()
for val in [0, 1, 2, 3, 2, 1]:
self.hass.states.set("sensor.test_state", val)
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "on"
assert state.attributes["sample_count"] == 3
def test_non_numeric(self):
"""Test up trend."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {"entity_id": "sensor.test_state"}
},
}
},
)
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "Non")
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "Numeric")
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "off"
def test_missing_attribute(self):
"""Test attribute down trend."""
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {
"test_trend_sensor": {
"entity_id": "sensor.test_state",
"attribute": "missing",
}
},
}
},
)
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "State", {"attr": "2"})
self.hass.block_till_done()
self.hass.states.set("sensor.test_state", "State", {"attr": "1"})
self.hass.block_till_done()
state = self.hass.states.get("binary_sensor.test_trend_sensor")
assert state.state == "off"
def test_invalid_name_does_not_create(self):
"""Test invalid name."""
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test INVALID sensor": {"entity_id": "sensor.test_state"}
},
}
},
)
assert self.hass.states.all() == []
def test_invalid_sensor_does_not_create(self):
"""Test invalid sensor."""
with assert_setup_component(0):
assert setup.setup_component(
self.hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "template",
"sensors": {
"test_trend_sensor": {"not_entity_id": "sensor.test_state"}
},
}
},
)
assert self.hass.states.all() == []
def test_no_sensors_does_not_create(self):
"""Test no sensors."""
with assert_setup_component(0):
assert setup.setup_component(
self.hass, "binary_sensor", {"binary_sensor": {"platform": "trend"}}
)
assert self.hass.states.all() == []
async def test_reload(hass):
"""Verify we can reload trend sensors."""
hass.states.async_set("sensor.test_state", 1234)
await setup.async_setup_component(
hass,
"binary_sensor",
{
"binary_sensor": {
"platform": "trend",
"sensors": {"test_trend_sensor": {"entity_id": "sensor.test_state"}},
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("binary_sensor.test_trend_sensor")
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"trend/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("binary_sensor.test_trend_sensor") is None
assert hass.states.get("binary_sensor.second_test_trend_sensor")
def _get_fixtures_base_path():
return path.dirname(path.dirname(path.dirname(__file__)))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import netperf
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'netperf_pps'
BENCHMARK_CONFIG = """
netperf_pps:
description: test packets per second performance using netperf
vm_groups:
servers:
vm_spec: *default_single_core
vm_count: 2
client:
vm_spec: *default_single_core
"""
TRANSACTIONS_PER_SECOND = 'transactions_per_second'
# Command ports are even (id*2), data ports are odd (id*2 + 1)
PORT_START = 12865
REMOTE_SCRIPTS_DIR = 'netperf_test_scripts'
REMOTE_SCRIPT = 'runemomniaggdemo.sh'
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def PrepareNetperfAggregate(vm):
"""Installs netperf on a single vm."""
vm.Install('texinfo')
vm.Install('python_rrdtool')
vm.Install('netperf')
PORT_END = PORT_START
if vm_util.ShouldRunOnExternalIpAddress():
vm.AllowPort(PORT_START, PORT_END)
netserver_cmd = ('{netserver_path} -p {port_start}').format(
port_start=PORT_START,
netserver_path=netperf.NETSERVER_PATH)
vm.RemoteCommand(netserver_cmd)
remote_path = netperf.NETPERF_EXAMPLE_DIR + REMOTE_SCRIPT
vm.RemoteCommand('chmod +x %s' % (remote_path))
def Prepare(benchmark_spec):
"""Install netperf on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm_util.RunThreaded(PrepareNetperfAggregate, vms)
def ParseNetperfAggregateOutput(stdout):
"""Parses the stdout of a single netperf process.
Args:
stdout: the stdout of the netperf process
metadata: metadata for any sample.Sample objects we create
Returns:
A tuple containing (throughput_sample, latency_samples, latency_histogram)
"""
# Don't modify the metadata dict that was passed in
logging.info("Parsing netperf aggregate output")
metadata = {}
aggregate_samples = []
for line in stdout.splitlines():
match = re.search('peak interval', line)
if match:
line_split = line.split()
metric = line_split[0] + ' ' + line_split[6]
value = float(line_split[5])
unit = line_split[6]
aggregate_samples.append(sample.Sample(
metric, value, unit, metadata))
# Each Transaction consists of a send and a receive packet
# So Packets per second is Trans/s * 2
if "Trans/s" in metric:
metric = metric.split()[0] + " Packets/s"
value = value * 2
unit = "Packets/s"
aggregate_samples.append(sample.Sample(
metric, value, unit, metadata))
return aggregate_samples
def RunNetperfAggregate(vm, server_ips):
"""Spawns netperf on a remote VM, parses results.
Args:
vm: The VM that the netperf TCP_RR benchmark will be run upon.
server_ips: IP addresses of the machines running netserver.
Returns:
A list of sample.Sample objects with the results.
"""
# setup remote hosts file
vm.RemoteCommand("cd %s && rm remote_hosts"
% (netperf.NETPERF_EXAMPLE_DIR))
ip_num = 0
for ip in server_ips:
vm.RemoteCommand("cd %s && echo 'REMOTE_HOSTS[%d]=%s' >> remote_hosts"
% (netperf.NETPERF_EXAMPLE_DIR, ip_num, ip))
ip_num += 1
vm.RemoteCommand("cd %s && echo 'NUM_REMOTE_HOSTS=%d' >> remote_hosts"
% (netperf.NETPERF_EXAMPLE_DIR, len(server_ips)))
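# Illustrative result (editor's addition; the addresses are assumptions): for two
# servers 10.0.0.2 and 10.0.0.3 the generated remote_hosts file would contain:
#   REMOTE_HOSTS[0]=10.0.0.2
#   REMOTE_HOSTS[1]=10.0.0.3
#   NUM_REMOTE_HOSTS=2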
vm.RemoteCommand('cd %s && export PATH=$PATH:.'
% (netperf.NETPERF_EXAMPLE_DIR))
# allow script to be executed and run script
stdout, stderr = vm.RemoteCommand("cd %s && export PATH=$PATH:. && chmod "
"+x runemomniaggdemo.sh && "
"./runemomniaggdemo.sh"
% (netperf.NETPERF_EXAMPLE_DIR),
ignore_failure=True, should_log=True,
login_shell=False, timeout=1200)
# print out netperf_tps.log to log
stdout_1, stderr_1 = vm.RemoteCommand("cd %s && cat netperf_tps.log" %
(netperf.NETPERF_EXAMPLE_DIR),
ignore_failure=True, should_log=True,
login_shell=False, timeout=1200)
logging.info(stdout_1)
logging.info(stderr_1)
# do post processing step
proc_stdout, proc_stderr = vm.RemoteCommand("cd %s && ./post_proc.py "
"--intervals netperf_tps.log"
% (netperf.NETPERF_EXAMPLE_DIR),
ignore_failure=True)
samples = ParseNetperfAggregateOutput(proc_stdout)
return samples
def Run(benchmark_spec):
"""Run netperf TCP_RR on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
# set client and server vms
vm_dict = benchmark_spec.vm_groups
client_vms = vm_dict['client']
server_vms = vm_dict['servers']
client_vm = client_vms[0]
results = []
if vm_util.ShouldRunOnExternalIpAddress():
server_ips = list((vm.ip_address for vm in server_vms))
external_ip_results = RunNetperfAggregate(client_vm, server_ips)
for external_ip_result in external_ip_results:
external_ip_result.metadata['ip_type'] = 'external'
results.extend(external_ip_results)
# check if all server vms internal ips are reachable
runInternal = True
for tmp_vm in server_vms:
if not vm_util.ShouldRunOnInternalIpAddress(client_vm, tmp_vm):
runInternal = False
break
if runInternal:
server_ips = list((vm.internal_ip for vm in server_vms))
internal_ip_results = RunNetperfAggregate(client_vm, server_ips)
for internal_ip_result in internal_ip_results:
internal_ip_result.metadata['ip_type'] = 'internal'
results.extend(internal_ip_results)
return results
def Cleanup(benchmark_spec):
"""Cleanup netperf on the target vm (by uninstalling).
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
for vm in vms:
vm.RemoteCommand('sudo killall netserver')
|
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import RainMachineEntity
from .const import (
DATA_CLIENT,
DATA_PROVISION_SETTINGS,
DATA_RESTRICTIONS_CURRENT,
DATA_RESTRICTIONS_UNIVERSAL,
DOMAIN as RAINMACHINE_DOMAIN,
SENSOR_UPDATE_TOPIC,
)
TYPE_FLOW_SENSOR = "flow_sensor"
TYPE_FREEZE = "freeze"
TYPE_FREEZE_PROTECTION = "freeze_protection"
TYPE_HOT_DAYS = "extra_water_on_hot_days"
TYPE_HOURLY = "hourly"
TYPE_MONTH = "month"
TYPE_RAINDELAY = "raindelay"
TYPE_RAINSENSOR = "rainsensor"
TYPE_WEEKDAY = "weekday"
BINARY_SENSORS = {
TYPE_FLOW_SENSOR: ("Flow Sensor", "mdi:water-pump", True, DATA_PROVISION_SETTINGS),
TYPE_FREEZE: ("Freeze Restrictions", "mdi:cancel", True, DATA_RESTRICTIONS_CURRENT),
TYPE_FREEZE_PROTECTION: (
"Freeze Protection",
"mdi:weather-snowy",
True,
DATA_RESTRICTIONS_UNIVERSAL,
),
TYPE_HOT_DAYS: (
"Extra Water on Hot Days",
"mdi:thermometer-lines",
True,
DATA_RESTRICTIONS_UNIVERSAL,
),
TYPE_HOURLY: (
"Hourly Restrictions",
"mdi:cancel",
False,
DATA_RESTRICTIONS_CURRENT,
),
TYPE_MONTH: ("Month Restrictions", "mdi:cancel", False, DATA_RESTRICTIONS_CURRENT),
TYPE_RAINDELAY: (
"Rain Delay Restrictions",
"mdi:cancel",
False,
DATA_RESTRICTIONS_CURRENT,
),
TYPE_RAINSENSOR: (
"Rain Sensor Restrictions",
"mdi:cancel",
False,
DATA_RESTRICTIONS_CURRENT,
),
TYPE_WEEKDAY: (
"Weekday Restrictions",
"mdi:cancel",
False,
DATA_RESTRICTIONS_CURRENT,
),
}
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up RainMachine binary sensors based on a config entry."""
rainmachine = hass.data[RAINMACHINE_DOMAIN][DATA_CLIENT][entry.entry_id]
async_add_entities(
[
RainMachineBinarySensor(
rainmachine, sensor_type, name, icon, enabled_by_default, api_category
)
for (
sensor_type,
(name, icon, enabled_by_default, api_category),
) in BINARY_SENSORS.items()
],
)
class RainMachineBinarySensor(RainMachineEntity, BinarySensorEntity):
"""A sensor implementation for raincloud device."""
def __init__(
self, rainmachine, sensor_type, name, icon, enabled_by_default, api_category
):
"""Initialize the sensor."""
super().__init__(rainmachine)
self._api_category = api_category
self._enabled_by_default = enabled_by_default
self._icon = icon
self._name = name
self._sensor_type = sensor_type
self._state = None
@property
def entity_registry_enabled_default(self):
"""Determine whether an entity is enabled by default."""
return self._enabled_by_default
@property
def icon(self) -> str:
"""Return the icon."""
return self._icon
@property
def is_on(self):
"""Return the status of the sensor."""
return self._state
@property
def unique_id(self) -> str:
"""Return a unique, Home Assistant friendly identifier for this entity."""
return "{}_{}".format(
self.rainmachine.device_mac.replace(":", ""), self._sensor_type
)
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(self.hass, SENSOR_UPDATE_TOPIC, self._update_state)
)
await self.rainmachine.async_register_sensor_api_interest(self._api_category)
self.update_from_latest_data()
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listeners and deregister API interest."""
await super().async_will_remove_from_hass()
self.rainmachine.async_deregister_sensor_api_interest(self._api_category)
@callback
def update_from_latest_data(self):
"""Update the state."""
if self._sensor_type == TYPE_FLOW_SENSOR:
self._state = self.rainmachine.data[DATA_PROVISION_SETTINGS]["system"].get(
"useFlowSensor"
)
elif self._sensor_type == TYPE_FREEZE:
self._state = self.rainmachine.data[DATA_RESTRICTIONS_CURRENT]["freeze"]
elif self._sensor_type == TYPE_FREEZE_PROTECTION:
self._state = self.rainmachine.data[DATA_RESTRICTIONS_UNIVERSAL][
"freezeProtectEnabled"
]
elif self._sensor_type == TYPE_HOT_DAYS:
self._state = self.rainmachine.data[DATA_RESTRICTIONS_UNIVERSAL][
"hotDaysExtraWatering"
]
elif self._sensor_type == TYPE_HOURLY:
self._state = self.rainmachine.data[DATA_RESTRICTIONS_CURRENT]["hourly"]
elif self._sensor_type == TYPE_MONTH:
self._state = self.rainmachine.data[DATA_RESTRICTIONS_CURRENT]["month"]
elif self._sensor_type == TYPE_RAINDELAY:
self._state = self.rainmachine.data[DATA_RESTRICTIONS_CURRENT]["rainDelay"]
elif self._sensor_type == TYPE_RAINSENSOR:
self._state = self.rainmachine.data[DATA_RESTRICTIONS_CURRENT]["rainSensor"]
elif self._sensor_type == TYPE_WEEKDAY:
self._state = self.rainmachine.data[DATA_RESTRICTIONS_CURRENT]["weekDay"]
|
from typing import Dict, List
import voluptuous as vol
from homeassistant.components.device_automation import toggle_entity
from homeassistant.const import CONF_DOMAIN
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.condition import ConditionCheckerType
from homeassistant.helpers.typing import ConfigType
from . import DOMAIN
CONDITION_SCHEMA = toggle_entity.CONDITION_SCHEMA.extend(
{vol.Required(CONF_DOMAIN): DOMAIN}
)
@callback
def async_condition_from_config(
config: ConfigType, config_validation: bool
) -> ConditionCheckerType:
"""Evaluate state based on configuration."""
if config_validation:
config = CONDITION_SCHEMA(config)
return toggle_entity.async_condition_from_config(config)
async def async_get_conditions(
hass: HomeAssistant, device_id: str
) -> List[Dict[str, str]]:
"""List device conditions."""
return await toggle_entity.async_get_conditions(hass, device_id, DOMAIN)
async def async_get_condition_capabilities(hass: HomeAssistant, config: dict) -> dict:
"""List condition capabilities."""
return await toggle_entity.async_get_condition_capabilities(hass, config)
|
import pytest
pytest.importorskip('PyQt5.QtWebEngineWidgets')
from PyQt5.QtWebEngineCore import QWebEngineUrlRequestInfo
from qutebrowser.browser.webengine import interceptor
def test_no_missing_resource_types():
request_interceptor = interceptor.RequestInterceptor()
qb_keys = request_interceptor._resource_types.keys()
qt_keys = {i for i in vars(QWebEngineUrlRequestInfo).values()
if isinstance(i, QWebEngineUrlRequestInfo.ResourceType)}
assert qt_keys == qb_keys
def test_resource_type_values():
request_interceptor = interceptor.RequestInterceptor()
for qt_value, qb_item in request_interceptor._resource_types.items():
assert qt_value == qb_item.value
|
from collections import Counter
from unittest import TestCase
from scattertext import whitespace_nlp_with_sentences
from scattertext.features.UseFullDocAsMetadata import UseFullDocAsMetadata
class TestUseFullDocAsMetadata(TestCase):
def test_get_feats(self):
doc = whitespace_nlp_with_sentences("A a bb cc.")
term_freq = UseFullDocAsMetadata().get_doc_metadata(doc)
self.assertEqual(Counter({"A a bb cc.": 1}), term_freq)
|
from __future__ import print_function
import sys
from io import StringIO
buffers = [StringIO]
if sys.version_info < (3, 0):
from cStringIO import StringIO as cStringIO
from StringIO import StringIO as pStringIO
buffers += [cStringIO, pStringIO]
from logilab.common.ureports.nodes import *
class WriterTC:
def _test_output(self, test_id, layout, msg=None):
for buffercls in buffers:
buffer = buffercls()
self.writer.format(layout, buffer)
got = buffer.getvalue()
expected = getattr(self, test_id)
try:
self.assertMultiLineEqual(got, expected)
except:
print('**** using a %s' % buffer.__class__)
print('**** got for %s' % test_id)
print(got)
print('**** while expected')
print(expected)
print('****')
raise
def test_section(self):
layout = Section('Section title',
'Section\'s description.\nBlabla bla')
self._test_output('section_base', layout)
layout.append(Section('Subsection', 'Sub section description'))
self._test_output('section_nested', layout)
def test_verbatim(self):
layout = VerbatimText('blablabla')
self._test_output('verbatim_base', layout)
def test_list(self):
layout = List(children=('item1', 'item2', 'item3', 'item4'))
self._test_output('list_base', layout)
def test_nested_list(self):
layout = List(children=(Paragraph(("blabla", List(children=('1', "2", "3")))),
"an other point"))
self._test_output('nested_list', layout)
def test_table(self):
layout = Table(cols=2, children=('head1', 'head2', 'cell1', 'cell2'))
self._test_output('table_base', layout)
def test_field_table(self):
table = Table(cols=2, klass='field', id='mytable')
for field, value in (('f1', 'v1'), ('f22', 'v22'), ('f333', 'v333')):
table.append(Text(field))
table.append(Text(value))
self._test_output('field_table', table)
def test_advanced_table(self):
table = Table(cols=2, klass='whatever', id='mytable', rheaders=1)
for field, value in (('field', 'value'), ('f1', 'v1'), ('f22', 'v22'), ('f333', 'v333')):
table.append(Text(field))
table.append(Text(value))
table.append(Link('http://www.perdu.com', 'toi perdu ?'))
table.append(Text(''))
self._test_output('advanced_table', table)
## def test_image(self):
## layout = Verbatim('blablabla')
## self._test_output('verbatim_base', layout)
|
import asyncio
from datetime import datetime, timedelta
import logging
from typing import Optional
import aiohttp
import voluptuous as vol
from homeassistant.components.camera import PLATFORM_SCHEMA, Camera
from homeassistant.const import CONF_NAME
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.util import dt as dt_util
CONF_DIMENSION = "dimension"
CONF_DELTA = "delta"
CONF_COUNTRY = "country_code"
_LOGGER = logging.getLogger(__name__)
# Maximum range according to docs
DIM_RANGE = vol.All(vol.Coerce(int), vol.Range(min=120, max=700))
# Multiple choice for available Radar Map URL
SUPPORTED_COUNTRY_CODES = ["NL", "BE"]
PLATFORM_SCHEMA = vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DIMENSION, default=512): DIM_RANGE,
vol.Optional(CONF_DELTA, default=600.0): cv.positive_float,
vol.Optional(CONF_NAME, default="Buienradar loop"): cv.string,
vol.Optional(CONF_COUNTRY, default="NL"): vol.All(
vol.Coerce(str), vol.In(SUPPORTED_COUNTRY_CODES)
),
}
)
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up buienradar radar-loop camera component."""
dimension = config[CONF_DIMENSION]
delta = config[CONF_DELTA]
name = config[CONF_NAME]
country = config[CONF_COUNTRY]
async_add_entities([BuienradarCam(name, dimension, delta, country)])
class BuienradarCam(Camera):
"""
A camera component producing animated buienradar radar-imagery GIFs.
Rain radar imagery camera based on image URL taken from [0].
[0]: https://www.buienradar.nl/overbuienradar/gratis-weerdata
"""
def __init__(self, name: str, dimension: int, delta: float, country: str):
"""
Initialize the component.
This constructor must be run in the event loop.
"""
super().__init__()
self._name = name
# dimension (x and y) of returned radar image
self._dimension = dimension
# time a cached image stays valid for
self._delta = delta
# country location
self._country = country
# Condition that guards the loading indicator.
#
# Ensures that only one reader can cause an http request at the same
# time, and that all readers are notified after this request completes.
#
# invariant: this condition is private to and owned by this instance.
self._condition = asyncio.Condition()
self._last_image: Optional[bytes] = None
# value of the last seen last modified header
self._last_modified: Optional[str] = None
# loading status
self._loading = False
# deadline for image refresh - self.delta after last successful load
self._deadline: Optional[datetime] = None
self._unique_id = f"{self._dimension}_{self._country}"
@property
def name(self) -> str:
"""Return the component name."""
return self._name
def __needs_refresh(self) -> bool:
if not (self._delta and self._deadline and self._last_image):
return True
return dt_util.utcnow() > self._deadline
async def __retrieve_radar_image(self) -> bool:
"""Retrieve new radar image and return whether this succeeded."""
session = async_get_clientsession(self.hass)
url = (
f"https://api.buienradar.nl/image/1.0/RadarMap{self._country}"
f"?w={self._dimension}&h={self._dimension}"
)
if self._last_modified:
headers = {"If-Modified-Since": self._last_modified}
else:
headers = {}
try:
async with session.get(url, timeout=5, headers=headers) as res:
res.raise_for_status()
if res.status == 304:
_LOGGER.debug("HTTP 304 - success")
return True
last_modified = res.headers.get("Last-Modified")
if last_modified:
self._last_modified = last_modified
self._last_image = await res.read()
_LOGGER.debug("HTTP 200 - Last-Modified: %s", last_modified)
return True
except (asyncio.TimeoutError, aiohttp.ClientError) as err:
_LOGGER.error("Failed to fetch image, %s", type(err))
return False
async def async_camera_image(self) -> Optional[bytes]:
"""
Return a still image response from the camera.
        Uses asyncio conditions to make sure only one task enters the critical
section at the same time. Otherwise, two http requests would start
when two tabs with Home Assistant are open.
The condition is entered in two sections because otherwise the lock
would be held while doing the http request.
A boolean (_loading) is used to indicate the loading status instead of
_last_image since that is initialized to None.
For reference:
* :func:`asyncio.Condition.wait` releases the lock and acquires it
again before continuing.
* :func:`asyncio.Condition.notify_all` requires the lock to be held.
"""
if not self.__needs_refresh():
return self._last_image
# get lock, check iff loading, await notification if loading
async with self._condition:
# can not be tested - mocked http response returns immediately
if self._loading:
_LOGGER.debug("already loading - waiting for notification")
await self._condition.wait()
return self._last_image
# Set loading status **while holding lock**, makes other tasks wait
self._loading = True
try:
now = dt_util.utcnow()
was_updated = await self.__retrieve_radar_image()
# was updated? Set new deadline relative to now before loading
if was_updated:
self._deadline = now + timedelta(seconds=self._delta)
return self._last_image
finally:
# get lock, unset loading status, notify all waiting tasks
async with self._condition:
self._loading = False
self._condition.notify_all()
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
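# A minimal, self-contained sketch (not part of Home Assistant) of the
# asyncio.Condition "single-flight" pattern described in the docstring above:
# many callers may request a refresh, but only the first performs the
# (simulated) fetch while the others wait for its result. The `_fetch`
# coroutine is a stand-in for __retrieve_radar_image(); asyncio is already
# imported at the top of this module.
class SingleFlightCache:
    """Illustrative only: serialize concurrent refreshes behind a Condition."""

    def __init__(self):
        self._condition = asyncio.Condition()
        self._loading = False
        self._value = None

    async def _fetch(self):
        await asyncio.sleep(0.1)  # stand-in for one slow HTTP request
        return b"radar-image-bytes"

    async def get(self):
        async with self._condition:
            if self._loading:
                # Another task is already fetching; wait for its result.
                await self._condition.wait()
                return self._value
            self._loading = True
        try:
            self._value = await self._fetch()
            return self._value
        finally:
            async with self._condition:
                self._loading = False
                self._condition.notify_all()


async def _single_flight_demo():
    """Five concurrent readers, one underlying fetch."""
    cache = SingleFlightCache()
    results = await asyncio.gather(*(cache.get() for _ in range(5)))
    assert all(r == b"radar-image-bytes" for r in results)


if __name__ == "__main__":
    asyncio.run(_single_flight_demo())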
|
from datetime import timedelta
import logging
import threading
import time
import voluptuous as vol
from waterfurnace.waterfurnace import WaterFurnace, WFCredentialError, WFException
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers import config_validation as cv, discovery
_LOGGER = logging.getLogger(__name__)
DOMAIN = "waterfurnace"
UPDATE_TOPIC = f"{DOMAIN}_update"
SCAN_INTERVAL = timedelta(seconds=10)
ERROR_INTERVAL = timedelta(seconds=300)
MAX_FAILS = 10
NOTIFICATION_ID = "waterfurnace_website_notification"
NOTIFICATION_TITLE = "WaterFurnace website status"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, base_config):
"""Set up waterfurnace platform."""
config = base_config.get(DOMAIN)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
wfconn = WaterFurnace(username, password)
# NOTE(sdague): login will throw an exception if this doesn't
# work, which will abort the setup.
try:
wfconn.login()
except WFCredentialError:
_LOGGER.error("Invalid credentials for waterfurnace login")
return False
hass.data[DOMAIN] = WaterFurnaceData(hass, wfconn)
hass.data[DOMAIN].start()
discovery.load_platform(hass, "sensor", DOMAIN, {}, config)
return True
class WaterFurnaceData(threading.Thread):
"""WaterFurnace Data collector.
This is implemented as a dedicated thread polling a websocket in a
    tight loop. The websocket will shut itself down from the server side
    if a packet is not sent at least every 30 seconds. Reads are cheap and
    logins are not, so keeping the connection open and polling on a very
    regular cadence is actually the least I/O-intensive approach.
"""
def __init__(self, hass, client):
"""Initialize the data object."""
super().__init__()
self.hass = hass
self.client = client
self.unit = self.client.gwid
self.data = None
self._shutdown = False
self._fails = 0
def _reconnect(self):
"""Reconnect on a failure."""
self._fails += 1
if self._fails > MAX_FAILS:
_LOGGER.error("Failed to refresh login credentials. Thread stopped")
self.hass.components.persistent_notification.create(
"Error:<br/>Connection to waterfurnace website failed "
"the maximum number of times. Thread has stopped",
title=NOTIFICATION_TITLE,
notification_id=NOTIFICATION_ID,
)
self._shutdown = True
return
# sleep first before the reconnect attempt
_LOGGER.debug("Sleeping for fail # %s", self._fails)
time.sleep(self._fails * ERROR_INTERVAL.seconds)
try:
self.client.login()
self.data = self.client.read()
except WFException:
_LOGGER.exception("Failed to reconnect attempt %s", self._fails)
else:
_LOGGER.debug("Reconnected to furnace")
self._fails = 0
def run(self):
"""Thread run loop."""
@callback
def register():
"""Connect to hass for shutdown."""
def shutdown(event):
"""Shutdown the thread."""
_LOGGER.debug("Signaled to shutdown")
self._shutdown = True
self.join()
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
self.hass.add_job(register)
# This does a tight loop in sending read calls to the
# websocket. That's a blocking call, which returns pretty
# quickly (1 second). It's important that we do this
# frequently though, because if we don't call the websocket at
# least every 30 seconds the server side closes the
# connection.
while True:
if self._shutdown:
_LOGGER.debug("Graceful shutdown")
return
try:
self.data = self.client.read()
except WFException:
# WFExceptions are things the WF library understands
# that pretty much can all be solved by logging in and
# back out again.
_LOGGER.exception("Failed to read data, attempting to recover")
self._reconnect()
else:
self.hass.helpers.dispatcher.dispatcher_send(UPDATE_TOPIC)
time.sleep(SCAN_INTERVAL.seconds)
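# Minimal sketch (not part of the integration) of the retry-delay math used
# in _reconnect() above: the sleep grows linearly with the failure count, and
# the thread gives up entirely once MAX_FAILS is exceeded.
def _retry_delay(fails, error_interval_seconds=300, max_fails=10):
    """Return the backoff delay in seconds, or None when the thread should stop."""
    if fails > max_fails:
        return None
    return fails * error_interval_seconds


assert _retry_delay(1) == 300      # first failure: wait 5 minutes
assert _retry_delay(10) == 3000    # tenth failure: wait 50 minutes
assert _retry_delay(11) is None    # beyond MAX_FAILS: stop the thread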
|
from __future__ import print_function
import binascii
import io
import os
import colorama
import pytest
import sh
from molecule import util
colorama.init(autoreset=True)
def test_print_debug(capsys):
util.print_debug('test_title', 'test_data')
result, _ = capsys.readouterr()
title = [
colorama.Back.WHITE, colorama.Style.BRIGHT, colorama.Fore.BLACK,
'DEBUG: test_title', colorama.Fore.RESET, colorama.Back.RESET,
colorama.Style.RESET_ALL
]
print(''.join(title))
data = [
colorama.Fore.BLACK, colorama.Style.BRIGHT, 'test_data',
colorama.Style.RESET_ALL, colorama.Fore.RESET
]
print(''.join(data))
x, _ = capsys.readouterr()
assert x == result
def test_print_environment_vars(capsys):
env = {
'ANSIBLE_FOO': 'foo',
'ANSIBLE_BAR': 'bar',
'ANSIBLE': None,
'MOLECULE_FOO': 'foo',
'MOLECULE_BAR': 'bar',
'MOLECULE': None
}
util.print_environment_vars(env)
result, _ = capsys.readouterr()
# Ansible Environment
title = [
colorama.Back.WHITE, colorama.Style.BRIGHT, colorama.Fore.BLACK,
'DEBUG: ANSIBLE ENVIRONMENT', colorama.Fore.RESET, colorama.Back.RESET,
colorama.Style.RESET_ALL
]
print(''.join(title))
data = [
colorama.Fore.BLACK, colorama.Style.BRIGHT,
util.safe_dump({
'ANSIBLE_FOO': 'foo',
'ANSIBLE_BAR': 'bar'
}), colorama.Style.RESET_ALL, colorama.Fore.RESET
]
print(''.join(data))
# Molecule Environment
title = [
colorama.Back.WHITE, colorama.Style.BRIGHT, colorama.Fore.BLACK,
'DEBUG: MOLECULE ENVIRONMENT', colorama.Fore.RESET,
colorama.Back.RESET, colorama.Style.RESET_ALL
]
print(''.join(title))
data = [
colorama.Fore.BLACK, colorama.Style.BRIGHT,
util.safe_dump({
'MOLECULE_FOO': 'foo',
'MOLECULE_BAR': 'bar'
}), colorama.Style.RESET_ALL, colorama.Fore.RESET
]
print(''.join(data))
# Shell Replay
title = [
colorama.Back.WHITE, colorama.Style.BRIGHT, colorama.Fore.BLACK,
'DEBUG: SHELL REPLAY', colorama.Fore.RESET, colorama.Back.RESET,
colorama.Style.RESET_ALL
]
print(''.join(title))
data = [
colorama.Fore.BLACK, colorama.Style.BRIGHT,
'ANSIBLE_BAR=bar ANSIBLE_FOO=foo MOLECULE_BAR=bar MOLECULE_FOO=foo',
colorama.Style.RESET_ALL, colorama.Fore.RESET
]
print(''.join(data))
print()
x, _ = capsys.readouterr()
assert x == result
def test_sysexit():
with pytest.raises(SystemExit) as e:
util.sysexit()
assert 1 == e.value.code
def test_sysexit_with_custom_code():
with pytest.raises(SystemExit) as e:
util.sysexit(2)
assert 2 == e.value.code
def test_sysexit_with_message(patched_logger_critical):
with pytest.raises(SystemExit) as e:
util.sysexit_with_message('foo')
assert 1 == e.value.code
patched_logger_critical.assert_called_once_with('foo')
def test_sysexit_with_message_and_custom_code(patched_logger_critical):
with pytest.raises(SystemExit) as e:
util.sysexit_with_message('foo', 2)
assert 2 == e.value.code
patched_logger_critical.assert_called_once_with('foo')
def test_run_command():
cmd = sh.ls.bake()
x = util.run_command(cmd)
assert 0 == x.exit_code
def test_run_command_with_debug(mocker, patched_print_debug):
cmd = sh.ls.bake(_env={'ANSIBLE_FOO': 'foo', 'MOLECULE_BAR': 'bar'})
util.run_command(cmd, debug=True)
x = [
mocker.call('ANSIBLE ENVIRONMENT', '---\nANSIBLE_FOO: foo\n'),
mocker.call('MOLECULE ENVIRONMENT', '---\nMOLECULE_BAR: bar\n'),
mocker.call('SHELL REPLAY', 'ANSIBLE_FOO=foo MOLECULE_BAR=bar'),
mocker.call('COMMAND', sh.which('ls'))
]
assert x == patched_print_debug.mock_calls
def test_run_command_with_debug_handles_no_env(mocker, patched_print_debug):
cmd = sh.ls.bake()
util.run_command(cmd, debug=True)
x = [
mocker.call('ANSIBLE ENVIRONMENT', '--- {}\n'),
mocker.call('MOLECULE ENVIRONMENT', '--- {}\n'),
mocker.call('SHELL REPLAY', ''),
mocker.call('COMMAND', sh.which('ls'))
]
assert x == patched_print_debug.mock_calls
def test_os_walk(temp_dir):
scenarios = ['scenario1', 'scenario2', 'scenario3']
molecule_directory = pytest.helpers.molecule_directory()
for scenario in scenarios:
scenario_directory = os.path.join(molecule_directory, scenario)
molecule_file = pytest.helpers.get_molecule_file(scenario_directory)
os.makedirs(scenario_directory)
util.write_file(molecule_file, '')
result = [f for f in util.os_walk(molecule_directory, 'molecule.yml')]
assert 3 == len(result)
def test_render_template():
template = "{{ foo }} = {{ bar }}"
"foo = bar" == util.render_template(template, foo='foo', bar='bar')
def test_write_file(temp_dir):
dest_file = os.path.join(temp_dir.strpath, 'test_util_write_file.tmp')
contents = binascii.b2a_hex(os.urandom(15)).decode('utf-8')
util.write_file(dest_file, contents)
with util.open_file(dest_file) as stream:
data = stream.read()
x = '# Molecule managed\n\n{}'.format(contents)
assert x == data
def molecule_prepender(content):
x = '# Molecule managed\n\nfoo bar'
assert x == util.file_prepender('foo bar')
def test_safe_dump():
x = """
---
foo: bar
""".lstrip()
assert x == util.safe_dump({'foo': 'bar'})
def test_safe_dump_with_increase_indent():
data = {
'foo': [{
'foo': 'bar',
'baz': 'zzyzx',
}],
}
x = """
---
foo:
- baz: zzyzx
foo: bar
""".lstrip()
assert x == util.safe_dump(data)
def test_safe_load():
assert {'foo': 'bar'} == util.safe_load('foo: bar')
def test_safe_load_returns_empty_dict_on_empty_string():
assert {} == util.safe_load('')
def test_safe_load_exits_when_cannot_parse():
data = """
---
%foo:
""".strip()
with pytest.raises(SystemExit) as e:
util.safe_load(data)
assert 1 == e.value.code
def test_safe_load_file(temp_dir):
path = os.path.join(temp_dir.strpath, 'foo')
util.write_file(path, 'foo: bar')
assert {'foo': 'bar'} == util.safe_load_file(path)
def test_open_file(temp_dir):
path = os.path.join(temp_dir.strpath, 'foo')
util.write_file(path, 'foo: bar')
with util.open_file(path) as stream:
try:
file_types = (file, io.IOBase)
except NameError:
file_types = io.IOBase
assert isinstance(stream, file_types)
def test_instance_with_scenario_name():
assert 'foo-bar' == util.instance_with_scenario_name('foo', 'bar')
def test_strip_ansi_escape():
string = 'ls\r\n\x1b[00m\x1b[01;31mfoo\x1b[00m\r\n\x1b[01;31m'
assert 'ls\r\nfoo\r\n' == util.strip_ansi_escape(string)
def test_strip_ansi_color():
s = 'foo\x1b[0m\x1b[0m\x1b[0m\n\x1b[0m\x1b[0m\x1b[0m\x1b[0m\x1b[0m'
assert 'foo\n' == util.strip_ansi_color(s)
def test_verbose_flag():
options = {'verbose': True, 'v': True}
assert ['-v'] == util.verbose_flag(options)
assert {} == options
def test_verbose_flag_extra_verbose():
options = {'verbose': True, 'vvv': True}
assert ['-vvv'] == util.verbose_flag(options)
assert {} == options
def test_verbose_flag_preserves_verbose_option():
options = {'verbose': True}
assert [] == util.verbose_flag(options)
assert {'verbose': True} == options
def test_filter_verbose_permutation():
options = {
'v': True,
'vv': True,
'vvv': True,
'vfoo': True,
'foo': True,
'bar': True,
}
x = {
'vfoo': True,
'foo': True,
'bar': True,
}
assert x == util.filter_verbose_permutation(options)
def test_title():
assert 'Foo' == util.title('foo')
assert 'Foo Bar' == util.title('foo_bar')
def test_abs_path(temp_dir):
x = os.path.abspath(
os.path.join(os.getcwd(), os.path.pardir, 'foo', 'bar'))
assert x == util.abs_path(os.path.join(os.path.pardir, 'foo', 'bar'))
def test_abs_path_with_none_path():
assert util.abs_path(None) is None
def test_camelize():
assert 'Foo' == util.camelize('foo')
assert 'FooBar' == util.camelize('foo_bar')
assert 'FooBarBaz' == util.camelize('foo_bar_baz')
def test_underscore():
assert 'foo' == util.underscore('Foo')
assert 'foo_bar' == util.underscore('FooBar')
assert 'foo_bar_baz' == util.underscore('FooBarBaz')
def test_merge_dicts():
# example taken from python-anyconfig/anyconfig/__init__.py
a = {'b': [{'c': 0}, {'c': 2}], 'd': {'e': 'aaa', 'f': 3}}
b = {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb'}}
x = {'a': 1, 'b': [{'c': 3}], 'd': {'e': "bbb", 'f': 3}}
assert x == util.merge_dicts(a, b)
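# Sketch only: a plain-Python recursive merge with the same semantics the
# test above expects from util.merge_dicts (nested dicts are merged key by
# key, every other value -- including lists -- is overwritten by `b`).
# This is an illustration, not molecule's actual implementation.
def _merge_dicts_sketch(a, b):
    result = dict(a)
    for key, value in b.items():
        if key in result and isinstance(result[key], dict) and isinstance(value, dict):
            result[key] = _merge_dicts_sketch(result[key], value)
        else:
            result[key] = value
    return result


assert _merge_dicts_sketch(
    {'b': [{'c': 0}, {'c': 2}], 'd': {'e': 'aaa', 'f': 3}},
    {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb'}},
) == {'a': 1, 'b': [{'c': 3}], 'd': {'e': 'bbb', 'f': 3}}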
|
import asyncio
import logging
from aioflo import async_get_api
from aioflo.errors import RequestError
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import CLIENT, DOMAIN
from .device import FloDeviceDataUpdateCoordinator
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["binary_sensor", "sensor", "switch"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the flo component."""
hass.data[DOMAIN] = {}
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up flo from a config entry."""
session = async_get_clientsession(hass)
hass.data[DOMAIN][entry.entry_id] = {}
try:
hass.data[DOMAIN][entry.entry_id][CLIENT] = client = await async_get_api(
entry.data[CONF_USERNAME], entry.data[CONF_PASSWORD], session=session
)
except RequestError as err:
raise ConfigEntryNotReady from err
user_info = await client.user.get_info(include_location_info=True)
_LOGGER.debug("Flo user information with locations: %s", user_info)
hass.data[DOMAIN][entry.entry_id]["devices"] = devices = [
FloDeviceDataUpdateCoordinator(hass, client, location["id"], device["id"])
for location in user_info["locations"]
for device in location["devices"]
]
tasks = [device.async_refresh() for device in devices]
await asyncio.gather(*tasks)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
from ..core import compat
from ..core import driver
from ..core import exceptions
class Storage(driver.Base):
_storage = {}
def __init__(self, path=None, config=None):
self.supports_bytes_range = True
def exists(self, path):
return path in self._storage
def get_size(self, path):
if path not in self._storage:
raise exceptions.FileNotFoundError('%s is not there' % path)
return len(self._storage[path])
def get_content(self, path):
if path not in self._storage:
raise exceptions.FileNotFoundError('%s is not there' % path)
return self._storage[path]
def put_content(self, path, content):
self._storage[path] = content
def remove(self, path):
# Straight key, delete
if path in self._storage:
del self._storage[path]
return
# Directory like, get the list
ls = []
for k in self._storage.keys():
if (not k == path) and k.startswith(path):
ls.append(k)
if not len(ls):
raise exceptions.FileNotFoundError('%s is not there' % path)
for item in ls:
self.remove(item)
def stream_read(self, path, bytes_range=None):
if path not in self._storage:
raise exceptions.FileNotFoundError('%s is not there' % path)
f = self._storage[path]
nb_bytes = 0
total_size = 0
if bytes_range:
f.seek(bytes_range[0])
total_size = bytes_range[1] - bytes_range[0] + 1
else:
f.seek(0)
while True:
buf = None
if bytes_range:
# Bytes Range is enabled
buf_size = self.buffer_size
if nb_bytes + buf_size > total_size:
# We make sure we don't read out of the range
buf_size = total_size - nb_bytes
if buf_size > 0:
buf = f.read(buf_size)
nb_bytes += len(buf)
else:
# We're at the end of the range
buf = ''
else:
buf = f.read(self.buffer_size)
if not buf:
break
yield buf
def stream_write(self, path, fp):
# Size is mandatory
if path not in self._storage:
self._storage[path] = compat.StringIO()
f = self._storage[path]
try:
while True:
buf = fp.read(self.buffer_size)
if not buf:
break
f.write(buf)
except IOError:
pass
def list_directory(self, path=None):
# if path not in self._storage:
# raise exceptions.FileNotFoundError('%s is not there' % path)
ls = []
for k in self._storage.keys():
if (not k == path) and k.startswith(path or ''):
prefix = '/'
if not k.startswith('/'):
prefix = ''
ls.append(prefix + "/".join(k.lstrip("/").split("/")[0:2]))
if not len(ls):
raise exceptions.FileNotFoundError('%s is not there' % path)
return ls
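# Standalone sketch (independent of driver.Base) of the bytes_range handling
# in stream_read() above: read at most `buffer_size` bytes per chunk and
# never step past the inclusive [start, end] range.
import io


def _range_chunks(fileobj, bytes_range, buffer_size=4):
    start, end = bytes_range
    fileobj.seek(start)
    total_size = end - start + 1
    nb_bytes = 0
    while nb_bytes < total_size:
        buf = fileobj.read(min(buffer_size, total_size - nb_bytes))
        if not buf:
            break
        nb_bytes += len(buf)
        yield buf


# Bytes 2 through 7 (inclusive) of the sample payload, in two chunks.
assert b''.join(_range_chunks(io.BytesIO(b'0123456789'), (2, 7))) == b'234567'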
|
import importlib.machinery
import discord
from redbot.core.utils.chat_formatting import humanize_number
from .i18n import Translator
_ = Translator(__name__, __file__)
class RedError(Exception):
"""Base error class for Red-related errors."""
class PackageAlreadyLoaded(RedError):
"""Raised when trying to load an already-loaded package."""
def __init__(self, spec: importlib.machinery.ModuleSpec, *args, **kwargs):
super().__init__(*args, **kwargs)
self.spec: importlib.machinery.ModuleSpec = spec
def __str__(self) -> str:
return f"There is already a package named {self.spec.name.split('.')[-1]} loaded"
class CogLoadError(RedError):
"""Raised by a cog when it cannot load itself.
The message will be send to the user."""
pass
class BankError(RedError):
"""Base error class for bank-related errors."""
class BalanceTooHigh(BankError, OverflowError):
"""Raised when trying to set a user's balance to higher than the maximum."""
def __init__(
self, user: discord.abc.User, max_balance: int, currency_name: str, *args, **kwargs
):
super().__init__(*args, **kwargs)
self.user = user
self.max_balance = max_balance
self.currency_name = currency_name
def __str__(self) -> str:
return _("{user}'s balance cannot rise above {max} {currency}.").format(
user=self.user, max=humanize_number(self.max_balance), currency=self.currency_name
)
class BankPruneError(BankError):
"""Raised when trying to prune a local bank and no server is specified."""
class MissingExtraRequirements(RedError):
"""Raised when an extra requirement is missing but required."""
class ConfigError(RedError):
"""Error in a Config operation."""
class StoredTypeError(ConfigError, TypeError):
"""A TypeError pertaining to stored Config data.
This error may arise when, for example, trying to increment a value
which is not a number, or trying to toggle a value which is not a
boolean.
"""
class CannotSetSubfield(StoredTypeError):
"""Tried to set sub-field of an invalid data structure.
This would occur in the following example::
>>> import asyncio
>>> from redbot.core import Config
>>> config = Config.get_conf(None, 1234, cog_name="Example")
>>> async def example():
... await config.foo.set(True)
... await config.set_raw("foo", "bar", False) # Should raise here
...
>>> asyncio.run(example())
"""
|
import os
import unittest
import random
import shutil
import numpy as np
from scipy import sparse
from gensim.utils import is_corpus, mock_data
from gensim.corpora.sharded_corpus import ShardedCorpus
#############################################################################
class TestShardedCorpus(unittest.TestCase):
# @classmethod
# def setUpClass(cls):
# cls.dim = 1000
# cls.data = mock_data(dim=cls.dim)
#
# random_string = ''.join(random.choice('1234567890') for _ in range(8))
#
# cls.tmp_dir = 'test-temp-' + random_string
# os.makedirs(cls.tmp_dir)
#
# cls.tmp_fname = os.path.join(cls.tmp_dir,
# 'shcorp.' + random_string + '.tmp')
# @classmethod
# def tearDownClass(cls):
# shutil.rmtree(cls.tmp_dir)
def setUp(self):
self.dim = 1000
self.random_string = ''.join(random.choice('1234567890') for _ in range(8))
self.tmp_dir = 'test-temp-' + self.random_string
os.makedirs(self.tmp_dir)
self.tmp_fname = os.path.join(self.tmp_dir,
'shcorp.' + self.random_string + '.tmp')
self.data = mock_data(dim=1000)
self.corpus = ShardedCorpus(self.tmp_fname, self.data, dim=self.dim,
shardsize=100)
def tearDown(self):
shutil.rmtree(self.tmp_dir)
def test_init(self):
# Test that the shards were actually created during setUp
self.assertTrue(os.path.isfile(self.tmp_fname + '.1'))
def test_load(self):
# Test that the shards were actually created
self.assertTrue(os.path.isfile(self.tmp_fname + '.1'))
self.corpus.save()
loaded_corpus = ShardedCorpus.load(self.tmp_fname)
self.assertEqual(loaded_corpus.dim, self.corpus.dim)
self.assertEqual(loaded_corpus.n_shards, self.corpus.n_shards)
def test_getitem(self):
_ = self.corpus[130] # noqa:F841
# Does retrieving the item load the correct shard?
self.assertEqual(self.corpus.current_shard_n, 1)
item = self.corpus[220:227]
self.assertEqual((7, self.corpus.dim), item.shape)
self.assertEqual(self.corpus.current_shard_n, 2)
for i in range(220, 227):
self.assertTrue(np.array_equal(self.corpus[i], item[i - 220]))
def test_sparse_serialization(self):
no_exception = True
try:
ShardedCorpus(self.tmp_fname, self.data, shardsize=100, dim=self.dim, sparse_serialization=True)
except Exception:
no_exception = False
raise
finally:
self.assertTrue(no_exception)
def test_getitem_dense2dense(self):
corpus = ShardedCorpus(
self.tmp_fname, self.data, shardsize=100, dim=self.dim,
sparse_serialization=False, sparse_retrieval=False
)
item = corpus[3]
self.assertTrue(isinstance(item, np.ndarray))
self.assertEqual(item.shape, (corpus.dim,))
dslice = corpus[2:6]
self.assertTrue(isinstance(dslice, np.ndarray))
self.assertEqual(dslice.shape, (4, corpus.dim))
ilist = corpus[[2, 3, 4, 5]]
self.assertTrue(isinstance(ilist, np.ndarray))
self.assertEqual(ilist.shape, (4, corpus.dim))
self.assertEqual(ilist.all(), dslice.all())
def test_getitem_dense2sparse(self):
corpus = ShardedCorpus(
self.tmp_fname, self.data, shardsize=100, dim=self.dim,
sparse_serialization=False, sparse_retrieval=True
)
item = corpus[3]
self.assertTrue(isinstance(item, sparse.csr_matrix))
self.assertEqual(item.shape, (1, corpus.dim))
dslice = corpus[2:6]
self.assertTrue(isinstance(dslice, sparse.csr_matrix))
self.assertEqual(dslice.shape, (4, corpus.dim))
ilist = corpus[[2, 3, 4, 5]]
self.assertTrue(isinstance(ilist, sparse.csr_matrix))
self.assertEqual(ilist.shape, (4, corpus.dim))
self.assertEqual((ilist != dslice).getnnz(), 0)
def test_getitem_sparse2sparse(self):
sp_tmp_fname = self.tmp_fname + '.sparse'
corpus = ShardedCorpus(
sp_tmp_fname, self.data, shardsize=100, dim=self.dim,
sparse_serialization=True, sparse_retrieval=True
)
dense_corpus = ShardedCorpus(
self.tmp_fname, self.data, shardsize=100, dim=self.dim,
sparse_serialization=False, sparse_retrieval=True
)
item = corpus[3]
self.assertTrue(isinstance(item, sparse.csr_matrix))
self.assertEqual(item.shape, (1, corpus.dim))
dslice = corpus[2:6]
self.assertTrue(isinstance(dslice, sparse.csr_matrix))
self.assertEqual(dslice.shape, (4, corpus.dim))
expected_nnz = sum(len(self.data[i]) for i in range(2, 6))
self.assertEqual(dslice.getnnz(), expected_nnz)
ilist = corpus[[2, 3, 4, 5]]
self.assertTrue(isinstance(ilist, sparse.csr_matrix))
self.assertEqual(ilist.shape, (4, corpus.dim))
# Also compare with what the dense dataset is giving us
d_dslice = dense_corpus[2:6]
self.assertEqual((d_dslice != dslice).getnnz(), 0)
self.assertEqual((ilist != dslice).getnnz(), 0)
def test_getitem_sparse2dense(self):
sp_tmp_fname = self.tmp_fname + '.sparse'
corpus = ShardedCorpus(
sp_tmp_fname, self.data, shardsize=100, dim=self.dim,
sparse_serialization=True, sparse_retrieval=False
)
dense_corpus = ShardedCorpus(
self.tmp_fname, self.data, shardsize=100, dim=self.dim,
sparse_serialization=False, sparse_retrieval=False
)
item = corpus[3]
self.assertTrue(isinstance(item, np.ndarray))
self.assertEqual(item.shape, (1, corpus.dim))
dslice = corpus[2:6]
self.assertTrue(isinstance(dslice, np.ndarray))
self.assertEqual(dslice.shape, (4, corpus.dim))
ilist = corpus[[2, 3, 4, 5]]
self.assertTrue(isinstance(ilist, np.ndarray))
self.assertEqual(ilist.shape, (4, corpus.dim))
# Also compare with what the dense dataset is giving us
d_dslice = dense_corpus[2:6]
self.assertEqual(dslice.all(), d_dslice.all())
self.assertEqual(ilist.all(), dslice.all())
def test_getitem_dense2gensim(self):
corpus = ShardedCorpus(
self.tmp_fname, self.data, shardsize=100, dim=self.dim,
sparse_serialization=False, gensim=True
)
item = corpus[3]
self.assertTrue(isinstance(item, list))
self.assertTrue(isinstance(item[0], tuple))
dslice = corpus[2:6]
self.assertTrue(next(dslice) == corpus[2])
dslice = list(dslice)
self.assertTrue(isinstance(dslice, list))
self.assertTrue(isinstance(dslice[0], list))
self.assertTrue(isinstance(dslice[0][0], tuple))
iscorp, _ = is_corpus(dslice)
self.assertTrue(iscorp, "Is the object returned by slice notation a gensim corpus?")
ilist = corpus[[2, 3, 4, 5]]
self.assertTrue(next(ilist) == corpus[2])
ilist = list(ilist)
self.assertTrue(isinstance(ilist, list))
self.assertTrue(isinstance(ilist[0], list))
self.assertTrue(isinstance(ilist[0][0], tuple))
# From generators to lists
self.assertEqual(len(ilist), len(dslice))
for i in range(len(ilist)):
self.assertEqual(len(ilist[i]), len(dslice[i]),
"Row %d: dims %d/%d" % (i, len(ilist[i]),
len(dslice[i])))
for j in range(len(ilist[i])):
self.assertEqual(ilist[i][j], dslice[i][j],
"ilist[%d][%d] = %s ,dslice[%d][%d] = %s" % (
i, j, str(ilist[i][j]), i, j,
str(dslice[i][j])))
iscorp, _ = is_corpus(ilist)
self.assertTrue(iscorp, "Is the object returned by list notation a gensim corpus?")
def test_resize(self):
dataset = ShardedCorpus(self.tmp_fname, self.data, shardsize=100,
dim=self.dim)
self.assertEqual(10, dataset.n_shards)
dataset.resize_shards(250)
self.assertEqual(4, dataset.n_shards)
for n in range(dataset.n_shards):
fname = dataset._shard_name(n)
self.assertTrue(os.path.isfile(fname))
def test_init_with_generator(self):
def data_generator():
yield [(0, 1)]
yield [(1, 1)]
gen_tmp_fname = self.tmp_fname + '.generator'
corpus = ShardedCorpus(gen_tmp_fname, data_generator(), dim=2)
self.assertEqual(2, len(corpus))
self.assertEqual(1, corpus[0][0])
if __name__ == '__main__':
suite = unittest.TestSuite()
loader = unittest.TestLoader()
tests = loader.loadTestsFromTestCase(TestShardedCorpus)
suite.addTest(tests)
runner = unittest.TextTestRunner()
runner.run(suite)
|
import logging
from aiohttp.client_exceptions import ClientConnectorError
from async_timeout import timeout
from gios import ApiError, Gios, InvalidSensorsData, NoStationError
from homeassistant.core import Config, HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import CONF_STATION_ID, DOMAIN, SCAN_INTERVAL
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: Config) -> bool:
"""Set up configured GIOS."""
return True
async def async_setup_entry(hass, config_entry):
"""Set up GIOS as config entry."""
station_id = config_entry.data[CONF_STATION_ID]
_LOGGER.debug("Using station_id: %s", station_id)
websession = async_get_clientsession(hass)
coordinator = GiosDataUpdateCoordinator(hass, websession, station_id)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][config_entry.entry_id] = coordinator
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, "air_quality")
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
hass.data[DOMAIN].pop(config_entry.entry_id)
await hass.config_entries.async_forward_entry_unload(config_entry, "air_quality")
return True
class GiosDataUpdateCoordinator(DataUpdateCoordinator):
"""Define an object to hold GIOS data."""
def __init__(self, hass, session, station_id):
"""Class to manage fetching GIOS data API."""
self.gios = Gios(station_id, session)
super().__init__(hass, _LOGGER, name=DOMAIN, update_interval=SCAN_INTERVAL)
async def _async_update_data(self):
"""Update data via library."""
try:
with timeout(30):
await self.gios.update()
except (
ApiError,
NoStationError,
ClientConnectorError,
InvalidSensorsData,
) as error:
raise UpdateFailed(error) from error
return self.gios.data
|
import asyncio
from typing import Any, List, Optional
import zigpy.exceptions
import zigpy.zcl.clusters.general as general
from homeassistant.core import callback
from homeassistant.helpers.event import async_call_later
from .. import registries, typing as zha_typing
from ..const import (
REPORT_CONFIG_ASAP,
REPORT_CONFIG_BATTERY_SAVE,
REPORT_CONFIG_DEFAULT,
REPORT_CONFIG_IMMEDIATE,
SIGNAL_ATTR_UPDATED,
SIGNAL_MOVE_LEVEL,
SIGNAL_SET_LEVEL,
SIGNAL_STATE_ATTR,
SIGNAL_UPDATE_DEVICE,
)
from .base import ClientChannel, ZigbeeChannel, parse_and_log_command
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.Alarms.cluster_id)
class Alarms(ZigbeeChannel):
"""Alarms channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.AnalogInput.cluster_id)
class AnalogInput(ZigbeeChannel):
"""Analog Input channel."""
REPORT_CONFIG = [{"attr": "present_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.AnalogOutput.cluster_id)
class AnalogOutput(ZigbeeChannel):
"""Analog Output channel."""
REPORT_CONFIG = [{"attr": "present_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.AnalogValue.cluster_id)
class AnalogValue(ZigbeeChannel):
"""Analog Value channel."""
REPORT_CONFIG = [{"attr": "present_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.ApplianceControl.cluster_id)
class ApplianceControl(ZigbeeChannel):
"""Appliance Control channel."""
@registries.CHANNEL_ONLY_CLUSTERS.register(general.Basic.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.Basic.cluster_id)
class BasicChannel(ZigbeeChannel):
"""Channel to interact with the basic cluster."""
UNKNOWN = 0
BATTERY = 3
POWER_SOURCES = {
UNKNOWN: "Unknown",
1: "Mains (single phase)",
2: "Mains (3 phase)",
BATTERY: "Battery",
4: "DC source",
5: "Emergency mains constantly powered",
6: "Emergency mains and transfer switch",
}
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
) -> None:
"""Initialize BasicChannel."""
super().__init__(cluster, ch_pool)
self._power_source = None
async def async_configure(self):
"""Configure this channel."""
await super().async_configure()
await self.async_initialize(False)
async def async_initialize(self, from_cache):
"""Initialize channel."""
if not self._ch_pool.skip_configuration or from_cache:
power_source = await self.get_attribute_value(
"power_source", from_cache=from_cache
)
if power_source is not None:
self._power_source = power_source
await super().async_initialize(from_cache)
def get_power_source(self):
"""Get the power source."""
return self._power_source
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.BinaryInput.cluster_id)
class BinaryInput(ZigbeeChannel):
"""Binary Input channel."""
REPORT_CONFIG = [{"attr": "present_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.BinaryOutput.cluster_id)
class BinaryOutput(ZigbeeChannel):
"""Binary Output channel."""
REPORT_CONFIG = [{"attr": "present_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.BinaryValue.cluster_id)
class BinaryValue(ZigbeeChannel):
"""Binary Value channel."""
REPORT_CONFIG = [{"attr": "present_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.Commissioning.cluster_id)
class Commissioning(ZigbeeChannel):
"""Commissioning channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.DeviceTemperature.cluster_id)
class DeviceTemperature(ZigbeeChannel):
"""Device Temperature channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.GreenPowerProxy.cluster_id)
class GreenPowerProxy(ZigbeeChannel):
"""Green Power Proxy channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.Groups.cluster_id)
class Groups(ZigbeeChannel):
"""Groups channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.Identify.cluster_id)
class Identify(ZigbeeChannel):
"""Identify channel."""
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
cmd = parse_and_log_command(self, tsn, command_id, args)
if cmd == "trigger_effect":
self.async_send_signal(f"{self.unique_id}_{cmd}", args[0])
@registries.CLIENT_CHANNELS_REGISTRY.register(general.LevelControl.cluster_id)
class LevelControlClientChannel(ClientChannel):
"""LevelControl client cluster."""
@registries.BINDABLE_CLUSTERS.register(general.LevelControl.cluster_id)
@registries.LIGHT_CLUSTERS.register(general.LevelControl.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.LevelControl.cluster_id)
class LevelControlChannel(ZigbeeChannel):
"""Channel for the LevelControl Zigbee cluster."""
CURRENT_LEVEL = 0
REPORT_CONFIG = ({"attr": "current_level", "config": REPORT_CONFIG_ASAP},)
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
cmd = parse_and_log_command(self, tsn, command_id, args)
if cmd in ("move_to_level", "move_to_level_with_on_off"):
self.dispatch_level_change(SIGNAL_SET_LEVEL, args[0])
elif cmd in ("move", "move_with_on_off"):
# We should dim slowly -- for now, just step once
rate = args[1]
if args[0] == 0xFF:
rate = 10 # Should read default move rate
self.dispatch_level_change(SIGNAL_MOVE_LEVEL, -rate if args[0] else rate)
elif cmd in ("step", "step_with_on_off"):
# Step (technically may change on/off)
self.dispatch_level_change(
SIGNAL_MOVE_LEVEL, -args[1] if args[0] else args[1]
)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
self.debug("received attribute: %s update with value: %s", attrid, value)
if attrid == self.CURRENT_LEVEL:
self.dispatch_level_change(SIGNAL_SET_LEVEL, value)
def dispatch_level_change(self, command, level):
"""Dispatch level change."""
self.async_send_signal(f"{self.unique_id}_{command}", level)
async def async_initialize(self, from_cache):
"""Initialize channel."""
await self.get_attribute_value(self.CURRENT_LEVEL, from_cache=from_cache)
await super().async_initialize(from_cache)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.MultistateInput.cluster_id)
class MultistateInput(ZigbeeChannel):
"""Multistate Input channel."""
REPORT_CONFIG = [{"attr": "present_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.MultistateOutput.cluster_id)
class MultistateOutput(ZigbeeChannel):
"""Multistate Output channel."""
REPORT_CONFIG = [{"attr": "present_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.MultistateValue.cluster_id)
class MultistateValue(ZigbeeChannel):
"""Multistate Value channel."""
REPORT_CONFIG = [{"attr": "present_value", "config": REPORT_CONFIG_DEFAULT}]
@registries.CLIENT_CHANNELS_REGISTRY.register(general.OnOff.cluster_id)
class OnOffClientChannel(ClientChannel):
"""OnOff client channel."""
@registries.BINARY_SENSOR_CLUSTERS.register(general.OnOff.cluster_id)
@registries.BINDABLE_CLUSTERS.register(general.OnOff.cluster_id)
@registries.LIGHT_CLUSTERS.register(general.OnOff.cluster_id)
@registries.SWITCH_CLUSTERS.register(general.OnOff.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.OnOff.cluster_id)
class OnOffChannel(ZigbeeChannel):
"""Channel for the OnOff Zigbee cluster."""
ON_OFF = 0
REPORT_CONFIG = ({"attr": "on_off", "config": REPORT_CONFIG_IMMEDIATE},)
def __init__(
self, cluster: zha_typing.ZigpyClusterType, ch_pool: zha_typing.ChannelPoolType
) -> None:
"""Initialize OnOffChannel."""
super().__init__(cluster, ch_pool)
self._state = None
self._off_listener = None
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
cmd = parse_and_log_command(self, tsn, command_id, args)
if cmd in ("off", "off_with_effect"):
self.attribute_updated(self.ON_OFF, False)
elif cmd in ("on", "on_with_recall_global_scene"):
self.attribute_updated(self.ON_OFF, True)
elif cmd == "on_with_timed_off":
should_accept = args[0]
on_time = args[1]
            # 0 means always accept; 1 means accept only when already on
if should_accept == 0 or (should_accept == 1 and self._state):
if self._off_listener is not None:
self._off_listener()
self._off_listener = None
self.attribute_updated(self.ON_OFF, True)
if on_time > 0:
self._off_listener = async_call_later(
self._ch_pool.hass,
(on_time / 10), # value is in 10ths of a second
self.set_to_off,
)
elif cmd == "toggle":
self.attribute_updated(self.ON_OFF, not bool(self._state))
@callback
def set_to_off(self, *_):
"""Set the state to off."""
self._off_listener = None
self.attribute_updated(self.ON_OFF, False)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
if attrid == self.ON_OFF:
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", attrid, "on_off", value
)
self._state = bool(value)
async def async_initialize(self, from_cache):
"""Initialize channel."""
await super().async_initialize(from_cache)
state = await self.get_attribute_value(self.ON_OFF, from_cache=True)
if state is not None:
self._state = bool(state)
async def async_update(self):
"""Initialize channel."""
if self.cluster.is_client:
return
from_cache = not self._ch_pool.is_mains_powered
self.debug("attempting to update onoff state - from cache: %s", from_cache)
state = await self.get_attribute_value(self.ON_OFF, from_cache=from_cache)
if state is not None:
self._state = bool(state)
await super().async_update()
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.OnOffConfiguration.cluster_id)
class OnOffConfiguration(ZigbeeChannel):
"""OnOff Configuration channel."""
@registries.CLIENT_CHANNELS_REGISTRY.register(general.Ota.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.Ota.cluster_id)
class Ota(ZigbeeChannel):
"""OTA Channel."""
@callback
def cluster_command(
self, tsn: int, command_id: int, args: Optional[List[Any]]
) -> None:
"""Handle OTA commands."""
cmd_name = self.cluster.server_commands.get(command_id, [command_id])[0]
signal_id = self._ch_pool.unique_id.split("-")[0]
if cmd_name == "query_next_image":
self.async_send_signal(SIGNAL_UPDATE_DEVICE.format(signal_id), args[3])
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.Partition.cluster_id)
class Partition(ZigbeeChannel):
"""Partition channel."""
@registries.CHANNEL_ONLY_CLUSTERS.register(general.PollControl.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.PollControl.cluster_id)
class PollControl(ZigbeeChannel):
"""Poll Control channel."""
CHECKIN_INTERVAL = 55 * 60 * 4 # 55min
CHECKIN_FAST_POLL_TIMEOUT = 2 * 4 # 2s
LONG_POLL = 6 * 4 # 6s
async def async_configure(self) -> None:
"""Configure channel: set check-in interval."""
try:
res = await self.cluster.write_attributes(
{"checkin_interval": self.CHECKIN_INTERVAL}
)
self.debug("%ss check-in interval set: %s", self.CHECKIN_INTERVAL / 4, res)
except (asyncio.TimeoutError, zigpy.exceptions.ZigbeeException) as ex:
self.debug("Couldn't set check-in interval: %s", ex)
await super().async_configure()
@callback
def cluster_command(
self, tsn: int, command_id: int, args: Optional[List[Any]]
) -> None:
"""Handle commands received to this cluster."""
cmd_name = self.cluster.client_commands.get(command_id, [command_id])[0]
self.debug("Received %s tsn command '%s': %s", tsn, cmd_name, args)
self.zha_send_event(cmd_name, args)
if cmd_name == "checkin":
self.cluster.create_catching_task(self.check_in_response(tsn))
async def check_in_response(self, tsn: int) -> None:
"""Respond to checkin command."""
await self.checkin_response(True, self.CHECKIN_FAST_POLL_TIMEOUT, tsn=tsn)
await self.set_long_poll_interval(self.LONG_POLL)
@registries.DEVICE_TRACKER_CLUSTERS.register(general.PowerConfiguration.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.PowerConfiguration.cluster_id)
class PowerConfigurationChannel(ZigbeeChannel):
"""Channel for the zigbee power configuration cluster."""
REPORT_CONFIG = (
{"attr": "battery_voltage", "config": REPORT_CONFIG_BATTERY_SAVE},
{"attr": "battery_percentage_remaining", "config": REPORT_CONFIG_BATTERY_SAVE},
)
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
attr = self._report_config[1].get("attr")
if isinstance(attr, str):
attr_id = self.cluster.attridx.get(attr)
else:
attr_id = attr
if attrid == attr_id:
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
attrid,
self.cluster.attributes.get(attrid, [attrid])[0],
value,
)
return
attr_name = self.cluster.attributes.get(attrid, [attrid])[0]
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_STATE_ATTR}", attr_name, value
)
async def async_initialize(self, from_cache):
"""Initialize channel."""
await self.async_read_state(from_cache)
await super().async_initialize(from_cache)
async def async_update(self):
"""Retrieve latest state."""
await self.async_read_state(True)
async def async_read_state(self, from_cache):
"""Read data from the cluster."""
attributes = [
"battery_size",
"battery_percentage_remaining",
"battery_voltage",
"battery_quantity",
]
await self.get_attributes(attributes, from_cache=from_cache)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.PowerProfile.cluster_id)
class PowerProfile(ZigbeeChannel):
"""Power Profile channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.RSSILocation.cluster_id)
class RSSILocation(ZigbeeChannel):
"""RSSI Location channel."""
@registries.CLIENT_CHANNELS_REGISTRY.register(general.Scenes.cluster_id)
class ScenesClientChannel(ClientChannel):
"""Scenes channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.Scenes.cluster_id)
class Scenes(ZigbeeChannel):
"""Scenes channel."""
@registries.ZIGBEE_CHANNEL_REGISTRY.register(general.Time.cluster_id)
class Time(ZigbeeChannel):
"""Time channel."""
|
from collections import namedtuple
import logging
import re
import telnetlib
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
_LEASES_REGEX = re.compile(
r"(?P<ip>([0-9]{1,3}[\.]){3}[0-9]{1,3})"
+ r"\smac:\s(?P<mac>([0-9a-f]{2}[:-]){5}([0-9a-f]{2}))"
+ r"\svalid\sfor:\s(?P<timevalid>(-?\d+))"
+ r"\ssec"
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return an Actiontec scanner."""
scanner = ActiontecDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
Device = namedtuple("Device", ["mac", "ip", "last_update"])
class ActiontecDeviceScanner(DeviceScanner):
"""This class queries an actiontec router for connected devices."""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.username = config[CONF_USERNAME]
self.password = config[CONF_PASSWORD]
self.last_results = []
data = self.get_actiontec_data()
self.success_init = data is not None
_LOGGER.info("canner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [client.mac for client in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
if not self.last_results:
return None
for client in self.last_results:
if client.mac == device:
return client.ip
return None
def _update_info(self):
"""Ensure the information from the router is up to date.
Return boolean if scanning successful.
"""
_LOGGER.info("Scanning")
if not self.success_init:
return False
now = dt_util.now()
actiontec_data = self.get_actiontec_data()
if not actiontec_data:
return False
self.last_results = [
Device(data["mac"], name, now)
for name, data in actiontec_data.items()
if data["timevalid"] > -60
]
_LOGGER.info("Scan successful")
return True
def get_actiontec_data(self):
"""Retrieve data from Actiontec MI424WR and return parsed result."""
try:
telnet = telnetlib.Telnet(self.host)
telnet.read_until(b"Username: ")
telnet.write((f"{self.username}\n").encode("ascii"))
telnet.read_until(b"Password: ")
telnet.write((f"{self.password}\n").encode("ascii"))
prompt = telnet.read_until(b"Wireless Broadband Router> ").split(b"\n")[-1]
telnet.write(b"firewall mac_cache_dump\n")
telnet.write(b"\n")
telnet.read_until(prompt)
leases_result = telnet.read_until(prompt).split(b"\n")[1:-1]
telnet.write(b"exit\n")
except EOFError:
_LOGGER.exception("Unexpected response from router")
return
except ConnectionRefusedError:
_LOGGER.exception("Connection refused by router. Telnet enabled?")
return None
devices = {}
for lease in leases_result:
match = _LEASES_REGEX.search(lease.decode("utf-8"))
if match is not None:
devices[match.group("ip")] = {
"ip": match.group("ip"),
"mac": match.group("mac").upper(),
"timevalid": int(match.group("timevalid")),
}
return devices
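# Minimal sketch showing what _LEASES_REGEX extracts from one (made-up)
# "firewall mac_cache_dump" line; real router output may differ slightly.
_SAMPLE_LEASE = "192.168.1.10 mac: 00:1a:2b:3c:4d:5e valid for: 300 sec"
_sample_match = _LEASES_REGEX.search(_SAMPLE_LEASE)
assert _sample_match is not None
assert _sample_match.group("ip") == "192.168.1.10"
assert _sample_match.group("mac") == "00:1a:2b:3c:4d:5e"
assert int(_sample_match.group("timevalid")) == 300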
|
INLINESTYLES = False
try:
import locale
locale.setlocale(locale.LC_ALL, '')
except Exception:
pass
# set up Pygments
from pygments.formatters import HtmlFormatter
# The default formatter
DEFAULT = HtmlFormatter(noclasses=INLINESTYLES, cssclass='syntax')
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
lexer = get_lexer_by_name(arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
    formatter = VARIANTS[list(options)[0]] if options else DEFAULT
parsed = highlight(u'\n'.join(content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
# run the generation
from docutils.core import publish_cmdline, default_description
description = ('Generates (X)HTML documents from standalone reStructuredText '
'sources. ' + default_description)
publish_cmdline(writer_name='html', description=description)
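# Usage sketch (illustrative, not part of the script): an input .rst file can
# now use the directive registered above, for example
#
#   .. sourcecode:: python
#
#      print("highlighted by Pygments")
#
# and the document is converted by invoking this script with the usual
# rst2html-style command-line arguments (input file, output file).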
|
import numpy as np
import collections.abc
VALID_3D_BACKENDS = (
'pyvista', # default 3d backend
'mayavi',
'notebook',
)
ALLOWED_QUIVER_MODES = ('2darrow', 'arrow', 'cone', 'cylinder', 'sphere',
'oct')
def _get_colormap_from_array(colormap=None, normalized_colormap=False,
default_colormap='coolwarm'):
from matplotlib import cm
from matplotlib.colors import ListedColormap
if colormap is None:
cmap = cm.get_cmap(default_colormap)
elif isinstance(colormap, str):
cmap = cm.get_cmap(colormap)
elif normalized_colormap:
cmap = ListedColormap(colormap)
else:
cmap = ListedColormap(np.array(colormap) / 255.0)
return cmap
def _check_color(color):
from matplotlib.colors import colorConverter
if isinstance(color, str):
color = colorConverter.to_rgb(color)
elif isinstance(color, collections.abc.Iterable):
np_color = np.array(color)
if np_color.size % 3 != 0 and np_color.size % 4 != 0:
raise ValueError("The expected valid format is RGB or RGBA.")
if np_color.dtype in (np.int64, np.int32):
if (np_color < 0).any() or (np_color > 255).any():
raise ValueError("Values out of range [0, 255].")
elif np_color.dtype == np.float64:
if (np_color < 0.0).any() or (np_color > 1.0).any():
raise ValueError("Values out of range [0.0, 1.0].")
else:
raise TypeError("Expected data type is `np.int64`, `np.int32`, or "
"`np.float64` but {} was given."
.format(np_color.dtype))
else:
raise TypeError("Expected type is `str` or iterable but "
"{} was given.".format(type(color)))
return color
def _alpha_blend_background(ctable, background_color):
alphas = ctable[:, -1][:, np.newaxis] / 255.
use_table = ctable.copy()
use_table[:, -1] = 255.
return (use_table * alphas) + background_color * (1 - alphas)
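# Usage sketch, assuming matplotlib and numpy are available as imported
# above: _check_color normalizes color names to RGB tuples and validates
# numeric colors, while _alpha_blend_background blends an RGBA colortable
# onto a background color. Guarded so it only runs when executed directly.
if __name__ == '__main__':
    assert _check_color('red') == (1.0, 0.0, 0.0)
    assert _check_color([255, 0, 0]) == [255, 0, 0]
    ctable = np.array([[255, 0, 0, 128]], dtype=float)
    blended = _alpha_blend_background(ctable, np.array([0., 0., 0., 255.]))
    # Half-transparent red over black yields half-intensity red, opaque alpha.
    assert np.allclose(blended[0], [128.0, 0.0, 0.0, 255.0], atol=0.5)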
|
import numpy as np
from chainercv.transforms.image.resize import resize
from chainercv.utils.bbox.bbox_iou import bbox_iou
from chainercv.utils import non_maximum_suppression
def _mask_aggregation(
bbox, seg_prob, seg_weight,
size, binary_thresh
):
assert bbox.shape[0] == len(seg_prob)
assert bbox.shape[0] == seg_weight.shape[0]
aggregated_msk = np.zeros(size, dtype=np.float32)
for bb, seg_pb, seg_w in zip(bbox, seg_prob, seg_weight):
bb = np.round(bb).astype(np.int32)
y_min, x_min, y_max, x_max = bb
if y_max - y_min > 0 and x_max - x_min > 0:
seg_pb = resize(
seg_pb.astype(np.float32)[None],
(y_max - y_min, x_max - x_min))
seg_m = (seg_pb >= binary_thresh).astype(np.float32)[0]
aggregated_msk[y_min:y_max, x_min:x_max] += seg_m * seg_w
y_indices, x_indices = np.where(aggregated_msk >= binary_thresh)
if len(y_indices) == 0 or len(x_indices) == 0:
return None, None
else:
y_max = y_indices.max() + 1
y_min = y_indices.min()
x_max = x_indices.max() + 1
x_min = x_indices.min()
aggregated_bb = np.array(
[y_min, x_min, y_max, x_max],
dtype=np.float32)
aggregated_cmsk = aggregated_msk[y_min:y_max, x_min:x_max]
return aggregated_cmsk[None], aggregated_bb[None]
def mask_voting(
seg_prob, bbox, cls_prob, size,
score_thresh, nms_thresh,
mask_merge_thresh, binary_thresh,
limit=100, bg_label=0
):
"""Refine mask probabilities by merging multiple masks.
    First, this function discards invalid masks with non-maximum suppression.
    Then, it merges masks with weights calculated from class probabilities and
    IoU.
    This function improves mask quality by merging overlapping masks
    predicted as the same object class.
    The following notation is used.
    * :math:`R` is the total number of RoIs produced in one image.
    * :math:`L` is the number of classes excluding the background.
    * :math:`RH` is the height of the pooled image.
    * :math:`RW` is the width of the pooled image.
Args:
seg_prob (array): A mask probability array whose shape is
:math:`(R, RH, RW)`.
bbox (array): A bounding box array whose shape is
:math:`(R, 4)`.
cls_prob (array): A class probability array whose shape is
:math:`(R, L + 1)`.
size (tuple of int): Original image size.
score_thresh (float): A threshold value of the class score.
nms_thresh (float): A threshold value of non maximum suppression.
mask_merge_thresh (float): A threshold value of the bounding box iou
for mask merging.
binary_thresh (float): A threshold value of mask score
for mask merging.
limit (int): The maximum number of outputs.
bg_label (int): The id of the background label.
Returns:
array, array, array, array:
* **v_seg_prob**: Merged mask probability. Its shapes is \
:math:`(N, RH, RW)`.
* **v_bbox**: Bounding boxes for the merged masks. Its shape is \
:math:`(N, 4)`.
* **v_label**: Class labels for the merged masks. Its shape is \
:math:`(N, )`.
* **v_score**: Class probabilities for the merged masks. Its shape \
is :math:`(N, )`.
"""
seg_size = seg_prob.shape[1:]
n_class = cls_prob.shape[1]
v_seg_prob = []
v_bbox = []
v_label = []
v_cls_prob = []
cls_score = []
cls_bbox = []
for label in range(0, n_class):
# background
if label == bg_label:
continue
# non maximum suppression
score_l = cls_prob[:, label]
keep_indices = non_maximum_suppression(
bbox, nms_thresh, score_l)
bbox_l = bbox[keep_indices]
score_l = score_l[keep_indices]
cls_bbox.append(bbox_l)
cls_score.append(score_l)
sorted_score = np.sort(np.concatenate(cls_score))[::-1]
n_keep = min(len(sorted_score), limit)
score_thresh = max(sorted_score[n_keep - 1], score_thresh)
for label in range(0, n_class):
# background
if label == bg_label:
continue
bbox_l = cls_bbox[label - 1]
score_l = cls_score[label - 1]
keep_indices = np.where(score_l >= score_thresh)
bbox_l = bbox_l[keep_indices]
score_l = score_l[keep_indices]
v_seg_prob_l = []
v_bbox_l = []
v_score_l = []
for i, bb in enumerate(bbox_l):
iou = bbox_iou(bbox, bb[np.newaxis, :])
keep_indices = np.where(iou >= mask_merge_thresh)[0]
seg_weight = cls_prob[keep_indices, label]
seg_weight = seg_weight / seg_weight.sum()
seg_prob_i = seg_prob[keep_indices]
bbox_i = bbox[keep_indices]
m_seg, m_bbox = _mask_aggregation(
bbox_i, seg_prob_i, seg_weight, size, binary_thresh)
if m_seg is not None and m_bbox is not None:
m_seg = resize(m_seg, seg_size)
m_seg = np.clip(m_seg, 0.0, 1.0)
v_seg_prob_l.append(m_seg)
v_bbox_l.append(m_bbox)
v_score_l.append(score_l[i])
if len(v_seg_prob_l) > 0:
v_label_l = np.repeat(
label - 1, len(v_score_l)).astype(np.int32)
v_seg_prob += v_seg_prob_l
v_bbox += v_bbox_l
v_label.append(v_label_l)
v_cls_prob.append(v_score_l)
if len(v_seg_prob) > 0:
v_seg_prob = np.concatenate(v_seg_prob)
v_bbox = np.concatenate(v_bbox)
v_label = np.concatenate(v_label)
v_cls_prob = np.concatenate(v_cls_prob)
else:
v_seg_prob = np.empty((0, seg_size[0], seg_size[1]))
v_bbox = np.empty((0, 4))
v_label = np.empty((0, ))
v_cls_prob = np.empty((0, ))
return v_seg_prob, v_bbox, v_label, v_cls_prob
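# Toy example (synthetic numbers, guarded so it never runs on import) of
# _mask_aggregation above: a single box covering the top-left 4x4 corner of
# an 8x8 image, with a uniform 0.9 mask probability and unit weight.
if __name__ == '__main__':
    toy_bbox = np.array([[0, 0, 4, 4]], dtype=np.float32)
    toy_seg_prob = np.full((1, 2, 2), 0.9, dtype=np.float32)
    toy_weight = np.array([1.0], dtype=np.float32)
    cmsk, bb = _mask_aggregation(
        toy_bbox, toy_seg_prob, toy_weight, size=(8, 8), binary_thresh=0.4)
    # The aggregated mask is cropped back to the occupied 4x4 region.
    assert cmsk.shape == (1, 4, 4) and bb.shape == (1, 4)
    assert np.array_equal(bb[0], np.array([0, 0, 4, 4], dtype=np.float32))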
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets import overfeat
slim = tf.contrib.slim
class OverFeatTest(tf.test.TestCase):
def testBuild(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes)
self.assertEquals(logits.op.name, 'overfeat/fc8/squeezed')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
def testFullyConvolutional(self):
batch_size = 1
height, width = 281, 281
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs, num_classes, spatial_squeeze=False)
self.assertEquals(logits.op.name, 'overfeat/fc8/BiasAdd')
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, 2, 2, num_classes])
def testEndPoints(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
_, end_points = overfeat.overfeat(inputs, num_classes)
expected_names = ['overfeat/conv1',
'overfeat/pool1',
'overfeat/conv2',
'overfeat/pool2',
'overfeat/conv3',
'overfeat/conv4',
'overfeat/conv5',
'overfeat/pool5',
'overfeat/fc6',
'overfeat/fc7',
'overfeat/fc8'
]
self.assertSetEqual(set(end_points.keys()), set(expected_names))
def testModelVariables(self):
batch_size = 5
height, width = 231, 231
num_classes = 1000
with self.test_session():
inputs = tf.random_uniform((batch_size, height, width, 3))
overfeat.overfeat(inputs, num_classes)
expected_names = ['overfeat/conv1/weights',
'overfeat/conv1/biases',
'overfeat/conv2/weights',
'overfeat/conv2/biases',
'overfeat/conv3/weights',
'overfeat/conv3/biases',
'overfeat/conv4/weights',
'overfeat/conv4/biases',
'overfeat/conv5/weights',
'overfeat/conv5/biases',
'overfeat/fc6/weights',
'overfeat/fc6/biases',
'overfeat/fc7/weights',
'overfeat/fc7/biases',
'overfeat/fc8/weights',
'overfeat/fc8/biases',
]
model_variables = [v.op.name for v in slim.get_model_variables()]
self.assertSetEqual(set(model_variables), set(expected_names))
def testEvaluation(self):
batch_size = 2
height, width = 231, 231
num_classes = 1000
with self.test_session():
eval_inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False)
self.assertListEqual(logits.get_shape().as_list(),
[batch_size, num_classes])
predictions = tf.argmax(logits, 1)
self.assertListEqual(predictions.get_shape().as_list(), [batch_size])
def testTrainEvalWithReuse(self):
train_batch_size = 2
eval_batch_size = 1
train_height, train_width = 231, 231
eval_height, eval_width = 281, 281
num_classes = 1000
with self.test_session():
train_inputs = tf.random_uniform(
(train_batch_size, train_height, train_width, 3))
logits, _ = overfeat.overfeat(train_inputs)
self.assertListEqual(logits.get_shape().as_list(),
[train_batch_size, num_classes])
tf.get_variable_scope().reuse_variables()
eval_inputs = tf.random_uniform(
(eval_batch_size, eval_height, eval_width, 3))
logits, _ = overfeat.overfeat(eval_inputs, is_training=False,
spatial_squeeze=False)
self.assertListEqual(logits.get_shape().as_list(),
[eval_batch_size, 2, 2, num_classes])
logits = tf.reduce_mean(logits, [1, 2])
predictions = tf.argmax(logits, 1)
      self.assertEqual(predictions.get_shape().as_list(), [eval_batch_size])
def testForward(self):
batch_size = 1
height, width = 231, 231
with self.test_session() as sess:
inputs = tf.random_uniform((batch_size, height, width, 3))
logits, _ = overfeat.overfeat(inputs)
sess.run(tf.global_variables_initializer())
output = sess.run(logits)
self.assertTrue(output.any())
if __name__ == '__main__':
tf.test.main()
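# Hedged usage sketch (not part of the test suite): building OverFeat for
# inference under the slim arg scope. overfeat.overfeat_arg_scope() is assumed
# to follow the standard TF-slim nets API.
def _example_build_overfeat():
  images = tf.random_uniform((1, 231, 231, 3))
  with slim.arg_scope(overfeat.overfeat_arg_scope()):
    logits, end_points = overfeat.overfeat(
        images, num_classes=1000, is_training=False)
  return tf.nn.softmax(logits), end_points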
|
import numpy as np
import os.path as op
from numpy.testing import (assert_array_equal, assert_array_almost_equal,
assert_allclose)
import pytest
from scipy import linalg, stats
from mne import (Epochs, read_events, pick_types, compute_raw_covariance,
create_info, EpochsArray)
from mne.decoding import Vectorizer
from mne.io import read_raw_fif
from mne.utils import requires_sklearn
from mne.preprocessing.xdawn import Xdawn, _XdawnTransformer
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
tmin, tmax = -0.1, 0.2
event_id = dict(cond2=2, cond3=3)
def _get_data():
"""Get data."""
raw = read_raw_fif(raw_fname, verbose=False, preload=True)
raw.set_eeg_reference(projection=True)
events = read_events(event_name)
picks = pick_types(raw.info, meg=False, eeg=True, stim=False,
ecg=False, eog=False,
exclude='bads')[::8]
return raw, events, picks
def test_xdawn():
"""Test init of xdawn."""
# Init xdawn with good parameters
Xdawn(n_components=2, correct_overlap='auto', signal_cov=None, reg=None)
# Init xdawn with bad parameters
pytest.raises(ValueError, Xdawn, correct_overlap='foo')
def test_xdawn_picks():
"""Test picking with Xdawn."""
data = np.random.RandomState(0).randn(10, 2, 10)
info = create_info(2, 1000., ('eeg', 'misc'))
epochs = EpochsArray(data, info)
xd = Xdawn(correct_overlap=False)
xd.fit(epochs)
epochs_out = xd.apply(epochs)['1']
assert epochs_out.info['ch_names'] == epochs.ch_names
assert not (epochs_out.get_data()[:, 0] != data[:, 0]).any()
assert_array_equal(epochs_out.get_data()[:, 1], data[:, 1])
def test_xdawn_fit():
"""Test Xdawn fit."""
# Get data
raw, events, picks = _get_data()
raw.del_proj()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
# =========== Basic Fit test =================
# Test base xdawn
xd = Xdawn(n_components=2, correct_overlap='auto')
xd.fit(epochs)
# With these parameters, the overlap correction must be False
assert not xd.correct_overlap_
# No overlap correction should give averaged evoked
evoked = epochs['cond2'].average()
assert_array_equal(evoked.data, xd.evokeds_['cond2'].data)
assert_allclose(np.linalg.norm(xd.filters_['cond2'], axis=1), 1)
# ========== with signal cov provided ====================
# Provide covariance object
signal_cov = compute_raw_covariance(raw, picks=picks)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
xd.fit(epochs)
# Provide ndarray
signal_cov = np.eye(len(picks))
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
xd.fit(epochs)
# Provide ndarray of bad shape
signal_cov = np.eye(len(picks) - 1)
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
pytest.raises(ValueError, xd.fit, epochs)
# Provide another type
signal_cov = 42
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=signal_cov)
pytest.raises(ValueError, xd.fit, epochs)
# Fit with baseline correction and overlap correction should throw an
# error
# XXX This is a buggy test, the epochs here don't overlap
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=(None, 0), verbose=False)
xd = Xdawn(n_components=2, correct_overlap=True)
pytest.raises(ValueError, xd.fit, epochs)
def test_xdawn_apply_transform():
"""Test Xdawn apply and transform."""
# Get data
raw, events, picks = _get_data()
raw.pick_types(eeg=True, meg=False)
epochs = Epochs(raw, events, event_id, tmin, tmax, proj=False,
preload=True, baseline=None,
verbose=False)
n_components = 2
# Fit Xdawn
xd = Xdawn(n_components=n_components, correct_overlap=False)
xd.fit(epochs)
# Apply on different types of instances
for inst in [raw, epochs.average(), epochs]:
denoise = xd.apply(inst)
# Apply on other thing should raise an error
pytest.raises(ValueError, xd.apply, 42)
# Transform on Epochs
xd.transform(epochs)
# Transform on Evoked
xd.transform(epochs.average())
# Transform on ndarray
xd.transform(epochs._data)
xd.transform(epochs._data[0])
# Transform on something else
pytest.raises(ValueError, xd.transform, 42)
# Check numerical results with shuffled epochs
np.random.seed(0) # random makes unstable linalg
idx = np.arange(len(epochs))
np.random.shuffle(idx)
xd.fit(epochs[idx])
denoise_shfl = xd.apply(epochs)
assert_array_almost_equal(denoise['cond2']._data,
denoise_shfl['cond2']._data)
@requires_sklearn
def test_xdawn_regularization():
"""Test Xdawn with regularization."""
# Get data, this time MEG so we can test proper reg/ch type support
raw = read_raw_fif(raw_fname, verbose=False, preload=True)
events = read_events(event_name)
picks = pick_types(raw.info, meg=True, eeg=False, stim=False,
ecg=False, eog=False,
exclude='bads')[::8]
raw.pick_channels([raw.ch_names[pick] for pick in picks])
del picks
raw.info.normalize_proj()
epochs = Epochs(raw, events, event_id, tmin, tmax,
preload=True, baseline=None, verbose=False)
# Test with overlapping events.
# modify events to simulate one overlap
events = epochs.events
sel = np.where(events[:, 2] == 2)[0][:2]
modified_event = events[sel[0]]
modified_event[0] += 1
epochs.events[sel[1]] = modified_event
# Fit and check that overlap was found and applied
xd = Xdawn(n_components=2, correct_overlap='auto', reg='oas')
xd.fit(epochs)
assert xd.correct_overlap_
evoked = epochs['cond2'].average()
assert np.sum(np.abs(evoked.data - xd.evokeds_['cond2'].data))
# With covariance regularization
for reg in [.1, 0.1, 'ledoit_wolf', 'oas']:
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=np.eye(len(epochs.ch_names)), reg=reg)
xd.fit(epochs)
# With bad shrinkage
xd = Xdawn(n_components=2, correct_overlap=False,
signal_cov=np.eye(len(epochs.ch_names)), reg=2)
with pytest.raises(ValueError, match='shrinkage must be'):
xd.fit(epochs)
# With rank-deficient input
# this is a bit wacky because `epochs` has projectors on from the old raw
# but it works as a rank-deficient test case
xd = Xdawn(correct_overlap=False, reg=0.5)
xd.fit(epochs)
xd = Xdawn(correct_overlap=False, reg='diagonal_fixed')
xd.fit(epochs)
# XXX in principle this should maybe raise an error due to deficiency?
# xd = Xdawn(correct_overlap=False, reg=None)
# with pytest.raises(ValueError, match='Could not compute eigenvalues'):
# xd.fit(epochs)
@requires_sklearn
def test_XdawnTransformer():
"""Test _XdawnTransformer."""
# Get data
raw, events, picks = _get_data()
raw.del_proj()
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
preload=True, baseline=None, verbose=False)
X = epochs._data
y = epochs.events[:, -1]
# Fit
xdt = _XdawnTransformer()
xdt.fit(X, y)
pytest.raises(ValueError, xdt.fit, X, y[1:])
pytest.raises(ValueError, xdt.fit, 'foo')
# Provide covariance object
signal_cov = compute_raw_covariance(raw, picks=picks)
xdt = _XdawnTransformer(signal_cov=signal_cov)
xdt.fit(X, y)
# Provide ndarray
signal_cov = np.eye(len(picks))
xdt = _XdawnTransformer(signal_cov=signal_cov)
xdt.fit(X, y)
# Provide ndarray of bad shape
signal_cov = np.eye(len(picks) - 1)
xdt = _XdawnTransformer(signal_cov=signal_cov)
pytest.raises(ValueError, xdt.fit, X, y)
# Provide another type
signal_cov = 42
xdt = _XdawnTransformer(signal_cov=signal_cov)
pytest.raises(ValueError, xdt.fit, X, y)
# Fit with y as None
xdt = _XdawnTransformer()
xdt.fit(X)
# Compare xdawn and _XdawnTransformer
xd = Xdawn(correct_overlap=False)
xd.fit(epochs)
xdt = _XdawnTransformer()
xdt.fit(X, y)
assert_array_almost_equal(xd.filters_['cond2'][:2, :],
xdt.filters_.reshape(2, 2, 8)[0])
# Transform testing
xdt.transform(X[1:, ...]) # different number of epochs
xdt.transform(X[:, :, 1:]) # different number of time
pytest.raises(ValueError, xdt.transform, X[:, 1:, :])
Xt = xdt.transform(X)
pytest.raises(ValueError, xdt.transform, 42)
# Inverse transform testing
Xinv = xdt.inverse_transform(Xt)
assert Xinv.shape == X.shape
xdt.inverse_transform(Xt[1:, ...])
xdt.inverse_transform(Xt[:, :, 1:])
# should raise an error if not correct number of components
pytest.raises(ValueError, xdt.inverse_transform, Xt[:, 1:, :])
pytest.raises(ValueError, xdt.inverse_transform, 42)
def _simulate_erplike_mixed_data(n_epochs=100, n_channels=10):
rng = np.random.RandomState(42)
tmin, tmax = 0., 1.
sfreq = 100.
informative_ch_idx = 0
y = rng.randint(0, 2, n_epochs)
n_times = int((tmax - tmin) * sfreq)
epoch_times = np.linspace(tmin, tmax, n_times)
target_template = 1e-6 * (epoch_times - tmax) * np.sin(
2 * np.pi * epoch_times)
nontarget_template = 0.7e-6 * (epoch_times - tmax) * np.sin(
2 * np.pi * (epoch_times - 0.1))
epoch_data = rng.randn(n_epochs, n_channels, n_times) * 5e-7
epoch_data[y == 0, informative_ch_idx, :] += nontarget_template
epoch_data[y == 1, informative_ch_idx, :] += target_template
mixing_mat = linalg.svd(rng.randn(n_channels, n_channels))[0]
mixed_epoch_data = np.dot(mixing_mat.T, epoch_data).transpose((1, 0, 2))
events = np.zeros((n_epochs, 3), dtype=int)
events[:, 0] = np.arange(0, n_epochs * n_times, n_times)
events[:, 2] = y
info = create_info(
ch_names=['C{:02d}'.format(i) for i in range(n_channels)],
ch_types=['eeg'] * n_channels,
sfreq=sfreq)
epochs = EpochsArray(mixed_epoch_data, info, events,
tmin=tmin,
event_id={'nt': 0, 't': 1})
return epochs, mixing_mat
@requires_sklearn
def test_xdawn_decoding_performance():
"""Test decoding performance and extracted pattern on synthetic data."""
from sklearn.model_selection import KFold
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import accuracy_score
n_xdawn_comps = 3
expected_accuracy = 0.98
epochs, mixing_mat = _simulate_erplike_mixed_data(n_epochs=100)
y = epochs.events[:, 2]
# results of Xdawn and _XdawnTransformer should match
xdawn_pipe = make_pipeline(
Xdawn(n_components=n_xdawn_comps),
Vectorizer(),
MinMaxScaler(),
LogisticRegression(solver='liblinear'))
xdawn_trans_pipe = make_pipeline(
_XdawnTransformer(n_components=n_xdawn_comps),
Vectorizer(),
MinMaxScaler(),
LogisticRegression(solver='liblinear'))
cv = KFold(n_splits=3, shuffle=False)
for pipe, X in (
(xdawn_pipe, epochs),
(xdawn_trans_pipe, epochs.get_data())):
predictions = np.empty_like(y, dtype=float)
for train, test in cv.split(X, y):
pipe.fit(X[train], y[train])
predictions[test] = pipe.predict(X[test])
cv_accuracy_xdawn = accuracy_score(y, predictions)
assert_allclose(cv_accuracy_xdawn, expected_accuracy, atol=0.01)
# for both event types, the first component should "match" the mixing
fitted_xdawn = pipe.steps[0][1]
if isinstance(fitted_xdawn, Xdawn):
relev_patterns = np.concatenate(
[comps[[0]] for comps in fitted_xdawn.patterns_.values()])
else:
relev_patterns = fitted_xdawn.patterns_[::n_xdawn_comps]
for i in range(len(relev_patterns)):
r, _ = stats.pearsonr(relev_patterns[i, :], mixing_mat[0, :])
assert np.abs(r) > 0.99
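# A minimal usage sketch, assuming the same fixtures as the tests above; it
# mirrors the fit/apply/transform calls exercised in this file and is not
# itself collected as a test.
def _example_xdawn_workflow():
    raw, events, picks = _get_data()
    raw.del_proj()
    epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
                    preload=True, baseline=None, verbose=False)
    xd = Xdawn(n_components=2, correct_overlap=False)
    xd.fit(epochs)                  # estimate per-condition spatial filters
    denoised = xd.apply(epochs)     # dict of denoised Epochs, keyed by event id
    sources = xd.transform(epochs)  # Xdawn sources as an ndarray
    return denoised, sources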
|
from unittest.mock import MagicMock, PropertyMock
from canary.api import SensorType
from homeassistant.components.canary.const import (
CONF_FFMPEG_ARGUMENTS,
DEFAULT_FFMPEG_ARGUMENTS,
DEFAULT_TIMEOUT,
DOMAIN,
)
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.helpers.typing import HomeAssistantType
from tests.async_mock import patch
from tests.common import MockConfigEntry
ENTRY_CONFIG = {
CONF_PASSWORD: "test-password",
CONF_USERNAME: "test-username",
}
ENTRY_OPTIONS = {
CONF_FFMPEG_ARGUMENTS: DEFAULT_FFMPEG_ARGUMENTS,
CONF_TIMEOUT: DEFAULT_TIMEOUT,
}
USER_INPUT = {
CONF_PASSWORD: "test-password",
CONF_USERNAME: "test-username",
}
YAML_CONFIG = {
CONF_PASSWORD: "test-password",
CONF_USERNAME: "test-username",
CONF_TIMEOUT: 5,
}
def _patch_async_setup(return_value=True):
return patch(
"homeassistant.components.canary.async_setup",
return_value=return_value,
)
def _patch_async_setup_entry(return_value=True):
return patch(
"homeassistant.components.canary.async_setup_entry",
return_value=return_value,
)
async def init_integration(
hass: HomeAssistantType,
*,
data: dict = ENTRY_CONFIG,
options: dict = ENTRY_OPTIONS,
skip_entry_setup: bool = False,
) -> MockConfigEntry:
"""Set up the Canary integration in Home Assistant."""
entry = MockConfigEntry(domain=DOMAIN, data=data, options=options)
entry.add_to_hass(hass)
if not skip_entry_setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
def mock_device(device_id, name, is_online=True, device_type_name=None):
"""Mock Canary Device class."""
device = MagicMock()
type(device).device_id = PropertyMock(return_value=device_id)
type(device).name = PropertyMock(return_value=name)
type(device).is_online = PropertyMock(return_value=is_online)
type(device).device_type = PropertyMock(
return_value={"id": 1, "name": device_type_name}
)
return device
def mock_location(
location_id, name, is_celsius=True, devices=None, mode=None, is_private=False
):
"""Mock Canary Location class."""
location = MagicMock()
type(location).location_id = PropertyMock(return_value=location_id)
type(location).name = PropertyMock(return_value=name)
type(location).is_celsius = PropertyMock(return_value=is_celsius)
type(location).is_private = PropertyMock(return_value=is_private)
type(location).devices = PropertyMock(return_value=devices or [])
type(location).mode = PropertyMock(return_value=mode)
return location
def mock_mode(mode_id, name):
"""Mock Canary Mode class."""
mode = MagicMock()
type(mode).mode_id = PropertyMock(return_value=mode_id)
type(mode).name = PropertyMock(return_value=name)
type(mode).resource_url = PropertyMock(return_value=f"/v1/modes/{mode_id}")
return mode
def mock_reading(sensor_type, sensor_value):
"""Mock Canary Reading class."""
reading = MagicMock()
type(reading).sensor_type = SensorType(sensor_type)
type(reading).value = PropertyMock(return_value=sensor_value)
return reading
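# Hedged usage sketch (not part of these helpers): one way the mocks above
# might be combined in a test. The patch target
# "homeassistant.components.canary.Api" and the "get_locations" method name
# are assumptions about where the integration imports its client.
async def _example_init_with_mocks(hass: HomeAssistantType) -> MockConfigEntry:
    device = mock_device(20, "Dining Room", True, "Canary Pro")
    location = mock_location(100, "Home", is_celsius=True, devices=[device])
    with patch("homeassistant.components.canary.Api") as mock_api:
        mock_api.return_value.get_locations.return_value = [location]
        return await init_integration(hass)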
|
from typing import Any, Dict
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.entity import Entity
from .const import DOMAIN as FLO_DOMAIN
from .device import FloDeviceDataUpdateCoordinator
class FloEntity(Entity):
"""A base class for Flo entities."""
def __init__(
self,
entity_type: str,
name: str,
device: FloDeviceDataUpdateCoordinator,
**kwargs,
):
"""Init Flo entity."""
self._unique_id: str = f"{device.mac_address}_{entity_type}"
self._name: str = name
self._device: FloDeviceDataUpdateCoordinator = device
self._state: Any = None
@property
def name(self) -> str:
"""Return Entity's default name."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def device_info(self) -> Dict[str, Any]:
"""Return a device description for device registry."""
return {
"identifiers": {(FLO_DOMAIN, self._device.id)},
"connections": {(CONNECTION_NETWORK_MAC, self._device.mac_address)},
"manufacturer": self._device.manufacturer,
"model": self._device.model,
"name": self._device.device_name,
"sw_version": self._device.firmware_version,
}
@property
def available(self) -> bool:
"""Return True if device is available."""
return self._device.available
@property
def force_update(self) -> bool:
"""Force update this entity."""
return False
@property
def should_poll(self) -> bool:
"""Poll state from device."""
return False
async def async_update(self):
"""Update Flo entity."""
await self._device.async_request_refresh()
async def async_added_to_hass(self):
"""When entity is added to hass."""
        self.async_on_remove(
            self._device.async_add_listener(self.async_write_ha_state)
        )
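# Hedged sketch (illustrative only, not part of this module): a minimal entity
# built on FloEntity. The coordinator attribute "current_flow_rate" is
# hypothetical and stands in for whatever data a real subclass would expose.
class FloExampleSensor(FloEntity):
    """Illustrative sensor showing how FloEntity is intended to be subclassed."""

    def __init__(self, device: FloDeviceDataUpdateCoordinator):
        """Initialize the example sensor."""
        super().__init__("example_sensor", "Example Sensor", device)

    @property
    def state(self):
        """Return the hypothetical coordinator value backing this entity."""
        return getattr(self._device, "current_flow_rate", None)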
|
from pylitejet import LiteJet
import voluptuous as vol
from homeassistant.const import CONF_PORT
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
CONF_EXCLUDE_NAMES = "exclude_names"
CONF_INCLUDE_SWITCHES = "include_switches"
DOMAIN = "litejet"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PORT): cv.string,
vol.Optional(CONF_EXCLUDE_NAMES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_INCLUDE_SWITCHES, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the LiteJet component."""
url = config[DOMAIN].get(CONF_PORT)
hass.data["litejet_system"] = LiteJet(url)
hass.data["litejet_config"] = config[DOMAIN]
discovery.load_platform(hass, "light", DOMAIN, {}, config)
if config[DOMAIN].get(CONF_INCLUDE_SWITCHES):
discovery.load_platform(hass, "switch", DOMAIN, {}, config)
discovery.load_platform(hass, "scene", DOMAIN, {}, config)
return True
def is_ignored(hass, name):
"""Determine if a load, switch, or scene should be ignored."""
for prefix in hass.data["litejet_config"].get(CONF_EXCLUDE_NAMES, []):
if name.startswith(prefix):
return True
return False
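# Hedged sketch (illustrative, not part of the integration): a configuration
# dict of the shape CONFIG_SCHEMA accepts. The port path and names below are
# placeholder values.
def _example_validated_config():
    """Return an example configuration validated against CONFIG_SCHEMA."""
    example = {
        DOMAIN: {
            CONF_PORT: "/dev/ttyUSB0",
            CONF_EXCLUDE_NAMES: ["Spare"],
            CONF_INCLUDE_SWITCHES: True,
        }
    }
    return CONFIG_SCHEMA(example)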
|
from homeassistant.components.switch import SwitchEntity
from .const import DOMAIN
async def async_setup_entry(hass, config, async_add_entities):
"""Initialize a Spider thermostat."""
api = hass.data[DOMAIN][config.entry_id]
entities = [SpiderPowerPlug(api, entity) for entity in api.get_power_plugs()]
async_add_entities(entities)
class SpiderPowerPlug(SwitchEntity):
"""Representation of a Spider Power Plug."""
def __init__(self, api, power_plug):
"""Initialize the Vera device."""
self.api = api
self.power_plug = power_plug
@property
def unique_id(self):
"""Return the ID of this switch."""
return self.power_plug.id
@property
def name(self):
"""Return the name of the switch if any."""
return self.power_plug.name
@property
def current_power_w(self):
"""Return the current power usage in W."""
return round(self.power_plug.current_energy_consumption)
@property
def today_energy_kwh(self):
"""Return the current power usage in Kwh."""
return round(self.power_plug.today_energy_consumption / 1000, 2)
@property
def is_on(self):
"""Return true if switch is on. Standby is on."""
return self.power_plug.is_on
@property
def available(self):
"""Return true if switch is available."""
return self.power_plug.is_available
def turn_on(self, **kwargs):
"""Turn device on."""
self.power_plug.turn_on()
def turn_off(self, **kwargs):
"""Turn device off."""
self.power_plug.turn_off()
def update(self):
"""Get the latest data."""
self.power_plug = self.api.get_power_plug(self.unique_id)
|