from unittest.mock import patch
import pytest
from homeassistant.components import media_player
from homeassistant.components.denonavr import ATTR_COMMAND, SERVICE_GET_COMMAND
from homeassistant.components.denonavr.config_flow import (
CONF_MANUFACTURER,
CONF_MODEL,
CONF_SERIAL_NUMBER,
CONF_TYPE,
DOMAIN,
)
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_MAC
from tests.common import MockConfigEntry
TEST_HOST = "1.2.3.4"
TEST_MAC = "ab:cd:ef:gh"
TEST_NAME = "Test_Receiver"
TEST_MODEL = "model5"
TEST_SERIALNUMBER = "123456789"
TEST_MANUFACTURER = "Denon"
TEST_RECEIVER_TYPE = "avr-x"
TEST_ZONE = "Main"
TEST_UNIQUE_ID = f"{TEST_MODEL}-{TEST_SERIALNUMBER}"
TEST_TIMEOUT = 2
TEST_SHOW_ALL_SOURCES = False
TEST_ZONE2 = False
TEST_ZONE3 = False
ENTITY_ID = f"{media_player.DOMAIN}.{TEST_NAME}"
@pytest.fixture(name="client")
def client_fixture():
"""Patch of client library for tests."""
with patch(
"homeassistant.components.denonavr.receiver.denonavr.DenonAVR",
autospec=True,
) as mock_client_class, patch(
"homeassistant.components.denonavr.receiver.denonavr.discover"
):
mock_client_class.return_value.name = TEST_NAME
mock_client_class.return_value.model_name = TEST_MODEL
mock_client_class.return_value.serial_number = TEST_SERIALNUMBER
mock_client_class.return_value.manufacturer = TEST_MANUFACTURER
mock_client_class.return_value.receiver_type = TEST_RECEIVER_TYPE
mock_client_class.return_value.zone = TEST_ZONE
mock_client_class.return_value.input_func_list = []
mock_client_class.return_value.sound_mode_list = []
mock_client_class.return_value.zones = {"Main": mock_client_class.return_value}
yield mock_client_class.return_value
async def setup_denonavr(hass):
"""Initialize media_player for tests."""
entry_data = {
CONF_HOST: TEST_HOST,
CONF_MAC: TEST_MAC,
CONF_MODEL: TEST_MODEL,
CONF_TYPE: TEST_RECEIVER_TYPE,
CONF_MANUFACTURER: TEST_MANUFACTURER,
CONF_SERIAL_NUMBER: TEST_SERIALNUMBER,
}
mock_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=TEST_UNIQUE_ID,
data=entry_data,
)
mock_entry.add_to_hass(hass)
await hass.config_entries.async_setup(mock_entry.entry_id)
await hass.async_block_till_done()
state = hass.states.get(ENTITY_ID)
assert state
assert state.name == TEST_NAME
async def test_get_command(hass, client):
"""Test generic command functionality."""
await setup_denonavr(hass)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_COMMAND: "test_command",
}
await hass.services.async_call(DOMAIN, SERVICE_GET_COMMAND, data)
await hass.async_block_till_done()
client.send_get_command.assert_called_with("test_command")
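# A minimal, self-contained sketch (separate from the tests above) of the
# mocking pattern used by the fixture: patching a class with autospec=True
# swaps in a mock class, and attributes assigned to mock_class.return_value
# appear on every instance the code under test constructs. DummyReceiver and
# build_receiver are hypothetical names used only for this sketch.
from unittest.mock import patch


class DummyReceiver:
    """Stand-in for the real client class."""

    name = "real name"  # class attribute, so autospec knows about it

    def __init__(self, host):
        self.host = host


def build_receiver(host):
    """Code under test: instantiates the client class."""
    return DummyReceiver(host)


def test_patched_class_controls_instances():
    with patch(f"{__name__}.DummyReceiver", autospec=True) as mock_class:
        mock_class.return_value.name = "Test_Receiver"
        receiver = build_receiver("1.2.3.4")
        # The patched class handed back its configured return_value.
        assert receiver is mock_class.return_value
        assert receiver.name == "Test_Receiver"
        mock_class.assert_called_once_with("1.2.3.4")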
|
import logging
import re
import voluptuous as vol
from homeassistant import core
from homeassistant.components import http, websocket_api
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import HTTP_INTERNAL_SERVER_ERROR
from homeassistant.helpers import config_validation as cv, intent
from homeassistant.loader import bind_hass
from .agent import AbstractConversationAgent
from .default_agent import DefaultAgent, async_register
_LOGGER = logging.getLogger(__name__)
ATTR_TEXT = "text"
DOMAIN = "conversation"
REGEX_TYPE = type(re.compile(""))
DATA_AGENT = "conversation_agent"
DATA_CONFIG = "conversation_config"
SERVICE_PROCESS = "process"
SERVICE_PROCESS_SCHEMA = vol.Schema({vol.Required(ATTR_TEXT): cv.string})
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional("intents"): vol.Schema(
{cv.string: vol.All(cv.ensure_list, [cv.string])}
)
}
)
},
extra=vol.ALLOW_EXTRA,
)
async_register = bind_hass(async_register)
@core.callback
@bind_hass
def async_set_agent(hass: core.HomeAssistant, agent: AbstractConversationAgent):
"""Set the agent to handle the conversations."""
hass.data[DATA_AGENT] = agent
async def async_setup(hass, config):
"""Register the process service."""
hass.data[DATA_CONFIG] = config
async def handle_service(service):
"""Parse text into commands."""
text = service.data[ATTR_TEXT]
_LOGGER.debug("Processing: <%s>", text)
agent = await _get_agent(hass)
try:
await agent.async_process(text, service.context)
except intent.IntentHandleError as err:
_LOGGER.error("Error processing %s: %s", text, err)
hass.services.async_register(
DOMAIN, SERVICE_PROCESS, handle_service, schema=SERVICE_PROCESS_SCHEMA
)
hass.http.register_view(ConversationProcessView())
hass.components.websocket_api.async_register_command(websocket_process)
hass.components.websocket_api.async_register_command(websocket_get_agent_info)
hass.components.websocket_api.async_register_command(websocket_set_onboarding)
return True
@websocket_api.async_response
@websocket_api.websocket_command(
{"type": "conversation/process", "text": str, vol.Optional("conversation_id"): str}
)
async def websocket_process(hass, connection, msg):
"""Process text."""
connection.send_result(
msg["id"],
await _async_converse(
hass, msg["text"], msg.get("conversation_id"), connection.context(msg)
),
)
@websocket_api.async_response
@websocket_api.websocket_command({"type": "conversation/agent/info"})
async def websocket_get_agent_info(hass, connection, msg):
"""Do we need onboarding."""
agent = await _get_agent(hass)
connection.send_result(
msg["id"],
{
"onboarding": await agent.async_get_onboarding(),
"attribution": agent.attribution,
},
)
@websocket_api.async_response
@websocket_api.websocket_command({"type": "conversation/onboarding/set", "shown": bool})
async def websocket_set_onboarding(hass, connection, msg):
"""Set onboarding status."""
agent = await _get_agent(hass)
success = await agent.async_set_onboarding(msg["shown"])
if success:
connection.send_result(msg["id"])
else:
connection.send_error(msg["id"])
class ConversationProcessView(http.HomeAssistantView):
"""View to process text."""
url = "/api/conversation/process"
name = "api:conversation:process"
@RequestDataValidator(
vol.Schema({vol.Required("text"): str, vol.Optional("conversation_id"): str})
)
async def post(self, request, data):
"""Send a request for processing."""
hass = request.app["hass"]
try:
intent_result = await _async_converse(
hass, data["text"], data.get("conversation_id"), self.context(request)
)
except intent.IntentError as err:
_LOGGER.error("Error handling intent: %s", err)
return self.json(
{
"success": False,
"error": {
"code": str(err.__class__.__name__).lower(),
"message": str(err),
},
},
status_code=HTTP_INTERNAL_SERVER_ERROR,
)
return self.json(intent_result)
async def _get_agent(hass: core.HomeAssistant) -> AbstractConversationAgent:
"""Get the active conversation agent."""
agent = hass.data.get(DATA_AGENT)
if agent is None:
agent = hass.data[DATA_AGENT] = DefaultAgent(hass)
await agent.async_initialize(hass.data.get(DATA_CONFIG))
return agent
async def _async_converse(
hass: core.HomeAssistant, text: str, conversation_id: str, context: core.Context
) -> intent.IntentResponse:
"""Process text and get intent."""
agent = await _get_agent(hass)
try:
intent_result = await agent.async_process(text, context, conversation_id)
except intent.IntentHandleError as err:
intent_result = intent.IntentResponse()
intent_result.async_set_speech(str(err))
if intent_result is None:
intent_result = intent.IntentResponse()
intent_result.async_set_speech("Sorry, I didn't understand that")
return intent_result
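# A minimal sketch (not part of this integration) of how a custom agent could
# be plugged in through async_set_agent() defined above. It assumes that
# AbstractConversationAgent only requires async_process() to be overridden;
# EchoAgent and async_setup_echo_agent are hypothetical names.
class EchoAgent(AbstractConversationAgent):
    """Toy agent that answers every sentence with a canned response."""

    async def async_process(self, text, context, conversation_id=None):
        response = intent.IntentResponse()
        response.async_set_speech(f"You said: {text}")
        return response


async def async_setup_echo_agent(hass: core.HomeAssistant) -> None:
    """Register the toy agent as the active conversation agent."""
    async_set_agent(hass, EchoAgent())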
|
import inspect
from flask.views import MethodView
import flasgger
try:
from marshmallow import Schema, fields
from apispec.ext.marshmallow import openapi
from apispec import APISpec as BaseAPISpec
# Note that openapi_converter is initialized with trivial
# schema_name_resolver. Resolving circular reference is not
# supported for now. See issue #314 .
# Also see: https://github.com/marshmallow-code/apispec/pull/447
openapi_converter = openapi.OpenAPIConverter(
openapi_version='2.0',
schema_name_resolver=lambda schema: None,
spec=None
)
schema2jsonschema = openapi_converter.schema2jsonschema
schema2parameters = openapi_converter.schema2parameters
except ImportError:
Schema = None
fields = None
schema2jsonschema = lambda schema: {} # noqa
schema2parameters = lambda schema: [] # noqa
BaseAPISpec = object
class APISpec(BaseAPISpec):
"""
Wrapper around APISpec to add `to_flasgger` method
"""
def to_flasgger(self, app=None, definitions=None, paths=None):
"""
Convert this APISpec into a flasgger-compatible template dict,
optionally adding definitions and paths.
"""
if Schema is None:
raise RuntimeError('Please install marshmallow and apispec')
return flasgger.utils.apispec_to_template(
app,
self,
definitions=definitions,
paths=paths
)
class SwaggerView(MethodView):
"""
A Swagger view
"""
parameters = []
responses = {}
definitions = {}
tags = []
consumes = ['application/json']
produces = ['application/json']
schemes = []
security = []
deprecated = False
operationId = None
externalDocs = {}
summary = None
description = None
validation = False
validation_function = None
validation_error_handler = None
def dispatch_request(self, *args, **kwargs):
"""
If validation=True perform validation
"""
if self.validation:
specs = {}
attrs = flasgger.constants.OPTIONAL_FIELDS + [
'parameters', 'definitions', 'responses',
'summary', 'description'
]
for attr in attrs:
specs[attr] = getattr(self, attr)
definitions = {}
specs.update(convert_schemas(specs, definitions))
specs['definitions'] = definitions
flasgger.utils.validate(
specs=specs, validation_function=self.validation_function,
validation_error_handler=self.validation_error_handler
)
return super(SwaggerView, self).dispatch_request(*args, **kwargs)
def convert_schemas(d, definitions=None):
"""
Convert Marshmallow schemas to dict definitions
Also updates the optional definitions argument with any definitions
entries contained within the schema.
"""
if definitions is None:
definitions = {}
definitions.update(d.get('definitions', {}))
new = {}
for k, v in d.items():
if isinstance(v, dict):
v = convert_schemas(v, definitions)
if isinstance(v, (list, tuple)):
new_v = []
for item in v:
if isinstance(item, dict):
new_v.append(convert_schemas(item, definitions))
else:
new_v.append(item)
v = new_v
if inspect.isclass(v) and issubclass(v, Schema):
if Schema is None:
raise RuntimeError('Please install marshmallow and apispec')
definitions[v.__name__] = schema2jsonschema(v)
ref = {
"$ref": "#/definitions/{0}".format(v.__name__)
}
if k == 'parameters':
new[k] = schema2parameters(v)
new[k][0]['schema'] = ref
else:
new[k] = ref
else:
new[k] = v
# This key is not permitted anywhere except the very top level.
if 'definitions' in new:
del new['definitions']
return new
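# A brief sketch (names hypothetical, behavior dependent on the installed
# marshmallow/apispec versions) of the input convert_schemas() is meant to
# handle: a Marshmallow Schema class nested inside a view spec is replaced by
# a "$ref" and its JSON Schema is collected into the definitions dict.
def _convert_schemas_example():
    from marshmallow import Schema, fields

    class UserSchema(Schema):
        username = fields.Str(required=True)
        age = fields.Int()

    definitions = {}
    spec = {
        'summary': 'Fetch a user',
        'responses': {200: {'description': 'A single user', 'schema': UserSchema}},
    }
    converted = convert_schemas(spec, definitions)
    # definitions should now contain a 'UserSchema' entry, and the response
    # schema should have been replaced by {'$ref': '#/definitions/UserSchema'}.
    return converted, definitions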
|
from homeassistant.auth import models, permissions
def test_owner_fetching_owner_permissions():
"""Test we fetch the owner permissions for an owner user."""
group = models.Group(name="Test Group", policy={})
owner = models.User(
name="Test User", perm_lookup=None, groups=[group], is_owner=True
)
assert owner.permissions is permissions.OwnerPermissions
def test_permissions_merged():
"""Test we merge the groups permissions."""
group = models.Group(
name="Test Group", policy={"entities": {"domains": {"switch": True}}}
)
group2 = models.Group(
name="Test Group", policy={"entities": {"entity_ids": {"light.kitchen": True}}}
)
user = models.User(name="Test User", perm_lookup=None, groups=[group, group2])
# Make sure we cache instance
assert user.permissions is user.permissions
assert user.permissions.check_entity("switch.bla", "read") is True
assert user.permissions.check_entity("light.kitchen", "read") is True
assert user.permissions.check_entity("light.not_kitchen", "read") is False
|
from .permissions import Permissions
async def setup(bot):
cog = Permissions(bot)
await cog.initialize()
# We should add the rules for the Permissions cog and its own commands *before* adding the cog.
# The actual listeners ought to skip the ones we're passing here.
await cog._on_cog_add(cog)
for command in cog.__cog_commands__:
await cog._on_command_add(command)
bot.add_cog(cog)
|
import asyncio
import json
import logging
from typing import Any, Callable, List, Optional
from aiohttp import web
from aiohttp.typedefs import LooseHeaders
from aiohttp.web_exceptions import (
HTTPBadRequest,
HTTPInternalServerError,
HTTPUnauthorized,
)
import voluptuous as vol
from homeassistant import exceptions
from homeassistant.const import CONTENT_TYPE_JSON, HTTP_OK, HTTP_SERVICE_UNAVAILABLE
from homeassistant.core import Context, is_callback
from homeassistant.helpers.json import JSONEncoder
from .const import KEY_AUTHENTICATED, KEY_HASS
_LOGGER = logging.getLogger(__name__)
class HomeAssistantView:
"""Base view for all views."""
url: Optional[str] = None
extra_urls: List[str] = []
# Views inheriting from this class can override this
requires_auth = True
cors_allowed = False
@staticmethod
def context(request: web.Request) -> Context:
"""Generate a context from a request."""
user = request.get("hass_user")
if user is None:
return Context()
return Context(user_id=user.id)
@staticmethod
def json(
result: Any,
status_code: int = HTTP_OK,
headers: Optional[LooseHeaders] = None,
) -> web.Response:
"""Return a JSON response."""
try:
msg = json.dumps(result, cls=JSONEncoder, allow_nan=False).encode("UTF-8")
except (ValueError, TypeError) as err:
_LOGGER.error("Unable to serialize to JSON: %s\n%s", err, result)
raise HTTPInternalServerError from err
response = web.Response(
body=msg,
content_type=CONTENT_TYPE_JSON,
status=status_code,
headers=headers,
)
response.enable_compression()
return response
def json_message(
self,
message: str,
status_code: int = HTTP_OK,
message_code: Optional[str] = None,
headers: Optional[LooseHeaders] = None,
) -> web.Response:
"""Return a JSON message response."""
data = {"message": message}
if message_code is not None:
data["code"] = message_code
return self.json(data, status_code, headers=headers)
def register(self, app: web.Application, router: web.UrlDispatcher) -> None:
"""Register the view with a router."""
assert self.url is not None, "No url set for view"
urls = [self.url] + self.extra_urls
routes = []
for method in ("get", "post", "delete", "put", "patch", "head", "options"):
handler = getattr(self, method, None)
if not handler:
continue
handler = request_handler_factory(self, handler)
for url in urls:
routes.append(router.add_route(method, url, handler))
if not self.cors_allowed:
return
for route in routes:
app["allow_cors"](route)
def request_handler_factory(view: HomeAssistantView, handler: Callable) -> Callable:
"""Wrap the handler classes."""
assert asyncio.iscoroutinefunction(handler) or is_callback(
handler
), "Handler should be a coroutine or a callback."
async def handle(request: web.Request) -> web.StreamResponse:
"""Handle incoming request."""
if request.app[KEY_HASS].is_stopping:
return web.Response(status=HTTP_SERVICE_UNAVAILABLE)
authenticated = request.get(KEY_AUTHENTICATED, False)
if view.requires_auth and not authenticated:
raise HTTPUnauthorized()
_LOGGER.debug(
"Serving %s to %s (auth: %s)",
request.path,
request.remote,
authenticated,
)
try:
result = handler(request, **request.match_info)
if asyncio.iscoroutine(result):
result = await result
except vol.Invalid as err:
raise HTTPBadRequest() from err
except exceptions.ServiceNotFound as err:
raise HTTPInternalServerError() from err
except exceptions.Unauthorized as err:
raise HTTPUnauthorized() from err
if isinstance(result, web.StreamResponse):
# The method handler returned a ready-made Response, how nice of it
return result
status_code = HTTP_OK
if isinstance(result, tuple):
result, status_code = result
if isinstance(result, bytes):
bresult = result
elif isinstance(result, str):
bresult = result.encode("utf-8")
elif result is None:
bresult = b""
else:
assert (
False
), f"Result should be None, string, bytes or Response. Got: {result}"
return web.Response(body=bresult, status=status_code)
return handle
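# A minimal sketch (PingView is a hypothetical name) of a view built on the
# class above: set url and name, optionally relax requires_auth, and implement
# the HTTP verbs as coroutines that return self.json(...). Registration would
# normally go through hass.http.register_view(PingView()), as other
# integrations do.
class PingView(HomeAssistantView):
    """Respond to GET /api/ping with a small JSON payload."""

    url = "/api/ping"
    name = "api:ping"
    requires_auth = False

    async def get(self, request: web.Request) -> web.Response:
        """Return a static JSON body."""
        return self.json({"ping": "pong"})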
|
import unittest
import numpy as np
import chainer
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.faster_rcnn import ProposalCreator
from chainercv.utils import generate_random_bbox
@testing.parameterize(
{'train': True},
{'train': False},
)
class TestProposalCreator(unittest.TestCase):
img_size = (320, 240)
n_anchor_base = 9
n_train_post_nms = 350
n_test_post_nms = 300
def setUp(self):
feat_size = (self.img_size[0] // 16, self.img_size[1] // 16)
n_anchor = np.int32(self.n_anchor_base * np.prod(feat_size))
self.score = np.random.uniform(
low=0, high=1, size=(n_anchor,)).astype(np.float32)
self.bbox_d = np.random.uniform(
low=-1, high=1., size=(n_anchor, 4)).astype(np.float32)
self.anchor = generate_random_bbox(n_anchor, self.img_size, 16, 200)
self.proposal_creator = ProposalCreator(
n_train_post_nms=self.n_train_post_nms,
n_test_post_nms=self.n_test_post_nms,
min_size=0)
def check_proposal_creator(
self, proposal_creator,
bbox_d, score, anchor, img_size,
scale=1.):
with chainer.using_config('train', self.train):
roi = self.proposal_creator(bbox_d, score, anchor, img_size, scale)
if self.train:
out_length = self.n_train_post_nms
else:
out_length = self.n_test_post_nms
self.assertIsInstance(roi, type(bbox_d))
self.assertEqual(roi.shape, (out_length, 4))
def test_proposal_creator_cpu(self):
self.check_proposal_creator(
self.proposal_creator,
self.bbox_d,
self.score,
self.anchor, self.img_size, scale=1.)
@attr.gpu
def test_proposal_creator_gpu(self):
self.check_proposal_creator(
self.proposal_creator,
cuda.to_gpu(self.bbox_d),
cuda.to_gpu(self.score),
cuda.to_gpu(self.anchor), self.img_size,
scale=1.)
testing.run_module(__name__, __file__)
|
import logging
import time
from abc import ABC
from abc import abstractmethod
from typing import Any
from typing import Callable
from typing import Dict
from typing import Optional
from typing import Type
from typing import Union
from typing_extensions import Protocol
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
try:
import yelp_meteorite
except ImportError:
yelp_meteorite = None
_metrics_interfaces: Dict[str, Type["BaseMetrics"]] = {}
class TimerProtocol(Protocol):
def start(self) -> None:
raise NotImplementedError()
def stop(self) -> None:
raise NotImplementedError()
def record(self, value: float) -> None:
raise NotImplementedError()
class GaugeProtocol(Protocol):
def set(self, value: Union[int, float]) -> None:
raise NotImplementedError()
class CounterProtocol(Protocol):
def count(self) -> None:
raise NotImplementedError()
class BaseMetrics(ABC):
def __init__(self, base_name: str) -> None:
self.base_name = base_name
@abstractmethod
def create_timer(self, name: str, **kwargs: Any) -> TimerProtocol:
raise NotImplementedError()
@abstractmethod
def create_gauge(self, name: str, **kwargs: Any) -> GaugeProtocol:
raise NotImplementedError()
@abstractmethod
def create_counter(self, name: str, **kwargs: Any) -> CounterProtocol:
raise NotImplementedError()
def get_metrics_interface(base_name: str) -> BaseMetrics:
metrics_provider = load_system_paasta_config().get_metrics_provider()
return _metrics_interfaces[metrics_provider](base_name)
def register_metrics_interface(
name: Optional[str],
) -> Callable[[Type[BaseMetrics]], Type[BaseMetrics]]:
def outer(func: Type[BaseMetrics]) -> Type[BaseMetrics]:
_metrics_interfaces[name] = func
return func
return outer
@register_metrics_interface("meteorite")
class MeteoriteMetrics(BaseMetrics):
def __init__(self, base_name: str) -> None:
self.base_name = base_name
if yelp_meteorite is None:
raise ImportError(
"yelp_meteorite not imported, please try another metrics provider"
)
def create_timer(self, name: str, **kwargs: Any) -> TimerProtocol:
return yelp_meteorite.create_timer(self.base_name + "." + name, kwargs)
def create_gauge(self, name: str, **kwargs: Any) -> GaugeProtocol:
return yelp_meteorite.create_gauge(self.base_name + "." + name, kwargs)
def create_counter(self, name: str, **kwargs: Any) -> CounterProtocol:
return yelp_meteorite.create_counter(self.base_name + "." + name, kwargs)
class Timer(TimerProtocol):
def __init__(self, name: str) -> None:
self.name = name
def start(self) -> None:
log.debug("timer {} start at {}".format(self.name, time.time()))
def stop(self) -> None:
log.debug("timer {} stop at {}".format(self.name, time.time()))
def record(self, value: float) -> None:
log.debug(f"timer {self.name} record value {value}")
class Gauge(GaugeProtocol):
def __init__(self, name: str) -> None:
self.name = name
def set(self, value: Union[int, float]) -> None:
log.debug(f"gauge {self.name} set to {value}")
class Counter(CounterProtocol):
def __init__(self, name: str) -> None:
self.name = name
self.counter = 0
def count(self) -> None:
self.counter += 1
log.debug(f"counter {self.name} incremented to {self.counter}")
@register_metrics_interface(None)
class NoMetrics(BaseMetrics):
def __init__(self, base_name: str) -> None:
self.base_name = base_name
def create_timer(self, name: str, **kwargs: Any) -> Timer:
return Timer(self.base_name + "." + name)
def create_gauge(self, name: str, **kwargs: Any) -> Gauge:
return Gauge(self.base_name + "." + name)
def create_counter(self, name: str, **kwargs: Any) -> Counter:
return Counter(self.base_name + "." + name)
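# A short sketch of how a new backend plugs into the registry above: subclass
# BaseMetrics, implement the three factory methods, and decorate the class
# with register_metrics_interface() under the provider name that the system
# PaaSTA config would select via get_metrics_provider(). The "example"
# provider name is hypothetical; it simply reuses the debug-logging helpers
# defined above.
@register_metrics_interface("example")
class ExampleMetrics(BaseMetrics):
    def create_timer(self, name: str, **kwargs: Any) -> TimerProtocol:
        return Timer(self.base_name + "." + name)

    def create_gauge(self, name: str, **kwargs: Any) -> GaugeProtocol:
        return Gauge(self.base_name + "." + name)

    def create_counter(self, name: str, **kwargs: Any) -> CounterProtocol:
        return Counter(self.base_name + "." + name)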
|
from datetime import timedelta
# pylint: disable=import-error, no-member
import switchmate
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import CONF_MAC, CONF_NAME
import homeassistant.helpers.config_validation as cv
CONF_FLIP_ON_OFF = "flip_on_off"
DEFAULT_NAME = "Switchmate"
SCAN_INTERVAL = timedelta(minutes=30)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_FLIP_ON_OFF, default=False): cv.boolean,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None) -> None:
"""Perform the setup for Switchmate devices."""
name = config.get(CONF_NAME)
mac_addr = config[CONF_MAC]
flip_on_off = config[CONF_FLIP_ON_OFF]
add_entities([SwitchmateEntity(mac_addr, name, flip_on_off)], True)
class SwitchmateEntity(SwitchEntity):
"""Representation of a Switchmate."""
def __init__(self, mac, name, flip_on_off) -> None:
"""Initialize the Switchmate."""
self._mac = mac
self._name = name
self._device = switchmate.Switchmate(mac=mac, flip_on_off=flip_on_off)
@property
def unique_id(self) -> str:
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self._mac.replace(":", "")
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._device.available
@property
def name(self) -> str:
"""Return the name of the switch."""
return self._name
def update(self) -> None:
"""Synchronize state with switch."""
self._device.update()
@property
def is_on(self) -> bool:
"""Return true if it is on."""
return self._device.state
def turn_on(self, **kwargs) -> None:
"""Turn the switch on."""
self._device.turn_on()
def turn_off(self, **kwargs) -> None:
"""Turn the switch off."""
self._device.turn_off()
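# A small sketch of the configuration this platform expects, validated through
# the PLATFORM_SCHEMA defined above. The MAC address and name are made-up
# example values, and the base keys accepted may vary between Home Assistant
# versions.
EXAMPLE_CONFIG = PLATFORM_SCHEMA(
    {
        "platform": "switchmate",
        CONF_MAC: "c0:ff:ee:c0:ff:ee",
        CONF_NAME: "Porch Light Switch",
        CONF_FLIP_ON_OFF: True,
    }
)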
|
import numpy as np
import unittest
from chainer import testing
from chainercv.visualizations import vis_instance_segmentation
try:
import matplotlib # NOQA
_available = True
except ImportError:
_available = False
@testing.parameterize(
*testing.product_dict([
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': None,
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': None,
'label_names': None},
{
'n_bbox': 3, 'label': (0, 1, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 0, 'label': (), 'score': (),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'),
'instance_colors': [
(255, 0, 0), (0, 255, 0), (0, 0, 255), (100, 100, 100)]},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'), 'no_img': False},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisInstanceSegmentation(unittest.TestCase):
def setUp(self):
if hasattr(self, 'no_img'):
self.img = None
else:
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.mask = np.random.randint(
0, 2, size=(self.n_bbox, 32, 48), dtype=bool)
if self.label is not None:
self.label = np.array(self.label, dtype=np.int32)
if self.score is not None:
self.score = np.array(self.score)
if not hasattr(self, 'instance_colors'):
self.instance_colors = None
def test_vis_instance_segmentation(self):
ax = vis_instance_segmentation(
self.img, self.mask, self.label, self.score,
label_names=self.label_names,
instance_colors=self.instance_colors,
sort_by_score=self.sort_by_score)
self.assertIsInstance(ax, matplotlib.axes.Axes)
@testing.parameterize(*testing.product_dict([
{
'n_bbox': 3, 'label': (0, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1, 0.75),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 3), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (-1, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisInstanceSegmentationInvalidInputs(unittest.TestCase):
def setUp(self):
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.mask = np.random.randint(
0, 2, size=(self.n_bbox, 32, 48), dtype=bool)
if self.label is not None:
self.label = np.array(self.label, dtype=int)
if self.score is not None:
self.score = np.array(self.score)
def test_vis_instance_segmentation_invalid_inputs(self):
with self.assertRaises(ValueError):
vis_instance_segmentation(
self.img, self.mask, self.label, self.score,
label_names=self.label_names, sort_by_score=self.sort_by_score)
testing.run_module(__name__, __file__)
|
from homeassistant.components.proximity import DOMAIN
from homeassistant.setup import async_setup_component
async def test_proximities(hass):
"""Test a list of proximities."""
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1", "device_tracker.test2"],
"tolerance": "1",
},
"work": {"devices": ["device_tracker.test1"], "tolerance": "1"},
}
}
assert await async_setup_component(hass, DOMAIN, config)
proximities = ["home", "work"]
for prox in proximities:
state = hass.states.get(f"proximity.{prox}")
assert state.state == "not set"
assert state.attributes.get("nearest") == "not set"
assert state.attributes.get("dir_of_travel") == "not set"
hass.states.async_set(f"proximity.{prox}", "0")
await hass.async_block_till_done()
state = hass.states.get(f"proximity.{prox}")
assert state.state == "0"
async def test_proximities_setup(hass):
"""Test a list of proximities with missing devices."""
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1", "device_tracker.test2"],
"tolerance": "1",
},
"work": {"tolerance": "1"},
}
}
assert await async_setup_component(hass, DOMAIN, config)
async def test_proximity(hass):
"""Test the proximity."""
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1", "device_tracker.test2"],
"tolerance": "1",
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
state = hass.states.get("proximity.home")
assert state.state == "not set"
assert state.attributes.get("nearest") == "not set"
assert state.attributes.get("dir_of_travel") == "not set"
hass.states.async_set("proximity.home", "0")
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.state == "0"
async def test_device_tracker_test1_in_zone(hass):
"""Test for tracker in zone."""
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1"],
"tolerance": "1",
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.states.async_set(
"device_tracker.test1",
"home",
{"friendly_name": "test1", "latitude": 2.1, "longitude": 1.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.state == "0"
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "arrived"
async def test_device_trackers_in_zone(hass):
"""Test for trackers in zone."""
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1", "device_tracker.test2"],
"tolerance": "1",
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.states.async_set(
"device_tracker.test1",
"home",
{"friendly_name": "test1", "latitude": 2.1, "longitude": 1.1},
)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test2",
"home",
{"friendly_name": "test2", "latitude": 2.1, "longitude": 1.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.state == "0"
assert (state.attributes.get("nearest") == "test1, test2") or (
state.attributes.get("nearest") == "test2, test1"
)
assert state.attributes.get("dir_of_travel") == "arrived"
async def test_device_tracker_test1_away(hass):
"""Test for tracker state away."""
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1"],
"tolerance": "1",
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 20.1, "longitude": 10.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
async def test_device_tracker_test1_awayfurther(hass):
"""Test for tracker state away further."""
config_zones(hass)
await hass.async_block_till_done()
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1"],
"tolerance": "1",
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 20.1, "longitude": 10.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 40.1, "longitude": 20.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "away_from"
async def test_device_tracker_test1_awaycloser(hass):
"""Test for tracker state away closer."""
config_zones(hass)
await hass.async_block_till_done()
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1"],
"tolerance": "1",
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 40.1, "longitude": 20.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 20.1, "longitude": 10.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "towards"
async def test_all_device_trackers_in_ignored_zone(hass):
"""Test for tracker in ignored zone."""
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1"],
"tolerance": "1",
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.states.async_set("device_tracker.test1", "work", {"friendly_name": "test1"})
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.state == "not set"
assert state.attributes.get("nearest") == "not set"
assert state.attributes.get("dir_of_travel") == "not set"
async def test_device_tracker_test1_no_coordinates(hass):
"""Test for tracker with no coordinates."""
config = {
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1"],
"tolerance": "1",
}
}
}
assert await async_setup_component(hass, DOMAIN, config)
hass.states.async_set(
"device_tracker.test1", "not_home", {"friendly_name": "test1"}
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "not set"
assert state.attributes.get("dir_of_travel") == "not set"
async def test_device_tracker_test1_awayfurther_than_test2_first_test1(hass):
"""Test for tracker ordering."""
config_zones(hass)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test1", "not_home", {"friendly_name": "test1"}
)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test2", "not_home", {"friendly_name": "test2"}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
DOMAIN,
{
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1", "device_tracker.test2"],
"tolerance": "1",
"zone": "home",
}
}
},
)
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 20.1, "longitude": 10.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
hass.states.async_set(
"device_tracker.test2",
"not_home",
{"friendly_name": "test2", "latitude": 40.1, "longitude": 20.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
async def test_device_tracker_test1_awayfurther_than_test2_first_test2(hass):
"""Test for tracker ordering."""
config_zones(hass)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test1", "not_home", {"friendly_name": "test1"}
)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test2", "not_home", {"friendly_name": "test2"}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
DOMAIN,
{
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1", "device_tracker.test2"],
"zone": "home",
}
}
},
)
hass.states.async_set(
"device_tracker.test2",
"not_home",
{"friendly_name": "test2", "latitude": 40.1, "longitude": 20.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test2"
assert state.attributes.get("dir_of_travel") == "unknown"
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 20.1, "longitude": 10.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
async def test_device_tracker_test1_awayfurther_test2_in_ignored_zone(hass):
"""Test for tracker states."""
hass.states.async_set(
"device_tracker.test1", "not_home", {"friendly_name": "test1"}
)
await hass.async_block_till_done()
hass.states.async_set("device_tracker.test2", "work", {"friendly_name": "test2"})
await hass.async_block_till_done()
assert await async_setup_component(
hass,
DOMAIN,
{
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1", "device_tracker.test2"],
"zone": "home",
}
}
},
)
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 20.1, "longitude": 10.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
async def test_device_tracker_test1_awayfurther_test2_first(hass):
"""Test for tracker state."""
config_zones(hass)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test1", "not_home", {"friendly_name": "test1"}
)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test2", "not_home", {"friendly_name": "test2"}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
DOMAIN,
{
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1", "device_tracker.test2"],
"zone": "home",
}
}
},
)
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 10.1, "longitude": 5.1},
)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test2",
"not_home",
{"friendly_name": "test2", "latitude": 20.1, "longitude": 10.1},
)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 40.1, "longitude": 20.1},
)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 35.1, "longitude": 15.1},
)
await hass.async_block_till_done()
hass.states.async_set("device_tracker.test1", "work", {"friendly_name": "test1"})
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test2"
assert state.attributes.get("dir_of_travel") == "unknown"
async def test_device_tracker_test1_awayfurther_a_bit(hass):
"""Test for tracker states."""
assert await async_setup_component(
hass,
DOMAIN,
{
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1"],
"tolerance": 1000,
"zone": "home",
}
}
},
)
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 20.1000001, "longitude": 10.1000001},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 20.1000002, "longitude": 10.1000002},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "stationary"
async def test_device_tracker_test1_nearest_after_test2_in_ignored_zone(hass):
"""Test for tracker states."""
config_zones(hass)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test1", "not_home", {"friendly_name": "test1"}
)
await hass.async_block_till_done()
hass.states.async_set(
"device_tracker.test2", "not_home", {"friendly_name": "test2"}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
DOMAIN,
{
"proximity": {
"home": {
"ignored_zones": ["work"],
"devices": ["device_tracker.test1", "device_tracker.test2"],
"zone": "home",
}
}
},
)
hass.states.async_set(
"device_tracker.test1",
"not_home",
{"friendly_name": "test1", "latitude": 20.1, "longitude": 10.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
hass.states.async_set(
"device_tracker.test2",
"not_home",
{"friendly_name": "test2", "latitude": 10.1, "longitude": 5.1},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test2"
assert state.attributes.get("dir_of_travel") == "unknown"
hass.states.async_set(
"device_tracker.test2",
"work",
{"friendly_name": "test2", "latitude": 12.6, "longitude": 7.6},
)
await hass.async_block_till_done()
state = hass.states.get("proximity.home")
assert state.attributes.get("nearest") == "test1"
assert state.attributes.get("dir_of_travel") == "unknown"
def config_zones(hass):
"""Set up zones for test."""
hass.config.components.add("zone")
hass.states.async_set(
"zone.home",
"zoning",
{"name": "home", "latitude": 2.1, "longitude": 1.1, "radius": 10},
)
hass.states.async_set(
"zone.work",
"zoning",
{"name": "work", "latitude": 2.3, "longitude": 1.3, "radius": 10},
)
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import MagicMock, Mock
from mock import patch
from diamond.collector import Collector
from mountstats import MountStatsCollector
class TestMountStatsCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MountStatsCollector', {
'exclude_filters': ['^/mnt/path2'],
'interval': 1
})
self.collector = MountStatsCollector(config, None)
def test_import(self):
self.assertTrue(MountStatsCollector)
@patch('__builtin__.open')
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_open_mountstats(self, publish_mock, open_mock):
open_mock.return_value = MagicMock()
self.collector.collect()
open_mock.assert_called_once_with(self.collector.MOUNTSTATS)
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock):
# Test the first and last metric of each type
published_metrics = {
'_mnt_path1.events.inoderevalidates': 27110.0,
'_mnt_path1.events.delay': 0.0,
'_mnt_path1.bytes.normalreadbytes': 1424269.0,
'_mnt_path1.bytes.serverwritebytes': 69460.0,
'_mnt_path1.xprt.tcp.port': 0.0,
'_mnt_path1.xprt.tcp.backlogutil': 11896527.0,
'_mnt_path1.rpc.access.ops': 2988.0,
'_mnt_path1.rpc.write.ops': 16.0
}
unpublished_metrics = {
'_mnt_path2.events.delay': 0.0
}
self.collector.MOUNTSTATS = self.getFixturePath('mountstats_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
self.collector.MOUNTSTATS = self.getFixturePath('mountstats_2')
self.collector.collect()
self.assertPublishedMany(publish_mock, published_metrics)
self.assertUnpublishedMany(publish_mock, unpublished_metrics)
@patch.object(Collector, 'publish')
def test_include_filter(self, publish_mock):
config = get_collector_config('MountStatsCollector', {
'include_filters': ['^/mnt/path2'],
'interval': 1
})
self.collector = MountStatsCollector(config, None)
# Test the first and last metric of each type
published_metrics = {
'_mnt_path2.bytes.directwritebytes': 0.0,
'_mnt_path2.bytes.normalreadbytes': 1424269.0,
'_mnt_path2.bytes.normalwritebytes': 66589.0,
'_mnt_path2.bytes.serverreadbytes': 757.0,
'_mnt_path2.bytes.serverwritebytes': 69460.0,
'_mnt_path2.events.attrinvalidates': 144.0,
'_mnt_path2.events.datainvalidates': 23.0,
}
unpublished_metrics = {
'_mnt_path1.events.inoderevalidates': 27110.0,
}
self.collector.MOUNTSTATS = self.getFixturePath('mountstats_1')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
self.collector.MOUNTSTATS = self.getFixturePath('mountstats_2')
self.collector.collect()
self.assertPublishedMany(publish_mock, published_metrics)
self.assertUnpublishedMany(publish_mock, unpublished_metrics)
if __name__ == "__main__":
unittest.main()
|
import operator
import types
from _string import formatter_field_name_split
from collections import abc
from collections import deque
from string import Formatter
from markupsafe import EscapeFormatter
from markupsafe import Markup
from .environment import Environment
from .exceptions import SecurityError
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: Unsafe function attributes.
UNSAFE_FUNCTION_ATTRIBUTES = set()
#: Unsafe method attributes. Function attributes are unsafe for methods too.
UNSAFE_METHOD_ATTRIBUTES = set()
#: unsafe generator attributes.
UNSAFE_GENERATOR_ATTRIBUTES = {"gi_frame", "gi_code"}
#: unsafe attributes on coroutines
UNSAFE_COROUTINE_ATTRIBUTES = {"cr_frame", "cr_code"}
#: unsafe attributes on async generators
UNSAFE_ASYNC_GENERATOR_ATTRIBUTES = {"ag_code", "ag_frame"}
_mutable_spec = (
(
abc.MutableSet,
frozenset(
[
"add",
"clear",
"difference_update",
"discard",
"pop",
"remove",
"symmetric_difference_update",
"update",
]
),
),
(
abc.MutableMapping,
frozenset(["clear", "pop", "popitem", "setdefault", "update"]),
),
(
abc.MutableSequence,
frozenset(["append", "reverse", "insert", "sort", "extend", "remove"]),
),
(
deque,
frozenset(
[
"append",
"appendleft",
"clear",
"extend",
"extendleft",
"pop",
"popleft",
"remove",
"rotate",
]
),
),
)
class _MagicFormatMapping(abc.Mapping):
"""This class implements a dummy wrapper to fix a bug in the Python
standard library for string formatting.
See https://bugs.python.org/issue13598 for information about why
this is necessary.
"""
def __init__(self, args, kwargs):
self._args = args
self._kwargs = kwargs
self._last_index = 0
def __getitem__(self, key):
if key == "":
idx = self._last_index
self._last_index += 1
try:
return self._args[idx]
except LookupError:
pass
key = str(idx)
return self._kwargs[key]
def __iter__(self):
return iter(self._kwargs)
def __len__(self):
return len(self._kwargs)
def inspect_format_method(callable):
if not isinstance(
callable, (types.MethodType, types.BuiltinMethodType)
) or callable.__name__ not in ("format", "format_map"):
return None
obj = callable.__self__
if isinstance(obj, str):
return obj
def safe_range(*args):
"""A range that can't generate ranges with a length of more than
MAX_RANGE items.
"""
rng = range(*args)
if len(rng) > MAX_RANGE:
raise OverflowError(
"Range too big. The sandbox blocks ranges larger than"
f" MAX_RANGE ({MAX_RANGE})."
)
return rng
def unsafe(f):
"""Marks a function or method as unsafe.
.. code-block:: python
@unsafe
def delete(self):
pass
"""
f.unsafe_callable = True
return f
def is_internal_attribute(obj, attr):
"""Test if the attribute given is an internal python attribute. For
example this function returns `True` for the `func_code` attribute of
python objects. This is useful if the environment method
:meth:`~SandboxedEnvironment.is_safe_attribute` is overridden.
>>> from jinja2.sandbox import is_internal_attribute
>>> is_internal_attribute(str, "mro")
True
>>> is_internal_attribute(str, "upper")
False
"""
if isinstance(obj, types.FunctionType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES:
return True
elif isinstance(obj, types.MethodType):
if attr in UNSAFE_FUNCTION_ATTRIBUTES or attr in UNSAFE_METHOD_ATTRIBUTES:
return True
elif isinstance(obj, type):
if attr == "mro":
return True
elif isinstance(obj, (types.CodeType, types.TracebackType, types.FrameType)):
return True
elif isinstance(obj, types.GeneratorType):
if attr in UNSAFE_GENERATOR_ATTRIBUTES:
return True
elif hasattr(types, "CoroutineType") and isinstance(obj, types.CoroutineType):
if attr in UNSAFE_COROUTINE_ATTRIBUTES:
return True
elif hasattr(types, "AsyncGeneratorType") and isinstance(
obj, types.AsyncGeneratorType
):
if attr in UNSAFE_ASYNC_GENERATOR_ATTRIBUTES:
return True
return attr.startswith("__")
def modifies_known_mutable(obj, attr):
"""This function checks if an attribute on a builtin mutable object
(list, dict, set or deque) or the corresponding ABCs would modify it
if called.
>>> modifies_known_mutable({}, "clear")
True
>>> modifies_known_mutable({}, "keys")
False
>>> modifies_known_mutable([], "append")
True
>>> modifies_known_mutable([], "index")
False
If called with an unsupported object, ``False`` is returned.
>>> modifies_known_mutable("foo", "upper")
False
"""
for typespec, unsafe in _mutable_spec:
if isinstance(obj, typespec):
return attr in unsafe
return False
class SandboxedEnvironment(Environment):
"""The sandboxed environment. It works like the regular environment but
tells the compiler to generate sandboxed code. Additionally subclasses of
this environment may override the methods that tell the runtime what
attributes or functions are safe to access.
If the template tries to access insecure code a :exc:`SecurityError` is
raised. However, other exceptions may also occur during rendering, so
the caller has to ensure that all exceptions are caught.
"""
sandboxed = True
#: default callback table for the binary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`binop_table`
default_binop_table = {
"+": operator.add,
"-": operator.sub,
"*": operator.mul,
"/": operator.truediv,
"//": operator.floordiv,
"**": operator.pow,
"%": operator.mod,
}
#: default callback table for the unary operators. A copy of this is
#: available on each instance of a sandboxed environment as
#: :attr:`unop_table`
default_unop_table = {"+": operator.pos, "-": operator.neg}
#: a set of binary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_binop` method that will perform the operator. The default
#: operator callback is specified by :attr:`binop_table`.
#:
#: The following binary operators are interceptable:
#: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_binops = frozenset()
#: a set of unary operators that should be intercepted. Each operator
#: that is added to this set (empty by default) is delegated to the
#: :meth:`call_unop` method that will perform the operator. The default
#: operator callback is specified by :attr:`unop_table`.
#:
#: The following unary operators are interceptable: ``+``, ``-``
#:
#: The default operation from the operator table corresponds to the
#: builtin function. Intercepted calls are always slower than the native
#: operator call, so make sure only to intercept the ones you are
#: interested in.
#:
#: .. versionadded:: 2.6
intercepted_unops = frozenset()
def intercept_unop(self, operator):
"""Called during template compilation with the name of a unary
operator to check if it should be intercepted at runtime. If this
method returns `True`, :meth:`call_unop` is executed for this unary
operator. The default implementation of :meth:`call_unop` will use
the :attr:`unop_table` dictionary to perform the operator with the
same logic as the builtin one.
The following unary operators are interceptable: ``+`` and ``-``
Intercepted calls are always slower than the native operator call,
so make sure only to intercept the ones you are interested in.
.. versionadded:: 2.6
"""
return False
def __init__(self, *args, **kwargs):
Environment.__init__(self, *args, **kwargs)
self.globals["range"] = safe_range
self.binop_table = self.default_binop_table.copy()
self.unop_table = self.default_unop_table.copy()
def is_safe_attribute(self, obj, attr, value):
"""The sandboxed environment will call this method to check if the
attribute of an object is safe to access. Per default all attributes
starting with an underscore are considered private as well as the
special attributes of internal python objects as returned by the
:func:`is_internal_attribute` function.
"""
return not (attr.startswith("_") or is_internal_attribute(obj, attr))
def is_safe_callable(self, obj):
"""Check if an object is safely callable. Per default a function is
considered safe unless the `unsafe_callable` attribute exists and is
True. Override this method to alter the behavior, but this won't
affect the `unsafe` decorator from this module.
"""
return not (
getattr(obj, "unsafe_callable", False) or getattr(obj, "alters_data", False)
)
def call_binop(self, context, operator, left, right):
"""For intercepted binary operator calls (:meth:`intercepted_binops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.binop_table[operator](left, right)
def call_unop(self, context, operator, arg):
"""For intercepted unary operator calls (:meth:`intercepted_unops`)
this function is executed instead of the builtin operator. This can
be used to fine tune the behavior of certain operators.
.. versionadded:: 2.6
"""
return self.unop_table[operator](arg)
def getitem(self, obj, argument):
"""Subscribe an object from sandboxed code."""
try:
return obj[argument]
except (TypeError, LookupError):
if isinstance(argument, str):
try:
attr = str(argument)
except Exception:
pass
else:
try:
value = getattr(obj, attr)
except AttributeError:
pass
else:
if self.is_safe_attribute(obj, argument, value):
return value
return self.unsafe_undefined(obj, argument)
return self.undefined(obj=obj, name=argument)
def getattr(self, obj, attribute):
"""Subscribe an object from sandboxed code and prefer the
attribute. The attribute passed *must* be a bytestring.
"""
try:
value = getattr(obj, attribute)
except AttributeError:
try:
return obj[attribute]
except (TypeError, LookupError):
pass
else:
if self.is_safe_attribute(obj, attribute, value):
return value
return self.unsafe_undefined(obj, attribute)
return self.undefined(obj=obj, name=attribute)
def unsafe_undefined(self, obj, attribute):
"""Return an undefined object for unsafe attributes."""
return self.undefined(
f"access to attribute {attribute!r} of"
f" {obj.__class__.__name__!r} object is unsafe.",
name=attribute,
obj=obj,
exc=SecurityError,
)
def format_string(self, s, args, kwargs, format_func=None):
"""If a format call is detected, then this is routed through this
method so that our safety sandbox can be used for it.
"""
if isinstance(s, Markup):
formatter = SandboxedEscapeFormatter(self, s.escape)
else:
formatter = SandboxedFormatter(self)
if format_func is not None and format_func.__name__ == "format_map":
if len(args) != 1 or kwargs:
raise TypeError(
"format_map() takes exactly one argument"
f" {len(args) + (kwargs is not None)} given"
)
kwargs = args[0]
args = None
kwargs = _MagicFormatMapping(args, kwargs)
rv = formatter.vformat(s, args, kwargs)
return type(s)(rv)
def call(__self, __context, __obj, *args, **kwargs): # noqa: B902
"""Call an object from sandboxed code."""
fmt = inspect_format_method(__obj)
if fmt is not None:
return __self.format_string(fmt, args, kwargs, __obj)
# the double prefixes are to avoid double keyword argument
# errors when proxying the call.
if not __self.is_safe_callable(__obj):
raise SecurityError(f"{__obj!r} is not safely callable")
return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
"""Works exactly like the regular `SandboxedEnvironment` but does not
permit modifications on the builtin mutable objects `list`, `set`, and
`dict` by using the :func:`modifies_known_mutable` function.
"""
def is_safe_attribute(self, obj, attr, value):
if not SandboxedEnvironment.is_safe_attribute(self, obj, attr, value):
return False
return not modifies_known_mutable(obj, attr)
class SandboxedFormatterMixin:
def __init__(self, env):
self._env = env
def get_field(self, field_name, args, kwargs):
first, rest = formatter_field_name_split(field_name)
obj = self.get_value(first, args, kwargs)
for is_attr, i in rest:
if is_attr:
obj = self._env.getattr(obj, i)
else:
obj = self._env.getitem(obj, i)
return obj, first
class SandboxedFormatter(SandboxedFormatterMixin, Formatter):
def __init__(self, env):
SandboxedFormatterMixin.__init__(self, env)
Formatter.__init__(self)
class SandboxedEscapeFormatter(SandboxedFormatterMixin, EscapeFormatter):
def __init__(self, env, escape):
SandboxedFormatterMixin.__init__(self, env)
EscapeFormatter.__init__(self, escape)
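# A short usage sketch (separate from the module above): the sandbox blocks
# unsafe attribute access, and a subclass can intercept any operator listed in
# intercepted_binops. LimitedEnvironment and the exponent cap are hypothetical.
if __name__ == "__main__":

    class LimitedEnvironment(SandboxedEnvironment):
        """Intercept ** so templates cannot compute huge powers."""

        intercepted_binops = frozenset(["**"])

        def call_binop(self, context, operator, left, right):
            if operator == "**" and right > 100:
                raise SecurityError("exponent too large")
            return super().call_binop(context, operator, left, right)

    env = SandboxedEnvironment()
    # Chained access to dunder attributes raises SecurityError, e.g.:
    #   env.from_string("{{ ''.__class__.__mro__ }}").render()
    print(LimitedEnvironment().from_string("{{ 2 ** 8 }}").render())  # -> 256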
|
import asyncio
import asyncio.subprocess  # imported directly as a workaround for https://github.com/PyCQA/pylint/issues/1469
import itertools
import json
import logging
import pathlib
import platform
import re
import shutil
import sys
import tempfile
import time
from typing import ClassVar, Final, List, Optional, Pattern, Tuple
import aiohttp
from tqdm import tqdm
from redbot.core import data_manager
from redbot.core.i18n import Translator
from .errors import LavalinkDownloadFailed
from .utils import task_callback
_ = Translator("Audio", pathlib.Path(__file__))
log = logging.getLogger("red.audio.manager")
JAR_VERSION: Final[str] = "3.3.2.2"
JAR_BUILD: Final[int] = 1170
LAVALINK_DOWNLOAD_URL: Final[str] = (
"https://github.com/Cog-Creators/Lavalink-Jars/releases/download/"
f"{JAR_VERSION}_{JAR_BUILD}/"
"Lavalink.jar"
)
LAVALINK_DOWNLOAD_DIR: Final[pathlib.Path] = data_manager.cog_data_path(raw_name="Audio")
LAVALINK_JAR_FILE: Final[pathlib.Path] = LAVALINK_DOWNLOAD_DIR / "Lavalink.jar"
BUNDLED_APP_YML: Final[pathlib.Path] = pathlib.Path(__file__).parent / "data" / "application.yml"
LAVALINK_APP_YML: Final[pathlib.Path] = LAVALINK_DOWNLOAD_DIR / "application.yml"
_RE_READY_LINE: Final[Pattern] = re.compile(rb"Started Launcher in \S+ seconds")
_FAILED_TO_START: Final[Pattern] = re.compile(rb"Web server failed to start\. (.*)")
_RE_BUILD_LINE: Final[Pattern] = re.compile(rb"Build:\s+(?P<build>\d+)")
# Version regexes
#
# We expect the output to look something like:
# $ java -version
# ...
# ... version "VERSION STRING HERE" ...
# ...
#
# There are two version formats that we might get here:
#
# - Version scheme pre JEP 223 - used by Java 8 and older
#
# examples:
# 1.8.0
# 1.8.0_275
# 1.8.0_272-b10
# 1.8.0_202-internal-201903130451-b08
# 1.8.0_272-ea-202010231715-b10
# 1.8.0_272-ea-b10
#
# Implementation based on J2SE SDK/JRE Version String Naming Convention document:
# https://www.oracle.com/java/technologies/javase/versioning-naming.html
_RE_JAVA_VERSION_LINE_PRE223: Final[Pattern] = re.compile(
r'version "1\.(?P<major>[0-8])\.(?P<minor>0)(?:_(?:\d+))?(?:-.*)?"'
)
# - Version scheme introduced by JEP 223 - used by Java 9 and newer
#
# examples:
# 11
# 11.0.9
# 11.0.9.1
# 11.0.9-ea
# 11.0.9-202011050024
#
# Implementation based on JEP 223 document:
# https://openjdk.java.net/jeps/223
_RE_JAVA_VERSION_LINE_223: Final[Pattern] = re.compile(
r'version "(?P<major>\d+)(?:\.(?P<minor>\d+))?(?:\.\d+)*(\-[a-zA-Z0-9]+)?"'
)
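# Illustrative only: both patterns expose the ``major``/``minor`` named groups
# that ``_get_java_version`` below relies on, e.g. against typical
# ``java -version`` output lines:
#
#   >>> m = _RE_JAVA_VERSION_LINE_223.search('openjdk version "11.0.9" 2020-10-20')
#   >>> (m["major"], m["minor"])
#   ('11', '0')
#   >>> m = _RE_JAVA_VERSION_LINE_PRE223.search('java version "1.8.0_272-b10"')
#   >>> (m["major"], m["minor"])
#   ('8', '0')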
LAVALINK_BRANCH_LINE: Final[Pattern] = re.compile(rb"Branch\s+(?P<branch>[\w\-\d_.]+)")
LAVALINK_JAVA_LINE: Final[Pattern] = re.compile(rb"JVM:\s+(?P<jvm>\d+[.\d+]*)")
LAVALINK_LAVAPLAYER_LINE: Final[Pattern] = re.compile(rb"Lavaplayer\s+(?P<lavaplayer>\d+[.\d+]*)")
LAVALINK_BUILD_TIME_LINE: Final[Pattern] = re.compile(rb"Build time:\s+(?P<build_time>\d+[.\d+]*)")
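# Illustrative only: ``_RE_BUILD_LINE`` and the ``LAVALINK_*`` patterns above
# are matched against the byte output of ``java -jar Lavalink.jar --version``
# and expect lines of roughly this shape (placeholder values, not real build
# metadata):
#
#   Build:       1170
#   Branch       master
#   JVM:         11.0.9
#   Lavaplayer   1.3.66
#   Build time:  02.12.2020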
class ServerManager:
_java_available: ClassVar[Optional[bool]] = None
_java_version: ClassVar[Optional[Tuple[int, int]]] = None
_up_to_date: ClassVar[Optional[bool]] = None
_blacklisted_archs: List[str] = []
_lavaplayer: ClassVar[Optional[str]] = None
_lavalink_build: ClassVar[Optional[int]] = None
_jvm: ClassVar[Optional[str]] = None
_lavalink_branch: ClassVar[Optional[str]] = None
_buildtime: ClassVar[Optional[str]] = None
_java_exc: ClassVar[str] = "java"
def __init__(self) -> None:
self.ready: asyncio.Event = asyncio.Event()
self._proc: Optional[asyncio.subprocess.Process] = None # pylint:disable=no-member
self._monitor_task: Optional[asyncio.Task] = None
self._shutdown: bool = False
@property
def path(self) -> Optional[str]:
return self._java_exc
@property
def jvm(self) -> Optional[str]:
return self._jvm
@property
def lavaplayer(self) -> Optional[str]:
return self._lavaplayer
@property
def ll_build(self) -> Optional[int]:
return self._lavalink_build
@property
def ll_branch(self) -> Optional[str]:
return self._lavalink_branch
@property
def build_time(self) -> Optional[str]:
return self._buildtime
async def start(self, java_path: str) -> None:
arch_name = platform.machine()
self._java_exc = java_path
if arch_name in self._blacklisted_archs:
raise asyncio.CancelledError(
"You are attempting to run Lavalink audio on an unsupported machine architecture."
)
if self._proc is not None:
if self._proc.returncode is None:
raise RuntimeError("Internal Lavalink server is already running")
elif self._shutdown:
raise RuntimeError("Server manager has already been used - create another one")
await self.maybe_download_jar()
# Copy the application.yml across.
# For people to customise their Lavalink server configuration they need to run it
# externally
shutil.copyfile(BUNDLED_APP_YML, LAVALINK_APP_YML)
args = await self._get_jar_args()
self._proc = await asyncio.subprocess.create_subprocess_exec( # pylint:disable=no-member
*args,
cwd=str(LAVALINK_DOWNLOAD_DIR),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
)
log.info("Internal Lavalink server started. PID: %s", self._proc.pid)
try:
await asyncio.wait_for(self._wait_for_launcher(), timeout=120)
except asyncio.TimeoutError:
log.warning("Timeout occurred whilst waiting for internal Lavalink server to be ready")
self._monitor_task = asyncio.create_task(self._monitor())
self._monitor_task.add_done_callback(task_callback)
async def _get_jar_args(self) -> List[str]:
(java_available, java_version) = await self._has_java()
if not java_available:
raise RuntimeError("You must install Java 11 for Lavalink to run.")
return [
self._java_exc,
"-Djdk.tls.client.protocols=TLSv1.2",
"-jar",
str(LAVALINK_JAR_FILE),
]
async def _has_java(self) -> Tuple[bool, Optional[Tuple[int, int]]]:
if self._java_available is not None:
# Return cached value if we've checked this before
return self._java_available, self._java_version
java_exec = shutil.which(self._java_exc)
java_available = java_exec is not None
if not java_available:
            self._java_available = False
            self._java_version = None
else:
self._java_version = version = await self._get_java_version()
self._java_available = (11, 0) <= version < (12, 0)
self._java_exc = java_exec
return self._java_available, self._java_version
async def _get_java_version(self) -> Tuple[int, int]:
"""This assumes we've already checked that java exists."""
_proc: asyncio.subprocess.Process = (
await asyncio.create_subprocess_exec( # pylint:disable=no-member
self._java_exc,
"-version",
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
)
# java -version outputs to stderr
_, err = await _proc.communicate()
version_info: str = err.decode("utf-8")
lines = version_info.splitlines()
for line in lines:
match = _RE_JAVA_VERSION_LINE_PRE223.search(line)
if match is None:
match = _RE_JAVA_VERSION_LINE_223.search(line)
if match is None:
continue
major = int(match["major"])
minor = 0
if minor_str := match["minor"]:
minor = int(minor_str)
return major, minor
raise RuntimeError(f"The output of `{self._java_exc} -version` was unexpected.")
async def _wait_for_launcher(self) -> None:
log.debug("Waiting for Lavalink server to be ready")
lastmessage = 0
for i in itertools.cycle(range(50)):
line = await self._proc.stdout.readline()
if _RE_READY_LINE.search(line):
self.ready.set()
break
if _FAILED_TO_START.search(line):
raise RuntimeError(f"Lavalink failed to start: {line.decode().strip()}")
if self._proc.returncode is not None and lastmessage + 2 < time.time():
                # Avoid console spam; only print once every 2 seconds.
                lastmessage = time.time()
                log.critical("Internal Lavalink server exited early")
if i == 49:
# Sleep after 50 lines to prevent busylooping
await asyncio.sleep(0.1)
async def _monitor(self) -> None:
while self._proc.returncode is None:
await asyncio.sleep(0.5)
# This task hasn't been cancelled - Lavalink was shut down by something else
log.info("Internal Lavalink jar shutdown unexpectedly")
if not self._has_java_error():
log.info("Restarting internal Lavalink server")
await self.start(self._java_exc)
else:
log.critical(
"Your Java is borked. Please find the hs_err_pid%d.log file"
" in the Audio data folder and report this issue.",
self._proc.pid,
)
def _has_java_error(self) -> bool:
poss_error_file = LAVALINK_DOWNLOAD_DIR / "hs_err_pid{}.log".format(self._proc.pid)
return poss_error_file.exists()
async def shutdown(self) -> None:
if self._shutdown is True or self._proc is None:
# For convenience, calling this method more than once or calling it before starting it
# does nothing.
return
log.info("Shutting down internal Lavalink server")
if self._monitor_task is not None:
self._monitor_task.cancel()
self._proc.terminate()
await self._proc.wait()
self._shutdown = True
async def _download_jar(self) -> None:
log.info("Downloading Lavalink.jar...")
async with aiohttp.ClientSession(json_serialize=json.dumps) as session:
async with session.get(LAVALINK_DOWNLOAD_URL) as response:
if response.status == 404:
# A 404 means our LAVALINK_DOWNLOAD_URL is invalid, so likely the jar version
# hasn't been published yet
raise LavalinkDownloadFailed(
f"Lavalink jar version {JAR_VERSION}_{JAR_BUILD} hasn't been published "
f"yet",
response=response,
should_retry=False,
)
elif 400 <= response.status < 600:
                    # Other bad responses should be raised, but we should retry just in case
raise LavalinkDownloadFailed(response=response, should_retry=True)
fd, path = tempfile.mkstemp()
file = open(fd, "wb")
nbytes = 0
with tqdm(
desc="Lavalink.jar",
total=response.content_length,
file=sys.stdout,
unit="B",
unit_scale=True,
miniters=1,
dynamic_ncols=True,
leave=False,
) as progress_bar:
try:
chunk = await response.content.read(1024)
while chunk:
chunk_size = file.write(chunk)
nbytes += chunk_size
progress_bar.update(chunk_size)
chunk = await response.content.read(1024)
file.flush()
finally:
file.close()
shutil.move(path, str(LAVALINK_JAR_FILE), copy_function=shutil.copyfile)
log.info("Successfully downloaded Lavalink.jar (%s bytes written)", format(nbytes, ","))
await self._is_up_to_date()
async def _is_up_to_date(self):
if self._up_to_date is True:
# Return cached value if we've checked this before
return True
args = await self._get_jar_args()
args.append("--version")
_proc = await asyncio.subprocess.create_subprocess_exec( # pylint:disable=no-member
*args,
cwd=str(LAVALINK_DOWNLOAD_DIR),
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.STDOUT,
)
stdout = (await _proc.communicate())[0]
if (build := _RE_BUILD_LINE.search(stdout)) is None:
# Output is unexpected, suspect corrupted jarfile
return False
if (branch := LAVALINK_BRANCH_LINE.search(stdout)) is None:
# Output is unexpected, suspect corrupted jarfile
return False
if (java := LAVALINK_JAVA_LINE.search(stdout)) is None:
# Output is unexpected, suspect corrupted jarfile
return False
if (lavaplayer := LAVALINK_LAVAPLAYER_LINE.search(stdout)) is None:
# Output is unexpected, suspect corrupted jarfile
return False
if (buildtime := LAVALINK_BUILD_TIME_LINE.search(stdout)) is None:
# Output is unexpected, suspect corrupted jarfile
return False
build = int(build["build"])
date = buildtime["build_time"].decode()
date = date.replace(".", "/")
self._lavalink_build = build
self._lavalink_branch = branch["branch"].decode()
self._jvm = java["jvm"].decode()
self._lavaplayer = lavaplayer["lavaplayer"].decode()
self._buildtime = date
self._up_to_date = build >= JAR_BUILD
return self._up_to_date
async def maybe_download_jar(self):
if not (LAVALINK_JAR_FILE.exists() and await self._is_up_to_date()):
await self._download_jar()
|
import json
import logging
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.components.notify import PLATFORM_SCHEMA, BaseNotificationService
from homeassistant.const import (
CONF_API_KEY,
CONF_RECIPIENT,
CONF_SENDER,
CONF_USERNAME,
CONTENT_TYPE_JSON,
HTTP_OK,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
BASE_API_URL = "https://rest.clicksend.com/v3"
DEFAULT_SENDER = "hass"
TIMEOUT = 5
HEADERS = {CONTENT_TYPE: CONTENT_TYPE_JSON}
PLATFORM_SCHEMA = vol.Schema(
vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_RECIPIENT, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_SENDER, default=DEFAULT_SENDER): cv.string,
}
)
)
)
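# A minimal ``configuration.yaml`` sketch matching the schema above (all
# values are placeholders):
#
#   notify:
#     - platform: clicksend
#       username: YOUR_CLICKSEND_USERNAME
#       api_key: YOUR_CLICKSEND_API_KEY
#       recipient: "+15551234567"
#       sender: hass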
def get_service(hass, config, discovery_info=None):
"""Get the ClickSend notification service."""
if not _authenticate(config):
_LOGGER.error("You are not authorized to access ClickSend")
return None
return ClicksendNotificationService(config)
class ClicksendNotificationService(BaseNotificationService):
"""Implementation of a notification service for the ClickSend service."""
def __init__(self, config):
"""Initialize the service."""
self.username = config[CONF_USERNAME]
self.api_key = config[CONF_API_KEY]
self.recipients = config[CONF_RECIPIENT]
self.sender = config[CONF_SENDER]
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
data = {"messages": []}
for recipient in self.recipients:
data["messages"].append(
{
"source": "hass.notify",
"from": self.sender,
"to": recipient,
"body": message,
}
)
api_url = f"{BASE_API_URL}/sms/send"
resp = requests.post(
api_url,
data=json.dumps(data),
headers=HEADERS,
auth=(self.username, self.api_key),
timeout=TIMEOUT,
)
if resp.status_code == HTTP_OK:
return
obj = json.loads(resp.text)
response_msg = obj.get("response_msg")
response_code = obj.get("response_code")
_LOGGER.error(
"Error %s : %s (Code %s)", resp.status_code, response_msg, response_code
)
def _authenticate(config):
"""Authenticate with ClickSend."""
api_url = f"{BASE_API_URL}/account"
resp = requests.get(
api_url,
headers=HEADERS,
auth=(config[CONF_USERNAME], config[CONF_API_KEY]),
timeout=TIMEOUT,
)
if resp.status_code != HTTP_OK:
return False
return True
|
import voluptuous as vol
from homeassistant.components.switch import SwitchEntity
from . import DOMAIN, PLATFORM_SCHEMA, XBeeDigitalOut, XBeeDigitalOutConfig
CONF_ON_STATE = "on_state"
DEFAULT_ON_STATE = "high"
STATES = ["high", "low"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Optional(CONF_ON_STATE): vol.In(STATES)})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the XBee Zigbee switch platform."""
zigbee_device = hass.data[DOMAIN]
add_entities([XBeeSwitch(XBeeDigitalOutConfig(config), zigbee_device)])
class XBeeSwitch(XBeeDigitalOut, SwitchEntity):
"""Representation of a XBee Zigbee Digital Out device."""
|
from homeassistant.components.asuswrt import (
CONF_DNSMASQ,
CONF_INTERFACE,
DATA_ASUSWRT,
DOMAIN,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
from homeassistant.setup import async_setup_component
from tests.async_mock import AsyncMock, patch
async def test_password_or_pub_key_required(hass):
"""Test creating an AsusWRT scanner without a pass or pubkey."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().is_connected = False
result = await async_setup_component(
hass, DOMAIN, {DOMAIN: {CONF_HOST: "fake_host", CONF_USERNAME: "fake_user"}}
)
assert not result
async def test_network_unreachable(hass):
"""Test creating an AsusWRT scanner without a pass or pubkey."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock(side_effect=OSError)
AsusWrt().is_connected = False
result = await async_setup_component(
hass, DOMAIN, {DOMAIN: {CONF_HOST: "fake_host", CONF_USERNAME: "fake_user"}}
)
assert result
assert hass.data.get(DATA_ASUSWRT) is None
async def test_get_scanner_with_password_no_pubkey(hass):
"""Test creating an AsusWRT scanner with a password and no pubkey."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().connection.async_get_connected_devices = AsyncMock(return_value={})
result = await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "fake_host",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "4321",
CONF_DNSMASQ: "/",
}
},
)
assert result
assert hass.data[DATA_ASUSWRT] is not None
async def test_specify_non_directory_path_for_dnsmasq(hass):
"""Test creating an AsusWRT scanner with a dnsmasq location which is not a valid directory."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().is_connected = False
result = await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "fake_host",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "4321",
CONF_DNSMASQ: 1234,
}
},
)
assert not result
async def test_interface(hass):
"""Test creating an AsusWRT scanner using interface eth1."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().connection.async_get_connected_devices = AsyncMock(return_value={})
result = await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "fake_host",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "4321",
CONF_DNSMASQ: "/",
CONF_INTERFACE: "eth1",
}
},
)
assert result
assert hass.data[DATA_ASUSWRT] is not None
async def test_no_interface(hass):
"""Test creating an AsusWRT scanner using no interface."""
with patch("homeassistant.components.asuswrt.AsusWrt") as AsusWrt:
AsusWrt().connection.async_connect = AsyncMock()
AsusWrt().is_connected = False
result = await async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {
CONF_HOST: "fake_host",
CONF_USERNAME: "fake_user",
CONF_PASSWORD: "4321",
CONF_DNSMASQ: "/",
CONF_INTERFACE: None,
}
},
)
assert not result
|
import asyncio
import contextlib
import datetime
import functools
import json
import logging
import re
from pathlib import Path
from typing import Any, Final, Mapping, MutableMapping, Pattern, Union, cast
import discord
import lavalink
from discord.embeds import EmptyEmbed
from redbot.core import bank, commands
from redbot.core.commands import Context
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import humanize_number
from ...apis.playlist_interface import get_all_playlist_for_migration23
from ...utils import PlaylistScope, task_callback
from ..abc import MixinMeta
from ..cog_utils import CompositeMetaClass
log = logging.getLogger("red.cogs.Audio.cog.Utilities.miscellaneous")
_ = Translator("Audio", Path(__file__))
_RE_TIME_CONVERTER: Final[Pattern] = re.compile(r"(?:(\d+):)?([0-5]?[0-9]):([0-5][0-9])")
_prefer_lyrics_cache = {}
class MiscellaneousUtilities(MixinMeta, metaclass=CompositeMetaClass):
async def _clear_react(
self, message: discord.Message, emoji: MutableMapping = None
) -> asyncio.Task:
"""Non blocking version of clear_react."""
task = self.bot.loop.create_task(self.clear_react(message, emoji))
task.add_done_callback(task_callback)
return task
async def maybe_charge_requester(self, ctx: commands.Context, jukebox_price: int) -> bool:
jukebox = await self.config.guild(ctx.guild).jukebox()
if jukebox and not await self._can_instaskip(ctx, ctx.author):
can_spend = await bank.can_spend(ctx.author, jukebox_price)
if can_spend:
await bank.withdraw_credits(ctx.author, jukebox_price)
else:
credits_name = await bank.get_currency_name(ctx.guild)
bal = await bank.get_balance(ctx.author)
await self.send_embed_msg(
ctx,
title=_("Not enough {currency}").format(currency=credits_name),
description=_(
"{required_credits} {currency} required, but you have {bal}."
).format(
currency=credits_name,
required_credits=humanize_number(jukebox_price),
bal=humanize_number(bal),
),
)
return can_spend
else:
return True
async def send_embed_msg(
self, ctx: commands.Context, author: Mapping[str, str] = None, **kwargs
) -> discord.Message:
colour = kwargs.get("colour") or kwargs.get("color") or await self.bot.get_embed_color(ctx)
title = kwargs.get("title", EmptyEmbed) or EmptyEmbed
_type = kwargs.get("type", "rich") or "rich"
url = kwargs.get("url", EmptyEmbed) or EmptyEmbed
description = kwargs.get("description", EmptyEmbed) or EmptyEmbed
timestamp = kwargs.get("timestamp")
footer = kwargs.get("footer")
thumbnail = kwargs.get("thumbnail")
contents = dict(title=title, type=_type, url=url, description=description)
if hasattr(kwargs.get("embed"), "to_dict"):
embed = kwargs.get("embed")
if embed is not None:
embed = embed.to_dict()
else:
embed = {}
colour = embed.get("color") if embed.get("color") else colour
contents.update(embed)
if timestamp and isinstance(timestamp, datetime.datetime):
contents["timestamp"] = timestamp
embed = discord.Embed.from_dict(contents)
embed.color = colour
if footer:
embed.set_footer(text=footer)
if thumbnail:
embed.set_thumbnail(url=thumbnail)
if author:
name = author.get("name")
url = author.get("url")
if name and url:
embed.set_author(name=name, icon_url=url)
elif name:
embed.set_author(name=name)
return await ctx.send(embed=embed)
async def maybe_run_pending_db_tasks(self, ctx: commands.Context) -> None:
if self.api_interface is not None:
await self.api_interface.run_tasks(ctx)
async def _close_database(self) -> None:
if self.api_interface is not None:
await self.api_interface.run_all_pending_tasks()
self.api_interface.close()
async def _check_api_tokens(self) -> MutableMapping:
spotify = await self.bot.get_shared_api_tokens("spotify")
youtube = await self.bot.get_shared_api_tokens("youtube")
return {
"spotify_client_id": spotify.get("client_id", ""),
"spotify_client_secret": spotify.get("client_secret", ""),
"youtube_api": youtube.get("api_key", ""),
}
async def update_external_status(self) -> bool:
external = await self.config.use_external_lavalink()
if not external:
if self.player_manager is not None:
await self.player_manager.shutdown()
await self.config.use_external_lavalink.set(True)
return True
else:
return False
def rsetattr(self, obj, attr, val) -> None:
pre, _, post = attr.rpartition(".")
setattr(self.rgetattr(obj, pre) if pre else obj, post, val)
def rgetattr(self, obj, attr, *args) -> Any:
def _getattr(obj2, attr2):
return getattr(obj2, attr2, *args)
return functools.reduce(_getattr, [obj] + attr.split("."))
async def remove_react(
self,
message: discord.Message,
react_emoji: Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str],
react_user: discord.abc.User,
) -> None:
with contextlib.suppress(discord.HTTPException):
await message.remove_reaction(react_emoji, react_user)
async def clear_react(self, message: discord.Message, emoji: MutableMapping = None) -> None:
try:
await message.clear_reactions()
except discord.Forbidden:
if not emoji:
return
with contextlib.suppress(discord.HTTPException):
async for key in AsyncIter(emoji.values(), delay=0.2):
await message.remove_reaction(key, self.bot.user)
except discord.HTTPException:
return
def get_track_json(
self,
player: lavalink.Player,
position: Union[int, str] = None,
other_track: lavalink.Track = None,
) -> MutableMapping:
if position == "np":
queued_track = player.current
elif position is None:
queued_track = other_track
else:
queued_track = player.queue[position]
return self.track_to_json(queued_track)
def track_to_json(self, track: lavalink.Track) -> MutableMapping:
track_keys = track._info.keys()
track_values = track._info.values()
track_id = track.track_identifier
track_info = {}
for k, v in zip(track_keys, track_values):
track_info[k] = v
keys = ["track", "info", "extras"]
values = [track_id, track_info]
track_obj = {}
for key, value in zip(keys, values):
track_obj[key] = value
return track_obj
def time_convert(self, length: Union[int, str]) -> int:
if isinstance(length, int):
return length
match = _RE_TIME_CONVERTER.match(length)
if match is not None:
hr = int(match.group(1)) if match.group(1) else 0
mn = int(match.group(2)) if match.group(2) else 0
sec = int(match.group(3)) if match.group(3) else 0
pos = sec + (mn * 60) + (hr * 3600)
return pos
else:
try:
return int(length)
except ValueError:
return 0
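    # Illustrative only: ``time_convert`` accepts either a number of seconds or
    # an ``[HH:]MM:SS`` string and always returns seconds, e.g.
    # ``time_convert("1:23:45")`` -> 5025 and ``time_convert("02:30")`` -> 150,
    # while non-matching strings fall back to ``int()`` or 0.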
async def queue_duration(self, ctx: commands.Context) -> int:
player = lavalink.get_player(ctx.guild.id)
dur = [
i.length
async for i in AsyncIter(player.queue, steps=50).filter(lambda x: not x.is_stream)
]
queue_dur = sum(dur)
if not player.queue:
queue_dur = 0
try:
if not player.current.is_stream:
remain = player.current.length - player.position
else:
remain = 0
except AttributeError:
remain = 0
queue_total_duration = remain + queue_dur
return queue_total_duration
async def track_remaining_duration(self, ctx: commands.Context) -> int:
player = lavalink.get_player(ctx.guild.id)
if not player.current:
return 0
try:
if not player.current.is_stream:
remain = player.current.length - player.position
else:
remain = 0
except AttributeError:
remain = 0
return remain
def get_time_string(self, seconds: int) -> str:
m, s = divmod(seconds, 60)
h, m = divmod(m, 60)
d, h = divmod(h, 24)
if d > 0:
msg = "{0}d {1}h"
elif d == 0 and h > 0:
msg = "{1}h {2}m"
elif d == 0 and h == 0 and m > 0:
msg = "{2}m {3}s"
elif d == 0 and h == 0 and m == 0 and s > 0:
msg = "{3}s"
else:
msg = ""
return msg.format(d, h, m, s)
def format_time(self, time: int) -> str:
""" Formats the given time into DD:HH:MM:SS """
seconds = time / 1000
days, seconds = divmod(seconds, 24 * 60 * 60)
hours, seconds = divmod(seconds, 60 * 60)
minutes, seconds = divmod(seconds, 60)
day = ""
hour = ""
if days:
day = "%02d:" % days
if hours or day:
hour = "%02d:" % hours
minutes = "%02d:" % minutes
sec = "%02d" % seconds
return f"{day}{hour}{minutes}{sec}"
async def get_lyrics_status(self, ctx: Context) -> bool:
global _prefer_lyrics_cache
prefer_lyrics = _prefer_lyrics_cache.setdefault(
ctx.guild.id, await self.config.guild(ctx.guild).prefer_lyrics()
)
return prefer_lyrics
async def data_schema_migration(self, from_version: int, to_version: int) -> None:
database_entries = []
time_now = int(datetime.datetime.now(datetime.timezone.utc).timestamp())
if from_version == to_version:
return
if from_version < 2 <= to_version:
all_guild_data = await self.config.all_guilds()
all_playlist = {}
async for guild_id, guild_data in AsyncIter(all_guild_data.items()):
temp_guild_playlist = guild_data.pop("playlists", None)
if temp_guild_playlist:
guild_playlist = {}
async for count, (name, data) in AsyncIter(
temp_guild_playlist.items()
).enumerate(start=1000):
if not data or not name:
continue
playlist = {"id": count, "name": name, "guild": int(guild_id)}
playlist.update(data)
guild_playlist[str(count)] = playlist
tracks_in_playlist = data.get("tracks", []) or []
async for t in AsyncIter(tracks_in_playlist):
uri = t.get("info", {}).get("uri")
if uri:
t = {"loadType": "V2_COMPAT", "tracks": [t], "query": uri}
data = json.dumps(t)
if all(
k in data
for k in ["loadType", "playlistInfo", "isSeekable", "isStream"]
):
database_entries.append(
{
"query": uri,
"data": data,
"last_updated": time_now,
"last_fetched": time_now,
}
)
if guild_playlist:
all_playlist[str(guild_id)] = guild_playlist
await self.config.custom(PlaylistScope.GUILD.value).set(all_playlist)
# new schema is now in place
await self.config.schema_version.set(2)
# migration done, now let's delete all the old stuff
async for guild_id in AsyncIter(all_guild_data):
await self.config.guild(
cast(discord.Guild, discord.Object(id=guild_id))
).clear_raw("playlists")
if from_version < 3 <= to_version:
for scope in PlaylistScope.list():
scope_playlist = await get_all_playlist_for_migration23(
self.bot, self.playlist_api, self.config, scope
)
async for p in AsyncIter(scope_playlist):
await p.save()
await self.config.custom(scope).clear()
await self.config.schema_version.set(3)
if database_entries:
await self.api_interface.local_cache_api.lavalink.insert(database_entries)
|
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
PERCENTAGE,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from . import DOMAIN, UPDATE_TOPIC
TEMP_UNITS = [TEMP_CELSIUS, TEMP_FAHRENHEIT]
PERCENT_UNITS = [PERCENTAGE, PERCENTAGE]
SALT_UNITS = ["g/L", "PPM"]
WATT_UNITS = [POWER_WATT, POWER_WATT]
NO_UNITS = [None, None]
# sensor_type [ description, unit, icon ]
# sensor_type corresponds to property names in aqualogic.core.AquaLogic
SENSOR_TYPES = {
"air_temp": ["Air Temperature", TEMP_UNITS, "mdi:thermometer"],
"pool_temp": ["Pool Temperature", TEMP_UNITS, "mdi:oil-temperature"],
"spa_temp": ["Spa Temperature", TEMP_UNITS, "mdi:oil-temperature"],
"pool_chlorinator": ["Pool Chlorinator", PERCENT_UNITS, "mdi:gauge"],
"spa_chlorinator": ["Spa Chlorinator", PERCENT_UNITS, "mdi:gauge"],
"salt_level": ["Salt Level", SALT_UNITS, "mdi:gauge"],
"pump_speed": ["Pump Speed", PERCENT_UNITS, "mdi:speedometer"],
"pump_power": ["Pump Power", WATT_UNITS, "mdi:gauge"],
"status": ["Status", NO_UNITS, "mdi:alert"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
)
}
)
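# A minimal ``configuration.yaml`` sketch matching the schema above (the
# ``monitored_conditions`` default is every key in SENSOR_TYPES):
#
#   sensor:
#     - platform: aqualogic
#       monitored_conditions:
#         - air_temp
#         - pool_temp
#         - salt_level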
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the sensor platform."""
sensors = []
processor = hass.data[DOMAIN]
for sensor_type in config[CONF_MONITORED_CONDITIONS]:
sensors.append(AquaLogicSensor(processor, sensor_type))
async_add_entities(sensors)
class AquaLogicSensor(Entity):
"""Sensor implementation for the AquaLogic component."""
def __init__(self, processor, sensor_type):
"""Initialize sensor."""
self._processor = processor
self._type = sensor_type
self._state = None
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return f"AquaLogic {SENSOR_TYPES[self._type][0]}"
@property
def unit_of_measurement(self):
"""Return the unit of measurement the value is expressed in."""
panel = self._processor.panel
if panel is None:
return None
if panel.is_metric:
return SENSOR_TYPES[self._type][1][0]
return SENSOR_TYPES[self._type][1][1]
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return SENSOR_TYPES[self._type][2]
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
UPDATE_TOPIC, self.async_update_callback
)
)
@callback
def async_update_callback(self):
"""Update callback."""
panel = self._processor.panel
if panel is not None:
self._state = getattr(panel, self._type)
self.async_write_ha_state()
|
import json
from absl import flags
from perfkitbenchmarker import resource
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
class TargetGroup(resource.BaseResource):
"""Class represeting an AWS target group."""
def __init__(self, vpc, port):
"""Initializes the TargetGroup object.
Args:
vpc: AwsVpc object which contains the targets for load balancing.
port: The internal port that the load balancer connects to.
"""
super(TargetGroup, self).__init__()
self.arn = None
self.region = vpc.region
self.name = 'pkb-%s' % FLAGS.run_uri
self.protocol = 'TCP'
self.port = port
self.vpc_id = vpc.id
def _Create(self):
"""Create the target group."""
create_cmd = util.AWS_PREFIX + [
'--region', self.region,
'elbv2', 'create-target-group',
'--target-type', 'ip',
'--name', self.name,
'--protocol', self.protocol,
'--port', str(self.port),
'--vpc-id', self.vpc_id
]
stdout, _, _ = vm_util.IssueCommand(create_cmd)
response = json.loads(stdout)
self.arn = response['TargetGroups'][0]['TargetGroupArn']
def _Delete(self):
"""Delete the target group."""
if self.arn is None:
return
delete_cmd = util.AWS_PREFIX + [
'--region', self.region,
'elbv2', 'delete-target-group',
'--target-group-arn', self.arn
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
class LoadBalancer(resource.BaseResource):
"""Class representing an AWS load balancer."""
def __init__(self, subnets):
"""Initializes the LoadBalancer object.
Args:
subnets: List of AwsSubnet objects.
"""
super(LoadBalancer, self).__init__()
self.region = subnets[0].region
self.name = 'pkb-%s' % FLAGS.run_uri
self.subnet_ids = [subnet.id for subnet in subnets]
self.type = 'network'
self.arn = None
self.dns_name = None
def _Create(self):
"""Create the load balancer."""
create_cmd = util.AWS_PREFIX + [
'--region', self.region,
'elbv2', 'create-load-balancer',
'--name', self.name,
'--type', self.type,
'--tags'] + util.MakeFormattedDefaultTags()
# Add --subnets argument to the command.
create_cmd.append('--subnets')
create_cmd.extend(self.subnet_ids)
stdout, _, _ = vm_util.IssueCommand(create_cmd)
load_balancer = json.loads(stdout)['LoadBalancers'][0]
self.arn = load_balancer['LoadBalancerArn']
self.dns_name = load_balancer['DNSName']
def _Delete(self):
"""Delete the load balancer."""
if self.arn is None:
return
delete_cmd = util.AWS_PREFIX + [
'--region', self.region,
'elbv2', 'delete-load-balancer',
'--load-balancer-arn', self.arn
]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
class Listener(resource.BaseResource):
"""Class representing an AWS listener."""
def __init__(self, load_balancer, target_group, port):
super(Listener, self).__init__()
self.load_balancer_arn = load_balancer.arn
self.target_group_arn = target_group.arn
self.port = port
self.protocol = target_group.protocol
self.region = target_group.region
def _GetDefaultActions(self):
"""Returns a JSON representation of the default actions for the listener."""
actions = [{
'Type': 'forward',
'TargetGroupArn': self.target_group_arn
}]
return json.dumps(actions)
def _Create(self):
"""Create the listener."""
create_cmd = util.AWS_PREFIX + [
'--region', self.region,
'elbv2', 'create-listener',
'--load-balancer-arn', self.load_balancer_arn,
'--protocol', self.protocol,
'--port', str(self.port),
'--default-actions', self._GetDefaultActions()
]
vm_util.IssueCommand(create_cmd)
def _Delete(self):
"""Listeners will be deleted along with their associated load balancers."""
pass
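# A hypothetical usage sketch (``vpc`` and ``subnets`` are assumed to be
# existing AwsVpc/AwsSubnet objects; BaseResource's public Create()/Delete()
# wrappers drive the _Create()/_Delete() hooks defined above):
#
#   target_group = TargetGroup(vpc, port=8080)
#   target_group.Create()
#   load_balancer = LoadBalancer(subnets)
#   load_balancer.Create()
#   listener = Listener(load_balancer, target_group, port=80)
#   listener.Create()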
|
from homeassistant.core import State
from tests.common import async_mock_service
async def test_reproducing_states(hass, caplog):
"""Test reproducing Switch states."""
hass.states.async_set("switch.entity_off", "off", {})
hass.states.async_set("switch.entity_on", "on", {})
turn_on_calls = async_mock_service(hass, "switch", "turn_on")
turn_off_calls = async_mock_service(hass, "switch", "turn_off")
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[State("switch.entity_off", "off"), State("switch.entity_on", "on", {})],
)
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("switch.entity_off", "not_supported")]
)
assert "not_supported" in caplog.text
assert len(turn_on_calls) == 0
assert len(turn_off_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("switch.entity_on", "off"),
State("switch.entity_off", "on", {}),
# Should not raise
State("switch.non_existing", "on"),
]
)
assert len(turn_on_calls) == 1
assert turn_on_calls[0].domain == "switch"
assert turn_on_calls[0].data == {"entity_id": "switch.entity_off"}
assert len(turn_off_calls) == 1
assert turn_off_calls[0].domain == "switch"
assert turn_off_calls[0].data == {"entity_id": "switch.entity_on"}
|
from datetime import timedelta
from typing import Callable, Union
from pyisy.constants import (
CMD_OFF,
CMD_ON,
ISY_VALUE_UNKNOWN,
PROTO_INSTEON,
PROTO_ZWAVE,
)
from pyisy.nodes import Group, Node
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_COLD,
DEVICE_CLASS_HEAT,
DEVICE_CLASS_LIGHT,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
DEVICE_CLASS_OPENING,
DEVICE_CLASS_PROBLEM,
DOMAIN as BINARY_SENSOR,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers.event import async_track_point_in_utc_time
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from .const import (
_LOGGER,
BINARY_SENSOR_DEVICE_TYPES_ISY,
BINARY_SENSOR_DEVICE_TYPES_ZWAVE,
DOMAIN as ISY994_DOMAIN,
ISY994_NODES,
ISY994_PROGRAMS,
SUBNODE_CLIMATE_COOL,
SUBNODE_CLIMATE_HEAT,
SUBNODE_DUSK_DAWN,
SUBNODE_HEARTBEAT,
SUBNODE_LOW_BATTERY,
SUBNODE_MOTION_DISABLED,
SUBNODE_NEGATIVE,
SUBNODE_TAMPER,
TYPE_CATEGORY_CLIMATE,
TYPE_INSTEON_MOTION,
)
from .entity import ISYNodeEntity, ISYProgramEntity
from .helpers import migrate_old_unique_ids
DEVICE_PARENT_REQUIRED = [
DEVICE_CLASS_OPENING,
DEVICE_CLASS_MOISTURE,
DEVICE_CLASS_MOTION,
]
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 binary sensor platform."""
devices = []
devices_by_address = {}
child_nodes = []
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
for node in hass_isy_data[ISY994_NODES][BINARY_SENSOR]:
device_class, device_type = _detect_device_type_and_class(node)
if node.protocol == PROTO_INSTEON:
if node.parent_node is not None:
# We'll process the Insteon child nodes last, to ensure all parent
# nodes have been processed
child_nodes.append((node, device_class, device_type))
continue
device = ISYInsteonBinarySensorEntity(node, device_class)
else:
device = ISYBinarySensorEntity(node, device_class)
devices.append(device)
devices_by_address[node.address] = device
# Handle some special child node cases for Insteon Devices
for (node, device_class, device_type) in child_nodes:
subnode_id = int(node.address.split(" ")[-1], 16)
# Handle Insteon Thermostats
if device_type.startswith(TYPE_CATEGORY_CLIMATE):
if subnode_id == SUBNODE_CLIMATE_COOL:
# Subnode 2 is the "Cool Control" sensor
# It never reports its state until first use is
# detected after an ISY Restart, so we assume it's off.
                # As soon as the ISY Event Stream connects, if it has a
                # valid state, it will be set.
device = ISYInsteonBinarySensorEntity(node, DEVICE_CLASS_COLD, False)
devices.append(device)
elif subnode_id == SUBNODE_CLIMATE_HEAT:
# Subnode 3 is the "Heat Control" sensor
device = ISYInsteonBinarySensorEntity(node, DEVICE_CLASS_HEAT, False)
devices.append(device)
continue
if device_class in DEVICE_PARENT_REQUIRED:
parent_device = devices_by_address.get(node.parent_node.address)
if not parent_device:
_LOGGER.error(
"Node %s has a parent node %s, but no device "
"was created for the parent. Skipping",
node.address,
node.parent_node,
)
continue
if device_class in (DEVICE_CLASS_OPENING, DEVICE_CLASS_MOISTURE):
# These sensors use an optional "negative" subnode 2 to
# snag all state changes
if subnode_id == SUBNODE_NEGATIVE:
parent_device.add_negative_node(node)
elif subnode_id == SUBNODE_HEARTBEAT:
# Subnode 4 is the heartbeat node, which we will
# represent as a separate binary_sensor
device = ISYBinarySensorHeartbeat(node, parent_device)
parent_device.add_heartbeat_device(device)
devices.append(device)
continue
if (
device_class == DEVICE_CLASS_MOTION
and device_type is not None
and any([device_type.startswith(t) for t in TYPE_INSTEON_MOTION])
):
# Special cases for Insteon Motion Sensors I & II:
# Some subnodes never report status until activated, so
# the initial state is forced "OFF"/"NORMAL" if the
# parent device has a valid state. This is corrected
# upon connection to the ISY event stream if subnode has a valid state.
initial_state = None if parent_device.state is None else False
if subnode_id == SUBNODE_DUSK_DAWN:
# Subnode 2 is the Dusk/Dawn sensor
device = ISYInsteonBinarySensorEntity(node, DEVICE_CLASS_LIGHT)
devices.append(device)
continue
if subnode_id == SUBNODE_LOW_BATTERY:
# Subnode 3 is the low battery node
device = ISYInsteonBinarySensorEntity(
node, DEVICE_CLASS_BATTERY, initial_state
)
devices.append(device)
continue
if subnode_id in SUBNODE_TAMPER:
                # Tamper sub-node for MS II. Sometimes reported as "A", sometimes
                # as "10", which translate from hex to 10 and 16 respectively.
device = ISYInsteonBinarySensorEntity(
node, DEVICE_CLASS_PROBLEM, initial_state
)
devices.append(device)
continue
if subnode_id in SUBNODE_MOTION_DISABLED:
# Motion Disabled Sub-node for MS II ("D" or "13")
device = ISYInsteonBinarySensorEntity(node)
devices.append(device)
continue
# We don't yet have any special logic for other sensor
# types, so add the nodes as individual devices
device = ISYBinarySensorEntity(node, device_class)
devices.append(device)
for name, status, _ in hass_isy_data[ISY994_PROGRAMS][BINARY_SENSOR]:
devices.append(ISYBinarySensorProgramEntity(name, status))
await migrate_old_unique_ids(hass, BINARY_SENSOR, devices)
async_add_entities(devices)
def _detect_device_type_and_class(node: Union[Group, Node]) -> (str, str):
try:
device_type = node.type
except AttributeError:
# The type attribute didn't exist in the ISY's API response
return (None, None)
# Z-Wave Devices:
if node.protocol == PROTO_ZWAVE:
device_type = f"Z{node.zwave_props.category}"
for device_class in [*BINARY_SENSOR_DEVICE_TYPES_ZWAVE]:
if (
node.zwave_props.category
in BINARY_SENSOR_DEVICE_TYPES_ZWAVE[device_class]
):
return device_class, device_type
return (None, device_type)
    # Other devices (incl. Insteon).
for device_class in [*BINARY_SENSOR_DEVICE_TYPES_ISY]:
if any(
[
device_type.startswith(t)
for t in set(BINARY_SENSOR_DEVICE_TYPES_ISY[device_class])
]
):
return device_class, device_type
return (None, device_type)
class ISYBinarySensorEntity(ISYNodeEntity, BinarySensorEntity):
"""Representation of a basic ISY994 binary sensor device."""
def __init__(self, node, force_device_class=None, unknown_state=None) -> None:
"""Initialize the ISY994 binary sensor device."""
super().__init__(node)
self._device_class = force_device_class
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on."""
if self._node.status == ISY_VALUE_UNKNOWN:
return None
return bool(self._node.status)
@property
def device_class(self) -> str:
"""Return the class of this device.
This was discovered by parsing the device type code during init
"""
return self._device_class
class ISYInsteonBinarySensorEntity(ISYBinarySensorEntity):
"""Representation of an ISY994 Insteon binary sensor device.
    Often, a single device is represented by multiple nodes in the ISY,
allowing for different nuances in how those devices report their on and
off events. This class turns those multiple nodes into a single Home
Assistant entity and handles both ways that ISY binary sensors can work.
"""
def __init__(self, node, force_device_class=None, unknown_state=None) -> None:
"""Initialize the ISY994 binary sensor device."""
super().__init__(node, force_device_class)
self._negative_node = None
self._heartbeat_device = None
if self._node.status == ISY_VALUE_UNKNOWN:
self._computed_state = unknown_state
self._status_was_unknown = True
else:
self._computed_state = bool(self._node.status)
self._status_was_unknown = False
async def async_added_to_hass(self) -> None:
"""Subscribe to the node and subnode event emitters."""
await super().async_added_to_hass()
self._node.control_events.subscribe(self._positive_node_control_handler)
if self._negative_node is not None:
self._negative_node.control_events.subscribe(
self._negative_node_control_handler
)
def add_heartbeat_device(self, device) -> None:
"""Register a heartbeat device for this sensor.
The heartbeat node beats on its own, but we can gain a little
reliability by considering any node activity for this sensor
to be a heartbeat as well.
"""
self._heartbeat_device = device
def _heartbeat(self) -> None:
"""Send a heartbeat to our heartbeat device, if we have one."""
if self._heartbeat_device is not None:
self._heartbeat_device.heartbeat()
def add_negative_node(self, child) -> None:
"""Add a negative node to this binary sensor device.
The negative node is a node that can receive the 'off' events
for the sensor, depending on device configuration and type.
"""
self._negative_node = child
if self._negative_node.status != ISY_VALUE_UNKNOWN:
# If the negative node has a value, it means the negative node is
# in use for this device. Next we need to check to see if the
# negative and positive nodes disagree on the state (both ON or
# both OFF).
if self._negative_node.status == self._node.status:
# The states disagree, therefore we cannot determine the state
# of the sensor until we receive our first ON event.
self._computed_state = None
def _negative_node_control_handler(self, event: object) -> None:
"""Handle an "On" control event from the "negative" node."""
if event.control == CMD_ON:
_LOGGER.debug(
"Sensor %s turning Off via the Negative node sending a DON command",
self.name,
)
self._computed_state = False
self.schedule_update_ha_state()
self._heartbeat()
def _positive_node_control_handler(self, event: object) -> None:
"""Handle On and Off control event coming from the primary node.
Depending on device configuration, sometimes only On events
will come to this node, with the negative node representing Off
events
"""
if event.control == CMD_ON:
_LOGGER.debug(
"Sensor %s turning On via the Primary node sending a DON command",
self.name,
)
self._computed_state = True
self.schedule_update_ha_state()
self._heartbeat()
if event.control == CMD_OFF:
_LOGGER.debug(
"Sensor %s turning Off via the Primary node sending a DOF command",
self.name,
)
self._computed_state = False
self.schedule_update_ha_state()
self._heartbeat()
def on_update(self, event: object) -> None:
"""Primary node status updates.
We MOSTLY ignore these updates, as we listen directly to the Control
events on all nodes for this device. However, there is one edge case:
If a leak sensor is unknown, due to a recent reboot of the ISY, the
status will get updated to dry upon the first heartbeat. This status
update is the only way that a leak sensor's status changes without
an accompanying Control event, so we need to watch for it.
"""
if self._status_was_unknown and self._computed_state is None:
self._computed_state = bool(self._node.status)
self._status_was_unknown = False
self.schedule_update_ha_state()
self._heartbeat()
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on.
Insteon leak sensors set their primary node to On when the state is
DRY, not WET, so we invert the binary state if the user indicates
that it is a moisture sensor.
"""
if self._computed_state is None:
# Do this first so we don't invert None on moisture sensors
return None
if self.device_class == DEVICE_CLASS_MOISTURE:
return not self._computed_state
return self._computed_state
class ISYBinarySensorHeartbeat(ISYNodeEntity, BinarySensorEntity):
"""Representation of the battery state of an ISY994 sensor."""
def __init__(self, node, parent_device) -> None:
"""Initialize the ISY994 binary sensor device.
Computed state is set to UNKNOWN unless the ISY provided a valid
state. See notes above regarding ISY Sensor status on ISY restart.
If a valid state is provided (either on or off), the computed state in
HA is set to OFF (Normal). If the heartbeat is not received in 25 hours
then the computed state is set to ON (Low Battery).
"""
super().__init__(node)
self._parent_device = parent_device
self._heartbeat_timer = None
self._computed_state = None
if self.state is None:
self._computed_state = False
async def async_added_to_hass(self) -> None:
"""Subscribe to the node and subnode event emitters."""
await super().async_added_to_hass()
self._node.control_events.subscribe(self._heartbeat_node_control_handler)
# Start the timer on bootup, so we can change from UNKNOWN to OFF
self._restart_timer()
def _heartbeat_node_control_handler(self, event: object) -> None:
"""Update the heartbeat timestamp when any ON/OFF event is sent.
The ISY uses both DON and DOF commands (alternating) for a heartbeat.
"""
if event.control in [CMD_ON, CMD_OFF]:
self.heartbeat()
def heartbeat(self):
"""Mark the device as online, and restart the 25 hour timer.
This gets called when the heartbeat node beats, but also when the
parent sensor sends any events, as we can trust that to mean the device
is online. This mitigates the risk of false positives due to a single
missed heartbeat event.
"""
self._computed_state = False
self._restart_timer()
self.schedule_update_ha_state()
def _restart_timer(self):
"""Restart the 25 hour timer."""
try:
self._heartbeat_timer()
self._heartbeat_timer = None
except TypeError:
# No heartbeat timer is active
pass
@callback
def timer_elapsed(now) -> None:
"""Heartbeat missed; set state to ON to indicate dead battery."""
self._computed_state = True
self._heartbeat_timer = None
self.schedule_update_ha_state()
point_in_time = dt_util.utcnow() + timedelta(hours=25)
_LOGGER.debug(
"Heartbeat timer starting. Now: %s Then: %s",
dt_util.utcnow(),
point_in_time,
)
self._heartbeat_timer = async_track_point_in_utc_time(
self.hass, timer_elapsed, point_in_time
)
def on_update(self, event: object) -> None:
"""Ignore node status updates.
We listen directly to the Control events for this device.
"""
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on.
        Note: This method will return False if the current state is UNKNOWN,
        which occurs after a restart, until the first heartbeat or parent
        control event is received.
"""
return bool(self._computed_state)
@property
def device_class(self) -> str:
"""Get the class of this device."""
return DEVICE_CLASS_BATTERY
@property
def device_state_attributes(self):
"""Get the state attributes for the device."""
attr = super().device_state_attributes
attr["parent_entity_id"] = self._parent_device.entity_id
return attr
class ISYBinarySensorProgramEntity(ISYProgramEntity, BinarySensorEntity):
"""Representation of an ISY994 binary sensor program.
This does not need all of the subnode logic in the device version of binary
sensors.
"""
@property
def is_on(self) -> bool:
"""Get whether the ISY994 binary sensor device is on."""
return bool(self._node.status)
|
from itertools import product
from warnings import warn
import numbers
import numpy as np
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.extern import tabulate
class TabularCPD(DiscreteFactor):
"""
Defines the conditional probability distribution table (cpd table)
Parameters
----------
variable: int, string (any hashable python object)
The variable whose CPD is defined.
variable_card: integer
cardinality of variable
values: 2d array, 2d list or 2d tuple
values of the cpd table
evidence: array-like
evidences(if any) w.r.t. which cpd is defined
evidence_card: integer, array-like
cardinality of evidences (if any)
Examples
--------
For a distribution of P(grade|diff, intel)
+-------+--------------------+------------------+
|diff | easy | hard |
+-------+-----+------+-------+------+----+------+
|intel |dumb | avg | smart | dumb |avg |smart |
+-------+-----+------+-------+------+----+------+
|gradeA |0.1 | 0.1 | 0.1 | 0.1 |0.1 | 0.1 |
+-------+-----+------+-------+------+----+------+
|gradeB |0.1 | 0.1 | 0.1 | 0.1 |0.1 | 0.1 |
+-------+-----+------+-------+------+----+------+
|gradeC |0.8 | 0.8 | 0.8 | 0.8 |0.8 | 0.8 |
+-------+-----+------+-------+------+----+------+
values should be
[[0.1,0.1,0.1,0.1,0.1,0.1],
[0.1,0.1,0.1,0.1,0.1,0.1],
[0.8,0.8,0.8,0.8,0.8,0.8]]
>>> cpd = TabularCPD('grade',3,[[0.1,0.1,0.1,0.1,0.1,0.1],
... [0.1,0.1,0.1,0.1,0.1,0.1],
... [0.8,0.8,0.8,0.8,0.8,0.8]],
... evidence=['diff', 'intel'], evidence_card=[2,3])
>>> print(cpd)
+---------+---------+---------+---------+---------+---------+---------+
| diff | diff_0 | diff_0 | diff_0 | diff_1 | diff_1 | diff_1 |
+---------+---------+---------+---------+---------+---------+---------+
| intel | intel_0 | intel_1 | intel_2 | intel_0 | intel_1 | intel_2 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_0 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 | 0.1 |
+---------+---------+---------+---------+---------+---------+---------+
| grade_2 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 | 0.8 |
+---------+---------+---------+---------+---------+---------+---------+
>>> cpd.values
array([[[ 0.1, 0.1, 0.1],
[ 0.1, 0.1, 0.1]],
[[ 0.1, 0.1, 0.1],
[ 0.1, 0.1, 0.1]],
[[ 0.8, 0.8, 0.8],
[ 0.8, 0.8, 0.8]]])
>>> cpd.variables
['grade', 'diff', 'intel']
>>> cpd.cardinality
array([3, 2, 3])
>>> cpd.variable
'grade'
>>> cpd.variable_card
3
"""
def __init__(
self,
variable,
variable_card,
values,
evidence=None,
evidence_card=None,
state_names={},
):
self.variable = variable
self.variable_card = None
variables = [variable]
if not isinstance(variable_card, numbers.Integral):
raise TypeError("Event cardinality must be an integer")
self.variable_card = variable_card
cardinality = [variable_card]
if evidence_card is not None:
if isinstance(evidence_card, numbers.Real):
raise TypeError("Evidence card must be a list of numbers")
cardinality.extend(evidence_card)
if evidence is not None:
if isinstance(evidence, str):
raise TypeError("Evidence must be list, tuple or array of strings.")
variables.extend(evidence)
if not len(evidence_card) == len(evidence):
raise ValueError(
"Length of evidence_card doesn't match length of evidence"
)
values = np.array(values)
if values.ndim != 2:
raise TypeError("Values must be a 2D list/array")
if evidence is None:
expected_cpd_shape = (variable_card, 1)
else:
            expected_cpd_shape = (variable_card, np.prod(evidence_card))
if values.shape != expected_cpd_shape:
raise ValueError(
f"values must be of shape {expected_cpd_shape}. Got shape: {values.shape}"
)
super(TabularCPD, self).__init__(
variables, cardinality, values.flatten("C"), state_names=state_names
)
def __repr__(self):
var_str = f"<TabularCPD representing P({self.variable}:{self.variable_card}"
evidence = self.variables[1:]
evidence_card = self.cardinality[1:]
if evidence:
evidence_str = " | " + ", ".join(
[f"{var}:{card}" for var, card in zip(evidence, evidence_card)]
)
else:
evidence_str = ""
return var_str + evidence_str + f") at {hex(id(self))}>"
def get_values(self):
"""
Returns the cpd
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> cpd = TabularCPD('grade', 3, [[0.1, 0.1],
... [0.1, 0.1],
... [0.8, 0.8]],
... evidence='evi1', evidence_card=2)
>>> cpd.get_values()
array([[ 0.1, 0.1],
[ 0.1, 0.1],
[ 0.8, 0.8]])
"""
if self.variable in self.variables:
return self.values.reshape(
self.cardinality[0], np.prod(self.cardinality[1:])
)
else:
return self.values.reshape(np.prod(self.cardinality), 1)
def __str__(self):
return self._make_table_str(tablefmt="grid")
def _str(self, phi_or_p="p", tablefmt="fancy_grid"):
        return super(TabularCPD, self)._str(phi_or_p, tablefmt)
def _make_table_str(self, tablefmt="fancy_grid", print_state_names=True):
headers_list = []
# build column headers
evidence = self.variables[1:]
evidence_card = self.cardinality[1:]
if evidence:
col_indexes = np.array(list(product(*[range(i) for i in evidence_card])))
if self.state_names and print_state_names:
for i in range(len(evidence_card)):
column_header = [str(evidence[i])] + [
"{var}({state})".format(
var=evidence[i], state=self.state_names[evidence[i]][d]
)
for d in col_indexes.T[i]
]
headers_list.append(column_header)
else:
for i in range(len(evidence_card)):
column_header = [str(evidence[i])] + [
f"{evidence[i]}_{d}" for d in col_indexes.T[i]
]
headers_list.append(column_header)
# Build row headers
if self.state_names and print_state_names:
variable_array = [
[
"{var}({state})".format(
var=self.variable, state=self.state_names[self.variable][i]
)
for i in range(self.variable_card)
]
]
else:
variable_array = [
[f"{self.variable}_{i}" for i in range(self.variable_card)]
]
# Stack with data
labeled_rows = np.hstack(
(np.array(variable_array).T, self.get_values())
).tolist()
# No support for multi-headers in tabulate
cdf_str = tabulate(headers_list + labeled_rows, tablefmt=tablefmt)
return cdf_str
def copy(self):
"""
Returns a copy of the TabularCPD object.
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> cpd = TabularCPD('grade', 2,
... [[0.7, 0.6, 0.6, 0.2],[0.3, 0.4, 0.4, 0.8]],
... ['intel', 'diff'], [2, 2])
>>> copy = cpd.copy()
>>> copy.variable
'grade'
>>> copy.variable_card
2
>>> copy.evidence
['intel', 'diff']
>>> copy.values
array([[[ 0.7, 0.6],
[ 0.6, 0.2]],
[[ 0.3, 0.4],
[ 0.4, 0.8]]])
"""
evidence = self.variables[1:] if len(self.variables) > 1 else None
evidence_card = self.cardinality[1:] if len(self.variables) > 1 else None
return TabularCPD(
self.variable,
self.variable_card,
self.get_values(),
evidence,
evidence_card,
state_names=self.state_names.copy(),
)
def normalize(self, inplace=True):
"""
Normalizes the cpd table.
Parameters
----------
inplace: boolean
If inplace=True it will modify the CPD itself, else would return
a new CPD
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> cpd_table = TabularCPD('grade', 2,
... [[0.7, 0.2, 0.6, 0.2],[0.4, 0.4, 0.4, 0.8]],
... ['intel', 'diff'], [2, 2])
>>> cpd_table.normalize()
>>> cpd_table.get_values()
array([[ 0.63636364, 0.33333333, 0.6 , 0.2 ],
[ 0.36363636, 0.66666667, 0.4 , 0.8 ]])
"""
tabular_cpd = self if inplace else self.copy()
cpd = tabular_cpd.get_values()
tabular_cpd.values = (cpd / cpd.sum(axis=0)).reshape(tabular_cpd.cardinality)
if not inplace:
return tabular_cpd
def marginalize(self, variables, inplace=True):
"""
Modifies the cpd table with marginalized values.
Parameters
----------
variables: list, array-like
list of variable to be marginalized
inplace: boolean
If inplace=True it will modify the CPD itself, else would return
a new CPD
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> cpd_table = TabularCPD('grade', 2,
... [[0.7, 0.6, 0.6, 0.2],[0.3, 0.4, 0.4, 0.8]],
... ['intel', 'diff'], [2, 2])
>>> cpd_table.marginalize(['diff'])
>>> cpd_table.get_values()
array([[ 0.65, 0.4 ],
[ 0.35, 0.6 ]])
"""
if self.variable in variables:
raise ValueError(
"Marginalization not allowed on the variable on which CPD is defined"
)
tabular_cpd = self if inplace else self.copy()
super(TabularCPD, tabular_cpd).marginalize(variables)
tabular_cpd.normalize()
if not inplace:
return tabular_cpd
def reduce(self, values, inplace=True):
"""
Reduces the cpd table to the context of given variable values.
Parameters
----------
values: list, array-like
A list of tuples of the form (variable_name, variable_state).
inplace: boolean
            If inplace=True it will modify the factor itself, otherwise it will
            return a new factor.
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> cpd_table = TabularCPD('grade', 2,
... [[0.7, 0.6, 0.6, 0.2],[0.3, 0.4, 0.4, 0.8]],
... ['intel', 'diff'], [2, 2])
>>> cpd_table.reduce([('diff', 0)])
>>> cpd_table.get_values()
array([[ 0.7, 0.6],
[ 0.3, 0.4]])
"""
if self.variable in (value[0] for value in values):
raise ValueError(
"Reduce not allowed on the variable on which CPD is defined"
)
tabular_cpd = self if inplace else self.copy()
super(TabularCPD, tabular_cpd).reduce(values)
tabular_cpd.normalize()
if not inplace:
return tabular_cpd
def to_factor(self):
"""
        Returns an equivalent factor with the same variables, cardinality, and values as the CPD.
Examples
--------
>>> from pgmpy.factors.discrete import TabularCPD
>>> cpd = TabularCPD('grade', 3, [[0.1, 0.1],
... [0.1, 0.1],
... [0.8, 0.8]],
... evidence='evi1', evidence_card=2)
>>> factor = cpd.to_factor()
>>> factor
<DiscreteFactor representing phi(grade:3, evi1:2) at 0x7f847a4f2d68>
"""
return DiscreteFactor(
variables=self.variables,
cardinality=self.cardinality,
values=self.values,
state_names=self.state_names,
)
def reorder_parents(self, new_order, inplace=True):
"""
        Returns a new CPD table according to the provided order of variables.
Parameters
----------
new_order: list
list of new ordering of variables
inplace: boolean
            If inplace=True it will modify the CPD itself,
            otherwise a new CPD will be returned and the original is left unchanged.
Examples
--------
Consider a CPD P(grade| diff, intel)
>>> cpd = TabularCPD('grade',3,[[0.1,0.1,0.0,0.4,0.2,0.1],
... [0.3,0.2,0.1,0.4,0.3,0.2],
... [0.6,0.7,0.9,0.2,0.5,0.7]],
... evidence=['diff', 'intel'], evidence_card=[2,3])
>>> print(cpd)
+----------+----------+----------+----------+----------+----------+----------+
| diff | diff(0) | diff(0) | diff(0) | diff(1) | diff(1) | diff(1) |
+----------+----------+----------+----------+----------+----------+----------+
| intel | intel(0) | intel(1) | intel(2) | intel(0) | intel(1) | intel(2) |
+----------+----------+----------+----------+----------+----------+----------+
| grade(0) | 0.1 | 0.1 | 0.0 | 0.4 | 0.2 | 0.1 |
+----------+----------+----------+----------+----------+----------+----------+
| grade(1) | 0.3 | 0.2 | 0.1 | 0.4 | 0.3 | 0.2 |
+----------+----------+----------+----------+----------+----------+----------+
| grade(2) | 0.6 | 0.7 | 0.9 | 0.2 | 0.5 | 0.7 |
+----------+----------+----------+----------+----------+----------+----------+
>>> cpd.values
array([[[ 0.1, 0.1, 0. ],
[ 0.4, 0.2, 0.1]],
[[ 0.3, 0.2, 0.1],
[ 0.4, 0.3, 0.2]],
[[ 0.6, 0.7, 0.9],
[ 0.2, 0.5, 0.7]]])
>>> cpd.variables
['grade', 'diff', 'intel']
>>> cpd.cardinality
array([3, 2, 3])
>>> cpd.variable
'grade'
>>> cpd.variable_card
3
>>> cpd.reorder_parents(['intel', 'diff'])
array([[0.1, 0.4, 0.1, 0.2, 0. , 0.1],
[0.3, 0.4, 0.2, 0.3, 0.1, 0.2],
[0.6, 0.2, 0.7, 0.5, 0.9, 0.7]])
>>> print(cpd)
+----------+----------+----------+----------+----------+----------+----------+
| intel | intel(0) | intel(0) | intel(1) | intel(1) | intel(2) | intel(2) |
+----------+----------+----------+----------+----------+----------+----------+
| diff | diff(0) | diff(1) | diff(0) | diff(1) | diff(0) | diff(1) |
+----------+----------+----------+----------+----------+----------+----------+
| grade(0) | 0.1 | 0.4 | 0.1 | 0.2 | 0.0 | 0.1 |
+----------+----------+----------+----------+----------+----------+----------+
| grade(1) | 0.3 | 0.4 | 0.2 | 0.3 | 0.1 | 0.2 |
+----------+----------+----------+----------+----------+----------+----------+
| grade(2) | 0.6 | 0.2 | 0.7 | 0.5 | 0.9 | 0.7 |
+----------+----------+----------+----------+----------+----------+----------+
>>> cpd.values
array([[[0.1, 0.4],
[0.1, 0.2],
[0. , 0.1]],
[[0.3, 0.4],
[0.2, 0.3],
[0.1, 0.2]],
[[0.6, 0.2],
[0.7, 0.5],
[0.9, 0.7]]])
>>> cpd.variables
['grade', 'intel', 'diff']
>>> cpd.cardinality
array([3, 3, 2])
>>> cpd.variable
'grade'
>>> cpd.variable_card
3
"""
if (
len(self.variables) <= 1
or (set(new_order) - set(self.variables))
or (set(self.variables[1:]) - set(new_order))
):
raise ValueError("New order either has missing or extra arguments")
else:
if new_order != self.variables[1:]:
evidence = self.variables[1:]
evidence_card = self.cardinality[1:]
card_map = dict(zip(evidence, evidence_card))
old_pos_map = dict(zip(evidence, range(len(evidence))))
trans_ord = [0] + [(old_pos_map[letter] + 1) for letter in new_order]
new_values = np.transpose(self.values, trans_ord)
if inplace:
variables = [self.variables[0]] + new_order
cardinality = [self.variable_card] + [
card_map[var] for var in new_order
]
super(TabularCPD, self).__init__(
variables, cardinality, new_values.flatten("C")
)
return self.get_values()
else:
return new_values.reshape(
self.cardinality[0],
np.prod([card_map[var] for var in new_order]),
)
else:
warn("Same ordering provided as current")
return self.get_values()
    def get_evidence(self):
        """Return the evidence variables of the CPD, in reverse order."""
        return self.variables[:0:-1]
|
import numpy as np
import chainer
from chainer.functions import pad
from chainer.links import Convolution2D
from chainer.utils import conv
def _pair(x):
if hasattr(x, '__getitem__'):
return x
return x, x
def _get_pad(in_size, ksize, stride, tf_padding):
if tf_padding == 'SAME':
tf_out_size = int(np.ceil(float(in_size) / stride))
elif tf_padding == 'VALID':
tf_out_size = int(np.ceil(float(in_size - ksize + 1) / stride))
pad = int(stride * tf_out_size - in_size + ksize - stride)
assert conv.get_conv_outsize(in_size + pad, ksize, stride,
0) == tf_out_size
return pad
def _tf_padding(x, ksize, stride, tf_padding):
pad_2 = _get_pad(x.shape[2], ksize[0], stride[0], tf_padding)
pad_3 = _get_pad(x.shape[3], ksize[1], stride[1], tf_padding)
if pad_2 or pad_3:
return pad(
x,
((0, 0), (0, 0), (pad_2 // 2, int(np.ceil(float(pad_2) / 2))),
(pad_3 // 2, int(np.ceil(float(pad_3) / 2)))),
mode='constant')
else:
return x
class TFConvolution2D(chainer.Chain):
"""Tensorflow compatible Convolution2D
This is a Convolution2D chain that imitates Tensorflow's tf.nn.conv2d.
    The arguments are the same as those of
    :class:`chainer.links.Convolution2D` except for `pad`.
    :obj:`pad` can be set to TF's "SAME" or "VALID" in addition to an integer value.
    If an integer value is set,
    this chain is equivalent to :class:`chainer.links.Convolution2D`.
"""
def __init__(self,
in_channels,
out_channels,
ksize=None,
stride=1,
pad='SAME',
nobias=False,
initialW=None,
initial_bias=None,
**kwargs):
super(TFConvolution2D, self).__init__()
if ksize is None:
out_channels, ksize, in_channels = in_channels, out_channels, None
if pad in ('SAME', 'VALID'): # TF compatible pad
self.padding = lambda x: _tf_padding(x, _pair(self.conv.ksize),
_pair(self.conv.stride), pad)
conv_pad = 0
else:
self.padding = None
assert isinstance(pad, int)
conv_pad = pad
with self.init_scope():
self.conv = Convolution2D(in_channels, out_channels, ksize, stride,
conv_pad, nobias, initialW, initial_bias,
**kwargs)
@property
def W(self):
return self.conv.W
@property
def b(self):
return self.conv.b
def forward(self, x):
if self.padding is None:
return self.conv(x)
else:
return self.conv(self.padding(x))
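if __name__ == '__main__':
    # Minimal usage sketch (not part of the original module): with pad='SAME'
    # and stride 1 the spatial size of the output matches the input, mirroring
    # tf.nn.conv2d's SAME padding. The shapes below are illustrative only.
    x = np.zeros((1, 3, 32, 32), dtype=np.float32)
    conv = TFConvolution2D(3, 8, ksize=3, stride=1, pad='SAME')
    y = conv(x)
    assert y.shape == (1, 8, 32, 32)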
|
from __future__ import absolute_import
from __future__ import unicode_literals
import threading
import unittest
import warnings
import six
from instalooter.pbar import ProgressBar, TqdmProgressBar
class TestProgressBar(unittest.TestCase):
def test_derived_progress_bar(self):
class MyProgressBar(ProgressBar):
_test = {"update": 0, "max": None}
def update(self):
self._test['update'] += 1
def set_maximum(self, maximum):
self._test['max'] = maximum
pb = MyProgressBar(iter(range(10)))
self.assertEqual(pb._test['update'], 0)
self.assertIs(pb._test['max'], None)
self.assertEqual(next(pb), 0)
self.assertEqual(pb._test['update'], 1)
pb.set_maximum(10)
self.assertEqual(pb._test['max'], 10)
self.assertEqual(list(pb), list(range(1, 10)))
self.assertRaises(StopIteration, next, pb)
self.assertEqual(pb._test['update'], 10)
pb.finish()
self.assertRaises(RuntimeError, pb.get_lock)
lock = threading.RLock()
pb.set_lock(lock)
self.assertIs(pb.get_lock(), lock)
def test_tqdm_progress_bar(self):
fh = six.moves.StringIO()
pb = TqdmProgressBar(iter(range(10)), file=fh)
self.assertEqual(pb.n, 0)
self.assertIs(pb.total, None)
self.assertEqual(next(pb), 0)
self.assertEqual(pb.n, 1)
self.assertIs(pb.total, None)
pb.set_maximum(10)
self.assertEqual(pb.total, 10)
self.assertEqual(list(pb), list(range(1, 10)))
self.assertRaises(StopIteration, next, pb)
self.assertEqual(pb.n, 10)
pb.finish()
lock = threading.RLock()
pb.set_lock(lock)
self.assertIs(pb.get_lock(), lock)
def setUpModule():
warnings.simplefilter('ignore')
def tearDownModule():
warnings.simplefilter(warnings.defaultaction)
|
from itertools import groupby
import pymongo
from arctic.chunkstore.chunkstore import SYMBOL, SEGMENT, START
def segment_id_repair(library, symbol=None):
"""
Ensure that symbol(s) have contiguous segment ids
Parameters
----------
library: arctic library
symbol: None, str, list of str
None: all symbols
str: single symbol
list: list of symbols
Returns
-------
list of str - Symbols 'fixed'
"""
ret = []
if symbol is None:
symbol = library.list_symbols()
elif not isinstance(symbol, list):
symbol = [symbol]
by_segment = [(START, pymongo.ASCENDING),
(SEGMENT, pymongo.ASCENDING)]
for sym in symbol:
cursor = library._collection.find({SYMBOL: sym}, sort=by_segment)
# group by chunk
for _, segments in groupby(cursor, key=lambda x: (x[START], x[SYMBOL])):
segments = list(segments)
            # if the first segment id is -1 (it should be 0), this chunk needs fixing
if segments[0][SEGMENT] == -1:
# since the segment is part of the index, we have to clean up first
library._collection.delete_many({SYMBOL: sym, START: segments[0][START]})
# map each segment in the interval to the correct segment
for index, seg in enumerate(segments):
seg[SEGMENT] = index
library._collection.insert_many(segments)
ret.append(sym)
return ret
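if __name__ == '__main__':
    # Illustrative usage sketch only (not part of this module): assumes a local
    # mongod and an existing chunkstore library; the library name and symbol
    # below are hypothetical.
    from arctic import Arctic
    lib = Arctic('localhost')['user.data']
    print(segment_id_repair(lib))            # repair every symbol
    print(segment_id_repair(lib, 'SYM_A'))   # repair a single symbol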
|
from diamond.collector import Collector
import os
try:
import libvirt
except ImportError:
libvirt = None
class XENCollector(Collector):
def get_default_config_help(self):
config_help = super(XENCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(XENCollector, self).get_default_config()
config.update({
'path': 'xen'
})
return config
def collect(self):
"""
Collect libvirt data
"""
if libvirt is None:
            self.log.error('Unable to import libvirt')
return {}
# Open a restricted (non-root) connection to the hypervisor
conn = libvirt.openReadOnly(None)
        # Get hardware info; getInfo() returns [model, memory (MB), active CPUs, ...]
conninfo = conn.getInfo()
# Initialize variables
memallocated = 0
coresallocated = 0
totalcores = 0
results = {}
domIds = conn.listDomainsID()
if 0 in domIds:
# Total cores
domU = conn.lookupByID(0)
totalcores = domU.info()[3]
# Free Space
s = os.statvfs('/')
freeSpace = (s.f_bavail * s.f_frsize) / 1024
# Calculate allocated memory and cores
for i in domIds:
# Ignore 0
if i == 0:
continue
domU = conn.lookupByID(i)
            dominfo = domU.info()  # [state, maxMem, memory (KiB), nrVirtCpu, cpuTime]
memallocated += dominfo[2]
if i > 0:
coresallocated += dominfo[3]
results = {
'InstalledMem': conninfo[1],
'MemAllocated': memallocated / 1024,
'MemFree': conninfo[1] - (memallocated / 1024),
'AllocatedCores': coresallocated,
'DiskFree': freeSpace,
'TotalCores': totalcores,
'FreeCores': (totalcores - coresallocated)
}
for k in results.keys():
self.publish(k, results[k], 0)
|
import asyncio
import os
import shutil
from homeassistant.components.media_player.const import (
ATTR_MEDIA_CONTENT_ID,
DOMAIN as DOMAIN_MP,
SERVICE_PLAY_MEDIA,
)
import homeassistant.components.tts as tts
from homeassistant.config import async_process_ha_core_config
from homeassistant.setup import setup_component
from tests.common import assert_setup_component, get_test_home_assistant, mock_service
from tests.components.tts.test_init import mutagen_mock # noqa: F401
class TestTTSVoiceRSSPlatform:
"""Test the voicerss speech component."""
def setup_method(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
asyncio.run_coroutine_threadsafe(
async_process_ha_core_config(
self.hass, {"internal_url": "http://example.local:8123"}
),
self.hass.loop,
)
self.url = "https://api.voicerss.org/"
self.form_data = {
"key": "1234567xx",
"hl": "en-us",
"c": "MP3",
"f": "8khz_8bit_mono",
"src": "I person is on front of your door.",
}
def teardown_method(self):
"""Stop everything that was started."""
default_tts = self.hass.config.path(tts.DEFAULT_CACHE_DIR)
if os.path.isdir(default_tts):
shutil.rmtree(default_tts)
self.hass.stop()
def test_setup_component(self):
"""Test setup component."""
config = {tts.DOMAIN: {"platform": "voicerss", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
def test_setup_component_without_api_key(self):
"""Test setup component without api key."""
config = {tts.DOMAIN: {"platform": "voicerss"}}
with assert_setup_component(0, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
def test_service_say(self, aioclient_mock):
"""Test service call say."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
aioclient_mock.post(self.url, data=self.form_data, status=200, content=b"test")
config = {tts.DOMAIN: {"platform": "voicerss", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"voicerss_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "I person is on front of your door.",
},
)
self.hass.block_till_done()
assert len(calls) == 1
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == self.form_data
assert calls[0].data[ATTR_MEDIA_CONTENT_ID].find(".mp3") != -1
def test_service_say_german_config(self, aioclient_mock):
"""Test service call say with german code in the config."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
self.form_data["hl"] = "de-de"
aioclient_mock.post(self.url, data=self.form_data, status=200, content=b"test")
config = {
tts.DOMAIN: {
"platform": "voicerss",
"api_key": "1234567xx",
"language": "de-de",
}
}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"voicerss_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "I person is on front of your door.",
},
)
self.hass.block_till_done()
assert len(calls) == 1
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == self.form_data
def test_service_say_german_service(self, aioclient_mock):
"""Test service call say with german code in the service."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
self.form_data["hl"] = "de-de"
aioclient_mock.post(self.url, data=self.form_data, status=200, content=b"test")
config = {tts.DOMAIN: {"platform": "voicerss", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"voicerss_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "I person is on front of your door.",
tts.ATTR_LANGUAGE: "de-de",
},
)
self.hass.block_till_done()
assert len(calls) == 1
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == self.form_data
def test_service_say_error(self, aioclient_mock):
"""Test service call say with http response 400."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
aioclient_mock.post(self.url, data=self.form_data, status=400, content=b"test")
config = {tts.DOMAIN: {"platform": "voicerss", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"voicerss_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "I person is on front of your door.",
},
)
self.hass.block_till_done()
assert len(calls) == 0
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == self.form_data
def test_service_say_timeout(self, aioclient_mock):
"""Test service call say with http timeout."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
aioclient_mock.post(self.url, data=self.form_data, exc=asyncio.TimeoutError())
config = {tts.DOMAIN: {"platform": "voicerss", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"voicerss_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "I person is on front of your door.",
},
)
self.hass.block_till_done()
assert len(calls) == 0
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == self.form_data
def test_service_say_error_msg(self, aioclient_mock):
"""Test service call say with http error api message."""
calls = mock_service(self.hass, DOMAIN_MP, SERVICE_PLAY_MEDIA)
aioclient_mock.post(
self.url,
data=self.form_data,
status=200,
content=b"The subscription does not support SSML!",
)
config = {tts.DOMAIN: {"platform": "voicerss", "api_key": "1234567xx"}}
with assert_setup_component(1, tts.DOMAIN):
setup_component(self.hass, tts.DOMAIN, config)
self.hass.services.call(
tts.DOMAIN,
"voicerss_say",
{
"entity_id": "media_player.something",
tts.ATTR_MESSAGE: "I person is on front of your door.",
},
)
self.hass.block_till_done()
assert len(calls) == 0
assert len(aioclient_mock.mock_calls) == 1
assert aioclient_mock.mock_calls[0][2] == self.form_data
|
import io
import os
import shutil
import lxml.html
import pytest
import nikola.plugins.command.init
from nikola import __main__
from .helper import cd
from .test_empty_build import ( # NOQA
test_archive_exists,
test_avoid_double_slash_in_rss,
test_check_files,
test_check_links,
test_index_in_sitemap,
)
def test_translated_titles(build, output_dir, other_locale):
"""Check that translated title is picked up."""
normal_file = os.path.join(output_dir, "pages", "1", "index.html")
translated_file = os.path.join(output_dir, other_locale, "pages", "1", "index.html")
# Files should be created
assert os.path.isfile(normal_file)
assert os.path.isfile(translated_file)
# And now let's check the titles
with io.open(normal_file, "r", encoding="utf8") as inf:
doc = lxml.html.parse(inf)
assert doc.find("//title").text == "Foo | Demo Site"
with io.open(translated_file, "r", encoding="utf8") as inf:
doc = lxml.html.parse(inf)
assert doc.find("//title").text == "Bar | Demo Site"
@pytest.fixture(scope="module")
def build(target_dir, test_dir):
"""Build the site."""
init_command = nikola.plugins.command.init.CommandInit()
init_command.create_empty_site(target_dir)
init_command.create_configuration(target_dir)
src = os.path.join(test_dir, "..", "data", "translated_titles")
for root, dirs, files in os.walk(src):
for src_name in files:
rel_dir = os.path.relpath(root, src)
dst_file = os.path.join(target_dir, rel_dir, src_name)
src_file = os.path.join(root, src_name)
shutil.copy2(src_file, dst_file)
with cd(target_dir):
__main__.main(["build"])
|
import io
import face_recognition # pylint: disable=import-error
from homeassistant.components.image_processing import (
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
ImageProcessingFaceEntity,
)
from homeassistant.core import split_entity_id
# pylint: disable=unused-import
from homeassistant.components.image_processing import ( # noqa: F401, isort:skip
PLATFORM_SCHEMA,
)
ATTR_LOCATION = "location"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Dlib Face detection platform."""
entities = []
for camera in config[CONF_SOURCE]:
entities.append(
DlibFaceDetectEntity(camera[CONF_ENTITY_ID], camera.get(CONF_NAME))
)
add_entities(entities)
class DlibFaceDetectEntity(ImageProcessingFaceEntity):
"""Dlib Face API entity for identify."""
def __init__(self, camera_entity, name=None):
"""Initialize Dlib face entity."""
super().__init__()
self._camera = camera_entity
if name:
self._name = name
else:
self._name = f"Dlib Face {split_entity_id(camera_entity)[1]}"
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def name(self):
"""Return the name of the entity."""
return self._name
def process_image(self, image):
"""Process image."""
fak_file = io.BytesIO(image)
fak_file.name = "snapshot.jpg"
fak_file.seek(0)
image = face_recognition.load_image_file(fak_file)
face_locations = face_recognition.face_locations(image)
face_locations = [{ATTR_LOCATION: location} for location in face_locations]
self.process_faces(face_locations, len(face_locations))
|
from datetime import timedelta
import logging
from math import floor
from aiohttp.hdrs import USER_AGENT
import requests
import voluptuous as vol
from homeassistant.components.binary_sensor import PLATFORM_SCHEMA, BinarySensorEntity
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by the National Oceanic and Atmospheric Administration"
CONF_THRESHOLD = "forecast_threshold"
DEFAULT_DEVICE_CLASS = "visible"
DEFAULT_NAME = "Aurora Visibility"
DEFAULT_THRESHOLD = 75
HA_USER_AGENT = "Home Assistant Aurora Tracker v.0.1.0"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=5)
URL = "http://services.swpc.noaa.gov/text/aurora-nowcast-map.txt"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_THRESHOLD, default=DEFAULT_THRESHOLD): cv.positive_int,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the aurora sensor."""
if None in (hass.config.latitude, hass.config.longitude):
_LOGGER.error("Lat. or long. not set in Home Assistant config")
return False
name = config[CONF_NAME]
threshold = config[CONF_THRESHOLD]
try:
aurora_data = AuroraData(hass.config.latitude, hass.config.longitude, threshold)
aurora_data.update()
except requests.exceptions.HTTPError as error:
_LOGGER.error("Connection to aurora forecast service failed: %s", error)
return False
add_entities([AuroraSensor(aurora_data, name)], True)
class AuroraSensor(BinarySensorEntity):
"""Implementation of an aurora sensor."""
def __init__(self, aurora_data, name):
"""Initialize the sensor."""
self.aurora_data = aurora_data
self._name = name
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._name}"
@property
def is_on(self):
"""Return true if aurora is visible."""
return self.aurora_data.is_visible if self.aurora_data else False
@property
def device_class(self):
"""Return the class of this device."""
return DEFAULT_DEVICE_CLASS
@property
def device_state_attributes(self):
"""Return the state attributes."""
attrs = {}
if self.aurora_data:
attrs["visibility_level"] = self.aurora_data.visibility_level
attrs["message"] = self.aurora_data.is_visible_text
attrs[ATTR_ATTRIBUTION] = ATTRIBUTION
return attrs
def update(self):
"""Get the latest data from Aurora API and updates the states."""
self.aurora_data.update()
class AuroraData:
"""Get aurora forecast."""
def __init__(self, latitude, longitude, threshold):
"""Initialize the data object."""
self.latitude = latitude
self.longitude = longitude
self.headers = {USER_AGENT: HA_USER_AGENT}
self.threshold = int(threshold)
self.is_visible = None
self.is_visible_text = None
self.visibility_level = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data from the Aurora service."""
try:
self.visibility_level = self.get_aurora_forecast()
if int(self.visibility_level) > self.threshold:
self.is_visible = True
self.is_visible_text = "visible!"
else:
self.is_visible = False
self.is_visible_text = "nothing's out"
except requests.exceptions.HTTPError as error:
_LOGGER.error("Connection to aurora forecast service failed: %s", error)
return False
def get_aurora_forecast(self):
"""Get forecast data and parse for given long/lat."""
raw_data = requests.get(URL, headers=self.headers, timeout=5).text
# We discard comment rows (#)
# We split the raw text by line (\n)
# For each line we trim leading spaces and split by spaces
forecast_table = [
row.strip().split()
for row in raw_data.split("\n")
if not row.startswith("#")
]
# Convert lat and long for data points in table
# Assumes self.latitude belongs to [-90;90[ (South to North)
# Assumes self.longitude belongs to [-180;180[ (West to East)
# No assumptions made regarding the number of rows and columns
converted_latitude = floor((self.latitude + 90) * len(forecast_table) / 180)
converted_longitude = floor(
(self.longitude + 180) * len(forecast_table[converted_latitude]) / 360
)
return forecast_table[converted_latitude][converted_longitude]
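# Worked example of the index conversion above (illustrative numbers only):
# with a 512-row by 1024-column forecast table, latitude 52.0 and longitude 4.9
# map to row floor((52.0 + 90) * 512 / 180) == 403 and
# column floor((4.9 + 180) * 1024 / 360) == 525.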
|
import collections
from PyQt5.QtCore import QObject, pyqtSignal
from PyQt5.QtSql import QSqlDatabase, QSqlQuery, QSqlError
from qutebrowser.utils import log, debug
class SqliteErrorCode:
"""Error codes as used by sqlite.
See https://sqlite.org/rescode.html - note we only define the codes we use
in qutebrowser here.
"""
ERROR = '1' # generic error code
BUSY = '5' # database is locked
READONLY = '8' # attempt to write a readonly database
IOERR = '10' # disk I/O error
CORRUPT = '11' # database disk image is malformed
FULL = '13' # database or disk is full
CANTOPEN = '14' # unable to open database file
PROTOCOL = '15' # locking protocol error
CONSTRAINT = '19' # UNIQUE constraint failed
NOTADB = '26' # file is not a database
class Error(Exception):
"""Base class for all SQL related errors."""
def __init__(self, msg, error=None):
super().__init__(msg)
self.error = error
def text(self):
"""Get a short text description of the error.
This is a string suitable to show to the user as error message.
"""
if self.error is None:
return str(self)
else:
return self.error.databaseText()
class KnownError(Error):
"""Raised on an error interacting with the SQL database.
This is raised in conditions resulting from the environment (like a full
disk or I/O errors), where qutebrowser isn't to blame.
"""
class BugError(Error):
"""Raised on an error interacting with the SQL database.
This is raised for errors resulting from a qutebrowser bug.
"""
def raise_sqlite_error(msg, error):
"""Raise either a BugError or KnownError."""
error_code = error.nativeErrorCode()
database_text = error.databaseText()
driver_text = error.driverText()
log.sql.debug("SQL error:")
log.sql.debug("type: {}".format(
debug.qenum_key(QSqlError, error.type())))
log.sql.debug("database text: {}".format(database_text))
log.sql.debug("driver text: {}".format(driver_text))
log.sql.debug("error code: {}".format(error_code))
known_errors = [
SqliteErrorCode.BUSY,
SqliteErrorCode.READONLY,
SqliteErrorCode.IOERR,
SqliteErrorCode.CORRUPT,
SqliteErrorCode.FULL,
SqliteErrorCode.CANTOPEN,
SqliteErrorCode.PROTOCOL,
SqliteErrorCode.NOTADB,
]
# https://github.com/qutebrowser/qutebrowser/issues/4681
# If the query we built was too long
too_long_err = (
error_code == SqliteErrorCode.ERROR and
(database_text.startswith("Expression tree is too large") or
database_text in ["too many SQL variables",
"LIKE or GLOB pattern too complex"]))
if error_code in known_errors or too_long_err:
raise KnownError(msg, error)
raise BugError(msg, error)
def init(db_path):
"""Initialize the SQL database connection."""
database = QSqlDatabase.addDatabase('QSQLITE')
if not database.isValid():
raise KnownError('Failed to add database. Are sqlite and Qt sqlite '
'support installed?')
database.setDatabaseName(db_path)
if not database.open():
error = database.lastError()
msg = "Failed to open sqlite database at {}: {}".format(db_path,
error.text())
raise_sqlite_error(msg, error)
# Enable write-ahead-logging and reduce disk write frequency
# see https://sqlite.org/pragma.html and issues #2930 and #3507
Query("PRAGMA journal_mode=WAL").run()
Query("PRAGMA synchronous=NORMAL").run()
def close():
"""Close the SQL connection."""
QSqlDatabase.removeDatabase(QSqlDatabase.database().connectionName())
def version():
"""Return the sqlite version string."""
try:
if not QSqlDatabase.database().isOpen():
init(':memory:')
ver = Query("select sqlite_version()").run().value()
close()
return ver
return Query("select sqlite_version()").run().value()
except KnownError as e:
return 'UNAVAILABLE ({})'.format(e)
class Query:
"""A prepared SQL query."""
def __init__(self, querystr, forward_only=True):
"""Prepare a new SQL query.
Args:
querystr: String to prepare query from.
forward_only: Optimization for queries that will only step forward.
Must be false for completion queries.
"""
self.query = QSqlQuery(QSqlDatabase.database())
log.sql.debug('Preparing SQL query: "{}"'.format(querystr))
ok = self.query.prepare(querystr)
self._check_ok('prepare', ok)
self.query.setForwardOnly(forward_only)
def __iter__(self):
if not self.query.isActive():
raise BugError("Cannot iterate inactive query")
rec = self.query.record()
fields = [rec.fieldName(i) for i in range(rec.count())]
rowtype = collections.namedtuple( # type: ignore[misc]
'ResultRow', fields)
while self.query.next():
rec = self.query.record()
yield rowtype(*[rec.value(i) for i in range(rec.count())])
def _check_ok(self, step, ok):
if not ok:
query = self.query.lastQuery()
error = self.query.lastError()
msg = 'Failed to {} query "{}": "{}"'.format(step, query,
error.text())
raise_sqlite_error(msg, error)
def _bind_values(self, values):
for key, val in values.items():
self.query.bindValue(':{}'.format(key), val)
if any(val is None for val in self.bound_values().values()):
raise BugError("Missing bound values!")
def run(self, **values):
"""Execute the prepared query."""
log.sql.debug('Running SQL query: "{}"'.format(
self.query.lastQuery()))
self._bind_values(values)
log.sql.debug('query bindings: {}'.format(self.bound_values()))
ok = self.query.exec_()
self._check_ok('exec', ok)
return self
def run_batch(self, values):
"""Execute the query in batch mode."""
log.sql.debug('Running SQL query (batch): "{}"'.format(
self.query.lastQuery()))
self._bind_values(values)
db = QSqlDatabase.database()
ok = db.transaction()
self._check_ok('transaction', ok)
ok = self.query.execBatch()
try:
self._check_ok('execBatch', ok)
except Error:
# Not checking the return value here, as we're failing anyways...
db.rollback()
raise
ok = db.commit()
self._check_ok('commit', ok)
def value(self):
"""Return the result of a single-value query (e.g. an EXISTS)."""
if not self.query.next():
raise BugError("No result for single-result query")
return self.query.record().value(0)
def rows_affected(self):
return self.query.numRowsAffected()
def bound_values(self):
return self.query.boundValues()
class SqlTable(QObject):
"""Interface to a SQL table.
Attributes:
_name: Name of the SQL table this wraps.
Signals:
changed: Emitted when the table is modified.
"""
changed = pyqtSignal()
def __init__(self, name, fields, constraints=None, parent=None):
"""Create a new table in the SQL database.
Does nothing if the table already exists.
Args:
name: Name of the table.
fields: A list of field names.
constraints: A dict mapping field names to constraint strings.
"""
super().__init__(parent)
self._name = name
constraints = constraints or {}
column_defs = ['{} {}'.format(field, constraints.get(field, ''))
for field in fields]
q = Query("CREATE TABLE IF NOT EXISTS {name} ({column_defs})"
.format(name=name, column_defs=', '.join(column_defs)))
q.run()
def create_index(self, name, field):
"""Create an index over this table.
Args:
name: Name of the index, should be unique.
field: Name of the field to index.
"""
q = Query("CREATE INDEX IF NOT EXISTS {name} ON {table} ({field})"
.format(name=name, table=self._name, field=field))
q.run()
def __iter__(self):
"""Iterate rows in the table."""
q = Query("SELECT * FROM {table}".format(table=self._name))
q.run()
return iter(q)
def contains_query(self, field):
"""Return a prepared query that checks for the existence of an item.
Args:
field: Field to match.
"""
return Query(
"SELECT EXISTS(SELECT * FROM {table} WHERE {field} = :val)"
.format(table=self._name, field=field))
def __len__(self):
"""Return the count of rows in the table."""
q = Query("SELECT count(*) FROM {table}".format(table=self._name))
q.run()
return q.value()
def delete(self, field, value):
"""Remove all rows for which `field` equals `value`.
Args:
field: Field to use as the key.
value: Key value to delete.
Return:
The number of rows deleted.
"""
q = Query("DELETE FROM {table} where {field} = :val"
.format(table=self._name, field=field))
q.run(val=value)
if not q.rows_affected():
raise KeyError('No row with {} = "{}"'.format(field, value))
self.changed.emit()
def _insert_query(self, values, replace):
params = ', '.join(':{}'.format(key) for key in values)
verb = "REPLACE" if replace else "INSERT"
return Query("{verb} INTO {table} ({columns}) values({params})".format(
verb=verb, table=self._name, columns=', '.join(values),
params=params))
def insert(self, values, replace=False):
"""Append a row to the table.
Args:
values: A dict with a value to insert for each field name.
replace: If set, replace existing values.
"""
q = self._insert_query(values, replace)
q.run(**values)
self.changed.emit()
def insert_batch(self, values, replace=False):
"""Performantly append multiple rows to the table.
Args:
values: A dict with a list of values to insert for each field name.
replace: If true, overwrite rows with a primary key match.
"""
q = self._insert_query(values, replace)
q.run_batch(values)
self.changed.emit()
def delete_all(self):
"""Remove all rows from the table."""
Query("DELETE FROM {table}".format(table=self._name)).run()
self.changed.emit()
def select(self, sort_by, sort_order, limit=-1):
"""Prepare, run, and return a select statement on this table.
Args:
sort_by: name of column to sort by.
sort_order: 'asc' or 'desc'.
limit: max number of rows in result, defaults to -1 (unlimited).
Return: A prepared and executed select query.
"""
q = Query("SELECT * FROM {table} ORDER BY {sort_by} {sort_order} "
"LIMIT :limit"
.format(table=self._name, sort_by=sort_by,
sort_order=sort_order))
q.run(limit=limit)
return q
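if __name__ == '__main__':
    # Minimal end-to-end sketch (not part of qutebrowser): assumes PyQt5 with
    # the QSQLITE driver is available. Uses an in-memory database so nothing
    # is written to disk; the table name and fields are illustrative.
    init(':memory:')
    table = SqlTable('Demo', ['name', 'val'])
    table.insert({'name': 'one', 'val': 1})
    table.insert_batch({'name': ['two', 'three'], 'val': [2, 3]})
    print(len(table))                      # -> 3
    for row in table.select(sort_by='val', sort_order='asc'):
        print(row.name, row.val)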
|
import markups
import multiprocessing as mp
import os
import pickle
import signal
import struct
import traceback
import weakref
try:
from socket import socketpair
except ImportError:
# Windows compatibility: socket.socketpair backport for Python < 3.5
from backports.socketpair import socketpair
from PyQt5.QtCore import pyqtSignal, QObject, QSocketNotifier
def recvall(sock, remaining):
alldata = bytearray()
while remaining > 0:
data = sock.recv(remaining)
if len(data) == 0:
raise EOFError('Received 0 bytes from socket while more bytes were expected. Did the sender process exit unexpectedly?')
alldata.extend(data)
remaining -= len(data)
return alldata
def receiveObject(sock):
sizeBuf = recvall(sock, 4)
size = struct.unpack('I', sizeBuf)[0]
message = recvall(sock, size)
obj = pickle.loads(message)
return obj
def sendObject(sock, obj):
message = pickle.dumps(obj)
sizeBuf = struct.pack('I', len(message))
sock.sendall(sizeBuf)
sock.sendall(message)
class ConversionError(Exception):
pass
class MarkupNotAvailableError(Exception):
pass
def _indent(text, prefix):
return ''.join(('%s%s\n' % (prefix, line) for line in text.splitlines()))
def _converter_process_func(conn_parent, conn_child):
conn_parent.close()
# Ignore ctrl-C. The main application will also receive the signal and
# determine if the application should be stopped or not.
signal.signal(signal.SIGINT, signal.SIG_IGN)
current_markup = None
while True:
job = receiveObject(conn_child)
if job['command'] == 'quit':
break
elif job['command'] == 'convert':
try:
os.chdir(job['current_dir'])
if (not current_markup or
current_markup.name != job['markup_name'] or
current_markup.filename != job['filename']):
markup_class = markups.find_markup_class_by_name(job['markup_name'])
if not markup_class.available():
raise MarkupNotAvailableError('The specified markup was not available')
current_markup = markup_class(job['filename'])
current_markup.requested_extensions = job['requested_extensions']
converted = current_markup.convert(job['text'])
result = ('ok', converted)
except MarkupNotAvailableError as e:
result = ('markupnotavailableerror', e.args)
except Exception:
result = ('conversionerror',
'The background markup conversion process received this exception:\n%s' %
_indent(traceback.format_exc(), ' '))
try:
sendObject(conn_child, result)
except BrokenPipeError:
# Continue despite the broken pipe because we expect that a
# 'quit' command will have been sent. If it has been then we
# should terminate without any error messages. If no command
# was queued we will get an EOFError from the read, giving us a
# second chance to show that something went wrong by exiting
# with a traceback.
continue
class ConverterProcess(QObject):
conversionDone = pyqtSignal()
def __init__(self):
super().__init__()
conn_parent, conn_child = socketpair()
# TODO: figure out which of the two sockets should be set to
# inheritable and which should be passed to the child
if hasattr(conn_child, 'set_inheritable'):
conn_child.set_inheritable(True)
# Use a local variable for child so that we can talk to the child in
# on_finalize without needing a reference to self
child = mp.Process(target=_converter_process_func, args=(conn_parent, conn_child))
child.daemon = True
child.start()
self.child = child
conn_child.close()
self.conn = conn_parent
self.busy = False
self.notificationPending = False
self.conversionNotifier = QSocketNotifier(self.conn.fileno(),
QSocketNotifier.Read)
self.conversionNotifier.activated.connect(self._conversionNotifierActivated)
def on_finalize(conn):
sendObject(conn_parent, {'command':'quit'})
conn_parent.close()
child.join()
weakref.finalize(self, on_finalize, conn_parent)
def _conversionNotifierActivated(self):
# The ready-for-read signal on the socket may be triggered multiple
# times, but we only send a single notification to the client as soon
# as the results of the conversion are starting to come in. This makes
# it easy for clients to avoid multiple calls to get_result for the
# same conversion.
if self.notificationPending:
self.notificationPending = False
# Set the socket to blocking before waking up any interested parties,
# because it has been set to unblocking by QSocketNotifier
self.conn.setblocking(True)
self.conversionDone.emit()
def start_conversion(self, markup_name, filename, requested_extensions, text, current_dir):
if self.busy:
raise RuntimeError('Already converting')
sendObject(self.conn, {'command': 'convert',
'markup_name' : markup_name,
'filename' : filename,
'current_dir': current_dir,
'requested_extensions' : requested_extensions,
'text' : text})
self.busy = True
self.notificationPending = True
def get_result(self):
if not self.busy:
raise RuntimeError('No ongoing conversion')
self.busy = False
status, result = receiveObject(self.conn)
if status == 'markupnotavailableerror':
raise MarkupNotAvailableError(result)
elif status == 'conversionerror':
raise ConversionError(result)
return result
def stop(self):
sendObject(self.conn, {'command': 'quit'})
self.conn.close()
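if __name__ == '__main__':
    # Round-trip sketch of the length-prefixed pickle framing used above
    # (illustrative only; not part of ReText). sendObject() writes a 4-byte
    # size header followed by the pickled payload, and receiveObject() reads
    # exactly that many bytes back via recvall().
    a, b = socketpair()
    sendObject(a, {'command': 'convert', 'text': 'hello'})
    print(receiveObject(b))   # -> {'command': 'convert', 'text': 'hello'}
    a.close()
    b.close()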
|
from __future__ import print_function, division, absolute_import, unicode_literals
import base64
import copy
import hmac
import random
import string
from hashlib import sha1
import validictory
import json
from gmusicapi.exceptions import CallFailure, ValidationException
from gmusicapi.protocol.shared import Call, authtypes
from gmusicapi.utils import utils, jsarray
base_url = 'https://play.google.com/music/'
service_url = base_url + 'services/'
class Init(Call):
"""Called one time per session, immediately after login.
This performs one-time setup:
it gathers the cookies we need (specifically `xt`), and Google uses it
to create the webclient DOM.
Note the use of the HEAD verb. Google uses GET, but we don't need
the large response containing Google's webui.
"""
static_method = 'HEAD'
static_url = base_url + 'listen'
required_auth = authtypes(sso=True)
# This call doesn't actually request/return anything useful aside from cookies.
@staticmethod
def parse_response(response):
return response.text
@classmethod
def check_success(cls, response, msg):
if response.status_code != 200:
raise CallFailure(('status code %s != 200' % response.status_code), cls.__name__)
if 'xt' not in response.cookies:
            raise CallFailure('did not receive xt cookies', cls.__name__)
class WcCall(Call):
"""Abstract base for web client calls."""
required_auth = authtypes(xt=True, sso=True)
# validictory schema for the response
_res_schema = utils.NotImplementedField
@classmethod
def validate(cls, response, msg):
"""Use validictory and a static schema (stored in cls._res_schema)."""
try:
return validictory.validate(msg, cls._res_schema)
except ValueError as e:
raise ValidationException(str(e)) from e
@classmethod
def check_success(cls, response, msg):
# Failed responses always have a success=False key.
# Some successful responses do not have a success=True key, however.
# TODO remove utils.call_succeeded
if 'success' in msg and not msg['success']:
raise CallFailure(
"the server reported failure. This is usually"
" caused by bad arguments, but can also happen if requests"
" are made too quickly (eg creating a playlist then"
" modifying it before the server has created it)",
cls.__name__)
@classmethod
def parse_response(cls, response):
return cls._parse_json(response.text)
class CreatePlaylist(WcCall):
"""Adds songs to a playlist."""
static_method = 'POST'
static_url = service_url + 'createplaylist'
static_params = {'format': 'jsarray'}
_res_schema = {
"type": "array",
# eg:
# [[0,2]
# ,["id","sharetoken",[]
# ,<millis>]]
}
@staticmethod
def dynamic_data(name, description, public, session_id=""):
return json.dumps([[session_id, 1], [public, name, description, []]])
class AddToPlaylist(WcCall):
"""Adds songs to a playlist."""
static_method = 'POST'
static_url = service_url + 'addtoplaylist'
_res_schema = {
"type": "object",
"properties": {
"playlistId": {"type": "string"},
"songIds": {
"type": "array",
"items": {
"type": "object",
"properties": {
"songId": {"type": "string"},
"playlistEntryId": {"type": "string"}
}
}
}
},
"additionalProperties": False
}
@staticmethod
def dynamic_data(playlist_id, song_ids):
"""
:param playlist_id: id of the playlist to add to.
:param song_ids: a list of song ids
"""
# TODO unsure what type means here. Likely involves uploaded vs store/free.
song_refs = [{'id': sid, 'type': 1} for sid in song_ids]
return {
'json': json.dumps(
{"playlistId": playlist_id, "songRefs": song_refs}
)
}
@staticmethod
def filter_response(msg):
filtered = copy.copy(msg)
filtered['songIds'] = ["<%s songs>" % len(filtered.get('songIds', []))]
return filtered
class ChangePlaylistOrder(WcCall):
"""Reorder existing tracks in a playlist."""
static_method = 'POST'
static_url = service_url + 'changeplaylistorder'
_res_schema = {
"type": "object",
"properties": {
"afterEntryId": {"type": "string", "blank": True},
"playlistId": {"type": "string"},
"movedSongIds": {
"type": "array",
"items": {"type": "string"}
}
},
"additionalProperties": False
}
@staticmethod
def dynamic_data(playlist_id, song_ids_moving, entry_ids_moving,
after_entry_id=None, before_entry_id=None):
"""
:param playlist_id: id of the playlist getting reordered.
:param song_ids_moving: a list of consecutive song ids. Matches entry_ids_moving.
:param entry_ids_moving: a list of consecutive entry ids to move. Matches song_ids_moving.
:param after_entry_id: the entry id to place these songs after. Default first position.
:param before_entry_id: the entry id to place these songs before. Default last position.
"""
# empty string means first/last position
if after_entry_id is None:
after_entry_id = ""
if before_entry_id is None:
before_entry_id = ""
return {
'json': json.dumps(
{
"playlistId": playlist_id,
"movedSongIds": song_ids_moving,
"movedEntryIds": entry_ids_moving,
"afterEntryId": after_entry_id,
"beforeEntryId": before_entry_id
}
)
}
@staticmethod
def filter_response(msg):
filtered = copy.copy(msg)
filtered['movedSongIds'] = ["<%s songs>" % len(filtered.get('movedSongIds', []))]
return filtered
class DeletePlaylist(WcCall):
"""Delete a playlist."""
static_method = 'POST'
static_url = service_url + 'deleteplaylist'
_res_schema = {
"type": "object",
"properties": {
"deleteId": {"type": "string"}
},
"additionalProperties": False
}
@staticmethod
def dynamic_data(playlist_id):
"""
:param playlist_id: id of the playlist to delete.
"""
return {
'json': json.dumps(
{"id": playlist_id}
)
}
class DeleteSongs(WcCall):
"""Delete a song from the entire library or a single playlist."""
static_method = 'POST'
static_url = service_url + 'deletesong'
_res_schema = {
"type": "object",
"properties": {
"listId": {"type": "string"},
"deleteIds":
{
"type": "array",
"items": {"type": "string"}
}
},
"additionalProperties": False
}
@staticmethod
def dynamic_data(song_ids, playlist_id='all', entry_ids=None):
"""
:param song_ids: a list of song ids.
:param playlist_id: playlist id to delete from, or 'all' for deleting from library.
:param entry_ids: when deleting from playlists, corresponding list of entry ids.
"""
if entry_ids is None:
# this is strange, but apparently correct
entry_ids = [''] * len(song_ids)
return {
'json': json.dumps(
{"songIds": song_ids, "entryIds": entry_ids, "listId": playlist_id}
)
}
@staticmethod
def filter_response(msg):
filtered = copy.copy(msg)
filtered['deleteIds'] = ["<%s songs>" % len(filtered.get('deleteIds', []))]
return filtered
class ChangeSongMetadata(WcCall):
"""Edit the metadata of songs."""
static_method = 'POST'
static_url = service_url + 'modifytracks'
static_params = {'format': 'jsarray'}
_res_schema = {
"type": "array",
# eg [[0,1],[1393706382978]]
}
@staticmethod
def dynamic_data(songs, session_id=""):
"""
:param songs: a list of dicts ``{'id': '...', 'albumArtUrl': '...'}``
"""
supported = {'id', 'albumArtUrl', 'title', 'artist', 'albumArtist', 'album'}
for s in songs:
for k in s.keys():
if k not in supported:
raise ValueError("ChangeSongMetadata only supports the the following keys: "
+ str(supported) +
". All other keys must be removed. Key encountered:" + k)
# jsarray is just wonderful
jsarray = [[session_id, 1]]
song_arrays = [[s['id'],
s.get('title'),
s.get('albumArtUrl'),
s.get('artist'),
s.get('album'),
s.get('albumArtist')]
+ [None] * 33 + [[]] for s in songs]
jsarray.append([song_arrays])
return json.dumps(jsarray)
class GetDownloadInfo(WcCall):
"""Get download links and counts for songs."""
static_method = 'POST'
static_url = service_url + 'multidownload'
_res_schema = {
"type": "object",
"properties": {
"downloadCounts": {
"type": "object",
"items": {
"type": "object",
"properties": {
"id": {"type": "integer"}
}
}
},
"url": {"type": "string"}
},
"additionalProperties": False
}
@staticmethod
def dynamic_data(song_ids):
"""
:param: (list) song_ids
"""
return {'json': json.dumps({'songIds': song_ids})}
class GetStreamUrl(WcCall):
"""Used to request a streaming link of a track."""
static_method = 'GET'
static_url = base_url + 'play' # note use of base_url, not service_url
required_auth = authtypes(sso=True) # no xt required
_res_schema = {
"type": "object",
"properties": {
"url": {"type": "string", "required": False},
"urls": {"type": "array", "required": False},
'now': {'type': 'integer', 'required': False},
'tier': {'type': 'integer', 'required': False},
'replayGain': {'type': 'integer'},
'streamAuthId': {'type': 'string'},
'isFreeRadioUser': {'type': 'boolean'},
},
"additionalProperties": False
}
@staticmethod
def dynamic_params(song_id):
# https://github.com/simon-weber/gmusicapi/issues/137
# there are three cases when streaming:
# | track type | guid songid? | slt/sig needed? |
# user-uploaded yes no
# AA track in library yes yes
# AA track not in library no yes
# without the track['type'] field we can't tell between 1 and 2, but
# include slt/sig anyway; the server ignores the extra params.
key = '27f7313e-f75d-445a-ac99-56386a5fe879'.encode("ascii")
salt = ''.join(random.choice(string.ascii_lowercase + string.digits) for x in range(12))
salted_id = (song_id + salt).encode("utf-8")
sig = base64.urlsafe_b64encode(hmac.new(key, salted_id, sha1).digest())[:-1]
params = {
'u': 0,
'pt': 'e',
'slt': salt,
'sig': sig
}
# TODO match guid instead, should be more robust
if song_id[0] == 'T':
# all access
params['mjck'] = song_id
else:
params['songid'] = song_id
return params
class ReportBadSongMatch(WcCall):
"""Request to signal the uploader to reupload a matched track."""
static_method = 'POST'
static_url = service_url + 'fixsongmatch'
static_params = {'format': 'jsarray'}
# This no longer holds.
expected_response = [[0], []]
@classmethod
def validate(cls, response, msg):
pass
# if msg != cls.expected_response:
# raise ValidationException("response != %r" % cls.expected_response)
@staticmethod
def dynamic_data(song_ids):
return json.dumps([["", 1], [song_ids]])
class UploadImage(WcCall):
"""Upload an image for use as album art."""
static_method = 'POST'
static_url = service_url + 'imageupload'
static_params = {'zx': '', # ??
'u': 0}
_res_schema = {
'type': 'object',
'properties': {
'imageUrl': {'type': 'string', 'blank': False},
'imageDisplayUrl': {'type': 'string', 'blank': False},
},
'additionalProperties': False
}
@staticmethod
def dynamic_files(image_filepath):
"""
:param image_filepath: path to an image
"""
with open(image_filepath, 'rb') as f:
contents = f.read()
return {'albumArt': (image_filepath, contents)}
class GetSettings(WcCall):
"""Get data that populates the settings tab: labs and devices."""
static_method = 'POST'
static_url = service_url + 'fetchsettings'
_device_schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'deviceType': {'type': 'integer'},
'id': {'type': 'string'},
'lastAccessedFormatted': {'type': 'string'},
'lastAccessedTimeMillis': {'type': 'integer'},
'lastEventTimeMillis': {'type': 'integer'},
'name': {'type': 'string', 'blank': True},
# only for type == 2 (android phone?):
'model': {'type': 'string', 'blank': True, 'required': False},
'manufacturer': {'type': 'string', 'blank': True, 'required': False},
'carrier': {'type': 'string', 'blank': True, 'required': False},
},
}
_res_schema = {
'type': 'object',
'additionalProperties': False,
'properties': {
'settings': {
'type': 'object',
'additionalProperties': False,
'properties': {
'entitlementInfo': {
'type': 'object',
'additionalProperties': False,
'properties': {
'expirationMillis': {'type': 'integer', 'required': False},
'isCanceled': {'type': 'boolean'},
'isSubscription': {'type': 'boolean'},
'isTrial': {'type': 'boolean'},
}},
'lab': {
'type': 'array',
'items': {
'type': 'object',
'additionalProperties': False,
'properties': {
'description': {'type': 'string'},
'enabled': {'type': 'boolean'},
'displayName': {'type': 'string'},
'experimentName': {'type': 'string'},
},
}},
'maxUploadedTracks': {'type': 'integer'},
'subscriptionNewsletter': {'type': 'boolean', 'required': False},
'uploadDevice': {
'type': 'array',
'items': _device_schema,
}},
}
},
}
@staticmethod
def dynamic_data(session_id):
"""
:param: session_id
"""
return {'json': json.dumps({'sessionId': session_id})}
class DeauthDevice(WcCall):
"""Deauthorize a device from GetSettings."""
static_method = 'POST'
static_url = service_url + 'modifysettings'
@staticmethod
def dynamic_data(device_id, session_id):
return {'json': json.dumps({'deauth': device_id, 'sessionId': session_id})}
@classmethod
def validate(cls, response, msg):
if msg.text != '{}':
raise ValidationException("expected an empty object; received %r" % msg.text)
class GetSharedPlaylist(WcCall):
"""Get the contents and metadata for a shared playlist."""
static_method = 'POST'
static_url = service_url + 'loadsharedplaylist'
static_params = {'format': 'jsarray'}
_res_schema = {
'type': 'array',
}
@classmethod
def parse_response(cls, response):
return cls._parse_json(jsarray.to_json(response.text))
@staticmethod
def dynamic_data(session_id, share_token):
return json.dumps([
[session_id, 1],
[share_token]
])
|
from tensornetwork.block_sparse.caching import (get_cacher, set_caching_status,
get_caching_status, clear_cache,
enable_caching, disable_caching,
_INSTANTIATED_CACHERS)
from tensornetwork.block_sparse.index import Index
from tensornetwork.block_sparse.charge import U1Charge, charge_equal
from tensornetwork.block_sparse.blocksparse_utils import (
_to_string, _find_transposed_diagonal_sparse_blocks)
from tensornetwork.block_sparse.blocksparsetensor import BlockSparseTensor
from tensornetwork.ncon_interface import ncon
import numpy as np
def test_get_cacher():
cacher = get_cacher()
assert len(_INSTANTIATED_CACHERS) == 1
assert _INSTANTIATED_CACHERS[0] is cacher
def test_set_caching_status():
set_caching_status(True)
cacher = get_cacher()
assert len(_INSTANTIATED_CACHERS) == 1
assert _INSTANTIATED_CACHERS[0] is cacher
assert cacher.do_caching
set_caching_status(False)
cacher = get_cacher()
assert len(_INSTANTIATED_CACHERS) == 1
assert _INSTANTIATED_CACHERS[0] is cacher
assert not cacher.do_caching
def test_get_caching_status():
set_caching_status(True)
assert get_caching_status()
set_caching_status(False)
assert not get_caching_status()
def test_enable_caching():
enable_caching()
cacher = get_cacher()
assert len(_INSTANTIATED_CACHERS) == 1
assert _INSTANTIATED_CACHERS[0] is cacher
assert cacher.do_caching
disable_caching()
def test_disable_caching():
disable_caching()
cacher = get_cacher()
assert len(_INSTANTIATED_CACHERS) == 1
assert _INSTANTIATED_CACHERS[0] is cacher
assert not cacher.do_caching
def test_cache():
D = 10
mpsinds = [
Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), False),
Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), False),
Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), False),
Index(U1Charge(np.random.randint(-5, 5, D, dtype=np.int16)), True)
]
A = BlockSparseTensor.random(mpsinds)
B = A.conj()
res_charges = [
A.flat_charges[2], A.flat_charges[3], B.flat_charges[2], B.flat_charges[3]
]
res_flows = [
A.flat_flows[2], A.flat_flows[3], B.flat_flows[2], B.flat_flows[3]
]
enable_caching()
ncon([A, B], [[1, 2, -1, -2], [1, 2, -3, -4]], backend='symmetric')
cacher = get_cacher()
sA = _to_string(A.flat_charges, A.flat_flows, 2, [2, 3, 0, 1])
sB = _to_string(B.flat_charges, B.flat_flows, 2, [0, 1, 2, 3])
sC = _to_string(res_charges, res_flows, 2, [0, 1, 2, 3])
blocksA, chargesA, dimsA = _find_transposed_diagonal_sparse_blocks(
A.flat_charges, A.flat_flows, 2, [2, 3, 0, 1])
blocksB, chargesB, dimsB = _find_transposed_diagonal_sparse_blocks(
B.flat_charges, B.flat_flows, 2, [0, 1, 2, 3])
blocksC, chargesC, dimsC = _find_transposed_diagonal_sparse_blocks(
res_charges, res_flows, 2, [0, 1, 2, 3])
assert sA in cacher.cache
assert sB in cacher.cache
assert sC in cacher.cache
for b1, b2 in zip(cacher.cache[sA][0], blocksA):
np.testing.assert_allclose(b1, b2)
for b1, b2 in zip(cacher.cache[sB][0], blocksB):
np.testing.assert_allclose(b1, b2)
for b1, b2 in zip(cacher.cache[sC][0], blocksC):
np.testing.assert_allclose(b1, b2)
assert charge_equal(cacher.cache[sA][1], chargesA)
assert charge_equal(cacher.cache[sB][1], chargesB)
assert charge_equal(cacher.cache[sC][1], chargesC)
np.testing.assert_allclose(cacher.cache[sA][2], dimsA)
np.testing.assert_allclose(cacher.cache[sB][2], dimsB)
np.testing.assert_allclose(cacher.cache[sC][2], dimsC)
disable_caching()
clear_cache()
def test_clear_cache():
D = 100
M = 5
mpsinds = [
Index(U1Charge(np.random.randint(5, 15, D, dtype=np.int16)), False),
Index(U1Charge(np.array([0, 1, 2, 3], dtype=np.int16)), False),
Index(U1Charge(np.random.randint(5, 18, D, dtype=np.int16)), True)
]
mpoinds = [
Index(U1Charge(np.random.randint(0, 5, M)), False),
Index(U1Charge(np.random.randint(0, 10, M)), True), mpsinds[1],
mpsinds[1].flip_flow()
]
Linds = [mpoinds[0].flip_flow(), mpsinds[0].flip_flow(), mpsinds[0]]
Rinds = [mpoinds[1].flip_flow(), mpsinds[2].flip_flow(), mpsinds[2]]
mps = BlockSparseTensor.random(mpsinds)
mpo = BlockSparseTensor.random(mpoinds)
L = BlockSparseTensor.random(Linds)
R = BlockSparseTensor.random(Rinds)
enable_caching()
ncon([L, mps, mpo, R], [[3, 1, -1], [1, 2, 4], [3, 5, -2, 2], [5, 4, -3]],
backend='symmetric')
cacher = get_cacher()
assert len(cacher.cache) > 0
disable_caching()
clear_cache()
assert len(cacher.cache) == 0
|
import json
import requests
from jinja2 import Undefined
from app import app
from app.entities.models import Entity
def split_sentence(sentence):
return sentence.split("###")
def get_synonyms():
"""
Build synonyms dict from DB
    :return: dict mapping each synonym to its canonical entity value
"""
synonyms = {}
for entity in Entity.objects:
for value in entity.entity_values:
for synonym in value.synonyms:
synonyms[synonym] = value.value
app.logger.info("loaded synonyms %s", synonyms)
return synonyms
def call_api(url, type, headers={}, parameters={}, is_json=False):
"""
Call external API
    :param url: request URL
    :param type: HTTP method to use ("GET", "POST", "PUT" or "DELETE")
    :param headers: optional request headers
    :param parameters: query parameters, or request body for POST/PUT
    :param is_json: send parameters as a JSON body instead of form/query data
    :return: parsed JSON response
"""
app.logger.info("Initiating API Call with following info: url => {} payload => {}".format(url, parameters))
if "GET" in type:
response = requests.get(url, headers=headers, params=parameters, timeout=5)
elif "POST" in type:
if is_json:
response = requests.post(url, headers=headers, json=parameters, timeout=5)
else:
response = requests.post(url, headers=headers, params=parameters, timeout=5)
elif "PUT" in type:
if is_json:
response = requests.put(url, headers=headers, json=parameters, timeout=5)
else:
response = requests.put(url, headers=headers, params=parameters, timeout=5)
elif "DELETE" in type:
response = requests.delete(url, headers=headers, params=parameters, timeout=5)
else:
raise Exception("unsupported request method.")
result = json.loads(response.text)
app.logger.info("API response => %s", result)
return result
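# Illustrative sketch (not part of the original module): how call_api might be
# used for a simple GET request. The endpoint, token and parameters below are
# hypothetical and only meant to show the calling convention.
#
#     weather = call_api(
#         "https://api.example.com/v1/weather",  # hypothetical endpoint
#         "GET",
#         headers={"Authorization": "Bearer <token>"},
#         parameters={"city": "Berlin"},
#     )
#     # `weather` is the decoded JSON dict returned by the endpoint.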
class SilentUndefined(Undefined):
"""
Class to suppress jinja2 errors and warnings
"""
def _fail_with_undefined_error(self, *args, **kwargs):
return 'undefined'
__add__ = __radd__ = __mul__ = __rmul__ = __div__ = __rdiv__ = \
__truediv__ = __rtruediv__ = __floordiv__ = __rfloordiv__ = \
__mod__ = __rmod__ = __pos__ = __neg__ = __call__ = \
__getitem__ = __lt__ = __le__ = __gt__ = __ge__ = __int__ = \
__float__ = __complex__ = __pow__ = __rpow__ = \
_fail_with_undefined_error
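# Illustrative sketch (assumption, not part of the original module): this class
# is typically passed as the `undefined` argument of a jinja2 Environment so
# that operations on missing variables (calls, indexing, arithmetic, ...) yield
# the string 'undefined' instead of raising an UndefinedError.
#
#     from jinja2 import Environment
#     env = Environment(undefined=SilentUndefined)
#     env.from_string("{{ missing_func() }}").render()  # -> "undefined"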
|
from homeassistant.const import DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, EVENTS_COORDINATOR
CATEGORIES = {
2: "Alarm",
4: "Status",
7: "Trouble",
}
EVENT_ATTRIBUTES = [
"category_id",
"category_name",
"type_id",
"type_name",
"name",
"text",
"partition_id",
"zone_id",
"user_id",
"group",
"priority",
"raw",
]
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up sensors for device."""
coordinator = hass.data[DOMAIN][config_entry.entry_id][EVENTS_COORDINATOR]
sensors = [
RiscoSensor(coordinator, id, [], name) for id, name in CATEGORIES.items()
]
sensors.append(RiscoSensor(coordinator, None, CATEGORIES.keys(), "Other"))
async_add_entities(sensors)
class RiscoSensor(CoordinatorEntity):
"""Sensor for Risco events."""
def __init__(self, coordinator, category_id, excludes, name) -> None:
"""Initialize sensor."""
super().__init__(coordinator)
self._event = None
self._category_id = category_id
self._excludes = excludes
self._name = name
@property
def name(self):
"""Return the name of the sensor."""
return f"Risco {self.coordinator.risco.site_name} {self._name} Events"
@property
def unique_id(self):
"""Return a unique id for this sensor."""
return f"events_{self._name}_{self.coordinator.risco.site_uuid}"
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.async_on_remove(
self.coordinator.async_add_listener(self._refresh_from_coordinator)
)
await self.coordinator.async_request_refresh()
def _refresh_from_coordinator(self):
events = self.coordinator.data
for event in reversed(events):
if event.category_id in self._excludes:
continue
if self._category_id is not None and event.category_id != self._category_id:
continue
self._event = event
self.async_write_ha_state()
@property
def state(self):
"""Value of sensor."""
if self._event is None:
return None
return self._event.time
@property
def device_state_attributes(self):
"""State attributes."""
if self._event is None:
return None
return {atr: getattr(self._event, atr, None) for atr in EVENT_ATTRIBUTES}
@property
def device_class(self):
"""Device class of sensor."""
return DEVICE_CLASS_TIMESTAMP
|
import os
from perfkitbenchmarker import vm_util
SDK_REPO = 'https://dl.google.com/dl/cloudsdk/release/google-cloud-sdk.tar.gz'
SDK_DIR = '%s/google-cloud-sdk' % vm_util.VM_TMP_DIR
SDK_INSTALL_FILE = '%s/install.sh' % SDK_DIR
GCLOUD_PATH = '%s/bin/gcloud' % SDK_DIR
GSUTIL_PATH = '%s/bin/gsutil' % SDK_DIR
KUBECTL_PATH = '%s/bin/kubectl' % SDK_DIR
def RunGcloud(vm, cmd):
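  """Runs a gcloud command on the VM with prompts disabled and JSON output."""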
return vm.RemoteCommand('export CLOUDSDK_CORE_DISABLE_PROMPTS=1 && %s %s '
'--project %s --format json' % (GCLOUD_PATH, cmd,
vm.project))
def Install(vm):
"""Installs google cloud sdk on the VM."""
vm.Install('wget')
vm.RemoteCommand('cd {0} && wget {1} && tar xzf {2} && rm {2}'.format(
vm_util.VM_TMP_DIR, SDK_REPO, os.path.basename(SDK_REPO)))
vm.RemoteCommand('%s --disable-installation-options --usage-report=false '
'--path-update=false --bash-completion=false'
% SDK_INSTALL_FILE)
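# Illustrative sketch (not part of the original module): typical usage from a
# benchmark, assuming `vm` is a PerfKitBenchmarker VM and this module is
# registered as the 'google_cloud_sdk' package (an assumption on our part).
#
#     vm.Install('google_cloud_sdk')
#     stdout, _ = RunGcloud(vm, 'compute instances list')
#     # --format json is appended by RunGcloud, so stdout is JSON-parseable.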
|
from typing import Callable, List
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_NAME
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from . import NZBGetEntity
from .const import DATA_COORDINATOR, DOMAIN
from .coordinator import NZBGetDataUpdateCoordinator
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up NZBGet sensor based on a config entry."""
coordinator: NZBGetDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
switches = [
NZBGetDownloadSwitch(
coordinator,
entry.entry_id,
entry.data[CONF_NAME],
),
]
async_add_entities(switches)
class NZBGetDownloadSwitch(NZBGetEntity, SwitchEntity):
"""Representation of a NZBGet download switch."""
def __init__(
self,
coordinator: NZBGetDataUpdateCoordinator,
entry_id: str,
entry_name: str,
):
"""Initialize a new NZBGet switch."""
self._unique_id = f"{entry_id}_download"
super().__init__(
coordinator=coordinator,
entry_id=entry_id,
name=f"{entry_name} Download",
)
@property
def unique_id(self) -> str:
"""Return the unique ID of the switch."""
return self._unique_id
@property
def is_on(self):
"""Return the state of the switch."""
return not self.coordinator.data["status"].get("DownloadPaused", False)
async def async_turn_on(self, **kwargs) -> None:
"""Set downloads to enabled."""
await self.hass.async_add_executor_job(self.coordinator.nzbget.resumedownload)
await self.coordinator.async_request_refresh()
async def async_turn_off(self, **kwargs) -> None:
"""Set downloads to paused."""
await self.hass.async_add_executor_job(self.coordinator.nzbget.pausedownload)
await self.coordinator.async_request_refresh()
|
import contextlib
import json
import os
import unittest
from absl import flags
import mock
from perfkitbenchmarker import relational_db
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.providers.aws import aws_disk
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import aws_relational_db
from perfkitbenchmarker.providers.aws.aws_relational_db import (AwsRelationalDb)
from perfkitbenchmarker.relational_db import AURORA_POSTGRES
from perfkitbenchmarker.relational_db import MYSQL
from six.moves import builtins
FLAGS = flags.FLAGS
_BENCHMARK_NAME = 'name'
_BENCHMARK_UID = 'benchmark_uid'
_COMPONENT = 'test_component'
_FLAGS = None
_AWS_PREFIX = 'aws --output json'
def readTestDataFile(filename):
path = os.path.join(
os.path.dirname(__file__), '../../data',
filename)
with open(path) as fp:
return fp.read()
class AwsRelationalDbSpecTestCase(unittest.TestCase):
"""Class that tests the creation of an AwsRelationalDbSpec."""
pass
class AwsRelationalDbFlagsTestCase(unittest.TestCase):
"""Class that tests the flags defined in AwsRelationalDb."""
pass
class AwsRelationalDbTestCase(unittest.TestCase):
def setUp(self):
flag_values = {'run_uri': '123', 'project': None}
p = mock.patch(aws_relational_db.__name__ + '.FLAGS')
flags_mock = p.start()
flags_mock.configure_mock(**flag_values)
FLAGS['use_managed_db'].parse(True)
self.addCleanup(p.stop)
@contextlib.contextmanager
def _PatchCriticalObjects(self, stdout='', stderr='', return_code=0):
"""A context manager that patches a few critical objects with mocks."""
retval = (stdout, stderr, return_code)
with mock.patch(
vm_util.__name__ + '.IssueCommand',
return_value=retval) as issue_command, mock.patch(
builtins.__name__ + '.open'), mock.patch(vm_util.__name__ +
'.NamedTemporaryFile'):
yield issue_command
def VmGroupSpec(self):
return {
'clients': {
'vm_spec': {
'GCP': {
'zone': 'us-central1-c',
'machine_type': 'n1-standard-1'
}
},
'disk_spec': {
'GCP': {
'disk_size': 500,
'disk_type': 'pd-ssd'
}
}
},
'servers': {
'vm_spec': {
'GCP': {
'zone': 'us-central1-c',
'machine_type': 'n1-standard-1'
}
},
'disk_spec': {
'GCP': {
'disk_size': 500,
'disk_type': 'pd-ssd'
}
}
}
}
def CreateMockSpec(self, additional_spec_items={}):
default_server_db_disk_spec = aws_disk.AwsDiskSpec(
_COMPONENT, disk_size=5, disk_type=aws_disk.IO1, iops=1000)
default_server_db_spec = virtual_machine.BaseVmSpec(
'NAME', **{
'machine_type': 'db.t1.micro',
'zone': 'us-west-2b'
})
spec_dict = {
'engine': MYSQL,
'engine_version': '5.7.11',
'run_uri': '123',
'database_name': 'fakedbname',
'database_password': 'fakepassword',
'database_username': 'fakeusername',
'high_availability': False,
'db_spec': default_server_db_spec,
'db_disk_spec': default_server_db_disk_spec,
'vm_groups': self.VmGroupSpec(),
}
spec_dict.update(additional_spec_items)
mock_db_spec = mock.Mock(spec=benchmark_config_spec._RelationalDbSpec)
mock_db_spec.configure_mock(**spec_dict)
return mock_db_spec
def CreateMockClientVM(self, db_class):
m = mock.MagicMock()
m.HasIpAddress = True
m.ip_address = '192.168.0.1'
db_class.client_vm = m
def CreateMockServerVM(self, db_class):
m = mock.MagicMock()
m.HasIpAddress = True
m.ip_address = '192.168.2.1'
db_class.server_vm = m
def CreateDbFromMockSpec(self, mock_spec):
aws_db = AwsRelationalDb(mock_spec)
# Set necessary instance attributes that are not part of the spec
aws_db.security_group_name = 'fake_security_group'
aws_db.db_subnet_group_name = 'fake_db_subnet'
aws_db.security_group_id = 'fake_security_group_id'
self.CreateMockClientVM(aws_db)
return aws_db
def CreateDbFromSpec(self, additional_spec_items={}):
mock_spec = self.CreateMockSpec(additional_spec_items)
return self.CreateDbFromMockSpec(mock_spec)
def Create(self, additional_spec_items={}):
with self._PatchCriticalObjects() as issue_command:
db = self.CreateDbFromSpec(additional_spec_items)
db._Create()
return ' '.join(issue_command.call_args[0][0])
def testCreate(self):
command_string = self.Create()
self.assertTrue(
command_string.startswith('%s rds create-db-instance' % _AWS_PREFIX))
self.assertIn('--db-instance-identifier=pkb-db-instance-123',
command_string)
self.assertIn('--db-instance-class=db.t1.micro', command_string)
self.assertIn('--engine=mysql', command_string)
self.assertIn('--master-user-password=fakepassword', command_string)
def testCorrectVmGroupsPresent(self):
with self._PatchCriticalObjects():
db = self.CreateDbFromSpec()
self.CreateMockServerVM(db)
db._Create()
vms = relational_db.VmsToBoot(db.spec.vm_groups)
self.assertNotIn('servers', vms)
def CreateAuroraMockSpec(self, additional_spec_items={}):
default_server_db_spec = virtual_machine.BaseVmSpec(
'NAME', **{
'machine_type': 'db.t1.micro',
'zone': 'us-west-2b'
})
spec_dict = {
'engine': AURORA_POSTGRES,
'run_uri': '123',
'database_name': 'fakedbname',
'database_password': 'fakepassword',
'database_username': 'fakeusername',
'db_spec': default_server_db_spec,
'zones': ['us-east-1a', 'us-east-1d'],
'engine_version': '9.6.2',
'high_availability': True
}
spec_dict.update(additional_spec_items)
mock_db_spec = mock.Mock(spec=benchmark_config_spec._RelationalDbSpec)
mock_db_spec.configure_mock(**spec_dict)
return mock_db_spec
def CreateAuroraDbFromSpec(self, additional_spec_items={}):
mock_spec = self.CreateAuroraMockSpec(additional_spec_items)
return self.CreateDbFromMockSpec(mock_spec)
def CreateAurora(self, additional_spec_items={}):
with self._PatchCriticalObjects() as issue_command:
db = self.CreateAuroraDbFromSpec(additional_spec_items)
db._Create()
call_results = []
for call in issue_command.call_args_list:
call_results.append(' '.join(call[0][0]))
return call_results
def testCreateAurora(self):
command_strings = self.CreateAurora()
self.assertIn(
'%s rds create-db-cluster' % _AWS_PREFIX, command_strings[0])
self.assertIn('--db-cluster-identifier=pkb-db-cluster-123',
command_strings[0])
self.assertIn('--engine=aurora-postgresql', command_strings[0])
self.assertIn('--master-user-password=fakepassword', command_strings[0])
self.assertIn(
'%s rds create-db-instance' % _AWS_PREFIX, command_strings[1])
self.assertIn('--db-cluster-identifier=pkb-db-cluster-123',
command_strings[1])
self.assertIn('--engine=aurora-postgresql', command_strings[1])
  def testNoHighAvailability(self):
    spec_dict = {
        'high_availability': False,
    }
    command_string = self.Create(spec_dict)
    self.assertNotIn('--multi-az', command_string)
  def testHighAvailability(self):
    spec_dict = {
        'high_availability': True,
    }
    command_string = self.Create(spec_dict)
    self.assertIn('--multi-az', command_string)
def testDiskWithIops(self):
command_string = self.Create()
self.assertIn('--allocated-storage=5', command_string)
self.assertIn('--storage-type=%s' % aws_disk.IO1, command_string)
self.assertIn('--iops=1000', command_string)
def testDiskWithoutIops(self):
spec_dict = {
'db_disk_spec':
aws_disk.AwsDiskSpec(
_COMPONENT, disk_size=5, disk_type=aws_disk.GP2)
}
command_string = self.Create(spec_dict)
self.assertIn('--allocated-storage=5', command_string)
self.assertIn('--storage-type=%s' % aws_disk.GP2, command_string)
self.assertNotIn('--iops', command_string)
def testUnspecifiedDatabaseVersion(self):
command_string = self.Create()
self.assertIn('--engine-version=5.7.11', command_string)
def testSpecifiedDatabaseVersion(self):
spec_dict = {
'engine_version': '5.6.29',
}
command_string = self.Create(spec_dict)
self.assertIn('--engine-version=5.6.29', command_string)
def testIsNotReady(self):
test_data = readTestDataFile('aws-describe-db-instances-creating.json')
with self._PatchCriticalObjects(stdout=test_data):
db = self.CreateDbFromSpec()
db.all_instance_ids.append('pkb-db-instance-123')
self.assertEqual(False, db._IsReady(timeout=0))
def testIsReady(self):
test_data = readTestDataFile('aws-describe-db-instances-available.json')
with self._PatchCriticalObjects(stdout=test_data):
db = self.CreateDbFromSpec()
db.all_instance_ids.append('pkb-db-instance-123')
self.assertEqual(True, db._IsReady())
def testParseEndpoint(self):
test_data = readTestDataFile('aws-describe-db-instances-available.json')
with self._PatchCriticalObjects():
db = self.CreateDbFromSpec()
self.assertEqual(
'pkb-db-instance-a4499926.cqxeajwjbqne.us-west-2.rds.amazonaws.com',
db._ParseEndpointFromInstance(json.loads(test_data)))
def testParsePort(self):
test_data = readTestDataFile('aws-describe-db-instances-available.json')
with self._PatchCriticalObjects():
db = self.CreateDbFromSpec()
self.assertEqual(3306, db._ParsePortFromInstance(json.loads(test_data)))
def testDelete(self):
with self._PatchCriticalObjects() as issue_command:
db = self.CreateDbFromSpec()
db.all_instance_ids.append('pkb-db-instance-123')
db._Delete()
command_string = ' '.join(issue_command.call_args[0][0])
self.assertIn('aws --output json rds delete-db-instance', command_string)
self.assertIn('--db-instance-identifier=pkb-db-instance-123',
command_string)
self.assertIn('--skip-final-snapshot', command_string)
@mock.patch.object(aws_network.AwsFirewall, '_RuleExists', return_value=True,
autospec=True)
def testCreateUnmanagedDb(self, rule_check_mock): # pylint: disable=unused-argument
FLAGS['use_managed_db'].parse(False)
FLAGS['default_timeout'].parse(300)
with self._PatchCriticalObjects() as issue_command:
db = self.CreateDbFromSpec()
self.CreateMockServerVM(db)
db._Create()
self.assertTrue(db._Exists())
self.assertTrue(hasattr(db, 'firewall'))
self.assertEqual(db.endpoint, db.server_vm.ip_address)
self.assertEqual(db.spec.database_username, 'root')
self.assertEqual(db.spec.database_password, 'perfkitbenchmarker')
self.assertIsNone(issue_command.call_args)
if __name__ == '__main__':
unittest.main()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import os
from absl import flags
from compare_gan import eval_utils
from compare_gan.architectures import abstract_arch
from compare_gan.architectures import arch_ops
import gin
import mock
import numpy as np
import tensorflow as tf
FLAGS = flags.FLAGS
def create_fake_inception_graph():
"""Creates a graph that mocks inception.
  It takes the input, multiplies it through a matrix full of 0.00001 values
  and returns the result as logits. It makes sure to match the tensor names
  of the real inception model.
Returns:
tf.Graph object with a simple mock inception inside.
"""
fake_inception = tf.Graph()
with fake_inception.as_default():
graph_input = tf.placeholder(
tf.float32, shape=[None, 299, 299, 3], name="Mul")
matrix = tf.ones(shape=[299 * 299 * 3, 10]) * 0.00001
output = tf.matmul(tf.layers.flatten(graph_input), matrix)
output = tf.identity(output, name="pool_3")
output = tf.identity(output, name="logits")
return fake_inception.as_graph_def()
class Generator(abstract_arch.AbstractGenerator):
"""Generator with a single linear layer from z to the output."""
def __init__(self, **kwargs):
super(Generator, self).__init__(**kwargs)
self.call_arg_list = []
def apply(self, z, y, is_training):
self.call_arg_list.append(dict(z=z, y=y, is_training=is_training))
batch_size = z.shape[0].value
out = arch_ops.linear(z, np.prod(self._image_shape), scope="fc_noise")
out = tf.nn.sigmoid(out)
return tf.reshape(out, [batch_size] + list(self._image_shape))
class Discriminator(abstract_arch.AbstractDiscriminator):
"""Discriminator with a single linear layer."""
def __init__(self, **kwargs):
super(Discriminator, self).__init__(**kwargs)
self.call_arg_list = []
def apply(self, x, y, is_training):
self.call_arg_list.append(dict(x=x, y=y, is_training=is_training))
h = tf.reduce_mean(x, axis=[1, 2])
out = arch_ops.linear(h, 1)
return tf.nn.sigmoid(out), out, h
class CompareGanTestCase(tf.test.TestCase):
"""Base class for test cases."""
def setUp(self):
super(CompareGanTestCase, self).setUp()
# Use fake datasets instead of reading real files.
FLAGS.data_fake_dataset = True
    # Clear the gin configuration.
gin.clear_config()
# Mock the inception graph.
fake_inception_graph = create_fake_inception_graph()
self.inception_graph_def_mock = mock.patch.object(
eval_utils,
"get_inception_graph_def",
return_value=fake_inception_graph).start()
def _get_empty_model_dir(self):
unused_sub_dir = str(datetime.datetime.now().microsecond)
model_dir = os.path.join(FLAGS.test_tmpdir, unused_sub_dir)
assert not tf.gfile.Exists(model_dir)
return model_dir
|
import ssl
import unittest
import tests
from pyVmomi import pbm, VmomiSupport, SoapStubAdapter
from pyVim.connect import SmartConnectNoSSL
class PBMTests(tests.VCRTestBase):
def get_pbm_connection(self, vpxd_stub):
VmomiSupport.GetRequestContext()["vcSessionCookie"] = \
vpxd_stub.cookie.split('"')[1]
hostname = vpxd_stub.host.split(":")[0]
pbm_stub = SoapStubAdapter(
host=hostname,
version="pbm.version.version11",
path="/pbm/sdk",
poolSize=0,
sslContext=ssl._create_unverified_context())
pbm_si = pbm.ServiceInstance("ServiceInstance", pbm_stub)
pbm_content = pbm_si.RetrieveContent()
return pbm_content
def get_profile(self, profile_name, pbm_content):
pm = pbm_content.profileManager
profile_ids = pm.PbmQueryProfile(
resourceType=pbm.profile.ResourceType(resourceType="STORAGE"),
profileCategory="REQUIREMENT")
if len(profile_ids) > 0:
profiles = pm.PbmRetrieveContent(profileIds=profile_ids)
for profile in profiles:
if profile_name in profile.name:
return profile
raise Exception('Profile not found')
@tests.VCRTestBase.my_vcr.use_cassette('pbm_check_compatibility.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='Once')
def test_pbm_check_compatibility(self):
si = SmartConnectNoSSL(host='vcsa',
user='[email protected]',
pwd='Admin!23')
# Connect to SPBM Endpoint
pbm_content = self.get_pbm_connection(si._stub)
sp = self.get_profile("test-profile", pbm_content)
pbm_content.placementSolver.PbmCheckCompatibility(profile=sp.profileId)
if __name__ == '__main__':
unittest.main()
|
import logging
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_RADIUS,
CONF_SCAN_INTERVAL,
)
from homeassistant.helpers import config_validation as cv
from .const import ( # pylint: disable=unused-import
CONF_MINIMUM_MAGNITUDE,
CONF_MMI,
DEFAULT_MINIMUM_MAGNITUDE,
DEFAULT_MMI,
DEFAULT_RADIUS,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
)
DATA_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MMI, default=DEFAULT_MMI): vol.All(
vol.Coerce(int), vol.Range(min=-1, max=8)
),
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS): cv.positive_int,
}
)
_LOGGER = logging.getLogger(__name__)
class GeonetnzQuakesFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a GeoNet NZ Quakes config flow."""
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def _show_form(self, errors=None):
"""Show the form to the user."""
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors or {}
)
async def async_step_import(self, import_config):
"""Import a config entry from configuration.yaml."""
return await self.async_step_user(import_config)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
_LOGGER.debug("User input: %s", user_input)
if not user_input:
return await self._show_form()
latitude = user_input.get(CONF_LATITUDE, self.hass.config.latitude)
user_input[CONF_LATITUDE] = latitude
longitude = user_input.get(CONF_LONGITUDE, self.hass.config.longitude)
user_input[CONF_LONGITUDE] = longitude
identifier = f"{user_input[CONF_LATITUDE]}, {user_input[CONF_LONGITUDE]}"
await self.async_set_unique_id(identifier)
self._abort_if_unique_id_configured()
scan_interval = user_input.get(CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL)
user_input[CONF_SCAN_INTERVAL] = scan_interval.seconds
minimum_magnitude = user_input.get(
CONF_MINIMUM_MAGNITUDE, DEFAULT_MINIMUM_MAGNITUDE
)
user_input[CONF_MINIMUM_MAGNITUDE] = minimum_magnitude
return self.async_create_entry(title=identifier, data=user_input)
|
import os
from collections import defaultdict
import natsort
from pygments import highlight
from pygments.lexers import get_lexer_for_filename, guess_lexer, TextLexer
from nikola.plugin_categories import Task
from nikola import utils
class Listings(Task):
"""Render code listings."""
name = "render_listings"
def register_output_name(self, input_folder, rel_name, rel_output_name):
"""Register proper and improper file mappings."""
self.improper_input_file_mapping[rel_name].add(rel_output_name)
self.proper_input_file_mapping[os.path.join(input_folder, rel_name)] = rel_output_name
self.proper_input_file_mapping[rel_output_name] = rel_output_name
def set_site(self, site):
"""Set Nikola site."""
site.register_path_handler('listing', self.listing_path)
site.register_path_handler('listing_source', self.listing_source_path)
# We need to prepare some things for the listings path handler to work.
self.kw = {
"default_lang": site.config["DEFAULT_LANG"],
"listings_folders": site.config["LISTINGS_FOLDERS"],
"output_folder": site.config["OUTPUT_FOLDER"],
"index_file": site.config["INDEX_FILE"],
"strip_indexes": site.config['STRIP_INDEXES'],
"filters": site.config["FILTERS"],
}
# Verify that no folder in LISTINGS_FOLDERS appears twice (on output side)
appearing_paths = set()
for source, dest in self.kw['listings_folders'].items():
if source in appearing_paths or dest in appearing_paths:
problem = source if source in appearing_paths else dest
                utils.LOGGER.error("The listings input or output folder '{0}' appears in more than one entry in LISTINGS_FOLDERS, skipping it.".format(problem))
continue
appearing_paths.add(source)
appearing_paths.add(dest)
# improper_input_file_mapping maps a relative input file (relative to
# its corresponding input directory) to a list of the output files.
# Since several input directories can contain files of the same name,
# a list is needed. This is needed for compatibility to previous Nikola
# versions, where there was no need to specify the input directory name
# when asking for a link via site.link('listing', ...).
self.improper_input_file_mapping = defaultdict(set)
# proper_input_file_mapping maps relative input file (relative to CWD)
# to a generated output file. Since we don't allow an input directory
# to appear more than once in LISTINGS_FOLDERS, we can map directly to
# a file name (and not a list of files).
self.proper_input_file_mapping = {}
for input_folder, output_folder in self.kw['listings_folders'].items():
for root, _, files in os.walk(input_folder, followlinks=True):
# Compute relative path; can't use os.path.relpath() here as it returns "." instead of ""
rel_path = root[len(input_folder):]
if rel_path[:1] == os.sep:
rel_path = rel_path[1:]
for f in files + [self.kw['index_file']]:
rel_name = os.path.join(rel_path, f)
rel_output_name = os.path.join(output_folder, rel_path, f)
# Register file names in the mapping.
self.register_output_name(input_folder, rel_name, rel_output_name)
return super().set_site(site)
def gen_tasks(self):
"""Render pretty code listings."""
# Things to ignore in listings
ignored_extensions = (".pyc", ".pyo")
def render_listing(in_name, out_name, input_folder, output_folder, folders=[], files=[]):
needs_ipython_css = False
if in_name and in_name.endswith('.ipynb'):
# Special handling: render ipynbs in listings (Issue #1900)
ipynb_plugin = self.site.plugin_manager.getPluginByName("ipynb", "PageCompiler")
if ipynb_plugin is None:
msg = "To use .ipynb files as listings, you must set up the Jupyter compiler in COMPILERS and POSTS/PAGES."
utils.LOGGER.error(msg)
raise ValueError(msg)
ipynb_compiler = ipynb_plugin.plugin_object
with open(in_name, "r", encoding="utf-8-sig") as in_file:
nb_json = ipynb_compiler._nbformat_read(in_file)
code = ipynb_compiler._compile_string(nb_json)
title = os.path.basename(in_name)
needs_ipython_css = True
elif in_name:
with open(in_name, 'r', encoding='utf-8-sig') as fd:
try:
lexer = get_lexer_for_filename(in_name)
except Exception:
try:
lexer = guess_lexer(fd.read())
except Exception:
lexer = TextLexer()
fd.seek(0)
code = highlight(
fd.read(), lexer,
utils.NikolaPygmentsHTML(in_name, linenos='table'))
title = os.path.basename(in_name)
else:
code = ''
title = os.path.split(os.path.dirname(out_name))[1]
crumbs = utils.get_crumbs(os.path.relpath(out_name,
self.kw['output_folder']),
is_file=True)
permalink = self.site.link(
'listing',
os.path.join(
input_folder,
os.path.relpath(
out_name[:-5], # remove '.html'
os.path.join(
self.kw['output_folder'],
output_folder))))
if in_name:
source_link = permalink[:-5] # remove '.html'
else:
source_link = None
context = {
'code': code,
'title': title,
'crumbs': crumbs,
'permalink': permalink,
'lang': self.kw['default_lang'],
'folders': natsort.natsorted(
folders, alg=natsort.ns.F | natsort.ns.IC),
'files': natsort.natsorted(
files, alg=natsort.ns.F | natsort.ns.IC),
'description': title,
'source_link': source_link,
'pagekind': ['listing'],
}
if needs_ipython_css:
# If someone does not have ipynb posts and only listings, we
# need to enable ipynb CSS for ipynb listings.
context['needs_ipython_css'] = True
self.site.render_template('listing.tmpl', out_name, context)
yield self.group_task()
template_deps = self.site.template_system.template_deps('listing.tmpl')
for input_folder, output_folder in self.kw['listings_folders'].items():
for root, dirs, files in os.walk(input_folder, followlinks=True):
files = [f for f in files if os.path.splitext(f)[-1] not in ignored_extensions]
uptodate = {'c': self.site.GLOBAL_CONTEXT}
for k, v in self.site.GLOBAL_CONTEXT['template_hooks'].items():
uptodate['||template_hooks|{0}||'.format(k)] = v.calculate_deps()
for k in self.site._GLOBAL_CONTEXT_TRANSLATABLE:
uptodate[k] = self.site.GLOBAL_CONTEXT[k](self.kw['default_lang'])
# save navigation links as dependencies
uptodate['navigation_links'] = uptodate['c']['navigation_links'](self.kw['default_lang'])
uptodate['kw'] = self.kw
uptodate2 = uptodate.copy()
uptodate2['f'] = files
uptodate2['d'] = dirs
# Compute relative path; can't use os.path.relpath() here as it returns "." instead of ""
rel_path = root[len(input_folder):]
if rel_path[:1] == os.sep:
rel_path = rel_path[1:]
rel_name = os.path.join(rel_path, self.kw['index_file'])
rel_output_name = os.path.join(output_folder, rel_path, self.kw['index_file'])
# Render all files
out_name = os.path.join(self.kw['output_folder'], rel_output_name)
yield utils.apply_filters({
'basename': self.name,
'name': out_name,
'file_dep': template_deps,
'targets': [out_name],
'actions': [(render_listing, [None, out_name, input_folder, output_folder, dirs, files])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(uptodate2, 'nikola.plugins.task.listings:folder')],
'clean': True,
}, self.kw["filters"])
for f in files:
if f == '.DS_Store':
continue
ext = os.path.splitext(f)[-1]
if ext in ignored_extensions:
continue
in_name = os.path.join(root, f)
# Record file names
rel_name = os.path.join(rel_path, f + '.html')
rel_output_name = os.path.join(output_folder, rel_path, f + '.html')
self.register_output_name(input_folder, rel_name, rel_output_name)
# Set up output name
out_name = os.path.join(self.kw['output_folder'], rel_output_name)
# Yield task
yield utils.apply_filters({
'basename': self.name,
'name': out_name,
'file_dep': template_deps + [in_name],
'targets': [out_name],
'actions': [(render_listing, [in_name, out_name, input_folder, output_folder])],
# This is necessary to reflect changes in blog title,
# sidebar links, etc.
'uptodate': [utils.config_changed(uptodate, 'nikola.plugins.task.listings:source')],
'clean': True,
}, self.kw["filters"])
rel_name = os.path.join(rel_path, f)
rel_output_name = os.path.join(output_folder, rel_path, f)
self.register_output_name(input_folder, rel_name, rel_output_name)
out_name = os.path.join(self.kw['output_folder'], rel_output_name)
yield utils.apply_filters({
'basename': self.name,
'name': out_name,
'file_dep': [in_name],
'targets': [out_name],
'actions': [(utils.copy_file, [in_name, out_name])],
'clean': True,
}, self.kw["filters"])
def listing_source_path(self, name, lang):
"""Return a link to the source code for a listing.
It will try to use the file name if it's not ambiguous, or the file path.
Example:
link://listing_source/hello.py => /listings/tutorial/hello.py
link://listing_source/tutorial/hello.py => /listings/tutorial/hello.py
"""
result = self.listing_path(name, lang)
if result[-1].endswith('.html'):
result[-1] = result[-1][:-5]
return result
def listing_path(self, namep, lang):
"""Return a link to a listing.
It will try to use the file name if it's not ambiguous, or the file path.
Example:
link://listing/hello.py => /listings/tutorial/hello.py.html
link://listing/tutorial/hello.py => /listings/tutorial/hello.py.html
"""
namep = namep.replace('/', os.sep)
nameh = namep + '.html'
for name in (namep, nameh):
if name in self.proper_input_file_mapping:
# If the name shows up in this dict, everything's fine.
name = self.proper_input_file_mapping[name]
break
elif name in self.improper_input_file_mapping:
# If the name shows up in this dict, we have to check for
# ambiguities.
if len(self.improper_input_file_mapping[name]) > 1:
utils.LOGGER.error("Using non-unique listing name '{0}', which maps to more than one listing name ({1})!".format(name, str(self.improper_input_file_mapping[name])))
return ["ERROR"]
if len(self.site.config['LISTINGS_FOLDERS']) > 1:
utils.LOGGER.warning("Using listings names in site.link() without input directory prefix while configuration's LISTINGS_FOLDERS has more than one entry.")
name = list(self.improper_input_file_mapping[name])[0]
break
else:
utils.LOGGER.error("Unknown listing name {0}!".format(namep))
return ["ERROR"]
if not name.endswith(os.sep + self.site.config["INDEX_FILE"]):
name += '.html'
path_parts = name.split(os.sep)
return [_f for _f in path_parts if _f]
|
import logging
from typing import Any, Dict, Optional
from spotipy import Spotify
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import persistent_notification
from homeassistant.helpers import config_entry_oauth2_flow
from .const import DOMAIN, SPOTIFY_SCOPES
class SpotifyFlowHandler(
config_entry_oauth2_flow.AbstractOAuth2FlowHandler, domain=DOMAIN
):
"""Config flow to handle Spotify OAuth2 authentication."""
DOMAIN = DOMAIN
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self) -> None:
"""Instantiate config flow."""
super().__init__()
self.entry: Optional[Dict[str, Any]] = None
@property
def logger(self) -> logging.Logger:
"""Return logger."""
return logging.getLogger(__name__)
@property
def extra_authorize_data(self) -> Dict[str, Any]:
"""Extra data that needs to be appended to the authorize url."""
return {"scope": ",".join(SPOTIFY_SCOPES)}
async def async_oauth_create_entry(self, data: Dict[str, Any]) -> Dict[str, Any]:
"""Create an entry for Spotify."""
spotify = Spotify(auth=data["token"]["access_token"])
try:
current_user = await self.hass.async_add_executor_job(spotify.current_user)
except Exception: # pylint: disable=broad-except
return self.async_abort(reason="connection_error")
name = data["id"] = current_user["id"]
if self.entry and self.entry["id"] != current_user["id"]:
return self.async_abort(reason="reauth_account_mismatch")
if current_user.get("display_name"):
name = current_user["display_name"]
data["name"] = name
await self.async_set_unique_id(current_user["id"])
return self.async_create_entry(title=name, data=data)
async def async_step_reauth(self, entry: Dict[str, Any]) -> Dict[str, Any]:
"""Perform reauth upon migration of old entries."""
if entry:
self.entry = entry
assert self.hass
persistent_notification.async_create(
self.hass,
f"Spotify integration for account {entry['id']} needs to be re-authenticated. Please go to the integrations page to re-configure it.",
"Spotify re-authentication",
"spotify_reauth",
)
return await self.async_step_reauth_confirm()
async def async_step_reauth_confirm(
self, user_input: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Confirm reauth dialog."""
if user_input is None:
return self.async_show_form(
step_id="reauth_confirm",
description_placeholders={"account": self.entry["id"]},
data_schema=vol.Schema({}),
errors={},
)
assert self.hass
persistent_notification.async_dismiss(self.hass, "spotify_reauth")
return await self.async_step_pick_implementation(
user_input={"implementation": self.entry["auth_implementation"]}
)
|
import logging
import unittest
import os
import numpy as np
import gensim
from gensim.test.utils import get_tmpfile
class BigCorpus:
"""A corpus of a large number of docs & large vocab"""
def __init__(self, words_only=False, num_terms=200000, num_docs=1000000, doc_len=100):
self.dictionary = gensim.utils.FakeDict(num_terms)
self.words_only = words_only
self.num_docs = num_docs
self.doc_len = doc_len
def __iter__(self):
for _ in range(self.num_docs):
doc_len = np.random.poisson(self.doc_len)
ids = np.random.randint(0, len(self.dictionary), doc_len)
if self.words_only:
yield [str(idx) for idx in ids]
else:
weights = np.random.poisson(3, doc_len)
yield sorted(zip(ids, weights))
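# Illustrative note (not in the original file): each generated document is
# either a list of token strings (words_only=True, suitable for Word2Vec) or a
# sorted bag-of-words list of (term_id, weight) pairs (suitable for LsiModel
# and LdaModel), e.g.:
#
#     next(iter(BigCorpus(words_only=True)))  # ['1042', '77', ...]
#     next(iter(BigCorpus()))                 # [(3, 2), (17, 4), ...]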
if os.environ.get('GENSIM_BIG', False):
class TestLargeData(unittest.TestCase):
"""Try common operations, using large models. You'll need ~8GB RAM to run these tests"""
def testWord2Vec(self):
corpus = BigCorpus(words_only=True, num_docs=100000, num_terms=3000000, doc_len=200)
tmpf = get_tmpfile('gensim_big.tst')
model = gensim.models.Word2Vec(corpus, vector_size=300, workers=4)
model.save(tmpf, ignore=['syn1'])
del model
gensim.models.Word2Vec.load(tmpf)
def testLsiModel(self):
corpus = BigCorpus(num_docs=50000)
tmpf = get_tmpfile('gensim_big.tst')
model = gensim.models.LsiModel(corpus, num_topics=500, id2word=corpus.dictionary)
model.save(tmpf)
del model
gensim.models.LsiModel.load(tmpf)
def testLdaModel(self):
corpus = BigCorpus(num_docs=5000)
tmpf = get_tmpfile('gensim_big.tst')
model = gensim.models.LdaModel(corpus, num_topics=500, id2word=corpus.dictionary)
model.save(tmpf)
del model
gensim.models.LdaModel.load(tmpf)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
from copy import deepcopy
import numpy as np
from ..utils import logger, _check_option, _validate_type, verbose
from . import plot_sensors
from ..io._digitization import _get_fid_coords
@verbose
def plot_montage(montage, scale_factor=20, show_names=True, kind='topomap',
show=True, sphere=None, verbose=None):
"""Plot a montage.
Parameters
----------
montage : instance of DigMontage
The montage to visualize.
scale_factor : float
Determines the size of the points.
show_names : bool
Whether to show the channel names.
kind : str
Whether to plot the montage as '3d' or 'topomap' (default).
show : bool
Show figure if True.
%(topomap_sphere_auto)s
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
The figure object.
"""
from scipy.spatial.distance import cdist
from ..channels import DigMontage, make_dig_montage
from ..io import RawArray
from .. import create_info
_check_option('kind', kind, ['topomap', '3d'])
_validate_type(montage, DigMontage, item_name='montage')
ch_names = montage.ch_names
title = None
if len(ch_names) == 0:
raise RuntimeError('No valid channel positions found.')
pos = np.array(list(montage._get_ch_pos().values()))
dists = cdist(pos, pos)
# only consider upper triangular part by setting the rest to np.nan
dists[np.tril_indices(dists.shape[0])] = np.nan
dupes = np.argwhere(np.isclose(dists, 0))
if dupes.any():
montage = deepcopy(montage)
n_chans = pos.shape[0]
n_dupes = dupes.shape[0]
idx = np.setdiff1d(np.arange(len(pos)), dupes[:, 1]).tolist()
logger.info("{} duplicate electrode labels found:".format(n_dupes))
logger.info(", ".join([ch_names[d[0]] + "/" + ch_names[d[1]]
for d in dupes]))
logger.info("Plotting {} unique labels.".format(n_chans - n_dupes))
ch_names = [ch_names[i] for i in idx]
ch_pos = dict(zip(ch_names, pos[idx, :]))
# XXX: this might cause trouble if montage was originally in head
fid, _ = _get_fid_coords(montage.dig)
montage = make_dig_montage(ch_pos=ch_pos, **fid)
info = create_info(ch_names, sfreq=256, ch_types="eeg")
raw = RawArray(np.zeros((len(ch_names), 1)), info, copy=None)
raw.set_montage(montage, on_missing='ignore')
fig = plot_sensors(info, kind=kind, show_names=show_names, show=show,
title=title, sphere=sphere)
collection = fig.axes[0].collections[0]
collection.set_sizes([scale_factor])
return fig
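# Illustrative sketch (not part of the original module): plotting a built-in
# montage, assuming an interactive matplotlib backend is available.
#
#     from mne.channels import make_standard_montage
#     montage = make_standard_montage('standard_1020')
#     fig = plot_montage(montage, kind='topomap', show_names=True)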
|
import unittest
from absl import flags
import mock
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker.linux_benchmarks import iperf_benchmark
FLAGS = flags.FLAGS
FLAGS.mark_as_parsed()
class IperfBenchmarkTestCase(unittest.TestCase):
def setUp(self):
super(IperfBenchmarkTestCase, self).setUp()
self.vm_spec = mock.MagicMock(spec=benchmark_spec.BenchmarkSpec)
vm0 = mock.MagicMock(
internal_ip='10.128.0.1',
machine_type='mock_machine_1',
zone='antarctica-1a')
vm1 = mock.MagicMock(
internal_ip='10.128.0.2',
machine_type='mock_machine_1',
zone='antarctica-1a')
self.vm_spec.vms = [vm0, vm1]
def testIperfParseResultsUDPSingleThread(self):
iperf_output = """
Client connecting to 10.128.0.2, UDP port 25000 with pid 10159
Sending 1470 byte datagrams, IPG target: 11215.21 us (kalman adjust)
UDP buffer size: 0.20 MByte (default)
------------------------------------------------------------
[ 3] local 10.128.0.3 port 37350 connected with 10.128.0.2 port 25000
[ ID] Interval Transfer Bandwidth Write/Err PPS
[ 3] 0.00-60.00 sec 7.50 MBytes 1.05 Mbits/sec 5350/0 89 pps
[ 3] Sent 5350 datagrams
[ 3] Server Report:
[ 3] 0.0-60.0 sec 7.50 MBytes 1.05 Mbits/sec 0.017 ms 0/ 5350 (0%)
"""
self.vm_spec.vms[0].RemoteCommand.side_effect = [(iperf_output, '')]
results = iperf_benchmark._RunIperf(self.vm_spec.vms[0],
self.vm_spec.vms[1], '10.128.0.2', 1,
'INTERNAL', 'UDP')
expected_results = {
'receiving_machine_type': 'mock_machine_1',
'receiving_zone': 'antarctica-1a',
'sending_machine_type': 'mock_machine_1',
'sending_thread_count': 1,
'sending_zone': 'antarctica-1a',
'runtime_in_seconds': 60,
'ip_type': 'INTERNAL',
'buffer_size': 0.20,
'datagram_size_bytes': 1470,
'write_packet_count': 5350,
'err_packet_count': 0,
'pps': 89,
'ipg_target': 11215.21,
'ipg_target_unit': 'us',
'jitter': 0.017,
'jitter_unit': 'ms',
'lost_datagrams': 0,
'total_datagrams': 5350,
'out_of_order_datagrams': 0
}
self.assertEqual(results.value, 1.05)
self.assertEqual(expected_results, results.metadata)
def testIperfParseResultsUDPMultiThread(self):
iperf_output = """
Client connecting to 10.128.0.2, UDP port 25000 with pid 10188
Sending 1470 byte datagrams, IPG target: 11215.21 us (kalman adjust)
UDP buffer size: 0.20 MByte (default)
------------------------------------------------------------
[ 4] local 10.128.0.3 port 51681 connected with 10.128.0.2 port 25000
[ 3] local 10.128.0.3 port 58632 connected with 10.128.0.2 port 25000
[ ID] Interval Transfer Bandwidth Write/Err PPS
[ 3] 0.00-60.00 sec 7.50 MBytes 1.05 Mbits/sec 5350/0 89 pps
[ 3] Sent 5350 datagrams
[ 3] Server Report:
[ 3] 0.0-60.0 sec 7.50 MBytes 1.05 Mbits/sec 0.026 ms 1/ 5350 (0.019%)
[ 4] 0.00-60.00 sec 7.50 MBytes 1.05 Mbits/sec 5350/0 89 pps
[ 4] Sent 5350 datagrams
[SUM] 0.00-60.00 sec 15.0 MBytes 2.10 Mbits/sec 10700/0 178 pps
[SUM] Sent 10700 datagrams
[ 4] Server Report:
[ 4] 0.0-60.0 sec 7.50 MBytes 1.05 Mbits/sec 0.039 ms 0/ 5350 (0%)
[ 4] 0.00-60.00 sec 1 datagrams received out-of-order
"""
self.vm_spec.vms[0].RemoteCommand.side_effect = [(iperf_output, '')]
results = iperf_benchmark._RunIperf(self.vm_spec.vms[0],
self.vm_spec.vms[1], '10.128.0.2', 2,
'INTERNAL', 'UDP')
expected_results = {
'receiving_machine_type': 'mock_machine_1',
'receiving_zone': 'antarctica-1a',
'sending_machine_type': 'mock_machine_1',
'sending_thread_count': 2,
'sending_zone': 'antarctica-1a',
'runtime_in_seconds': 60,
'ip_type': 'INTERNAL',
'buffer_size': 0.20,
'datagram_size_bytes': 1470,
'write_packet_count': 10700,
'err_packet_count': 0,
'pps': 178,
'ipg_target': 11215.21,
'ipg_target_unit': 'us',
'jitter': 0.0325,
'jitter_unit': 'ms',
'lost_datagrams': 1,
'total_datagrams': 10700,
'out_of_order_datagrams': 1
}
self.assertEqual(expected_results, results.metadata)
self.assertEqual(results.value, 2.10)
def testIperfParseResultsTCPSingleThread(self):
iperf_output = """
Client connecting to 10.128.0.2, TCP port 20000 with pid 10208
Write buffer size: 0.12 MByte
TCP window size: 1.67 MByte (default)
------------------------------------------------------------
[ 3] local 10.128.0.3 port 33738 connected with 10.128.0.2 port 20000 (ct=1.62 ms)
[ ID] Interval Transfer Bandwidth Write/Err Rtry Cwnd/RTT NetPwr
[ 3] 0.00-60.00 sec 14063 MBytes 1966 Mbits/sec 112505/0 0 -1K/1346 us 182579.69
"""
self.vm_spec.vms[0].RemoteCommand.side_effect = [(iperf_output, '')]
results = iperf_benchmark._RunIperf(self.vm_spec.vms[0],
self.vm_spec.vms[1], '10.128.0.2', 1,
'INTERNAL', 'TCP')
expected_results = {
'receiving_machine_type': 'mock_machine_1',
'receiving_zone': 'antarctica-1a',
'sending_machine_type': 'mock_machine_1',
'sending_thread_count': 1,
'sending_zone': 'antarctica-1a',
'runtime_in_seconds': 60,
'ip_type': 'INTERNAL',
'buffer_size': 0.12,
'tcp_window_size': 1.67,
'write_packet_count': 112505,
'err_packet_count': 0,
'retry_packet_count': 0,
'congestion_window': -1,
'rtt': 1346,
'rtt_unit': 'us',
'netpwr': 182579.69
}
self.assertEqual(expected_results, results.metadata)
self.assertEqual(results.value, 1966.0)
def testIperfParseResultsTCPMultiThread(self):
iperf_output = """
Client connecting to 10.128.0.2, TCP port 20000 with pid 10561
Write buffer size: 0.12 MByte
TCP window size: 0.17 MByte (default)
------------------------------------------------------------
[ 4] local 10.128.0.2 port 54718 connected with 10.128.0.3 port 20000 (ct=0.16 ms)
[ 3] local 10.128.0.2 port 54716 connected with 10.128.0.3 port 20000 (ct=0.30 ms)
[ ID] Interval Transfer Bandwidth Write/Err Rtry Cwnd/RTT NetPwr
[ 4] 0.00-60.01 sec 7047 MBytes 985 Mbits/sec 56373/0 0 -1K/238 us 517366.48
[ 3] 0.00-60.00 sec 7048 MBytes 985 Mbits/sec 56387/0 0 -1K/129 us 954839.38
[SUM] 0.00-60.01 sec 14095 MBytes 1970 Mbits/sec 112760/0 0
"""
self.vm_spec.vms[0].RemoteCommand.side_effect = [(iperf_output, '')]
results = iperf_benchmark._RunIperf(self.vm_spec.vms[0],
self.vm_spec.vms[1], '10.128.0.2', 2,
'INTERNAL', 'TCP')
expected_results = {
'receiving_machine_type': 'mock_machine_1',
'receiving_zone': 'antarctica-1a',
'sending_machine_type': 'mock_machine_1',
'sending_thread_count': 2,
'sending_zone': 'antarctica-1a',
'runtime_in_seconds': 60,
'ip_type': 'INTERNAL',
'buffer_size': 0.12,
'tcp_window_size': 0.17,
'write_packet_count': 112760,
'err_packet_count': 0,
'retry_packet_count': 0,
'congestion_window': -1,
'rtt': 183.5,
'rtt_unit': 'us',
'netpwr': 736102.93
}
self.assertEqual(expected_results, results.metadata)
self.assertEqual(results.value, 1970.0)
if __name__ == '__main__':
unittest.main()
|
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.zwave.const import (
CONF_AUTOHEAL,
CONF_NETWORK_KEY,
CONF_POLLING_INTERVAL,
CONF_USB_STICK_PATH,
)
from homeassistant.components.zwave.websocket_api import ID, TYPE
async def test_zwave_ws_api(hass, mock_openzwave, hass_ws_client):
"""Test Z-Wave websocket API."""
await async_setup_component(
hass,
"zwave",
{
"zwave": {
CONF_AUTOHEAL: False,
CONF_USB_STICK_PATH: "/dev/zwave",
CONF_POLLING_INTERVAL: 6000,
CONF_NETWORK_KEY: "0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST",
}
},
)
await hass.async_block_till_done()
client = await hass_ws_client(hass)
await client.send_json({ID: 5, TYPE: "zwave/get_config"})
msg = await client.receive_json()
result = msg["result"]
assert result[CONF_USB_STICK_PATH] == "/dev/zwave"
assert not result[CONF_AUTOHEAL]
assert result[CONF_POLLING_INTERVAL] == 6000
await client.send_json({ID: 6, TYPE: "zwave/get_migration_config"})
msg = await client.receive_json()
result = msg["result"]
assert result[CONF_USB_STICK_PATH] == "/dev/zwave"
assert (
result[CONF_NETWORK_KEY]
== "0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST, 0xTE, 0xST"
)
|
from abc import ABC, abstractmethod
from typing import Optional
from homeassistant.core import Context
from homeassistant.helpers import intent
class AbstractConversationAgent(ABC):
"""Abstract conversation agent."""
@property
def attribution(self):
"""Return the attribution."""
return None
async def async_get_onboarding(self):
"""Get onboard data."""
return None
async def async_set_onboarding(self, shown):
"""Set onboard data."""
return True
@abstractmethod
async def async_process(
self, text: str, context: Context, conversation_id: Optional[str] = None
) -> intent.IntentResponse:
"""Process a sentence."""
|
import os
import os.path
import re
import sys
import time
import json
import logging
import collections
import textwrap
import subprocess
import shutil
import pytest
import pytest_bdd as bdd
import qutebrowser
from qutebrowser.utils import log, utils, docutils
from qutebrowser.browser import pdfjs
from helpers import utils as testutils
def _get_echo_exe_path():
"""Return the path to an echo-like command, depending on the system.
Return:
Path to the "echo"-utility.
"""
if utils.is_windows:
return os.path.join(testutils.abs_datapath(), 'userscripts',
'echo.bat')
else:
return shutil.which("echo")
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
"""Add a BDD section to the test output."""
outcome = yield
if call.when not in ['call', 'teardown']:
return
report = outcome.get_result()
if report.passed:
return
if (not hasattr(report.longrepr, 'addsection') or
not hasattr(report, 'scenario')):
# In some conditions (on macOS and Windows it seems), report.longrepr
# is actually a tuple. This is handled similarly in pytest-qt too.
#
# Since this hook is invoked for any test, we also need to skip it for
# non-BDD ones.
return
if ((sys.stdout.isatty() or testutils.ON_CI) and
item.config.getoption('--color') != 'no'):
colors = {
'failed': log.COLOR_ESCAPES['red'],
'passed': log.COLOR_ESCAPES['green'],
'keyword': log.COLOR_ESCAPES['cyan'],
'reset': log.RESET_ESCAPE,
}
else:
colors = {
'failed': '',
'passed': '',
'keyword': '',
'reset': '',
}
output = []
if testutils.ON_CI:
output.append(testutils.gha_group_begin('Scenario'))
output.append("{kw_color}Feature:{reset} {name}".format(
kw_color=colors['keyword'],
name=report.scenario['feature']['name'],
reset=colors['reset'],
))
output.append(
" {kw_color}Scenario:{reset} {name} ({filename}:{line})".format(
kw_color=colors['keyword'],
name=report.scenario['name'],
filename=report.scenario['feature']['rel_filename'],
line=report.scenario['line_number'],
reset=colors['reset'])
)
for step in report.scenario['steps']:
output.append(
" {kw_color}{keyword}{reset} {color}{name}{reset} "
"({duration:.2f}s)".format(
kw_color=colors['keyword'],
color=colors['failed'] if step['failed'] else colors['passed'],
keyword=step['keyword'],
name=step['name'],
duration=step['duration'],
reset=colors['reset'])
)
if testutils.ON_CI:
output.append(testutils.gha_group_end())
report.longrepr.addsection("BDD scenario", '\n'.join(output))
## Given
@bdd.given(bdd.parsers.parse("I set {opt} to {value}"))
def set_setting_given(quteproc, server, opt, value):
"""Set a qutebrowser setting.
This is available as "Given:" step so it can be used as "Background:".
"""
if value == '<empty>':
value = ''
value = value.replace('(port)', str(server.port))
quteproc.set_setting(opt, value)
@bdd.given(bdd.parsers.parse("I open {path}"))
def open_path_given(quteproc, path):
"""Open a URL.
This is available as "Given:" step so it can be used as "Background:".
It always opens a new tab, unlike "When I open ..."
"""
quteproc.open_path(path, new_tab=True)
@bdd.given(bdd.parsers.parse("I run {command}"))
def run_command_given(quteproc, command):
"""Run a qutebrowser command.
This is available as "Given:" step so it can be used as "Background:".
"""
quteproc.send_cmd(command)
@bdd.given(bdd.parsers.parse("I also run {command}"))
def run_command_given_2(quteproc, command):
"""Run a qutebrowser command.
Separate from the above as a hack to run two commands in a Background
without having to use ";;". This is needed because pytest-bdd doesn't allow
re-using a Given step...
"""
quteproc.send_cmd(command)
@bdd.given("I have a fresh instance")
def fresh_instance(quteproc):
"""Restart qutebrowser instance for tests needing a fresh state."""
quteproc.terminate()
quteproc.start()
@bdd.given("I clean up open tabs")
def clean_open_tabs(quteproc):
"""Clean up open windows and tabs."""
quteproc.set_setting('tabs.last_close', 'blank')
quteproc.send_cmd(':window-only')
quteproc.send_cmd(':tab-only --force')
quteproc.send_cmd(':tab-close --force')
quteproc.wait_for_load_finished_url('about:blank')
@bdd.given('pdfjs is available')
def pdfjs_available(data_tmpdir):
if not pdfjs.is_available():
pytest.skip("No pdfjs installation found.")
@bdd.given('I clear the log')
def clear_log_lines(quteproc):
quteproc.clear_data()
## When
@bdd.when(bdd.parsers.parse("I open {path}"))
def open_path(quteproc, server, path):
"""Open a URL.
- If used like "When I open ... in a new tab", the URL is opened in a new
tab.
- With "... in a new window", it's opened in a new window.
- With "... in a private window" it's opened in a new private window.
- With "... as a URL", it's opened according to new_instance_open_target.
"""
path = path.replace('(port)', str(server.port))
new_tab = False
new_bg_tab = False
new_window = False
private = False
as_url = False
wait = True
new_tab_suffix = ' in a new tab'
new_bg_tab_suffix = ' in a new background tab'
new_window_suffix = ' in a new window'
private_suffix = ' in a private window'
do_not_wait_suffix = ' without waiting'
as_url_suffix = ' as a URL'
while True:
if path.endswith(new_tab_suffix):
path = path[:-len(new_tab_suffix)]
new_tab = True
elif path.endswith(new_bg_tab_suffix):
path = path[:-len(new_bg_tab_suffix)]
new_bg_tab = True
elif path.endswith(new_window_suffix):
path = path[:-len(new_window_suffix)]
new_window = True
elif path.endswith(private_suffix):
path = path[:-len(private_suffix)]
private = True
elif path.endswith(as_url_suffix):
path = path[:-len(as_url_suffix)]
as_url = True
elif path.endswith(do_not_wait_suffix):
path = path[:-len(do_not_wait_suffix)]
wait = False
else:
break
quteproc.open_path(path, new_tab=new_tab, new_bg_tab=new_bg_tab,
new_window=new_window, private=private, as_url=as_url,
wait=wait)
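# Illustrative examples (not part of the original file) of feature-file steps
# that the suffix handling above understands:
#
#     When I open data/hello.txt
#     When I open data/hello.txt in a new tab
#     When I open data/hello.txt in a private window without waiting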
@bdd.when(bdd.parsers.parse("I set {opt} to {value}"))
def set_setting(quteproc, server, opt, value):
"""Set a qutebrowser setting."""
if value == '<empty>':
value = ''
value = value.replace('(port)', str(server.port))
quteproc.set_setting(opt, value)
@bdd.when(bdd.parsers.parse("I run {command}"))
def run_command(quteproc, server, tmpdir, command):
"""Run a qutebrowser command.
The suffix "with count ..." can be used to pass a count to the command.
"""
if 'with count' in command:
command, count = command.split(' with count ')
count = int(count)
else:
count = None
invalid_tag = ' (invalid command)'
if command.endswith(invalid_tag):
command = command[:-len(invalid_tag)]
invalid = True
else:
invalid = False
command = command.replace('(port)', str(server.port))
command = command.replace('(testdata)', testutils.abs_datapath())
command = command.replace('(tmpdir)', str(tmpdir))
command = command.replace('(dirsep)', os.sep)
command = command.replace('(echo-exe)', _get_echo_exe_path())
quteproc.send_cmd(command, count=count, invalid=invalid)
@bdd.when(bdd.parsers.parse("I reload"))
def reload(qtbot, server, quteproc, command):
"""Reload and wait until a new request is received."""
with qtbot.waitSignal(server.new_request):
quteproc.send_cmd(':reload')
@bdd.when(bdd.parsers.parse("I wait until {path} is loaded"))
def wait_until_loaded(quteproc, path):
"""Wait until the given path is loaded (as per qutebrowser log)."""
quteproc.wait_for_load_finished(path)
@bdd.when(bdd.parsers.re(r'I wait for (?P<is_regex>regex )?"'
r'(?P<pattern>[^"]+)" in the log(?P<do_skip> or skip '
r'the test)?'))
def wait_in_log(quteproc, is_regex, pattern, do_skip):
"""Wait for a given pattern in the qutebrowser log.
If used like "When I wait for regex ... in the log" the argument is treated
as regex. Otherwise, it's treated as a pattern (* can be used as wildcard).
"""
if is_regex:
pattern = re.compile(pattern)
line = quteproc.wait_for(message=pattern, do_skip=bool(do_skip))
line.expected = True
@bdd.when(bdd.parsers.re(r'I wait for the (?P<category>error|message|warning) '
r'"(?P<message>.*)"'))
def wait_for_message(quteproc, server, category, message):
"""Wait for a given statusbar message/error/warning."""
quteproc.log_summary('Waiting for {} "{}"'.format(category, message))
expect_message(quteproc, server, category, message)
@bdd.when(bdd.parsers.parse("I wait {delay}s"))
def wait_time(quteproc, delay):
"""Sleep for the given delay."""
time.sleep(float(delay))
@bdd.when(bdd.parsers.re('I press the keys? "(?P<keys>[^"]*)"'))
def press_keys(quteproc, keys):
"""Send the given fake keys to qutebrowser."""
quteproc.press_keys(keys)
@bdd.when("selection is supported")
def selection_supported(qapp):
"""Skip the test if selection isn't supported."""
if not qapp.clipboard().supportsSelection():
pytest.skip("OS doesn't support primary selection!")
@bdd.when("selection is not supported")
def selection_not_supported(qapp):
"""Skip the test if selection is supported."""
if qapp.clipboard().supportsSelection():
pytest.skip("OS supports primary selection!")
@bdd.when(bdd.parsers.re(r'I put "(?P<content>.*)" into the '
r'(?P<what>primary selection|clipboard)'))
def fill_clipboard(quteproc, server, what, content):
content = content.replace('(port)', str(server.port))
content = content.replace(r'\n', '\n')
quteproc.send_cmd(':debug-set-fake-clipboard "{}"'.format(content))
@bdd.when(bdd.parsers.re(r'I put the following lines into the '
r'(?P<what>primary selection|clipboard):\n'
r'(?P<content>.+)$', flags=re.DOTALL))
def fill_clipboard_multiline(quteproc, server, what, content):
fill_clipboard(quteproc, server, what, textwrap.dedent(content))
@bdd.when(bdd.parsers.parse('I hint with args "{args}"'))
def hint(quteproc, args):
quteproc.send_cmd(':hint {}'.format(args))
quteproc.wait_for(message='hints: *')
@bdd.when(bdd.parsers.parse('I hint with args "{args}" and follow {letter}'))
def hint_and_follow(quteproc, args, letter):
args = args.replace('(testdata)', testutils.abs_datapath())
args = args.replace('(python-executable)', sys.executable)
quteproc.send_cmd(':hint {}'.format(args))
quteproc.wait_for(message='hints: *')
quteproc.send_cmd(':follow-hint {}'.format(letter))
@bdd.when("I wait until the scroll position changed")
def wait_scroll_position(quteproc):
quteproc.wait_scroll_pos_changed()
@bdd.when(bdd.parsers.parse("I wait until the scroll position changed to "
"{x}/{y}"))
def wait_scroll_position_arg(quteproc, x, y):
quteproc.wait_scroll_pos_changed(x, y)
@bdd.when(bdd.parsers.parse('I wait for the javascript message "{message}"'))
def javascript_message_when(quteproc, message):
"""Make sure the given message was logged via javascript."""
quteproc.wait_for_js(message)
@bdd.when("I clear SSL errors")
def clear_ssl_errors(request, quteproc):
if request.config.webengine:
quteproc.terminate()
quteproc.start()
else:
quteproc.send_cmd(':debug-clear-ssl-errors')
@bdd.when("the documentation is up to date")
def update_documentation():
"""Update the docs before testing :help."""
base_path = os.path.dirname(os.path.abspath(qutebrowser.__file__))
doc_path = os.path.join(base_path, 'html', 'doc')
script_path = os.path.join(base_path, '..', 'scripts')
try:
os.mkdir(doc_path)
except FileExistsError:
pass
files = os.listdir(doc_path)
if files and all(docutils.docs_up_to_date(p) for p in files):
return
try:
subprocess.run(['asciidoc'], stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL, check=True)
except OSError:
pytest.skip("Docs outdated and asciidoc unavailable!")
update_script = os.path.join(script_path, 'asciidoc2html.py')
subprocess.run([sys.executable, update_script], check=True)
## Then
@bdd.then(bdd.parsers.parse("{path} should be loaded"))
def path_should_be_loaded(quteproc, path):
"""Make sure the given path was loaded according to the log.
This is usually the better check compared to "should be requested" as the
page could be loaded from local cache.
"""
quteproc.wait_for_load_finished(path)
@bdd.then(bdd.parsers.parse("{path} should be requested"))
def path_should_be_requested(server, path):
"""Make sure the given path was loaded from the webserver."""
server.wait_for(verb='GET', path='/' + path)
@bdd.then(bdd.parsers.parse("The requests should be:\n{pages}"))
def list_of_requests(server, pages):
"""Make sure the given requests were done from the webserver."""
expected_requests = [server.ExpectedRequest('GET', '/' + path.strip())
for path in pages.split('\n')]
actual_requests = server.get_requests()
assert actual_requests == expected_requests
@bdd.then(bdd.parsers.parse("The unordered requests should be:\n{pages}"))
def list_of_requests_unordered(server, pages):
"""Make sure the given requests were done (in no particular order)."""
expected_requests = [server.ExpectedRequest('GET', '/' + path.strip())
for path in pages.split('\n')]
actual_requests = server.get_requests()
# Requests are not hashable, we need to convert to ExpectedRequests
actual_requests = [server.ExpectedRequest.from_request(req)
for req in actual_requests]
assert (collections.Counter(actual_requests) ==
collections.Counter(expected_requests))
@bdd.then(bdd.parsers.re(r'the (?P<category>error|message|warning) '
r'"(?P<message>.*)" should be shown'))
def expect_message(quteproc, server, category, message):
"""Expect the given message in the qutebrowser log."""
category_to_loglevel = {
'message': logging.INFO,
'error': logging.ERROR,
'warning': logging.WARNING,
}
message = message.replace('(port)', str(server.port))
quteproc.mark_expected(category='message',
loglevel=category_to_loglevel[category],
message=message)
@bdd.then(bdd.parsers.re(r'(?P<is_regex>regex )?"(?P<pattern>[^"]+)" should '
r'be logged( with level (?P<loglevel>.*))?'))
def should_be_logged(quteproc, server, is_regex, pattern, loglevel):
"""Expect the given pattern on regex in the log."""
if is_regex:
pattern = re.compile(pattern)
else:
pattern = pattern.replace('(port)', str(server.port))
args = {
'message': pattern,
}
if loglevel:
args['loglevel'] = getattr(logging, loglevel.upper())
line = quteproc.wait_for(**args)
line.expected = True
@bdd.then(bdd.parsers.parse('"{pattern}" should not be logged'))
def ensure_not_logged(quteproc, pattern):
"""Make sure the given pattern was *not* logged."""
quteproc.ensure_not_logged(message=pattern)
@bdd.then(bdd.parsers.parse('the javascript message "{message}" should be '
'logged'))
def javascript_message_logged(quteproc, message):
"""Make sure the given message was logged via javascript."""
quteproc.wait_for_js(message)
@bdd.then(bdd.parsers.parse('the javascript message "{message}" should not be '
'logged'))
def javascript_message_not_logged(quteproc, message):
"""Make sure the given message was *not* logged via javascript."""
quteproc.ensure_not_logged(category='js',
message='[*] {}'.format(message))
@bdd.then(bdd.parsers.parse("The session should look like:\n{expected}"))
def compare_session(request, quteproc, expected):
"""Compare the current sessions against the given template.
partial_compare is used, which means only the keys/values listed will be
compared.
"""
quteproc.compare_session(expected)
@bdd.then("no crash should happen")
def no_crash():
"""Don't do anything.
This is actually a NOP as a crash is already checked in the log.
"""
time.sleep(0.5)
@bdd.then(bdd.parsers.parse("the header {header} should be set to {value}"))
def check_header(quteproc, header, value):
"""Check if a given header is set correctly.
This assumes we're on the server header page.
"""
content = quteproc.get_content()
data = json.loads(content)
print(data)
if value == '<unset>':
assert header not in data['headers']
else:
actual = data['headers'][header]
assert testutils.pattern_match(pattern=value, value=actual)
@bdd.then(bdd.parsers.parse('the page should contain the html "{text}"'))
def check_contents_html(quteproc, text):
"""Check the current page's content based on a substring."""
content = quteproc.get_content(plain=False)
assert text in content
@bdd.then(bdd.parsers.parse('the page should contain the plaintext "{text}"'))
def check_contents_plain(quteproc, text):
"""Check the current page's content based on a substring."""
content = quteproc.get_content().strip()
assert text in content
@bdd.then(bdd.parsers.parse('the page should not contain the plaintext '
'"{text}"'))
def check_not_contents_plain(quteproc, text):
"""Check the current page's content based on a substring."""
content = quteproc.get_content().strip()
assert text not in content
@bdd.then(bdd.parsers.parse('the json on the page should be:\n{text}'))
def check_contents_json(quteproc, text):
"""Check the current page's content as json."""
content = quteproc.get_content().strip()
expected = json.loads(text)
actual = json.loads(content)
assert actual == expected
@bdd.then(bdd.parsers.parse("the following tabs should be open:\n{tabs}"))
def check_open_tabs(quteproc, request, tabs):
"""Check the list of open tabs in the session.
This is a lightweight alternative for "The session should look like: ...".
It expects a list of URLs, with an optional "(active)" suffix.
"""
session = quteproc.get_session()
active_suffix = ' (active)'
pinned_suffix = ' (pinned)'
tabs = tabs.splitlines()
assert len(session['windows']) == 1
assert len(session['windows'][0]['tabs']) == len(tabs)
# If we don't have (active) anywhere, don't check it
has_active = any(active_suffix in line for line in tabs)
has_pinned = any(pinned_suffix in line for line in tabs)
for i, line in enumerate(tabs):
line = line.strip()
assert line.startswith('- ')
line = line[2:] # remove "- " prefix
active = False
pinned = False
while line.endswith(active_suffix) or line.endswith(pinned_suffix):
if line.endswith(active_suffix):
# active
line = line[:-len(active_suffix)]
active = True
else:
# pinned
line = line[:-len(pinned_suffix)]
pinned = True
session_tab = session['windows'][0]['tabs'][i]
current_page = session_tab['history'][-1]
assert current_page['url'] == quteproc.path_to_url(line)
if active:
assert session_tab['active']
elif has_active:
assert 'active' not in session_tab
if pinned:
assert current_page['pinned']
elif has_pinned:
assert not current_page['pinned']
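# A minimal sketch of the tab listing this step expects (paths are hypothetical):
#   Then the following tabs should be open:
#     - data/numbers/1.txt
#     - data/numbers/2.txt (active)
#     - data/numbers/3.txt (pinned)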
@bdd.then(bdd.parsers.re(r'the (?P<what>primary selection|clipboard) should '
r'contain "(?P<content>.*)"'))
def clipboard_contains(quteproc, server, what, content):
expected = content.replace('(port)', str(server.port))
expected = expected.replace('\\n', '\n')
expected = expected.replace('(linesep)', os.linesep)
quteproc.wait_for(message='Setting fake {}: {}'.format(
what, json.dumps(expected)))
@bdd.then(bdd.parsers.parse('the clipboard should contain:\n{content}'))
def clipboard_contains_multiline(quteproc, server, content):
expected = textwrap.dedent(content).replace('(port)', str(server.port))
quteproc.wait_for(message='Setting fake clipboard: {}'.format(
json.dumps(expected)))
@bdd.then("qutebrowser should quit")
def should_quit(qtbot, quteproc):
quteproc.wait_for_quit()
def _get_scroll_values(quteproc):
data = quteproc.get_session()
pos = data['windows'][0]['tabs'][0]['history'][-1]['scroll-pos']
return (pos['x'], pos['y'])
@bdd.then(bdd.parsers.re(r"the page should be scrolled "
r"(?P<direction>horizontally|vertically)"))
def check_scrolled(quteproc, direction):
quteproc.wait_scroll_pos_changed()
x, y = _get_scroll_values(quteproc)
if direction == 'horizontally':
assert x > 0
assert y == 0
else:
assert x == 0
assert y > 0
@bdd.then("the page should not be scrolled")
def check_not_scrolled(request, quteproc):
x, y = _get_scroll_values(quteproc)
assert x == 0
assert y == 0
@bdd.then(bdd.parsers.parse("the option {option} should be set to {value}"))
def check_option(quteproc, option, value):
actual_value = quteproc.get_setting(option)
assert actual_value == value
@bdd.then(bdd.parsers.parse("the per-domain option {option} should be set to "
"{value} for {pattern}"))
def check_option_per_domain(quteproc, option, value, pattern, server):
pattern = pattern.replace('(port)', str(server.port))
actual_value = quteproc.get_setting(option, pattern=pattern)
assert actual_value == value
|
from test import CollectorTestCase
from test import get_collector_config
from mock import patch
import os
from diamond.collector import Collector
from gridengine import GridEngineCollector
class TestGridEngineCollector(CollectorTestCase):
"""Set up the fixtures for the test
"""
def setUp(self):
config = get_collector_config('GridEngineCollector', {})
self.collector = GridEngineCollector(config, None)
self.fixtures_dir = os.path.abspath(os.path.join(
os.path.dirname(__file__), 'fixtures'))
def test_import(self):
"""Test that import succeeds
"""
self.assertTrue(GridEngineCollector)
@patch.object(GridEngineCollector, '_queue_stats_xml')
@patch.object(Collector, 'publish')
def test_queue_stats_should_work_with_real_data(
self, publish_mock, xml_mock):
"""Test that fixtures are parsed correctly
"""
xml_mock.return_value = self.getFixture('queue_stats.xml').getvalue()
self.collector._collect_queue_stats()
published_metrics = {
'queues.hadoop.load': 0.00532,
'queues.hadoop.used': 0,
'queues.hadoop.resv': 0,
'queues.hadoop.available': 0,
'queues.hadoop.total': 36,
'queues.hadoop.temp_disabled': 0,
'queues.hadoop.manual_intervention': 36,
'queues.primary_q.load': 0.20509,
'queues.primary_q.used': 1024,
'queues.primary_q.resv': 0,
'queues.primary_q.available': 1152,
'queues.primary_q.total': 2176,
'queues.primary_q.temp_disabled': 0,
'queues.primary_q.manual_intervention': 0,
'queues.secondary_q.load': 0.00460,
'queues.secondary_q.used': 145,
'queues.secondary_q.resv': 0,
'queues.secondary_q.available': 1007,
'queues.secondary_q.total': 1121,
'queues.secondary_q.temp_disabled': 1,
'queues.secondary_q.manual_intervention': 0
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=published_metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, published_metrics)
@patch.object(GridEngineCollector, '_queue_stats_xml')
@patch.object(Collector, 'publish')
def test_707(
self, publish_mock, xml_mock):
"""Test that fixtures are parsed correctly
"""
xml_mock.return_value = self.getFixture('707.xml').getvalue()
self.collector._collect_queue_stats()
|
from functools import wraps
import logging
from pytradfri.error import PytradfriError
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
def handle_error(func):
"""Handle tradfri api call error."""
@wraps(func)
async def wrapper(command):
"""Decorate api call."""
try:
await func(command)
except PytradfriError as err:
_LOGGER.error("Unable to execute command %s: %s", command, err)
return wrapper
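# A minimal usage sketch (api is any awaitable command executor, as passed to
# TradfriBaseClass below; names here are illustrative, not part of the API):
#   safe_api = handle_error(api)
#   await safe_api(command)  # errors are logged instead of raised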
class TradfriBaseClass(Entity):
"""Base class for IKEA TRADFRI.
All devices and groups should ultimately inherit from this class.
"""
def __init__(self, device, api, gateway_id):
"""Initialize a device."""
self._api = handle_error(api)
self._device = None
self._device_control = None
self._device_data = None
self._gateway_id = gateway_id
self._name = None
self._unique_id = None
self._refresh(device)
@callback
def _async_start_observe(self, exc=None):
"""Start observation of device."""
if exc:
self.async_write_ha_state()
_LOGGER.warning("Observation failed for %s", self._name, exc_info=exc)
try:
cmd = self._device.observe(
callback=self._observe_update,
err_callback=self._async_start_observe,
duration=0,
)
self.hass.async_create_task(self._api(cmd))
except PytradfriError as err:
_LOGGER.warning("Observation failed, trying again", exc_info=err)
self._async_start_observe()
async def async_added_to_hass(self):
"""Start thread when added to hass."""
self._async_start_observe()
@property
def name(self):
"""Return the display name of this device."""
return self._name
@property
def should_poll(self):
"""No polling needed for tradfri device."""
return False
@property
def unique_id(self):
"""Return unique ID for device."""
return self._unique_id
@callback
def _observe_update(self, device):
"""Receive new state data for this device."""
self._refresh(device)
self.async_write_ha_state()
def _refresh(self, device):
"""Refresh the device data."""
self._device = device
self._name = device.name
class TradfriBaseDevice(TradfriBaseClass):
"""Base class for a TRADFRI device.
All devices should inherit from this class.
"""
def __init__(self, device, api, gateway_id):
"""Initialize a device."""
super().__init__(device, api, gateway_id)
self._available = True
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_info(self):
"""Return the device info."""
info = self._device.device_info
return {
"identifiers": {(DOMAIN, self._device.id)},
"manufacturer": info.manufacturer,
"model": info.model_number,
"name": self._name,
"sw_version": info.firmware_version,
"via_device": (DOMAIN, self._gateway_id),
}
def _refresh(self, device):
"""Refresh the device data."""
super()._refresh(device)
self._available = device.reachable
|
import homeassistant.components.remote as remote
from homeassistant.components.remote import (
ATTR_ALTERNATIVE,
ATTR_COMMAND,
ATTR_COMMAND_TYPE,
ATTR_DELAY_SECS,
ATTR_DEVICE,
ATTR_NUM_REPEATS,
ATTR_TIMEOUT,
DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_PLATFORM,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from tests.common import async_mock_service
TEST_PLATFORM = {DOMAIN: {CONF_PLATFORM: "test"}}
SERVICE_SEND_COMMAND = "send_command"
SERVICE_LEARN_COMMAND = "learn_command"
SERVICE_DELETE_COMMAND = "delete_command"
ENTITY_ID = "entity_id_val"
async def test_is_on(hass):
"""Test is_on."""
hass.states.async_set("remote.test", STATE_ON)
assert remote.is_on(hass, "remote.test")
hass.states.async_set("remote.test", STATE_OFF)
assert not remote.is_on(hass, "remote.test")
async def test_turn_on(hass):
"""Test turn_on."""
turn_on_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_ON)
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: ENTITY_ID})
await hass.async_block_till_done()
assert len(turn_on_calls) == 1
call = turn_on_calls[-1]
assert DOMAIN == call.domain
async def test_turn_off(hass):
"""Test turn_off."""
turn_off_calls = async_mock_service(hass, DOMAIN, SERVICE_TURN_OFF)
await hass.services.async_call(
DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: ENTITY_ID}
)
await hass.async_block_till_done()
assert len(turn_off_calls) == 1
call = turn_off_calls[-1]
assert call.domain == DOMAIN
assert call.service == SERVICE_TURN_OFF
assert call.data[ATTR_ENTITY_ID] == ENTITY_ID
async def test_send_command(hass):
"""Test send_command."""
send_command_calls = async_mock_service(hass, DOMAIN, SERVICE_SEND_COMMAND)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_DEVICE: "test_device",
ATTR_COMMAND: ["test_command"],
ATTR_NUM_REPEATS: "4",
ATTR_DELAY_SECS: "0.6",
}
await hass.services.async_call(DOMAIN, SERVICE_SEND_COMMAND, data)
await hass.async_block_till_done()
assert len(send_command_calls) == 1
call = send_command_calls[-1]
assert call.domain == DOMAIN
assert call.service == SERVICE_SEND_COMMAND
assert call.data[ATTR_ENTITY_ID] == ENTITY_ID
async def test_learn_command(hass):
"""Test learn_command."""
learn_command_calls = async_mock_service(hass, DOMAIN, SERVICE_LEARN_COMMAND)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_DEVICE: "test_device",
ATTR_COMMAND: ["test_command"],
ATTR_COMMAND_TYPE: "rf",
ATTR_ALTERNATIVE: True,
ATTR_TIMEOUT: 20,
}
await hass.services.async_call(DOMAIN, SERVICE_LEARN_COMMAND, data)
await hass.async_block_till_done()
assert len(learn_command_calls) == 1
call = learn_command_calls[-1]
assert call.domain == DOMAIN
assert call.service == SERVICE_LEARN_COMMAND
assert call.data[ATTR_ENTITY_ID] == ENTITY_ID
async def test_delete_command(hass):
"""Test delete_command."""
delete_command_calls = async_mock_service(
hass, remote.DOMAIN, SERVICE_DELETE_COMMAND
)
data = {
ATTR_ENTITY_ID: ENTITY_ID,
ATTR_DEVICE: "test_device",
ATTR_COMMAND: ["test_command"],
}
await hass.services.async_call(DOMAIN, SERVICE_DELETE_COMMAND, data)
await hass.async_block_till_done()
assert len(delete_command_calls) == 1
call = delete_command_calls[-1]
assert call.domain == remote.DOMAIN
assert call.service == SERVICE_DELETE_COMMAND
assert call.data[ATTR_ENTITY_ID] == ENTITY_ID
async def test_deprecated_base_class(caplog):
"""Test deprecated base class."""
class CustomRemote(remote.RemoteDevice):
pass
CustomRemote()
assert "RemoteDevice is deprecated, modify CustomRemote" in caplog.text
|
from typing import Callable, List, Optional
import pyvera as veraApi
from homeassistant.components.binary_sensor import (
DOMAIN as PLATFORM_DOMAIN,
ENTITY_ID_FORMAT,
BinarySensorEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from . import VeraDevice
from .common import ControllerData, get_controller_data
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up the sensor config entry."""
controller_data = get_controller_data(hass, entry)
async_add_entities(
[
VeraBinarySensor(device, controller_data)
for device in controller_data.devices.get(PLATFORM_DOMAIN)
]
)
class VeraBinarySensor(VeraDevice[veraApi.VeraBinarySensor], BinarySensorEntity):
"""Representation of a Vera Binary Sensor."""
def __init__(
self, vera_device: veraApi.VeraBinarySensor, controller_data: ControllerData
):
"""Initialize the binary_sensor."""
self._state = False
VeraDevice.__init__(self, vera_device, controller_data)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def is_on(self) -> Optional[bool]:
"""Return true if sensor is on."""
return self._state
def update(self) -> None:
"""Get the latest data and update the state."""
self._state = self.vera_device.is_tripped
|
import logging
from apns2.client import APNsClient
from apns2.errors import Unregistered
from apns2.payload import Payload
import voluptuous as vol
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.config import load_yaml_config_file
from homeassistant.const import ATTR_NAME, CONF_NAME, CONF_PLATFORM
from homeassistant.helpers import template as template_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_state_change
from .const import DOMAIN
APNS_DEVICES = "apns.yaml"
CONF_CERTFILE = "cert_file"
CONF_TOPIC = "topic"
CONF_SANDBOX = "sandbox"
ATTR_PUSH_ID = "push_id"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PLATFORM): "apns",
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_CERTFILE): cv.isfile,
vol.Required(CONF_TOPIC): cv.string,
vol.Optional(CONF_SANDBOX, default=False): cv.boolean,
}
)
REGISTER_SERVICE_SCHEMA = vol.Schema(
{vol.Required(ATTR_PUSH_ID): cv.string, vol.Optional(ATTR_NAME): cv.string}
)
def get_service(hass, config, discovery_info=None):
"""Return push service."""
name = config[CONF_NAME]
cert_file = config[CONF_CERTFILE]
topic = config[CONF_TOPIC]
sandbox = config[CONF_SANDBOX]
service = ApnsNotificationService(hass, name, topic, sandbox, cert_file)
hass.services.register(
DOMAIN, f"apns_{name}", service.register, schema=REGISTER_SERVICE_SCHEMA
)
return service
class ApnsDevice:
"""
The APNS Device class.
Stores information about a device that is registered for push
notifications.
"""
def __init__(self, push_id, name, tracking_device_id=None, disabled=False):
"""Initialize APNS Device."""
self.device_push_id = push_id
self.device_name = name
self.tracking_id = tracking_device_id
self.device_disabled = disabled
@property
def push_id(self):
"""Return the APNS id for the device."""
return self.device_push_id
@property
def name(self):
"""Return the friendly name for the device."""
return self.device_name
@property
def tracking_device_id(self):
"""
Return the device Id.
The id of a device that is tracked by the device
tracking component.
"""
return self.tracking_id
@property
def full_tracking_device_id(self):
"""
Return the fully qualified device id.
The full id of a device that is tracked by the device
tracking component.
"""
return f"{DEVICE_TRACKER_DOMAIN}.{self.tracking_id}"
@property
def disabled(self):
"""Return the state of the service."""
return self.device_disabled
def disable(self):
"""Disable the device from receiving notifications."""
self.device_disabled = True
def __eq__(self, other):
"""Return the comparison."""
if isinstance(other, self.__class__):
return self.push_id == other.push_id and self.name == other.name
return NotImplemented
def __ne__(self, other):
"""Return the comparison."""
return not self.__eq__(other)
def _write_device(out, device):
"""Write a single device to file."""
attributes = []
if device.name is not None:
attributes.append(f"name: {device.name}")
if device.tracking_device_id is not None:
attributes.append(f"tracking_device_id: {device.tracking_device_id}")
if device.disabled:
attributes.append("disabled: True")
out.write(device.push_id)
out.write(": {")
if attributes:
separator = ", "
out.write(separator.join(attributes))
out.write("}\n")
class ApnsNotificationService(BaseNotificationService):
"""Implement the notification service for the APNS service."""
def __init__(self, hass, app_name, topic, sandbox, cert_file):
"""Initialize APNS application."""
self.hass = hass
self.app_name = app_name
self.sandbox = sandbox
self.certificate = cert_file
self.yaml_path = hass.config.path(f"{app_name}_{APNS_DEVICES}")
self.devices = {}
self.device_states = {}
self.topic = topic
try:
self.devices = {
str(key): ApnsDevice(
str(key),
value.get("name"),
value.get("tracking_device_id"),
value.get("disabled", False),
)
for (key, value) in load_yaml_config_file(self.yaml_path).items()
}
except FileNotFoundError:
pass
tracking_ids = [
device.full_tracking_device_id
for (key, device) in self.devices.items()
if device.tracking_device_id is not None
]
track_state_change(hass, tracking_ids, self.device_state_changed_listener)
def device_state_changed_listener(self, entity_id, from_s, to_s):
"""
Listen for state change.
Track device state change if a device has a tracking id specified.
"""
self.device_states[entity_id] = str(to_s.state)
def write_devices(self):
"""Write all known devices to file."""
with open(self.yaml_path, "w+") as out:
for device in self.devices.values():
_write_device(out, device)
def register(self, call):
"""Register a device to receive push messages."""
push_id = call.data.get(ATTR_PUSH_ID)
device_name = call.data.get(ATTR_NAME)
current_device = self.devices.get(push_id)
current_tracking_id = (
None if current_device is None else current_device.tracking_device_id
)
device = ApnsDevice(push_id, device_name, current_tracking_id)
if current_device is None:
self.devices[push_id] = device
with open(self.yaml_path, "a") as out:
_write_device(out, device)
return True
if device != current_device:
self.devices[push_id] = device
self.write_devices()
return True
def send_message(self, message=None, **kwargs):
"""Send push message to registered devices."""
apns = APNsClient(
self.certificate, use_sandbox=self.sandbox, use_alternative_port=False
)
device_state = kwargs.get(ATTR_TARGET)
message_data = kwargs.get(ATTR_DATA)
if message_data is None:
message_data = {}
if isinstance(message, str):
rendered_message = message
elif isinstance(message, template_helper.Template):
rendered_message = message.render(parse_result=False)
else:
rendered_message = ""
payload = Payload(
alert=rendered_message,
badge=message_data.get("badge"),
sound=message_data.get("sound"),
category=message_data.get("category"),
custom=message_data.get("custom", {}),
content_available=message_data.get("content_available", False),
)
device_update = False
for push_id, device in self.devices.items():
if not device.disabled:
state = None
if device.tracking_device_id is not None:
state = self.device_states.get(device.full_tracking_device_id)
if device_state is None or state == str(device_state):
try:
apns.send_notification(push_id, payload, topic=self.topic)
except Unregistered:
logging.error("Device %s has unregistered", push_id)
device_update = True
device.disable()
if device_update:
self.write_devices()
return True
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.fan import DOMAIN
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
async def test_get_actions(hass, device_reg, entity_reg):
"""Test we get the expected actions from a fan."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_actions = [
{
"domain": DOMAIN,
"type": "turn_on",
"device_id": device_entry.id,
"entity_id": "fan.test_5678",
},
{
"domain": DOMAIN,
"type": "turn_off",
"device_id": device_entry.id,
"entity_id": "fan.test_5678",
},
]
actions = await async_get_device_automations(hass, "action", device_entry.id)
assert_lists_same(actions, expected_actions)
async def test_action(hass):
"""Test for turn_on and turn_off actions."""
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "event",
"event_type": "test_event_turn_off",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "fan.entity",
"type": "turn_off",
},
},
{
"trigger": {
"platform": "event",
"event_type": "test_event_turn_on",
},
"action": {
"domain": DOMAIN,
"device_id": "abcdefgh",
"entity_id": "fan.entity",
"type": "turn_on",
},
},
]
},
)
turn_off_calls = async_mock_service(hass, "fan", "turn_off")
turn_on_calls = async_mock_service(hass, "fan", "turn_on")
hass.bus.async_fire("test_event_turn_off")
await hass.async_block_till_done()
assert len(turn_off_calls) == 1
assert len(turn_on_calls) == 0
hass.bus.async_fire("test_event_turn_on")
await hass.async_block_till_done()
assert len(turn_off_calls) == 1
assert len(turn_on_calls) == 1
|
import diamond.collector
try:
from pyutmp import UtmpFile
except ImportError:
UtmpFile = None
try:
from utmp import UtmpRecord
import UTMPCONST
except ImportError:
UtmpRecord = None
class UsersCollector(diamond.collector.Collector):
def get_default_config_help(self):
"""
Returns the default collector help text
"""
config_help = super(UsersCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(UsersCollector, self).get_default_config()
config.update({
'path': 'users',
'utmp': None,
})
return config
def collect(self):
if UtmpFile is None and UtmpRecord is None:
self.log.error('Unable to import either pyutmp or python-utmp')
return False
metrics = {'total': 0}
if UtmpFile:
for utmp in UtmpFile(path=self.config['utmp']):
if utmp.ut_user_process:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0) + 1
metrics['total'] = metrics['total'] + 1
if UtmpRecord:
for utmp in UtmpRecord(fname=self.config['utmp']):
if utmp.ut_type == UTMPCONST.USER_PROCESS:
metrics[utmp.ut_user] = metrics.get(utmp.ut_user, 0) + 1
metrics['total'] = metrics['total'] + 1
for metric_name in metrics.keys():
self.publish(metric_name, metrics[metric_name])
return True
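# Illustrative metrics published by one collect() run (user names hypothetical),
# reported under the configured 'users' path prefix:
#   alice -> 2, bob -> 1, total -> 3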
|
from datetime import timedelta
from bond_api import Action, DeviceType
from homeassistant import core
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
DOMAIN as LIGHT_DOMAIN,
SUPPORT_BRIGHTNESS,
)
from homeassistant.const import (
ATTR_ASSUMED_STATE,
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import (
help_test_entity_available,
patch_bond_action,
patch_bond_device_state,
setup_platform,
)
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
"""Create a ceiling fan (that has built-in light) with given name."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": [Action.TURN_LIGHT_ON, Action.TURN_LIGHT_OFF],
}
def dimmable_ceiling_fan(name: str):
"""Create a ceiling fan (that has built-in light) with given name."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": [Action.TURN_LIGHT_ON, Action.TURN_LIGHT_OFF, Action.SET_BRIGHTNESS],
}
def fireplace(name: str):
"""Create a fireplace with given name."""
return {
"name": name,
"type": DeviceType.FIREPLACE,
"actions": [Action.TURN_ON, Action.TURN_OFF],
}
def fireplace_with_light(name: str):
"""Create a fireplace with given name."""
return {
"name": name,
"type": DeviceType.FIREPLACE,
"actions": [
Action.TURN_ON,
Action.TURN_OFF,
Action.TURN_LIGHT_ON,
Action.TURN_LIGHT_OFF,
],
}
async def test_fan_entity_registry(hass: core.HomeAssistant):
"""Tests that fan with light devices are registered in the entity registry."""
await setup_platform(
hass,
LIGHT_DOMAIN,
ceiling_fan("fan-name"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.entities["light.fan_name"]
assert entity.unique_id == "test-hub-id_test-device-id"
async def test_fireplace_entity_registry(hass: core.HomeAssistant):
"""Tests that flame fireplace devices are registered in the entity registry."""
await setup_platform(
hass,
LIGHT_DOMAIN,
fireplace("fireplace-name"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.entities["light.fireplace_name"]
assert entity.unique_id == "test-hub-id_test-device-id"
async def test_fireplace_with_light_entity_registry(hass: core.HomeAssistant):
"""Tests that flame+light devices are registered in the entity registry."""
await setup_platform(
hass,
LIGHT_DOMAIN,
fireplace_with_light("fireplace-name"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
entity_flame = registry.entities["light.fireplace_name"]
assert entity_flame.unique_id == "test-hub-id_test-device-id"
entity_light = registry.entities["light.fireplace_name_2"]
assert entity_light.unique_id == "test-hub-id_test-device-id_light"
async def test_sbb_trust_state(hass: core.HomeAssistant):
"""Assumed state should be False if device is a Smart by Bond."""
version = {
"model": "MR123A",
"bondid": "test-bond-id",
}
await setup_platform(
hass, LIGHT_DOMAIN, ceiling_fan("name-1"), bond_version=version
)
device = hass.states.get("light.name_1")
assert device.attributes.get(ATTR_ASSUMED_STATE) is not True
async def test_trust_state_not_specified(hass: core.HomeAssistant):
"""Assumed state should be True if Trust State is not specified."""
await setup_platform(hass, LIGHT_DOMAIN, ceiling_fan("name-1"))
device = hass.states.get("light.name_1")
assert device.attributes.get(ATTR_ASSUMED_STATE) is True
async def test_trust_state(hass: core.HomeAssistant):
"""Assumed state should be True if Trust State is False."""
await setup_platform(
hass, LIGHT_DOMAIN, ceiling_fan("name-1"), props={"trust_state": False}
)
device = hass.states.get("light.name_1")
assert device.attributes.get(ATTR_ASSUMED_STATE) is True
async def test_no_trust_state(hass: core.HomeAssistant):
"""Assumed state should be False if Trust State is True."""
await setup_platform(
hass, LIGHT_DOMAIN, ceiling_fan("name-1"), props={"trust_state": True}
)
device = hass.states.get("light.name_1")
assert device.attributes.get(ATTR_ASSUMED_STATE) is not True
async def test_turn_on_light(hass: core.HomeAssistant):
"""Tests that turn on command delegates to API."""
await setup_platform(
hass, LIGHT_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_light_on, patch_bond_device_state():
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_light_on.assert_called_once_with("test-device-id", Action.turn_light_on())
async def test_turn_off_light(hass: core.HomeAssistant):
"""Tests that turn off command delegates to API."""
await setup_platform(
hass, LIGHT_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_light_off, patch_bond_device_state():
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_light_off.assert_called_once_with(
"test-device-id", Action.turn_light_off()
)
async def test_brightness_support(hass: core.HomeAssistant):
"""Tests that a dimmable light should support the brightness feature."""
await setup_platform(
hass,
LIGHT_DOMAIN,
dimmable_ceiling_fan("name-1"),
bond_device_id="test-device-id",
)
state = hass.states.get("light.name_1")
assert state.attributes[ATTR_SUPPORTED_FEATURES] & SUPPORT_BRIGHTNESS
async def test_brightness_not_supported(hass: core.HomeAssistant):
"""Tests that a non-dimmable light should not support the brightness feature."""
await setup_platform(
hass,
LIGHT_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
)
state = hass.states.get("light.name_1")
assert not state.attributes[ATTR_SUPPORTED_FEATURES] & SUPPORT_BRIGHTNESS
async def test_turn_on_light_with_brightness(hass: core.HomeAssistant):
"""Tests that turn on command, on a dimmable light, delegates to API and parses brightness."""
await setup_platform(
hass,
LIGHT_DOMAIN,
dimmable_ceiling_fan("name-1"),
bond_device_id="test-device-id",
)
with patch_bond_action() as mock_set_brightness, patch_bond_device_state():
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.name_1", ATTR_BRIGHTNESS: 128},
blocking=True,
)
await hass.async_block_till_done()
mock_set_brightness.assert_called_once_with(
"test-device-id", Action(Action.SET_BRIGHTNESS, 50)
)
async def test_update_reports_light_is_on(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports the light is on."""
await setup_platform(hass, LIGHT_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"light": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("light.name_1").state == "on"
async def test_update_reports_light_is_off(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports the light is off."""
await setup_platform(hass, LIGHT_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"light": 0}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("light.name_1").state == "off"
async def test_turn_on_fireplace_with_brightness(hass: core.HomeAssistant):
"""Tests that turn on command delegates to set flame API."""
await setup_platform(
hass, LIGHT_DOMAIN, fireplace("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_flame, patch_bond_device_state():
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.name_1", ATTR_BRIGHTNESS: 128},
blocking=True,
)
await hass.async_block_till_done()
mock_set_flame.assert_called_once_with("test-device-id", Action.set_flame(50))
async def test_turn_on_fireplace_without_brightness(hass: core.HomeAssistant):
"""Tests that turn on command delegates to turn on API."""
await setup_platform(
hass, LIGHT_DOMAIN, fireplace("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_on, patch_bond_device_state():
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "light.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_on.assert_called_once_with("test-device-id", Action.turn_on())
async def test_turn_off_fireplace(hass: core.HomeAssistant):
"""Tests that turn off command delegates to API."""
await setup_platform(
hass, LIGHT_DOMAIN, fireplace("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_once_with("test-device-id", Action.turn_off())
async def test_flame_converted_to_brightness(hass: core.HomeAssistant):
"""Tests that reported flame level (0..100) converted to HA brightness (0...255)."""
await setup_platform(hass, LIGHT_DOMAIN, fireplace("name-1"))
with patch_bond_device_state(return_value={"power": 1, "flame": 50}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("light.name_1").attributes[ATTR_BRIGHTNESS] == 128
async def test_light_available(hass: core.HomeAssistant):
"""Tests that available state is updated based on API errors."""
await help_test_entity_available(
hass, LIGHT_DOMAIN, ceiling_fan("name-1"), "light.name_1"
)
async def test_parse_brightness(hass: core.HomeAssistant):
"""Tests that reported brightness level (0..100) converted to HA brightness (0...255)."""
await setup_platform(hass, LIGHT_DOMAIN, dimmable_ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"light": 1, "brightness": 50}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("light.name_1").attributes[ATTR_BRIGHTNESS] == 128
|
from typing import List
from aiohttp import StreamReader
from hass_nabucasa import Cloud
from hass_nabucasa.voice import VoiceError
from homeassistant.components.stt import Provider, SpeechMetadata, SpeechResult
from homeassistant.components.stt.const import (
AudioBitRates,
AudioChannels,
AudioCodecs,
AudioFormats,
AudioSampleRates,
SpeechResultState,
)
from .const import DOMAIN
SUPPORT_LANGUAGES = [
"da-DK",
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-US",
"es-ES",
"fi-FI",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
"nl-NL",
"pl-PL",
"pt-PT",
"ru-RU",
"sv-SE",
"th-TH",
"zh-CN",
"zh-HK",
]
async def async_get_engine(hass, config, discovery_info=None):
"""Set up Cloud speech component."""
cloud: Cloud = hass.data[DOMAIN]
return CloudProvider(cloud)
class CloudProvider(Provider):
"""NabuCasa speech API provider."""
def __init__(self, cloud: Cloud) -> None:
"""Home Assistant NabuCasa Speech to text."""
self.cloud = cloud
@property
def supported_languages(self) -> List[str]:
"""Return a list of supported languages."""
return SUPPORT_LANGUAGES
@property
def supported_formats(self) -> List[AudioFormats]:
"""Return a list of supported formats."""
return [AudioFormats.WAV, AudioFormats.OGG]
@property
def supported_codecs(self) -> List[AudioCodecs]:
"""Return a list of supported codecs."""
return [AudioCodecs.PCM, AudioCodecs.OPUS]
@property
def supported_bit_rates(self) -> List[AudioBitRates]:
"""Return a list of supported bitrates."""
return [AudioBitRates.BITRATE_16]
@property
def supported_sample_rates(self) -> List[AudioSampleRates]:
"""Return a list of supported samplerates."""
return [AudioSampleRates.SAMPLERATE_16000]
@property
def supported_channels(self) -> List[AudioChannels]:
"""Return a list of supported channels."""
return [AudioChannels.CHANNEL_MONO]
async def async_process_audio_stream(
self, metadata: SpeechMetadata, stream: StreamReader
) -> SpeechResult:
"""Process an audio stream to STT service."""
content = f"audio/{metadata.format!s}; codecs=audio/{metadata.codec!s}; samplerate=16000"
# Process STT
try:
result = await self.cloud.voice.process_stt(
stream, content, metadata.language
)
except VoiceError:
return SpeechResult(None, SpeechResultState.ERROR)
# Return Speech as Text
return SpeechResult(
result.text,
SpeechResultState.SUCCESS if result.success else SpeechResultState.ERROR,
)
|
import xml.etree.ElementTree
import typing
from pathlib import Path
import keras
import pandas as pd
import matchzoo
_train_dev_url = "http://alt.qcri.org/semeval2016/task3/data/uploads/" \
"semeval2016-task3-cqa-ql-traindev-v3.2.zip"
_test_url = "http://alt.qcri.org/semeval2016/task3/data/uploads/" \
"semeval2016_task3_test.zip"
def load_data(
stage: str = 'train',
task: str = 'classification',
target_label: str = 'PerfectMatch',
return_classes: bool = False,
match_type: str = 'question',
mode: str = 'both',
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load CQA-QL-16 data.
:param stage: One of `train`, `dev`, and `test`.
(default: `train`)
:param task: Could be one of `ranking`, `classification` or instance
of :class:`matchzoo.engine.BaseTask`. (default: `classification`)
:param target_label: If `ranking`, choose one of classification
label as the positive label. (default: `PerfectMatch`)
:param return_classes: `True` to return classes for classification
task, `False` otherwise.
:param match_type: Matching text types. One of `question`,
`answer`, and `external_answer`. (default: `question`)
:param mode: Train data use method. One of `part1`, `part2`,
and `both`. (default: `both`)
:return: A DataPack unless `task` is `classification` and `return_classes`
is `True`: a tuple of `(DataPack, classes)` in that case.
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
if match_type not in ('question', 'answer', 'external_answer'):
raise ValueError(f"{match_type} is not a valid method. Must be one of"
f" `question`, `answer`, `external_answer`.")
if mode not in ('part1', 'part2', 'both'):
raise ValueError(f"{mode} is not a valid method."
f"Must be one of `part1`, `part2`, `both`.")
data_root = _download_data(stage)
data_pack = _read_data(data_root, stage, match_type, mode)
if task == 'ranking':
        if match_type in ('answer', 'external_answer') and target_label not in [
                'Good', 'PotentiallyUseful', 'Bad']:
            raise ValueError(f"{target_label} is not a valid target label. "
                             f"Must be one of `Good`, `PotentiallyUseful`,"
                             f" `Bad`.")
elif match_type == 'question' and target_label not in [
'PerfectMatch', 'Relevant', 'Irrelevant']:
raise ValueError(f"{target_label} is not a valid target label."
f" Must be one of `PerfectMatch`, `Relevant`,"
f" `Irrelevant`.")
binary = (data_pack.relation['label'] == target_label).astype(float)
data_pack.relation['label'] = binary
return data_pack
elif task == 'classification':
if match_type in ('answer', 'external_answer'):
classes = ['Good', 'PotentiallyUseful', 'Bad']
else:
classes = ['PerfectMatch', 'Relevant', 'Irrelevant']
label = data_pack.relation['label'].apply(classes.index)
data_pack.relation['label'] = label
data_pack.one_hot_encode_label(num_classes=3, inplace=True)
if return_classes:
return data_pack, classes
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task."
f"Must be one of `Ranking` and `Classification`.")
def _download_data(stage):
if stage in ['train', 'dev']:
return _download_train_dev_data()
else:
return _download_test_data()
def _download_train_dev_data():
ref_path = keras.utils.data_utils.get_file(
'semeval_train', _train_dev_url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='semeval_train'
)
return Path(ref_path).parent.joinpath('v3.2')
def _download_test_data():
ref_path = keras.utils.data_utils.get_file(
'semeval_test', _test_url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='semeval_test'
)
return Path(ref_path).parent.joinpath('SemEval2016_task3_test/English')
def _read_data(path, stage, match_type, mode='both'):
if stage == 'train':
if mode == 'part1':
path = path.joinpath(
'train/SemEval2016-Task3-CQA-QL-train-part1.xml')
data = _load_data_by_type(path, match_type)
elif mode == 'part2':
path = path.joinpath(
'train/SemEval2016-Task3-CQA-QL-train-part2.xml')
data = _load_data_by_type(path, match_type)
else:
part1 = path.joinpath(
'train/SemEval2016-Task3-CQA-QL-train-part1.xml')
p1 = _load_data_by_type(part1, match_type)
            part2 = path.joinpath(
                'train/SemEval2016-Task3-CQA-QL-train-part2.xml')
p2 = _load_data_by_type(part2, match_type)
data = pd.concat([p1, p2], ignore_index=True)
return matchzoo.pack(data)
elif stage == 'dev':
path = path.joinpath('dev/SemEval2016-Task3-CQA-QL-dev.xml')
data = _load_data_by_type(path, match_type)
return matchzoo.pack(data)
else:
path = path.joinpath('SemEval2016-Task3-CQA-QL-test.xml')
data = _load_data_by_type(path, match_type)
return matchzoo.pack(data)
def _load_data_by_type(path, match_type):
if match_type == 'question':
return _load_question(path)
elif match_type == 'answer':
return _load_answer(path)
else:
return _load_external_answer(path)
def _load_question(path):
doc = xml.etree.ElementTree.parse(path)
dataset = []
for question in doc.iterfind('OrgQuestion'):
qid = question.attrib['ORGQ_ID']
query = question.findtext('OrgQBody')
rel_question = question.find('Thread').find('RelQuestion')
question = rel_question.findtext('RelQBody')
question_id = rel_question.attrib['RELQ_ID']
dataset.append([qid, question_id, query, question,
rel_question.attrib['RELQ_RELEVANCE2ORGQ']])
df = pd.DataFrame(dataset, columns=[
'id_left', 'id_right', 'text_left', 'text_right', 'label'])
return df
def _load_answer(path):
doc = xml.etree.ElementTree.parse(path)
dataset = []
for org_q in doc.iterfind('OrgQuestion'):
for thread in org_q.iterfind('Thread'):
ques = thread.find('RelQuestion')
qid = ques.attrib['RELQ_ID']
question = ques.findtext('RelQBody')
for comment in thread.iterfind('RelComment'):
aid = comment.attrib['RELC_ID']
answer = comment.findtext('RelCText')
dataset.append([qid, aid, question, answer,
comment.attrib['RELC_RELEVANCE2RELQ']])
df = pd.DataFrame(dataset, columns=[
'id_left', 'id_right', 'text_left', 'text_right', 'label'])
return df
def _load_external_answer(path):
doc = xml.etree.ElementTree.parse(path)
dataset = []
for question in doc.iterfind('OrgQuestion'):
qid = question.attrib['ORGQ_ID']
query = question.findtext('OrgQBody')
thread = question.find('Thread')
for comment in thread.iterfind('RelComment'):
answer = comment.findtext('RelCText')
aid = comment.attrib['RELC_ID']
dataset.append([qid, aid, query, answer,
comment.attrib['RELC_RELEVANCE2ORGQ']])
df = pd.DataFrame(dataset, columns=[
'id_left', 'id_right', 'text_left', 'text_right', 'label'])
return df
|
from typing import List
from xknx.devices import Notification as XknxNotification
from homeassistant.components.notify import BaseNotificationService
from .const import DOMAIN
async def async_get_service(hass, config, discovery_info=None):
"""Get the KNX notification service."""
notification_devices = []
for device in hass.data[DOMAIN].xknx.devices:
if isinstance(device, XknxNotification):
notification_devices.append(device)
return (
KNXNotificationService(notification_devices) if notification_devices else None
)
class KNXNotificationService(BaseNotificationService):
"""Implement demo notification service."""
def __init__(self, devices: List[XknxNotification]):
"""Initialize the service."""
self.devices = devices
@property
def targets(self):
"""Return a dictionary of registered targets."""
ret = {}
for device in self.devices:
ret[device.name] = device.name
return ret
async def async_send_message(self, message="", **kwargs):
"""Send a notification to knx bus."""
if "target" in kwargs:
await self._async_send_to_device(message, kwargs["target"])
else:
await self._async_send_to_all_devices(message)
async def _async_send_to_all_devices(self, message):
"""Send a notification to knx bus to all connected devices."""
for device in self.devices:
await device.set(message)
async def _async_send_to_device(self, message, names):
"""Send a notification to knx bus to device with given names."""
for device in self.devices:
if device.name in names:
await device.set(message)
|
import re
from collections import defaultdict
from . import Tree, Token
from .common import ParserConf
from .parsers import earley
from .grammar import Rule, Terminal, NonTerminal
def is_discarded_terminal(t):
return t.is_term and t.filter_out
class _MakeTreeMatch:
def __init__(self, name, expansion):
self.name = name
self.expansion = expansion
def __call__(self, args):
t = Tree(self.name, args)
t.meta.match_tree = True
t.meta.orig_expansion = self.expansion
return t
def _best_from_group(seq, group_key, cmp_key):
d = {}
for item in seq:
key = group_key(item)
if key in d:
v1 = cmp_key(item)
v2 = cmp_key(d[key])
if v2 > v1:
d[key] = item
else:
d[key] = item
return list(d.values())
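# Worked example (hypothetical data): for each group key, the entry with the
# smallest cmp_key wins (first seen wins ties).
#   _best_from_group([('a', 3), ('a', 1), ('b', 2)],
#                    group_key=lambda x: x[0], cmp_key=lambda x: x[1])
#   -> [('a', 1), ('b', 2)]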
def _best_rules_from_group(rules):
rules = _best_from_group(rules, lambda r: r, lambda r: -len(r.expansion))
rules.sort(key=lambda r: len(r.expansion))
return rules
def _match(term, token):
if isinstance(token, Tree):
name, _args = parse_rulename(term.name)
return token.data == name
elif isinstance(token, Token):
return term == Terminal(token.type)
assert False
def make_recons_rule(origin, expansion, old_expansion):
return Rule(origin, expansion, alias=_MakeTreeMatch(origin.name, old_expansion))
def make_recons_rule_to_term(origin, term):
return make_recons_rule(origin, [Terminal(term.name)], [term])
def parse_rulename(s):
"Parse rule names that may contain a template syntax (like rule{a, b, ...})"
name, args_str = re.match(r'(\w+)(?:{(.+)})?', s).groups()
args = args_str and [a.strip() for a in args_str.split(',')]
return name, args
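# Examples (hypothetical rule names):
#   parse_rulename('rule{a, b}')  -> ('rule', ['a', 'b'])
#   parse_rulename('rule')        -> ('rule', None)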
class ChildrenLexer:
def __init__(self, children):
self.children = children
def lex(self, parser_state):
return self.children
class TreeMatcher:
"""Match the elements of a tree node, based on an ontology
provided by a Lark grammar.
    Supports templates and inlined rules (`rule{a, b,..}` and `_rule`).
    Initialize with an instance of Lark.
"""
def __init__(self, parser):
# XXX TODO calling compile twice returns different results!
assert parser.options.maybe_placeholders == False
# XXX TODO: we just ignore the potential existence of a postlexer
self.tokens, rules, _extra = parser.grammar.compile(parser.options.start, set())
self.rules_for_root = defaultdict(list)
self.rules = list(self._build_recons_rules(rules))
self.rules.reverse()
# Choose the best rule from each group of {rule => [rule.alias]}, since we only really need one derivation.
self.rules = _best_rules_from_group(self.rules)
self.parser = parser
self._parser_cache = {}
def _build_recons_rules(self, rules):
"Convert tree-parsing/construction rules to tree-matching rules"
expand1s = {r.origin for r in rules if r.options.expand1}
aliases = defaultdict(list)
for r in rules:
if r.alias:
aliases[r.origin].append(r.alias)
rule_names = {r.origin for r in rules}
nonterminals = {sym for sym in rule_names
if sym.name.startswith('_') or sym in expand1s or sym in aliases}
seen = set()
for r in rules:
recons_exp = [sym if sym in nonterminals else Terminal(sym.name)
for sym in r.expansion if not is_discarded_terminal(sym)]
# Skip self-recursive constructs
if recons_exp == [r.origin] and r.alias is None:
continue
sym = NonTerminal(r.alias) if r.alias else r.origin
rule = make_recons_rule(sym, recons_exp, r.expansion)
if sym in expand1s and len(recons_exp) != 1:
self.rules_for_root[sym.name].append(rule)
if sym.name not in seen:
yield make_recons_rule_to_term(sym, sym)
seen.add(sym.name)
else:
if sym.name.startswith('_') or sym in expand1s:
yield rule
else:
self.rules_for_root[sym.name].append(rule)
for origin, rule_aliases in aliases.items():
for alias in rule_aliases:
yield make_recons_rule_to_term(origin, NonTerminal(alias))
yield make_recons_rule_to_term(origin, origin)
def match_tree(self, tree, rulename):
"""Match the elements of `tree` to the symbols of rule `rulename`.
Parameters:
tree (Tree): the tree node to match
rulename (str): The expected full rule name (including template args)
Returns:
Tree: an unreduced tree that matches `rulename`
Raises:
UnexpectedToken: If no match was found.
Note:
        It is the caller's responsibility to match the tree recursively.
"""
if rulename:
# validate
name, _args = parse_rulename(rulename)
assert tree.data == name
else:
rulename = tree.data
# TODO: ambiguity?
try:
parser = self._parser_cache[rulename]
except KeyError:
rules = self.rules + _best_rules_from_group(self.rules_for_root[rulename])
# TODO pass callbacks through dict, instead of alias?
callbacks = {rule: rule.alias for rule in rules}
conf = ParserConf(rules, callbacks, [rulename])
parser = earley.Parser(conf, _match, resolve_ambiguity=True)
self._parser_cache[rulename] = parser
# find a full derivation
unreduced_tree = parser.parse(ChildrenLexer(tree.children), rulename)
assert unreduced_tree.data == rulename
return unreduced_tree
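# A minimal usage sketch (assumes an existing Lark instance created with
# maybe_placeholders=False; variable names are illustrative):
#   matcher = TreeMatcher(lark_parser)
#   unreduced = matcher.match_tree(tree, tree.data)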
|
import copy
import json
from typing import Dict, List, Text
from absl import flags
from perfkitbenchmarker import edw_service
from perfkitbenchmarker.providers import aws
FLAGS = flags.FLAGS
def GetSnowflakeClientInterface(warehouse: str, database: str,
schema: str) -> edw_service.EdwClientInterface:
"""Builds and Returns the requested Snowflake client Interface.
Args:
warehouse: String name of the Snowflake virtual warehouse to use during the
benchmark
database: String name of the Snowflake database to use during the benchmark
schema: String name of the Snowflake schema to use during the benchmark
Returns:
A concrete Client Interface object (subclass of EdwClientInterface)
Raises:
RuntimeError: if an unsupported snowflake_client_interface is requested
"""
if FLAGS.snowflake_client_interface == 'JDBC':
return JdbcClientInterface(warehouse, database, schema)
raise RuntimeError('Unknown Snowflake Client Interface requested.')
class JdbcClientInterface(edw_service.EdwClientInterface):
"""Jdbc Client Interface class for Snowflake.
Attributes:
warehouse: String name of the virtual warehouse used during benchmark
database: String name of the database to benchmark
schema: String name of the schema to benchmark
"""
def __init__(self, warehouse: str, database: str, schema: str):
self.warehouse = warehouse
self.database = database
self.schema = schema
def Prepare(self, package_name: str) -> None:
"""Prepares the client vm to execute query.
Installs a java client application that uses the JDBC driver for connecting
to a database server.
https://docs.snowflake.com/en/user-guide/jdbc.html
Args:
package_name: String name of the package defining the preprovisioned data
(certificates, etc.) to extract and use during client vm preparation.
"""
self.client_vm.Install('openjdk')
# Push the executable jar to the working directory on client vm
self.client_vm.InstallPreprovisionedPackageData(
package_name, ['snowflake-jdbc-client-2.0.jar'], '')
def ExecuteQuery(self, query_name: Text) -> (float, Dict[str, str]):
"""Executes a query and returns performance details.
Args:
query_name: String name of the query to execute
Returns:
A tuple of (execution_time, execution details)
execution_time: A Float variable set to the query's completion time in
secs. -1.0 is used as a sentinel value implying the query failed. For a
successful query the value is expected to be positive.
performance_details: A dictionary of query execution attributes eg. job_id
"""
query_command = ('java -cp snowflake-jdbc-client-2.0.jar '
'com.google.cloud.performance.edw.Single --warehouse {} '
'--database {} --schema {} --query_file {}').format(
self.warehouse, self.database, self.schema, query_name)
stdout, _ = self.client_vm.RemoteCommand(query_command)
details = copy.copy(self.GetMetadata()) # Copy the base metadata
details.update(json.loads(stdout)['details'])
return json.loads(stdout)['query_wall_time_in_secs'], details
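  # Illustrative note (assumption, not from the original source): the JDBC client
  # application is expected to print a JSON document on stdout shaped roughly like
  #   {"query_wall_time_in_secs": 12.3, "details": {"job_id": "abc123"}}
  # which is why the wall time and the 'details' dictionary are read from
  # json.loads(stdout) above.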
def ExecuteSimultaneous(self, submission_interval: int,
queries: List[str]) -> str:
"""Executes queries simultaneously on client and return performance details.
Simultaneous app expects queries as white space separated query file names.
Args:
submission_interval: Simultaneous query submission interval in
milliseconds.
queries: List of strings (names) of queries to execute.
Returns:
A serialized dictionary of execution details.
"""
query_command = (
'java -cp snowflake-jdbc-client-2.0.jar '
'com.google.cloud.performance.edw.Simultaneous --warehouse {} '
'--database {} --schema {} --submission_interval {} --query_files {}'
).format(self.warehouse, self.database, self.schema, submission_interval,
' '.join(queries))
stdout, _ = self.client_vm.RemoteCommand(query_command)
return stdout
def ExecuteThroughput(self, concurrency_streams: List[List[str]]) -> str:
"""Executes a throughput test and returns performance details.
Args:
concurrency_streams: List of streams to execute simultaneously, each of
which is a list of string names of queries.
Returns:
A serialized dictionary of execution details.
"""
query_command = ('java -cp snowflake-jdbc-client-2.0.jar '
'com.google.cloud.performance.edw.Throughput --warehouse'
' {} --database {} --schema {} --query_streams {}').format(
self.warehouse, self.database, self.schema, ' '.join([
','.join(stream) for stream in concurrency_streams
]))
stdout, _ = self.client_vm.RemoteCommand(query_command)
return stdout
def GetMetadata(self) -> Dict[str, str]:
"""Gets the Metadata attributes for the Client Interface."""
return {'client': FLAGS.snowflake_client_interface}
class Snowflake(edw_service.EdwService):
"""Object representing a Snowflake Data Warehouse Instance hosted on AWS."""
CLOUD = aws.CLOUD
SERVICE_TYPE = 'snowflake_aws'
def __init__(self, edw_service_spec):
super(Snowflake, self).__init__(edw_service_spec)
self.warehouse = FLAGS.snowflake_warehouse
self.database = FLAGS.snowflake_database
self.schema = FLAGS.snowflake_schema
self.client_interface = GetSnowflakeClientInterface(self.warehouse,
self.database,
self.schema)
def IsUserManaged(self, edw_service_spec):
# TODO(saksena): Remove the assertion after implementing provisioning of
# virtual warehouses.
return True
def _Create(self):
"""Create a Snowflake cluster."""
raise NotImplementedError
def _Exists(self):
"""Method to validate the existence of a Snowflake cluster.
Returns:
Boolean value indicating the existence of a cluster.
"""
return True
def _Delete(self):
"""Delete a Snowflake cluster."""
raise NotImplementedError
def GetMetadata(self):
"""Return a metadata dictionary of the benchmarked Snowflake cluster."""
basic_data = super(Snowflake, self).GetMetadata()
basic_data['warehouse'] = self.warehouse
basic_data['database'] = self.database
basic_data['schema'] = self.schema
basic_data.update(self.client_interface.GetMetadata())
return basic_data
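# Illustrative sketch (not part of the original module; flag values are hypothetical):
# with --snowflake_warehouse=pkb_wh --snowflake_database=tpch --snowflake_schema=sf1,
# Snowflake.GetMetadata() would return the base EdwService metadata plus
#   {'warehouse': 'pkb_wh', 'database': 'tpch', 'schema': 'sf1', 'client': 'JDBC'}.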
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from nets.mobilenetdet import *
from configs.kitti_config import config
import numpy as np
class MobileNetDetTest(tf.test.TestCase):
def test_xywh_to_yxyx(self):
with self.test_session() as sess:
bbox = tf.constant([1, 2, 3, 4], dtype=tf.float32)
bbox_yxyx = xywh_to_yxyx(bbox)
output = sess.run(bbox_yxyx)
self.assertAllEqual(output, [0, -0.5, 4, 2.5])
bbox = tf.constant([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=tf.float32)
bbox_yxyx = xywh_to_yxyx(bbox)
output = sess.run(bbox_yxyx)
self.assertAllEqual(output, [[0, -0.5, 4, 2.5], [0, -0.5, 4, 2.5]])
def test_scale_bbox(self):
with self.test_session() as sess:
bbox = tf.constant([[1, 2, 3, 4], [1, 2, 3, 4]], dtype=tf.float32)
scaled_bbox = scale_bboxes(bbox, [10., 10.])
output = sess.run(scaled_bbox)
print(output)
def test_iou(self):
with self.test_session() as sess:
bbox_1 = tf.constant([0.1, 0.1, 0.2, 0.2], dtype=tf.float32)
bbox_2 = tf.constant([0.15, 0.15, 0.25, 0.25], dtype=tf.float32)
iou_ = iou(bbox_1, bbox_2)
output = sess.run(iou_)
self.assertLess(np.abs(output - 1 / 7.), 1e-4)
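      # Worked check: the boxes overlap on a 0.05 x 0.05 square, so the intersection
      # area is 0.0025; each box has area 0.01, so the union is 0.01 + 0.01 - 0.0025
      # = 0.0175, giving IoU = 0.0025 / 0.0175 = 1/7.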
def test_compute_delta(self):
with self.test_session() as sess:
image_shape = [config.IMG_HEIGHT, config.IMG_WIDTH]
fea_shape = [3, 3]
anchors = set_anchors(image_shape, fea_shape)
gt_box = tf.convert_to_tensor([0.25, 0.25, 0.0805153, 0.26666668])
delta = compute_delta(gt_box, anchors[0][0][0])
print(sess.run(delta))
def test_batch_iou(self):
with self.test_session() as sess:
anchors = set_anchors(img_shape=[config.IMG_HEIGHT, config.IMG_WIDTH],
fea_shape=[config.FEA_HEIGHT, config.FEA_WIDTH])
anchors_shape = anchors.get_shape().as_list()
fea_h = anchors_shape[0]
fea_w = anchors_shape[1]
num_anchors = anchors_shape[2] * fea_h * fea_w
anchors = tf.reshape(anchors, [num_anchors, 4]) # reshape anchors
anchors = xywh_to_yxyx(anchors)
bbox = tf.constant([0.75, 0.75, 0.2, 0.2], dtype=tf.float32)
bbox = xywh_to_yxyx(bbox)
iou = batch_iou(anchors, bbox)
anchor_idx = tf.arg_max(iou, dimension=0)
anchors, output, anchor_idx = sess.run([anchors, iou, anchor_idx])
print(anchors)
print(output)
print(anchor_idx)
def test_batch_iou_(self):
anchors = set_anchors(img_shape=[config.IMG_HEIGHT, config.IMG_WIDTH],
fea_shape=[config.FEA_HEIGHT, config.FEA_WIDTH])
anchors_shape = anchors.get_shape().as_list()
fea_h = anchors_shape[0]
fea_w = anchors_shape[1]
num_anchors = anchors_shape[2] * fea_h * fea_w
anchors = tf.reshape(anchors, [num_anchors, 4]) # reshape anchors
anchors = xywh_to_yxyx(anchors)
bboxes = tf.placeholder(dtype=tf.float32, shape=[None, 4])
bboxes_ = xywh_to_yxyx(bboxes)
ious, indices = batch_iou_(anchors, bboxes_)
with self.test_session() as sess:
ious, indices, bboxes_ = sess.run([ious, indices, bboxes], feed_dict={bboxes: [[0.25, 0.25, 0.5, 0.5],
[0.75, 0.75, 0.2, 0.2]]}
)
print(ious)
print(indices)
print(bboxes_)
def test_batch_iou_fast(self):
anchors = set_anchors(img_shape=[config.IMG_HEIGHT, config.IMG_WIDTH],
fea_shape=[config.FEA_HEIGHT, config.FEA_WIDTH])
anchors_shape = anchors.get_shape().as_list()
fea_h = anchors_shape[0]
fea_w = anchors_shape[1]
num_anchors = anchors_shape[2] * fea_h * fea_w
anchors = tf.reshape(anchors, [num_anchors, 4]) # reshape anchors
anchors = xywh_to_yxyx(anchors)
bboxes = tf.placeholder(dtype=tf.float32, shape=[None, 4])
bboxes_ = xywh_to_yxyx(bboxes)
ious, indices = batch_iou_fast(anchors, bboxes_)
with self.test_session() as sess:
ious, indices, bboxes_ = sess.run([ious, indices, bboxes],
feed_dict={bboxes: [[0.07692308, 0.025, 0.13333334, 0.04025765],
[0.75, 0.75, 0.2, 0.2]]}
)
print(ious)
print(indices)
print(bboxes_)
def test_encode_annos(self):
with self.test_session() as sess:
num_obj = 2
image_shape = [config.IMG_HEIGHT, config.IMG_WIDTH]
fea_shape = [config.FEA_HEIGHT, config.FEA_WIDTH]
num_classes = config.NUM_CLASSES
images = tf.constant(0, shape=[image_shape[0], image_shape[1], 3])
labels = tf.constant(1, shape=[num_obj])
anchors = set_anchors(image_shape, fea_shape)
# Construct test bbox
bbox_1 = tf.convert_to_tensor(xywh_to_yxyx(anchors[0][0][0]), dtype=tf.float32)
bbox_2 = tf.convert_to_tensor(xywh_to_yxyx(anchors[2][2][1]), dtype=tf.float32)
bboxes = tf.stack([bbox_1, bbox_2], axis=0)
input_mask, labels_input, box_delta_input, box_input = \
encode_annos(images, labels, bboxes, anchors, num_classes)
out_input_mask, out_labels_input, out_box_delta_input, out_box_input = \
sess.run([input_mask, labels_input, box_delta_input, box_input])
print("input_mask:", out_input_mask)
print("box_input:", out_box_input)
print("label_input:", out_labels_input)
print("box_delta_input:", out_box_delta_input)
print("shape:",
"input_mask:", np.shape(out_input_mask),
"labels_input:", np.shape(out_labels_input),
"box_delta_input:", np.shape(out_box_delta_input),
"box_input:", np.shape(out_box_input)
)
def test_set_anchors(self):
with self.test_session() as sess:
anchors = set_anchors(img_shape=[config.IMG_HEIGHT, config.IMG_WIDTH],
fea_shape=[config.FEA_HEIGHT, config.FEA_WIDTH])
output = sess.run(anchors)
self.assertAllEqual(np.shape(output), [config.FEA_HEIGHT, config.FEA_WIDTH, config.NUM_ANCHORS, 4])
print("Anchors:", output)
print("Anchors shape:", np.shape(output))
print("Num of anchors:", config.NUM_ANCHORS)
|
import logging
from aurorapy.client import AuroraError, AuroraSerialClient
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_ADDRESS,
CONF_DEVICE,
CONF_NAME,
DEVICE_CLASS_POWER,
POWER_WATT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
DEFAULT_ADDRESS = 2
DEFAULT_NAME = "Solar PV"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_DEVICE): cv.string,
vol.Optional(CONF_ADDRESS, default=DEFAULT_ADDRESS): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Aurora ABB PowerOne device."""
devices = []
comport = config[CONF_DEVICE]
address = config[CONF_ADDRESS]
name = config[CONF_NAME]
_LOGGER.debug("Intitialising com port=%s address=%s", comport, address)
client = AuroraSerialClient(address, comport, parity="N", timeout=1)
devices.append(AuroraABBSolarPVMonitorSensor(client, name, "Power"))
add_entities(devices, True)
class AuroraABBSolarPVMonitorSensor(Entity):
"""Representation of a Sensor."""
def __init__(self, client, name, typename):
"""Initialize the sensor."""
self._name = f"{name} {typename}"
self.client = client
self._state = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return POWER_WATT
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_POWER
def update(self):
"""Fetch new state data for the sensor.
This is the only method that should fetch new data for Home Assistant.
"""
try:
self.client.connect()
# read ADC channel 3 (grid power output)
power_watts = self.client.measure(3, True)
self._state = round(power_watts, 1)
# _LOGGER.debug("Got reading %fW" % self._state)
except AuroraError as error:
# aurorapy does not have different exceptions (yet) for dealing
# with timeout vs other comms errors.
# This means the (normal) situation of no response during darkness
# raises an exception.
# aurorapy (gitlab) pull request merged 29/5/2019. When >0.2.6 is
# released, this could be modified to :
# except AuroraTimeoutError as e:
# Workaround: look at the text of the exception
if "No response after" in str(error):
_LOGGER.debug("No response from inverter (could be dark)")
else:
raise error
self._state = None
finally:
if self.client.serline.isOpen():
self.client.close()
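# Illustrative configuration sketch (not part of the original module). Assuming this
# platform is set up from YAML, an entry would look roughly like:
#   sensor:
#     - platform: aurora_abb_powerone   # platform name is an assumption here
#       device: /dev/ttyUSB0
#       address: 2
#       name: Solar PV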
|
import logging
from sentry_sdk.utils import BadDsn
from homeassistant.components.sentry.const import (
CONF_ENVIRONMENT,
CONF_EVENT_CUSTOM_COMPONENTS,
CONF_EVENT_HANDLED,
CONF_EVENT_THIRD_PARTY_PACKAGES,
CONF_LOGGING_EVENT_LEVEL,
CONF_LOGGING_LEVEL,
CONF_TRACING,
CONF_TRACING_SAMPLE_RATE,
DOMAIN,
)
from homeassistant.config_entries import SOURCE_USER
from homeassistant.data_entry_flow import RESULT_TYPE_ABORT, RESULT_TYPE_FORM
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_full_user_flow_implementation(hass):
"""Test we get the form."""
await async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["errors"] == {}
with patch("homeassistant.components.sentry.config_flow.Dsn"), patch(
"homeassistant.components.sentry.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.sentry.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"dsn": "http://[email protected]/1"},
)
assert result2["type"] == "create_entry"
assert result2["title"] == "Sentry"
assert result2["data"] == {
"dsn": "http://[email protected]/1",
}
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_integration_already_exists(hass):
"""Test we only allow a single config flow."""
MockConfigEntry(domain=DOMAIN).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_user_flow_bad_dsn(hass):
"""Test we handle bad dsn error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch(
"homeassistant.components.sentry.config_flow.Dsn",
side_effect=BadDsn,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"dsn": "foo"},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "bad_dsn"}
async def test_user_flow_unknown_exception(hass):
"""Test we handle any unknown exception error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
with patch(
"homeassistant.components.sentry.config_flow.Dsn",
side_effect=Exception,
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{"dsn": "foo"},
)
assert result2["type"] == RESULT_TYPE_FORM
assert result2["errors"] == {"base": "unknown"}
async def test_options_flow(hass):
"""Test options config flow."""
entry = MockConfigEntry(
domain=DOMAIN,
data={"dsn": "http://[email protected]/1"},
)
entry.add_to_hass(hass)
with patch("homeassistant.components.sentry.async_setup_entry", return_value=True):
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_ENVIRONMENT: "Test",
CONF_EVENT_CUSTOM_COMPONENTS: True,
CONF_EVENT_HANDLED: True,
CONF_EVENT_THIRD_PARTY_PACKAGES: True,
CONF_LOGGING_EVENT_LEVEL: logging.DEBUG,
CONF_LOGGING_LEVEL: logging.DEBUG,
CONF_TRACING: True,
CONF_TRACING_SAMPLE_RATE: 0.5,
},
)
assert result["type"] == "create_entry"
assert result["data"] == {
CONF_ENVIRONMENT: "Test",
CONF_EVENT_CUSTOM_COMPONENTS: True,
CONF_EVENT_HANDLED: True,
CONF_EVENT_THIRD_PARTY_PACKAGES: True,
CONF_LOGGING_EVENT_LEVEL: logging.DEBUG,
CONF_LOGGING_LEVEL: logging.DEBUG,
CONF_TRACING: True,
CONF_TRACING_SAMPLE_RATE: 0.5,
}
|
from datetime import timedelta
import logging
import async_timeout
from pyatag import AtagException, AtagOne
from homeassistant.components.climate import DOMAIN as CLIMATE
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.components.water_heater import DOMAIN as WATER_HEATER
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, asyncio
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "atag"
PLATFORMS = [CLIMATE, WATER_HEATER, SENSOR]
async def async_setup(hass: HomeAssistant, config):
"""Set up the Atag component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Atag integration from a config entry."""
session = async_get_clientsession(hass)
coordinator = AtagDataUpdateCoordinator(hass, session, entry)
await coordinator.async_refresh()
if not coordinator.last_update_success:
raise ConfigEntryNotReady
hass.data.setdefault(DOMAIN, {})
hass.data[DOMAIN][entry.entry_id] = coordinator
if entry.unique_id is None:
hass.config_entries.async_update_entry(entry, unique_id=coordinator.atag.id)
for platform in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, platform)
)
return True
class AtagDataUpdateCoordinator(DataUpdateCoordinator):
"""Define an object to hold Atag data."""
def __init__(self, hass, session, entry):
"""Initialize."""
self.atag = AtagOne(session=session, **entry.data)
super().__init__(
hass, _LOGGER, name=DOMAIN, update_interval=timedelta(seconds=30)
)
async def _async_update_data(self):
"""Update data via library."""
with async_timeout.timeout(20):
try:
if not await self.atag.update():
raise UpdateFailed("No data received")
except AtagException as error:
raise UpdateFailed(error) from error
return self.atag.report
async def async_unload_entry(hass, entry):
"""Unload Atag config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class AtagEntity(CoordinatorEntity):
"""Defines a base Atag entity."""
def __init__(self, coordinator: AtagDataUpdateCoordinator, atag_id: str) -> None:
"""Initialize the Atag entity."""
super().__init__(coordinator)
self._id = atag_id
self._name = DOMAIN.title()
@property
def device_info(self) -> dict:
"""Return info for device registry."""
device = self.coordinator.atag.id
version = self.coordinator.atag.apiversion
return {
"identifiers": {(DOMAIN, device)},
"name": "Atag Thermostat",
"model": "Atag One",
"sw_version": version,
"manufacturer": "Atag",
}
@property
def name(self) -> str:
"""Return the name of the entity."""
return self._name
@property
def unique_id(self):
"""Return a unique ID to use for this entity."""
return f"{self.coordinator.atag.id}-{self._id}"
|
import glob
import os
import subprocess
import sys
from bs4 import BeautifulSoup
JEKYLL_HEADER = """---
layout: help
title: Meld - Help
---
"""
SCSS_HEADER = """
#help-content {
border-left: solid 1px #e0e0df;
border-right: solid 1px #e0e0df;
background-color: #ffffff;
}
#help-content div.body {
border: none !important; }
#help-content div.headbar {
margin: 10px !important;
}
#help-content div.footbar {
margin: 10px !important;
}
#help-content {
.title {
line-height: 1em;
}
h1 {
font-family: sans-serif;
font-weight: bold;
text-shadow: none;
color: black;
}
h2 {
font-family: sans-serif;
text-shadow: none;
color: black;
}
"""
SCSS_FOOTER = """
}
"""
def munge_html(filename):
if not os.path.exists(filename):
print("File not found: " + filename, file=sys.stderr)
sys.exit(1)
with open(filename) as f:
contents = f.read()
soup = BeautifulSoup(contents, "lxml")
body = "".join([str(tag) for tag in soup.body])
body = JEKYLL_HEADER + body
print("Rewriting " + filename)
with open(filename, "w") as f:
f.write(body)
def munge_css(filename):
if not os.path.exists(filename):
print("File not found: " + filename, file=sys.stderr)
sys.exit(1)
with open(filename) as f:
contents = f.read()
contents = SCSS_HEADER + contents + SCSS_FOOTER
new_css = sassify(contents)
print("Rewriting " + filename)
with open(filename, 'w') as f:
f.write(new_css)
def sassify(scss_string):
scss = subprocess.Popen(
['scss', '-s'],
stdin=subprocess.PIPE,
stdout=subprocess.PIPE,
universal_newlines=True,
)
stdout, stderr = scss.communicate(scss_string)
return stdout
if __name__ == "__main__":
if os.path.exists('html'):
print("Refusing to overwrite existing html/ folder", file=sys.stderr)
sys.exit(1)
print("Generating CSS with gnome-doc-tool...", file=sys.stderr)
subprocess.check_call(['gnome-doc-tool', 'css'])
print("Generating HTML with gnome-doc-tool...", file=sys.stderr)
subprocess.check_call(['gnome-doc-tool', 'html', '-c', 'index.css',
'--copy-graphics', '*.page'])
os.mkdir('html')
for filename in glob.glob('*.html'):
munge_html(filename)
os.rename(filename, os.path.join('html', filename))
munge_css('index.css')
os.rename('index.css', os.path.join('html', 'index.css'))
print("Embeddable documentation written to html/", file=sys.stderr)
|
from __future__ import print_function
import numpy
import os
import pickle
import random
import sys
import time
from osgeo import gdal
from openstreetmap_labels import download_and_extract
from geo_util import lat_lon_to_pixel, pixel_to_lat_lon, pixel_to_lat_lon_web_mercator
from naip_images import NAIP_DATA_DIR, NAIPDownloader
from src.config import CACHE_PATH, LABEL_CACHE_DIRECTORY, LABELS_DATA_DIR, IMAGE_CACHE_DIRECTORY, \
METADATA_PATH
# There is a 300 pixel buffer around NAIPs to be trimmed off, where NAIPs overlap;
# otherwise, using overlapping images makes for wonky train/test splits.
NAIP_PIXEL_BUFFER = 300
def read_naip(file_path, bands_to_use):
"""
Read in a NAIP, based on www.machinalis.com/blog/python-for-geospatial-data-processing.
Bands_to_use is an array like [0,0,0,1], designating whether to use each band (R, G, B, IR).
"""
raster_dataset = gdal.Open(file_path, gdal.GA_ReadOnly)
bands_data = []
index = 0
for b in range(1, raster_dataset.RasterCount + 1):
band = raster_dataset.GetRasterBand(b)
if bands_to_use[index] == 1:
bands_data.append(band.ReadAsArray())
index += 1
bands_data = numpy.dstack(bands_data)
return raster_dataset, bands_data
def tile_naip(raster_data_path, raster_dataset, bands_data, bands_to_use, tile_size, tile_overlap):
"""
Cut a 4-band raster image into tiles.
Tiles are cubes - up to 4 bands, and N height x N width based on tile_size.
"""
on_band_count = 0
for b in bands_to_use:
if b == 1:
on_band_count += 1
rows, cols, n_bands = bands_data.shape
print("OPENED NAIP with {} rows, {} cols, and {} bands".format(rows, cols, n_bands))
print("GEO-BOUNDS for image chunk is {}".format(bounds_for_naip(raster_dataset, rows, cols)))
all_tiled_data = []
    # use integer division so the stride stays an int for range()
    for col in range(NAIP_PIXEL_BUFFER, cols - NAIP_PIXEL_BUFFER, tile_size // tile_overlap):
        for row in range(NAIP_PIXEL_BUFFER, rows - NAIP_PIXEL_BUFFER, tile_size // tile_overlap):
if row + tile_size < rows - NAIP_PIXEL_BUFFER and \
col + tile_size < cols - NAIP_PIXEL_BUFFER:
new_tile = bands_data[row:row + tile_size, col:col + tile_size, 0:on_band_count]
all_tiled_data.append((new_tile, (col, row), raster_data_path))
return all_tiled_data
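# Worked example of the tiling stride above: with tile_size=64 and tile_overlap=1 the
# stride is 64 pixels (no overlap); with tile_overlap=2 the stride drops to 32 pixels,
# roughly quadrupling the number of tiles. Tiles are only kept when they fit entirely
# inside the NAIP_PIXEL_BUFFER margin.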
def way_bitmap_for_naip(
ways, raster_data_path,
raster_dataset,
rows, cols, pixels_to_fatten_roads=None):
"""
Generate a matrix of size rows x cols, initialized to all zeroes.
Set matrix to 1 for any pixel where an OSM way runs over.
"""
parts = raster_data_path.split('/')
naip_grid = parts[len(parts)-2]
naip_filename = parts[len(parts)-1]
cache_filename = LABELS_DATA_DIR + '/' + naip_grid + '/' + naip_filename + '-ways.bitmap.npy'
try:
arr = numpy.load(cache_filename)
print("CACHED: read label data from disk")
return arr
    except Exception:
        # Cache miss or unreadable cache file; fall through and rebuild the bitmap.
        pass
way_bitmap = numpy.zeros([rows, cols], dtype=numpy.int)
bounds = bounds_for_naip(raster_dataset, rows, cols)
ways_on_naip = []
for way in ways:
for point_tuple in way['linestring']:
if bounds_contains_point(bounds, point_tuple):
ways_on_naip.append(way)
break
print("EXTRACTED {} highways in NAIP bounds, of {} ways".format(len(ways_on_naip), len(ways)))
print("MAKING BITMAP for way presence...", end="")
t0 = time.time()
for w in ways_on_naip:
for x in range(len(w['linestring']) - 1):
current_point = w['linestring'][x]
next_point = w['linestring'][x + 1]
if not bounds_contains_point(bounds, current_point) or \
not bounds_contains_point(bounds, next_point):
continue
current_pix = lat_lon_to_pixel(raster_dataset, current_point)
next_pix = lat_lon_to_pixel(raster_dataset, next_point)
add_pixels_between(current_pix, next_pix, cols, rows, way_bitmap,
pixels_to_fatten_roads)
print(" {0:.1f}s".format(time.time() - t0))
print("CACHING %s..." % cache_filename, end="")
t0 = time.time()
# make sure cache_filename's parent folder exists
try:
os.makedirs(os.path.dirname(cache_filename))
    except OSError:
        # The directory may already exist.
        pass
# then save file to cache_filename
numpy.save(cache_filename, way_bitmap)
print(" {0:.1f}s".format(time.time() - t0))
return way_bitmap
def bounds_for_naip(raster_dataset, rows, cols):
"""Clip the NAIP to 0 to cols, 0 to rows."""
left_x, right_x, top_y, bottom_y = \
NAIP_PIXEL_BUFFER, cols - NAIP_PIXEL_BUFFER, NAIP_PIXEL_BUFFER, rows - NAIP_PIXEL_BUFFER
sw = pixel_to_lat_lon(raster_dataset, left_x, bottom_y)
ne = pixel_to_lat_lon(raster_dataset, right_x, top_y)
return {'sw': sw, 'ne': ne}
def add_pixels_between(start_pixel, end_pixel, cols, rows, way_bitmap, pixels_to_fatten_roads):
"""Add the pixels between the start and end to way_bitmap, maybe thickened based on config."""
if end_pixel[0] - start_pixel[0] == 0:
for y in range(min(end_pixel[1], start_pixel[1]), max(end_pixel[1], start_pixel[1])):
safe_add_pixel(end_pixel[0], y, way_bitmap)
# if configged, fatten lines
for x in range(1, pixels_to_fatten_roads + 1):
safe_add_pixel(end_pixel[0] - x, y, way_bitmap)
safe_add_pixel(end_pixel[0] + x, y, way_bitmap)
return
slope = (end_pixel[1] - start_pixel[1]) / float(end_pixel[0] - start_pixel[0])
offset = end_pixel[1] - slope * end_pixel[0]
i = 0
while i < cols:
floatx = start_pixel[0] + (end_pixel[0] - start_pixel[0]) * i / float(cols)
p = (int(floatx), int(offset + slope * floatx))
safe_add_pixel(p[0], p[1], way_bitmap)
i += 1
# if configged, fatten lines
for x in range(1, pixels_to_fatten_roads + 1):
safe_add_pixel(p[0], p[1] - x, way_bitmap)
safe_add_pixel(p[0], p[1] + x, way_bitmap)
safe_add_pixel(p[0] - x, p[1], way_bitmap)
safe_add_pixel(p[0] + x, p[1], way_bitmap)
def safe_add_pixel(x, y, way_bitmap):
"""Turn on a pixel in way_bitmap if its in bounds."""
if x < NAIP_PIXEL_BUFFER or y < NAIP_PIXEL_BUFFER or x >= len(way_bitmap[
0]) - NAIP_PIXEL_BUFFER or y >= len(way_bitmap) - NAIP_PIXEL_BUFFER:
return
way_bitmap[y][x] = 1
def bounds_contains_point(bounds, point_tuple):
"""Return True if the bounds geographically contains the point_tuple."""
if point_tuple[0] > bounds['ne'][0]:
return False
if point_tuple[0] < bounds['sw'][0]:
return False
if point_tuple[1] > bounds['ne'][1]:
return False
if point_tuple[1] < bounds['sw'][1]:
return False
return True
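# Illustrative example (not from the original source; coordinate order follows whatever
# pixel_to_lat_lon returns): for bounds {'sw': (-122.5, 37.7), 'ne': (-122.3, 37.8)},
# the point (-122.4, 37.75) is inside, while (-122.6, 37.75) fails the south-west
# check on index 0 and returns False.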
def create_tiled_training_data(raster_data_paths, extract_type, band_list, tile_size,
pixels_to_fatten_roads, label_data_files, tile_overlap, naip_state):
"""Save tiles for training data to disk, file names are padded with 16 0s.
Images are named 0000000000000000.colors.
Labels are named 0000000000000000.lbl.
"""
# tile images and labels
waymap = download_and_extract(label_data_files, extract_type)
tile_index = 0
for raster_data_path in raster_data_paths:
# TODO need new code to check cache
raster_dataset, bands_data = read_naip(raster_data_path, band_list)
rows = bands_data.shape[0]
cols = bands_data.shape[1]
way_bitmap_npy = way_bitmap_for_naip(waymap.extracter.ways, raster_data_path,
raster_dataset, rows, cols, pixels_to_fatten_roads)
left_x, right_x = NAIP_PIXEL_BUFFER, cols - NAIP_PIXEL_BUFFER
top_y, bottom_y = NAIP_PIXEL_BUFFER, rows - NAIP_PIXEL_BUFFER
# tile the way bitmap
origin_tile_index = tile_index
        for col in range(left_x, right_x, tile_size // tile_overlap):
            for row in range(top_y, bottom_y, tile_size // tile_overlap):
if row + tile_size < bottom_y and col + tile_size < right_x:
file_suffix = '{0:016d}'.format(tile_index)
label_filepath = "{}/{}.lbl".format(LABEL_CACHE_DIRECTORY, file_suffix)
new_tile = way_bitmap_npy[row:row + tile_size, col:col + tile_size]
                    # numpy.save writes binary data, so open the file in 'wb' mode
                    with open(label_filepath, 'wb') as outfile:
numpy.save(outfile, numpy.asarray((new_tile, col, row, raster_data_path)))
tile_index += 1
tile_index = origin_tile_index
# tile the NAIP
for tile in tile_naip(raster_data_path, raster_dataset, bands_data, band_list, tile_size,
tile_overlap):
file_suffix = '{0:016d}'.format(tile_index)
img_filepath = "{}/{}.colors".format(IMAGE_CACHE_DIRECTORY, file_suffix)
            with open(img_filepath, 'wb') as outfile:
numpy.save(outfile, tile)
tile_index += 1
# dump the metadata to disk for configuring the analysis script later
training_info = {'bands': band_list, 'tile_size': tile_size, 'naip_state': naip_state}
    with open(CACHE_PATH + METADATA_PATH, 'wb') as outfile:
pickle.dump(training_info, outfile)
def equalize_data(road_labels, naip_tiles, save_clippings):
"""Make sure labeled data includes an equal set of ON and OFF tiles."""
wayless_indices = []
way_indices = []
for x in range(0, len(road_labels)):
if road_labels[x][0] == 0:
way_indices.append(x)
else:
wayless_indices.append(x)
count_wayless = len(wayless_indices)
count_withways = len(way_indices)
equal_count_way_list = []
equal_count_tile_list = []
for x in range(min(count_wayless, count_withways)):
way_index = way_indices[x]
wayless_index = wayless_indices[x]
equal_count_way_list.append(road_labels[way_index])
equal_count_way_list.append(road_labels[wayless_index])
equal_count_tile_list.append(naip_tiles[way_index])
equal_count_tile_list.append(naip_tiles[wayless_index])
return equal_count_way_list, equal_count_tile_list
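# Worked example: with 10 tiles labeled [0, 1] (road present) and 4 tiles labeled
# [1, 0] (no road), equalize_data() returns 8 labels and 8 tiles, alternating
# road / no-road pairs, so both classes are equally represented.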
def has_ways_in_center(tile, tolerance):
"""Return true if the tile has road pixels withing tolerance pixels of the tile center."""
center_x = len(tile) / 2
center_y = len(tile[0]) / 2
for x in range(center_x - tolerance, center_x + tolerance):
for y in range(center_y - tolerance, center_y + tolerance):
pixel_value = tile[x][y]
if pixel_value != 0:
return True
return False
def format_as_onehot_arrays(new_label_paths):
"""Return a list of one-hot array labels, for a list of tiles.
Converts to a one-hot array of whether the tile has ways (i.e. [0,1] or [1,0] for each).
"""
training_images, onehot_training_labels = [], []
print("CREATING ONE-HOT LABELS...")
t0 = time.time()
on_count = 0
off_count = 0
for filename in new_label_paths:
full_path = "{}/{}".format(LABEL_CACHE_DIRECTORY, filename)
label = numpy.load(full_path)
parts = full_path.split('.')[0].split('/')
file_suffix = parts[len(parts)-1]
img_path = "{}/{}.colors".format(IMAGE_CACHE_DIRECTORY, file_suffix)
if has_ways_in_center(label[0], 1):
onehot_training_labels.append([0, 1])
on_count += 1
training_images.append(numpy.load(img_path))
elif not has_ways_in_center(label[0], 16):
onehot_training_labels.append([1, 0])
off_count += 1
training_images.append(numpy.load(img_path))
print("one-hotting took {0:.1f}s".format(time.time() - t0))
return training_images, onehot_training_labels
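# Example of the one-hot scheme above: a tile with a road pixel within 1 pixel of its
# center is labeled [0, 1], a tile with no road pixel within 16 pixels of its center is
# labeled [1, 0], and ambiguous tiles in between are skipped entirely.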
def load_training_tiles(number_of_tiles):
"""Return number_of_tiles worth of training_label_paths."""
print("LOADING DATA: reading from disk and unpickling")
t0 = time.time()
training_label_paths = []
all_paths = os.listdir(LABEL_CACHE_DIRECTORY)
for x in range(0, number_of_tiles):
label_path = random.choice(all_paths)
training_label_paths.append(label_path)
print("DATA LOADED: time to deserialize test data {0:.1f}s".format(time.time() - t0))
return training_label_paths
def load_all_training_tiles(naip_path, bands):
"""Return the image and label tiles for the naip_path."""
print("LOADING DATA: reading from disk and unpickling")
t0 = time.time()
tile_size = 64
tile_overlap = 1
raster_dataset, bands_data = read_naip(naip_path, bands)
training_images = tile_naip(naip_path, raster_dataset, bands_data, bands, tile_size,
tile_overlap)
rows = bands_data.shape[0]
cols = bands_data.shape[1]
parts = naip_path.split('/')
naip_grid = parts[len(parts)-2]
naip_filename = parts[len(parts)-1]
cache_filename = LABELS_DATA_DIR + '/' + naip_grid + '/' + naip_filename + '-ways.bitmap.npy'
way_bitmap_npy = numpy.load(cache_filename)
left_x, right_x = NAIP_PIXEL_BUFFER, cols - NAIP_PIXEL_BUFFER
top_y, bottom_y = NAIP_PIXEL_BUFFER, rows - NAIP_PIXEL_BUFFER
training_labels = []
    for col in range(left_x, right_x, tile_size // tile_overlap):
        for row in range(top_y, bottom_y, tile_size // tile_overlap):
if row + tile_size < bottom_y and col + tile_size < right_x:
new_tile = way_bitmap_npy[row:row + tile_size, col:col + tile_size]
training_labels.append(numpy.asarray((new_tile, col, row, naip_path)))
print("DATA LOADED: time to deserialize test data {0:.1f}s".format(time.time() - t0))
return training_labels, training_images
def tag_with_locations(test_images, predictions, tile_size, state_abbrev):
"""Combine image data with label data, so info can be rendered in a map and list UI.
Add location data for convenience too.
"""
combined_data = []
for idx, img_loc_tuple in enumerate(test_images):
raster_filename = img_loc_tuple[2]
raster_dataset = gdal.Open(os.path.join(NAIP_DATA_DIR, raster_filename), gdal.GA_ReadOnly)
raster_tile_x = img_loc_tuple[1][0]
raster_tile_y = img_loc_tuple[1][1]
ne_lat, ne_lon = pixel_to_lat_lon_web_mercator(raster_dataset, raster_tile_x +
tile_size, raster_tile_y)
sw_lat, sw_lon = pixel_to_lat_lon_web_mercator(raster_dataset, raster_tile_x,
raster_tile_y + tile_size)
certainty = predictions[idx][0]
formatted_info = {'certainty': certainty, 'ne_lat': ne_lat, 'ne_lon': ne_lon,
'sw_lat': sw_lat, 'sw_lon': sw_lon, 'raster_tile_x': raster_tile_x,
'raster_tile_y': raster_tile_y, 'raster_filename': raster_filename,
'state_abbrev': state_abbrev, 'country_abbrev': 'USA'
}
combined_data.append(formatted_info)
return combined_data
def download_and_serialize(number_of_naips,
randomize_naips,
naip_state,
naip_year,
extract_type,
bands,
tile_size,
pixels_to_fatten_roads,
label_data_files,
tile_overlap):
"""Download NAIP images, PBF files, and serialize training data."""
raster_data_paths = NAIPDownloader(number_of_naips,
randomize_naips,
naip_state,
naip_year).download_naips()
create_tiled_training_data(raster_data_paths,
extract_type,
bands,
tile_size,
pixels_to_fatten_roads,
label_data_files,
tile_overlap,
naip_state)
return raster_data_paths
if __name__ == "__main__":
print("Use bin/create_training_data.py instead of running this script.", file=sys.stderr)
sys.exit(1)
|
import os
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import linux_packages
from perfkitbenchmarker import sample
_BENCHMARKS = [
'avrora', 'batik', 'eclipse', 'fop', 'h2', 'jython', 'luindex', 'lusearch',
'pmd', 'sunflow', 'tomcat', 'tradebeans', 'tradesoap', 'xalan'
]
flags.DEFINE_string('dacapo_jar_filename', 'dacapo-9.12-MR1-bach.jar',
'Filename of DaCapo jar file.')
flags.DEFINE_enum('dacapo_benchmark', 'luindex', _BENCHMARKS,
'Name of specific DaCapo benchmark to execute.')
flags.DEFINE_integer('dacapo_num_iters', 1, 'Number of iterations to execute.')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'dacapo'
BENCHMARK_CONFIG = """
dacapo:
description: Runs DaCapo benchmarks
vm_groups:
default:
vm_spec: *default_single_core
"""
_PASS_PATTERN = re.compile(r'^=====.*PASSED in (\d+) msec =====$')
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
"""Install the DaCapo benchmark suite on the vms.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
benchmark_spec.vms[0].Install('dacapo')
def Run(benchmark_spec):
"""Run the DaCapo benchmark on the vms.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A singleton list of sample.Sample objects containing the DaCapo benchmark
run time (in msec).
Raises:
errors.Benchmarks.RunError if the DaCapo benchmark didn't succeed.
"""
_, stderr = benchmark_spec.vms[0].RemoteCommand(
'java -jar %s %s -n %i --scratch-directory=%s' %
(os.path.join(linux_packages.INSTALL_DIR, FLAGS.dacapo_jar_filename),
FLAGS.dacapo_benchmark, FLAGS.dacapo_num_iters,
os.path.join(linux_packages.INSTALL_DIR, 'dacapo_scratch')))
for line in stderr.splitlines():
m = _PASS_PATTERN.match(line)
if m:
metadata = {'dacapo_benchmark': FLAGS.dacapo_benchmark}
return [sample.Sample('run_time', float(m.group(1)), 'ms', metadata)]
raise errors.Benchmarks.RunError(
'DaCapo benchmark %s failed.' % FLAGS.dacapo_benchmark)
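# Illustrative command (assumption, for reference only): with the default flags and an
# INSTALL_DIR of /opt/pkb, Run() issues roughly
#   java -jar /opt/pkb/dacapo-9.12-MR1-bach.jar luindex -n 1 \
#       --scratch-directory=/opt/pkb/dacapo_scratch
# and then scans stderr for the "===== ... PASSED in N msec =====" line.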
def Cleanup(benchmark_spec):
"""Cleanup the DaCapo benchmark on the target vm (by uninstalling).
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
benchmark_spec.vms[0].RemoteCommand(
'rm -rf %s' % os.path.join(linux_packages.INSTALL_DIR, 'dacapo_scratch'))
|
import os
import sys
import tempfile
import shutil
from flexx.util.testing import run_tests_if_main, raises, skip
from flexx.app._assetstore import assets, AssetStore as _AssetStore
from flexx.app._session import Session
from flexx import app
N_STANDARD_ASSETS = 3
test_filename = os.path.join(tempfile.gettempdir(), 'flexx_asset_cache.test')
class AssetStore(_AssetStore):
_test_mode = True
def test_asset_store_collect():
s = AssetStore()
s.update_modules()
assert len(s.modules) > 1
assert 'flexx.app._component2' in s.modules
assert 'JsComponent.prototype =' in s.get_asset('flexx.app._component2.js').to_string()
assert 'JsComponent.prototype =' in s.get_asset('flexx.app.js').to_string()
assert 'JsComponent.prototype =' in s.get_asset('flexx.js').to_string()
#assert 'JsComponent.prototype =' not in s.get_asset('flexx.ui.js').to_string()
assert 'JsComponent.prototype =' not in s.get_asset('pscript-std.js').to_string()
def test_asset_store_collect2():
try:
from flexx import ui
except ImportError:
skip('no flexx.ui')
s = AssetStore()
s.update_modules()
assert len(s.modules) > 10
assert 'flexx.ui._widget' in s.modules
assert '$Widget =' in s.get_asset('flexx.ui._widget.js').to_string()
assert '$Widget =' in s.get_asset('flexx.ui.js').to_string()
assert '$Widget =' in s.get_asset('flexx.js').to_string()
assert '$Widget =' not in s.get_asset('flexx.app.js').to_string()
def test_asset_store_adding_assets():
s = AssetStore()
# Adding
s.add_shared_asset('foo.js', 'XXX')
with raises(ValueError):
s.add_shared_asset('foo.js', 'XXX') # asset with that name already present
# Getting
assert 'XXX' in s.get_asset('foo.js').to_string()
with raises(ValueError):
s.get_asset('foo.png') # only .js and .css allowed
with raises(KeyError):
s.get_asset('foo-not-exists.js') # does not exist
def test_associate_asset():
s = AssetStore()
with raises(TypeError):
s.associate_asset('module.name1', 'foo.js') # no source given
s.associate_asset('module.name1', 'foo.js', 'xxx')
assert s.get_asset('foo.js').to_string() == 'xxx'
# Now we can "re-use" the asset
s.associate_asset('module.name2', 'foo.js')
# And its an error to overload it
with raises(TypeError):
s.associate_asset('module.name2', 'foo.js', 'zzz')
# Add one more
s.associate_asset('module.name2', 'bar.js', 'yyy')
# Check
assert s.get_associated_assets('module.name1') == ('foo.js', )
assert s.get_associated_assets('module.name2') == ('foo.js', 'bar.js')
def test_asset_store_data():
s = AssetStore()
assert len(s.get_asset_names()) == N_STANDARD_ASSETS
assert len(s.get_data_names()) == 0
# Add data
s.add_shared_data('xx', b'xxxx')
s.add_shared_data('yy', b'yyyy')
assert len(s.get_asset_names()) == N_STANDARD_ASSETS
assert len(s.get_data_names()) == 2
assert 'xx' in s.get_data_names()
assert 'yy' in s.get_data_names()
assert '2 data' in repr(s)
# get_data()
assert s.get_data('xx') == b'xxxx'
assert s.get_data('zz') is None
# Add data with same name
with raises(ValueError):
s.add_shared_data('xx', b'zzzz')
# # Add url data
# s.add_shared_data('readme', 'https://github.com/flexxui/flexx/blob/master/README.md')
# # assert 'Flexx is' in s.get_data('readme').decode()
# assert s.get_data('readme').startswith('https://github')
# Add BS data
with raises(TypeError):
s.add_shared_data('dd') # no data
with raises(TypeError):
s.add_shared_data('dd', 4) # not an asset
if sys.version_info > (3, ):
with raises(TypeError):
s.add_shared_data('dd', 'not bytes')
with raises(TypeError):
s.add_shared_data(b'dd', b'yes, bytes') # name not str
with raises(TypeError):
s.add_shared_data(4, b'zzzz') # name not a str
def test_not_allowing_local_files():
""" At some point, flexx allowed adding local files as data, but
    this was removed because of the potential security hole. This test
    is a remnant to ensure it's gone.
"""
s = AssetStore()
    # Add shared data from a local file; don't allow this!
filename = __file__
assert os.path.isfile(filename)
with raises(TypeError):
s.add_shared_data('testfile3', 'file://' + filename)
# Add local file without "file://" prefix
if sys.version_info > (3, ):
with raises(TypeError):
s.add_shared_data('testfile4', filename)
run_tests_if_main()
|
from .base import FieldType, DerivedType
from dedupe import predicates
from categorical import CategoricalComparator
class CategoricalType(FieldType):
type = "Categorical"
_predicate_functions = [predicates.wholeFieldPredicate]
def _categories(self, definition):
try:
categories = definition["categories"]
except KeyError:
raise ValueError('No "categories" defined')
return categories
def __init__(self, definition):
super(CategoricalType, self).__init__(definition)
categories = self._categories(definition)
self.comparator = CategoricalComparator(categories)
self.higher_vars = []
for higher_var in self.comparator.dummy_names:
dummy_var = DerivedType({'name': higher_var,
'type': 'Dummy',
'has missing': self.has_missing})
self.higher_vars.append(dummy_var)
def __len__(self):
return len(self.higher_vars)
|
import json
from brother import SnmpError, UnsupportedModel
from homeassistant import data_entry_flow
from homeassistant.components.brother.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_HOST, CONF_TYPE
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
CONFIG = {CONF_HOST: "localhost", CONF_TYPE: "laser"}
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
async def test_create_entry_with_hostname(hass):
"""Test that the user step works with printer hostname."""
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "HL-L2340DW 0123456789"
assert result["data"][CONF_HOST] == CONFIG[CONF_HOST]
assert result["data"][CONF_TYPE] == CONFIG[CONF_TYPE]
async def test_create_entry_with_ip_address(hass):
"""Test that the user step works with printer IP address."""
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: "127.0.0.1", CONF_TYPE: "laser"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "HL-L2340DW 0123456789"
assert result["data"][CONF_HOST] == "127.0.0.1"
assert result["data"][CONF_TYPE] == "laser"
async def test_invalid_hostname(hass):
"""Test invalid hostname in user_input."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data={CONF_HOST: "invalid/hostname", CONF_TYPE: "laser"},
)
assert result["errors"] == {CONF_HOST: "wrong_host"}
async def test_connection_error(hass):
"""Test connection to host error."""
with patch("brother.Brother._get_data", side_effect=ConnectionError()):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["errors"] == {"base": "cannot_connect"}
async def test_snmp_error(hass):
"""Test SNMP error."""
with patch("brother.Brother._get_data", side_effect=SnmpError("error")):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["errors"] == {"base": "snmp_error"}
async def test_unsupported_model_error(hass):
"""Test unsupported printer model error."""
with patch("brother.Brother._get_data", side_effect=UnsupportedModel("error")):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "unsupported_model"
async def test_device_exists_abort(hass):
"""Test we abort config flow if Brother printer already configured."""
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
MockConfigEntry(domain=DOMAIN, unique_id="0123456789", data=CONFIG).add_to_hass(
hass
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_zeroconf_no_data(hass):
"""Test we abort if zeroconf provides no data."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_zeroconf_not_brother_printer_error(hass):
"""Test we abort zeroconf flow if printer isn't Brother."""
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={"hostname": "example.local.", "name": "Another Printer"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_brother_printer"
async def test_zeroconf_snmp_error(hass):
"""Test we abort zeroconf flow on SNMP error."""
with patch("brother.Brother._get_data", side_effect=SnmpError("error")):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={"hostname": "example.local.", "name": "Brother Printer"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "cannot_connect"
async def test_zeroconf_device_exists_abort(hass):
"""Test we abort zeroconf flow if Brother printer already configured."""
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
MockConfigEntry(domain=DOMAIN, unique_id="0123456789", data=CONFIG).add_to_hass(
hass
)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={"hostname": "example.local.", "name": "Brother Printer"},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_zeroconf_confirm_create_entry(hass):
"""Test zeroconf confirmation and create config entry."""
with patch(
"brother.Brother._get_data",
return_value=json.loads(load_fixture("brother_printer_data.json")),
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_ZEROCONF},
data={"hostname": "example.local.", "name": "Brother Printer"},
)
assert result["step_id"] == "zeroconf_confirm"
assert result["description_placeholders"]["model"] == "HL-L2340DW"
assert result["description_placeholders"]["serial_number"] == "0123456789"
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_TYPE: "laser"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "HL-L2340DW 0123456789"
assert result["data"][CONF_HOST] == "example.local"
assert result["data"][CONF_TYPE] == "laser"
|
import os
import sys
# Setuptools has to be imported before distutils, or things break.
from setuptools import setup
from distutils.core import Extension # pylint: disable=wrong-import-order
from distutils.command.build_ext import build_ext # pylint: disable=wrong-import-order
from distutils import errors # pylint: disable=wrong-import-order
# Get or massage our metadata. We exec coverage/version.py so we can avoid
# importing the product code into setup.py.
classifiers = """\
Environment :: Console
Intended Audience :: Developers
License :: OSI Approved :: Apache Software License
Operating System :: OS Independent
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 2.7
Programming Language :: Python :: 3
Programming Language :: Python :: 3.5
Programming Language :: Python :: 3.6
Programming Language :: Python :: 3.7
Programming Language :: Python :: 3.8
Programming Language :: Python :: 3.9
Programming Language :: Python :: 3.10
Programming Language :: Python :: Implementation :: CPython
Programming Language :: Python :: Implementation :: PyPy
Topic :: Software Development :: Quality Assurance
Topic :: Software Development :: Testing
"""
cov_ver_py = os.path.join(os.path.split(__file__)[0], "coverage/version.py")
with open(cov_ver_py) as version_file:
# __doc__ will be overwritten by version.py.
doc = __doc__
# Keep pylint happy.
__version__ = __url__ = version_info = ""
# Execute the code in version.py.
exec(compile(version_file.read(), cov_ver_py, 'exec'))
with open("README.rst") as readme:
long_description = readme.read().replace("https://coverage.readthedocs.io", __url__)
with open("CONTRIBUTORS.txt", "rb") as contributors:
paras = contributors.read().split(b"\n\n")
num_others = len(paras[-1].splitlines())
num_others += 1 # Count Gareth Rees, who is mentioned in the top paragraph.
classifier_list = classifiers.splitlines()
if version_info[3] == 'alpha':
devstat = "3 - Alpha"
elif version_info[3] in ['beta', 'candidate']:
devstat = "4 - Beta"
else:
assert version_info[3] == 'final'
devstat = "5 - Production/Stable"
classifier_list.append("Development Status :: " + devstat)
# Create the keyword arguments for setup()
setup_args = dict(
name='coverage',
version=__version__,
packages=[
'coverage',
],
package_data={
'coverage': [
'htmlfiles/*.*',
'fullcoverage/*.*',
]
},
entry_points={
# Install a script as "coverage", and as "coverage[23]", and as
# "coverage-2.7" (or whatever).
'console_scripts': [
'coverage = coverage.cmdline:main',
'coverage%d = coverage.cmdline:main' % sys.version_info[:1],
'coverage-%d.%d = coverage.cmdline:main' % sys.version_info[:2],
],
},
extras_require={
# Enable pyproject.toml support.
'toml': ['toml'],
},
# We need to get HTML assets from our htmlfiles directory.
zip_safe=False,
author='Ned Batchelder and {} others'.format(num_others),
author_email='[email protected]',
description=doc,
long_description=long_description,
long_description_content_type='text/x-rst',
keywords='code coverage testing',
license='Apache 2.0',
classifiers=classifier_list,
url="https://github.com/nedbat/coveragepy",
project_urls={
'Documentation': __url__,
'Funding': (
'https://tidelift.com/subscription/pkg/pypi-coverage'
'?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=pypi'
),
'Issues': 'https://github.com/nedbat/coveragepy/issues',
},
python_requires=">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4",
)
# A replacement for the build_ext command which raises a single exception
# if the build fails, so we can fallback nicely.
ext_errors = (
errors.CCompilerError,
errors.DistutilsExecError,
errors.DistutilsPlatformError,
)
if sys.platform == 'win32':
# distutils.msvc9compiler can raise an IOError when failing to
# find the compiler
ext_errors += (IOError,)
class BuildFailed(Exception):
"""Raise this to indicate the C extension wouldn't build."""
def __init__(self):
Exception.__init__(self)
self.cause = sys.exc_info()[1] # work around py 2/3 different syntax
class ve_build_ext(build_ext):
"""Build C extensions, but fail with a straightforward exception."""
def run(self):
"""Wrap `run` with `BuildFailed`."""
try:
build_ext.run(self)
except errors.DistutilsPlatformError:
raise BuildFailed()
def build_extension(self, ext):
"""Wrap `build_extension` with `BuildFailed`."""
try:
# Uncomment to test compile failure handling:
# raise errors.CCompilerError("OOPS")
build_ext.build_extension(self, ext)
except ext_errors:
raise BuildFailed()
except ValueError as err:
# this can happen on Windows 64 bit, see Python issue 7511
if "'path'" in str(err): # works with both py 2/3
raise BuildFailed()
raise
# There are a few reasons we might not be able to compile the C extension.
# Figure out if we should attempt the C extension or not.
compile_extension = True
if sys.platform.startswith('java'):
# Jython can't compile C extensions
compile_extension = False
if '__pypy__' in sys.builtin_module_names:
# Pypy can't compile C extensions
compile_extension = False
if compile_extension:
setup_args.update(dict(
ext_modules=[
Extension(
"coverage.tracer",
sources=[
"coverage/ctracer/datastack.c",
"coverage/ctracer/filedisp.c",
"coverage/ctracer/module.c",
"coverage/ctracer/tracer.c",
],
),
],
cmdclass={
'build_ext': ve_build_ext,
},
))
def main():
"""Actually invoke setup() with the arguments we built above."""
# For a variety of reasons, it might not be possible to install the C
# extension. Try it with, and if it fails, try it without.
try:
setup(**setup_args)
except BuildFailed as exc:
msg = "Couldn't install with extension module, trying without it..."
exc_msg = "%s: %s" % (exc.__class__.__name__, exc.cause)
print("**\n** %s\n** %s\n**" % (msg, exc_msg))
del setup_args['ext_modules']
setup(**setup_args)
if __name__ == '__main__':
main()
|
import re
import math
import itertools
import string
import abc
from doublemetaphone import doublemetaphone
from dedupe.cpredicates import ngrams, initials
import dedupe.tfidf as tfidf
import dedupe.levenshtein as levenshtein
from typing import Sequence, Callable, Any, Tuple, Set
from dedupe._typing import RecordDict
words = re.compile(r"[\w']+").findall
integers = re.compile(r"\d+").findall
start_word = re.compile(r"^([\w']+)").match
start_integer = re.compile(r"^(\d+)").match
alpha_numeric = re.compile(r"(?=\w*\d)[a-zA-Z\d]+").findall
PUNCTABLE = str.maketrans("", "", string.punctuation)
def strip_punc(s):
return s.translate(PUNCTABLE)
class Predicate(abc.ABC):
def __iter__(self):
yield self
def __repr__(self):
return "%s: %s" % (self.type, self.__name__)
def __hash__(self):
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(repr(self))
return h
def __eq__(self, other):
return repr(self) == repr(other)
def __len__(self):
return 1
@abc.abstractmethod
def __call__(self, record, **kwargs) -> tuple:
pass
def __add__(self, other: 'Predicate') -> 'CompoundPredicate':
if isinstance(other, CompoundPredicate):
return CompoundPredicate((self,) + tuple(other))
elif isinstance(other, Predicate):
return CompoundPredicate((self, other))
else:
raise ValueError('Can only combine predicates')
class SimplePredicate(Predicate):
type = "SimplePredicate"
def __init__(self, func: Callable[[Any], Tuple[str, ...]], field: str):
self.func = func
self.__name__ = "(%s, %s)" % (func.__name__, field)
self.field = field
def __call__(self, record: RecordDict, **kwargs) -> Tuple[str, ...]:
column = record[self.field]
if column:
return self.func(column)
else:
return ()
class StringPredicate(SimplePredicate):
def __call__(self, record: RecordDict, **kwargs):
column = record[self.field]
if column:
return self.func(" ".join(strip_punc(column).split()))
else:
return ()
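# Illustrative usage (the record layout and field name below are hypothetical):
# a predicate wraps a blocking function and a field name, and calling it on a
# record yields the block keys for that record. StringPredicate additionally
# strips punctuation and normalises whitespace before applying the function.
#
#   predicate = StringPredicate(tokenFieldPredicate, 'name')
#   predicate({'name': 'Jean-Luc  Picard!'})   # -> {'JeanLuc', 'Picard'}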
class ExistsPredicate(Predicate):
type = "ExistsPredicate"
def __init__(self, field):
self.__name__ = "(Exists, %s)" % (field,)
self.field = field
@staticmethod
def func(column):
if column:
return ('1',)
else:
return ('0',)
def __call__(self, record, **kwargs):
column = record[self.field]
return self.func(column)
class IndexPredicate(Predicate):
def __init__(self, threshold, field):
self.__name__ = '(%s, %s)' % (threshold, field)
self.field = field
self.threshold = threshold
self.index = None
def __getstate__(self):
odict = self.__dict__.copy()
odict['index'] = None
return odict
def __setstate__(self, d):
self.__dict__.update(d)
# backwards compatibility
if not hasattr(self, 'index'):
self.index = None
def reset(self):
...
def bust_cache(self):
self._cache = {}
class CanopyPredicate(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.canopy = {}
self._cache = {}
def freeze(self, records):
self._cache = {record[self.field]: self(record) for record in records}
self.canopy = {}
self.index = None
def reset(self):
self._cache = {}
self.canopy = {}
self.index = None
def __call__(self, record, **kwargs):
block_key = None
column = record[self.field]
if column:
if column in self._cache:
return self._cache[column]
doc = self.preprocess(column)
try:
doc_id = self.index._doc_to_id[doc]
except AttributeError:
raise AttributeError("Attempting to block with an index "
"predicate without indexing records")
if doc_id in self.canopy:
block_key = self.canopy[doc_id]
else:
canopy_members = self.index.search(doc,
self.threshold)
for member in canopy_members:
if member not in self.canopy:
self.canopy[member] = doc_id
if canopy_members:
block_key = doc_id
self.canopy[doc_id] = doc_id
else:
self.canopy[doc_id] = None
if block_key is None:
return []
else:
return [str(block_key)]
class SearchPredicate(object):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self._cache = {}
def freeze(self, records_1, records_2):
self._cache = {(record[self.field], False): self(record, False)
for record in records_1}
self._cache.update({(record[self.field], True): self(record, True)
for record in records_2})
self.index = None
def reset(self):
self._cache = {}
self.index = None
def __call__(self, record, target=False, **kwargs):
column = record[self.field]
if column:
if (column, target) in self._cache:
return self._cache[(column, target)]
else:
doc = self.preprocess(column)
try:
if target:
centers = [self.index._doc_to_id[doc]]
else:
centers = self.index.search(doc, self.threshold)
except AttributeError:
raise AttributeError("Attempting to block with an index "
"predicate without indexing records")
result = [str(center) for center in centers]
self._cache[(column, target)] = result
return result
else:
return ()
class TfidfPredicate(IndexPredicate):
def initIndex(self):
self.reset()
return tfidf.TfIdfIndex()
class TfidfCanopyPredicate(CanopyPredicate, TfidfPredicate):
pass
class TfidfSearchPredicate(SearchPredicate, TfidfPredicate):
pass
class TfidfTextPredicate(object):
def preprocess(self, doc):
return tuple(words(doc))
class TfidfSetPredicate(object):
def preprocess(self, doc):
return doc
class TfidfNGramPredicate(object):
def preprocess(self, doc):
return tuple(sorted(ngrams(" ".join(strip_punc(doc).split()), 2)))
class TfidfTextSearchPredicate(TfidfTextPredicate,
TfidfSearchPredicate):
type = "TfidfTextSearchPredicate"
class TfidfSetSearchPredicate(TfidfSetPredicate,
TfidfSearchPredicate):
type = "TfidfSetSearchPredicate"
class TfidfNGramSearchPredicate(TfidfNGramPredicate,
TfidfSearchPredicate):
type = "TfidfNGramSearchPredicate"
class TfidfTextCanopyPredicate(TfidfTextPredicate,
TfidfCanopyPredicate):
type = "TfidfTextCanopyPredicate"
class TfidfSetCanopyPredicate(TfidfSetPredicate,
TfidfCanopyPredicate):
type = "TfidfSetCanopyPredicate"
class TfidfNGramCanopyPredicate(TfidfNGramPredicate,
TfidfCanopyPredicate):
type = "TfidfNGramCanopyPredicate"
class LevenshteinPredicate(IndexPredicate):
def initIndex(self):
self.reset()
return levenshtein.LevenshteinIndex()
def preprocess(self, doc):
return " ".join(strip_punc(doc).split())
class LevenshteinCanopyPredicate(CanopyPredicate, LevenshteinPredicate):
type = "LevenshteinCanopyPredicate"
class LevenshteinSearchPredicate(SearchPredicate, LevenshteinPredicate):
type = "LevenshteinSearchPredicate"
class CompoundPredicate(tuple, Predicate):
type = "CompoundPredicate"
def __hash__(self):
try:
return self._cached_hash
except AttributeError:
h = self._cached_hash = hash(frozenset(self))
return h
def __eq__(self, other):
return frozenset(self) == frozenset(other)
def __call__(self, record, **kwargs):
predicate_keys = [predicate(record, **kwargs)
for predicate in self]
return [
u':'.join(
# must escape : to avoid confusion with : join separator
b.replace(u':', u'\\:') for b in block_key
)
for block_key
in itertools.product(*predicate_keys)
]
def __add__(self, other: Predicate) -> 'CompoundPredicate': # type: ignore
if isinstance(other, CompoundPredicate):
return CompoundPredicate(tuple(self) + tuple(other))
elif isinstance(other, Predicate):
return CompoundPredicate(tuple(self) + (other,))
else:
raise ValueError('Can only combine predicates')
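# Illustrative example (hypothetical record): predicates combined with `+` form a
# CompoundPredicate whose block keys are the cross product of the members' keys,
# joined with ':' (literal colons inside a key are escaped as '\:').
#
#   compound = SimplePredicate(firstTokenPredicate, 'name') + ExistsPredicate('phone')
#   compound({'name': 'Acme Corp', 'phone': ''})   # -> ['Acme:0']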
def wholeFieldPredicate(field: Any) -> Tuple[str]:
"""return the whole field"""
return (str(field), )
def tokenFieldPredicate(field):
"""returns the tokens"""
return set(words(field))
def firstTokenPredicate(field: str) -> Sequence[str]:
first_token = start_word(field)
if first_token:
return first_token.groups()
else:
return ()
def commonIntegerPredicate(field: str) -> Set[str]:
"""return any integers"""
return {str(int(i)) for i in integers(field)}
def alphaNumericPredicate(field: str) -> Set[str]:
return set(alpha_numeric(field))
def nearIntegersPredicate(field: str) -> Set[str]:
"""return any integers N, N+1, and N-1"""
ints = integers(field)
near_ints = set()
for char in ints:
num = int(char)
near_ints.add(str(num - 1))
near_ints.add(str(num))
near_ints.add(str(num + 1))
return near_ints
def hundredIntegerPredicate(field: str) -> Set[str]:
return {str(int(i))[:-2] + '00' for i in integers(field)}
def hundredIntegersOddPredicate(field: str) -> Set[str]:
return {str(int(i))[:-2] + '0' + str(int(i) % 2) for i in integers(field)}
def firstIntegerPredicate(field: str) -> Sequence[str]:
first_token = start_integer(field)
if first_token:
return first_token.groups()
else:
return ()
def ngramsTokens(field: Sequence[Any], n: int) -> Set[str]:
grams = set()
n_tokens = len(field)
for i in range(n_tokens):
for j in range(i + n, min(n_tokens, i + n) + 1):
grams.add(' '.join(str(tok) for tok in field[i:j]))
return grams
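# Worked example (illustrative): with the inner range above, each start position
# yields at most one end index, so only token n-grams that fully fit are produced.
#
#   ngramsTokens(['new', 'york', 'city'], 2)   # -> {'new york', 'york city'}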
def commonTwoTokens(field: str) -> Set[str]:
return ngramsTokens(field.split(), 2)
def commonThreeTokens(field: str) -> Set[str]:
return ngramsTokens(field.split(), 3)
def fingerprint(field: str) -> Tuple[str]:
return (u''.join(sorted(field.split())).strip(),)
def oneGramFingerprint(field: str) -> Tuple[str]:
return (u''.join(sorted(set(ngrams(field.replace(' ', ''), 1)))).strip(),)
def twoGramFingerprint(field: str) -> Tuple[str, ...]:
if len(field) > 1:
return (u''.join(sorted(gram.strip() for gram
in set(ngrams(field.replace(' ', ''), 2)))),)
else:
return ()
def commonFourGram(field: str) -> Set[str]:
"""return 4-grams"""
return set(ngrams(field.replace(' ', ''), 4))
def commonSixGram(field: str) -> Set[str]:
"""return 6-grams"""
return set(ngrams(field.replace(' ', ''), 6))
def sameThreeCharStartPredicate(field: str) -> Tuple[str]:
"""return first three characters"""
return initials(field.replace(' ', ''), 3)
def sameFiveCharStartPredicate(field: str) -> Tuple[str]:
"""return first five characters"""
return initials(field.replace(' ', ''), 5)
def sameSevenCharStartPredicate(field: str) -> Tuple[str]:
"""return first seven characters"""
return initials(field.replace(' ', ''), 7)
def suffixArray(field):
n = len(field) - 4
if n > 0:
for i in range(0, n):
yield field[i:]
def sortedAcronym(field: str) -> Tuple[str]:
return (''.join(sorted(each[0] for each in field.split())),)
def doubleMetaphone(field):
return {metaphone for metaphone in doublemetaphone(field) if metaphone}
def metaphoneToken(field):
return {metaphone_token for metaphone_token
in itertools.chain(*(doublemetaphone(token)
for token in set(field.split())))
if metaphone_token}
def wholeSetPredicate(field_set):
return (str(field_set),)
def commonSetElementPredicate(field_set):
"""return set as individual elements"""
return tuple([str(each) for each in field_set])
def commonTwoElementsPredicate(field):
sequence = sorted(field)
return ngramsTokens(sequence, 2)
def commonThreeElementsPredicate(field):
sequence = sorted(field)
return ngramsTokens(sequence, 3)
def lastSetElementPredicate(field_set):
return (str(max(field_set)), )
def firstSetElementPredicate(field_set):
return (str(min(field_set)), )
def magnitudeOfCardinality(field_set):
return orderOfMagnitude(len(field_set))
def latLongGridPredicate(field, digits=1):
"""
Given a lat / long pair, return the grid coordinates at the
nearest base value. e.g., (42.3, -5.4) returns a grid at 0.1
degree resolution of 0.1 degrees of latitude ~ 7km, so this is
effectively a 14km lat grid. This is imprecise for longitude,
since 1 degree of longitude is 0km at the poles, and up to 111km
at the equator. But it should be reasonably precise given some
prior logical block (e.g., country).
"""
if any(field):
return (str([round(dim, digits) for dim in field]),)
else:
return ()
def orderOfMagnitude(field):
if field > 0:
return (str(int(round(math.log10(field)))), )
else:
return ()
def roundTo1(field): # thanks http://stackoverflow.com/questions/3410976/how-to-round-a-number-to-significant-figures-in-python
abs_num = abs(field)
order = int(math.floor(math.log10(abs_num)))
rounded = round(abs_num, -order)
return (str(int(math.copysign(rounded, field))),)
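# Worked examples (illustrative) for the magnitude helpers above:
#   orderOfMagnitude(8200)   # -> ('4',)     round(log10(8200)) == 4
#   roundTo1(1234)           # -> ('1000',)  one significant figure, sign preserved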
|
from homeassistant.components.elgato.const import CONF_SERIAL_NUMBER, DOMAIN
from homeassistant.const import CONF_HOST, CONF_PORT, CONTENT_TYPE_JSON
from homeassistant.core import HomeAssistant
from tests.common import MockConfigEntry, load_fixture
from tests.test_util.aiohttp import AiohttpClientMocker
async def init_integration(
hass: HomeAssistant,
aioclient_mock: AiohttpClientMocker,
skip_setup: bool = False,
) -> MockConfigEntry:
"""Set up the Elgato Key Light integration in Home Assistant."""
aioclient_mock.get(
"http://1.2.3.4:9123/elgato/accessory-info",
text=load_fixture("elgato/info.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.put(
"http://1.2.3.4:9123/elgato/lights",
text=load_fixture("elgato/state.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"http://1.2.3.4:9123/elgato/lights",
text=load_fixture("elgato/state.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
aioclient_mock.get(
"http://5.6.7.8:9123/elgato/accessory-info",
text=load_fixture("elgato/info.json"),
headers={"Content-Type": CONTENT_TYPE_JSON},
)
entry = MockConfigEntry(
domain=DOMAIN,
unique_id="CN11A1A00001",
data={
CONF_HOST: "1.2.3.4",
CONF_PORT: 9123,
CONF_SERIAL_NUMBER: "CN11A1A00001",
},
)
entry.add_to_hass(hass)
if not skip_setup:
await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
return entry
|
import diamond.collector
import diamond.convertor
import time
import os
import re
try:
import psutil
except ImportError:
psutil = None
class DiskUsageCollector(diamond.collector.Collector):
MAX_VALUES = {
'reads': 4294967295,
'reads_merged': 4294967295,
'reads_milliseconds': 4294967295,
'writes': 4294967295,
'writes_merged': 4294967295,
'writes_milliseconds': 4294967295,
'io_milliseconds': 4294967295,
'io_milliseconds_weighted': 4294967295
}
LastCollectTime = None
def get_default_config_help(self):
config_help = super(DiskUsageCollector, self).get_default_config_help()
config_help.update({
'devices': "A regex of which devices to gather metrics for." +
" Defaults to md, sd, xvd, disk, and dm devices",
'sector_size': 'The size to use to calculate sector usage',
'send_zero': 'Send io data even when there is no io',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(DiskUsageCollector, self).get_default_config()
config.update({
'path': 'iostat',
'devices': ('PhysicalDrive[0-9]+$' +
'|md[0-9]+$' +
'|sd[a-z]+[0-9]*$' +
'|x?vd[a-z]+[0-9]*$' +
'|disk[0-9]+$' +
'|dm-[0-9]+$'),
'sector_size': 512,
'send_zero': False,
})
return config
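# Illustrative device names matched by the default 'devices' regex above:
# 'sda1', 'md0', 'xvda', 'vdb2', 'disk0', 'dm-3' and 'PhysicalDrive0'.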
def get_disk_statistics(self):
"""
Create a map of disks in the machine.
http://www.kernel.org/doc/Documentation/iostats.txt
Returns:
(major, minor) -> DiskStatistics(device, ...)
"""
result = {}
if os.access('/proc/diskstats', os.R_OK):
self.proc_diskstats = True
fp = open('/proc/diskstats')
try:
for line in fp:
try:
columns = line.split()
# On early linux v2.6 versions, partitions have only 4
# output fields not 11. From linux 2.6.25 partitions
# have the full stats set.
if len(columns) < 14:
continue
major = int(columns[0])
minor = int(columns[1])
device = columns[2]
if ((device.startswith('ram') or
device.startswith('loop'))):
continue
result[(major, minor)] = {
'device': device,
'reads': float(columns[3]),
'reads_merged': float(columns[4]),
'reads_sectors': float(columns[5]),
'reads_milliseconds': float(columns[6]),
'writes': float(columns[7]),
'writes_merged': float(columns[8]),
'writes_sectors': float(columns[9]),
'writes_milliseconds': float(columns[10]),
'io_in_progress': float(columns[11]),
'io_milliseconds': float(columns[12]),
'io_milliseconds_weighted': float(columns[13])
}
except ValueError:
continue
finally:
fp.close()
else:
self.proc_diskstats = False
if not psutil:
self.log.error('Unable to import psutil')
return None
disks = psutil.disk_io_counters(True)
sector_size = int(self.config['sector_size'])
for disk in disks:
result[(0, len(result))] = {
'device': disk,
'reads': disks[disk].read_count,
'reads_sectors': disks[disk].read_bytes / sector_size,
'reads_milliseconds': disks[disk].read_time,
'writes': disks[disk].write_count,
'writes_sectors': disks[disk].write_bytes / sector_size,
'writes_milliseconds': disks[disk].write_time,
'io_milliseconds':
disks[disk].read_time + disks[disk].write_time,
'io_milliseconds_weighted':
disks[disk].read_time + disks[disk].write_time
}
return result
def collect(self):
# Handle collection time intervals correctly
CollectTime = time.time()
time_delta = float(self.config['interval'])
if self.LastCollectTime:
time_delta = CollectTime - self.LastCollectTime
if not time_delta:
time_delta = float(self.config['interval'])
self.LastCollectTime = CollectTime
exp = self.config['devices']
reg = re.compile(exp)
results = self.get_disk_statistics()
if not results:
self.log.error('No diskspace metrics retrieved')
return None
for key, info in results.items():
metrics = {}
name = info['device']
if not reg.match(name):
continue
for key, value in info.items():
if key == 'device':
continue
oldkey = key
for unit in self.config['byte_unit']:
key = oldkey
if key.endswith('sectors'):
key = key.replace('sectors', unit)
value /= (1024 / int(self.config['sector_size']))
value = diamond.convertor.binary.convert(value=value,
oldUnit='kB',
newUnit=unit)
self.MAX_VALUES[key] = diamond.convertor.binary.convert(
value=diamond.collector.MAX_COUNTER,
oldUnit='byte',
newUnit=unit)
metric_name = '.'.join([info['device'], key])
# io_in_progress is a point in time counter, !derivative
if key != 'io_in_progress':
metric_value = self.derivative(
metric_name,
value,
self.MAX_VALUES[key],
time_delta=False)
else:
metric_value = value
metrics[key] = metric_value
if self.proc_diskstats:
metrics['read_requests_merged_per_second'] = (
metrics['reads_merged'] / time_delta)
metrics['write_requests_merged_per_second'] = (
metrics['writes_merged'] / time_delta)
metrics['reads_per_second'] = metrics['reads'] / time_delta
metrics['writes_per_second'] = metrics['writes'] / time_delta
for unit in self.config['byte_unit']:
metric_name = 'read_%s_per_second' % unit
key = 'reads_%s' % unit
metrics[metric_name] = metrics[key] / time_delta
metric_name = 'write_%s_per_second' % unit
key = 'writes_%s' % unit
metrics[metric_name] = metrics[key] / time_delta
# Set to zero so the nodes are valid even if we have 0 io for
# the metric duration
metric_name = 'average_request_size_%s' % unit
metrics[metric_name] = 0
metrics['io'] = metrics['reads'] + metrics['writes']
metrics['average_queue_length'] = (
metrics['io_milliseconds_weighted'] / time_delta / 1000.0)
metrics['util_percentage'] = (
metrics['io_milliseconds'] / time_delta / 10.0)
if metrics['reads'] > 0:
metrics['read_await'] = (
metrics['reads_milliseconds'] / metrics['reads'])
else:
metrics['read_await'] = 0
if metrics['writes'] > 0:
metrics['write_await'] = (
metrics['writes_milliseconds'] / metrics['writes'])
else:
metrics['write_await'] = 0
for unit in self.config['byte_unit']:
rkey = 'reads_%s' % unit
wkey = 'writes_%s' % unit
metric_name = 'average_request_size_%s' % unit
if (metrics['io'] > 0):
metrics[metric_name] = (
metrics[rkey] + metrics[wkey]) / metrics['io']
else:
metrics[metric_name] = 0
metrics['iops'] = metrics['io'] / time_delta
if (metrics['io'] > 0):
metrics['service_time'] = (
metrics['io_milliseconds'] / metrics['io'])
metrics['await'] = (
metrics['reads_milliseconds'] +
metrics['writes_milliseconds']) / metrics['io']
else:
metrics['service_time'] = 0
metrics['await'] = 0
# http://www.scribd.com/doc/15013525
# Page 28
metrics['concurrent_io'] = (
(metrics['reads_per_second'] + metrics['writes_per_second']) *
(metrics['service_time'] / 1000.0))
# Only publish when we have io figures
if (metrics['io'] > 0 or self.config['send_zero']):
for key in metrics:
metric_name = '.'.join([info['device'], key]).replace(
'/', '_')
self.publish(metric_name, metrics[key], precision=3)
|
from typing import Any
from homeassistant.components.scene import Scene
from . import _LOGGER, DATA_VELUX
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the scenes for Velux platform."""
entities = [VeluxScene(scene) for scene in hass.data[DATA_VELUX].pyvlx.scenes]
async_add_entities(entities)
class VeluxScene(Scene):
"""Representation of a Velux scene."""
def __init__(self, scene):
"""Init velux scene."""
_LOGGER.info("Adding Velux scene: %s", scene)
self.scene = scene
@property
def name(self):
"""Return the name of the scene."""
return self.scene.name
async def async_activate(self, **kwargs: Any) -> None:
"""Activate the scene."""
await self.scene.run(wait_for_completion=False)
|
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import CONF_ID, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from .device import EnOceanEntity
CONF_CHANNEL = "channel"
DEFAULT_NAME = "EnOcean Switch"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ID): vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CHANNEL, default=0): cv.positive_int,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the EnOcean switch platform."""
channel = config.get(CONF_CHANNEL)
dev_id = config.get(CONF_ID)
dev_name = config.get(CONF_NAME)
add_entities([EnOceanSwitch(dev_id, dev_name, channel)])
class EnOceanSwitch(EnOceanEntity, ToggleEntity):
"""Representation of an EnOcean switch device."""
def __init__(self, dev_id, dev_name, channel):
"""Initialize the EnOcean switch device."""
super().__init__(dev_id, dev_name)
self._light = None
self._on_state = False
self._on_state2 = False
self.channel = channel
@property
def is_on(self):
"""Return whether the switch is on or off."""
return self._on_state
@property
def name(self):
"""Return the device name."""
return self.dev_name
def turn_on(self, **kwargs):
"""Turn on the switch."""
optional = [0x03]
optional.extend(self.dev_id)
optional.extend([0xFF, 0x00])
self.send_command(
data=[0xD2, 0x01, self.channel & 0xFF, 0x64, 0x00, 0x00, 0x00, 0x00, 0x00],
optional=optional,
packet_type=0x01,
)
self._on_state = True
def turn_off(self, **kwargs):
"""Turn off the switch."""
optional = [0x03]
optional.extend(self.dev_id)
optional.extend([0xFF, 0x00])
self.send_command(
data=[0xD2, 0x01, self.channel & 0xFF, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00],
optional=optional,
packet_type=0x01,
)
self._on_state = False
def value_changed(self, packet):
"""Update the internal state of the switch."""
if packet.data[0] == 0xA5:
# power meter telegram, turn on if > 1 watt
packet.parse_eep(0x12, 0x01)
if packet.parsed["DT"]["raw_value"] == 1:
raw_val = packet.parsed["MR"]["raw_value"]
divisor = packet.parsed["DIV"]["raw_value"]
watts = raw_val / (10 ** divisor)
if watts > 1:
self._on_state = True
self.schedule_update_ha_state()
elif packet.data[0] == 0xD2:
# actuator status telegram
packet.parse_eep(0x01, 0x01)
if packet.parsed["CMD"]["raw_value"] == 4:
channel = packet.parsed["IO"]["raw_value"]
output = packet.parsed["OV"]["raw_value"]
if channel == self.channel:
self._on_state = output > 0
self.schedule_update_ha_state()
|
from logilab.common.testlib import TestCase, unittest_main, TestSuite
from logilab.common.cache import Cache
class CacheTestCase(TestCase):
def setUp(self):
self.cache = Cache(5)
self.testdict = {}
def test_setitem1(self):
"""Checks that the setitem method works"""
self.cache[1] = 'foo'
self.assertEqual(self.cache[1], 'foo', "1:foo is not in cache")
self.assertEqual(len(self.cache._usage), 1)
self.assertEqual(self.cache._usage[-1], 1,
'1 is not the most recently used key')
self.assertCountEqual(self.cache._usage,
self.cache.keys(),
"usage list and data keys are different")
def test_setitem2(self):
"""Checks that the setitem method works for multiple items"""
self.cache[1] = 'foo'
self.cache[2] = 'bar'
self.assertEqual(self.cache[2], 'bar',
"2 : 'bar' is not in cache.data")
self.assertEqual(len(self.cache._usage), 2,
"lenght of usage list is not 2")
self.assertEqual(self.cache._usage[-1], 2,
'1 is not the most recently used key')
self.assertCountEqual(self.cache._usage,
self.cache.keys())# usage list and data keys are different
def test_setitem3(self):
"""Checks that the setitem method works when replacing an element in the cache"""
self.cache[1] = 'foo'
self.cache[1] = 'bar'
self.assertEqual(self.cache[1], 'bar', "1 : 'bar' is not in cache.data")
self.assertEqual(len(self.cache._usage), 1, "length of usage list is not 1")
self.assertEqual(self.cache._usage[-1], 1, '1 is not the most recently used key')
self.assertCountEqual(self.cache._usage,
self.cache.keys())# usage list and data keys are different
def test_recycling1(self):
"""Checks the removal of old elements"""
self.cache[1] = 'foo'
self.cache[2] = 'bar'
self.cache[3] = 'baz'
self.cache[4] = 'foz'
self.cache[5] = 'fuz'
self.cache[6] = 'spam'
self.assertTrue(1 not in self.cache,
'key 1 has not been removed from the cache dictionary')
self.assertTrue(1 not in self.cache._usage,
'key 1 has not been removed from the cache LRU list')
self.assertEqual(len(self.cache._usage), 5, "length of usage list is not 5")
self.assertEqual(self.cache._usage[-1], 6, '6 is not the most recently used key')
self.assertCountEqual(self.cache._usage,
self.cache.keys())# usage list and data keys are different
def test_recycling2(self):
"""Checks that accessed elements get in the front of the list"""
self.cache[1] = 'foo'
self.cache[2] = 'bar'
self.cache[3] = 'baz'
self.cache[4] = 'foz'
a = self.cache[1]
self.assertEqual(a, 'foo')
self.assertEqual(self.cache._usage[-1], 1, '1 is not the most recently used key')
self.assertCountEqual(self.cache._usage,
self.cache.keys())# usage list and data keys are different
def test_delitem(self):
"""Checks that elements are removed from both element dict and element
list.
"""
self.cache['foo'] = 'bar'
del self.cache['foo']
self.assertTrue('foo' not in self.cache.keys(), "Element 'foo' was not removed from the cache dictionary")
self.assertTrue('foo' not in self.cache._usage, "Element 'foo' was not removed from the usage list")
self.assertCountEqual(self.cache._usage,
self.cache.keys())# usage list and data keys are different
def test_nullsize(self):
"""Checks that a 'NULL' size cache doesn't store anything
"""
null_cache = Cache(0)
null_cache['foo'] = 'bar'
self.assertEqual(null_cache.size, 0, 'Cache size should be 0, not %d' % \
null_cache.size)
self.assertEqual(len(null_cache), 0, 'Cache should be empty!')
# Assert null_cache['foo'] raises a KeyError
self.assertRaises(KeyError, null_cache.__getitem__, 'foo')
# Deleting element raises a KeyError
self.assertRaises(KeyError, null_cache.__delitem__, 'foo')
def test_getitem(self):
""" Checks that getitem doest not modify the _usage attribute
"""
try:
self.cache['toto']
except KeyError:
self.assertTrue('toto' not in self.cache._usage)
else:
self.fail('expected KeyError')
if __name__ == "__main__":
unittest_main()
|
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN, SwitchEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_UNSUBSCRIBE, DOMAIN
from .entity import ZWaveDeviceEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Z-Wave switch from config entry."""
@callback
def async_add_switch(value):
"""Add Z-Wave Switch."""
switch = ZWaveSwitch(value)
async_add_entities([switch])
hass.data[DOMAIN][config_entry.entry_id][DATA_UNSUBSCRIBE].append(
async_dispatcher_connect(
hass, f"{DOMAIN}_new_{SWITCH_DOMAIN}", async_add_switch
)
)
class ZWaveSwitch(ZWaveDeviceEntity, SwitchEntity):
"""Representation of a Z-Wave switch."""
@property
def is_on(self):
"""Return a boolean for the state of the switch."""
return bool(self.values.primary.value)
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
self.values.primary.send_value(True)
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
self.values.primary.send_value(False)
|
from flexx import app, event, ui
## Synced sliders
class SyncedSlidersBase(ui.Widget):
def init(self):
with ui.VBox():
ui.Label(text=self.TITLE)
self.slider1 = ui.Slider()
self.slider2 = ui.Slider()
ui.Widget(flex=1)
@event.reaction('slider1.value', 'slider2.value')
def sleep_when_slider_changes(self, *events):
global time # time() is a PScript builtin
for ev in events:
etime = time() + 0.05
while time() < etime:
pass
class SyncedSlidersWrong(SyncedSlidersBase):
""" This example syncs two sliders by implementing one reaction for each
slider. This is probably the worst way to do it; if there is some delay
in your app, you quickly get into a situation where the event system has
two queued actions: one to set slider 1 to value A and another to set
slider 2 to value B. And these will keep interchanging.
"""
TITLE = 'Synced sliders, done wrong'
@event.reaction('slider1.value')
def __slider1(self, *events):
self.slider2.set_value(events[-1].new_value)
@event.reaction('slider2.value')
def __slider2(self, *events):
self.slider1.set_value(events[-1].new_value)
class SyncedSlidersRight(SyncedSlidersBase):
""" This example syncs two sliders in a much better way, making use of a
single reaction, which is marked as greedy. This ensures that all events
to either of the sliders get handled in a single call to the reaction,
which avoids a ping-pong effect. Only having a single (normal) reaction
reduces the chance of a ping-pong effect, but does not eliminate it.
Even better would be to react to ``user_value`` or ``user_done``
to avoid ping-pong altogether.
A nice addition would be to add an action that sets both slider
values at the same time.
"""
TITLE = 'Synced sliders, done right'
@event.reaction('slider1.value', 'slider2.value', mode='greedy')
def __slider1(self, *events):
value = events[-1].new_value
self.slider1.set_value(value)
self.slider2.set_value(value)
## Main
class Tricky(ui.Widget):
""" A collection of tricky cases.
"""
def init(self):
with ui.VBox():
self.reset = ui.Button(text='Reset event system')
with ui.HFix(flex=1):
SyncedSlidersWrong(flex=1)
SyncedSlidersRight(flex=1)
ui.Widget(flex=1) # spacer
@event.reaction('reset.pointer_click')
def _reset(self):
# You probably don't want to ever do this in a normal app.
# Do via a timeout because reactions get handled by the event system,
# so the reset will not work correctly.
global window
window.setTimeout(event.loop.reset, 0)
m = app.launch(Tricky, 'app')
app.run()
|
import time
from google_nest_sdm.device_manager import DeviceManager
from google_nest_sdm.event import EventCallback, EventMessage
from google_nest_sdm.google_nest_subscriber import GoogleNestSubscriber
from homeassistant.components.nest import DOMAIN
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
CONFIG = {
"nest": {
"client_id": "some-client-id",
"client_secret": "some-client-secret",
# Required fields for using SDM API
"project_id": "some-project-id",
"subscriber_id": "some-subscriber-id",
},
}
CONFIG_ENTRY_DATA = {
"sdm": {}, # Indicates new SDM API, not legacy API
"auth_implementation": "local",
"token": {
"expires_at": time.time() + 86400,
"access_token": {
"token": "some-token",
},
},
}
class FakeDeviceManager(DeviceManager):
"""Fake DeviceManager that can supply a list of devices and structures."""
def __init__(self, devices: dict, structures: dict):
"""Initialize FakeDeviceManager."""
super().__init__()
self._devices = devices
self._structures = structures
@property
def structures(self) -> dict:
"""Override structures with fake result."""
return self._structures
@property
def devices(self) -> dict:
"""Override devices with fake result."""
return self._devices
class FakeSubscriber(GoogleNestSubscriber):
"""Fake subscriber that supplies a FakeDeviceManager."""
def __init__(self, device_manager: FakeDeviceManager):
"""Initialize Fake Subscriber."""
self._device_manager = device_manager
self._callback = None
def set_update_callback(self, callback: EventCallback):
"""Capture the callback set by Home Assistant."""
self._callback = callback
async def start_async(self) -> DeviceManager:
"""Return the fake device manager."""
return self._device_manager
async def async_get_device_manager(self) -> DeviceManager:
"""Return the fake device manager."""
return self._device_manager
def stop_async(self):
"""No-op to stop the subscriber."""
return None
def receive_event(self, event_message: EventMessage):
"""Simulate a received pubsub message, invoked by tests."""
# Update device state, then invoke HomeAssistant to refresh
self._device_manager.handle_event(event_message)
self._callback.handle_event(event_message)
async def async_setup_sdm_platform(hass, platform, devices={}, structures={}):
"""Set up the platform and prerequisites."""
MockConfigEntry(domain=DOMAIN, data=CONFIG_ENTRY_DATA).add_to_hass(hass)
device_manager = FakeDeviceManager(devices=devices, structures=structures)
subscriber = FakeSubscriber(device_manager)
with patch(
"homeassistant.helpers.config_entry_oauth2_flow.async_get_config_entry_implementation"
), patch("homeassistant.components.nest.PLATFORMS", [platform]), patch(
"homeassistant.components.nest.GoogleNestSubscriber", return_value=subscriber
):
assert await async_setup_component(hass, DOMAIN, CONFIG)
await hass.async_block_till_done()
return subscriber
|
from unittest import mock
import pytest
from vcr import errors
from vcr.cassette import Cassette
@mock.patch("vcr.cassette.Cassette.find_requests_with_most_matches")
@pytest.mark.parametrize(
"most_matches, expected_message",
[
# No request match found
([], "No similar requests, that have not been played, found."),
# One matcher failed
(
[("similar request", ["method", "path"], [("query", "failed : query")])],
"Found 1 similar requests with 1 different matcher(s) :\n"
"\n1 - ('similar request').\n"
"Matchers succeeded : ['method', 'path']\n"
"Matchers failed :\n"
"query - assertion failure :\n"
"failed : query\n",
),
# Multiple failed matchers
(
[("similar request", ["method"], [("query", "failed : query"), ("path", "failed : path")])],
"Found 1 similar requests with 2 different matcher(s) :\n"
"\n1 - ('similar request').\n"
"Matchers succeeded : ['method']\n"
"Matchers failed :\n"
"query - assertion failure :\n"
"failed : query\n"
"path - assertion failure :\n"
"failed : path\n",
),
# Multiple similar requests
(
[
("similar request", ["method"], [("query", "failed : query")]),
("similar request 2", ["method"], [("query", "failed : query 2")]),
],
"Found 2 similar requests with 1 different matcher(s) :\n"
"\n1 - ('similar request').\n"
"Matchers succeeded : ['method']\n"
"Matchers failed :\n"
"query - assertion failure :\n"
"failed : query\n"
"\n2 - ('similar request 2').\n"
"Matchers succeeded : ['method']\n"
"Matchers failed :\n"
"query - assertion failure :\n"
"failed : query 2\n",
),
],
)
def test_CannotOverwriteExistingCassetteException_get_message(
mock_find_requests_with_most_matches, most_matches, expected_message
):
mock_find_requests_with_most_matches.return_value = most_matches
cassette = Cassette("path")
failed_request = "request"
exception_message = errors.CannotOverwriteExistingCassetteException._get_message(cassette, failed_request)
expected = (
"Can't overwrite existing cassette (%r) in your current record mode (%r).\n"
"No match for the request (%r) was found.\n"
"%s" % (cassette._path, cassette.record_mode, failed_request, expected_message)
)
assert exception_message == expected
|
import asyncio
from functools import partial, wraps
import logging
from typing import (
TYPE_CHECKING,
Any,
Awaitable,
Callable,
Dict,
Iterable,
List,
Optional,
Set,
Tuple,
Union,
)
import voluptuous as vol
from homeassistant.auth.permissions.const import CAT_ENTITIES, POLICY_CONTROL
from homeassistant.const import (
ATTR_AREA_ID,
ATTR_ENTITY_ID,
CONF_SERVICE,
CONF_SERVICE_TEMPLATE,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
)
import homeassistant.core as ha
from homeassistant.exceptions import (
HomeAssistantError,
TemplateError,
Unauthorized,
UnknownUser,
)
from homeassistant.helpers import template
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.template import Template
from homeassistant.helpers.typing import ConfigType, HomeAssistantType, TemplateVarsType
from homeassistant.loader import Integration, async_get_integration, bind_hass
from homeassistant.util.yaml import load_yaml
from homeassistant.util.yaml.loader import JSON_TYPE
if TYPE_CHECKING:
from homeassistant.helpers.entity import Entity # noqa
from homeassistant.helpers.entity_platform import EntityPlatform
CONF_SERVICE_ENTITY_ID = "entity_id"
CONF_SERVICE_DATA = "data"
CONF_SERVICE_DATA_TEMPLATE = "data_template"
_LOGGER = logging.getLogger(__name__)
SERVICE_DESCRIPTION_CACHE = "service_description_cache"
@bind_hass
def call_from_config(
hass: HomeAssistantType,
config: ConfigType,
blocking: bool = False,
variables: TemplateVarsType = None,
validate_config: bool = True,
) -> None:
"""Call a service based on a config hash."""
asyncio.run_coroutine_threadsafe(
async_call_from_config(hass, config, blocking, variables, validate_config),
hass.loop,
).result()
@bind_hass
async def async_call_from_config(
hass: HomeAssistantType,
config: ConfigType,
blocking: bool = False,
variables: TemplateVarsType = None,
validate_config: bool = True,
context: Optional[ha.Context] = None,
) -> None:
"""Call a service based on a config hash."""
try:
parms = async_prepare_call_from_config(hass, config, variables, validate_config)
except HomeAssistantError as ex:
if blocking:
raise
_LOGGER.error(ex)
else:
await hass.services.async_call(*parms, blocking, context)
@ha.callback
@bind_hass
def async_prepare_call_from_config(
hass: HomeAssistantType,
config: ConfigType,
variables: TemplateVarsType = None,
validate_config: bool = False,
) -> Tuple[str, str, Dict[str, Any]]:
"""Prepare to call a service based on a config hash."""
if validate_config:
try:
config = cv.SERVICE_SCHEMA(config)
except vol.Invalid as ex:
raise HomeAssistantError(
f"Invalid config for calling service: {ex}"
) from ex
if CONF_SERVICE in config:
domain_service = config[CONF_SERVICE]
else:
domain_service = config[CONF_SERVICE_TEMPLATE]
if isinstance(domain_service, Template):
try:
domain_service.hass = hass
domain_service = domain_service.async_render(variables)
domain_service = cv.service(domain_service)
except TemplateError as ex:
raise HomeAssistantError(
f"Error rendering service name template: {ex}"
) from ex
except vol.Invalid as ex:
raise HomeAssistantError(
f"Template rendered invalid service: {domain_service}"
) from ex
domain, service = domain_service.split(".", 1)
service_data = {}
for conf in [CONF_SERVICE_DATA, CONF_SERVICE_DATA_TEMPLATE]:
if conf not in config:
continue
try:
template.attach(hass, config[conf])
service_data.update(template.render_complex(config[conf], variables))
except TemplateError as ex:
raise HomeAssistantError(f"Error rendering data template: {ex}") from ex
if CONF_SERVICE_ENTITY_ID in config:
service_data[ATTR_ENTITY_ID] = config[CONF_SERVICE_ENTITY_ID]
return domain, service, service_data
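# Illustrative config hash (values are hypothetical): the helper accepts a literal
# "service" (or a "service_template"), optional "data"/"data_template" and an
# "entity_id", and returns a (domain, service, service_data) tuple, e.g.
#
#   {"service": "light.turn_on", "entity_id": "light.kitchen", "data": {"brightness": 120}}
#   -> ("light", "turn_on", {"brightness": 120, "entity_id": "light.kitchen"})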
@bind_hass
def extract_entity_ids(
hass: HomeAssistantType, service_call: ha.ServiceCall, expand_group: bool = True
) -> Set[str]:
"""Extract a list of entity ids from a service call.
Will convert group entity ids to the entity ids it represents.
"""
return asyncio.run_coroutine_threadsafe(
async_extract_entity_ids(hass, service_call, expand_group), hass.loop
).result()
@bind_hass
async def async_extract_entities(
hass: HomeAssistantType,
entities: Iterable["Entity"],
service_call: ha.ServiceCall,
expand_group: bool = True,
) -> List["Entity"]:
"""Extract a list of entity objects from a service call.
Will convert group entity ids to the entity ids it represents.
"""
data_ent_id = service_call.data.get(ATTR_ENTITY_ID)
if data_ent_id == ENTITY_MATCH_ALL:
return [entity for entity in entities if entity.available]
entity_ids = await async_extract_entity_ids(hass, service_call, expand_group)
found = []
for entity in entities:
if entity.entity_id not in entity_ids:
continue
entity_ids.remove(entity.entity_id)
if not entity.available:
continue
found.append(entity)
if entity_ids:
_LOGGER.warning(
"Unable to find referenced entities %s", ", ".join(sorted(entity_ids))
)
return found
@bind_hass
async def async_extract_entity_ids(
hass: HomeAssistantType, service_call: ha.ServiceCall, expand_group: bool = True
) -> Set[str]:
"""Extract a list of entity ids from a service call.
Will convert group entity ids to the entity ids it represents.
"""
entity_ids = service_call.data.get(ATTR_ENTITY_ID)
area_ids = service_call.data.get(ATTR_AREA_ID)
extracted: Set[str] = set()
if entity_ids in (None, ENTITY_MATCH_NONE) and area_ids in (
None,
ENTITY_MATCH_NONE,
):
return extracted
if entity_ids and entity_ids != ENTITY_MATCH_NONE:
# Entity ID attr can be a list or a string
if isinstance(entity_ids, str):
entity_ids = [entity_ids]
if expand_group:
entity_ids = hass.components.group.expand_entity_ids(entity_ids)
extracted.update(entity_ids)
if area_ids and area_ids != ENTITY_MATCH_NONE:
if isinstance(area_ids, str):
area_ids = [area_ids]
dev_reg, ent_reg = await asyncio.gather(
hass.helpers.device_registry.async_get_registry(),
hass.helpers.entity_registry.async_get_registry(),
)
extracted.update(
entry.entity_id
for area_id in area_ids
for entry in hass.helpers.entity_registry.async_entries_for_area(
ent_reg, area_id
)
)
devices = [
device
for area_id in area_ids
for device in hass.helpers.device_registry.async_entries_for_area(
dev_reg, area_id
)
]
extracted.update(
entry.entity_id
for device in devices
for entry in hass.helpers.entity_registry.async_entries_for_device(
ent_reg, device.id
)
if not entry.area_id
)
return extracted
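# Illustrative call data (ids are hypothetical): {"entity_id": ["light.desk", "light.bed"]}
# yields {"light.desk", "light.bed"}; an "area_id" additionally pulls in every entity
# registered to that area, plus entities of devices in the area that have no area of
# their own.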
def _load_services_file(hass: HomeAssistantType, integration: Integration) -> JSON_TYPE:
"""Load services file for an integration."""
try:
return load_yaml(str(integration.file_path / "services.yaml"))
except FileNotFoundError:
_LOGGER.warning(
"Unable to find services.yaml for the %s integration", integration.domain
)
return {}
except HomeAssistantError:
_LOGGER.warning(
"Unable to parse services.yaml for the %s integration", integration.domain
)
return {}
def _load_services_files(
hass: HomeAssistantType, integrations: Iterable[Integration]
) -> List[JSON_TYPE]:
"""Load service files for multiple intergrations."""
return [_load_services_file(hass, integration) for integration in integrations]
@bind_hass
async def async_get_all_descriptions(
hass: HomeAssistantType,
) -> Dict[str, Dict[str, Any]]:
"""Return descriptions (i.e. user documentation) for all service calls."""
descriptions_cache = hass.data.setdefault(SERVICE_DESCRIPTION_CACHE, {})
format_cache_key = "{}.{}".format
services = hass.services.async_services()
# See if there are new services not seen before.
# Any service that we saw before already has an entry in description_cache.
missing = set()
for domain in services:
for service in services[domain]:
if format_cache_key(domain, service) not in descriptions_cache:
missing.add(domain)
break
# Files we loaded for missing descriptions
loaded = {}
if missing:
integrations = await asyncio.gather(
*(async_get_integration(hass, domain) for domain in missing)
)
contents = await hass.async_add_executor_job(
_load_services_files, hass, integrations
)
for domain, content in zip(missing, contents):
loaded[domain] = content
# Build response
descriptions: Dict[str, Dict[str, Any]] = {}
for domain in services:
descriptions[domain] = {}
for service in services[domain]:
cache_key = format_cache_key(domain, service)
description = descriptions_cache.get(cache_key)
# Cache missing descriptions
if description is None:
domain_yaml = loaded[domain]
yaml_description = domain_yaml.get(service, {}) # type: ignore
# Don't warn for missing services, because it triggers false
# positives for things like scripts, that register as a service
description = descriptions_cache[cache_key] = {
"description": yaml_description.get("description", ""),
"fields": yaml_description.get("fields", {}),
}
descriptions[domain][service] = description
return descriptions
@ha.callback
@bind_hass
def async_set_service_schema(
hass: HomeAssistantType, domain: str, service: str, schema: Dict[str, Any]
) -> None:
"""Register a description for a service."""
hass.data.setdefault(SERVICE_DESCRIPTION_CACHE, {})
description = {
"description": schema.get("description") or "",
"fields": schema.get("fields") or {},
}
hass.data[SERVICE_DESCRIPTION_CACHE][f"{domain}.{service}"] = description
@bind_hass
async def entity_service_call(
hass: HomeAssistantType,
platforms: Iterable["EntityPlatform"],
func: Union[str, Callable[..., Any]],
call: ha.ServiceCall,
required_features: Optional[Iterable[int]] = None,
) -> None:
"""Handle an entity service call.
Calls all platforms simultaneously.
"""
if call.context.user_id:
user = await hass.auth.async_get_user(call.context.user_id)
if user is None:
raise UnknownUser(context=call.context)
entity_perms: Optional[
Callable[[str, str], bool]
] = user.permissions.check_entity
else:
entity_perms = None
target_all_entities = call.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_ALL
if not target_all_entities:
# A set of entities we're trying to target.
entity_ids = await async_extract_entity_ids(hass, call, True)
# If the service function is a string, we'll pass it the service call data
if isinstance(func, str):
data: Union[Dict, ha.ServiceCall] = {
key: val
for key, val in call.data.items()
if key not in cv.ENTITY_SERVICE_FIELDS
}
# If the service function is not a string, we pass the service call
else:
data = call
# Check the permissions
# A list with entities to call the service on.
entity_candidates: List["Entity"] = []
if entity_perms is None:
for platform in platforms:
if target_all_entities:
entity_candidates.extend(platform.entities.values())
else:
entity_candidates.extend(
[
entity
for entity in platform.entities.values()
if entity.entity_id in entity_ids
]
)
elif target_all_entities:
# If we target all entities, we will select all entities the user
# is allowed to control.
for platform in platforms:
entity_candidates.extend(
[
entity
for entity in platform.entities.values()
if entity_perms(entity.entity_id, POLICY_CONTROL)
]
)
else:
for platform in platforms:
platform_entities = []
for entity in platform.entities.values():
if entity.entity_id not in entity_ids:
continue
if not entity_perms(entity.entity_id, POLICY_CONTROL):
raise Unauthorized(
context=call.context,
entity_id=entity.entity_id,
permission=POLICY_CONTROL,
)
platform_entities.append(entity)
entity_candidates.extend(platform_entities)
if not target_all_entities:
for entity in entity_candidates:
entity_ids.remove(entity.entity_id)
if entity_ids:
_LOGGER.warning(
"Unable to find referenced entities %s", ", ".join(sorted(entity_ids))
)
entities = []
for entity in entity_candidates:
if not entity.available:
continue
# Skip entities that don't have the required feature.
if required_features is not None and (
entity.supported_features is None
or not any(
entity.supported_features & feature_set == feature_set
for feature_set in required_features
)
):
continue
entities.append(entity)
if not entities:
return
done, pending = await asyncio.wait(
[
entity.async_request_call(
_handle_entity_call(hass, entity, func, data, call.context)
)
for entity in entities
]
)
assert not pending
for future in done:
future.result()  # raise the exception from the task, if any
tasks = []
for entity in entities:
if not entity.should_poll:
continue
# Context expires if the turn on commands took a long time.
# Set context again so it's there when we update
entity.async_set_context(call.context)
tasks.append(entity.async_update_ha_state(True))
if tasks:
done, pending = await asyncio.wait(tasks)
assert not pending
for future in done:
future.result()  # raise the exception from the task, if any
async def _handle_entity_call(
hass: HomeAssistantType,
entity: "Entity",
func: Union[str, Callable[..., Any]],
data: Union[Dict, ha.ServiceCall],
context: ha.Context,
) -> None:
"""Handle calling service method."""
entity.async_set_context(context)
if isinstance(func, str):
result = hass.async_add_job(partial(getattr(entity, func), **data)) # type: ignore
else:
result = hass.async_add_job(func, entity, data)
# Guard because callback functions do not return a task when passed to async_add_job.
if result is not None:
await result
if asyncio.iscoroutine(result):
_LOGGER.error(
"Service %s for %s incorrectly returns a coroutine object. Await result instead in service handler. Report bug to integration author",
func,
entity.entity_id,
)
await result # type: ignore
@bind_hass
@ha.callback
def async_register_admin_service(
hass: HomeAssistantType,
domain: str,
service: str,
service_func: Callable[[ha.ServiceCall], Optional[Awaitable]],
schema: vol.Schema = vol.Schema({}, extra=vol.PREVENT_EXTRA),
) -> None:
"""Register a service that requires admin access."""
@wraps(service_func)
async def admin_handler(call: ha.ServiceCall) -> None:
if call.context.user_id:
user = await hass.auth.async_get_user(call.context.user_id)
if user is None:
raise UnknownUser(context=call.context)
if not user.is_admin:
raise Unauthorized(context=call.context)
result = hass.async_add_job(service_func, call)
if result is not None:
await result
hass.services.async_register(domain, service, admin_handler, schema)
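# Illustrative usage (domain, service and handler names are hypothetical):
#
#   async def handle_reload(call: ha.ServiceCall) -> None:
#       ...
#   async_register_admin_service(hass, "my_domain", "reload", handle_reload)
#
# Non-admin callers get an Unauthorized error before the handler runs.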
@bind_hass
@ha.callback
def verify_domain_control(hass: HomeAssistantType, domain: str) -> Callable:
"""Ensure permission to access any entity under domain in service call."""
def decorator(service_handler: Callable[[ha.ServiceCall], Any]) -> Callable:
"""Decorate."""
if not asyncio.iscoroutinefunction(service_handler):
raise HomeAssistantError("Can only decorate async functions.")
async def check_permissions(call: ha.ServiceCall) -> Any:
"""Check user permission and raise before call if unauthorized."""
if not call.context.user_id:
return await service_handler(call)
user = await hass.auth.async_get_user(call.context.user_id)
if user is None:
raise UnknownUser(
context=call.context,
permission=POLICY_CONTROL,
user_id=call.context.user_id,
)
reg = await hass.helpers.entity_registry.async_get_registry()
authorized = False
for entity in reg.entities.values():
if entity.platform != domain:
continue
if user.permissions.check_entity(entity.entity_id, POLICY_CONTROL):
authorized = True
break
if not authorized:
raise Unauthorized(
context=call.context,
permission=POLICY_CONTROL,
user_id=call.context.user_id,
perm_category=CAT_ENTITIES,
)
return await service_handler(call)
return check_permissions
return decorator
|
revision = "4c50b903d1ae"
down_revision = "33de094da890"
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
### commands auto generated by Alembic - please adjust! ###
op.add_column("domains", sa.Column("sensitive", sa.Boolean(), nullable=True))
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
op.drop_column("domains", "sensitive")
### end Alembic commands ###
|
import asyncio
import json
import logging
from aioharmony.const import ClientCallbackType
import aioharmony.exceptions as aioexc
from aioharmony.harmonyapi import HarmonyAPI as HarmonyClient, SendCommandDevice
import voluptuous as vol
from homeassistant.components import remote
from homeassistant.components.remote import (
ATTR_ACTIVITY,
ATTR_DELAY_SECS,
ATTR_DEVICE,
ATTR_HOLD_SECS,
ATTR_NUM_REPEATS,
DEFAULT_DELAY_SECS,
PLATFORM_SCHEMA,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import ATTR_ENTITY_ID, CONF_HOST, CONF_NAME
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.restore_state import RestoreEntity
from .const import (
ACTIVITY_POWER_OFF,
ATTR_ACTIVITY_LIST,
ATTR_ACTIVITY_STARTING,
ATTR_CURRENT_ACTIVITY,
ATTR_DEVICES_LIST,
ATTR_LAST_ACTIVITY,
DOMAIN,
HARMONY_OPTIONS_UPDATE,
PREVIOUS_ACTIVE_ACTIVITY,
SERVICE_CHANGE_CHANNEL,
SERVICE_SYNC,
UNIQUE_ID,
)
from .util import (
find_best_name_for_remote,
find_matching_config_entries_for_host,
find_unique_id_for_remote,
get_harmony_client_if_available,
list_names_from_hublist,
)
_LOGGER = logging.getLogger(__name__)
# We want to fire remote commands right away
PARALLEL_UPDATES = 0
ATTR_CHANNEL = "channel"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(ATTR_ACTIVITY): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Optional(ATTR_DELAY_SECS, default=DEFAULT_DELAY_SECS): vol.Coerce(float),
vol.Required(CONF_HOST): cv.string,
# The client ignores the port, so let's not confuse the user by pretending we do anything with it
},
extra=vol.ALLOW_EXTRA,
)
HARMONY_SYNC_SCHEMA = vol.Schema({vol.Optional(ATTR_ENTITY_ID): cv.entity_ids})
HARMONY_CHANGE_CHANNEL_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_CHANNEL): cv.positive_int,
}
)
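# Illustrative service data (entity id is hypothetical) that passes the schema above:
#
#   {"entity_id": "remote.living_room", "channel": 200}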
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Harmony platform."""
if discovery_info:
# Now handled by ssdp in the config flow
return
if find_matching_config_entries_for_host(hass, config[CONF_HOST]):
return
# We do the validation to verify we can connect
# so we can raise PlatformNotReady to force
# a retry so we can avoid a scenario where the config
# entry cannot be created via import because hub
# is not yet ready.
harmony = await get_harmony_client_if_available(config[CONF_HOST])
if not harmony:
raise PlatformNotReady
validated_config = config.copy()
validated_config[UNIQUE_ID] = find_unique_id_for_remote(harmony)
validated_config[CONF_NAME] = find_best_name_for_remote(config, harmony)
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=validated_config
)
)
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Set up the Harmony config entry."""
device = hass.data[DOMAIN][entry.entry_id]
_LOGGER.debug("Harmony Remote: %s", device)
async_add_entities([device])
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SYNC,
HARMONY_SYNC_SCHEMA,
"sync",
)
platform.async_register_entity_service(
SERVICE_CHANGE_CHANNEL, HARMONY_CHANGE_CHANNEL_SCHEMA, "change_channel"
)
class HarmonyRemote(remote.RemoteEntity, RestoreEntity):
"""Remote representation used to control a Harmony device."""
def __init__(self, name, unique_id, host, activity, out_path, delay_secs):
"""Initialize HarmonyRemote class."""
self._name = name
self.host = host
self._state = None
self._current_activity = ACTIVITY_POWER_OFF
self.default_activity = activity
self._activity_starting = None
self._is_initial_update = True
self._client = HarmonyClient(ip_address=host)
self._config_path = out_path
self.delay_secs = delay_secs
self._available = False
self._unique_id = unique_id
self._last_activity = None
@property
def activity_names(self):
"""Names of all the remotes activities."""
activities = [activity["label"] for activity in self._client.config["activity"]]
# Remove both ways of representing PowerOff
if None in activities:
activities.remove(None)
if ACTIVITY_POWER_OFF in activities:
activities.remove(ACTIVITY_POWER_OFF)
return activities
async def _async_update_options(self, data):
"""Change options when the options flow does."""
if ATTR_DELAY_SECS in data:
self.delay_secs = data[ATTR_DELAY_SECS]
if ATTR_ACTIVITY in data:
self.default_activity = data[ATTR_ACTIVITY]
def _update_callbacks(self):
callbacks = {
"config_updated": self.new_config,
"connect": self.got_connected,
"disconnect": self.got_disconnected,
"new_activity_starting": self.new_activity,
"new_activity": self._new_activity_finished,
}
self._client.callbacks = ClientCallbackType(**callbacks)
def _new_activity_finished(self, activity_info: tuple) -> None:
"""Call for finished updated current activity."""
self._activity_starting = None
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Complete the initialization."""
await super().async_added_to_hass()
_LOGGER.debug("%s: Harmony Hub added", self._name)
# Register the callbacks
self._update_callbacks()
self.async_on_remove(
async_dispatcher_connect(
self.hass,
f"{HARMONY_OPTIONS_UPDATE}-{self.unique_id}",
self._async_update_options,
)
)
# Store Harmony HUB config, this will also update our current
# activity
await self.new_config()
# Restore the last activity so we know
# what to turn on if nothing
# is specified
last_state = await self.async_get_last_state()
if not last_state:
return
if ATTR_LAST_ACTIVITY not in last_state.attributes:
return
if self.is_on:
return
self._last_activity = last_state.attributes[ATTR_LAST_ACTIVITY]
async def shutdown(self):
"""Close connection on shutdown."""
_LOGGER.debug("%s: Closing Harmony Hub", self._name)
try:
await self._client.close()
except aioexc.TimeOut:
_LOGGER.warning("%s: Disconnect timed-out", self._name)
@property
def device_info(self):
"""Return device info."""
model = "Harmony Hub"
if "ethernetStatus" in self._client.hub_config.info:
model = "Harmony Hub Pro 2400"
return {
"identifiers": {(DOMAIN, self.unique_id)},
"manufacturer": "Logitech",
"sw_version": self._client.hub_config.info.get(
"hubSwVersion", self._client.fw_version
),
"name": self.name,
"model": model,
}
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the Harmony device's name."""
return self._name
@property
def should_poll(self):
"""Return the fact that we should not be polled."""
return False
@property
def device_state_attributes(self):
"""Add platform specific attributes."""
return {
ATTR_ACTIVITY_STARTING: self._activity_starting,
ATTR_CURRENT_ACTIVITY: self._current_activity,
ATTR_ACTIVITY_LIST: list_names_from_hublist(
self._client.hub_config.activities
),
ATTR_DEVICES_LIST: list_names_from_hublist(self._client.hub_config.devices),
ATTR_LAST_ACTIVITY: self._last_activity,
}
@property
def is_on(self):
"""Return False if PowerOff is the current activity, otherwise True."""
return self._current_activity not in [None, "PowerOff"]
@property
def available(self):
"""Return True if connected to Hub, otherwise False."""
return self._available
async def connect(self):
"""Connect to the Harmony HUB."""
_LOGGER.debug("%s: Connecting", self._name)
try:
if not await self._client.connect():
_LOGGER.warning("%s: Unable to connect to HUB", self._name)
await self._client.close()
return False
except aioexc.TimeOut:
_LOGGER.warning("%s: Connection timed-out", self._name)
return False
return True
def new_activity(self, activity_info: tuple) -> None:
"""Call for updating the current activity."""
activity_id, activity_name = activity_info
_LOGGER.debug("%s: activity reported as: %s", self._name, activity_name)
self._current_activity = activity_name
if self._is_initial_update:
self._is_initial_update = False
else:
self._activity_starting = activity_name
if activity_id != -1:
# Save the activity so we can restore
# to that activity if none is specified
# when turning on
self._last_activity = activity_name
self._state = bool(activity_id != -1)
self._available = True
self.async_write_ha_state()
async def new_config(self, _=None):
"""Call for updating the current activity."""
_LOGGER.debug("%s: configuration has been updated", self._name)
self.new_activity(self._client.current_activity)
await self.hass.async_add_executor_job(self.write_config_file)
async def got_connected(self, _=None):
"""Notification that we're connected to the HUB."""
_LOGGER.debug("%s: connected to the HUB", self._name)
if not self._available:
# We were disconnected before.
await self.new_config()
async def got_disconnected(self, _=None):
"""Notification that we're disconnected from the HUB."""
_LOGGER.debug("%s: disconnected from the HUB", self._name)
self._available = False
# We're going to wait for 10 seconds before announcing we're
# unavailable, to allow a reconnection to happen.
await asyncio.sleep(10)
if not self._available:
# Still disconnected. Let the state engine know.
self.async_write_ha_state()
async def async_turn_on(self, **kwargs):
"""Start an activity from the Harmony device."""
_LOGGER.debug("%s: Turn On", self.name)
activity = kwargs.get(ATTR_ACTIVITY, self.default_activity)
if not activity or activity == PREVIOUS_ACTIVE_ACTIVITY:
if self._last_activity:
activity = self._last_activity
else:
all_activities = list_names_from_hublist(
self._client.hub_config.activities
)
if all_activities:
activity = all_activities[0]
if activity:
activity_id = None
activity_name = None
if activity.isdigit() or activity == "-1":
_LOGGER.debug("%s: Activity is numeric", self.name)
activity_name = self._client.get_activity_name(int(activity))
if activity_name:
activity_id = activity
if activity_id is None:
_LOGGER.debug("%s: Find activity ID based on name", self.name)
activity_name = str(activity)
activity_id = self._client.get_activity_id(activity_name)
if activity_id is None:
_LOGGER.error("%s: Activity %s is invalid", self.name, activity)
return
if self._current_activity == activity_name:
# Automations or HomeKit may turn the device on multiple times
# when the current activity is already active which will cause
# harmony to lose state. This behavior is unexpected as turning
# the device on when it's already on isn't expected to reset state.
_LOGGER.debug(
"%s: Current activity is already %s", self.name, activity_name
)
return
try:
await self._client.start_activity(activity_id)
except aioexc.TimeOut:
_LOGGER.error("%s: Starting activity %s timed-out", self.name, activity)
else:
_LOGGER.error("%s: No activity specified with turn_on service", self.name)
async def async_turn_off(self, **kwargs):
"""Start the PowerOff activity."""
_LOGGER.debug("%s: Turn Off", self.name)
try:
await self._client.power_off()
except aioexc.TimeOut:
_LOGGER.error("%s: Powering off timed-out", self.name)
async def async_send_command(self, command, **kwargs):
"""Send a list of commands to one device."""
_LOGGER.debug("%s: Send Command", self.name)
device = kwargs.get(ATTR_DEVICE)
if device is None:
_LOGGER.error("%s: Missing required argument: device", self.name)
return
device_id = None
if device.isdigit():
_LOGGER.debug("%s: Device %s is numeric", self.name, device)
if self._client.get_device_name(int(device)):
device_id = device
if device_id is None:
_LOGGER.debug(
"%s: Find device ID %s based on device name", self.name, device
)
device_id = self._client.get_device_id(str(device).strip())
if device_id is None:
_LOGGER.error("%s: Device %s is invalid", self.name, device)
return
num_repeats = kwargs[ATTR_NUM_REPEATS]
delay_secs = kwargs.get(ATTR_DELAY_SECS, self.delay_secs)
hold_secs = kwargs[ATTR_HOLD_SECS]
_LOGGER.debug(
"Sending commands to device %s holding for %s seconds "
"with a delay of %s seconds",
device,
hold_secs,
delay_secs,
)
# Creating list of commands to send.
snd_cmnd_list = []
for _ in range(num_repeats):
for single_command in command:
send_command = SendCommandDevice(
device=device_id, command=single_command, delay=hold_secs
)
snd_cmnd_list.append(send_command)
if delay_secs > 0:
snd_cmnd_list.append(float(delay_secs))
_LOGGER.debug("%s: Sending commands", self.name)
try:
result_list = await self._client.send_commands(snd_cmnd_list)
except aioexc.TimeOut:
_LOGGER.error("%s: Sending commands timed-out", self.name)
return
for result in result_list:
_LOGGER.error(
"Sending command %s to device %s failed with code %s: %s",
result.command.command,
result.command.device,
result.code,
result.msg,
)
async def change_channel(self, channel):
"""Change the channel using Harmony remote."""
_LOGGER.debug("%s: Changing channel to %s", self.name, channel)
try:
await self._client.change_channel(channel)
except aioexc.TimeOut:
_LOGGER.error("%s: Changing channel to %s timed-out", self.name, channel)
async def sync(self):
"""Sync the Harmony device with the web service."""
_LOGGER.debug("%s: Syncing hub with Harmony cloud", self.name)
try:
await self._client.sync()
except aioexc.TimeOut:
_LOGGER.error("%s: Syncing hub with Harmony cloud timed-out", self.name)
else:
await self.hass.async_add_executor_job(self.write_config_file)
def write_config_file(self):
"""Write Harmony configuration file."""
_LOGGER.debug(
"%s: Writing hub configuration to file: %s", self.name, self._config_path
)
if self._client.config is None:
_LOGGER.warning("%s: No configuration received from hub", self.name)
return
try:
with open(self._config_path, "w+", encoding="utf-8") as file_out:
json.dump(self._client.json_config, file_out, sort_keys=True, indent=4)
except OSError as exc:
_LOGGER.error(
"%s: Unable to write HUB configuration to %s: %s",
self.name,
self._config_path,
exc,
)
|
from __future__ import unicode_literals
import copy
import types
class AttribDict(dict):
def __init__(self, indict=None, attribute=None):
if indict is None:
indict = {}
# Set any attributes here - before initialisation
# these remain as normal attributes
self.attribute = attribute
dict.__init__(self, indict)
self.__initialised = True
# After initialisation, setting attributes
# is the same as setting an item
def __getattr__(self, item):
"""
Maps values to attributes
Only called if there *is NOT* an attribute with this name
"""
try:
return self.__getitem__(item)
except KeyError:
raise AttributeError("unable to access item '%s'" % item)
def __setattr__(self, item, value):
"""
Maps attributes to values
Only if we are initialised
"""
# This test allows attributes to be set in the __init__ method
if "_AttribDict__initialised" not in self.__dict__:
return dict.__setattr__(self, item, value)
# Any normal attributes are handled normally
elif item in self.__dict__:
dict.__setattr__(self, item, value)
else:
self.__setitem__(item, value)
def __getstate__(self):
return self.__dict__
def __setstate__(self, state):
self.__dict__ = state
def __deepcopy__(self, memo):
retVal = self.__class__()
memo[id(self)] = retVal
for attr in dir(self):
if not attr.startswith('_'):
value = getattr(self, attr)
if not isinstance(value, (types.BuiltinFunctionType, types.FunctionType, types.MethodType)):
setattr(retVal, attr, copy.deepcopy(value, memo))
for key, value in self.items():
retVal.__setitem__(key, copy.deepcopy(value, memo))
return retVal
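# A minimal usage sketch (the "url"/"verbose" keys and the __main__ guard are
# illustrative additions, not part of the class above): once initialised,
# attribute assignment is routed to dict items and vice versa, and __deepcopy__
# copies both the items and the plain attributes.
if __name__ == "__main__":
    conf = AttribDict()
    conf.url = "http://example.com"   # stored as conf["url"]
    conf["verbose"] = True            # readable as conf.verbose
    clone = copy.deepcopy(conf)
    assert clone.url == "http://example.com"
    assert clone["verbose"] is True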
|
import argparse
import os
import sys
def main(args):
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("status", action="store", nargs="?", default=0, type=int, help="status code")
ns = p.parse_args(args)
sys.exit(ns.status)
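# A minimal sketch of driving main() programmatically (the helper name below is
# illustrative, not part of the original script): argparse parses the optional
# positional status and sys.exit() raises SystemExit carrying that code.
def _demo_exit_status():
    try:
        main(["3"])
    except SystemExit as exc:
        assert exc.code == 3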
if __name__ == "__main__":
main(sys.argv[1:])
|
from time import time
import mne
from mne.preprocessing import ICA
from mne.datasets import sample
print(__doc__)
###############################################################################
# Read and preprocess the data. Preprocessing consists of:
#
# - MEG channel selection
# - 1-30 Hz band-pass filter
data_path = sample.data_path()
raw_fname = data_path + '/MEG/sample/sample_audvis_filt-0-40_raw.fif'
raw = mne.io.read_raw_fif(raw_fname, preload=True)
picks = mne.pick_types(raw.info, meg=True)
reject = dict(mag=5e-12, grad=4000e-13)
raw.filter(1, 30, fir_design='firwin')
###############################################################################
# Define a function that runs ICA on the raw MEG data and plots the components
def run_ica(method, fit_params=None):
ica = ICA(n_components=20, method=method, fit_params=fit_params,
random_state=0)
t0 = time()
ica.fit(raw, picks=picks, reject=reject)
fit_time = time() - t0
title = ('ICA decomposition using %s (took %.1fs)' % (method, fit_time))
ica.plot_components(title=title)
###############################################################################
# FastICA
run_ica('fastica')
###############################################################################
# Picard
run_ica('picard')
###############################################################################
# Infomax
run_ica('infomax')
###############################################################################
# Extended Infomax
run_ica('infomax', fit_params=dict(extended=True))
|
import asyncio
from datetime import timedelta
import logging
from pybotvac import Account, Neato, Vorwerk
from pybotvac.exceptions import NeatoException, NeatoLoginException, NeatoRobotException
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.util import Throttle
from .config_flow import NeatoConfigFlow
from .const import (
CONF_VENDOR,
NEATO_CONFIG,
NEATO_DOMAIN,
NEATO_LOGIN,
NEATO_MAP_DATA,
NEATO_PERSISTENT_MAPS,
NEATO_ROBOTS,
VALID_VENDORS,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
NEATO_DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_VENDOR, default="neato"): vol.In(VALID_VENDORS),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the Neato component."""
if NEATO_DOMAIN not in config:
# There is an entry and nothing in configuration.yaml
return True
entries = hass.config_entries.async_entries(NEATO_DOMAIN)
hass.data[NEATO_CONFIG] = config[NEATO_DOMAIN]
if entries:
# There is an entry and something in the configuration.yaml
entry = entries[0]
conf = config[NEATO_DOMAIN]
if (
entry.data[CONF_USERNAME] == conf[CONF_USERNAME]
and entry.data[CONF_PASSWORD] == conf[CONF_PASSWORD]
and entry.data[CONF_VENDOR] == conf[CONF_VENDOR]
):
# The entry is not outdated
return True
# The entry is outdated
error = await hass.async_add_executor_job(
NeatoConfigFlow.try_login,
conf[CONF_USERNAME],
conf[CONF_PASSWORD],
conf[CONF_VENDOR],
)
if error is not None:
_LOGGER.error(error)
return False
# Update the entry
hass.config_entries.async_update_entry(entry, data=config[NEATO_DOMAIN])
else:
# Create the new entry
hass.async_create_task(
hass.config_entries.flow.async_init(
NEATO_DOMAIN,
context={"source": SOURCE_IMPORT},
data=config[NEATO_DOMAIN],
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up config entry."""
hub = NeatoHub(hass, entry.data, Account)
await hass.async_add_executor_job(hub.login)
if not hub.logged_in:
_LOGGER.debug("Failed to login to Neato API")
return False
try:
await hass.async_add_executor_job(hub.update_robots)
except NeatoRobotException as ex:
_LOGGER.debug("Failed to connect to Neato API")
raise ConfigEntryNotReady from ex
hass.data[NEATO_LOGIN] = hub
for component in ("camera", "vacuum", "switch", "sensor"):
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass, entry):
"""Unload config entry."""
hass.data.pop(NEATO_LOGIN)
await asyncio.gather(
hass.config_entries.async_forward_entry_unload(entry, "camera"),
hass.config_entries.async_forward_entry_unload(entry, "vacuum"),
hass.config_entries.async_forward_entry_unload(entry, "switch"),
hass.config_entries.async_forward_entry_unload(entry, "sensor"),
)
return True
class NeatoHub:
"""A My Neato hub wrapper class."""
def __init__(self, hass, domain_config, neato):
"""Initialize the Neato hub."""
self.config = domain_config
self._neato = neato
self._hass = hass
if self.config[CONF_VENDOR] == "vorwerk":
self._vendor = Vorwerk()
else: # Neato
self._vendor = Neato()
self.my_neato = None
self.logged_in = False
def login(self):
"""Login to My Neato."""
_LOGGER.debug("Trying to connect to Neato API")
try:
self.my_neato = self._neato(
self.config[CONF_USERNAME], self.config[CONF_PASSWORD], self._vendor
)
except NeatoException as ex:
if isinstance(ex, NeatoLoginException):
_LOGGER.error("Invalid credentials")
else:
_LOGGER.error("Unable to connect to Neato API")
raise ConfigEntryNotReady from ex
self.logged_in = False
return
self.logged_in = True
_LOGGER.debug("Successfully connected to Neato API")
@Throttle(timedelta(minutes=1))
def update_robots(self):
"""Update the robot states."""
_LOGGER.debug("Running HUB.update_robots %s", self._hass.data.get(NEATO_ROBOTS))
self._hass.data[NEATO_ROBOTS] = self.my_neato.robots
self._hass.data[NEATO_PERSISTENT_MAPS] = self.my_neato.persistent_maps
self._hass.data[NEATO_MAP_DATA] = self.my_neato.maps
def download_map(self, url):
"""Download a new map image."""
map_image_data = self.my_neato.get_map_image(url)
return map_image_data
|
import mock
import pytest
from paasta_tools.monitoring.check_marathon_has_apps import check_marathon_apps
def test_check_marathon_jobs_no_config(capfd):
with mock.patch(
"paasta_tools.marathon_tools.get_list_of_marathon_clients",
autospec=True,
return_value=[],
):
with pytest.raises(SystemExit) as error:
check_marathon_apps()
out, err = capfd.readouterr()
assert "UNKNOWN" in out
assert error.value.code == 3
def test_marathon_jobs_no_jobs(capfd):
mock_client = mock.MagicMock()
mock_client.list_apps.return_value = []
with mock.patch(
# We expect this is tested properly elsewhere
"paasta_tools.marathon_tools.get_list_of_marathon_clients",
autospec=True,
return_value=[mock_client],
):
with pytest.raises(SystemExit) as error:
check_marathon_apps()
out, err = capfd.readouterr()
assert "CRITICAL" in out
assert error.value.code == 2
def test_marathon_jobs_some_jobs(capfd):
mock_client = mock.MagicMock()
with mock.patch(
# We expect this is tested properly elsewhere
"paasta_tools.marathon_tools.get_list_of_marathon_clients",
autospec=True,
return_value=[mock_client],
), mock.patch(
"paasta_tools.metrics.metastatus_lib.get_all_marathon_apps",
autospec=True,
return_value=["foo", "bar"],
):
with pytest.raises(SystemExit) as error:
check_marathon_apps()
out, err = capfd.readouterr()
assert "OK" in out
assert "2" in out
assert error.value.code == 0
|
import inspect
def list_recursive_concrete_subclasses(base):
"""List all concrete subclasses of `base` recursively."""
return _filter_concrete(_bfs(base))
def _filter_concrete(classes):
return list(filter(lambda c: not inspect.isabstract(c), classes))
def _bfs(base):
return base.__subclasses__() + sum([
_bfs(subclass)
for subclass in base.__subclasses__()
], [])
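# A minimal usage sketch (the Base/Middle/Leaf hierarchy is made up for
# illustration): abstract intermediate classes are filtered out while concrete
# leaves at any depth are kept.
if __name__ == "__main__":
    import abc

    class Base(abc.ABC):
        @abc.abstractmethod
        def run(self):
            ...

    class Middle(Base):  # still abstract: run() is not implemented
        pass

    class Leaf(Middle):
        def run(self):
            return "leaf"

    assert list_recursive_concrete_subclasses(Base) == [Leaf]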
|
import itertools
import csv
import os
import time
import optparse
import logging
import dedupe
import exampleIO
def canonicalImport(filename):
preProcess = exampleIO.preProcess
data_d = {}
with open(filename) as f:
reader = csv.DictReader(f)
for i, row in enumerate(reader):
clean_row = {k: preProcess(v) for (k, v) in
row.items()}
data_d[filename + str(i)] = clean_row
return data_d, reader.fieldnames
def evaluateDuplicates(found_dupes, true_dupes):
true_positives = found_dupes.intersection(true_dupes)
false_positives = found_dupes.difference(true_dupes)
print('found duplicates')
print(len(found_dupes))
print('precision')
print(1 - len(false_positives) / float(len(found_dupes)))
print('recall')
print(len(true_positives) / float(len(true_dupes)))
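# A minimal sketch of the metric above on toy pair sets (the record ids are made
# up, not taken from the restaurant datasets): one true positive and one false
# positive out of two found pairs gives precision 0.5; one of two known
# duplicates recovered gives recall 0.5.
def _demo_evaluate():
    found = {frozenset(('a', 'b')), frozenset(('a', 'c'))}
    true = {frozenset(('a', 'b')), frozenset(('a', 'd'))}
    evaluateDuplicates(found, true)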
if __name__ == '__main__':
optp = optparse.OptionParser()
optp.add_option('-v', '--verbose', dest='verbose', action='count',
help='Increase verbosity (specify multiple times for more)'
)
(opts, args) = optp.parse_args()
log_level = logging.WARNING
if opts.verbose:
if opts.verbose == 1:
log_level = logging.INFO
elif opts.verbose >= 2:
log_level = logging.DEBUG
logging.getLogger().setLevel(log_level)
settings_file = 'canonical_data_matching_learned_settings'
data_1, header = canonicalImport('tests/datasets/restaurant-1.csv')
data_2, _ = canonicalImport('tests/datasets/restaurant-2.csv')
training_pairs = dedupe.training_data_link(data_1, data_2, 'unique_id', 5000)
all_data = data_1.copy()
all_data.update(data_2)
duplicates_s = set()
for _, pair in itertools.groupby(sorted(all_data.items(),
key=lambda x: x[1]['unique_id']),
key=lambda x: x[1]['unique_id']):
pair = list(pair)
if len(pair) == 2:
a, b = pair
duplicates_s.add(frozenset((a[0], b[0])))
t0 = time.time()
print('number of known duplicate pairs', len(duplicates_s))
if os.path.exists(settings_file):
with open(settings_file, 'rb') as f:
deduper = dedupe.StaticRecordLink(f)
else:
fields = [{'field': 'name', 'type': 'String'},
{'field': 'address', 'type': 'String'},
{'field': 'cuisine', 'type': 'String'},
{'field': 'city', 'type': 'String'}
]
deduper = dedupe.RecordLink(fields)
deduper.prepare_training(data_1, data_2, sample_size=10000)
deduper.mark_pairs(training_pairs)
deduper.train()
with open(settings_file, 'wb') as f:
deduper.write_settings(f)
# print candidates
print('clustering...')
clustered_dupes = deduper.join(data_1, data_2, threshold=0.5)
print('Evaluate Clustering')
confirm_dupes = set(frozenset(pair)
for pair, score in clustered_dupes)
evaluateDuplicates(confirm_dupes, duplicates_s)
print('ran in ', time.time() - t0, 'seconds')
# print candidates
print('clustering...')
clustered_dupes = deduper.join(data_1, data_2, threshold=0.5, constraint='many-to-one')
print('Evaluate Clustering')
confirm_dupes = set(frozenset(pair)
for pair, score in clustered_dupes)
evaluateDuplicates(confirm_dupes, duplicates_s)
print('ran in ', time.time() - t0, 'seconds')
|
from datetime import datetime, timedelta
import pytest
from homeassistant.components import recorder
import homeassistant.components.plant as plant
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONDUCTIVITY,
LIGHT_LUX,
STATE_OK,
STATE_PROBLEM,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import State
from homeassistant.setup import async_setup_component
from tests.common import init_recorder_component
GOOD_DATA = {
"moisture": 50,
"battery": 90,
"temperature": 23.4,
"conductivity": 777,
"brightness": 987,
}
BRIGHTNESS_ENTITY = "sensor.mqtt_plant_brightness"
MOISTURE_ENTITY = "sensor.mqtt_plant_moisture"
GOOD_CONFIG = {
"sensors": {
"moisture": MOISTURE_ENTITY,
"battery": "sensor.mqtt_plant_battery",
"temperature": "sensor.mqtt_plant_temperature",
"conductivity": "sensor.mqtt_plant_conductivity",
"brightness": BRIGHTNESS_ENTITY,
},
"min_moisture": 20,
"max_moisture": 60,
"min_battery": 17,
"min_conductivity": 500,
"min_temperature": 15,
"min_brightness": 500,
}
async def test_valid_data(hass):
"""Test processing valid data."""
sensor = plant.Plant("my plant", GOOD_CONFIG)
sensor.entity_id = "sensor.mqtt_plant_battery"
sensor.hass = hass
for reading, value in GOOD_DATA.items():
sensor.state_changed(
GOOD_CONFIG["sensors"][reading],
State(GOOD_CONFIG["sensors"][reading], value),
)
assert sensor.state == "ok"
attrib = sensor.state_attributes
for reading, value in GOOD_DATA.items():
# battery level has a different name in
# the JSON format than in hass
assert attrib[reading] == value
async def test_low_battery(hass):
"""Test processing with low battery data and limit set."""
sensor = plant.Plant("other plant", GOOD_CONFIG)
sensor.entity_id = "sensor.mqtt_plant_battery"
sensor.hass = hass
assert sensor.state_attributes["problem"] == "none"
sensor.state_changed(
"sensor.mqtt_plant_battery",
State("sensor.mqtt_plant_battery", 10),
)
assert sensor.state == "problem"
assert sensor.state_attributes["problem"] == "battery low"
async def test_initial_states(hass):
"""Test plant initialises attributes if sensor already exists."""
hass.states.async_set(MOISTURE_ENTITY, 5, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY})
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert 5 == state.attributes[plant.READING_MOISTURE]
async def test_update_states(hass):
"""Test updating the state of a sensor.
Make sure that plant processes this correctly.
"""
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
hass.states.async_set(MOISTURE_ENTITY, 5, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_PROBLEM == state.state
assert 5 == state.attributes[plant.READING_MOISTURE]
async def test_unavailable_state(hass):
"""Test updating the state with unavailable.
Make sure that plant processes this correctly.
"""
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
hass.states.async_set(
MOISTURE_ENTITY, STATE_UNAVAILABLE, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY}
)
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert state.state == STATE_PROBLEM
assert state.attributes[plant.READING_MOISTURE] == STATE_UNAVAILABLE
async def test_state_problem_if_unavailable(hass):
"""Test updating the state with unavailable after setting it to valid value.
Make sure that plant processes this correctly.
"""
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
hass.states.async_set(MOISTURE_ENTITY, 42, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert state.state == STATE_OK
assert state.attributes[plant.READING_MOISTURE] == 42
hass.states.async_set(
MOISTURE_ENTITY, STATE_UNAVAILABLE, {ATTR_UNIT_OF_MEASUREMENT: CONDUCTIVITY}
)
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert state.state == STATE_PROBLEM
assert state.attributes[plant.READING_MOISTURE] == STATE_UNAVAILABLE
@pytest.mark.skipif(
plant.ENABLE_LOAD_HISTORY is False,
reason="tests for loading from DB are unstable, thus"
"this feature is turned of until tests become"
"stable",
)
async def test_load_from_db(hass):
"""Test bootstrapping the brightness history from the database.
This test should only be executed if the loading of the history
is enabled via plant.ENABLE_LOAD_HISTORY.
"""
init_recorder_component(hass)
plant_name = "wise_plant"
for value in [20, 30, 10]:
hass.states.async_set(
BRIGHTNESS_ENTITY, value, {ATTR_UNIT_OF_MEASUREMENT: "Lux"}
)
await hass.async_block_till_done()
# wait for the recorder to really store the data
hass.data[recorder.DATA_INSTANCE].block_till_done()
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_UNKNOWN == state.state
max_brightness = state.attributes.get(plant.ATTR_MAX_BRIGHTNESS_HISTORY)
assert 30 == max_brightness
async def test_brightness_history(hass):
"""Test the min_brightness check."""
plant_name = "some_plant"
assert await async_setup_component(
hass, plant.DOMAIN, {plant.DOMAIN: {plant_name: GOOD_CONFIG}}
)
hass.states.async_set(BRIGHTNESS_ENTITY, 100, {ATTR_UNIT_OF_MEASUREMENT: LIGHT_LUX})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_PROBLEM == state.state
hass.states.async_set(BRIGHTNESS_ENTITY, 600, {ATTR_UNIT_OF_MEASUREMENT: LIGHT_LUX})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_OK == state.state
hass.states.async_set(BRIGHTNESS_ENTITY, 100, {ATTR_UNIT_OF_MEASUREMENT: LIGHT_LUX})
await hass.async_block_till_done()
state = hass.states.get(f"plant.{plant_name}")
assert STATE_OK == state.state
def test_daily_history_no_data(hass):
"""Test with empty history."""
dh = plant.DailyHistory(3)
assert dh.max is None
def test_daily_history_one_day(hass):
"""Test storing data for the same day."""
dh = plant.DailyHistory(3)
values = [-2, 10, 0, 5, 20]
for i in range(len(values)):
dh.add_measurement(values[i])
max_value = max(values[0 : i + 1])
assert 1 == len(dh._days)
assert dh.max == max_value
def test_daily_history_multiple_days(hass):
"""Test storing data for different days."""
dh = plant.DailyHistory(3)
today = datetime.now()
today_minus_1 = today - timedelta(days=1)
today_minus_2 = today_minus_1 - timedelta(days=1)
today_minus_3 = today_minus_2 - timedelta(days=1)
days = [today_minus_3, today_minus_2, today_minus_1, today]
values = [10, 1, 7, 3]
max_values = [10, 10, 10, 7]
for i in range(len(days)):
dh.add_measurement(values[i], days[i])
assert max_values[i] == dh.max
|
import cherrypy
from cherrypy.test import helper
class WSGI_Namespace_Test(helper.CPWebCase):
@staticmethod
def setup_server():
class WSGIResponse(object):
def __init__(self, appresults):
self.appresults = appresults
self.iter = iter(appresults)
def __iter__(self):
return self
def next(self):
return self.iter.next()
def __next__(self):
return next(self.iter)
def close(self):
if hasattr(self.appresults, 'close'):
self.appresults.close()
class ChangeCase(object):
def __init__(self, app, to=None):
self.app = app
self.to = to
def __call__(self, environ, start_response):
res = self.app(environ, start_response)
class CaseResults(WSGIResponse):
def next(this):
return getattr(this.iter.next(), self.to)()
def __next__(this):
return getattr(next(this.iter), self.to)()
return CaseResults(res)
class Replacer(object):
def __init__(self, app, map={}):
self.app = app
self.map = map
def __call__(self, environ, start_response):
res = self.app(environ, start_response)
class ReplaceResults(WSGIResponse):
def next(this):
line = this.iter.next()
for k, v in self.map.iteritems():
line = line.replace(k, v)
return line
def __next__(this):
line = next(this.iter)
for k, v in self.map.items():
line = line.replace(k, v)
return line
return ReplaceResults(res)
class Root(object):
@cherrypy.expose
def index(self):
return 'HellO WoRlD!'
root_conf = {'wsgi.pipeline': [('replace', Replacer)],
'wsgi.replace.map': {b'L': b'X',
b'l': b'r'},
}
app = cherrypy.Application(Root())
app.wsgiapp.pipeline.append(('changecase', ChangeCase))
app.wsgiapp.config['changecase'] = {'to': 'upper'}
cherrypy.tree.mount(app, config={'/': root_conf})
def test_pipeline(self):
if not cherrypy.server.httpserver:
return self.skip()
self.getPage('/')
# If body is "HEXXO WORXD!", the middleware was applied out of order.
self.assertBody('HERRO WORRD!')
|
import logging
import time
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
WeatherEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import CONF_MODE, TEMP_CELSIUS
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from homeassistant.util import dt as dt_util
from .const import (
ATTRIBUTION,
CONDITION_CLASSES,
COORDINATOR_FORECAST,
DOMAIN,
FORECAST_MODE_DAILY,
FORECAST_MODE_HOURLY,
)
_LOGGER = logging.getLogger(__name__)
def format_condition(condition: str):
"""Return condition from dict CONDITION_CLASSES."""
for key, value in CONDITION_CLASSES.items():
if condition in value:
return key
return condition
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Meteo-France weather platform."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR_FORECAST]
async_add_entities(
[
MeteoFranceWeather(
coordinator,
entry.options.get(CONF_MODE, FORECAST_MODE_DAILY),
)
],
True,
)
_LOGGER.debug(
"Weather entity (%s) added for %s.",
entry.options.get(CONF_MODE, FORECAST_MODE_DAILY),
coordinator.data.position["name"],
)
class MeteoFranceWeather(CoordinatorEntity, WeatherEntity):
"""Representation of a weather condition."""
def __init__(self, coordinator: DataUpdateCoordinator, mode: str):
"""Initialise the platform with a data instance and station name."""
super().__init__(coordinator)
self._city_name = self.coordinator.data.position["name"]
self._mode = mode
self._unique_id = f"{self.coordinator.data.position['lat']},{self.coordinator.data.position['lon']}"
@property
def unique_id(self):
"""Return the unique id of the sensor."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return self._city_name
@property
def condition(self):
"""Return the current condition."""
return format_condition(
self.coordinator.data.current_forecast["weather"]["desc"]
)
@property
def temperature(self):
"""Return the temperature."""
return self.coordinator.data.current_forecast["T"]["value"]
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
return self.coordinator.data.current_forecast["sea_level"]
@property
def humidity(self):
"""Return the humidity."""
return self.coordinator.data.current_forecast["humidity"]
@property
def wind_speed(self):
"""Return the wind speed."""
# convert from API m/s to km/h
return round(self.coordinator.data.current_forecast["wind"]["speed"] * 3.6)
@property
def wind_bearing(self):
"""Return the wind bearing."""
wind_bearing = self.coordinator.data.current_forecast["wind"]["direction"]
if wind_bearing != -1:
return wind_bearing
@property
def forecast(self):
"""Return the forecast."""
forecast_data = []
if self._mode == FORECAST_MODE_HOURLY:
today = time.time()
for forecast in self.coordinator.data.forecast:
# Can have data in the past
if forecast["dt"] < today:
continue
forecast_data.append(
{
ATTR_FORECAST_TIME: dt_util.utc_from_timestamp(
forecast["dt"]
).isoformat(),
ATTR_FORECAST_CONDITION: format_condition(
forecast["weather"]["desc"]
),
ATTR_FORECAST_TEMP: forecast["T"]["value"],
ATTR_FORECAST_PRECIPITATION: forecast["rain"].get("1h"),
ATTR_FORECAST_WIND_SPEED: forecast["wind"]["speed"],
ATTR_FORECAST_WIND_BEARING: forecast["wind"]["direction"]
if forecast["wind"]["direction"] != -1
else None,
}
)
else:
for forecast in self.coordinator.data.daily_forecast:
# stop when we don't have a weather condition (can happen around the last days of the forecast, max 14)
if not forecast.get("weather12H"):
break
forecast_data.append(
{
ATTR_FORECAST_TIME: self.coordinator.data.timestamp_to_locale_time(
forecast["dt"]
),
ATTR_FORECAST_CONDITION: format_condition(
forecast["weather12H"]["desc"]
),
ATTR_FORECAST_TEMP: forecast["T"]["max"],
ATTR_FORECAST_TEMP_LOW: forecast["T"]["min"],
ATTR_FORECAST_PRECIPITATION: forecast["precipitation"]["24h"],
}
)
return forecast_data
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.architectures import arch_ops as ops
from compare_gan.architectures import resnet_ops
from six.moves import range
import tensorflow as tf
class Generator(resnet_ops.ResNetGenerator):
"""ResNet generator, 3 blocks, supporting 48x48 resolution."""
def apply(self, z, y, is_training):
"""Build the generator network for the given inputs.
Args:
z: `Tensor` of shape [batch_size, z_dim] with latent code.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
is_training: boolean, are we in train or eval mode.
Returns:
A tensor of size [batch_size, 48, 48, colors] with values in [0, 1].
"""
ch = 64
colors = self._image_shape[2]
batch_size = z.get_shape().as_list()[0]
magic = [(8, 4), (4, 2), (2, 1)]
output = ops.linear(z, 6 * 6 * 512, scope="fc_noise")
output = tf.reshape(output, [batch_size, 6, 6, 512], name="fc_reshaped")
for block_idx in range(3):
block = self._resnet_block(
name="B{}".format(block_idx + 1),
in_channels=ch * magic[block_idx][0],
out_channels=ch * magic[block_idx][1],
scale="up")
output = block(output, z=z, y=y, is_training=is_training)
output = self.batch_norm(
output, z=z, y=y, is_training=is_training, scope="final_norm")
output = tf.nn.relu(output)
output = ops.conv2d(output, output_dim=colors, k_h=3, k_w=3, d_h=1, d_w=1,
name="final_conv")
return tf.nn.sigmoid(output)
class Discriminator(resnet_ops.ResNetDiscriminator):
"""ResNet discriminator, 4 blocks, suports 48x48 resolution."""
def apply(self, x, y, is_training):
"""Apply the discriminator on a input.
Args:
x: `Tensor` of shape [batch_size, 48, 48, ?] with real or fake images.
y: `Tensor` of shape [batch_size, num_classes] with one hot encoded
labels.
is_training: Boolean, whether the architecture should be constructed for
training or inference.
Returns:
Tuple of 3 Tensors, the final prediction of the discriminator, the logits
before the final output activation function, and the logits from the
second-to-last layer.
"""
resnet_ops.validate_image_inputs(x, validate_power2=False)
colors = x.shape[-1].value
if colors not in [1, 3]:
raise ValueError("Number of color channels unknown: %s" % colors)
ch = 64
block = self._resnet_block(
name="B0", in_channels=colors, out_channels=ch, scale="down")
output = block(x, z=None, y=y, is_training=is_training)
magic = [(1, 2), (2, 4), (4, 8), (8, 16)]
for block_idx in range(4):
block = self._resnet_block(
name="B{}".format(block_idx + 1),
in_channels=ch * magic[block_idx][0],
out_channels=ch * magic[block_idx][1],
scale="down" if block_idx < 3 else "none")
output = block(output, z=None, y=y, is_training=is_training)
output = tf.nn.relu(output)
pre_logits = tf.reduce_mean(output, axis=[1, 2])
out_logit = ops.linear(pre_logits, 1, scope="disc_final_fc",
use_sn=self._spectral_norm)
out = tf.nn.sigmoid(out_logit)
return out, out_logit, pre_logits
|
from pyhap.loader import get_loader
import pytest
from homeassistant.components.alarm_control_panel import DOMAIN
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
SUPPORT_ALARM_TRIGGER,
)
from homeassistant.components.homekit.const import ATTR_VALUE
from homeassistant.components.homekit.type_security_systems import SecuritySystem
from homeassistant.const import (
ATTR_CODE,
ATTR_ENTITY_ID,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
STATE_UNKNOWN,
)
from tests.common import async_mock_service
async def test_switch_set_state(hass, hk_driver, events):
"""Test if accessory and HA are updated accordingly."""
code = "1234"
config = {ATTR_CODE: code}
entity_id = "alarm_control_panel.test"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = SecuritySystem(hass, hk_driver, "SecuritySystem", entity_id, 2, config)
await acc.run_handler()
await hass.async_block_till_done()
assert acc.aid == 2
assert acc.category == 11 # AlarmSystem
assert acc.char_current_state.value == 3
assert acc.char_target_state.value == 3
hass.states.async_set(entity_id, STATE_ALARM_ARMED_AWAY)
await hass.async_block_till_done()
assert acc.char_target_state.value == 1
assert acc.char_current_state.value == 1
hass.states.async_set(entity_id, STATE_ALARM_ARMED_HOME)
await hass.async_block_till_done()
assert acc.char_target_state.value == 0
assert acc.char_current_state.value == 0
hass.states.async_set(entity_id, STATE_ALARM_ARMED_NIGHT)
await hass.async_block_till_done()
assert acc.char_target_state.value == 2
assert acc.char_current_state.value == 2
hass.states.async_set(entity_id, STATE_ALARM_DISARMED)
await hass.async_block_till_done()
assert acc.char_target_state.value == 3
assert acc.char_current_state.value == 3
hass.states.async_set(entity_id, STATE_ALARM_TRIGGERED)
await hass.async_block_till_done()
assert acc.char_target_state.value == 3
assert acc.char_current_state.value == 4
hass.states.async_set(entity_id, STATE_UNKNOWN)
await hass.async_block_till_done()
assert acc.char_target_state.value == 3
assert acc.char_current_state.value == 4
# Set from HomeKit
call_arm_home = async_mock_service(hass, DOMAIN, "alarm_arm_home")
call_arm_away = async_mock_service(hass, DOMAIN, "alarm_arm_away")
call_arm_night = async_mock_service(hass, DOMAIN, "alarm_arm_night")
call_disarm = async_mock_service(hass, DOMAIN, "alarm_disarm")
await hass.async_add_executor_job(acc.char_target_state.client_update_value, 0)
await hass.async_block_till_done()
assert call_arm_home
assert call_arm_home[0].data[ATTR_ENTITY_ID] == entity_id
assert call_arm_home[0].data[ATTR_CODE] == code
assert acc.char_target_state.value == 0
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_target_state.client_update_value, 1)
await hass.async_block_till_done()
assert call_arm_away
assert call_arm_away[0].data[ATTR_ENTITY_ID] == entity_id
assert call_arm_away[0].data[ATTR_CODE] == code
assert acc.char_target_state.value == 1
assert len(events) == 2
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_target_state.client_update_value, 2)
await hass.async_block_till_done()
assert call_arm_night
assert call_arm_night[0].data[ATTR_ENTITY_ID] == entity_id
assert call_arm_night[0].data[ATTR_CODE] == code
assert acc.char_target_state.value == 2
assert len(events) == 3
assert events[-1].data[ATTR_VALUE] is None
await hass.async_add_executor_job(acc.char_target_state.client_update_value, 3)
await hass.async_block_till_done()
assert call_disarm
assert call_disarm[0].data[ATTR_ENTITY_ID] == entity_id
assert call_disarm[0].data[ATTR_CODE] == code
assert acc.char_target_state.value == 3
assert len(events) == 4
assert events[-1].data[ATTR_VALUE] is None
@pytest.mark.parametrize("config", [{}, {ATTR_CODE: None}])
async def test_no_alarm_code(hass, hk_driver, config, events):
"""Test accessory if security_system doesn't require an alarm_code."""
entity_id = "alarm_control_panel.test"
hass.states.async_set(entity_id, None)
await hass.async_block_till_done()
acc = SecuritySystem(hass, hk_driver, "SecuritySystem", entity_id, 2, config)
# Set from HomeKit
call_arm_home = async_mock_service(hass, DOMAIN, "alarm_arm_home")
await hass.async_add_executor_job(acc.char_target_state.client_update_value, 0)
await hass.async_block_till_done()
assert call_arm_home
assert call_arm_home[0].data[ATTR_ENTITY_ID] == entity_id
assert ATTR_CODE not in call_arm_home[0].data
assert acc.char_target_state.value == 0
assert len(events) == 1
assert events[-1].data[ATTR_VALUE] is None
async def test_supported_states(hass, hk_driver, events):
"""Test different supported states."""
code = "1234"
config = {ATTR_CODE: code}
entity_id = "alarm_control_panel.test"
loader = get_loader()
default_current_states = loader.get_char(
"SecuritySystemCurrentState"
).properties.get("ValidValues")
default_target_services = loader.get_char(
"SecuritySystemTargetState"
).properties.get("ValidValues")
# Set up a number of test configurations
test_configs = [
{
"features": SUPPORT_ALARM_ARM_HOME,
"current_values": [
default_current_states["Disarmed"],
default_current_states["AlarmTriggered"],
default_current_states["StayArm"],
],
"target_values": [
default_target_services["Disarm"],
default_target_services["StayArm"],
],
},
{
"features": SUPPORT_ALARM_ARM_AWAY,
"current_values": [
default_current_states["Disarmed"],
default_current_states["AlarmTriggered"],
default_current_states["AwayArm"],
],
"target_values": [
default_target_services["Disarm"],
default_target_services["AwayArm"],
],
},
{
"features": SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY,
"current_values": [
default_current_states["Disarmed"],
default_current_states["AlarmTriggered"],
default_current_states["StayArm"],
default_current_states["AwayArm"],
],
"target_values": [
default_target_services["Disarm"],
default_target_services["StayArm"],
default_target_services["AwayArm"],
],
},
{
"features": SUPPORT_ALARM_ARM_HOME
| SUPPORT_ALARM_ARM_AWAY
| SUPPORT_ALARM_ARM_NIGHT,
"current_values": [
default_current_states["Disarmed"],
default_current_states["AlarmTriggered"],
default_current_states["StayArm"],
default_current_states["AwayArm"],
default_current_states["NightArm"],
],
"target_values": [
default_target_services["Disarm"],
default_target_services["StayArm"],
default_target_services["AwayArm"],
default_target_services["NightArm"],
],
},
{
"features": SUPPORT_ALARM_ARM_HOME
| SUPPORT_ALARM_ARM_AWAY
| SUPPORT_ALARM_ARM_NIGHT
| SUPPORT_ALARM_TRIGGER,
"current_values": [
default_current_states["Disarmed"],
default_current_states["AlarmTriggered"],
default_current_states["StayArm"],
default_current_states["AwayArm"],
default_current_states["NightArm"],
],
"target_values": [
default_target_services["Disarm"],
default_target_services["StayArm"],
default_target_services["AwayArm"],
default_target_services["NightArm"],
],
},
]
for test_config in test_configs:
attrs = {"supported_features": test_config.get("features")}
hass.states.async_set(entity_id, None, attributes=attrs)
await hass.async_block_till_done()
acc = SecuritySystem(hass, hk_driver, "SecuritySystem", entity_id, 2, config)
await acc.run_handler()
await hass.async_block_till_done()
valid_current_values = acc.char_current_state.properties.get("ValidValues")
valid_target_values = acc.char_target_state.properties.get("ValidValues")
for val in valid_current_values.values():
assert val in test_config.get("current_values")
for val in valid_target_values.values():
assert val in test_config.get("target_values")
|