from django import forms
from django.contrib.admin.widgets import RelatedFieldWidgetWrapper
from django.utils.translation import gettext_lazy as _
from mptt.forms import TreeNodeChoiceField
from zinnia.admin.fields import MPTTModelMultipleChoiceField
from zinnia.admin.widgets import MPTTFilteredSelectMultiple
from zinnia.admin.widgets import MiniTextarea
from zinnia.admin.widgets import TagAutoComplete
from zinnia.models.category import Category
from zinnia.models.entry import Entry
class CategoryAdminForm(forms.ModelForm):
"""
Form for Category's Admin.
"""
parent = TreeNodeChoiceField(
label=_('Parent category'),
empty_label=_('No parent category'),
level_indicator='|--', required=False,
queryset=Category.objects.all())
def __init__(self, *args, **kwargs):
super(CategoryAdminForm, self).__init__(*args, **kwargs)
self.fields['parent'].widget = RelatedFieldWidgetWrapper(
self.fields['parent'].widget,
Category.parent.field.remote_field,
self.admin_site)
def clean_parent(self):
"""
        Check that the category is not set as its own parent.
"""
data = self.cleaned_data['parent']
if data == self.instance:
raise forms.ValidationError(
_('A category cannot be parent of itself.'),
code='self_parenting')
return data
class Meta:
"""
CategoryAdminForm's Meta.
"""
model = Category
fields = forms.ALL_FIELDS
class EntryAdminForm(forms.ModelForm):
"""
Form for Entry's Admin.
"""
categories = MPTTModelMultipleChoiceField(
label=_('Categories'), required=False,
queryset=Category.objects.all(),
widget=MPTTFilteredSelectMultiple(_('categories')))
def __init__(self, *args, **kwargs):
super(EntryAdminForm, self).__init__(*args, **kwargs)
self.fields['categories'].widget = RelatedFieldWidgetWrapper(
self.fields['categories'].widget,
Entry.categories.field.remote_field,
self.admin_site)
class Meta:
"""
EntryAdminForm's Meta.
"""
model = Entry
fields = forms.ALL_FIELDS
widgets = {
'tags': TagAutoComplete,
'lead': MiniTextarea,
'excerpt': MiniTextarea,
'image_caption': MiniTextarea,
}
|
import os
import shutil
import tempfile
import unittest
import homeassistant.components.kira as kira
from homeassistant.setup import setup_component
from tests.async_mock import MagicMock, patch
from tests.common import get_test_home_assistant
TEST_CONFIG = {
kira.DOMAIN: {
"sensors": [
{"name": "test_sensor", "host": "127.0.0.1", "port": 34293},
{"name": "second_sensor", "port": 29847},
],
"remotes": [
{"host": "127.0.0.1", "port": 34293},
{"name": "one_more", "host": "127.0.0.1", "port": 29847},
],
}
}
KIRA_CODES = """
- name: test
code: "K 00FF"
- invalid: not_a_real_code
"""
class TestKiraSetup(unittest.TestCase):
"""Test class for kira."""
# pylint: disable=invalid-name
def setUp(self):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
_base_mock = MagicMock()
pykira = _base_mock.pykira
pykira.__file__ = "test"
self._module_patcher = patch.dict("sys.modules", {"pykira": pykira})
self._module_patcher.start()
self.work_dir = tempfile.mkdtemp()
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Stop everything that was started."""
self.hass.stop()
self._module_patcher.stop()
shutil.rmtree(self.work_dir, ignore_errors=True)
def test_kira_empty_config(self):
"""Kira component should load a default sensor."""
setup_component(self.hass, kira.DOMAIN, {})
assert len(self.hass.data[kira.DOMAIN]["sensor"]) == 1
def test_kira_setup(self):
"""Ensure platforms are loaded correctly."""
setup_component(self.hass, kira.DOMAIN, TEST_CONFIG)
assert len(self.hass.data[kira.DOMAIN]["sensor"]) == 2
assert sorted(self.hass.data[kira.DOMAIN]["sensor"].keys()) == [
"kira",
"kira_1",
]
assert len(self.hass.data[kira.DOMAIN]["remote"]) == 2
assert sorted(self.hass.data[kira.DOMAIN]["remote"].keys()) == [
"kira",
"kira_1",
]
def test_kira_creates_codes(self):
"""Kira module should create codes file if missing."""
code_path = os.path.join(self.work_dir, "codes.yaml")
kira.load_codes(code_path)
assert os.path.exists(code_path), "Kira component didn't create codes file"
def test_load_codes(self):
"""Kira should ignore invalid codes."""
code_path = os.path.join(self.work_dir, "codes.yaml")
with open(code_path, "w") as code_file:
code_file.write(KIRA_CODES)
res = kira.load_codes(code_path)
assert len(res) == 1, "Expected exactly 1 valid Kira code"
|
import pytest
from vcr.serializers.jsonserializer import serialize
from vcr.request import Request
def test_serialize_binary():
request = Request(method="GET", uri="http://localhost/", body="", headers={})
cassette = {"requests": [request], "responses": [{"body": b"\x8c"}]}
with pytest.raises(Exception) as e:
serialize(cassette)
assert (
        str(e.value)
== "Error serializing cassette to JSON. Does this \
HTTP interaction contain binary data? If so, use a different \
serializer (like the yaml serializer) for this request"
)
|
from pprint import pprint
import requests
from .util import get_lokalise_token
def get_api(project_id, debug=False) -> "Lokalise":
"""Get Lokalise API."""
return Lokalise(project_id, get_lokalise_token(), debug)
class Lokalise:
"""Lokalise API."""
def __init__(self, project_id, token, debug):
"""Initialize Lokalise API."""
self.project_id = project_id
self.token = token
self.debug = debug
def request(self, method, path, data):
"""Make a request to the Lokalise API."""
method = method.upper()
kwargs = {"headers": {"x-api-token": self.token}}
if method == "GET":
kwargs["params"] = data
else:
kwargs["json"] = data
if self.debug:
print(method, f"{self.project_id}/{path}", data)
req = requests.request(
method,
f"https://api.lokalise.com/api2/projects/{self.project_id}/{path}",
**kwargs,
)
req.raise_for_status()
if self.debug:
pprint(req.json())
print()
return req.json()
def keys_list(self, params={}):
"""List keys.
https://app.lokalise.com/api2docs/curl/#transition-list-all-keys-get
"""
return self.request("GET", "keys", params)["keys"]
def keys_create(self, keys):
"""Create keys.
https://app.lokalise.com/api2docs/curl/#transition-create-keys-post
"""
return self.request("POST", "keys", {"keys": keys})["keys"]
def keys_delete_multiple(self, key_ids):
"""Delete multiple keys.
https://app.lokalise.com/api2docs/curl/#transition-delete-multiple-keys-delete
"""
return self.request("DELETE", "keys", {"keys": key_ids})
def keys_bulk_update(self, updates):
"""Update multiple keys.
https://app.lokalise.com/api2docs/curl/#transition-bulk-update-put
"""
return self.request("PUT", "keys", {"keys": updates})["keys"]
def translations_list(self, params={}):
"""List translations.
https://app.lokalise.com/api2docs/curl/#transition-list-all-translations-get
"""
return self.request("GET", "translations", params)["translations"]
def languages_list(self, params={}):
"""List languages.
https://app.lokalise.com/api2docs/curl/#transition-list-project-languages-get
"""
return self.request("GET", "languages", params)["languages"]
|
import json
import logging
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA, SOURCE_TYPE_ROUTER
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import HTTP_BAD_REQUEST, HTTP_UNPROCESSABLE_ENTITY
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
CONF_VALIDATOR = "validator"
CONF_SECRET = "secret"
URL = "/api/meraki"
VERSION = "2.0"
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_VALIDATOR): cv.string, vol.Required(CONF_SECRET): cv.string}
)
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Set up an endpoint for the Meraki tracker."""
hass.http.register_view(MerakiView(config, async_see))
return True
class MerakiView(HomeAssistantView):
"""View to handle Meraki requests."""
url = URL
name = "api:meraki"
def __init__(self, config, async_see):
"""Initialize Meraki URL endpoints."""
self.async_see = async_see
self.validator = config[CONF_VALIDATOR]
self.secret = config[CONF_SECRET]
async def get(self, request):
"""Meraki message received as GET."""
return self.validator
async def post(self, request):
"""Meraki CMX message received."""
try:
data = await request.json()
except ValueError:
return self.json_message("Invalid JSON", HTTP_BAD_REQUEST)
_LOGGER.debug("Meraki Data from Post: %s", json.dumps(data))
if not data.get("secret", False):
_LOGGER.error("secret invalid")
return self.json_message("No secret", HTTP_UNPROCESSABLE_ENTITY)
if data["secret"] != self.secret:
_LOGGER.error("Invalid Secret received from Meraki")
return self.json_message("Invalid secret", HTTP_UNPROCESSABLE_ENTITY)
if data["version"] != VERSION:
_LOGGER.error("Invalid API version: %s", data["version"])
return self.json_message("Invalid version", HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.debug("Valid Secret")
if data["type"] not in ("DevicesSeen", "BluetoothDevicesSeen"):
_LOGGER.error("Unknown Device %s", data["type"])
return self.json_message("Invalid device type", HTTP_UNPROCESSABLE_ENTITY)
_LOGGER.debug("Processing %s", data["type"])
if not data["data"]["observations"]:
_LOGGER.debug("No observations found")
return
self._handle(request.app["hass"], data)
@callback
def _handle(self, hass, data):
for i in data["data"]["observations"]:
data["data"]["secret"] = "hidden"
lat = i["location"]["lat"]
lng = i["location"]["lng"]
try:
accuracy = int(float(i["location"]["unc"]))
except ValueError:
accuracy = 0
mac = i["clientMac"]
_LOGGER.debug("clientMac: %s", mac)
if lat == "NaN" or lng == "NaN":
_LOGGER.debug("No coordinates received, skipping location for: %s", mac)
gps_location = None
accuracy = None
else:
gps_location = (lat, lng)
attrs = {}
if i.get("os", False):
attrs["os"] = i["os"]
if i.get("manufacturer", False):
attrs["manufacturer"] = i["manufacturer"]
if i.get("ipv4", False):
attrs["ipv4"] = i["ipv4"]
if i.get("ipv6", False):
attrs["ipv6"] = i["ipv6"]
if i.get("seenTime", False):
attrs["seenTime"] = i["seenTime"]
if i.get("ssid", False):
attrs["ssid"] = i["ssid"]
hass.async_create_task(
self.async_see(
gps=gps_location,
mac=mac,
source_type=SOURCE_TYPE_ROUTER,
gps_accuracy=accuracy,
attributes=attrs,
)
)
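# Illustrative example payload (not from the original file), shaped the way the
# post()/_handle() methods above expect a Meraki CMX message to look. Values
# are placeholders; a real message carries many more fields.
EXAMPLE_CMX_MESSAGE = {
    "secret": "<shared secret>",
    "version": VERSION,
    "type": "DevicesSeen",
    "data": {
        "observations": [
            {
                "clientMac": "aa:bb:cc:dd:ee:ff",
                "location": {"lat": 51.5, "lng": -0.12, "unc": 1.5},
                "seenTime": "2020-01-01T00:00:00Z",
                "ssid": "example",
            }
        ]
    },
}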
|
import sys
import traceback
#: safe_str takes encoding from this file by default.
#: :func:`set_default_encoding_file` can be used to set the
#: default output file.
default_encoding_file = None
def set_default_encoding_file(file):
"""Set file used to get codec information."""
global default_encoding_file
default_encoding_file = file
def get_default_encoding_file():
"""Get file used to get codec information."""
return default_encoding_file
if sys.platform.startswith('java'): # pragma: no cover
def default_encoding(file=None):
"""Get default encoding."""
return 'utf-8'
else:
def default_encoding(file=None): # noqa
"""Get default encoding."""
file = file or get_default_encoding_file()
return getattr(file, 'encoding', None) or sys.getfilesystemencoding()
def str_to_bytes(s):
"""Convert str to bytes."""
if isinstance(s, str):
return s.encode()
return s
def bytes_to_str(s):
"""Convert bytes to str."""
if isinstance(s, bytes):
return s.decode(errors='replace')
return s
def from_utf8(s, *args, **kwargs):
"""Get str from utf-8 encoding."""
return s
def ensure_bytes(s):
"""Ensure s is bytes, not str."""
if not isinstance(s, bytes):
return str_to_bytes(s)
return s
def default_encode(obj):
"""Encode using default encoding."""
return obj
def safe_str(s, errors='replace'):
"""Safe form of str(), void of unicode errors."""
s = bytes_to_str(s)
if not isinstance(s, (str, bytes)):
return safe_repr(s, errors)
return _safe_str(s, errors)
def _safe_str(s, errors='replace', file=None):
if isinstance(s, str):
return s
try:
return str(s)
except Exception as exc:
return '<Unrepresentable {!r}: {!r} {!r}>'.format(
type(s), exc, '\n'.join(traceback.format_stack()))
def safe_repr(o, errors='replace'):
"""Safe form of repr, void of Unicode errors."""
try:
return repr(o)
except Exception:
return _safe_str(o, errors)
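# Small usage sketch (illustrative, not part of the original module): safe_str
# and safe_repr never raise on awkward inputs.
if __name__ == '__main__':
    print(safe_str(b'\xff\xfehello'))  # bytes are decoded with errors='replace'
    print(safe_str(object()))          # non-str objects fall back to safe_repr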
|
import tensorflow as tf
from tensorflow.keras.layers import Layer # type: ignore
from tensorflow.keras import activations
from tensorflow.keras import initializers
from typing import List, Optional, Text, Tuple
import tensornetwork as tn
from tensornetwork import Node
import numpy as np
import math
# pytype: disable=module-attr
@tf.keras.utils.register_keras_serializable(package='tensornetwork')
# pytype: enable=module-attr
class DenseCondenser(Layer):
"""Condenser TN layer. Greatly reduces dimensionality of input.
Used in conjunction with DenseEntangler to achieve very large hidden layers.
This layer can take an input shape of arbitrary dimension, with the first
dimension expected to be a batch dimension. The weight matrix will be
constructed from and applied to the last input dimension.
Example:
::
# as first layer in a sequential model:
model = Sequential()
model.add(
      DenseCondenser(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu',
input_shape=(1024,)))
# now the model will take as input arrays of shape (*, 1024)
# and output arrays of shape (*, 128).
# After the first layer, you don't need to specify
# the size of the input anymore:
model.add(
DenseCondenser(exp_base=2,
num_nodes=3,
use_bias=True,
activation='relu'))
Args:
exp_base: Positive integer, base of the dimensionality reduction term.
num_nodes: Positive integer, number of nodes in condenser.
The output dim will be input_shape[-1] // (exp_base**num_nodes)
so increasing num_nodes will decrease the output dim exponentially.
activation: Activation function to use.
If you don't specify anything, no activation is applied
(ie. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the weight matrices.
bias_initializer: Initializer for the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., input_shape[-1] //
(exp_base**num_nodes))`.
"""
def __init__(self,
exp_base: int,
num_nodes: int,
use_bias: Optional[bool] = True,
activation: Optional[Text] = None,
kernel_initializer: Optional[Text] = 'glorot_uniform',
bias_initializer: Optional[Text] = 'zeros',
**kwargs) -> None:
if 'input_shape' not in kwargs and 'input_dim' in kwargs:
kwargs['input_shape'] = (kwargs.pop('input_dim'),)
super().__init__(**kwargs)
self.exp_base = exp_base
self.num_nodes = num_nodes
self.nodes = []
self.use_bias = use_bias
self.activation = activations.get(activation)
self.kernel_initializer = initializers.get(kernel_initializer)
self.bias_initializer = initializers.get(bias_initializer)
def build(self, input_shape: List[int]) -> None:
# Disable the attribute-defined-outside-init violations in this function
# pylint: disable=attribute-defined-outside-init
if input_shape[-1] is None:
raise ValueError('The last dimension of the inputs to `Dense` '
'should be defined. Found `None`.')
super().build(input_shape)
self.output_dim = input_shape[-1] // (self.exp_base**self.num_nodes)
for i in range(self.num_nodes):
self.nodes.append(
self.add_weight(name=f'node_{i}',
shape=(self.output_dim, self.exp_base,
self.output_dim),
trainable=True,
initializer=self.kernel_initializer))
self.bias_var = self.add_weight(
name='bias',
shape=(self.output_dim,),
trainable=True,
initializer=self.bias_initializer) if self.use_bias else None
def call(self, inputs: tf.Tensor, **kwargs) -> tf.Tensor: # pylint: disable=unused-argument
def f(x: tf.Tensor, nodes: List[Node], output_dim: int, exp_base: int,
num_nodes: int, use_bias: bool, bias_var: tf.Tensor) -> tf.Tensor:
input_reshaped = tf.reshape(x, (exp_base,) * num_nodes + (output_dim,))
state_node = tn.Node(input_reshaped, name='xnode', backend="tensorflow")
# The TN will be connected like this:
# xxxxxxxxx
# | | | |
# | | 11111
# | | |
# | 22222
# | |
# 33333
# |
# |
for i in range(num_nodes):
op = tn.Node(nodes[i], name=f'node_{i}', backend="tensorflow")
tn.connect(state_node.edges[-1], op[0])
tn.connect(state_node.edges[-2], op[1])
state_node = tn.contract_between(state_node, op)
result = tf.reshape(state_node.tensor, (-1,))
if use_bias:
result += bias_var
return result
input_shape = list(inputs.shape)
inputs = tf.reshape(inputs, (-1, input_shape[-1]))
    result = tf.vectorized_map(
        lambda vec: f(vec, self.nodes, self.output_dim, self.exp_base,
                      self.num_nodes, self.use_bias, self.bias_var), inputs)
if self.activation is not None:
result = self.activation(result)
result = tf.reshape(result, [-1] + input_shape[1:-1] + [self.output_dim,])
return result
def compute_output_shape(self, input_shape: List[int]) -> Tuple[int, int]:
return tuple(input_shape[0:-1]) + (self.output_dim,)
def get_config(self) -> dict:
"""Returns the config of the layer.
The same layer can be reinstantiated later
(without its trained weights) from this configuration.
Returns:
Python dictionary containing the configuration of the layer.
"""
config = {}
# Include the Condenser-specific arguments
args = ['exp_base', 'num_nodes', 'use_bias']
for arg in args:
config[arg] = getattr(self, arg)
# Serialize the activation
config['activation'] = activations.serialize(getattr(self, 'activation'))
# Serialize the initializers
initializers_list = ['kernel_initializer', 'bias_initializer']
for initializer_arg in initializers_list:
config[initializer_arg] = initializers.serialize(
getattr(self, initializer_arg))
# Get base config
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
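# Illustrative helper (an assumption, not part of the original layer): the
# dimensionality-reduction arithmetic documented in the class docstring above.
def condensed_dim(input_dim: int, exp_base: int, num_nodes: int) -> int:
  """Output dimension a DenseCondenser produces for `input_dim`-sized inputs."""
  return input_dim // exp_base**num_nodes

if __name__ == '__main__':
  # The docstring example: (*, 1024) with exp_base=2, num_nodes=3 -> (*, 128).
  assert condensed_dim(1024, exp_base=2, num_nodes=3) == 128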
|
from datetime import timedelta
import logging
import requests
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
import homeassistant.util.dt as dt_util
from . import DOMAIN
from .entity import RingEntityMixin
_LOGGER = logging.getLogger(__name__)
SIREN_ICON = "mdi:alarm-bell"
# It takes a few seconds for the API to correctly return an update indicating
# that the changes have been made. Once we request a change (i.e. a light
# being turned on) we simply wait for this time delta before we allow
# updates to take place.
SKIP_UPDATES_DELAY = timedelta(seconds=5)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Create the switches for the Ring devices."""
devices = hass.data[DOMAIN][config_entry.entry_id]["devices"]
switches = []
for device in devices["stickup_cams"]:
if device.has_capability("siren"):
switches.append(SirenSwitch(config_entry.entry_id, device))
async_add_entities(switches)
class BaseRingSwitch(RingEntityMixin, SwitchEntity):
"""Represents a switch for controlling an aspect of a ring device."""
def __init__(self, config_entry_id, device, device_type):
"""Initialize the switch."""
super().__init__(config_entry_id, device)
self._device_type = device_type
self._unique_id = f"{self._device.id}-{self._device_type}"
@property
def name(self):
"""Name of the device."""
return f"{self._device.name} {self._device_type}"
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
class SirenSwitch(BaseRingSwitch):
"""Creates a switch to turn the ring cameras siren on and off."""
def __init__(self, config_entry_id, device):
"""Initialize the switch for a device with a siren."""
super().__init__(config_entry_id, device, "siren")
self._no_updates_until = dt_util.utcnow()
self._siren_on = device.siren > 0
@callback
def _update_callback(self):
"""Call update method."""
if self._no_updates_until > dt_util.utcnow():
return
self._siren_on = self._device.siren > 0
self.async_write_ha_state()
def _set_switch(self, new_state):
"""Update switch state, and causes Home Assistant to correctly update."""
try:
self._device.siren = new_state
except requests.Timeout:
_LOGGER.error("Time out setting %s siren to %s", self.entity_id, new_state)
return
self._siren_on = new_state > 0
self._no_updates_until = dt_util.utcnow() + SKIP_UPDATES_DELAY
self.schedule_update_ha_state()
@property
def is_on(self):
"""If the switch is currently on or off."""
return self._siren_on
def turn_on(self, **kwargs):
"""Turn the siren on for 30 seconds."""
self._set_switch(1)
def turn_off(self, **kwargs):
"""Turn the siren off."""
self._set_switch(0)
@property
def icon(self):
"""Return the icon."""
return SIREN_ICON
|
import logging
from multiprocessing.pool import ThreadPool
try:
from lz4.block import compress as lz4_compress, decompress as lz4_decompress
lz4_compressHC = lambda _str: lz4_compress(_str, mode='high_compression')
except ImportError as e:
from lz4 import compress as lz4_compress, compressHC as lz4_compressHC, decompress as lz4_decompress
# ENABLE_PARALLEL mutated in global_scope. Do not remove.
from ._config import ENABLE_PARALLEL, LZ4_HIGH_COMPRESSION, LZ4_WORKERS, LZ4_N_PARALLEL, LZ4_MINSZ_PARALLEL, \
BENCHMARK_MODE # noqa # pylint: disable=unused-import
logger = logging.getLogger(__name__)
_compress_thread_pool = None
def enable_parallel_lz4(mode):
"""
Set the global multithread compression mode
Parameters
----------
mode: `bool`
True: Use parallel compression. False: Use sequential compression
"""
global ENABLE_PARALLEL
ENABLE_PARALLEL = bool(mode)
logger.info("Setting parallelisation mode to {}".format("multi-threaded" if mode else "single-threaded"))
def set_compression_pool_size(pool_size):
"""
Set the size of the compression workers thread pool.
If the pool is already created, it waits until all jobs are finished, and then proceeds with setting the new size.
Parameters
----------
pool_size : `int`
The size of the pool (must be a positive integer)
Returns
-------
`None`
"""
pool_size = int(pool_size)
if pool_size < 1:
raise ValueError("The compression thread pool size cannot be of size {}".format(pool_size))
global _compress_thread_pool
if _compress_thread_pool is not None:
_compress_thread_pool.close()
_compress_thread_pool.join()
_compress_thread_pool = ThreadPool(pool_size)
def compress_array(str_list, withHC=LZ4_HIGH_COMPRESSION):
"""
Compress an array of strings
Parameters
----------
str_list: `list[str]`
The input list of strings which need to be compressed.
withHC: `bool`
This flag controls whether lz4HC will be used.
Returns
-------
    `list[str]`
The list of the compressed strings.
"""
global _compress_thread_pool
if not str_list:
return str_list
do_compress = lz4_compressHC if withHC else lz4_compress
def can_parallelize_strlist(strlist):
return len(strlist) > LZ4_N_PARALLEL and len(strlist[0]) > LZ4_MINSZ_PARALLEL
use_parallel = (ENABLE_PARALLEL and withHC) or can_parallelize_strlist(str_list)
if BENCHMARK_MODE or use_parallel:
if _compress_thread_pool is None:
_compress_thread_pool = ThreadPool(LZ4_WORKERS)
return _compress_thread_pool.map(do_compress, str_list)
return [do_compress(s) for s in str_list]
def compress(_str):
"""
Compress a string
By default LZ4 mode is standard in interactive mode,
    and high compression in applications/scripts
"""
return lz4_compress(_str)
def compressHC(_str):
"""
HC compression
"""
return lz4_compressHC(_str)
def compressHC_array(str_list):
"""
HC compression
"""
return compress_array(str_list, withHC=True)
def decompress(_str):
"""
Decompress a string
"""
return lz4_decompress(_str)
def decompress_array(str_list):
"""
Decompress a list of strings
"""
global _compress_thread_pool
if not str_list:
return str_list
if not ENABLE_PARALLEL or len(str_list) <= LZ4_N_PARALLEL:
return [lz4_decompress(chunk) for chunk in str_list]
if _compress_thread_pool is None:
_compress_thread_pool = ThreadPool(LZ4_WORKERS)
return _compress_thread_pool.map(lz4_decompress, str_list)
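# Minimal round-trip sketch (illustrative, assuming the lz4 package imported
# above is installed): compress_array and decompress_array invert each other.
if __name__ == '__main__':
    chunks = [b'hello world' * 100, b'arctic' * 50]
    assert decompress_array(compress_array(chunks, withHC=False)) == chunks
    print('lz4 round-trip OK')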
|
from functools import wraps
from typing import Callable, Dict, List, Optional, cast
from .const import SUBCAT_ALL
from .models import PermissionLookup
from .types import CategoryType, SubCategoryDict, ValueType
LookupFunc = Callable[[PermissionLookup, SubCategoryDict, str], Optional[ValueType]]
SubCatLookupType = Dict[str, LookupFunc]
def lookup_all(
perm_lookup: PermissionLookup, lookup_dict: SubCategoryDict, object_id: str
) -> ValueType:
"""Look up permission for all."""
# In case of ALL category, lookup_dict IS the schema.
return cast(ValueType, lookup_dict)
def compile_policy(
policy: CategoryType, subcategories: SubCatLookupType, perm_lookup: PermissionLookup
) -> Callable[[str, str], bool]:
"""Compile policy into a function that tests policy.
Subcategories are mapping key -> lookup function, ordered by highest
priority first.
"""
# None, False, empty dict
if not policy:
def apply_policy_deny_all(entity_id: str, key: str) -> bool:
"""Decline all."""
return False
return apply_policy_deny_all
if policy is True:
def apply_policy_allow_all(entity_id: str, key: str) -> bool:
"""Approve all."""
return True
return apply_policy_allow_all
assert isinstance(policy, dict)
funcs: List[Callable[[str, str], Optional[bool]]] = []
for key, lookup_func in subcategories.items():
lookup_value = policy.get(key)
# If any lookup value is `True`, it will always be positive
if isinstance(lookup_value, bool):
return lambda object_id, key: True
if lookup_value is not None:
funcs.append(_gen_dict_test_func(perm_lookup, lookup_func, lookup_value))
if len(funcs) == 1:
func = funcs[0]
@wraps(func)
def apply_policy_func(object_id: str, key: str) -> bool:
"""Apply a single policy function."""
return func(object_id, key) is True
return apply_policy_func
def apply_policy_funcs(object_id: str, key: str) -> bool:
"""Apply several policy functions."""
for func in funcs:
result = func(object_id, key)
if result is not None:
return result
return False
return apply_policy_funcs
def _gen_dict_test_func(
perm_lookup: PermissionLookup, lookup_func: LookupFunc, lookup_dict: SubCategoryDict
) -> Callable[[str, str], Optional[bool]]:
"""Generate a lookup function."""
def test_value(object_id: str, key: str) -> Optional[bool]:
"""Test if permission is allowed based on the keys."""
schema: ValueType = lookup_func(perm_lookup, lookup_dict, object_id)
if schema is None or isinstance(schema, bool):
return schema
assert isinstance(schema, dict)
return schema.get(key)
return test_value
def test_all(policy: CategoryType, key: str) -> bool:
"""Test if a policy has an ALL access for a specific key."""
if not isinstance(policy, dict):
return bool(policy)
all_policy = policy.get(SUBCAT_ALL)
if not isinstance(all_policy, dict):
return bool(all_policy)
return all_policy.get(key, False)
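# Illustrative sketch (not part of the original module): compiling a small
# policy using the ALL subcategory. lookup_all ignores perm_lookup, so None is
# passed here purely for demonstration.
if __name__ == "__main__":
    subcats: SubCatLookupType = {SUBCAT_ALL: lookup_all}
    check = compile_policy({SUBCAT_ALL: {"read": True}}, subcats, None)
    print(check("light.kitchen", "read"))     # True
    print(check("light.kitchen", "control"))  # False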
|
import os
import os.path
import functools
import contextlib
import html
from typing import Any, Callable, FrozenSet, Iterator, List, Set, Tuple
import jinja2
import jinja2.nodes
from PyQt5.QtCore import QUrl
from qutebrowser.utils import utils, urlutils, log, qtutils, javascript
from qutebrowser.misc import debugcachestats
html_fallback = """
<!DOCTYPE html>
<html>
<head>
<meta charset="utf-8">
<title>Error while loading template</title>
</head>
<body>
<p><span style="font-size:120%;color:red">
The %FILE% template could not be found!<br>
Please check your qutebrowser installation
</span><br>
%ERROR%
</p>
</body>
</html>
"""
class Loader(jinja2.BaseLoader):
"""Jinja loader which uses utils.read_file to load templates.
Attributes:
_subdir: The subdirectory to find templates in.
"""
def __init__(self, subdir: str) -> None:
self._subdir = subdir
def get_source(
self,
_env: jinja2.Environment,
template: str
) -> Tuple[str, str, Callable[[], bool]]:
path = os.path.join(self._subdir, template)
try:
source = utils.read_file(path)
except OSError as e:
source = html_fallback.replace("%ERROR%", html.escape(str(e)))
source = source.replace("%FILE%", html.escape(template))
log.misc.exception("The {} template could not be loaded from {}"
.format(template, path))
# Currently we don't implement auto-reloading, so we always return True
# for up-to-date.
return source, path, lambda: True
class Environment(jinja2.Environment):
"""Our own jinja environment which is more strict."""
def __init__(self) -> None:
super().__init__(loader=Loader('html'),
autoescape=lambda _name: self._autoescape,
undefined=jinja2.StrictUndefined)
self.globals['resource_url'] = self._resource_url
self.globals['file_url'] = urlutils.file_url
self.globals['data_url'] = self._data_url
self.globals['qcolor_to_qsscolor'] = qtutils.qcolor_to_qsscolor
self.filters['js_string_escape'] = javascript.string_escape
self._autoescape = True
@contextlib.contextmanager
def no_autoescape(self) -> Iterator[None]:
"""Context manager to temporarily turn off autoescaping."""
self._autoescape = False
yield
self._autoescape = True
def _resource_url(self, path: str) -> str:
"""Load images from a relative path (to qutebrowser).
Arguments:
path: The relative path to the image
"""
image = utils.resource_filename(path)
url = QUrl.fromLocalFile(image)
urlstr = url.toString(QUrl.FullyEncoded) # type: ignore[arg-type]
return urlstr
def _data_url(self, path: str) -> str:
"""Get a data: url for the broken qutebrowser logo."""
data = utils.read_file(path, binary=True)
filename = utils.resource_filename(path)
mimetype = utils.guess_mimetype(filename)
return urlutils.data_url(mimetype, data).toString()
def getattr(self, obj: Any, attribute: str) -> Any:
"""Override jinja's getattr() to be less clever.
This means it doesn't fall back to __getitem__, and it doesn't hide
AttributeError.
"""
return getattr(obj, attribute)
def render(template: str, **kwargs: Any) -> str:
"""Render the given template and pass the given arguments to it."""
return environment.get_template(template).render(**kwargs)
environment = Environment()
js_environment = jinja2.Environment(loader=Loader('javascript'))
@debugcachestats.register()
@functools.lru_cache()
def template_config_variables(template: str) -> FrozenSet[str]:
"""Return the config variables used in the template."""
    unvisited_nodes = [environment.parse(template)]
    result: Set[str] = set()
    while unvisited_nodes:
        node = unvisited_nodes.pop()
        if not isinstance(node, jinja2.nodes.Getattr):
            unvisited_nodes.extend(node.iter_child_nodes())
continue
# List of attribute names in reverse order.
# For example it's ['ab', 'c', 'd'] for 'conf.d.c.ab'.
attrlist: List[str] = []
while isinstance(node, jinja2.nodes.Getattr):
attrlist.append(node.attr) # type: ignore[attr-defined]
node = node.node # type: ignore[attr-defined]
if isinstance(node, jinja2.nodes.Name):
if node.name == 'conf': # type: ignore[attr-defined]
result.add('.'.join(reversed(attrlist)))
# otherwise, the node is a Name node so it doesn't have any
# child nodes
else:
            unvisited_nodes.append(node)
from qutebrowser.config import config
for option in result:
config.instance.ensure_has_opt(option)
return frozenset(result)
|
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.forked_daapd.const import (
CONF_LIBRESPOT_JAVA_PORT,
CONF_MAX_PLAYLISTS,
CONF_TTS_PAUSE_TIME,
CONF_TTS_VOLUME,
DOMAIN,
)
from homeassistant.config_entries import (
CONN_CLASS_LOCAL_PUSH,
SOURCE_USER,
SOURCE_ZEROCONF,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT
from tests.async_mock import AsyncMock, patch
from tests.common import MockConfigEntry
SAMPLE_CONFIG = {
"websocket_port": 3688,
"version": "25.0",
"buildoptions": [
"ffmpeg",
"iTunes XML",
"Spotify",
"LastFM",
"MPD",
"Device verification",
"Websockets",
"ALSA",
],
}
@pytest.fixture(name="config_entry")
def config_entry_fixture():
"""Create hass config_entry fixture."""
data = {
CONF_HOST: "192.168.1.1",
CONF_PORT: "2345",
CONF_PASSWORD: "",
}
return MockConfigEntry(
version=1,
domain=DOMAIN,
title="",
data=data,
options={},
system_options={},
source=SOURCE_USER,
connection_class=CONN_CLASS_LOCAL_PUSH,
entry_id=1,
)
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
async def test_config_flow(hass, config_entry):
"""Test that the user step works."""
with patch(
"homeassistant.components.forked_daapd.config_flow.ForkedDaapdAPI.test_connection",
new=AsyncMock(),
) as mock_test_connection, patch(
"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI.get_request",
autospec=True,
) as mock_get_request:
mock_get_request.return_value = SAMPLE_CONFIG
mock_test_connection.return_value = ["ok", "My Music on myhost"]
config_data = config_entry.data
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=config_data
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "My Music on myhost"
assert result["data"][CONF_HOST] == config_data[CONF_HOST]
assert result["data"][CONF_PORT] == config_data[CONF_PORT]
assert result["data"][CONF_PASSWORD] == config_data[CONF_PASSWORD]
# Also test that creating a new entry with the same host aborts
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
data=config_entry.data,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_zeroconf_updates_title(hass, config_entry):
"""Test that zeroconf updates title and aborts with same host."""
MockConfigEntry(domain=DOMAIN, data={CONF_HOST: "different host"}).add_to_hass(hass)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
discovery_info = {
"host": "192.168.1.1",
"port": 23,
"properties": {"mtd-version": "27.0", "Machine Name": "zeroconf_test"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert config_entry.title == "zeroconf_test"
assert len(hass.config_entries.async_entries(DOMAIN)) == 2
async def test_config_flow_no_websocket(hass, config_entry):
"""Test config flow setup without websocket enabled on server."""
with patch(
"homeassistant.components.forked_daapd.config_flow.ForkedDaapdAPI.test_connection",
new=AsyncMock(),
) as mock_test_connection:
# test invalid config data
mock_test_connection.return_value = ["websocket_not_enabled"]
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=config_entry.data
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_config_flow_zeroconf_invalid(hass):
"""Test that an invalid zeroconf entry doesn't work."""
# test with no discovery properties
discovery_info = {"host": "127.0.0.1", "port": 23}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
# test with forked-daapd version < 27
discovery_info = {
"host": "127.0.0.1",
"port": 23,
"properties": {"mtd-version": "26.3", "Machine Name": "forked-daapd"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
# test with verbose mtd-version from Firefly
discovery_info = {
"host": "127.0.0.1",
"port": 23,
"properties": {"mtd-version": "0.2.4.1", "Machine Name": "firefly"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
# test with svn mtd-version from Firefly
discovery_info = {
"host": "127.0.0.1",
"port": 23,
"properties": {"mtd-version": "svn-1676", "Machine Name": "firefly"},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
) # doesn't create the entry, tries to show form but gets abort
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "not_forked_daapd"
async def test_config_flow_zeroconf_valid(hass):
"""Test that a valid zeroconf entry works."""
discovery_info = {
"host": "192.168.1.1",
"port": 23,
"properties": {
"mtd-version": "27.0",
"Machine Name": "zeroconf_test",
"Machine ID": "5E55EEFF",
},
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}, data=discovery_info
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_options_flow(hass, config_entry):
"""Test config flow options."""
with patch(
"homeassistant.components.forked_daapd.media_player.ForkedDaapdAPI.get_request",
autospec=True,
) as mock_get_request:
mock_get_request.return_value = SAMPLE_CONFIG
config_entry.add_to_hass(hass)
await config_entry.async_setup(hass)
await hass.async_block_till_done()
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.options.async_configure(
result["flow_id"],
user_input={
CONF_TTS_PAUSE_TIME: 0.05,
CONF_TTS_VOLUME: 0.8,
CONF_LIBRESPOT_JAVA_PORT: 0,
CONF_MAX_PLAYLISTS: 8,
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
|
import pytest
from homeassistant.components.local_ip import DOMAIN
from homeassistant.setup import async_setup_component
from homeassistant.util import get_local_ip
@pytest.fixture(name="config")
def config_fixture():
"""Create hass config fixture."""
return {DOMAIN: {}}
async def test_basic_setup(hass, config):
"""Test component setup creates entry from config."""
assert await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
local_ip = await hass.async_add_executor_job(get_local_ip)
state = hass.states.get(f"sensor.{DOMAIN}")
assert state
assert state.state == local_ip
|
import os
import stat
import sys
from subprocess import PIPE
from subprocess import Popen
from catkin.find_in_workspaces import find_in_workspaces as catkin_find
import roslib.manifest # noqa: F401
import rospkg
SRC_DIR = 'src'
# aliases
ROS_PACKAGE_PATH = rospkg.environment.ROS_PACKAGE_PATH
ROS_ROOT = rospkg.environment.ROS_ROOT
class ROSPkgException(Exception):
"""
Base class of package-related errors.
"""
pass
class InvalidROSPkgException(ROSPkgException):
"""
Exception that indicates that a ROS package does not exist
"""
pass
class MultipleNodesException(ROSPkgException):
"""
Exception that indicates that multiple ROS nodes by the same name are in the same package.
"""
pass
# TODO: go through the code and eliminate unused methods -- there's far too many combos here
MANIFEST_FILE = 'manifest.xml'
PACKAGE_FILE = 'package.xml'
#
# Map package/directory structure
#
def get_dir_pkg(d):
"""
Get the package that the directory is contained within. This is
determined by finding the nearest parent manifest.xml file. This
isn't 100% reliable, but symlinks can fool any heuristic that
relies on ROS_ROOT.
@param d: directory path
@type d: str
@return: (package_directory, package) of the specified directory, or None,None if not in a package
@rtype: (str, str)
"""
# TODO: the realpath is going to create issues with symlinks, most likely
parent = os.path.dirname(os.path.realpath(d))
# walk up until we hit ros root or ros/pkg
while not os.path.exists(os.path.join(d, MANIFEST_FILE)) and not os.path.exists(os.path.join(d, PACKAGE_FILE)) and parent != d:
d = parent
parent = os.path.dirname(d)
if os.path.exists(os.path.join(d, MANIFEST_FILE)) or os.path.exists(os.path.join(d, PACKAGE_FILE)):
pkg = os.path.basename(os.path.abspath(d))
return d, pkg
return None, None
_pkg_dir_cache = {}
def get_pkg_dir(package, required=True, ros_root=None, ros_package_path=None):
"""
Locate directory package is stored in. This routine uses an
internal cache.
NOTE: cache does *not* rebuild if packages are relocated after
this process is initiated.
@param package: package name
@type package: str
@param required: if True, an exception will be raised if the
package directory cannot be located.
@type required: bool
@param ros_root: if specified, override ROS_ROOT
@type ros_root: str
@param ros_package_path: if specified, override ROS_PACKAGE_PATH
@type ros_package_path: str
@return: directory containing package or None if package cannot be found and required is False.
@rtype: str
@raise InvalidROSPkgException: if required is True and package cannot be located
"""
# UNIXONLY
# TODO: replace with non-rospack-based solution (e.g. os.walk())
try:
penv = os.environ.copy()
if ros_root:
ros_root = rospkg.environment._resolve_path(ros_root)
penv[ROS_ROOT] = ros_root
elif ROS_ROOT in os.environ:
# record setting for _pkg_dir_cache
ros_root = os.environ[ROS_ROOT]
# determine rospack exe name
rospack = 'rospack'
if ros_package_path is not None:
ros_package_path = rospkg.environment._resolve_paths(ros_package_path)
penv[ROS_PACKAGE_PATH] = ros_package_path
elif ROS_PACKAGE_PATH in os.environ:
# record setting for _pkg_dir_cache
ros_package_path = os.environ[ROS_PACKAGE_PATH]
# update cache if we haven't. NOTE: we only get one cache
if not _pkg_dir_cache:
_read_rospack_cache(_pkg_dir_cache, ros_root, ros_package_path)
# now that we've resolved the args, check the cache
if package in _pkg_dir_cache:
dir_, rr, rpp = _pkg_dir_cache[package]
if rr == ros_root and rpp == ros_package_path:
if os.path.isfile(os.path.join(dir_, MANIFEST_FILE)):
return dir_
else:
# invalidate cache
_invalidate_cache(_pkg_dir_cache)
rpout, rperr = Popen([rospack, 'find', package],
stdout=PIPE, stderr=PIPE, env=penv).communicate()
pkg_dir = (rpout or '').strip()
# python3.1 popen returns as bytes
if (isinstance(pkg_dir, bytes)):
pkg_dir = pkg_dir.decode()
if not pkg_dir:
raise InvalidROSPkgException('Cannot locate installation of package %s: %s. ROS_ROOT[%s] ROS_PACKAGE_PATH[%s]' % (package, rperr.strip(), ros_root, ros_package_path))
pkg_dir = os.path.normpath(pkg_dir)
if not os.path.exists(pkg_dir):
raise InvalidROSPkgException('Cannot locate installation of package %s: [%s] is not a valid path. ROS_ROOT[%s] ROS_PACKAGE_PATH[%s]' % (package, pkg_dir, ros_root, ros_package_path))
elif not os.path.isdir(pkg_dir):
raise InvalidROSPkgException('Package %s is invalid: file [%s] is in the way' % (package, pkg_dir))
# don't update cache: this should only be updated from
# rospack_cache as it will corrupt package list otherwise.
# _pkg_dir_cache[package] = (pkg_dir, ros_root, ros_package_path)
return pkg_dir
except OSError as e:
if required:
raise InvalidROSPkgException('Environment configuration is invalid: cannot locate rospack (%s)' % e)
return None
except Exception:
if required:
raise
return None
def _get_pkg_subdir_by_dir(package_dir, subdir, required=True, env=None):
"""
@param required: if True, will attempt to create the subdirectory
if it does not exist. An exception will be raised if this fails.
@type required: bool
@param package_dir: directory of package
@type package_dir: str
@param subdir: name of subdirectory to locate
@type subdir: str
@param env: override os.environ dictionary
@type env: dict
@param required: if True, directory must exist
@type required: bool
    @return: Package subdirectory if the package exists, otherwise None.
@rtype: str
@raise InvalidROSPkgException: if required is True and directory does not exist
"""
if env is None:
env = os.environ
try:
if not package_dir:
raise Exception("Cannot create a '%(subdir)s' directory in %(package_dir)s: package %(package) cannot be located" % locals())
d = os.path.join(package_dir, subdir)
if required and os.path.isfile(d):
raise Exception("""Package '%(package)s' is improperly configured:
file %(d)s is preventing the creation of a directory""" % locals())
elif required and not os.path.isdir(d):
try:
os.makedirs(d) # lazy create
except os.error:
raise Exception("""Package '%(package)s' is improperly configured:
Cannot create a '%(subdir)s' directory in %(package_dir)s.
Please check permissions and try again.
""" % locals())
return d
except Exception:
if required:
raise
return None
def get_pkg_subdir(package, subdir, required=True, env=None):
"""
@param required: if True, will attempt to create the subdirectory
if it does not exist. An exception will be raised if this fails.
@type required: bool
@param package: name of package
@type package: str
@param env: override os.environ dictionary
@type env: dict
@param required: if True, directory must exist
@type required: bool
    @return: Package subdirectory if the package exists, otherwise None.
@rtype: str
@raise InvalidROSPkgException: if required is True and directory does not exist
"""
if env is None:
env = os.environ
pkg_dir = get_pkg_dir(package, required, ros_root=env[ROS_ROOT])
return _get_pkg_subdir_by_dir(pkg_dir, subdir, required, env)
#
# Map ROS resources to files
#
def resource_file(package, subdir, resource_name):
"""
@param subdir: name of subdir -- these should be one of the
string constants, e.g. MSG_DIR
@type subdir: str
@return: path to resource in the specified subdirectory of the
    package, or None if the package does not exist
@rtype: str
@raise roslib.packages.InvalidROSPkgException: If package does not exist
"""
d = get_pkg_subdir(package, subdir, False)
if d is None:
raise InvalidROSPkgException(package)
return os.path.join(d, resource_name)
def _update_rospack_cache(env=None):
"""
Internal routine to update global package directory cache
@return: True if cache is valid
@rtype: bool
"""
if env is None:
env = os.environ
cache = _pkg_dir_cache
if cache:
return True
ros_root = env[ROS_ROOT]
ros_package_path = env.get(ROS_PACKAGE_PATH, '')
return _read_rospack_cache(cache, ros_root, ros_package_path)
def _invalidate_cache(cache):
# I've only made this a separate routine because roslib.packages should really be using
# the roslib.stacks cache implementation instead with the separate cache marker
cache.clear()
def _read_rospack_cache(cache, ros_root, ros_package_path):
"""
Read in rospack_cache data into cache. On-disk cache specifies a
ROS_ROOT and ROS_PACKAGE_PATH, which must match the requested
environment.
@param cache: empty dictionary to store package list in.
If no cache argument provided, will use internal _pkg_dir_cache
and will return cached answers if available.
The format of the cache is {package_name: dir_path, ros_root, ros_package_path}.
@type cache: {str: str, str, str}
    @param ros_root: ROS_ROOT value
    @type ros_root: str
@param ros_package_path: ROS_PACKAGE_PATH value or '' if not specified
@type ros_package_path: str
@return: True if on-disk cache matches and was loaded, false otherwise
@rtype: bool
"""
try:
with open(os.path.join(rospkg.get_ros_home(), 'rospack_cache')) as f:
for l in f.readlines():
l = l[:-1]
if not len(l):
continue
if l[0] == '#':
# check that the cache matches our env
if l.startswith('#ROS_ROOT='):
if not l[len('#ROS_ROOT='):] == ros_root:
return False
elif l.startswith('#ROS_PACKAGE_PATH='):
if not l[len('#ROS_PACKAGE_PATH='):] == ros_package_path:
return False
else:
cache[os.path.basename(l)] = l, ros_root, ros_package_path
return True
except Exception:
pass
def list_pkgs_by_path(path, packages=None, cache=None, env=None):
"""
List ROS packages within the specified path.
Optionally, a cache dictionary can be provided, which will be
updated with the package->path mappings. list_pkgs_by_path() does
    NOT return cached results -- it only updates the cache.
@param path: path to list packages in
@type path: str
@param packages: list of packages to append to. If package is
already present in packages, it will be ignored.
@type packages: [str]
@param cache: (optional) package path cache to update. Maps package name to directory path.
@type cache: {str: str}
@return: complete list of package names in ROS environment. Same as packages parameter.
@rtype: [str]
"""
if packages is None:
packages = []
if env is None:
env = os.environ
# record settings for cache
ros_root = env[ROS_ROOT]
ros_package_path = env.get(ROS_PACKAGE_PATH, '')
path = os.path.abspath(path)
for d, dirs, files in os.walk(path, topdown=True):
if MANIFEST_FILE in files:
package = os.path.basename(d)
if package not in packages:
packages.append(package)
if cache is not None:
cache[package] = d, ros_root, ros_package_path
del dirs[:]
continue # leaf
elif 'rospack_nosubdirs' in files:
del dirs[:]
continue # leaf
# small optimization
elif '.svn' in dirs:
dirs.remove('.svn')
elif '.git' in dirs:
dirs.remove('.git')
for sub_d in dirs:
# followlinks=True only available in Python 2.6, so we
# have to implement manually
sub_p = os.path.join(d, sub_d)
if os.path.islink(sub_p):
packages.extend(list_pkgs_by_path(sub_p, cache=cache))
return packages
def find_node(pkg, node_type, rospack=None):
"""
Warning: unstable API due to catkin.
Locate the executable that implements the node
:param node_type: type of node, ``str``
:returns: path to node or None if node is not in the package ``str``
    :raises: :exc:`rospkg.ResourceNotFound` If package does not exist
"""
if rospack is None:
rospack = rospkg.RosPack()
return find_resource(pkg, node_type, filter_fn=_executable_filter, rospack=rospack)
def _executable_filter(test_path):
s = os.stat(test_path)
flags = stat.S_IRUSR | stat.S_IXUSR
# Python scripts in ROS tend to omit .py extension since they could become executable
# by adding a shebang line (#!/usr/bin/env python) in Linux environments
# special handle this case in Windows environment
if os.name == 'nt' and os.path.splitext(test_path)[1].lower() in ['.py', '']:
flags = stat.S_IRUSR
return (s.st_mode & flags) == flags
def _find_resource(d, resource_name, filter_fn=None):
"""
subroutine of find_resource
"""
matches = []
# TODO: figure out how to generalize find_resource to take multiple resource name options
if sys.platform in ['win32', 'cygwin']:
# Windows logic requires more file patterns to resolve and is
# not case-sensitive, so leave it separate
# in the near-term, just hack in support for .exe/.bat/.py. In the long
# term this needs to:
#
# * parse PATHEXT to generate matches
# * perform case-insensitive compares against potential
# matches, in path-ext order
# - We still have to look for bare node_type as user may have
# specified extension manually
resource_name = resource_name.lower()
patterns = [resource_name, resource_name+'.exe', resource_name+'.bat', resource_name+'.py']
for p, dirs, files in os.walk(d):
# case insensitive
files = [f.lower() for f in files]
for name in patterns:
if name in files:
test_path = os.path.join(p, name)
if filter_fn is not None:
if filter_fn(test_path):
matches.append(test_path)
else:
matches.append(test_path)
# remove .svn/.git/etc
to_prune = [x for x in dirs if x.startswith('.')]
for x in to_prune:
dirs.remove(x)
else: # UNIX
for p, dirs, files in os.walk(d, followlinks=True):
if resource_name in files:
test_path = os.path.join(p, resource_name)
if filter_fn is not None:
if filter_fn(test_path):
matches.append(test_path)
else:
matches.append(test_path)
# remove .svn/.git/etc
to_prune = [x for x in dirs if x.startswith('.')]
for x in to_prune:
dirs.remove(x)
return [os.path.abspath(m) for m in matches]
# TODO: this routine really belongs in rospkg, but the catkin-isms really, really don't
# belong in rospkg. With more thought, they can probably be abstracted out so as
# to no longer be catkin-specific.
def find_resource(pkg, resource_name, filter_fn=None, rospack=None):
"""
Warning: unstable API due to catkin.
Locate the file named resource_name in package, optionally
matching specified filter. find_resource() will return a list of
matches, but only for a given scope. If the resource is found in
the binary build directory, it will only return matches in that
directory; it will not return matches from the ROS_PACKAGE_PATH as
well in this case.
    :param filter_fn: function that takes in a path argument and
      returns True if it matches the desired resource, ``fn(str)``
:param rospack: `rospkg.RosPack` instance to use
:returns: lists of matching paths for resource within a given scope, ``[str]``
:raises: :exc:`rospkg.ResourceNotFound` If package does not exist
"""
# New resource-location policy in Fuerte, induced by the new catkin
# build system:
# (1) Use catkin_find to find libexec and share locations, look
# recursively there. If the resource is found, done.
# Else continue:
# (2) If ROS_PACKAGE_PATH is set, look recursively there. If the
# resource is found, done. Else raise
#
# NOTE: package *must* exist on ROS_PACKAGE_PATH no matter what
if rospack is None:
rospack = rospkg.RosPack()
# lookup package as it *must* exist
pkg_path = rospack.get_path(pkg)
source_path_to_packages = rospack.get_custom_cache('source_path_to_packages', {})
# if found in binary dir, start with that. in any case, use matches
# from ros_package_path
matches = []
search_paths = catkin_find(
search_dirs=['libexec', 'share'], project=pkg, first_matching_workspace_only=True,
source_path_to_packages=source_path_to_packages)
# persist mapping of packages in rospack instance
if source_path_to_packages:
rospack.set_custom_cache('source_path_to_packages', source_path_to_packages)
for search_path in search_paths:
matches.extend(_find_resource(search_path, resource_name, filter_fn=filter_fn))
matches.extend(_find_resource(pkg_path, resource_name, filter_fn=filter_fn))
# Uniquify the results, in case we found the same file twice, while keeping order
unique_matches = []
for match in matches:
if match not in unique_matches:
unique_matches.append(match)
return unique_matches
|
import os
import sys
import inspect
def get_diamond_version():
try:
from diamond.version import __VERSION__
return __VERSION__
except ImportError:
return "Unknown"
def load_modules_from_path(path):
"""
Import all modules from the given directory
"""
# Check and fix the path
if path[-1:] != '/':
path += '/'
# Get a list of files in the directory, if the directory exists
if not os.path.exists(path):
raise OSError("Directory does not exist: %s" % path)
# Add path to the system path
sys.path.append(path)
# Load all the files in path
for f in os.listdir(path):
# Ignore anything that isn't a .py file
if len(f) > 3 and f[-3:] == '.py':
modname = f[:-3]
# Import the module
__import__(modname, globals(), locals(), ['*'])
def load_class_from_name(fqcn):
# Break apart fqcn to get module and classname
paths = fqcn.split('.')
modulename = '.'.join(paths[:-1])
classname = paths[-1]
# Import the module
__import__(modulename, globals(), locals(), ['*'])
# Get the class
cls = getattr(sys.modules[modulename], classname)
# Check cls
if not inspect.isclass(cls):
raise TypeError("%s is not a class" % fqcn)
# Return class
return cls
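# Small usage sketch (illustrative, not part of the original module):
# load_class_from_name resolves a dotted path to a class object via
# __import__ and getattr.
if __name__ == '__main__':
    ordered_dict_cls = load_class_from_name('collections.OrderedDict')
    print(ordered_dict_cls)  # <class 'collections.OrderedDict'>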
|
from urllib.parse import urlparse
import pywilight
from homeassistant.components import ssdp
from homeassistant.config_entries import CONN_CLASS_LOCAL_PUSH, ConfigFlow
from homeassistant.const import CONF_HOST
from .const import DOMAIN # pylint: disable=unused-import
CONF_SERIAL_NUMBER = "serial_number"
CONF_MODEL_NAME = "model_name"
WILIGHT_MANUFACTURER = "All Automacao Ltda"
# List the components supported by this integration.
ALLOWED_WILIGHT_COMPONENTS = ["light"]
class WiLightFlowHandler(ConfigFlow, domain=DOMAIN):
"""Handle a WiLight config flow."""
VERSION = 1
CONNECTION_CLASS = CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize the WiLight flow."""
self._host = None
self._serial_number = None
self._title = None
self._model_name = None
self._wilight_components = []
self._components_text = ""
def _wilight_update(self, host, serial_number, model_name):
self._host = host
self._serial_number = serial_number
self._title = f"WL{serial_number}"
self._model_name = model_name
self._wilight_components = pywilight.get_components_from_model(model_name)
self._components_text = ", ".join(self._wilight_components)
return self._components_text != ""
def _get_entry(self):
data = {
CONF_HOST: self._host,
CONF_SERIAL_NUMBER: self._serial_number,
CONF_MODEL_NAME: self._model_name,
}
return self.async_create_entry(title=self._title, data=data)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered WiLight."""
# Filter out basic information
if (
ssdp.ATTR_SSDP_LOCATION not in discovery_info
or ssdp.ATTR_UPNP_MANUFACTURER not in discovery_info
or ssdp.ATTR_UPNP_SERIAL not in discovery_info
or ssdp.ATTR_UPNP_MODEL_NAME not in discovery_info
or ssdp.ATTR_UPNP_MODEL_NUMBER not in discovery_info
):
return self.async_abort(reason="not_wilight_device")
# Filter out non-WiLight devices
if discovery_info[ssdp.ATTR_UPNP_MANUFACTURER] != WILIGHT_MANUFACTURER:
return self.async_abort(reason="not_wilight_device")
host = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION]).hostname
serial_number = discovery_info[ssdp.ATTR_UPNP_SERIAL]
model_name = discovery_info[ssdp.ATTR_UPNP_MODEL_NAME]
if not self._wilight_update(host, serial_number, model_name):
return self.async_abort(reason="not_wilight_device")
# Check if all components of this WiLight are allowed in this version of the HA integration
component_ok = all(
wilight_component in ALLOWED_WILIGHT_COMPONENTS
for wilight_component in self._wilight_components
)
if not component_ok:
return self.async_abort(reason="not_supported_device")
await self.async_set_unique_id(self._serial_number)
self._abort_if_unique_id_configured(updates={CONF_HOST: self._host})
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context["title_placeholders"] = {"name": self._title}
return await self.async_step_confirm()
async def async_step_confirm(self, user_input=None):
"""Handle user-confirmation of discovered WiLight."""
if user_input is not None:
return self._get_entry()
return self.async_show_form(
step_id="confirm",
description_placeholders={
"name": self._title,
"components": self._components_text,
},
errors={},
)
|
from __future__ import division
import numpy as np
import unittest
from chainer import testing
from chainercv.links.model.ssd import random_crop_with_bbox_constraints
from chainercv.utils import bbox_iou
from chainercv.utils import generate_random_bbox
class TestRandomCropWithBboxConstraints(unittest.TestCase):
def test_random_crop_with_bbox_constraints(self):
img = np.random.randint(0, 256, size=(3, 480, 640)).astype(np.float32)
bbox = generate_random_bbox(10, img.shape[1:], 0.1, 0.9)
out, param = random_crop_with_bbox_constraints(
img, bbox,
min_scale=0.3, max_scale=1,
max_aspect_ratio=2,
return_param=True)
if param['constraint'] is None:
np.testing.assert_equal(out, img)
else:
np.testing.assert_equal(
out, img[:, param['y_slice'], param['x_slice']])
# to ignore rounding error, add 1
self.assertGreaterEqual(
out.shape[0] * (out.shape[1] + 1) * (out.shape[2] + 1),
img.size * 0.3 * 0.3)
self.assertLessEqual(out.size, img.size * 1 * 1)
self.assertLessEqual(
out.shape[1] / (out.shape[2] + 1),
img.shape[1] / img.shape[2] * 2)
self.assertLessEqual(
out.shape[2] / (out.shape[1] + 1),
img.shape[2] / img.shape[1] * 2)
bb = np.array((
param['y_slice'].start, param['x_slice'].start,
param['y_slice'].stop, param['x_slice'].stop))
iou = bbox_iou(bb[np.newaxis], bbox)
min_iou, max_iou = param['constraint']
if min_iou:
self.assertGreaterEqual(iou.min(), min_iou)
if max_iou:
self.assertLessEqual(iou.max(), max_iou)
testing.run_module(__name__, __file__)
|
from pi4ioe5v9xxxx import pi4ioe5v9xxxx # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
CONF_PINS = "pins"
CONF_INVERT_LOGIC = "invert_logic"
CONF_I2CBUS = "i2c_bus"
CONF_I2CADDR = "i2c_address"
CONF_BITS = "bits"
DEFAULT_INVERT_LOGIC = False
DEFAULT_BITS = 24
DEFAULT_BUS = 1
DEFAULT_ADDR = 0x20
_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PINS): _SWITCHES_SCHEMA,
vol.Optional(CONF_I2CBUS, default=DEFAULT_BUS): cv.positive_int,
vol.Optional(CONF_I2CADDR, default=DEFAULT_ADDR): cv.positive_int,
vol.Optional(CONF_BITS, default=DEFAULT_BITS): cv.positive_int,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
}
)
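# Illustrative configuration.yaml sketch accepted by the PLATFORM_SCHEMA above
# (pin numbers and names are placeholders; the platform key is assumed to match
# the integration name):
#
#     switch:
#       - platform: pi4ioe5v9xxxx
#         i2c_bus: 1
#         i2c_address: 32
#         bits: 24
#         invert_logic: false
#         pins:
#           8: Coffee maker
#           9: Garden light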
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the swiches devices."""
pins = config.get(CONF_PINS)
switches = []
pi4ioe5v9xxxx.setup(
i2c_bus=config[CONF_I2CBUS],
i2c_addr=config[CONF_I2CADDR],
bits=config[CONF_BITS],
read_mode=False,
invert=False,
)
for pin, name in pins.items():
switches.append(Pi4ioe5v9Switch(name, pin, config[CONF_INVERT_LOGIC]))
add_entities(switches)
class Pi4ioe5v9Switch(SwitchEntity):
"""Representation of a pi4ioe5v9 IO expansion IO."""
def __init__(self, name, pin, invert_logic):
"""Initialize the pin."""
self._name = name or DEVICE_DEFAULT_NAME
self._pin = pin
self._invert_logic = invert_logic
self._state = not invert_logic
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the device on."""
pi4ioe5v9xxxx.pin_to_memory(self._pin, not self._invert_logic)
pi4ioe5v9xxxx.memory_to_hw()
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
pi4ioe5v9xxxx.pin_to_memory(self._pin, self._invert_logic)
pi4ioe5v9xxxx.memory_to_hw()
self._state = False
self.schedule_update_ha_state()
|
import json
import pytest
from homeassistant.components import alexa
from homeassistant.components.alexa import intent
from homeassistant.const import CONTENT_TYPE_JSON
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
SESSION_ID = "amzn1.echo-api.session.0000000-0000-0000-0000-00000000000"
APPLICATION_ID = "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00ebe"
REQUEST_ID = "amzn1.echo-api.request.0000000-0000-0000-0000-00000000000"
AUTHORITY_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.ZODIAC"
BUILTIN_AUTH_ID = "amzn1.er-authority.000000-d0ed-0000-ad00-000000d00ebe.TEST"
# pylint: disable=invalid-name
calls = []
NPR_NEWS_MP3_URL = "https://pd.npr.org/anon.npr-mp3/npr/news/newscast.mp3"
@pytest.fixture
def alexa_client(loop, hass, hass_client):
"""Initialize a Home Assistant server for testing this module."""
@callback
def mock_service(call):
calls.append(call)
hass.services.async_register("test", "alexa", mock_service)
assert loop.run_until_complete(
async_setup_component(
hass,
alexa.DOMAIN,
{
# Key is here to verify we allow other keys in config too
"homeassistant": {},
"alexa": {},
},
)
)
assert loop.run_until_complete(
async_setup_component(
hass,
"intent_script",
{
"intent_script": {
"WhereAreWeIntent": {
"speech": {
"type": "plain",
"text": """
{%- if is_state("device_tracker.paulus", "home")
and is_state("device_tracker.anne_therese",
"home") -%}
You are both home, you silly
{%- else -%}
Anne Therese is at {{
states("device_tracker.anne_therese")
}} and Paulus is at {{
states("device_tracker.paulus")
}}
{% endif %}
""",
}
},
"GetZodiacHoroscopeIntent": {
"speech": {
"type": "plain",
"text": "You told us your sign is {{ ZodiacSign }}.",
}
},
"AMAZON.PlaybackAction<object@MusicCreativeWork>": {
"speech": {
"type": "plain",
"text": "Playing {{ object_byArtist_name }}.",
}
},
"CallServiceIntent": {
"speech": {
"type": "plain",
"text": "Service called for {{ ZodiacSign }}",
},
"card": {
"type": "simple",
"title": "Card title for {{ ZodiacSign }}",
"content": "Card content: {{ ZodiacSign }}",
},
"action": {
"service": "test.alexa",
"data_template": {"hello": "{{ ZodiacSign }}"},
"entity_id": "switch.test",
},
},
APPLICATION_ID: {
"speech": {
"type": "plain",
"text": "LaunchRequest has been received.",
}
},
}
},
)
)
return loop.run_until_complete(hass_client())
def _intent_req(client, data=None):
return client.post(
intent.INTENTS_API_ENDPOINT,
data=json.dumps(data or {}),
headers={"content-type": CONTENT_TYPE_JSON},
)
async def test_intent_launch_request(alexa_client):
"""Test the launch of a request."""
data = {
"version": "1.0",
"session": {
"new": True,
"sessionId": SESSION_ID,
"application": {"applicationId": APPLICATION_ID},
"attributes": {},
"user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
},
"request": {
"type": "LaunchRequest",
"requestId": REQUEST_ID,
"timestamp": "2015-05-13T12:34:56Z",
},
}
req = await _intent_req(alexa_client, data)
assert req.status == 200
data = await req.json()
text = data.get("response", {}).get("outputSpeech", {}).get("text")
assert text == "LaunchRequest has been received."
async def test_intent_launch_request_not_configured(alexa_client):
"""Test the launch of a request."""
data = {
"version": "1.0",
"session": {
"new": True,
"sessionId": SESSION_ID,
"application": {
"applicationId": "amzn1.echo-sdk-ams.app.000000-d0ed-0000-ad00-000000d00000"
},
"attributes": {},
"user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
},
"request": {
"type": "LaunchRequest",
"requestId": REQUEST_ID,
"timestamp": "2015-05-13T12:34:56Z",
},
}
req = await _intent_req(alexa_client, data)
assert req.status == 200
data = await req.json()
text = data.get("response", {}).get("outputSpeech", {}).get("text")
assert text == "This intent is not yet configured within Home Assistant."
async def test_intent_request_with_slots(alexa_client):
"""Test a request with slots."""
data = {
"version": "1.0",
"session": {
"new": False,
"sessionId": SESSION_ID,
"application": {"applicationId": APPLICATION_ID},
"attributes": {
"supportedHoroscopePeriods": {
"daily": True,
"weekly": False,
"monthly": False,
}
},
"user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
},
"request": {
"type": "IntentRequest",
"requestId": REQUEST_ID,
"timestamp": "2015-05-13T12:34:56Z",
"intent": {
"name": "GetZodiacHoroscopeIntent",
"slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}},
},
},
}
req = await _intent_req(alexa_client, data)
assert req.status == 200
data = await req.json()
text = data.get("response", {}).get("outputSpeech", {}).get("text")
assert text == "You told us your sign is virgo."
async def test_intent_request_with_slots_and_synonym_resolution(alexa_client):
"""Test a request with slots and a name synonym."""
data = {
"version": "1.0",
"session": {
"new": False,
"sessionId": SESSION_ID,
"application": {"applicationId": APPLICATION_ID},
"attributes": {
"supportedHoroscopePeriods": {
"daily": True,
"weekly": False,
"monthly": False,
}
},
"user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
},
"request": {
"type": "IntentRequest",
"requestId": REQUEST_ID,
"timestamp": "2015-05-13T12:34:56Z",
"intent": {
"name": "GetZodiacHoroscopeIntent",
"slots": {
"ZodiacSign": {
"name": "ZodiacSign",
"value": "V zodiac",
"resolutions": {
"resolutionsPerAuthority": [
{
"authority": AUTHORITY_ID,
"status": {"code": "ER_SUCCESS_MATCH"},
"values": [{"value": {"name": "Virgo"}}],
},
{
"authority": BUILTIN_AUTH_ID,
"status": {"code": "ER_SUCCESS_NO_MATCH"},
"values": [{"value": {"name": "Test"}}],
},
]
},
}
},
},
},
}
req = await _intent_req(alexa_client, data)
assert req.status == 200
data = await req.json()
text = data.get("response", {}).get("outputSpeech", {}).get("text")
assert text == "You told us your sign is Virgo."
async def test_intent_request_with_slots_and_multi_synonym_resolution(alexa_client):
"""Test a request with slots and multiple name synonyms."""
data = {
"version": "1.0",
"session": {
"new": False,
"sessionId": SESSION_ID,
"application": {"applicationId": APPLICATION_ID},
"attributes": {
"supportedHoroscopePeriods": {
"daily": True,
"weekly": False,
"monthly": False,
}
},
"user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
},
"request": {
"type": "IntentRequest",
"requestId": REQUEST_ID,
"timestamp": "2015-05-13T12:34:56Z",
"intent": {
"name": "GetZodiacHoroscopeIntent",
"slots": {
"ZodiacSign": {
"name": "ZodiacSign",
"value": "V zodiac",
"resolutions": {
"resolutionsPerAuthority": [
{
"authority": AUTHORITY_ID,
"status": {"code": "ER_SUCCESS_MATCH"},
"values": [{"value": {"name": "Virgo"}}],
},
{
"authority": BUILTIN_AUTH_ID,
"status": {"code": "ER_SUCCESS_MATCH"},
"values": [{"value": {"name": "Test"}}],
},
]
},
}
},
},
},
}
req = await _intent_req(alexa_client, data)
assert req.status == 200
data = await req.json()
text = data.get("response", {}).get("outputSpeech", {}).get("text")
assert text == "You told us your sign is V zodiac."
async def test_intent_request_with_slots_but_no_value(alexa_client):
"""Test a request with slots but no value."""
data = {
"version": "1.0",
"session": {
"new": False,
"sessionId": SESSION_ID,
"application": {"applicationId": APPLICATION_ID},
"attributes": {
"supportedHoroscopePeriods": {
"daily": True,
"weekly": False,
"monthly": False,
}
},
"user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
},
"request": {
"type": "IntentRequest",
"requestId": REQUEST_ID,
"timestamp": "2015-05-13T12:34:56Z",
"intent": {
"name": "GetZodiacHoroscopeIntent",
"slots": {"ZodiacSign": {"name": "ZodiacSign"}},
},
},
}
req = await _intent_req(alexa_client, data)
assert req.status == 200
data = await req.json()
text = data.get("response", {}).get("outputSpeech", {}).get("text")
assert text == "You told us your sign is ."
async def test_intent_request_without_slots(hass, alexa_client):
"""Test a request without slots."""
data = {
"version": "1.0",
"session": {
"new": False,
"sessionId": SESSION_ID,
"application": {"applicationId": APPLICATION_ID},
"attributes": {
"supportedHoroscopePeriods": {
"daily": True,
"weekly": False,
"monthly": False,
}
},
"user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
},
"request": {
"type": "IntentRequest",
"requestId": REQUEST_ID,
"timestamp": "2015-05-13T12:34:56Z",
"intent": {"name": "WhereAreWeIntent"},
},
}
req = await _intent_req(alexa_client, data)
assert req.status == 200
json = await req.json()
text = json.get("response", {}).get("outputSpeech", {}).get("text")
assert text == "Anne Therese is at unknown and Paulus is at unknown"
hass.states.async_set("device_tracker.paulus", "home")
hass.states.async_set("device_tracker.anne_therese", "home")
req = await _intent_req(alexa_client, data)
assert req.status == 200
json = await req.json()
text = json.get("response", {}).get("outputSpeech", {}).get("text")
assert text == "You are both home, you silly"
async def test_intent_request_calling_service(alexa_client):
"""Test a request for calling a service."""
data = {
"version": "1.0",
"session": {
"new": False,
"sessionId": SESSION_ID,
"application": {"applicationId": APPLICATION_ID},
"attributes": {},
"user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
},
"request": {
"type": "IntentRequest",
"requestId": REQUEST_ID,
"timestamp": "2015-05-13T12:34:56Z",
"intent": {
"name": "CallServiceIntent",
"slots": {"ZodiacSign": {"name": "ZodiacSign", "value": "virgo"}},
},
},
}
call_count = len(calls)
req = await _intent_req(alexa_client, data)
assert req.status == 200
assert call_count + 1 == len(calls)
call = calls[-1]
assert call.domain == "test"
assert call.service == "alexa"
assert call.data.get("entity_id") == ["switch.test"]
assert call.data.get("hello") == "virgo"
data = await req.json()
assert data["response"]["card"]["title"] == "Card title for virgo"
assert data["response"]["card"]["content"] == "Card content: virgo"
assert data["response"]["outputSpeech"]["type"] == "PlainText"
assert data["response"]["outputSpeech"]["text"] == "Service called for virgo"
async def test_intent_session_ended_request(alexa_client):
"""Test the request for ending the session."""
data = {
"version": "1.0",
"session": {
"new": False,
"sessionId": SESSION_ID,
"application": {"applicationId": APPLICATION_ID},
"attributes": {
"supportedHoroscopePeriods": {
"daily": True,
"weekly": False,
"monthly": False,
}
},
"user": {"userId": "amzn1.account.AM3B00000000000000000000000"},
},
"request": {
"type": "SessionEndedRequest",
"requestId": REQUEST_ID,
"timestamp": "2015-05-13T12:34:56Z",
"reason": "USER_INITIATED",
},
}
req = await _intent_req(alexa_client, data)
assert req.status == 200
text = await req.text()
assert text == ""
async def test_intent_from_built_in_intent_library(alexa_client):
"""Test intents from the Built-in Intent Library."""
data = {
"request": {
"intent": {
"name": "AMAZON.PlaybackAction<object@MusicCreativeWork>",
"slots": {
"object.byArtist.name": {
"name": "object.byArtist.name",
"value": "the shins",
},
"object.composer.name": {"name": "object.composer.name"},
"object.contentSource": {"name": "object.contentSource"},
"object.era": {"name": "object.era"},
"object.genre": {"name": "object.genre"},
"object.name": {"name": "object.name"},
"object.owner.name": {"name": "object.owner.name"},
"object.select": {"name": "object.select"},
"object.sort": {"name": "object.sort"},
"object.type": {"name": "object.type", "value": "music"},
},
},
"timestamp": "2016-12-14T23:23:37Z",
"type": "IntentRequest",
"requestId": REQUEST_ID,
},
"session": {
"sessionId": SESSION_ID,
"application": {"applicationId": APPLICATION_ID},
},
}
req = await _intent_req(alexa_client, data)
assert req.status == 200
data = await req.json()
text = data.get("response", {}).get("outputSpeech", {}).get("text")
assert text == "Playing the shins."
|
import os
from log import Log
import sys
import importlib
import importlib.abc
import importlib.machinery
class GcpModuleFinder(importlib.abc.MetaPathFinder):
_MODULES = ['google.cloud.bigquery', 'google.cloud.storage', 'google.cloud.automl_v1beta1']
_KAGGLE_GCP_PATH = 'kaggle_gcp.py'
def __init__(self):
pass
def _is_called_from_kaggle_gcp(self):
import inspect
for frame in inspect.stack():
if os.path.basename(frame.filename) == self._KAGGLE_GCP_PATH:
return True
return False
def find_spec(self, fullname, path, target=None):
if fullname in self._MODULES:
# If being called from kaggle_gcp, don't return our
# monkeypatched module to avoid circular dependency,
# since we call kaggle_gcp to load the module.
if self._is_called_from_kaggle_gcp():
return None
return importlib.machinery.ModuleSpec(fullname, GcpModuleLoader())
class GcpModuleLoader(importlib.abc.Loader):
def __init__(self):
pass
def create_module(self, spec):
"""Create the gcp module from the spec.
"""
import kaggle_gcp
_LOADERS = {
'google.cloud.bigquery': kaggle_gcp.init_bigquery,
'google.cloud.storage': kaggle_gcp.init_gcs,
'google.cloud.automl_v1beta1': kaggle_gcp.init_automl,
}
monkeypatch_gcp_module = _LOADERS[spec.name]()
return monkeypatch_gcp_module
def exec_module(self, module):
pass
if not hasattr(sys, 'frozen'):
sys.meta_path.insert(0, GcpModuleFinder())
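# Illustrative sketch of the intended effect (assumes kaggle_gcp and its init_*
# helpers are importable in the target environment): with the finder installed
# on sys.meta_path, a plain import is transparently redirected, e.g.
#
#     from google.cloud import bigquery  # resolved via GcpModuleFinder,
#                                        # yielding the kaggle_gcp-patched module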
|
import unittest
import copy
import logging
import numbers
import numpy as np
from gensim import matutils
from gensim.models import nmf
from gensim.test import basetmtests
from gensim.test.utils import datapath, get_tmpfile, common_corpus, common_dictionary
class TestNmf(unittest.TestCase, basetmtests.TestBaseTopicModel):
def setUp(self):
self.model = nmf.Nmf(
common_corpus,
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=100,
random_state=42,
)
def testGenerator(self):
model_1 = nmf.Nmf(
iter(common_corpus * 100),
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=1,
random_state=42,
)
model_2 = nmf.Nmf(
common_corpus * 100,
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=1,
random_state=42,
)
self.assertTrue(np.allclose(model_1.get_topics(), model_2.get_topics()))
def testUpdate(self):
model = copy.deepcopy(self.model)
model.update(common_corpus)
self.assertFalse(np.allclose(self.model.get_topics(), model.get_topics()))
def testRandomState(self):
model_1 = nmf.Nmf(
common_corpus,
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=100,
random_state=42,
)
model_2 = nmf.Nmf(
common_corpus,
id2word=common_dictionary,
chunksize=1,
num_topics=2,
passes=100,
random_state=0,
)
self.assertTrue(np.allclose(self.model.get_topics(), model_1.get_topics()))
self.assertFalse(np.allclose(self.model.get_topics(), model_2.get_topics()))
def testTransform(self):
# transform one document
doc = list(common_corpus)[0]
transformed = self.model[doc]
vec = matutils.sparse2full(transformed, 2) # convert to dense vector, for easier equality tests
expected = [0., 1.]
# must contain the same values, up to re-ordering
self.assertTrue(np.allclose(sorted(vec), sorted(expected)))
# transform one word
word = 5
transformed = self.model.get_term_topics(word)
vec = matutils.sparse2full(transformed, 2)
expected = [0.35023746, 0.64976251]
# must contain the same values, up to re-ordering
self.assertTrue(np.allclose(sorted(vec), sorted(expected), rtol=1e-3))
def testTopTopics(self):
top_topics = self.model.top_topics(common_corpus)
for topic, score in top_topics:
self.assertTrue(isinstance(topic, list))
self.assertTrue(isinstance(score, float))
for v, k in topic:
self.assertTrue(isinstance(k, str))
self.assertTrue(np.issubdtype(v, float))
def testGetTopicTerms(self):
topic_terms = self.model.get_topic_terms(1)
for k, v in topic_terms:
self.assertTrue(isinstance(k, numbers.Integral))
self.assertTrue(np.issubdtype(v, float))
def testGetDocumentTopics(self):
doc_topics = self.model.get_document_topics(common_corpus)
for topic in doc_topics:
self.assertTrue(isinstance(topic, list))
for k, v in topic:
self.assertTrue(isinstance(k, numbers.Integral))
self.assertTrue(np.issubdtype(v, float))
        # Test get_document_topics on the whole corpus
all_topics = self.model.get_document_topics(common_corpus)
print(list(all_topics))
for topic in all_topics:
self.assertTrue(isinstance(topic, list))
for k, v in topic: # list of doc_topics
self.assertTrue(isinstance(k, numbers.Integral))
self.assertTrue(np.issubdtype(v, float))
def testTermTopics(self):
# check with word_type
result = self.model.get_term_topics(2)
for topic_no, probability in result:
self.assertTrue(isinstance(topic_no, int))
self.assertTrue(np.issubdtype(probability, float))
# if user has entered word instead, check with word
result = self.model.get_term_topics(str(self.model.id2word[2]))
for topic_no, probability in result:
self.assertTrue(isinstance(topic_no, int))
self.assertTrue(np.issubdtype(probability, float))
def testPersistence(self):
fname = get_tmpfile('gensim_models_nmf.tst')
self.model.save(fname)
model2 = nmf.Nmf.load(fname)
tstvec = []
self.assertTrue(np.allclose(self.model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmap(self):
fname = get_tmpfile('gensim_models_nmf.tst')
# simulate storing large arrays separately
self.model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
model2 = nmf.Nmf.load(fname, mmap='r')
self.assertEqual(self.model.num_topics, model2.num_topics)
tstvec = []
self.assertTrue(np.allclose(self.model[tstvec], model2[tstvec])) # try projecting an empty vector
def testLargeMmapCompressed(self):
fname = get_tmpfile('gensim_models_nmf.tst.gz')
# simulate storing large arrays separately
self.model.save(fname, sep_limit=0)
# test loading the large model arrays with mmap
self.assertRaises(IOError, nmf.Nmf.load, fname, mmap='r')
def testDtypeBackwardCompatibility(self):
nmf_fname = datapath('nmf_model')
test_doc = [(0, 1), (1, 1), (2, 1)]
expected_topics = [(1, 1.0)]
# save model to use in test
# self.model.save(nmf_fname)
# load a model saved using the latest version of Gensim
model = nmf.Nmf.load(nmf_fname)
# and test it on a predefined document
topics = model[test_doc]
self.assertTrue(np.allclose(expected_topics, topics))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
unittest.main()
|
from homeassistant.components.rpi_power.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.core import HomeAssistant
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.async_mock import MagicMock
from tests.common import patch
MODULE = "homeassistant.components.rpi_power.config_flow.new_under_voltage"
async def test_setup(hass: HomeAssistant) -> None:
"""Test setting up manually."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
assert not result["errors"]
with patch(MODULE, return_value=MagicMock()):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
async def test_not_supported(hass: HomeAssistant) -> None:
"""Test setting up on not supported system."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
with patch(MODULE, return_value=None):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "no_devices_found"
async def test_onboarding(hass: HomeAssistant) -> None:
"""Test setting up via onboarding."""
with patch(MODULE, return_value=MagicMock()):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "onboarding"},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
async def test_onboarding_not_supported(hass: HomeAssistant) -> None:
"""Test setting up via onboarding with unsupported system."""
with patch(MODULE, return_value=None):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "onboarding"},
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "no_devices_found"
|
import asyncio
import logging
import secrets
import pyatmo
import voluptuous as vol
from homeassistant.components import cloud
from homeassistant.components.webhook import (
async_register as webhook_register,
async_unregister as webhook_unregister,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_WEBHOOK_ID,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import CoreState, HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow, config_validation as cv
from . import api, config_flow
from .const import (
AUTH,
CONF_CLOUDHOOK_URL,
DATA_CAMERAS,
DATA_DEVICE_IDS,
DATA_EVENTS,
DATA_HANDLER,
DATA_HOMES,
DATA_PERSONS,
DATA_SCHEDULES,
DOMAIN,
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
)
from .data_handler import NetatmoDataHandler
from .webhook import handle_webhook
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
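# Illustrative configuration.yaml sketch matching CONFIG_SCHEMA above
# (placeholder credentials):
#
#     netatmo:
#       client_id: YOUR_CLIENT_ID
#       client_secret: YOUR_CLIENT_SECRET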
PLATFORMS = ["camera", "climate", "sensor"]
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Netatmo component."""
hass.data[DOMAIN] = {}
hass.data[DOMAIN][DATA_PERSONS] = {}
hass.data[DOMAIN][DATA_DEVICE_IDS] = {}
hass.data[DOMAIN][DATA_SCHEDULES] = {}
hass.data[DOMAIN][DATA_HOMES] = {}
hass.data[DOMAIN][DATA_EVENTS] = {}
hass.data[DOMAIN][DATA_CAMERAS] = {}
if DOMAIN not in config:
return True
config_flow.NetatmoFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
OAUTH2_AUTHORIZE,
OAUTH2_TOKEN,
),
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Netatmo from a config entry."""
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
    # Set unique id if none was set (migration)
if not entry.unique_id:
hass.config_entries.async_update_entry(entry, unique_id=DOMAIN)
hass.data[DOMAIN][entry.entry_id] = {
AUTH: api.ConfigEntryNetatmoAuth(hass, entry, implementation)
}
data_handler = NetatmoDataHandler(hass, entry)
await data_handler.async_setup()
hass.data[DOMAIN][entry.entry_id][DATA_HANDLER] = data_handler
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
async def unregister_webhook(_):
if CONF_WEBHOOK_ID not in entry.data:
return
_LOGGER.debug("Unregister Netatmo webhook (%s)", entry.data[CONF_WEBHOOK_ID])
webhook_unregister(hass, entry.data[CONF_WEBHOOK_ID])
async def register_webhook(event):
if CONF_WEBHOOK_ID not in entry.data:
data = {**entry.data, CONF_WEBHOOK_ID: secrets.token_hex()}
hass.config_entries.async_update_entry(entry, data=data)
if hass.components.cloud.async_active_subscription():
if CONF_CLOUDHOOK_URL not in entry.data:
webhook_url = await hass.components.cloud.async_create_cloudhook(
entry.data[CONF_WEBHOOK_ID]
)
data = {**entry.data, CONF_CLOUDHOOK_URL: webhook_url}
hass.config_entries.async_update_entry(entry, data=data)
else:
webhook_url = entry.data[CONF_CLOUDHOOK_URL]
else:
webhook_url = hass.components.webhook.async_generate_url(
entry.data[CONF_WEBHOOK_ID]
)
if entry.data["auth_implementation"] == "cloud" and not webhook_url.startswith(
"https://"
):
_LOGGER.warning(
"Webhook not registered - "
"https and port 443 is required to register the webhook"
)
return
try:
webhook_register(
hass, DOMAIN, "Netatmo", entry.data[CONF_WEBHOOK_ID], handle_webhook
)
await hass.async_add_executor_job(
hass.data[DOMAIN][entry.entry_id][AUTH].addwebhook, webhook_url
)
_LOGGER.info("Register Netatmo webhook: %s", webhook_url)
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "light")
)
except pyatmo.ApiError as err:
_LOGGER.error("Error during webhook registration - %s", err)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, unregister_webhook)
if hass.state == CoreState.running:
await register_webhook(None)
else:
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, register_webhook)
hass.services.async_register(DOMAIN, "register_webhook", register_webhook)
hass.services.async_register(DOMAIN, "unregister_webhook", unregister_webhook)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
if CONF_WEBHOOK_ID in entry.data:
await hass.async_add_executor_job(
hass.data[DOMAIN][entry.entry_id][AUTH].dropwebhook
)
_LOGGER.info("Unregister Netatmo webhook.")
await hass.data[DOMAIN][entry.entry_id][DATA_HANDLER].async_cleanup()
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
async def async_remove_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Cleanup when entry is removed."""
if (
CONF_WEBHOOK_ID in entry.data
and hass.components.cloud.async_active_subscription()
):
try:
_LOGGER.debug(
"Removing Netatmo cloudhook (%s)", entry.data[CONF_WEBHOOK_ID]
)
await cloud.async_delete_cloudhook(hass, entry.data[CONF_WEBHOOK_ID])
except cloud.CloudNotAvailable:
pass
|
import numpy as np
def resize_point(point, in_size, out_size):
"""Adapt point coordinates to the rescaled image space.
Args:
point (~numpy.ndarray or list of arrays): See the table below.
in_size (tuple): A tuple of length 2. The height and the width
of the image before resized.
out_size (tuple): A tuple of length 2. The height and the width
of the image after resized.
.. csv-table::
:header: name, shape, dtype, format
:obj:`point`, ":math:`(R, K, 2)` or :math:`[(K, 2)]`", \
:obj:`float32`, ":math:`(y, x)`"
Returns:
~numpy.ndarray or list of arrays:
Points rescaled according to the given image shapes.
"""
y_scale = float(out_size[0]) / in_size[0]
x_scale = float(out_size[1]) / in_size[1]
if isinstance(point, np.ndarray):
out_point = point.copy()
out_point[:, :, 0] = y_scale * point[:, :, 0]
out_point[:, :, 1] = x_scale * point[:, :, 1]
else:
out_point = []
for pnt in point:
out_pnt = pnt.copy()
out_pnt[:, 0] = y_scale * pnt[:, 0]
out_pnt[:, 1] = x_scale * pnt[:, 1]
out_point.append(out_pnt)
return out_point
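# Minimal usage sketch (added for illustration, not part of the original
# module): halving the image size halves every (y, x) coordinate.
if __name__ == '__main__':
    _points = np.array([[[100.0, 200.0], [10.0, 20.0]]], dtype=np.float32)  # shape (R=1, K=2, 2)
    print(resize_point(_points, in_size=(480, 640), out_size=(240, 320)))
    # -> [[[ 50. 100.]
    #      [  5.  10.]]]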
|
from datetime import timedelta
import logging
import async_timeout
from pyflick import FlickAPI, FlickPrice
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION, ATTR_FRIENDLY_NAME
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.util.dt import utcnow
from .const import ATTR_COMPONENTS, ATTR_END_AT, ATTR_START_AT, DOMAIN
_LOGGER = logging.getLogger(__name__)
_AUTH_URL = "https://api.flick.energy/identity/oauth/token"
_RESOURCE = "https://api.flick.energy/customer/mobile_provider/price"
SCAN_INTERVAL = timedelta(minutes=5)
ATTRIBUTION = "Data provided by Flick Electric"
FRIENDLY_NAME = "Flick Power Price"
UNIT_NAME = "cents"
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities
):
"""Flick Sensor Setup."""
api: FlickAPI = hass.data[DOMAIN][entry.entry_id]
async_add_entities([FlickPricingSensor(api)], True)
class FlickPricingSensor(Entity):
"""Entity object for Flick Electric sensor."""
def __init__(self, api: FlickAPI):
"""Entity object for Flick Electric sensor."""
self._api: FlickAPI = api
self._price: FlickPrice = None
self._attributes = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_FRIENDLY_NAME: FRIENDLY_NAME,
}
@property
def name(self):
"""Return the name of the sensor."""
return FRIENDLY_NAME
@property
def state(self):
"""Return the state of the sensor."""
return self._price.price
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return UNIT_NAME
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
async def async_update(self):
"""Get the Flick Pricing data from the web service."""
if self._price and self._price.end_at >= utcnow():
return # Power price data is still valid
with async_timeout.timeout(60):
self._price = await self._api.getPricing()
self._attributes[ATTR_START_AT] = self._price.start_at
self._attributes[ATTR_END_AT] = self._price.end_at
for component in self._price.components:
if component.charge_setter not in ATTR_COMPONENTS:
_LOGGER.warning("Found unknown component: %s", component.charge_setter)
continue
self._attributes[component.charge_setter] = float(component.value)
|
import pytest
from lemur.sources.views import * # noqa
from .vectors import (
VALID_ADMIN_API_TOKEN,
VALID_ADMIN_HEADER_TOKEN,
VALID_USER_HEADER_TOKEN,
WILDCARD_CERT_STR,
WILDCARD_CERT_KEY,
)
def validate_source_schema(client):
from lemur.sources.schemas import SourceInputSchema
input_data = {
"label": "exampleSource",
"options": {},
"plugin": {"slug": "aws-source"},
}
data, errors = SourceInputSchema().load(input_data)
assert not errors
def test_create_certificate(user, source):
from lemur.sources.service import certificate_create
with pytest.raises(Exception):
certificate_create({}, source)
data = {
"body": WILDCARD_CERT_STR,
"private_key": WILDCARD_CERT_KEY,
"owner": "[email protected]",
"creator": user["user"],
}
cert = certificate_create(data, source)
assert cert.notifications
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 404),
(VALID_ADMIN_HEADER_TOKEN, 404),
(VALID_ADMIN_API_TOKEN, 404),
("", 401),
],
)
def test_source_get(client, source_plugin, token, status):
assert (
client.get(api.url_for(Sources, source_id=43543), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_source_post_(client, token, status):
assert (
client.post(
api.url_for(Sources, source_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_source_put(client, token, status):
assert (
client.put(
api.url_for(Sources, source_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_source_delete(client, token, status):
assert (
client.delete(api.url_for(Sources, source_id=1), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_source_patch(client, token, status):
assert (
client.patch(
api.url_for(Sources, source_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_sources_list_get(client, source_plugin, token, status):
assert client.get(api.url_for(SourcesList), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_sources_list_post(client, token, status):
assert (
client.post(api.url_for(SourcesList), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_sources_list_put(client, token, status):
assert (
client.put(api.url_for(SourcesList), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_sources_list_delete(client, token, status):
assert client.delete(api.url_for(SourcesList), headers=token).status_code == status
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_sources_list_patch(client, token, status):
assert (
client.patch(api.url_for(SourcesList), data={}, headers=token).status_code
== status
)
|
import os
import sys
import unittest
import logging
import tempfile
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import requests
from stash import stash
from stash.system.shcommon import _STASH_ROOT, PY3
ON_TRAVIS = "TRAVIS" in os.environ
def network_is_available():
"""
Check whether the network is available.
:return: whether the network is available.
:rtype: bool
"""
# to be sure, test multiple sites in case one of them is offline
test_sites = [
"https://github.com/ywangd/stash/", # main StaSh repo
"https://forum.omz-software.com/", # pythonista forums
"https://python.org/", # python website
]
for url in test_sites:
try:
requests.get(url, timeout=5.0)
except (requests.ConnectionError, requests.Timeout):
            # cannot connect, try the next site.
continue
else:
# successfully connected.
return True
return False
def requires_network(f):
"""
Decorator for specifying that a test needs a network connection.
If no network connection is available, skip test.
:param f: test function
:type f: callable
:return: decorated function
:rtype: callable
"""
network_unavailable = (not network_is_available())
return unittest.skipIf(network_unavailable, "No network connection available.")(f)
def expected_failure_on_py3(f):
"""
Decorator for specifying that a test will probably fail on py3.
:param f: test function
:type f: callable
:return: decorated function
:rtype: callable
"""
if PY3:
return unittest.expectedFailure(f)
else:
return f
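# Illustrative sketch (hypothetical test, not part of this module): the two
# decorators above are intended to wrap individual test methods of a
# StashTestCase subclass, e.g.
#
#     class ExampleTests(StashTestCase):
#         @requires_network
#         def test_fetch(self):
#             self.run_command("curl https://example.com", exitcode=0)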
class StashTestCase(unittest.TestCase):
"""A test case implementing utility methods for testing StaSh"""
cwd = "$STASH_ROOT"
setup_commands = []
environment = {
"STASH_ROOT": _STASH_ROOT,
"TMP": tempfile.gettempdir(),
"TMPDIR": tempfile.gettempdir(),
}
maxDiff = 4096 # max diff size
def get_data_path(self):
"""return the data/ sibling path"""
curpath = os.path.dirname(sys.modules[self.__module__].__file__)
return os.path.abspath(os.path.join(curpath, "data"))
def setUp(self):
self.logger = logging.getLogger(self.__class__.__name__)
self.stash = stash.StaSh()
self.logger.debug(u"preparing environment...")
for kn in self.environment:
if kn not in os.environ:
v = self.environment[kn]
self.logger.debug(u"Setting $" + str(kn) + " to: " + repr(v))
os.environ[kn] = v
self.logger.debug(u"preparing sys.path...")
libpath = os.path.abspath(os.path.join(_STASH_ROOT, "lib"))
self.logger.debug(u"Enabling tracebacks...")
if libpath not in sys.path:
sys.path.append(libpath)
self.stash("stashconf py_traceback 1")
self.cwd = os.path.abspath(os.path.expandvars(self.cwd))
self.logger.info(u"Target CWD is: " + str(self.cwd))
self.stash('cd ' + self.cwd, persistent_level=1)
self.logger.debug(u"After cd, CWD is: " + os.getcwd())
for c in self.setup_commands:
self.logger.debug(u"executing setup command: " + repr(c))
self.stash(c, persistent_level=1)
self.stash('clear')
def tearDown(self):
assert self.stash.runtime.child_thread is None, u'child thread is not cleared'
assert len(self.stash.runtime.worker_registry) == 0, u'worker registry not empty'
del self.stash
def do_test(self, cmd, cmp_str, ensure_same_cwd=True, ensure_undefined=(), ensure_defined=(), exitcode=None):
saved_cwd = os.getcwd()
self.logger.info(u"executing {c} in {d}...".format(c=cmd, d=saved_cwd))
# 1 for mimicking running from console
worker = self.stash(cmd, persistent_level=1)
self.assertEqual(cmp_str, self.stash.main_screen.text, u'output not identical')
if exitcode is not None:
self.assertEqual(worker.state.return_value, exitcode, u"unexpected exitcode")
else:
self.logger.info(u"Exitcode: " + str(worker.state.return_value))
if ensure_same_cwd:
assert os.getcwd() == saved_cwd, 'cwd changed'
else:
if os.getcwd() != saved_cwd:
self.logger.warning(u"CWD changed from '{o}' to '{n}'!".format(o=saved_cwd, n=os.getcwd()))
for v in ensure_undefined:
assert v not in self.stash.runtime.state.environ.keys(), u'%s should be undefined' % v
for v in ensure_defined:
assert v in self.stash.runtime.state.environ.keys(), u'%s should be defined' % v
def run_command(self, command, exitcode=None):
"""
Run a command and return its output.
:param command: command to run
:type command: str
:param exitcode: expected exitcode, None to ignore
:type exitcode: int or None
:return: output of the command
:rtype: str
"""
# for debug purposes, locate script
try:
scriptname = command.split(" ")[0]
scriptfile = self.stash.runtime.find_script_file(scriptname)
self.logger.debug(u"Scriptfile for command: " + str(scriptfile))
except Exception as e:
self.logger.warning(u"Could not find script for command: " + repr(e))
# do NOT return here, script may be alias
outs = StringIO()
self.logger.info(u"Executing: " + repr(command))
worker = self.stash(
command,
persistent_level=1,
final_outs=outs,
final_errs=outs,
cwd=self.cwd
) # 1 for mimicking running from console
output = outs.getvalue()
returnvalue = worker.state.return_value
self.logger.debug(output)
self.logger.debug("Exitcode: " + str(returnvalue))
if exitcode is not None:
self.assertEqual(
returnvalue,
exitcode,
u"unexpected exitcode ({e} expected, got {g})\nOutput:\n{o}\n".format(e=exitcode,
g=returnvalue,
o=output),
)
return output
|
from __future__ import print_function
import sqlite3
import os
import cmd
import sys
class SqliteCMD(cmd.Cmd):
'''
Simple sqlite3 shell
'''
prompt = 'sqlite3>'
def __init__(self, db=None):
cmd.Cmd.__init__(self)
self.database = db or ':memory:'
self.separator = '|'
self.conn = sqlite3.connect(self.database)
self.conn.row_factory = sqlite3.Row
self.cur = self.conn.cursor()
self.commands = []
self.headers = True
self.output = sys.stdout
def preloop(self):
print('sqlite3 version %s' % sqlite3.sqlite_version)
        print('.(dot) is used for all non-SQL commands.')
        print('Use .help for the list of non-SQL commands')
print('All sql commands must end with ;')
if self.database == ':memory:':
print('Using database :memory:\nuse .open ?file? to open a database')
else:
            print('Using database: %s' % self.database)
def do_exit(self, *args):
'''Exit shell'''
return True
def emptyline(self):
pass
def command_list(self, command):
if ';' in command:
SqliteCMD.prompt = 'sqlite3>'
self.commands.append(command)
rtn = ' '.join(self.commands)
self.commands = []
return rtn
else:
self.commands.append(command)
SqliteCMD.prompt = '>>>'
return False
def display(self, line):
if self.output == sys.stdout:
print(line)
else:
with open(self.output, 'a+') as f:
f.write(line + '\n')
def do_output(self, line):
'''.output ?file?
Set output to a file default: stdout'''
self.output = sys.stdout if line == 'stdout' else line
def do_separator(self, separator):
"""Set the separator, default: |"""
self.separator = separator
def do_headers(self, state):
'''.headers ?on|off?
Turn headers on or off, default: on'''
self.headers = state.lower() == 'on'
def do_dump(self, line):
'''.dump ?table?
Dumps a database into a sql string
If table is specified, dump that table.
'''
try:
if not line:
for row in self.conn.iterdump():
self.display(row)
else:
conn = sqlite3.connect(':memory:')
cu = conn.cursor()
cu.execute("attach database '" + self.database + "' as attached_db")
cu.execute("select sql from attached_db.sqlite_master " "where type='table' and name='" + line + "'")
sql_create_table = cu.fetchone()[0]
cu.execute(sql_create_table)
cu.execute("insert into " + line + " select * from attached_db." + line)
conn.commit()
cu.execute("detach database attached_db")
self.display("\n".join(conn.iterdump()))
except:
print('Invalid table specified')
def do_backup(self, line):
'''.backup ?DB? FILE
Backup DB (default "main") to FILE'''
        with open(self.database, 'rb') as f:
with open(line, 'wb') as new_db:
new_db.write(f.read())
def do_clone(self, line):
'''.clone NEWDB
Clone data into NEWDB from the existing database'''
if not os.path.isfile(line):
try:
conn = sqlite3.connect(line)
cur = conn.cursor()
cur.executescript('\n'.join(self.conn.iterdump()))
print("Switched to database: %s" % line)
self.conn = conn
self.cur = cur
except sqlite3.Error as e:
print('There was an error with the clone %s' % e.args[0])
def do_open(self, line):
''' .open ?FILENAME?
Close existing database and reopen FILENAME
'''
if line:
self.database = line
self.conn = sqlite3.connect(line)
self.conn.row_factory = sqlite3.Row
self.cur = self.conn.cursor()
def do_read(self, line):
''' .read FILENAME
Execute SQL in FILENAME
'''
if line:
if os.path.isfile(line):
with open(line, 'r') as f:
self.cur.executescript(f.read())
self.conn.commit()
def do_schema(self, line):
''' .schema ?TABLE?
Show the CREATE statements
If TABLE specified, only show tables matching
LIKE pattern TABLE.
'''
try:
res = self.cur.execute("SELECT * FROM sqlite_master ORDER BY name;")
if not line:
for row in res:
self.display(row['sql'])
else:
for row in res:
if row['tbl_name'] == line:
self.display(row['sql'])
except:
pass
def do_tables(self, line):
''' .tables
List names of tables
'''
res = self.cur.execute("SELECT * FROM sqlite_master ORDER BY name;")
self.display(' '.join([a['tbl_name'] for a in res]))
def onecmd(self, line):
"""Mostly ripped from Python's cmd.py"""
if line[:1] == '.':
cmd, arg, line = self.parseline(line[1:])
else:
cmd = None
if not line:
return self.emptyline()
if cmd is None:
return self.default(line)
self.lastcmd = line
if cmd == '':
return self.default(line)
else:
try:
func = getattr(self, 'do_' + cmd)
except AttributeError:
return self.default(line)
return func(arg)
def format_print(self, result):
if self.headers:
headers = [header[0] for header in self.cur.description]
self.display(self.separator.join(headers))
for field in result:
self.display(self.separator.join(str(x) for x in field))
def default(self, line):
try:
rtn = self.command_list(line)
if rtn:
self.cur.execute(rtn)
self.conn.commit()
if rtn.lstrip().upper().startswith('SELECT') or rtn.lstrip().upper().startswith('PRAGMA'):
self.format_print(self.cur.fetchall())
except sqlite3.Error as e:
print(e)
            print('An error occurred:', e.args[0])
def do_EOF(self, line):
return True
if __name__ == '__main__':
#sqlitedb = SqliteCMD()
if len(sys.argv) == 2:
SqliteCMD(sys.argv[1]).cmdloop()
elif len(sys.argv) > 2:
SqliteCMD(sys.argv[1]).onecmd(sys.argv[2])
else:
SqliteCMD().cmdloop()
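# Usage sketch (illustrative; the script and database names are placeholders):
#     python sqlite_shell.py                         # interactive shell on :memory:
#     python sqlite_shell.py data.db                 # interactive shell on data.db
#     python sqlite_shell.py data.db ".tables"       # run one command and exit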
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from mesos import MesosCollector
##########################################################################
class TestMesosCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('MesosCollector', {})
self.collector = MesosCollector(config, None)
def test_import(self):
self.assertTrue(MesosCollector)
def test_import2(self):
        self.assertEqual(self.collector.config['path'], 'mesos')
@patch.object(Collector, 'publish')
def test_should_work_for_master_with_real_data(self, publish_mock):
returns = self.getFixture('master_metrics_snapshot.json')
urlopen_mock = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: returns))
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
# check how many fixtures were consumed
self.assertEqual(urlopen_mock.new.call_count, 1)
metrics = {
'master.elected': (1, 0),
"system.mem_free_bytes": (5663678464.1, 0),
"registrar.state_store_ms.p9999": (17.8412544, 6)
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_for_slave_with_real_data(self, publish_mock):
config = get_collector_config('MesosCollector', {'master': False})
self.collector = MesosCollector(config, None)
self.assertEqual(self.collector.master, False)
returns = [
self.getFixture('master_metrics_snapshot.json'),
self.getFixture('slave_metrics_state.json'),
self.getFixture('slave_monitor_statistics.json')
]
urlopen_mock = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: returns.pop(0)))
urlopen_mock.start()
self.collector.collect()
urlopen_mock.stop()
# check how many fixtures were consumed
self.assertEqual(urlopen_mock.new.call_count, 3)
metrics = {
'master.elected': 1,
'system.mem_free_bytes': 5663678464.1,
'registrar.state_store_ms.p9999': (17.8412544, 6),
'staged_tasks': 20,
'failed_tasks': 6,
'finished_tasks': 1,
'frameworks.marathon-0_7_6.executors.task_name.'
'09b6f20c-b6a9-11e4-99f6-fa163ef210c0.cpus_limit': (0.6, 1),
'frameworks.marathon-0_7_6.executors.task_name.'
'06247c78-b6a9-11e4-99f6-fa163ef210c0.cpus_limit': (1.1, 1),
'frameworks.marathon-0_7_6.executors.task_name.'
'cpus_limit': (1.7, 1),
'frameworks.marathon-0_7_6.executors.task_name.'
'instances_count': (2, 0),
'frameworks.marathon-0_7_6.executors.'
'com_domain_group_anotherApp.mem_mapped_file_bytes': 45056,
'frameworks.marathon-0_7_6.executors.task_name.'
'mem_percent': (0.19, 2)
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_compute_cpus_utilisation(self, publish_mock):
self.fixture_cpu_utilisation(publish_mock)
metrics = {
'frameworks.marathon-0_7_6.executors.task_name.'
'09b6f20c-b6a9-11e4-99f6-fa163ef210c0.cpus_utilisation': 0.25,
'frameworks.marathon-0_7_6.executors.task_name.'
'06247c78-b6a9-11e4-99f6-fa163ef210c0.cpus_utilisation': 0.25,
'frameworks.marathon-0_7_6.executors.task_name.'
'cpus_utilisation': 0.5,
}
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
patch_urlopen = patch('urllib2.urlopen', Mock(
return_value=self.getFixture('metrics_blank')))
patch_urlopen.start()
self.collector.collect()
patch_urlopen.stop()
self.assertPublishedMany(publish_mock, {})
@patch.object(Collector, 'publish')
def test_should_compute_cpus_percent(self, publish_mock):
self.fixture_cpu_utilisation(publish_mock)
self.assertPublished(
publish_mock,
'frameworks.marathon-0_7_6.executors.task_name.cpus_percent',
0.5/1.7)
def fixture_cpu_utilisation(self, publish_mock):
config = get_collector_config('MesosCollector', {'master': False})
self.collector = MesosCollector(config, None)
self.assertEqual(self.collector.master, False)
# we need 2 collect calls to see new metrics
returns = [
self.getFixture('master_metrics_snapshot.json'),
self.getFixture('slave_metrics_state.json'),
self.getFixture(
'slave_monitor_statistics_cpus_utilisation_next.json'),
self.getFixture('master_metrics_snapshot.json'),
self.getFixture('slave_metrics_state.json'),
self.getFixture('slave_monitor_statistics_cpus_utilisation.json'),
]
urlopen_mock = patch('urllib2.urlopen', Mock(
side_effect=lambda *args: returns.pop(0)))
urlopen_mock.start()
self.collector.collect()
publish_mock.reset_mock()
self.collector.collect()
urlopen_mock.stop()
def test_http(self):
self.collector.config['host'] = 'localhost'
self.assertEqual('http://localhost:5050/metrics/snapshot',
self.collector._get_url("metrics/snapshot"))
def test_https(self):
self.collector.config['host'] = 'https://localhost'
self.assertEqual('https://localhost:5050/metrics/snapshot',
self.collector._get_url("metrics/snapshot"))
def test_sum_statistics(self):
metrics_1 = {'cpu': 50, 'mem': 30, 'loadavg': 1}
metrics_2 = {'cpu': 10, 'mem': 30, 'network': 10}
self.assertEqual(self.collector._sum_statistics(metrics_1, metrics_2),
{'mem': 60, 'loadavg': 1, 'network': 10, 'cpu': 60})
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import asyncio
from collections import OrderedDict
import logging
import aiobotocore
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import ATTR_CREDENTIALS, CONF_NAME, CONF_PROFILE_NAME
from homeassistant.helpers import config_validation as cv, discovery
# Loading the config flow file will register the flow
from . import config_flow # noqa: F401
from .const import (
CONF_ACCESS_KEY_ID,
CONF_CONTEXT,
CONF_CREDENTIAL_NAME,
CONF_CREDENTIALS,
CONF_NOTIFY,
CONF_REGION,
CONF_SECRET_ACCESS_KEY,
CONF_SERVICE,
CONF_VALIDATE,
DATA_CONFIG,
DATA_HASS_CONFIG,
DATA_SESSIONS,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
AWS_CREDENTIAL_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
vol.Optional(CONF_VALIDATE, default=True): cv.boolean,
}
)
DEFAULT_CREDENTIAL = [
{CONF_NAME: "default", CONF_PROFILE_NAME: "default", CONF_VALIDATE: False}
]
SUPPORTED_SERVICES = ["lambda", "sns", "sqs"]
NOTIFY_PLATFORM_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_SERVICE): vol.All(
cv.string, vol.Lower, vol.In(SUPPORTED_SERVICES)
),
vol.Required(CONF_REGION): vol.All(cv.string, vol.Lower),
vol.Inclusive(CONF_ACCESS_KEY_ID, ATTR_CREDENTIALS): cv.string,
vol.Inclusive(CONF_SECRET_ACCESS_KEY, ATTR_CREDENTIALS): cv.string,
vol.Exclusive(CONF_PROFILE_NAME, ATTR_CREDENTIALS): cv.string,
vol.Exclusive(CONF_CREDENTIAL_NAME, ATTR_CREDENTIALS): cv.string,
vol.Optional(CONF_CONTEXT): vol.Coerce(dict),
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_CREDENTIALS, default=DEFAULT_CREDENTIAL): vol.All(
cv.ensure_list, [AWS_CREDENTIAL_SCHEMA]
),
vol.Optional(CONF_NOTIFY, default=[]): vol.All(
cv.ensure_list, [NOTIFY_PLATFORM_SCHEMA]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
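# Illustrative configuration.yaml sketch accepted by CONFIG_SCHEMA above
# (placeholder values; the option strings are assumed to match the constants
# imported from .const):
#
#     aws:
#       credentials:
#         - name: my_aws_profile
#           profile_name: default
#       notify:
#         - service: sns
#           region_name: us-east-1
#           name: my_sns_notifier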
async def async_setup(hass, config):
"""Set up AWS component."""
hass.data[DATA_HASS_CONFIG] = config
conf = config.get(DOMAIN)
if conf is None:
# create a default conf using default profile
conf = CONFIG_SCHEMA({ATTR_CREDENTIALS: DEFAULT_CREDENTIAL})
hass.data[DATA_CONFIG] = conf
hass.data[DATA_SESSIONS] = OrderedDict()
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass, entry):
"""Load a config entry.
Validate and save sessions per aws credential.
"""
config = hass.data.get(DATA_HASS_CONFIG)
conf = hass.data.get(DATA_CONFIG)
if entry.source == config_entries.SOURCE_IMPORT:
if conf is None:
# user removed config from configuration.yaml, abort setup
hass.async_create_task(hass.config_entries.async_remove(entry.entry_id))
return False
if conf != entry.data:
# user changed config from configuration.yaml, use conf to setup
hass.config_entries.async_update_entry(entry, data=conf)
if conf is None:
conf = CONFIG_SCHEMA({DOMAIN: entry.data})[DOMAIN]
# validate credentials and create sessions
validation = True
tasks = []
for cred in conf[ATTR_CREDENTIALS]:
tasks.append(_validate_aws_credentials(hass, cred))
if tasks:
results = await asyncio.gather(*tasks, return_exceptions=True)
for index, result in enumerate(results):
name = conf[ATTR_CREDENTIALS][index][CONF_NAME]
if isinstance(result, Exception):
_LOGGER.error(
"Validating credential [%s] failed: %s",
name,
result,
exc_info=result,
)
validation = False
else:
hass.data[DATA_SESSIONS][name] = result
# set up notify platform, no entry support for notify component yet,
# have to use discovery to load platform.
for notify_config in conf[CONF_NOTIFY]:
hass.async_create_task(
discovery.async_load_platform(hass, "notify", DOMAIN, notify_config, config)
)
return validation
async def _validate_aws_credentials(hass, credential):
"""Validate AWS credential config."""
aws_config = credential.copy()
del aws_config[CONF_NAME]
del aws_config[CONF_VALIDATE]
profile = aws_config.get(CONF_PROFILE_NAME)
if profile is not None:
session = aiobotocore.AioSession(profile=profile)
del aws_config[CONF_PROFILE_NAME]
if CONF_ACCESS_KEY_ID in aws_config:
del aws_config[CONF_ACCESS_KEY_ID]
if CONF_SECRET_ACCESS_KEY in aws_config:
del aws_config[CONF_SECRET_ACCESS_KEY]
else:
session = aiobotocore.AioSession()
if credential[CONF_VALIDATE]:
async with session.create_client("iam", **aws_config) as client:
await client.get_user()
return session
|
from datetime import timedelta
from homeassistant.components.nuheat.const import DOMAIN
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from .mocks import (
_get_mock_nuheat,
_get_mock_thermostat_run,
_get_mock_thermostat_schedule_hold_available,
_get_mock_thermostat_schedule_hold_unavailable,
_get_mock_thermostat_schedule_temporary_hold,
_mock_get_config,
)
from tests.async_mock import patch
from tests.common import async_fire_time_changed
async def test_climate_thermostat_run(hass):
"""Test a thermostat with the schedule running."""
mock_thermostat = _get_mock_thermostat_run()
mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
with patch(
"homeassistant.components.nuheat.nuheat.NuHeat",
return_value=mock_nuheat,
):
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.master_bathroom")
assert state.state == "auto"
expected_attributes = {
"current_temperature": 22.2,
"friendly_name": "Master bathroom",
"hvac_action": "heating",
"hvac_modes": ["auto", "heat"],
"max_temp": 69.4,
"min_temp": 5.0,
"preset_mode": "Run Schedule",
"preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
"supported_features": 17,
"temperature": 22.2,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_climate_thermostat_schedule_hold_unavailable(hass):
"""Test a thermostat with the schedule hold that is offline."""
mock_thermostat = _get_mock_thermostat_schedule_hold_unavailable()
mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
with patch(
"homeassistant.components.nuheat.nuheat.NuHeat",
return_value=mock_nuheat,
):
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.guest_bathroom")
assert state.state == "unavailable"
expected_attributes = {
"friendly_name": "Guest bathroom",
"hvac_modes": ["auto", "heat"],
"max_temp": 180.6,
"min_temp": -6.1,
"preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
"supported_features": 17,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_climate_thermostat_schedule_hold_available(hass):
"""Test a thermostat with the schedule hold that is online."""
mock_thermostat = _get_mock_thermostat_schedule_hold_available()
mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
with patch(
"homeassistant.components.nuheat.nuheat.NuHeat",
return_value=mock_nuheat,
):
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.available_bathroom")
assert state.state == "auto"
expected_attributes = {
"current_temperature": 38.9,
"friendly_name": "Available bathroom",
"hvac_action": "idle",
"hvac_modes": ["auto", "heat"],
"max_temp": 180.6,
"min_temp": -6.1,
"preset_mode": "Run Schedule",
"preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
"supported_features": 17,
"temperature": 26.1,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
async def test_climate_thermostat_schedule_temporary_hold(hass):
"""Test a thermostat with the temporary schedule hold that is online."""
mock_thermostat = _get_mock_thermostat_schedule_temporary_hold()
mock_nuheat = _get_mock_nuheat(get_thermostat=mock_thermostat)
with patch(
"homeassistant.components.nuheat.nuheat.NuHeat",
return_value=mock_nuheat,
):
assert await async_setup_component(hass, DOMAIN, _mock_get_config())
await hass.async_block_till_done()
state = hass.states.get("climate.temp_bathroom")
assert state.state == "auto"
expected_attributes = {
"current_temperature": 94.4,
"friendly_name": "Temp bathroom",
"hvac_action": "idle",
"hvac_modes": ["auto", "heat"],
"max_temp": 180.6,
"min_temp": -0.6,
"preset_mode": "Run Schedule",
"preset_modes": ["Run Schedule", "Temporary Hold", "Permanent Hold"],
"supported_features": 17,
"temperature": 37.2,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(item in state.attributes.items() for item in expected_attributes.items())
await hass.services.async_call(
"climate",
"set_temperature",
service_data={ATTR_ENTITY_ID: "climate.temp_bathroom", "temperature": 90},
blocking=True,
)
await hass.async_block_till_done()
# opportunistic set
state = hass.states.get("climate.temp_bathroom")
assert state.attributes["preset_mode"] == "Temporary Hold"
assert state.attributes["temperature"] == 50.0
# and the api poll returns it to the mock
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(seconds=3))
await hass.async_block_till_done()
state = hass.states.get("climate.temp_bathroom")
assert state.attributes["preset_mode"] == "Run Schedule"
assert state.attributes["temperature"] == 37.2
|
from __future__ import absolute_import
from calendar import HTMLCalendar
from datetime import date
from django.urls import reverse
from django.utils.dates import MONTHS
from django.utils.dates import WEEKDAYS_ABBR
from django.utils.formats import date_format
from django.utils.formats import get_format
from zinnia.models.entry import Entry
AMERICAN_TO_EUROPEAN_WEEK_DAYS = [6, 0, 1, 2, 3, 4, 5]
class Calendar(HTMLCalendar):
"""
Extension of the HTMLCalendar.
"""
def __init__(self):
"""
Retrieve and convert the localized first week day
at initialization.
"""
HTMLCalendar.__init__(self, AMERICAN_TO_EUROPEAN_WEEK_DAYS[
get_format('FIRST_DAY_OF_WEEK')])
def formatday(self, day, weekday):
"""
Return a day as a table cell with a link
if entries are published this day.
"""
if day and day in self.day_entries:
day_date = date(self.current_year, self.current_month, day)
archive_day_url = reverse('zinnia:entry_archive_day',
args=[day_date.strftime('%Y'),
day_date.strftime('%m'),
day_date.strftime('%d')])
return '<td class="%s entry"><a href="%s" '\
'class="archives">%d</a></td>' % (
self.cssclasses[weekday], archive_day_url, day)
return super(Calendar, self).formatday(day, weekday)
def formatweekday(self, day):
"""
Return a weekday name translated as a table header.
"""
return '<th class="%s">%s</th>' % (self.cssclasses[day],
WEEKDAYS_ABBR[day].title())
def formatweekheader(self):
"""
Return a header for a week as a table row.
"""
return '<thead>%s</thead>' % super(Calendar, self).formatweekheader()
def formatfooter(self, previous_month, next_month):
"""
Return a footer for a previous and next month.
"""
footer = '<tfoot><tr>' \
'<td colspan="3" class="prev">%s</td>' \
'<td class="pad"> </td>' \
'<td colspan="3" class="next">%s</td>' \
'</tr></tfoot>'
if previous_month:
previous_content = '<a href="%s" class="previous-month">%s</a>' % (
reverse('zinnia:entry_archive_month', args=[
previous_month.strftime('%Y'),
previous_month.strftime('%m')]),
date_format(previous_month, 'YEAR_MONTH_FORMAT'))
else:
previous_content = ' '
if next_month:
next_content = '<a href="%s" class="next-month">%s</a>' % (
reverse('zinnia:entry_archive_month', args=[
next_month.strftime('%Y'),
next_month.strftime('%m')]),
date_format(next_month, 'YEAR_MONTH_FORMAT'))
else:
next_content = ' '
return footer % (previous_content, next_content)
def formatmonthname(self, theyear, themonth, withyear=True):
"""Return a month name translated as a table row."""
monthname = '%s %s' % (MONTHS[themonth].title(), theyear)
return '<caption>%s</caption>' % monthname
def formatmonth(self, theyear, themonth, withyear=True,
previous_month=None, next_month=None):
"""
Return a formatted month as a table
with new attributes computed for formatting a day,
and thead/tfooter.
"""
self.current_year = theyear
self.current_month = themonth
self.day_entries = [date.day
for date in Entry.published.filter(
publication_date__year=theyear,
publication_date__month=themonth
).datetimes('publication_date', 'day')]
v = []
a = v.append
a('<table class="%s">' % (
self.day_entries and 'entries-calendar' or 'no-entries-calendar'))
a('\n')
a(self.formatmonthname(theyear, themonth, withyear=withyear))
a('\n')
a(self.formatweekheader())
a('\n')
a(self.formatfooter(previous_month, next_month))
a('\n<tbody>\n')
for week in self.monthdays2calendar(theyear, themonth):
a(self.formatweek(week))
a('\n')
a('</tbody>\n</table>')
a('\n')
return ''.join(v)
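# Usage sketch (the dates are illustrative, not part of this module): render
# the calendar of a given month, with optional previous/next month navigation:
#
#   calendar = Calendar()
#   html = calendar.formatmonth(
#       2020, 5,
#       previous_month=date(2020, 4, 1),
#       next_month=date(2020, 6, 1))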
|
class TemplateError(Exception):
"""Baseclass for all template errors."""
def __init__(self, message=None):
super().__init__(message)
@property
def message(self):
if self.args:
return self.args[0]
class TemplateNotFound(IOError, LookupError, TemplateError):
"""Raised if a template does not exist.
.. versionchanged:: 2.11
If the given name is :class:`Undefined` and no message was
provided, an :exc:`UndefinedError` is raised.
"""
# Silence the Python warning about message being deprecated since
# it's not valid here.
message = None
def __init__(self, name, message=None):
IOError.__init__(self, name)
if message is None:
from .runtime import Undefined
if isinstance(name, Undefined):
name._fail_with_undefined_error()
message = name
self.message = message
self.name = name
self.templates = [name]
def __str__(self):
return self.message
class TemplatesNotFound(TemplateNotFound):
"""Like :class:`TemplateNotFound` but raised if multiple templates
are selected. This is a subclass of :class:`TemplateNotFound`
exception, so just catching the base exception will catch both.
.. versionchanged:: 2.11
If a name in the list of names is :class:`Undefined`, a message
about it being undefined is shown rather than the empty string.
.. versionadded:: 2.2
"""
def __init__(self, names=(), message=None):
if message is None:
from .runtime import Undefined
parts = []
for name in names:
if isinstance(name, Undefined):
parts.append(name._undefined_message)
else:
parts.append(name)
message = "none of the templates given were found: " + ", ".join(
map(str, parts)
)
TemplateNotFound.__init__(self, names[-1] if names else None, message)
self.templates = list(names)
class TemplateSyntaxError(TemplateError):
"""Raised to tell the user that there is a problem with the template."""
def __init__(self, message, lineno, name=None, filename=None):
TemplateError.__init__(self, message)
self.lineno = lineno
self.name = name
self.filename = filename
self.source = None
# this is set to True if the debug.translate_syntax_error
# function translated the syntax error into a new traceback
self.translated = False
def __str__(self):
# for translated errors we only return the message
if self.translated:
return self.message
# otherwise attach some stuff
location = f"line {self.lineno}"
name = self.filename or self.name
if name:
location = f'File "{name}", {location}'
lines = [self.message, " " + location]
# if the source is set, add the line to the output
if self.source is not None:
try:
line = self.source.splitlines()[self.lineno - 1]
except IndexError:
line = None
if line:
lines.append(" " + line.strip())
return "\n".join(lines)
def __reduce__(self):
# https://bugs.python.org/issue1692335 Exceptions that take
# multiple required arguments have problems with pickling.
# Without this, raises TypeError: __init__() missing 1 required
# positional argument: 'lineno'
return self.__class__, (self.message, self.lineno, self.name, self.filename)
class TemplateAssertionError(TemplateSyntaxError):
"""Like a template syntax error, but covers cases where something in the
template caused an error at compile time that wasn't necessarily caused
by a syntax error. However it's a direct subclass of
:exc:`TemplateSyntaxError` and has the same attributes.
"""
class TemplateRuntimeError(TemplateError):
"""A generic runtime error in the template engine. Under some situations
Jinja may raise this exception.
"""
class UndefinedError(TemplateRuntimeError):
"""Raised if a template tries to operate on :class:`Undefined`."""
class SecurityError(TemplateRuntimeError):
"""Raised if a template tries to do something insecure if the
sandbox is enabled.
"""
class FilterArgumentError(TemplateRuntimeError):
"""This error is raised if a filter was called with inappropriate
    arguments.
"""
|
from django.contrib.auth.hashers import check_password
from django.core.exceptions import ValidationError
from django.utils.translation import gettext as _
from weblate.accounts.models import AuditLog
class CharsPasswordValidator:
"""Validate whether the password is not only whitespace or single char."""
def validate(self, password, user=None):
if not password:
return
if password.strip() == "":
raise ValidationError(
_("This password consists of only whitespace."),
code="password_whitespace",
)
if password.strip(password[0]) == "":
raise ValidationError(
_("This password is only a single character."),
code="password_same_chars",
)
def get_help_text(self):
return _(
"Your password can't consist of a " "single character or only whitespace."
)
class PastPasswordsValidator:
"""Validate whether the password was not used before."""
def validate(self, password, user=None):
if user is not None:
passwords = []
if user.has_usable_password():
passwords.append(user.password)
for log in AuditLog.objects.get_password(user=user):
if "password" in log.params:
passwords.append(log.params["password"])
for old in passwords:
if check_password(password, old):
raise ValidationError(
_("Can not reuse previously used password."),
code="password-past",
)
def get_help_text(self):
return _("Your password can't match a password " "you have used in the past.")
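# Usage sketch (the dotted paths are assumptions about where this module
# lives): Django enables these validators through AUTH_PASSWORD_VALIDATORS,
# e.g. in the project settings:
#
#   AUTH_PASSWORD_VALIDATORS = [
#       {"NAME": "weblate.accounts.password_validation.CharsPasswordValidator"},
#       {"NAME": "weblate.accounts.password_validation.PastPasswordsValidator"},
#   ]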
|
from ... import event
from .._widget import Widget, create_element
class Slider(Widget):
""" An input widget to select a value in a certain range.
The ``node`` of this widget is a
`<div> <https://developer.mozilla.org/docs/Web/HTML/Element/div>`_
containing a few HTML elements for rendering. It does not use
    an ``<input type='range'>`` because of its different appearance and
    behaviour across browsers.
"""
DEFAULT_MIN_SIZE = 40, 20
CSS = """
.flx-Slider:focus {
outline: none;
}
.flx-Slider > .gutter {
box-sizing: border-box;
-webkit-user-select: none;
-moz-user-select: none;
-ms-user-select: none;
user-select: none;
margin: 0 5px; /* half width of slider */
position: absolute;
top: calc(50% - 2px);
height: 4px;
width: calc(100% - 10px);
border-radius: 6px;
background: rgba(0, 0, 0, 0.2);
color: rgba(0,0,0,0);
text-align: center;
transition: top 0.2s, height 0.2s;
}
.flx-Slider.flx-dragging > .gutter, .flx-Slider:focus > .gutter {
top: calc(50% - 10px);
height: 20px;
color: rgba(0,0,0,1);
}
.flx-Slider .slider, .flx-Slider .range {
box-sizing: border-box;
text-align: center;
border-radius: 3px;
background: #48c;
border: 2px solid #48c;
transition: top 0.2s, height 0.2s, background 0.4s;
position: absolute;
top: calc(50% - 8px);
height: 16px;
width: 10px;
}
.flx-Slider .range {
border-width: 1px 0px 1px 0px;
top: calc(50% - 4px);
height: 8px;
width: auto;
}
.flx-Slider.flx-dragging .slider, .flx-Slider:focus .slider,
.flx-Slider.flx-dragging .range, .flx-Slider:focus .range {
background: none;
top: calc(50% - 10px);
height: 20px;
}
.flx-Slider > .gutter > .slider.disabled {
background: #888;
border: none;
}
"""
step = event.FloatProp(0.01, settable=True, doc="""
The step size for the slider.
""")
min = event.FloatProp(0, settable=True, doc="""
The minimal slider value.
""")
max = event.FloatProp(1, settable=True, doc="""
The maximum slider value.
""")
value = event.FloatProp(0, settable=True, doc="""
The current slider value.
""")
text = event.StringProp('{value}', settable=True, doc="""
        The label to display on the slider during dragging. Occurrences of
"{percent}" are replaced with the current percentage, and
"{value}" with the current value. Default "{value}".
""")
disabled = event.BoolProp(False, settable=True, doc="""
Whether the slider is disabled.
""")
def init(self):
self._dragging = None
self._drag_target = 0
@event.emitter
def user_value(self, value):
""" Event emitted when the user manipulates the slider.
Has ``old_value`` and ``new_value`` attributes.
"""
d = {'old_value': self.value, 'new_value': value}
self.set_value(value)
return d
@event.emitter
def user_done(self):
""" Event emitted when the user stops manipulating the slider. Has
``old_value`` and ``new_value`` attributes (which have the same value).
"""
d = {'old_value': self.value, 'new_value': self.value}
return d
@event.action
def set_value(self, value):
global Math
value = max(self.min, value)
value = min(self.max, value)
value = Math.round(value / self.step) * self.step
self._mutate_value(value)
@event.reaction('min', 'max', 'step')
def __keep_value_constrained(self, *events):
self.set_value(self.value)
def _render_dom(self):
global Math
value = self.value
mi, ma = self.min, self.max
perc = 100 * (value - mi) / (ma - mi)
valuestr = str(value)
if '.' in valuestr and valuestr[-4:-1] == '000':
valuestr = valuestr[:-1].rstrip('0')
label = self.text
label = label.replace('{value}', valuestr)
label = label.replace('{percent}', Math.round(perc) + '%')
attr = {'className': 'slider disabled' if self.disabled else 'slider',
'style__left': 'calc(' + perc + '% - 5px)'
}
return [create_element('div', {'className': 'gutter'},
create_element('span', {}, label),
create_element('div', attr),
)
]
# Use the Flexx pointer event system, so we can make use of capturing ...
def _getgutter(self):
return self.node.children[0]
def _snap2handle(self, x):
# Snap to the slider handle
gutter = self._getgutter()
left = gutter.getBoundingClientRect().left + gutter.children[1].offsetLeft
if left <= x <= left + 10:
return x
else:
return left + 5 # center of the slider handle
@event.emitter
def pointer_down(self, e):
if not self.disabled:
e.stopPropagation()
x1 = e.changedTouches[0].clientX if e.changedTouches else e.clientX
x1 = self._snap2handle(x1)
self._dragging = self.value, x1
self.outernode.classList.add('flx-dragging')
else:
return super().pointer_down(e)
@event.emitter
def pointer_up(self, e):
if self._dragging is not None and len(self._dragging) == 3:
self.outernode.blur()
self._dragging = None
self._drag_target = 0
self.outernode.classList.remove('flx-dragging')
self.user_done()
        return super().pointer_up(e)
@event.emitter
def pointer_move(self, e):
if self._dragging is not None:
e.stopPropagation()
ref_value, x1 = self._dragging[0], self._dragging[1]
self._dragging = ref_value, x1, True # mark as moved
x2 = e.changedTouches[0].clientX if e.changedTouches else e.clientX
mi, ma = self.min, self.max
value_diff = (x2 - x1) / self._getgutter().clientWidth * (ma - mi)
self.user_value(ref_value + value_diff)
else:
return super().pointer_move(e)
@event.reaction('key_down')
def __on_key(self, *events):
for ev in events:
value = self.value
if ev.key == 'Escape':
self.outernode.blur()
self.user_done()
elif ev.key == 'ArrowRight':
if isinstance(value, float):
self.user_value(value + self.step)
else:
self.user_value([v + self.step for v in value])
elif ev.key == 'ArrowLeft':
if isinstance(value, float):
self.user_value(value - self.step)
else:
self.user_value([v - self.step for v in value])
class RangeSlider(Slider):
"""An input widget to select a range (i.e having two handles instead of one).
The ``node`` of this widget is a
`<div> <https://developer.mozilla.org/docs/Web/HTML/Element/div>`_
containing a few HTML elements for rendering.
"""
value = event.FloatPairProp((0, 1), settable=True, doc="""
The current slider value as a two-tuple.
""")
@event.action
def set_value(self, *value):
""" Set the RangeSlider's value. Can be called using
``set_value([val1, val2])`` or ``set_value(val1, val2)``.
"""
global Math
if len(value) == 1 and isinstance(value[0], list):
value = value[0]
assert len(value) == 2, 'RangeSlider value must be a 2-tuple.'
value = min(value[0], value[1]), max(value[0], value[1])
for i in range(2):
value[i] = max(self.min, value[i])
value[i] = min(self.max, value[i])
value[i] = Math.round(value[i] / self.step) * self.step
self._mutate_value(value)
def _render_dom(self):
global Math
value1, value2 = self.value
mi, ma = self.min, self.max
perc1 = 100 * (value1 - mi) / (ma - mi)
perc2 = 100 * (value2 - mi) / (ma - mi)
valuestr1 = str(value1)
valuestr2 = str(value2)
if '.' in valuestr1 and valuestr1[-4:-1] == '000':
valuestr1 = valuestr1[:-1].rstrip('0')
elif '.' in valuestr2 and valuestr2[-4:-1] == '000':
valuestr2 = valuestr2[:-1].rstrip('0')
label = self.text
label = label.replace('{value}', valuestr1 + ' - ' + valuestr2)
label = label.replace('{percent}',
Math.round(perc1) + '% - ' + Math.round(perc2) + '%')
attr0 = {'className': 'range',
'style__left': perc1 + '%',
'style__right': (100 - perc2) + '%'
}
attr1 = {'className': 'slider disabled' if self.disabled else 'slider',
'style__left': 'calc(' + perc1 + '% - 5px)'
}
attr2 = {'className': 'slider disabled' if self.disabled else 'slider',
'style__left': 'calc(' + perc2 + '% - 5px)'
}
return [create_element('div', {'className': 'gutter'},
create_element('span', {}, label),
create_element('div', attr0),
create_element('div', attr1),
create_element('div', attr2),
)
]
def _snap2handle(self, x):
# Snap to a slider handle or the center
gutter = self._getgutter()
h1 = gutter.getBoundingClientRect().left + gutter.children[2].offsetLeft + 5
h2 = gutter.getBoundingClientRect().left + gutter.children[3].offsetLeft + 5
hc = 0.5 * (h1 + h2)
# Distances
d1, d2, dc = abs(x - h1), abs(x - h2), abs(x - hc)
# Decide
if dc < d1 and dc < d2:
self._drag_target = 3
return x
elif d1 < d2:
self._drag_target = 1
return h1
else:
self._drag_target = 2
return h2
@event.emitter
def pointer_move(self, e):
if self._dragging is not None:
e.stopPropagation()
ref_value, x1 = self._dragging[0], self._dragging[1]
self._dragging = ref_value, x1, True # mark as moved
x2 = e.changedTouches[0].clientX if e.changedTouches else e.clientX
mi, ma = self.min, self.max
value_diff = (x2 - x1) / self._getgutter().clientWidth * (ma - mi)
value1, value2 = ref_value
if 1 & self._drag_target:
value1 += value_diff
if 2 & self._drag_target:
value2 += value_diff
self.user_value((value1, value2))
else:
return super().pointer_move(e)
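# Usage sketch (the surrounding app is an assumption, not part of this module):
#
#   from flexx import flx
#
#   class Example(flx.Widget):
#       def init(self):
#           flx.Slider(min=0, max=100, step=1, value=25)
#           flx.RangeSlider(min=0, max=100, value=(20, 80))
#
#   # flx.launch(Example); flx.run()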
|
import logging
import subprocess
_LOGGER = logging.getLogger(__name__)
def call_shell_with_timeout(command, timeout, *, log_return_code=True):
"""Run a shell command with a timeout.
If log_return_code is set to False, it will not print an error if a non-zero
return code is returned.
"""
try:
_LOGGER.debug("Running command: %s", command)
subprocess.check_output(
command, shell=True, timeout=timeout # nosec # shell by design
)
return 0
except subprocess.CalledProcessError as proc_exception:
if log_return_code:
_LOGGER.error("Command failed: %s", command)
return proc_exception.returncode
except subprocess.TimeoutExpired:
_LOGGER.error("Timeout for command: %s", command)
return -1
except subprocess.SubprocessError:
_LOGGER.error("Error trying to exec command: %s", command)
return -1
def check_output_or_log(command, timeout):
"""Run a shell command with a timeout and return the output."""
try:
return_value = subprocess.check_output(
command, shell=True, timeout=timeout # nosec # shell by design
)
return return_value.strip().decode("utf-8")
except subprocess.CalledProcessError:
_LOGGER.error("Command failed: %s", command)
except subprocess.TimeoutExpired:
_LOGGER.error("Timeout for command: %s", command)
except subprocess.SubprocessError:
_LOGGER.error("Error trying to exec command: %s", command)
return None
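# Usage sketch (commands and timeouts are illustrative):
#
#   rc = call_shell_with_timeout("ls /tmp", timeout=10)      # 0 on success
#   output = check_output_or_log("uname -a", timeout=5)      # str, or None on error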
|
import datetime
from gi.repository import GObject, Gtk
class CellRendererDate(Gtk.CellRendererText):
__gtype_name__ = "CellRendererDate"
    #: The minimum signed 32-bit Unix timestamp; used to threshold valid values
MIN_TIMESTAMP = -2147483648
DATETIME_FORMAT = "%a %d %b %Y %H:%M:%S"
def get_timestamp(self):
return getattr(self, '_datetime', self.MIN_TIMESTAMP)
def set_timestamp(self, value):
if value == self.get_timestamp():
return
if value <= self.MIN_TIMESTAMP:
time_str = ''
else:
try:
mod_datetime = datetime.datetime.fromtimestamp(value)
time_str = mod_datetime.strftime(self.DATETIME_FORMAT)
except Exception:
time_str = ''
self.props.markup = time_str
self._datetime = value
timestamp = GObject.Property(
type=float,
nick="Unix timestamp to display",
getter=get_timestamp,
setter=set_timestamp,
)
class CellRendererByteSize(Gtk.CellRendererText):
__gtype_name__ = "CellRendererByteSize"
def get_bytesize(self):
return getattr(self, '_bytesize', -1)
def set_bytesize(self, value):
if value == self.get_bytesize():
return
if value == -1:
byte_str = ''
else:
suffixes = (
'B', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB', 'ZB', 'YB'
)
size = float(value)
unit = 0
while size > 1000 and unit < len(suffixes) - 1:
size /= 1000
unit += 1
format_str = "%.1f %s" if unit > 0 else "%d %s"
byte_str = format_str % (size, suffixes[unit])
self.props.markup = byte_str
self._bytesize = value
bytesize = GObject.Property(
type=GObject.TYPE_INT64,
nick="Byte size to display",
getter=get_bytesize,
setter=set_bytesize,
)
class CellRendererFileMode(Gtk.CellRendererText):
__gtype_name__ = "CellRendererFileMode"
def get_file_mode(self):
return getattr(self, '_file_mode', -1)
def set_file_mode(self, value):
if value == self.get_file_mode():
return
if value == -1.0:
mode_str = ''
else:
perms = []
rwx = ((4, 'r'), (2, 'w'), (1, 'x'))
for group_index in (6, 3, 0):
group = value >> group_index & 7
perms.extend([p if group & i else '-' for i, p in rwx])
mode_str = "".join(perms)
self.props.markup = mode_str
self._file_mode = value
file_mode = GObject.Property(
type=int,
nick="Byte size to display",
getter=get_file_mode,
setter=set_file_mode,
)
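# Usage sketch (column title and model index are assumptions): the renderers
# bind their custom property to a model column like any CellRendererText, e.g.:
#
#   renderer = CellRendererDate()
#   column = Gtk.TreeViewColumn("Modification time", renderer)
#   column.add_attribute(renderer, "timestamp", COL_TIME)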
|
from __future__ import print_function
import os
import sys
import fileinput
import argparse
def main(args):
ap = argparse.ArgumentParser()
ap.add_argument('files', nargs='*', help='files to sort')
ap.add_argument('-r', '--reverse', action='store_true', default=False, help='reverse the result of comparisons')
ns = ap.parse_args(args)
def _print(lines):
if lines is not None:
lines = sorted(lines)
if ns.reverse:
lines = lines[::-1]
print(''.join(lines))
fileinput.close() # in case it is not closed
try:
lines = None
for line in fileinput.input(ns.files, openhook=fileinput.hook_encoded("utf-8")):
if fileinput.isfirstline():
_print(lines)
lines = []
lines.append(line)
_print(lines)
finally:
fileinput.close()
if __name__ == '__main__':
main(sys.argv[1:])
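# Usage sketch (file names are illustrative): run as a shell command, e.g.
#
#   sort.py notes.txt todo.txt     # print each file's lines in sorted order
#   sort.py -r notes.txt           # reverse the sort order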
|
import os
import time
from datetime import datetime
import pytz
from tzlocal import get_localzone as tzlocal_get_localzone
import paasta_tools.paastaapi.models as paastamodels
from paasta_tools.api import client
def get_localzone():
if "TZ" in os.environ:
return pytz.timezone(os.environ["TZ"])
else:
return tzlocal_get_localzone()
def print_paused_message(pause_time):
local_tz = get_localzone()
paused_readable = local_tz.localize(datetime.fromtimestamp(pause_time)).strftime(
"%F %H:%M:%S %Z"
)
print(f"Service autoscaler is paused until {paused_readable}")
def get_service_autoscale_pause_time(cluster):
api = client.get_paasta_oapi_client(cluster=cluster, http_res=True)
if not api:
print("Could not connect to paasta api. Maybe you misspelled the cluster?")
return 1
pause_time, status, _ = api.default.get_service_autoscaler_pause(
_return_http_data_only=False
)
if status == 500:
print("Could not connect to zookeeper server")
return 2
pause_time = float(pause_time)
if pause_time < time.time():
print("Service autoscaler is not paused")
else:
print_paused_message(pause_time)
return 0
def update_service_autoscale_pause_time(cluster, mins):
api = client.get_paasta_oapi_client(cluster=cluster, http_res=True)
if not api:
print("Could not connect to paasta api. Maybe you misspelled the cluster?")
return 1
res, status, _ = api.default.update_service_autoscaler_pause(
paastamodels.InlineObject(minutes=int(mins)), _return_http_data_only=False
)
if status == 500:
print("Could not connect to zookeeper server")
return 2
print(f"Service autoscaler is paused for {mins}")
return 0
def delete_service_autoscale_pause_time(cluster):
api = client.get_paasta_oapi_client(cluster=cluster, http_res=True)
if not api:
print("Could not connect to paasta api. Maybe you misspelled the cluster?")
return 1
res, status, _ = api.default.delete_service_autoscaler_pause(
_return_http_data_only=False
)
if status == 500:
print("Could not connect to zookeeper server")
return 2
print("Service autoscaler is unpaused")
return 0
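# Usage sketch (cluster name and duration are illustrative):
#
#   get_service_autoscale_pause_time("example-cluster")           # report status
#   update_service_autoscale_pause_time("example-cluster", 120)   # pause 120 min
#   delete_service_autoscale_pause_time("example-cluster")        # unpause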
|
import os
import re
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_hostname(host):
assert re.search(r'instance-[12]', host.check_output('hostname -s'))
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
@pytest.mark.skip(reason='Scenario tests not implemented yet')
def test_etc_molecule_ansible_hostname_file(host):
filename = '/etc/molecule/{}'.format(host.check_output('hostname -s'))
f = host.file(filename)
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
import pytest
import homeassistant.components.mfi.switch as mfi
import homeassistant.components.switch as switch_component
from homeassistant.setup import async_setup_component
import tests.async_mock as mock
PLATFORM = mfi
COMPONENT = switch_component
THING = "switch"
GOOD_CONFIG = {
"switch": {
"platform": "mfi",
"host": "foo",
"port": 6123,
"username": "user",
"password": "pass",
"ssl": True,
"verify_ssl": True,
}
}
async def test_setup_adds_proper_devices(hass):
"""Test if setup adds devices."""
with mock.patch(
"homeassistant.components.mfi.switch.MFiClient"
) as mock_client, mock.patch(
"homeassistant.components.mfi.switch.MfiSwitch"
) as mock_switch:
ports = {
i: mock.MagicMock(model=model) for i, model in enumerate(mfi.SWITCH_MODELS)
}
ports["bad"] = mock.MagicMock(model="notaswitch")
print(ports["bad"].model)
mock_client.return_value.get_devices.return_value = [
mock.MagicMock(ports=ports)
]
assert await async_setup_component(hass, COMPONENT.DOMAIN, GOOD_CONFIG)
await hass.async_block_till_done()
for ident, port in ports.items():
if ident != "bad":
mock_switch.assert_any_call(port)
assert mock.call(ports["bad"], hass) not in mock_switch.mock_calls
@pytest.fixture(name="port")
def port_fixture():
"""Port fixture."""
return mock.MagicMock()
@pytest.fixture(name="switch")
def switch_fixture(port):
"""Switch fixture."""
return mfi.MfiSwitch(port)
async def test_name(port, switch):
"""Test the name."""
assert port.label == switch.name
async def test_update(port, switch):
"""Test update."""
switch.update()
assert port.refresh.call_count == 1
assert port.refresh.call_args == mock.call()
async def test_update_with_target_state(port, switch):
"""Test update with target state."""
# pylint: disable=protected-access
switch._target_state = True
port.data = {}
port.data["output"] = "stale"
switch.update()
assert port.data["output"] == 1.0
# pylint: disable=protected-access
assert switch._target_state is None
port.data["output"] = "untouched"
switch.update()
assert port.data["output"] == "untouched"
async def test_turn_on(port, switch):
"""Test turn_on."""
switch.turn_on()
assert port.control.call_count == 1
assert port.control.call_args == mock.call(True)
# pylint: disable=protected-access
assert switch._target_state
async def test_turn_off(port, switch):
"""Test turn_off."""
switch.turn_off()
assert port.control.call_count == 1
assert port.control.call_args == mock.call(False)
# pylint: disable=protected-access
assert not switch._target_state
async def test_current_power_w(port, switch):
"""Test current power."""
port.data = {"active_pwr": 10}
assert switch.current_power_w == 10
async def test_current_power_w_no_data(port, switch):
"""Test current power if there is no data."""
port.data = {"notpower": 123}
assert switch.current_power_w == 0
async def test_device_state_attributes(port, switch):
"""Test the state attributes."""
port.data = {"v_rms": 1.25, "i_rms": 2.75}
assert switch.device_state_attributes == {"volts": 1.2, "amps": 2.8}
|
import gzip
import io
from unittest import mock
from http.client import IncompleteRead
from urllib.parse import quote as url_quote
import cherrypy
from cherrypy._cpcompat import ntob, ntou
from cherrypy.test import helper
europoundUnicode = ntou('£', encoding='utf-8')
sing = ntou('毛泽东: Sing, Little Birdie?', encoding='utf-8')
sing8 = sing.encode('utf-8')
sing16 = sing.encode('utf-16')
class EncodingTests(helper.CPWebCase):
@staticmethod
def setup_server():
class Root:
@cherrypy.expose
def index(self, param):
assert param == europoundUnicode, '%r != %r' % (
param, europoundUnicode)
yield europoundUnicode
@cherrypy.expose
def mao_zedong(self):
return sing
@cherrypy.expose
@cherrypy.config(**{'tools.encode.encoding': 'utf-8'})
def utf8(self):
return sing8
@cherrypy.expose
def cookies_and_headers(self):
# if the headers have non-ascii characters and a cookie has
# any part which is unicode (even ascii), the response
# should not fail.
cherrypy.response.cookie['candy'] = 'bar'
cherrypy.response.cookie['candy']['domain'] = 'cherrypy.org'
cherrypy.response.headers[
'Some-Header'] = 'My d\xc3\xb6g has fleas'
cherrypy.response.headers[
'Bytes-Header'] = b'Bytes given header'
return 'Any content'
@cherrypy.expose
def reqparams(self, *args, **kwargs):
return b', '.join(
[': '.join((k, v)).encode('utf8')
for k, v in sorted(cherrypy.request.params.items())]
)
@cherrypy.expose
@cherrypy.config(**{
'tools.encode.text_only': False,
'tools.encode.add_charset': True,
})
def nontext(self, *args, **kwargs):
cherrypy.response.headers[
'Content-Type'] = 'application/binary'
return '\x00\x01\x02\x03'
class GZIP:
@cherrypy.expose
def index(self):
yield 'Hello, world'
@cherrypy.expose
# Turn encoding off so the gzip tool is the one doing the collapse.
@cherrypy.config(**{'tools.encode.on': False})
def noshow(self):
# Test for ticket #147, where yield showed no exceptions
# (content-encoding was still gzip even though traceback
# wasn't zipped).
raise IndexError()
yield 'Here be dragons'
@cherrypy.expose
@cherrypy.config(**{'response.stream': True})
def noshow_stream(self):
# Test for ticket #147, where yield showed no exceptions
# (content-encoding was still gzip even though traceback
# wasn't zipped).
raise IndexError()
yield 'Here be dragons'
class Decode:
@cherrypy.expose
@cherrypy.config(**{
'tools.decode.on': True,
'tools.decode.default_encoding': ['utf-16'],
})
def extra_charset(self, *args, **kwargs):
return ', '.join([': '.join((k, v))
for k, v in cherrypy.request.params.items()])
@cherrypy.expose
@cherrypy.config(**{
'tools.decode.on': True,
'tools.decode.encoding': 'utf-16',
})
def force_charset(self, *args, **kwargs):
return ', '.join([': '.join((k, v))
for k, v in cherrypy.request.params.items()])
root = Root()
root.gzip = GZIP()
root.decode = Decode()
cherrypy.tree.mount(root, config={'/gzip': {'tools.gzip.on': True}})
def test_query_string_decoding(self):
URI_TMPL = '/reqparams?q={q}'
europoundUtf8_2_bytes = europoundUnicode.encode('utf-8')
europoundUtf8_2nd_byte = europoundUtf8_2_bytes[1:2]
# Encoded utf8 query strings MUST be parsed correctly.
# Here, q is the POUND SIGN U+00A3 encoded in utf8 and then %HEX
self.getPage(URI_TMPL.format(q=url_quote(europoundUtf8_2_bytes)))
# The return value will be encoded as utf8.
self.assertBody(b'q: ' + europoundUtf8_2_bytes)
# Query strings that are incorrectly encoded MUST raise 404.
        # Here, q is the second byte of POUND SIGN U+00A3 encoded in utf8
# and then %HEX
# TODO: check whether this shouldn't raise 400 Bad Request instead
self.getPage(URI_TMPL.format(q=url_quote(europoundUtf8_2nd_byte)))
self.assertStatus(404)
self.assertErrorPage(
404,
'The given query string could not be processed. Query '
"strings for this resource must be encoded with 'utf8'.")
def test_urlencoded_decoding(self):
# Test the decoding of an application/x-www-form-urlencoded entity.
europoundUtf8 = europoundUnicode.encode('utf-8')
body = b'param=' + europoundUtf8
self.getPage('/',
method='POST',
headers=[
('Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body),
self.assertBody(europoundUtf8)
# Encoded utf8 entities MUST be parsed and decoded correctly.
# Here, q is the POUND SIGN U+00A3 encoded in utf8
body = b'q=\xc2\xa3'
self.getPage('/reqparams', method='POST',
headers=[(
'Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body),
self.assertBody(b'q: \xc2\xa3')
# ...and in utf16, which is not in the default attempt_charsets list:
body = b'\xff\xfeq\x00=\xff\xfe\xa3\x00'
self.getPage('/reqparams',
method='POST',
headers=[
('Content-Type',
'application/x-www-form-urlencoded;charset=utf-16'),
('Content-Length', str(len(body))),
],
body=body),
self.assertBody(b'q: \xc2\xa3')
# Entities that are incorrectly encoded MUST raise 400.
# Here, q is the POUND SIGN U+00A3 encoded in utf16, but
# the Content-Type incorrectly labels it utf-8.
body = b'\xff\xfeq\x00=\xff\xfe\xa3\x00'
self.getPage('/reqparams',
method='POST',
headers=[
('Content-Type',
'application/x-www-form-urlencoded;charset=utf-8'),
('Content-Length', str(len(body))),
],
body=body),
self.assertStatus(400)
self.assertErrorPage(
400,
'The request entity could not be decoded. The following charsets '
"were attempted: ['utf-8']")
def test_decode_tool(self):
# An extra charset should be tried first, and succeed if it matches.
# Here, we add utf-16 as a charset and pass a utf-16 body.
body = b'\xff\xfeq\x00=\xff\xfe\xa3\x00'
self.getPage('/decode/extra_charset', method='POST',
headers=[(
'Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body),
self.assertBody(b'q: \xc2\xa3')
# An extra charset should be tried first, and continue to other default
# charsets if it doesn't match.
# Here, we add utf-16 as a charset but still pass a utf-8 body.
body = b'q=\xc2\xa3'
self.getPage('/decode/extra_charset', method='POST',
headers=[(
'Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body),
self.assertBody(b'q: \xc2\xa3')
# An extra charset should error if force is True and it doesn't match.
# Here, we force utf-16 as a charset but still pass a utf-8 body.
body = b'q=\xc2\xa3'
self.getPage('/decode/force_charset', method='POST',
headers=[(
'Content-Type', 'application/x-www-form-urlencoded'),
('Content-Length', str(len(body))),
],
body=body),
self.assertErrorPage(
400,
'The request entity could not be decoded. The following charsets '
"were attempted: ['utf-16']")
def test_multipart_decoding(self):
# Test the decoding of a multipart entity when the charset (utf16) is
# explicitly given.
body = ntob('\r\n'.join([
'--X',
'Content-Type: text/plain;charset=utf-16',
'Content-Disposition: form-data; name="text"',
'',
'\xff\xfea\x00b\x00\x1c c\x00',
'--X',
'Content-Type: text/plain;charset=utf-16',
'Content-Disposition: form-data; name="submit"',
'',
'\xff\xfeC\x00r\x00e\x00a\x00t\x00e\x00',
'--X--'
]))
self.getPage('/reqparams', method='POST',
headers=[(
'Content-Type', 'multipart/form-data;boundary=X'),
('Content-Length', str(len(body))),
],
body=body),
self.assertBody(b'submit: Create, text: ab\xe2\x80\x9cc')
@mock.patch('cherrypy._cpreqbody.Part.maxrambytes', 1)
def test_multipart_decoding_bigger_maxrambytes(self):
"""
Decoding of a multipart entity should also pass when
the entity is bigger than maxrambytes. See ticket #1352.
"""
self.test_multipart_decoding()
def test_multipart_decoding_no_charset(self):
# Test the decoding of a multipart entity when the charset (utf8) is
# NOT explicitly given, but is in the list of charsets to attempt.
body = ntob('\r\n'.join([
'--X',
'Content-Disposition: form-data; name="text"',
'',
'\xe2\x80\x9c',
'--X',
'Content-Disposition: form-data; name="submit"',
'',
'Create',
'--X--'
]))
self.getPage('/reqparams', method='POST',
headers=[(
'Content-Type', 'multipart/form-data;boundary=X'),
('Content-Length', str(len(body))),
],
body=body),
self.assertBody(b'submit: Create, text: \xe2\x80\x9c')
def test_multipart_decoding_no_successful_charset(self):
# Test the decoding of a multipart entity when the charset (utf16) is
# NOT explicitly given, and is NOT in the list of charsets to attempt.
body = ntob('\r\n'.join([
'--X',
'Content-Disposition: form-data; name="text"',
'',
'\xff\xfea\x00b\x00\x1c c\x00',
'--X',
'Content-Disposition: form-data; name="submit"',
'',
'\xff\xfeC\x00r\x00e\x00a\x00t\x00e\x00',
'--X--'
]))
self.getPage('/reqparams', method='POST',
headers=[(
'Content-Type', 'multipart/form-data;boundary=X'),
('Content-Length', str(len(body))),
],
body=body),
self.assertStatus(400)
self.assertErrorPage(
400,
'The request entity could not be decoded. The following charsets '
"were attempted: ['us-ascii', 'utf-8']")
def test_nontext(self):
self.getPage('/nontext')
self.assertHeader('Content-Type', 'application/binary;charset=utf-8')
self.assertBody('\x00\x01\x02\x03')
def testEncoding(self):
# Default encoding should be utf-8
self.getPage('/mao_zedong')
self.assertBody(sing8)
# Ask for utf-16.
self.getPage('/mao_zedong', [('Accept-Charset', 'utf-16')])
self.assertHeader('Content-Type', 'text/html;charset=utf-16')
self.assertBody(sing16)
# Ask for multiple encodings. ISO-8859-1 should fail, and utf-16
# should be produced.
self.getPage('/mao_zedong', [('Accept-Charset',
'iso-8859-1;q=1, utf-16;q=0.5')])
self.assertBody(sing16)
# The "*" value should default to our default_encoding, utf-8
self.getPage('/mao_zedong', [('Accept-Charset', '*;q=1, utf-7;q=.2')])
self.assertBody(sing8)
# Only allow iso-8859-1, which should fail and raise 406.
self.getPage('/mao_zedong', [('Accept-Charset', 'iso-8859-1, *;q=0')])
self.assertStatus('406 Not Acceptable')
self.assertInBody('Your client sent this Accept-Charset header: '
'iso-8859-1, *;q=0. We tried these charsets: '
'iso-8859-1.')
# Ask for x-mac-ce, which should be unknown. See ticket #569.
self.getPage('/mao_zedong', [('Accept-Charset',
'us-ascii, ISO-8859-1, x-mac-ce')])
self.assertStatus('406 Not Acceptable')
self.assertInBody('Your client sent this Accept-Charset header: '
'us-ascii, ISO-8859-1, x-mac-ce. We tried these '
'charsets: ISO-8859-1, us-ascii, x-mac-ce.')
# Test the 'encoding' arg to encode.
self.getPage('/utf8')
self.assertBody(sing8)
self.getPage('/utf8', [('Accept-Charset', 'us-ascii, ISO-8859-1')])
self.assertStatus('406 Not Acceptable')
# Test malformed quality value, which should raise 400.
self.getPage('/mao_zedong', [('Accept-Charset',
'ISO-8859-1,utf-8;q=0.7,*;q=0.7)')])
self.assertStatus('400 Bad Request')
def testGzip(self):
zbuf = io.BytesIO()
zfile = gzip.GzipFile(mode='wb', fileobj=zbuf, compresslevel=9)
zfile.write(b'Hello, world')
zfile.close()
self.getPage('/gzip/', headers=[('Accept-Encoding', 'gzip')])
self.assertInBody(zbuf.getvalue()[:3])
self.assertHeader('Vary', 'Accept-Encoding')
self.assertHeader('Content-Encoding', 'gzip')
# Test when gzip is denied.
self.getPage('/gzip/', headers=[('Accept-Encoding', 'identity')])
self.assertHeader('Vary', 'Accept-Encoding')
self.assertNoHeader('Content-Encoding')
self.assertBody('Hello, world')
self.getPage('/gzip/', headers=[('Accept-Encoding', 'gzip;q=0')])
self.assertHeader('Vary', 'Accept-Encoding')
self.assertNoHeader('Content-Encoding')
self.assertBody('Hello, world')
# Test that trailing comma doesn't cause IndexError
# Ref: https://github.com/cherrypy/cherrypy/issues/988
self.getPage('/gzip/', headers=[('Accept-Encoding', 'gzip,deflate,')])
self.assertStatus(200)
self.assertNotInBody('IndexError')
self.getPage('/gzip/', headers=[('Accept-Encoding', '*;q=0')])
self.assertStatus(406)
self.assertNoHeader('Content-Encoding')
self.assertErrorPage(406, 'identity, gzip')
# Test for ticket #147
self.getPage('/gzip/noshow', headers=[('Accept-Encoding', 'gzip')])
self.assertNoHeader('Content-Encoding')
self.assertStatus(500)
self.assertErrorPage(500, pattern='IndexError\n')
# In this case, there's nothing we can do to deliver a
# readable page, since 1) the gzip header is already set,
# and 2) we may have already written some of the body.
# The fix is to never stream yields when using gzip.
if (cherrypy.server.protocol_version == 'HTTP/1.0' or
getattr(cherrypy.server, 'using_apache', False)):
self.getPage('/gzip/noshow_stream',
headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertInBody('\x1f\x8b\x08\x00')
else:
# The wsgiserver will simply stop sending data, and the HTTP client
# will error due to an incomplete chunk-encoded stream.
self.assertRaises((ValueError, IncompleteRead), self.getPage,
'/gzip/noshow_stream',
headers=[('Accept-Encoding', 'gzip')])
def test_UnicodeHeaders(self):
self.getPage('/cookies_and_headers')
self.assertBody('Any content')
def test_BytesHeaders(self):
self.getPage('/cookies_and_headers')
self.assertBody('Any content')
self.assertHeader('Bytes-Header', 'Bytes given header')
|
import collections
import json
import logging
import threading
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import linux_virtual_machine
from perfkitbenchmarker import os_types
from perfkitbenchmarker import resource
from perfkitbenchmarker import virtual_machine
FLAGS = flags.FLAGS
flags.DEFINE_list('static_vm_tags', None,
'The tags of static VMs for PKB to run with. Even if other '
'VMs are specified in a config, if they aren\'t in this list '
'they will be skipped during VM creation.')
class StaticVmSpec(virtual_machine.BaseVmSpec):
"""Object containing all info needed to create a Static VM."""
CLOUD = 'Static'
def __init__(self, component_full_name, ip_address=None, user_name=None,
ssh_private_key=None, internal_ip=None, ssh_port=22,
password=None, disk_specs=None, os_type=None, tag=None,
zone=None, **kwargs):
"""Initialize the StaticVmSpec object.
Args:
component_full_name: string. Fully qualified name of the configurable
component containing the config options.
ip_address: The public ip address of the VM.
user_name: The username of the VM that the keyfile corresponds to.
ssh_private_key: The absolute path to the private keyfile to use to ssh
to the VM.
internal_ip: The internal ip address of the VM.
ssh_port: The port number to use for SSH and SCP commands.
password: The password used to log into the VM (Windows Only).
disk_specs: None or a list of dictionaries containing kwargs used to
create disk.BaseDiskSpecs.
os_type: The OS type of the VM. See the flag of the same name for more
information.
tag: A string that allows the VM to be included or excluded from a run
by using the 'static_vm_tags' flag.
zone: The VM's zone.
**kwargs: Other args for the superclass.
"""
super(StaticVmSpec, self).__init__(component_full_name, **kwargs)
self.ip_address = ip_address
self.user_name = user_name
self.ssh_private_key = ssh_private_key
self.internal_ip = internal_ip
self.ssh_port = ssh_port
self.password = password
self.os_type = os_type
self.tag = tag
self.zone = zone
self.disk_specs = [
disk.BaseDiskSpec(
'{0}.disk_specs[{1}]'.format(component_full_name, i),
flag_values=kwargs.get('flag_values'), **disk_spec)
for i, disk_spec in enumerate(disk_specs or ())]
class StaticDisk(disk.BaseDisk):
"""Object representing a static Disk."""
def _Create(self):
"""StaticDisks don't implement _Create()."""
pass
def _Delete(self):
"""StaticDisks don't implement _Delete()."""
pass
def Attach(self):
"""StaticDisks don't implement Attach()."""
pass
def Detach(self):
"""StaticDisks don't implement Detach()."""
pass
class StaticVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a Static Virtual Machine."""
CLOUD = 'Static'
is_static = True
vm_pool = collections.deque()
vm_pool_lock = threading.Lock()
def __init__(self, vm_spec):
"""Initialize a static virtual machine.
Args:
vm_spec: A StaticVmSpec object containing arguments.
"""
super(StaticVirtualMachine, self).__init__(vm_spec)
self.ip_address = vm_spec.ip_address
self.user_name = vm_spec.user_name
self.ssh_private_key = vm_spec.ssh_private_key
self.internal_ip = vm_spec.internal_ip
self.zone = self.zone or ('Static - %s@%s' % (self.user_name,
self.ip_address))
self.ssh_port = vm_spec.ssh_port
self.password = vm_spec.password
self.disk_specs = vm_spec.disk_specs
self.from_pool = False
def _Create(self):
"""StaticVirtualMachines do not implement _Create()."""
pass
def _Delete(self):
"""Returns the virtual machine to the pool."""
if self.from_pool:
with self.vm_pool_lock:
self.vm_pool.appendleft(self)
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
      disk_spec: disk.BaseDiskSpec object describing the disk to create.
"""
spec = self.disk_specs[len(self.scratch_disks)]
self.scratch_disks.append(StaticDisk(spec))
def DeleteScratchDisks(self):
"""StaticVirtualMachines do not delete scratch disks."""
pass
@classmethod
def ReadStaticVirtualMachineFile(cls, file_obj):
"""Read a file describing the static VMs to use.
This function will read the static VM information from the provided file,
instantiate VMs corresponding to the info, and add the VMs to the static
    VM pool. The provided file should contain a single array in JSON format.
    Each element in the array must be an object with the following format:
ip_address: string.
user_name: string.
keyfile_path: string.
ssh_port: integer, optional. Default 22
internal_ip: string, optional.
zone: string, optional.
local_disks: array of strings, optional.
scratch_disk_mountpoints: array of strings, optional
os_type: string, optional (see package_managers)
install_packages: bool, optional
Args:
file_obj: An open handle to a file containing the static VM info.
Raises:
ValueError: On missing required keys, or invalid keys.
"""
vm_arr = json.load(file_obj)
if not isinstance(vm_arr, list):
raise ValueError('Invalid static VM file. Expected array, got: %s.' %
type(vm_arr))
required_keys = frozenset(['ip_address', 'user_name'])
linux_required_keys = required_keys | frozenset(['keyfile_path'])
required_keys_by_os = {
os_types.WINDOWS: required_keys | frozenset(['password']),
os_types.DEBIAN: linux_required_keys,
os_types.RHEL: linux_required_keys,
os_types.CLEAR: linux_required_keys,
os_types.UBUNTU_CONTAINER: linux_required_keys,
}
# assume linux_required_keys for unknown os_type
required_keys = required_keys_by_os.get(FLAGS.os_type, linux_required_keys)
optional_keys = frozenset(['internal_ip', 'zone', 'local_disks',
'scratch_disk_mountpoints', 'os_type',
'ssh_port', 'install_packages'])
allowed_keys = required_keys | optional_keys
def VerifyItemFormat(item):
"""Verify that the decoded JSON object matches the required schema."""
item_keys = frozenset(item)
extra_keys = sorted(item_keys - allowed_keys)
missing_keys = required_keys - item_keys
if extra_keys:
raise ValueError('Unexpected keys: {0}'.format(', '.join(extra_keys)))
elif missing_keys:
raise ValueError('Missing required keys: {0}'.format(
', '.join(missing_keys)))
for item in vm_arr:
VerifyItemFormat(item)
ip_address = item['ip_address']
user_name = item['user_name']
keyfile_path = item.get('keyfile_path')
internal_ip = item.get('internal_ip')
zone = item.get('zone')
local_disks = item.get('local_disks', [])
password = item.get('password')
if not isinstance(local_disks, list):
raise ValueError('Expected a list of local disks, got: {0}'.format(
local_disks))
scratch_disk_mountpoints = item.get('scratch_disk_mountpoints', [])
if not isinstance(scratch_disk_mountpoints, list):
raise ValueError(
'Expected a list of disk mount points, got: {0}'.format(
scratch_disk_mountpoints))
ssh_port = item.get('ssh_port', 22)
os_type = item.get('os_type')
install_packages = item.get('install_packages', True)
if ((os_type == os_types.WINDOWS and FLAGS.os_type != os_types.WINDOWS) or
(os_type != os_types.WINDOWS and FLAGS.os_type == os_types.WINDOWS)):
raise ValueError('Please only use Windows VMs when using '
'--os_type=windows and vice versa.')
disk_kwargs_list = []
for path in scratch_disk_mountpoints:
disk_kwargs_list.append({'mount_point': path})
for local_disk in local_disks:
disk_kwargs_list.append({'device_path': local_disk})
vm_spec = StaticVmSpec(
'static_vm_file', ip_address=ip_address, user_name=user_name,
ssh_port=ssh_port, install_packages=install_packages,
ssh_private_key=keyfile_path, internal_ip=internal_ip, zone=zone,
disk_specs=disk_kwargs_list, password=password,
flag_values=flags.FLAGS)
vm_class = GetStaticVmClass(os_type)
vm = vm_class(vm_spec)
cls.vm_pool.append(vm)
@classmethod
def GetStaticVirtualMachine(cls):
"""Pull a Static VM from the pool of static VMs.
If there are no VMs left in the pool, the method will return None.
Returns:
A static VM from the pool, or None if there are no static VMs left.
"""
with cls.vm_pool_lock:
if cls.vm_pool:
vm = cls.vm_pool.popleft()
vm.from_pool = True
return vm
else:
return None
def GetStaticVmClass(os_type):
"""Returns the static VM class that corresponds to the os_type."""
if not os_type:
os_type = os_types.DEFAULT
logging.warning('Could not find os type for VM. Defaulting to %s.', os_type)
return resource.GetResourceClass(virtual_machine.BaseVirtualMachine,
CLOUD=StaticVirtualMachine.CLOUD,
OS_TYPE=os_type)
class Ubuntu1604BasedStaticVirtualMachine(
StaticVirtualMachine, linux_virtual_machine.Ubuntu1604Mixin):
pass
class Ubuntu1804BasedStaticVirtualMachine(
StaticVirtualMachine, linux_virtual_machine.Ubuntu1804Mixin):
pass
class Ubuntu2004BasedStaticVirtualMachine(
StaticVirtualMachine, linux_virtual_machine.Ubuntu2004Mixin):
pass
class ClearBasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.ClearMixin):
pass
class Rhel7BasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.Rhel7Mixin):
pass
class Rhel8BasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.Rhel8Mixin):
pass
class CentOs7BasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.CentOs7Mixin):
pass
class CentOs8BasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.CentOs8Mixin):
pass
class Debian9BasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.Debian9Mixin):
pass
class Debian10BasedStaticVirtualMachine(StaticVirtualMachine,
linux_virtual_machine.Debian10Mixin):
pass
|
import diamond.collector
import diamond.convertor
import os
from decimal import Decimal
try:
import psutil
psutil # workaround for pyflakes issue #13
except ImportError:
psutil = None
_KEY_MAPPING = [
'MemTotal',
'MemFree',
'MemAvailable', # needs kernel 3.14
'Buffers',
'Cached',
'Active',
'Dirty',
'Inactive',
'Shmem',
'SwapTotal',
'SwapFree',
'SwapCached',
'VmallocTotal',
'VmallocUsed',
'VmallocChunk',
'Committed_AS',
]
class MemoryCollector(diamond.collector.Collector):
PROC = '/proc/meminfo'
def get_default_config_help(self):
config_help = super(MemoryCollector, self).get_default_config_help()
config_help.update({
            'detailed': 'Set to True to collect all the nodes',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(MemoryCollector, self).get_default_config()
config.update({
'path': 'memory',
'method': 'Threaded',
'force_psutil': 'False'
# Collect all the nodes or just a few standard ones?
# Uncomment to enable
# 'detailed': 'True'
})
return config
def collect(self):
"""
Collect memory stats
"""
if ((os.access(self.PROC, os.R_OK) and
self.config.get('force_psutil') != 'True')):
file = open(self.PROC)
data = file.read()
file.close()
memory_total = None
memory_available = None
for line in data.splitlines():
try:
name, value, units = line.split()
name = name.rstrip(':')
value = int(value)
if ((name not in _KEY_MAPPING and
'detailed' not in self.config)):
continue
                    if name == 'MemTotal':
                        memory_total = value
                    elif name == 'MemAvailable':
                        memory_available = value
for unit in self.config['byte_unit']:
value = diamond.convertor.binary.convert(value=value,
oldUnit=units,
newUnit=unit)
self.publish(name, value, metric_type='GAUGE')
# TODO: We only support one unit node here. Fix it!
break
except ValueError:
continue
if memory_total is not None and memory_available is not None:
memory_used = memory_total - memory_available
memory_used_percent = Decimal(str(100.0 *
memory_used /
memory_total))
self.publish('MemUsedPercentage',
round(memory_used_percent, 2),
metric_type='GAUGE')
return True
else:
if not psutil:
self.log.error('Unable to import psutil')
self.log.error('No memory metrics retrieved')
return None
# psutil.phymem_usage() and psutil.virtmem_usage() are deprecated.
if hasattr(psutil, "phymem_usage"):
phymem_usage = psutil.phymem_usage()
virtmem_usage = psutil.virtmem_usage()
else:
phymem_usage = psutil.virtual_memory()
virtmem_usage = psutil.swap_memory()
units = 'B'
for unit in self.config['byte_unit']:
memory_total = value = diamond.convertor.binary.convert(
value=phymem_usage.total, oldUnit=units, newUnit=unit)
self.publish('MemTotal', value, metric_type='GAUGE')
memory_available = value = diamond.convertor.binary.convert(
value=phymem_usage.available, oldUnit=units, newUnit=unit)
self.publish('MemAvailable', value, metric_type='GAUGE')
memory_used = memory_total - memory_available
memory_used_percent = Decimal(str(100.0 *
memory_used /
memory_total))
self.publish('MemUsedPercentage',
round(memory_used_percent, 2),
metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=phymem_usage.free, oldUnit=units, newUnit=unit)
self.publish('MemFree', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=virtmem_usage.total, oldUnit=units, newUnit=unit)
self.publish('SwapTotal', value, metric_type='GAUGE')
value = diamond.convertor.binary.convert(
value=virtmem_usage.free, oldUnit=units, newUnit=unit)
self.publish('SwapFree', value, metric_type='GAUGE')
# TODO: We only support one unit node here. Fix it!
break
return True
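# Illustrative sketch (not part of the original collector): a /proc/meminfo
# line such as "MemTotal:       16328752 kB" splits into
# ('MemTotal:', '16328752', 'kB'); the trailing colon is stripped and the
# value is converted into each unit listed in the 'byte_unit' config
# (e.g. oldUnit='kB', newUnit='byte') before being published as a GAUGE.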
|
import os
import os.path as op
import numpy as np
from distutils.version import LooseVersion
from ...utils import (_fetch_file, verbose, _TempDir, _check_pandas_installed,
_on_missing)
from ..utils import _get_path
AGE_SLEEP_RECORDS = op.join(op.dirname(__file__), 'age_records.csv')
TEMAZEPAM_SLEEP_RECORDS = op.join(op.dirname(__file__),
'temazepam_records.csv')
TEMAZEPAM_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/ST-subjects.xls' # noqa: E501
TEMAZEPAM_RECORDS_URL_SHA1 = 'f52fffe5c18826a2bd4c5d5cb375bb4a9008c885'
AGE_RECORDS_URL = 'https://physionet.org/physiobank/database/sleep-edfx/SC-subjects.xls' # noqa: E501
AGE_RECORDS_URL_SHA1 = '0ba6650892c5d33a8e2b3f62ce1cc9f30438c54f'
sha1sums_fname = op.join(op.dirname(__file__), 'SHA1SUMS')
def _fetch_one(fname, hashsum, path, force_update, base_url):
# Fetch the file
url = base_url + '/' + fname
destination = op.join(path, fname)
if not op.isfile(destination) or force_update:
if op.isfile(destination):
os.remove(destination)
if not op.isdir(op.dirname(destination)):
os.makedirs(op.dirname(destination))
_fetch_file(url, destination, print_destination=False,
hash_=hashsum, hash_type='sha1')
return destination
@verbose
def _data_path(path=None, force_update=False, update_path=None, verbose=None):
"""Get path to local copy of EEG Physionet age Polysomnography dataset URL.
This is a low-level function useful for getting a local copy of a
remote Polysomnography dataset [1]_ which is available at PhysioNet [2]_.
Parameters
----------
path : None | str
Location of where to look for the data storing location.
If None, the environment variable or config parameter
``MNE_DATASETS_PHYSIONET_SLEEP_PATH`` is used. If it doesn't exist, the
"~/mne_data" directory is used. If the dataset
is not found under the given path, the data
will be automatically downloaded to the specified folder.
force_update : bool
Force update of the dataset even if a local copy exists.
update_path : bool | None
If True, set the MNE_DATASETS_PHYSIONET_SLEEP_PATH in mne-python
config to the given path. If None, the user is prompted.
%(verbose)s
Returns
-------
    path : str
        Local path to the folder containing the Polysomnography data.
References
----------
.. [1] B Kemp, AH Zwinderman, B Tuk, HAC Kamphuisen, JJL Oberyé. Analysis of
a sleep-dependent neuronal feedback loop: the slow-wave microcontinuity
of the EEG. IEEE-BME 47(9):1185-1194 (2000).
.. [2] Goldberger AL, Amaral LAN, Glass L, Hausdorff JM, Ivanov PCh,
Mark RG, Mietus JE, Moody GB, Peng C-K, Stanley HE. (2000)
PhysioBank, PhysioToolkit, and PhysioNet: Components of a New
Research Resource for Complex Physiologic Signals.
Circulation 101(23):e215-e220
""" # noqa: E501
key = 'PHYSIONET_SLEEP_PATH'
name = 'PHYSIONET_SLEEP'
path = _get_path(path, key, name)
return op.join(path, 'physionet-sleep-data')
def _update_sleep_temazepam_records(fname=TEMAZEPAM_SLEEP_RECORDS):
"""Help function to download Physionet's temazepam dataset records."""
pd = _check_pandas_installed()
tmp = _TempDir()
# Download subjects info.
subjects_fname = op.join(tmp, 'ST-subjects.xls')
_fetch_file(url=TEMAZEPAM_RECORDS_URL,
file_name=subjects_fname,
hash_=TEMAZEPAM_RECORDS_URL_SHA1,
hash_type='sha1')
# Load and Massage the checksums.
sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None,
names=['sha', 'fname'], engine='python')
select_age_records = (sha1_df.fname.str.startswith('ST') &
sha1_df.fname.str.endswith('edf'))
sha1_df = sha1_df[select_age_records]
sha1_df['id'] = [name[:6] for name in sha1_df.fname]
# Load and massage the data.
data = pd.read_excel(subjects_fname, header=[0, 1])
if LooseVersion(pd.__version__) >= LooseVersion('0.24.0'):
data = data.set_index(('Subject - age - sex', 'Nr'))
data.index.name = 'subject'
data.columns.names = [None, None]
data = (data.set_index([('Subject - age - sex', 'Age'),
('Subject - age - sex', 'M1/F2')], append=True)
.stack(level=0).reset_index())
data = data.rename(columns={('Subject - age - sex', 'Age'): 'age',
('Subject - age - sex', 'M1/F2'): 'sex',
'level_3': 'drug'})
data['id'] = ['ST7{:02d}{:1d}'.format(s, n)
for s, n in zip(data.subject, data['night nr'])]
data = pd.merge(sha1_df, data, how='outer', on='id')
data['record type'] = (data.fname.str.split('-', expand=True)[1]
.str.split('.', expand=True)[0]
.astype('category'))
data = data.set_index(['id', 'subject', 'age', 'sex', 'drug',
'lights off', 'night nr', 'record type']).unstack()
data.columns = [l1 + '_' + l2 for l1, l2 in data.columns]
if LooseVersion(pd.__version__) < LooseVersion('0.21.0'):
data = data.reset_index().drop(labels=['id'], axis=1)
else:
data = data.reset_index().drop(columns=['id'])
data['sex'] = (data.sex.astype('category')
.cat.rename_categories({1: 'male', 2: 'female'}))
data['drug'] = data['drug'].str.split(expand=True)[0]
data['subject_orig'] = data['subject']
data['subject'] = data.index // 2 # to make sure index is from 0 to 21
# Save the data.
data.to_csv(fname, index=False)
def _update_sleep_age_records(fname=AGE_SLEEP_RECORDS):
"""Help function to download Physionet's age dataset records."""
pd = _check_pandas_installed()
tmp = _TempDir()
# Download subjects info.
subjects_fname = op.join(tmp, 'SC-subjects.xls')
_fetch_file(url=AGE_RECORDS_URL,
file_name=subjects_fname,
hash_=AGE_RECORDS_URL_SHA1,
hash_type='sha1')
# Load and Massage the checksums.
sha1_df = pd.read_csv(sha1sums_fname, sep=' ', header=None,
names=['sha', 'fname'], engine='python')
select_age_records = (sha1_df.fname.str.startswith('SC') &
sha1_df.fname.str.endswith('edf'))
sha1_df = sha1_df[select_age_records]
sha1_df['id'] = [name[:6] for name in sha1_df.fname]
# Load and massage the data.
data = pd.read_excel(subjects_fname)
data = data.rename(index=str, columns={'sex (F=1)': 'sex',
'LightsOff': 'lights off'})
data['sex'] = (data.sex.astype('category')
.cat.rename_categories({1: 'female', 2: 'male'}))
data['id'] = ['SC4{:02d}{:1d}'.format(s, n)
for s, n in zip(data.subject, data.night)]
data = data.set_index('id').join(sha1_df.set_index('id')).dropna()
data['record type'] = (data.fname.str.split('-', expand=True)[1]
.str.split('.', expand=True)[0]
.astype('category'))
if LooseVersion(pd.__version__) < LooseVersion('0.21.0'):
data = data.reset_index().drop(labels=['id'], axis=1)
else:
data = data.reset_index().drop(columns=['id'])
data = data[['subject', 'night', 'record type', 'age', 'sex', 'lights off',
'sha', 'fname']]
# Save the data.
data.to_csv(fname, index=False)
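# Illustrative sketch (not part of the original module): the identifiers built
# above follow the pattern 'SC4' + two-digit subject number + night number, so
# subject 0 recorded on night 1 becomes 'SC4001', which matches the first six
# characters of the corresponding EDF file names listed in SHA1SUMS.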
def _check_subjects(subjects, n_subjects, missing=None, on_missing='raise'):
"""Check whether subjects are available.
Parameters
----------
subjects : list
Subject numbers to be checked.
n_subjects : int
Number of subjects available.
missing : list | None
Subject numbers that are missing.
on_missing : 'raise' | 'warn' | 'ignore'
What to do if one or several subjects are not available. Valid keys
        are 'raise' | 'warn' | 'ignore'. Default is 'raise'. If on_missing
is 'warn' it will proceed but warn, if 'ignore' it will proceed
silently.
"""
valid_subjects = np.arange(n_subjects)
if missing is not None:
valid_subjects = np.setdiff1d(valid_subjects, missing)
unknown_subjects = np.setdiff1d(subjects, valid_subjects)
if unknown_subjects.size > 0:
subjects_list = ', '.join([str(s) for s in unknown_subjects])
msg = (f'This dataset contains subjects 0 to {n_subjects - 1} with '
f'missing subjects {missing}. Unknown subjects: '
f'{subjects_list}.')
_on_missing(on_missing, msg)
|
from homeassistant.components.light import SUPPORT_BRIGHTNESS, SUPPORT_COLOR
from tests.components.homekit_controller.common import (
Helper,
setup_accessories_from_file,
setup_test_accessories,
)
async def test_aqara_gateway_setup(hass):
"""Test that a Aqara Gateway can be correctly setup in HA."""
accessories = await setup_accessories_from_file(hass, "aqara_gateway.json")
config_entry, pairing = await setup_test_accessories(hass, accessories)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
    # Check that the alarm control panel is correctly found and set up
alarm_id = "alarm_control_panel.aqara_hub_1563"
alarm = entity_registry.async_get(alarm_id)
assert alarm.unique_id == "homekit-0000000123456789-66304"
alarm_helper = Helper(
hass,
"alarm_control_panel.aqara_hub_1563",
pairing,
accessories[0],
config_entry,
)
alarm_state = await alarm_helper.poll_and_get_state()
assert alarm_state.attributes["friendly_name"] == "Aqara Hub-1563"
# Check that the light is correctly found and set up
light = entity_registry.async_get("light.aqara_hub_1563")
assert light.unique_id == "homekit-0000000123456789-65792"
light_helper = Helper(
hass, "light.aqara_hub_1563", pairing, accessories[0], config_entry
)
light_state = await light_helper.poll_and_get_state()
assert light_state.attributes["friendly_name"] == "Aqara Hub-1563"
assert light_state.attributes["supported_features"] == (
SUPPORT_BRIGHTNESS | SUPPORT_COLOR
)
device_registry = await hass.helpers.device_registry.async_get_registry()
    # All the entities are services of the same accessory,
    # so at the protocol level it looks like a single physical device.
assert alarm.device_id == light.device_id
device = device_registry.async_get(light.device_id)
assert device.manufacturer == "Aqara"
assert device.name == "Aqara Hub-1563"
assert device.model == "ZHWA11LM"
assert device.sw_version == "1.4.7"
assert device.via_device_id is None
|
import pycrfsuite
from flask import current_app as app
from app.nlu import spacy_tokenizer
class EntityExtractor:
"""
Performs NER training, prediction, model import/export
"""
    def __init__(self, synonyms=None):
        self.synonyms = synonyms if synonyms is not None else {}
def replace_synonyms(self, entities):
"""
        Replace extracted entity values with the
        root word by matching against the synonyms dict.
:param entities:
:return:
"""
for entity in entities.keys():
entity_value = str(entities[entity])
if entity_value.lower() in self.synonyms:
entities[entity] = self.synonyms[entity_value.lower()]
return entities
def extract_features(self, sent, i):
"""
        Extract CRF features for the token at position i in a tagged sentence
:param sent:
:param i:
:return:
"""
word = sent[i][0]
postag = sent[i][1]
features = [
'bias',
'word.lower=' + word.lower(),
'word[-3:]=' + word[-3:],
'word[-2:]=' + word[-2:],
'word.isupper=%s' % word.isupper(),
'word.istitle=%s' % word.istitle(),
'word.isdigit=%s' % word.isdigit(),
'postag=' + postag,
'postag[:2]=' + postag[:2],
]
if i > 0:
word1 = sent[i - 1][0]
postag1 = sent[i - 1][1]
features.extend([
'-1:word.lower=' + word1.lower(),
'-1:word.istitle=%s' % word1.istitle(),
'-1:word.isupper=%s' % word1.isupper(),
'-1:postag=' + postag1,
'-1:postag[:2]=' + postag1[:2],
])
else:
features.append('BOS')
if i < len(sent) - 1:
word1 = sent[i + 1][0]
postag1 = sent[i + 1][1]
features.extend([
'+1:word.lower=' + word1.lower(),
'+1:word.istitle=%s' % word1.istitle(),
'+1:word.isupper=%s' % word1.isupper(),
'+1:postag=' + postag1,
'+1:postag[:2]=' + postag1[:2],
])
else:
features.append('EOS')
return features
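    # Illustrative sketch (assumed example, not part of the original module):
    # for a POS-tagged sentence such as [('Book', 'VB'), ('Paris', 'NNP')],
    # extract_features(sent, 0) yields, in order, the token features
    # ('bias', 'word.lower=book', ..., 'postag=VB', 'postag[:2]=VB'),
    # then 'BOS' because it is the first token, and finally the '+1:' features
    # describing the following token ('Paris', 'NNP').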
def sent_to_features(self, sent):
"""
Extract features from training Data
:param sent:
:return:
"""
return [self.extract_features(sent, i) for i in range(len(sent))]
def sent_to_labels(self, sent):
"""
Extract labels from training data
:param sent:
:return:
"""
return [label for token, postag, label in sent]
def sent_to_tokens(self, sent):
"""
Extract tokens from training data
:param sent:
:return:
"""
return [token for token, postag, label in sent]
def train(self, train_sentences, model_name):
"""
        Train an NER model with the given training sentences and model name
:param train_sentences:
:param model_name:
:return:
"""
features = [self.sent_to_features(s) for s in train_sentences]
labels = [self.sent_to_labels(s) for s in train_sentences]
trainer = pycrfsuite.Trainer(verbose=False)
for xseq, yseq in zip(features, labels):
trainer.append(xseq, yseq)
trainer.set_params({
'c1': 1.0, # coefficient for L1 penalty
'c2': 1e-3, # coefficient for L2 penalty
'max_iterations': 50, # stop earlier
# include transitions that are possible, but not observed
'feature.possible_transitions': True
})
trainer.train('model_files/%s.model' % model_name)
return True
# Extract Labels from BIO tagged sentence
def crf2json(self, tagged_sentence):
"""
Extract label-value pair from NER prediction output
:param tagged_sentence:
:return:
"""
labeled = {}
labels = set()
for s, tp in tagged_sentence:
if tp != "O":
label = tp[2:]
if tp.startswith("B"):
labeled[label] = s
labels.add(label)
elif tp.startswith("I") and (label in labels):
labeled[label] += " %s" % s
return labeled
def extract_ner_labels(self, predicted_labels):
"""
Extract name of labels from NER
:param predicted_labels:
:return:
"""
labels = []
for tp in predicted_labels:
if tp != "O":
labels.append(tp[2:])
return labels
def predict(self, model_name, sentence):
"""
Predict NER labels for given model and query
:param model_name:
:param sentence:
:return:
"""
from app.nlu.tasks import pos_tagger
doc = spacy_tokenizer(sentence)
words = [token.text for token in doc]
tagged_token = pos_tagger(sentence)
tagger = pycrfsuite.Tagger()
tagger.open("{}/{}.model".format(app.config["MODELS_DIR"], model_name))
predicted_labels = tagger.tag(self.sent_to_features(tagged_token))
extracted_entities = self.crf2json(
zip(words, predicted_labels))
return self.replace_synonyms(extracted_entities)
@staticmethod
def json2crf(training_data):
"""
Takes json annotated data and converts to
CRFSuite training data representation
:param training_data:
:return labeled_examples:
"""
from app.nlu.tasks import sentence_tokenize, pos_tag_and_label
labeled_examples = []
for example in training_data:
# POS tag and initialize bio label as 'O' for all the tokens
tagged_example = pos_tag_and_label(example.get("text"))
# find no of words before selection
            for entity in example.get("entities"):
try:
                    begin_index = entity.get("begin")
                    end_index = entity.get("end")
# find no of words before the entity
inverse_selection = example.get("text")[0:begin_index - 1]
inverse_selection = sentence_tokenize(inverse_selection)
inverse_selection = inverse_selection.split(" ")
inverse_word_count = len(inverse_selection)
# get the entity value from selection
selection = example.get("text")[begin_index:end_index]
tokens = sentence_tokenize(selection).split(" ")
selection_word_count = len(tokens)
# build BIO tagging
for i in range(1, selection_word_count + 1):
if i == 1:
bio = "B-" + enitity.get("name")
else:
bio = "I-" + enitity.get("name")
tagged_example[(inverse_word_count + i) - 1][2] = bio
                except Exception:
                    # Catch and skip invalid offsets and annotations.
continue
labeled_examples.append(tagged_example)
return labeled_examples
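    # Illustrative sketch (assumed example, not part of the original module):
    # for the annotated text "book a flight to paris" with a "city" entity
    # covering "paris", json2crf leaves every token labeled 'O' except the
    # entity tokens, which become 'B-city' (first word) and 'I-city' for any
    # following words, e.g. [..., ['to', 'IN', 'O'], ['paris', 'NN', 'B-city']].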
|
import logging
import threading
import datetime
import time
try:
import notify2
except ImportError:
notify2 = None
# TODO https://askubuntu.com/questions/110969/notify-send-ignores-timeout
NOTIFY_TIMEOUT = 4000
class BatteryNotifier(threading.Thread):
"""
Thread to notify about battery
"""
def __init__(self, parent, device_id, device_name):
super(BatteryNotifier, self).__init__()
self._logger = logging.getLogger('razer.device{0}.batterynotifier'.format(device_id))
self._notify2 = notify2 is not None
self.event = threading.Event()
self.frequency = 0
if self._notify2:
try:
notify2.init('openrazer_daemon')
except Exception as err:
self._logger.warning("Failed to init notification daemon, err: {0}".format(err))
self._notify2 = False
self._shutdown = False
self._device_name = device_name
# Could save reference to parent but only need battery level function
self._get_battery_func = parent.getBattery
if self._notify2:
self._notification = notify2.Notification(summary="{0}")
self._notification.set_timeout(NOTIFY_TIMEOUT)
self._last_notify_time = datetime.datetime(1970, 1, 1)
@property
def shutdown(self):
"""
Get the shutdown flag
"""
return self._shutdown
@shutdown.setter
def shutdown(self, value):
"""
Set the shutdown flag
:param value: Shutdown
:type value: bool
"""
self._shutdown = value
def notify_battery(self):
now = datetime.datetime.now()
if (now - self._last_notify_time).seconds > self.frequency:
# Update last notified
self._last_notify_time = now
battery_level = self._get_battery_func()
            # Sometimes over wireless the battery level isn't returned; retry once.
if battery_level == -1.0:
time.sleep(0.2)
battery_level = self._get_battery_func()
if battery_level < 10.0:
if self._notify2:
self._notification.update(summary="{0} Battery at {1:.1f}%".format(self._device_name, battery_level), message='Please charge your device', icon='notification-battery-low')
self._notification.show()
else:
if self._notify2:
self._notification.update(summary="{0} Battery at {1:.1f}%".format(self._device_name, battery_level))
self._notification.show()
if self._notify2:
self._logger.debug("{0} Battery at {1:.1f}%".format(self._device_name, battery_level))
def run(self):
"""
Main thread function
"""
while not self._shutdown:
if self.event.is_set() and self.frequency > 0:
self.notify_battery()
time.sleep(0.1)
self._logger.debug("Shutting down battery notifier")
class BatteryManager(object):
"""
    Class which manages the overall process of notifying about battery levels
"""
def __init__(self, parent, device_number, device_name):
self._logger = logging.getLogger('razer.device{0}.batterymanager'.format(device_number))
self._parent = parent
self._battery_thread = BatteryNotifier(parent, device_number, device_name)
self._battery_thread.start()
self._is_closed = False
def close(self):
"""
        Close the manager and stop the battery notifier thread
"""
if not self._is_closed:
self._logger.debug("Closing Battery Manager")
self._is_closed = True
self._battery_thread.shutdown = True
self._battery_thread.join(timeout=2)
if self._battery_thread.is_alive():
self._logger.error("Could not stop BatteryNotify thread")
def __del__(self):
self.close()
@property
def active(self):
return self._battery_thread.event.is_set()
@active.setter
def active(self, value):
if value:
self._battery_thread.event.set()
else:
self._battery_thread.event.clear()
@property
def frequency(self):
return self._battery_thread.frequency
@frequency.setter
def frequency(self, frequency):
self._battery_thread.frequency = frequency
|
from . import nodes
from .visitor import NodeTransformer
def optimize(node, environment):
"""The context hint can be used to perform an static optimization
based on the context given."""
optimizer = Optimizer(environment)
return optimizer.visit(node)
class Optimizer(NodeTransformer):
def __init__(self, environment):
self.environment = environment
def generic_visit(self, node, *args, **kwargs):
node = super().generic_visit(node, *args, **kwargs)
# Do constant folding. Some other nodes besides Expr have
# as_const, but folding them causes errors later on.
if isinstance(node, nodes.Expr):
try:
return nodes.Const.from_untrusted(
node.as_const(args[0] if args else None),
lineno=node.lineno,
environment=self.environment,
)
except nodes.Impossible:
pass
return node
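# Illustrative usage sketch (assumption, not part of the original module):
#
#     >>> from jinja2 import Environment
#     >>> env = Environment()
#     >>> ast = env.parse('{{ 1 + 2 }}')
#     >>> folded = optimize(ast, env)  # Add(Const(1), Const(2)) folds into Const(3)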
|
from __future__ import absolute_import
import unittest
from .common_imports import etree, HelperTestCase, _bytes
from lxml.etree import PythonElementClassLookup
xml_str = _bytes('''\
<obj:root xmlns:obj="objectified" xmlns:other="otherNS">
<obj:c1 a1="A1" a2="A2" other:a3="A3">
<obj:c2>0</obj:c2>
<obj:c2>1</obj:c2>
<obj:c2>2</obj:c2>
<other:c2>3</other:c2>
<c2>3</c2>
</obj:c1>
</obj:root>''')
class PyClassLookupTestCase(HelperTestCase):
"""Test cases for the lxml.pyclasslookup class lookup mechanism.
"""
etree = etree
parser = etree.XMLParser()
Element = parser.makeelement
def tearDown(self):
self.parser.set_element_class_lookup(None)
super(PyClassLookupTestCase, self).tearDown()
def _setClassLookup(self, lookup_function):
class Lookup(PythonElementClassLookup):
def lookup(self, *args):
return lookup_function(*args)
self.parser.set_element_class_lookup( Lookup() )
def _buildElementClass(self):
class LocalElement(etree.ElementBase):
pass
return LocalElement
def XML(self, xml):
return self.etree.XML(xml, self.parser)
# --- Test cases
def test_lookup(self):
el_class = self._buildElementClass()
el_class.i = 1
def lookup(*args):
if el_class.i == 1:
el_class.i = 2
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertEqual(2, el_class.i)
def test_lookup_keep_ref_assertion(self):
el_class = self._buildElementClass()
el_class.EL = None
def lookup(doc, el):
if el_class.EL is None:
el_class.EL = el
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertNotEqual(None, el_class.EL)
self.assertRaises(ReferenceError, el_class.EL.getchildren)
def test_lookup_tag(self):
el_class = self._buildElementClass()
el_class.TAG = None
def lookup(doc, el):
if el_class.TAG is None:
el_class.TAG = el.tag
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertNotEqual(None, root.TAG)
self.assertEqual(root.tag, root.TAG)
def test_lookup_text(self):
el_class = self._buildElementClass()
el_class.TEXT = None
def lookup(doc, el):
if el_class.TEXT is None:
el_class.TEXT = el.text
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertNotEqual(None, root.TEXT)
self.assertEqual(root.text, root.TEXT)
def test_lookup_tail(self):
el_class = self._buildElementClass()
el_class.TAIL = None
def lookup(doc, el):
if el_class.TAIL is None:
el_class.TAIL = el.tail
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertEqual(root.tail, root.TAIL)
def test_lookup_attrib(self):
el_class = self._buildElementClass()
el_class.ATTRIB = None
def lookup(doc, el):
if el_class.ATTRIB is None:
el_class.ATTRIB = el[0].attrib
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
items1 = list(root[0].attrib.items())
items1.sort()
items2 = list(root.ATTRIB.items())
items2.sort()
self.assertEqual(items1, items2)
def test_lookup_prefix(self):
el_class = self._buildElementClass()
el_class.PREFIX = None
def lookup(doc, el):
if el_class.PREFIX is None:
el_class.PREFIX = el.prefix
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertEqual(root.prefix, root.PREFIX)
def test_lookup_sourceline(self):
el_class = self._buildElementClass()
el_class.LINE = None
def lookup(doc, el):
if el_class.LINE is None:
el_class.LINE = el.sourceline
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertEqual(root.sourceline, root.LINE)
def test_lookup_getitem(self):
el_class = self._buildElementClass()
el_class.CHILD_TAG = None
def lookup(doc, el):
el_class.CHILD_TAG = el[0].tag
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
child_tag = root.CHILD_TAG
self.assertNotEqual(None, child_tag)
self.assertEqual(root[0].tag, child_tag)
def test_lookup_getitem_neg(self):
el_class = self._buildElementClass()
el_class.CHILD_TAG = None
def lookup(doc, el):
if el_class.CHILD_TAG is None:
el_class.CHILD_TAG = el[-1].tag
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
child_tag = root.CHILD_TAG
self.assertNotEqual(None, child_tag)
self.assertEqual(root[-1].tag, child_tag)
def test_lookup_getslice(self):
el_class = self._buildElementClass()
el_class.CHILD_TAGS = None
def lookup(doc, el):
if el_class.CHILD_TAGS is None:
el_class.CHILD_TAGS = [ c.tag for c in el[1:-1] ]
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
child_tags = root.CHILD_TAGS
self.assertNotEqual(None, child_tags)
self.assertEqual([ c.tag for c in root[1:-1] ],
child_tags)
def test_lookup_len(self):
el_class = self._buildElementClass()
el_class.LEN = None
def lookup(doc, el):
if el_class.LEN is None:
el_class.LEN = len(el)
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertEqual(1, el_class.LEN)
def test_lookup_bool(self):
el_class = self._buildElementClass()
el_class.TRUE = None
def lookup(doc, el):
if el_class.TRUE is None:
el_class.TRUE = bool(el)
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertTrue(el_class.TRUE)
def test_lookup_get(self):
el_class = self._buildElementClass()
el_class.VAL = None
def lookup(doc, el):
if el_class.VAL is None:
el_class.VAL = el[0].get('a1')
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertNotEqual(None, el_class.VAL)
self.assertEqual(root[0].get('a1'), el_class.VAL)
def test_lookup_get_default(self):
el_class = self._buildElementClass()
default = str(id(el_class))
el_class.VAL = None
def lookup(doc, el):
if el_class.VAL is None:
el_class.VAL = el[0].get('unknownattribute', default)
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertEqual(default, el_class.VAL)
def test_lookup_getchildren(self):
el_class = self._buildElementClass()
el_class.CHILD_TAGS = None
def lookup(doc, el):
if el_class.CHILD_TAGS is None:
el_class.CHILD_TAGS = [ c.tag for c in el.getchildren() ]
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
child_tags = root.CHILD_TAGS
self.assertNotEqual(None, child_tags)
self.assertEqual([ c.tag for c in root.getchildren() ],
child_tags)
def test_lookup_iter_children(self):
el_class = self._buildElementClass()
el_class.CHILD_TAGS = None
def lookup(doc, el):
if el_class.CHILD_TAGS is None:
el_class.CHILD_TAGS = [ c.tag for c in el ]
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
child_tags = root.CHILD_TAGS
self.assertNotEqual(None, child_tags)
self.assertEqual([ c.tag for c in root.getchildren() ],
child_tags)
def test_lookup_iterchildren(self):
el_class = self._buildElementClass()
el_class.CHILD_TAGS = None
def lookup(doc, el):
if el_class.CHILD_TAGS is None:
el_class.CHILD_TAGS = [ c.tag for c in el.iterchildren() ]
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
child_tags = root.CHILD_TAGS
self.assertNotEqual(None, child_tags)
self.assertEqual([ c.tag for c in root.getchildren() ],
child_tags)
def test_lookup_iterchildren_tag(self):
el_class = self._buildElementClass()
el_class.CHILD_TAGS = None
def lookup(doc, el):
if not el_class.CHILD_TAGS:
el_class.CHILD_TAGS = [
c.tag for c in el.iterchildren(tag='{objectified}c2') ]
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
child_tags = root.CHILD_TAGS
self.assertNotEqual(None, child_tags)
self.assertEqual([], child_tags)
c1 = root[0]
child_tags = root.CHILD_TAGS
self.assertNotEqual(None, child_tags)
self.assertNotEqual([], child_tags)
self.assertEqual(
[ c.tag for c in root[0].iterchildren(tag='{objectified}c2') ],
child_tags)
def test_lookup_getparent(self):
el_class = self._buildElementClass()
el_class.PARENT = None
def lookup(doc, el):
if el_class.PARENT is None:
el_class.PARENT = el[0].getparent().tag
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertEqual(root.tag, root.PARENT)
def test_lookup_getnext(self):
el_class = self._buildElementClass()
el_class.NEXT = None
def lookup(doc, el):
if el_class.NEXT is None:
el_class.NEXT = el[0][1].getnext().tag
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertNotEqual(None, el_class.NEXT)
self.assertEqual(root[0][1].getnext().tag, el_class.NEXT)
def test_lookup_getprevious(self):
el_class = self._buildElementClass()
el_class.PREV = None
def lookup(doc, el):
if el_class.PREV is None:
el_class.PREV = el[0][1].getprevious().tag
return el_class
self._setClassLookup(lookup)
root = self.XML(xml_str)
self.assertNotEqual(None, el_class.PREV)
self.assertEqual(root[0][1].getprevious().tag, el_class.PREV)
def test_comments_fallback(self):
def return_none(*args):
return None
self._setClassLookup(return_none)
el = self.XML('<a><!-- hello world --></a>')
self.assertEqual(el[0].tag, self.etree.Comment)
self.assertEqual(el[0].text, " hello world ")
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(PyClassLookupTestCase)])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
import asyncio
import codecs
import json
import logging
import re
import aiohttp
import async_timeout
from hangups import event, exceptions
logger = logging.getLogger(__name__)
Utf8IncrementalDecoder = codecs.getincrementaldecoder('utf-8')
LEN_REGEX = re.compile(r'([0-9]+)\n', re.MULTILINE)
CHANNEL_URL = 'https://0.client-channel.google.com/client-channel/channel/bind'
# Long-polling requests send heartbeats every 15-30 seconds, so if we miss two
# in a row, consider the connection dead.
PUSH_TIMEOUT = 60
MAX_READ_BYTES = 1024 * 1024
class ChannelSessionError(exceptions.HangupsError):
"""hangups channel session error"""
def _best_effort_decode(data_bytes):
"""Decode as much of data_bytes as possible as UTF-8."""
decoder = Utf8IncrementalDecoder()
return decoder.decode(data_bytes)
class ChunkParser:
"""Parse data from the backward channel into chunks.
Responses from the backward channel consist of a sequence of chunks which
are streamed to the client. Each chunk is prefixed with its length,
followed by a newline. The length allows the client to identify when the
entire chunk has been received.
"""
def __init__(self):
# Buffer for bytes containing utf-8 text:
self._buf = b''
def get_chunks(self, new_data_bytes):
"""Yield chunks generated from received data.
The buffer may not be decodable as UTF-8 if there's a split multi-byte
character at the end. To handle this, do a "best effort" decode of the
buffer to decode as much of it as possible.
The length is actually the length of the string as reported by
JavaScript. JavaScript's string length function returns the number of
code units in the string, represented in UTF-16. We can emulate this by
encoding everything in UTF-16 and multiplying the reported length by 2.
Note that when encoding a string in UTF-16, Python will prepend a
byte-order character, so we need to remove the first two bytes.
"""
self._buf += new_data_bytes
while True:
buf_decoded = _best_effort_decode(self._buf)
buf_utf16 = buf_decoded.encode('utf-16')[2:]
length_str_match = LEN_REGEX.match(buf_decoded)
if length_str_match is None:
break
else:
length_str = length_str_match.group(1)
# Both lengths are in number of bytes in UTF-16 encoding.
# The length of the submission:
length = int(length_str) * 2
# The length of the submission length and newline:
length_length = len((length_str + '\n').encode('utf-16')[2:])
if len(buf_utf16) - length_length < length:
break
submission = buf_utf16[length_length:length_length + length]
yield submission.decode('utf-16')
# Drop the length and the submission itself from the beginning
# of the buffer.
drop_length = (len((length_str + '\n').encode()) +
len(submission.decode('utf-16').encode()))
self._buf = self._buf[drop_length:]
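# Illustrative sketch (not part of the original module): the length prefix
# counts UTF-16 code units, so a 5-character ASCII chunk is framed as
# b'5\nhello' and can be recovered with
#
#     >>> list(ChunkParser().get_chunks(b'5\nhello'))
#     ['hello']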
def _parse_sid_response(res):
"""Parse response format for request for new channel SID.
Example format (after parsing JS):
[ [0,["c","SID_HERE","",8]],
[1,[{"gsid":"GSESSIONID_HERE"}]]]
Returns (SID, gsessionid) tuple.
"""
res = json.loads(list(ChunkParser().get_chunks(res))[0])
sid = res[0][1][1]
gsessionid = res[1][1][0]['gsid']
return (sid, gsessionid)
class Channel:
"""BrowserChannel client."""
##########################################################################
# Public methods
##########################################################################
def __init__(self, session, max_retries, retry_backoff_base):
"""Create a new channel.
Args:
session (http_utils.Session): Request session.
max_retries (int): Number of retries for long-polling request.
retry_backoff_base (int): The base term for the long-polling
exponential backoff.
"""
# Event fired when channel connects with arguments ():
self.on_connect = event.Event('Channel.on_connect')
# Event fired when channel reconnects with arguments ():
self.on_reconnect = event.Event('Channel.on_reconnect')
# Event fired when channel disconnects with arguments ():
self.on_disconnect = event.Event('Channel.on_disconnect')
# Event fired when an array is received with arguments (array):
self.on_receive_array = event.Event('Channel.on_receive_array')
self._max_retries = max_retries
self._retry_backoff_base = retry_backoff_base
# True if the channel is currently connected:
self._is_connected = False
# True if the on_connect event has been called at least once:
self._on_connect_called = False
# Parser for assembling messages:
self._chunk_parser = None
# Session for HTTP requests:
self._session = session
# Discovered parameters:
self._sid_param = None
self._gsessionid_param = None
@property
def is_connected(self):
"""Whether the channel is currently connected."""
return self._is_connected
async def listen(self):
"""Listen for messages on the backwards channel.
This method only returns when the connection has been closed due to an
error.
"""
retries = 0 # Number of retries attempted so far
need_new_sid = True # whether a new SID is needed
while retries <= self._max_retries:
# After the first failed retry, back off exponentially longer after
# each attempt.
if retries > 0:
backoff_seconds = self._retry_backoff_base ** retries
logger.info('Backing off for %s seconds', backoff_seconds)
await asyncio.sleep(backoff_seconds)
# Request a new SID if we don't have one yet, or the previous one
# became invalid.
if need_new_sid:
await self._fetch_channel_sid()
need_new_sid = False
# Clear any previous push data, since if there was an error it
# could contain garbage.
self._chunk_parser = ChunkParser()
try:
await self._longpoll_request()
except ChannelSessionError as err:
logger.warning('Long-polling interrupted: %s', err)
need_new_sid = True
except exceptions.NetworkError as err:
logger.warning('Long-polling request failed: %s', err)
else:
# The connection closed successfully, so reset the number of
# retries.
retries = 0
continue
retries += 1
logger.info('retry attempt count is now %s', retries)
if self._is_connected:
self._is_connected = False
await self.on_disconnect.fire()
# If the request ended with an error, the client must account for
# messages being dropped during this time.
logger.error('Ran out of retries for long-polling request')
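    # Illustrative sketch (not part of the original module): with
    # retry_backoff_base=2 the waits between failed long-polling attempts grow
    # as 2, 4, 8, ... seconds until max_retries is exceeded.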
async def send_maps(self, map_list):
"""Sends a request to the server containing maps (dicts)."""
params = {
'VER': 8, # channel protocol version
'RID': 81188, # request identifier
'ctype': 'hangouts', # client type
}
if self._gsessionid_param is not None:
params['gsessionid'] = self._gsessionid_param
if self._sid_param is not None:
params['SID'] = self._sid_param
data_dict = dict(count=len(map_list), ofs=0)
for map_num, map_ in enumerate(map_list):
for map_key, map_val in map_.items():
data_dict['req{}_{}'.format(map_num, map_key)] = map_val
res = await self._session.fetch(
'post', CHANNEL_URL, params=params, data=data_dict
)
return res
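    # Illustrative sketch (assumed example, not part of the original module):
    # two maps [{'p': '1'}, {'q': '2'}] are flattened by send_maps into the
    # form data {'count': 2, 'ofs': 0, 'req0_p': '1', 'req1_q': '2'} before
    # being POSTed to CHANNEL_URL.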
##########################################################################
# Private methods
##########################################################################
async def _fetch_channel_sid(self):
"""Creates a new channel for receiving push data.
Sending an empty forward channel request will create a new channel on
the server.
There's a separate API to get the gsessionid alone that Hangouts for
Chrome uses, but if we don't send a gsessionid with this request, it
will return a gsessionid as well as the SID.
Raises hangups.NetworkError if the channel can not be created.
"""
logger.info('Requesting new gsessionid and SID...')
# Set SID and gsessionid to None so they aren't sent in by send_maps.
self._sid_param = None
self._gsessionid_param = None
res = await self.send_maps([])
self._sid_param, self._gsessionid_param = _parse_sid_response(res.body)
logger.info('New SID: {}'.format(self._sid_param))
logger.info('New gsessionid: {}'.format(self._gsessionid_param))
async def _longpoll_request(self):
"""Open a long-polling request and receive arrays.
This method uses keep-alive to make re-opening the request faster, but
the remote server will set the "Connection: close" header once an hour.
Raises hangups.NetworkError or ChannelSessionError.
"""
params = {
'VER': 8, # channel protocol version
'gsessionid': self._gsessionid_param,
'RID': 'rpc', # request identifier
't': 1, # trial
'SID': self._sid_param, # session ID
'CI': 0, # 0 if streaming/chunked requests should be used
'ctype': 'hangouts', # client type
'TYPE': 'xmlhttp', # type of request
}
logger.info('Opening new long-polling request')
try:
async with self._session.fetch_raw('GET', CHANNEL_URL,
params=params) as res:
if res.status != 200:
if res.status == 400 and res.reason == 'Unknown SID':
raise ChannelSessionError('SID became invalid')
raise exceptions.NetworkError(
                        'Request returned unexpected status: {}: {}'.format(
res.status, res.reason))
while True:
async with async_timeout.timeout(PUSH_TIMEOUT):
chunk = await res.content.read(MAX_READ_BYTES)
if not chunk:
break
await self._on_push_data(chunk)
except asyncio.TimeoutError:
raise exceptions.NetworkError('Request timed out')
except aiohttp.ServerDisconnectedError as err:
raise exceptions.NetworkError(
'Server disconnected error: %s' % err)
except aiohttp.ClientPayloadError:
raise ChannelSessionError('SID is about to expire')
except aiohttp.ClientError as err:
raise exceptions.NetworkError('Request connection error: %s' % err)
async def _on_push_data(self, data_bytes):
"""Parse push data and trigger events."""
logger.debug('Received chunk:\n{}'.format(data_bytes))
for chunk in self._chunk_parser.get_chunks(data_bytes):
# Consider the channel connected once the first chunk is received.
if not self._is_connected:
if self._on_connect_called:
self._is_connected = True
await self.on_reconnect.fire()
else:
self._on_connect_called = True
self._is_connected = True
await self.on_connect.fire()
# chunk contains a container array
container_array = json.loads(chunk)
# container array is an array of inner arrays
for inner_array in container_array:
# inner_array always contains 2 elements, the array_id and the
# data_array.
array_id, data_array = inner_array
logger.debug('Chunk contains data array with id %r:\n%r',
array_id, data_array)
await self.on_receive_array.fire(data_array)
|
import os
import sys
import time
import warnings
import contextlib
import portend
class Timeouts:
occupied = 5
free = 1
class ServerAdapter(object):
"""Adapter for an HTTP server.
If you need to start more than one HTTP server (to serve on multiple
ports, or protocols, etc.), you can manually register each one and then
start them all with bus.start::
s1 = ServerAdapter(bus, MyWSGIServer(host='0.0.0.0', port=80))
s2 = ServerAdapter(bus, another.HTTPServer(host='127.0.0.1', SSL=True))
s1.subscribe()
s2.subscribe()
bus.start()
"""
def __init__(self, bus, httpserver=None, bind_addr=None):
self.bus = bus
self.httpserver = httpserver
self.bind_addr = bind_addr
self.interrupt = None
self.running = False
def subscribe(self):
self.bus.subscribe('start', self.start)
self.bus.subscribe('stop', self.stop)
def unsubscribe(self):
self.bus.unsubscribe('start', self.start)
self.bus.unsubscribe('stop', self.stop)
def start(self):
"""Start the HTTP server."""
if self.running:
self.bus.log('Already serving on %s' % self.description)
return
self.interrupt = None
if not self.httpserver:
raise ValueError('No HTTP server has been created.')
if not os.environ.get('LISTEN_PID', None):
# Start the httpserver in a new thread.
if isinstance(self.bind_addr, tuple):
portend.free(*self.bind_addr, timeout=Timeouts.free)
import threading
t = threading.Thread(target=self._start_http_thread)
t.setName('HTTPServer ' + t.getName())
t.start()
self.wait()
self.running = True
self.bus.log('Serving on %s' % self.description)
start.priority = 75
@property
def description(self):
"""
A description about where this server is bound.
"""
if self.bind_addr is None:
on_what = 'unknown interface (dynamic?)'
elif isinstance(self.bind_addr, tuple):
on_what = self._get_base()
else:
on_what = 'socket file: %s' % self.bind_addr
return on_what
def _get_base(self):
if not self.httpserver:
return ''
host, port = self.bound_addr
if getattr(self.httpserver, 'ssl_adapter', None):
scheme = 'https'
if port != 443:
host += ':%s' % port
else:
scheme = 'http'
if port != 80:
host += ':%s' % port
return '%s://%s' % (scheme, host)
def _start_http_thread(self):
"""HTTP servers MUST be running in new threads, so that the
        main thread persists to receive KeyboardInterrupts. If an
exception is raised in the httpserver's thread then it's
trapped here, and the bus (and therefore our httpserver)
are shut down.
"""
try:
self.httpserver.start()
except KeyboardInterrupt:
self.bus.log('<Ctrl-C> hit: shutting down HTTP server')
self.interrupt = sys.exc_info()[1]
self.bus.exit()
except SystemExit:
self.bus.log('SystemExit raised: shutting down HTTP server')
self.interrupt = sys.exc_info()[1]
self.bus.exit()
raise
except Exception:
self.interrupt = sys.exc_info()[1]
self.bus.log('Error in HTTP server: shutting down',
traceback=True, level=40)
self.bus.exit()
raise
def wait(self):
"""Wait until the HTTP server is ready to receive requests."""
while not getattr(self.httpserver, 'ready', False):
if self.interrupt:
raise self.interrupt
time.sleep(.1)
# bypass check when LISTEN_PID is set
if os.environ.get('LISTEN_PID', None):
return
# bypass check when running via socket-activation
# (for socket-activation the port will be managed by systemd)
if not isinstance(self.bind_addr, tuple):
return
# wait for port to be occupied
with _safe_wait(*self.bound_addr):
portend.occupied(*self.bound_addr, timeout=Timeouts.occupied)
@property
def bound_addr(self):
"""
The bind address, or if it's an ephemeral port and the
socket has been bound, return the actual port bound.
"""
host, port = self.bind_addr
if port == 0 and self.httpserver.socket:
# Bound to ephemeral port. Get the actual port allocated.
port = self.httpserver.socket.getsockname()[1]
return host, port
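    # Illustrative sketch (not part of the original module): binding to
    # ('127.0.0.1', 0) asks the OS for an ephemeral port; once the socket is
    # bound, bound_addr reports the real port via socket.getsockname().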
def stop(self):
"""Stop the HTTP server."""
if self.running:
# stop() MUST block until the server is *truly* stopped.
self.httpserver.stop()
# Wait for the socket to be truly freed.
if isinstance(self.bind_addr, tuple):
portend.free(*self.bound_addr, timeout=Timeouts.free)
self.running = False
self.bus.log('HTTP Server %s shut down' % self.httpserver)
else:
self.bus.log('HTTP Server %s already shut down' % self.httpserver)
stop.priority = 25
def restart(self):
"""Restart the HTTP server."""
self.stop()
self.start()
class FlupCGIServer(object):
"""Adapter for a flup.server.cgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the CGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.cgi import WSGIServer
self.cgiserver = WSGIServer(*self.args, **self.kwargs)
self.ready = True
self.cgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
class FlupFCGIServer(object):
"""Adapter for a flup.server.fcgi.WSGIServer."""
def __init__(self, *args, **kwargs):
if kwargs.get('bindAddress', None) is None:
import socket
if not hasattr(socket, 'fromfd'):
raise ValueError(
'Dynamic FCGI server not available on this platform. '
'You must use a static or external one by providing a '
'legal bindAddress.')
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the FCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.fcgi import WSGIServer
self.fcgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.fcgiserver._installSignalHandlers = lambda: None
self.fcgiserver._oldSIGs = []
self.ready = True
self.fcgiserver.run()
def stop(self):
"""Stop the HTTP server."""
# Forcibly stop the fcgi server main event loop.
self.fcgiserver._keepGoing = False
# Force all worker threads to die off.
self.fcgiserver._threadPool.maxSpare = (
self.fcgiserver._threadPool._idleCount)
self.ready = False
class FlupSCGIServer(object):
"""Adapter for a flup.server.scgi.WSGIServer."""
def __init__(self, *args, **kwargs):
self.args = args
self.kwargs = kwargs
self.ready = False
def start(self):
"""Start the SCGI server."""
# We have to instantiate the server class here because its __init__
# starts a threadpool. If we do it too early, daemonize won't work.
from flup.server.scgi import WSGIServer
self.scgiserver = WSGIServer(*self.args, **self.kwargs)
# TODO: report this bug upstream to flup.
# If we don't set _oldSIGs on Windows, we get:
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 108, in run
# self._restoreSignalHandlers()
# File "C:\Python24\Lib\site-packages\flup\server\threadedserver.py",
# line 156, in _restoreSignalHandlers
# for signum,handler in self._oldSIGs:
# AttributeError: 'WSGIServer' object has no attribute '_oldSIGs'
self.scgiserver._installSignalHandlers = lambda: None
self.scgiserver._oldSIGs = []
self.ready = True
self.scgiserver.run()
def stop(self):
"""Stop the HTTP server."""
self.ready = False
# Forcibly stop the scgi server main event loop.
self.scgiserver._keepGoing = False
# Force all worker threads to die off.
self.scgiserver._threadPool.maxSpare = 0
@contextlib.contextmanager
def _safe_wait(host, port):
"""
On systems where a loopback interface is not available and the
server is bound to all interfaces, it's difficult to determine
whether the server is in fact occupying the port. In this case,
just issue a warning and move on. See issue #1100.
"""
try:
yield
except portend.Timeout:
if host == portend.client_host(host):
raise
msg = 'Unable to verify that the server is bound on %r' % port
warnings.warn(msg)
|
from mock import patch, Mock
from arctic._compression import compress, compress_array, decompress, decompress_array, enable_parallel_lz4
def test_compress():
assert len(compress(b'foobar')) > 0
def test_compress_LZ4():
cfn = Mock()
with patch('arctic._compression.lz4_compress', cfn):
compress(b"foo")
assert cfn.call_count == 1
def test_compress_array():
assert len(compress_array([b"foobar"*10])) > 0
assert isinstance(compress_array([b"foobar"*10]), list)
def test_compress_array_usesLZ4():
cfn = Mock()
with patch('arctic._compression.lz4_compress', cfn):
compress_array([b"foo"] * 100)
assert len(cfn.call_args_list) == 100 # call_count is not thread safe
def test_compress_array_LZ4_sequential():
cfn = Mock()
with patch('arctic._compression.lz4_compress', cfn):
compress_array([b"foo"] * 49)
assert len(cfn.call_args_list) == 49
def test_decompress():
assert decompress(compress(b"foo")) == b"foo"
def test_decompress_array():
ll = [('foo%s' % i).encode('ascii') for i in range(100)]
assert decompress_array(compress_array(ll)) == ll
def test_compression_equal_regardless_parallel_mode():
a = [b'spam '] * 666
with patch('arctic._compression.ENABLE_PARALLEL', True):
parallel = compress_array(a)
with patch('arctic._compression.ENABLE_PARALLEL', False):
serial = compress_array(a)
assert serial == parallel
def test_enable_parallel_lz4():
enable_parallel_lz4(True)
from arctic._compression import ENABLE_PARALLEL
assert(ENABLE_PARALLEL is True)
enable_parallel_lz4(False)
from arctic._compression import ENABLE_PARALLEL
assert(ENABLE_PARALLEL is False)
def test_compress_empty_string():
assert(decompress(compress(b'')) == b'')
|
import os.path
import cherrypy
class GeneratorDemo:
def header(self):
return '<html><body><h2>Generators rule!</h2>'
def footer(self):
return '</body></html>'
@cherrypy.expose
def index(self):
# Let's make up a list of users for presentation purposes
users = ['Remi', 'Carlos', 'Hendrik', 'Lorenzo Lamas']
# Every yield line adds one part to the total result body.
yield self.header()
yield '<h3>List of users:</h3>'
for user in users:
yield '%s<br/>' % user
yield self.footer()
tutconf = os.path.join(os.path.dirname(__file__), 'tutorial.conf')
if __name__ == '__main__':
# CherryPy always starts with app.root when trying to map request URIs
# to objects, so we need to mount a request handler root. A request
    # to '/' will be mapped to GeneratorDemo().index().
cherrypy.quickstart(GeneratorDemo(), config=tutconf)
|
from datetime import datetime, timedelta
import logging
from todoist.api import TodoistAPI
import voluptuous as vol
from homeassistant.components.calendar import PLATFORM_SCHEMA, CalendarEventDevice
from homeassistant.const import CONF_ID, CONF_NAME, CONF_TOKEN
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.template import DATE_STR_FORMAT
from homeassistant.util import dt
from .const import (
ALL_DAY,
ALL_TASKS,
CHECKED,
COMPLETED,
CONF_EXTRA_PROJECTS,
CONF_PROJECT_DUE_DATE,
CONF_PROJECT_LABEL_WHITELIST,
CONF_PROJECT_WHITELIST,
CONTENT,
DATETIME,
DESCRIPTION,
DOMAIN,
DUE,
DUE_DATE,
DUE_DATE_LANG,
DUE_DATE_STRING,
DUE_DATE_VALID_LANGS,
DUE_TODAY,
END,
ID,
LABELS,
NAME,
OVERDUE,
PRIORITY,
PROJECT_ID,
PROJECT_NAME,
PROJECTS,
SERVICE_NEW_TASK,
START,
SUMMARY,
TASKS,
)
_LOGGER = logging.getLogger(__name__)
NEW_TASK_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(CONTENT): cv.string,
vol.Optional(PROJECT_NAME, default="inbox"): vol.All(cv.string, vol.Lower),
vol.Optional(LABELS): cv.ensure_list_csv,
vol.Optional(PRIORITY): vol.All(vol.Coerce(int), vol.Range(min=1, max=4)),
vol.Exclusive(DUE_DATE_STRING, "due_date"): cv.string,
vol.Optional(DUE_DATE_LANG): vol.All(cv.string, vol.In(DUE_DATE_VALID_LANGS)),
vol.Exclusive(DUE_DATE, "due_date"): cv.string,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_EXTRA_PROJECTS, default=[]): vol.All(
cv.ensure_list,
vol.Schema(
[
vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_PROJECT_DUE_DATE): vol.Coerce(int),
vol.Optional(CONF_PROJECT_WHITELIST, default=[]): vol.All(
cv.ensure_list, [vol.All(cv.string, vol.Lower)]
),
vol.Optional(
CONF_PROJECT_LABEL_WHITELIST, default=[]
): vol.All(cv.ensure_list, [vol.All(cv.string, vol.Lower)]),
}
)
]
),
),
}
)
SCAN_INTERVAL = timedelta(minutes=15)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Todoist platform."""
token = config.get(CONF_TOKEN)
# Look up IDs based on (lowercase) names.
project_id_lookup = {}
label_id_lookup = {}
api = TodoistAPI(token)
api.sync()
# Setup devices:
# Grab all projects.
projects = api.state[PROJECTS]
# Grab all labels
labels = api.state[LABELS]
# Add all Todoist-defined projects.
project_devices = []
for project in projects:
# Project is an object, not a dict!
# Because of that, we convert what we need to a dict.
project_data = {CONF_NAME: project[NAME], CONF_ID: project[ID]}
project_devices.append(TodoistProjectDevice(hass, project_data, labels, api))
# Cache the names so we can easily look up name->ID.
project_id_lookup[project[NAME].lower()] = project[ID]
# Cache all label names
for label in labels:
label_id_lookup[label[NAME].lower()] = label[ID]
# Check config for more projects.
extra_projects = config[CONF_EXTRA_PROJECTS]
for project in extra_projects:
# Special filter: By date
project_due_date = project.get(CONF_PROJECT_DUE_DATE)
# Special filter: By label
project_label_filter = project[CONF_PROJECT_LABEL_WHITELIST]
# Special filter: By name
# Names must be converted into IDs.
project_name_filter = project[CONF_PROJECT_WHITELIST]
project_id_filter = [
project_id_lookup[project_name.lower()]
for project_name in project_name_filter
]
# Create the custom project and add it to the devices array.
project_devices.append(
TodoistProjectDevice(
hass,
project,
labels,
api,
project_due_date,
project_label_filter,
project_id_filter,
)
)
add_entities(project_devices)
def handle_new_task(call):
"""Call when a user creates a new Todoist Task from Home Assistant."""
project_name = call.data[PROJECT_NAME]
project_id = project_id_lookup[project_name]
# Create the task
item = api.items.add(call.data[CONTENT], project_id=project_id)
if LABELS in call.data:
task_labels = call.data[LABELS]
label_ids = [label_id_lookup[label.lower()] for label in task_labels]
item.update(labels=label_ids)
if PRIORITY in call.data:
item.update(priority=call.data[PRIORITY])
_due: dict = {}
if DUE_DATE_STRING in call.data:
_due["string"] = call.data[DUE_DATE_STRING]
if DUE_DATE_LANG in call.data:
_due["lang"] = call.data[DUE_DATE_LANG]
if DUE_DATE in call.data:
due_date = dt.parse_datetime(call.data[DUE_DATE])
if due_date is None:
due = dt.parse_date(call.data[DUE_DATE])
due_date = datetime(due.year, due.month, due.day)
# Format it in the manner Todoist expects
due_date = dt.as_utc(due_date)
date_format = "%Y-%m-%dT%H:%M"
due_date = datetime.strftime(due_date, date_format)
_due["date"] = due_date
if _due:
item.update(due=_due)
# Commit changes
api.commit()
_LOGGER.debug("Created Todoist task: %s", call.data[CONTENT])
hass.services.register(
DOMAIN, SERVICE_NEW_TASK, handle_new_task, schema=NEW_TASK_SERVICE_SCHEMA
)
def _parse_due_date(data: dict) -> datetime:
"""Parse the due date dict into a datetime object."""
# Add time information to date only strings.
if len(data["date"]) == 10:
data["date"] += "T00:00:00"
# If there is no timezone provided, use UTC.
if data["timezone"] is None:
data["date"] += "Z"
return dt.parse_datetime(data["date"])
class TodoistProjectDevice(CalendarEventDevice):
"""A device for getting the next Task from a Todoist Project."""
def __init__(
self,
hass,
data,
labels,
token,
latest_task_due_date=None,
whitelisted_labels=None,
whitelisted_projects=None,
):
"""Create the Todoist Calendar Event Device."""
self.data = TodoistProjectData(
data,
labels,
token,
latest_task_due_date,
whitelisted_labels,
whitelisted_projects,
)
self._cal_data = {}
self._name = data[CONF_NAME]
@property
def event(self):
"""Return the next upcoming event."""
return self.data.event
@property
def name(self):
"""Return the name of the entity."""
return self._name
def update(self):
"""Update all Todoist Calendars."""
self.data.update()
# Set Todoist-specific data that can't easily be grabbed
self._cal_data[ALL_TASKS] = [
task[SUMMARY] for task in self.data.all_project_tasks
]
async def async_get_events(self, hass, start_date, end_date):
"""Get all events in a specific time frame."""
return await self.data.async_get_events(hass, start_date, end_date)
@property
def device_state_attributes(self):
"""Return the device state attributes."""
if self.data.event is None:
# No tasks, we don't REALLY need to show anything.
return None
return {
DUE_TODAY: self.data.event[DUE_TODAY],
OVERDUE: self.data.event[OVERDUE],
ALL_TASKS: self._cal_data[ALL_TASKS],
PRIORITY: self.data.event[PRIORITY],
LABELS: self.data.event[LABELS],
}
class TodoistProjectData:
"""
Class used by the Task Device service object to hold all Todoist Tasks.
This is analogous to the GoogleCalendarData found in the Google Calendar
component.
Takes an object with a 'name' field and optionally an 'id' field (either
user-defined or from the Todoist API), a Todoist API token, and an optional
integer specifying the latest number of days from now a task can be due (7
means everything due in the next week, 0 means today, etc.).
This object has an exposed 'event' property (used by the Calendar platform
to determine the next calendar event) and an exposed 'update' method (used
by the Calendar platform to poll for new calendar events).
The 'event' is a representation of a Todoist Task, with defined parameters
of 'due_today' (is the task due today?), 'all_day' (does the task have a
due date?), 'task_labels' (all labels assigned to the task), 'message'
(the content of the task, e.g. 'Fetch Mail'), 'description' (a URL pointing
to the task on the Todoist website), 'end_time' (what time the event is
due), 'start_time' (what time this event was last updated), 'overdue' (is
the task past its due date?), 'priority' (1-4, how important the task is,
with 4 being the most important), and 'all_tasks' (all tasks in this
project, sorted by how important they are).
'offset_reached', 'location', and 'friendly_name' are defined by the
platform itself, but are not used by this component at all.
The 'update' method polls the Todoist API for new projects/tasks, as well
as any updates to current projects/tasks. This occurs every SCAN_INTERVAL minutes.
"""
def __init__(
self,
project_data,
labels,
api,
latest_task_due_date=None,
whitelisted_labels=None,
whitelisted_projects=None,
):
"""Initialize a Todoist Project."""
self.event = None
self._api = api
self._name = project_data[CONF_NAME]
# If no ID is defined, fetch all tasks.
self._id = project_data.get(CONF_ID)
# All labels the user has defined, for easy lookup.
self._labels = labels
# Not tracked: order, indent, comment_count.
self.all_project_tasks = []
# The latest date a task can be due (for making lists of everything
# due today, or everything due in the next week, for example).
if latest_task_due_date is not None:
self._latest_due_date = dt.utcnow() + timedelta(days=latest_task_due_date)
else:
self._latest_due_date = None
# Only tasks with one of these labels will be included.
if whitelisted_labels is not None:
self._label_whitelist = whitelisted_labels
else:
self._label_whitelist = []
        # Only tasks from these whitelisted projects will be included.
if whitelisted_projects is not None:
self._project_id_whitelist = whitelisted_projects
else:
self._project_id_whitelist = []
def create_todoist_task(self, data):
"""
Create a dictionary based on a Task passed from the Todoist API.
Will return 'None' if the task is to be filtered out.
"""
task = {}
        # These fields are required to be in all returned task objects.
task[SUMMARY] = data[CONTENT]
task[COMPLETED] = data[CHECKED] == 1
task[PRIORITY] = data[PRIORITY]
task[DESCRIPTION] = "https://todoist.com/showTask?id={}".format(data[ID])
# All task Labels (optional parameter).
task[LABELS] = [
label[NAME].lower() for label in self._labels if label[ID] in data[LABELS]
]
if self._label_whitelist and (
not any(label in task[LABELS] for label in self._label_whitelist)
):
# We're not on the whitelist, return invalid task.
return None
# Due dates (optional parameter).
# The due date is the END date -- the task cannot be completed
# past this time.
# That means that the START date is the earliest time one can
# complete the task.
# Generally speaking, that means right now.
task[START] = dt.utcnow()
if data[DUE] is not None:
task[END] = _parse_due_date(data[DUE])
if self._latest_due_date is not None and (
task[END] > self._latest_due_date
):
# This task is out of range of our due date;
# it shouldn't be counted.
return None
task[DUE_TODAY] = task[END].date() == datetime.today().date()
# Special case: Task is overdue.
if task[END] <= task[START]:
task[OVERDUE] = True
# Set end time to the current time plus 1 hour.
# We're pretty much guaranteed to update within that 1 hour,
# so it should be fine.
task[END] = task[START] + timedelta(hours=1)
else:
task[OVERDUE] = False
else:
# If we ask for everything due before a certain date, don't count
# things which have no due dates.
if self._latest_due_date is not None:
return None
# Define values for tasks without due dates
task[END] = None
task[ALL_DAY] = True
task[DUE_TODAY] = False
task[OVERDUE] = False
        # Not tracked: id, comments, project_id, order, indent, recurring.
return task
@staticmethod
def select_best_task(project_tasks):
"""
Search through a list of events for the "best" event to select.
The "best" event is determined by the following criteria:
* A proposed event must not be completed
* A proposed event must have an end date (otherwise we go with
the event at index 0, selected above)
* A proposed event must be on the same day or earlier as our
current event
* If a proposed event is an earlier day than what we have so
far, select it
* If a proposed event is on the same day as our current event
and the proposed event has a higher priority than our current
event, select it
* If a proposed event is on the same day as our current event,
has the same priority as our current event, but is due earlier
in the day, select it
"""
# Start at the end of the list, so if tasks don't have a due date
# the newest ones are the most important.
event = project_tasks[-1]
for proposed_event in project_tasks:
if event == proposed_event:
continue
if proposed_event[COMPLETED]:
# Event is complete!
continue
if proposed_event[END] is None:
# No end time:
if event[END] is None and (proposed_event[PRIORITY] < event[PRIORITY]):
# They also have no end time,
# but we have a higher priority.
event = proposed_event
continue
if event[END] is None:
# We have an end time, they do not.
event = proposed_event
continue
if proposed_event[END].date() > event[END].date():
# Event is too late.
continue
if proposed_event[END].date() < event[END].date():
# Event is earlier than current, select it.
event = proposed_event
continue
if proposed_event[PRIORITY] > event[PRIORITY]:
# Proposed event has a higher priority.
event = proposed_event
continue
if proposed_event[PRIORITY] == event[PRIORITY] and (
proposed_event[END] < event[END]
):
event = proposed_event
continue
return event
async def async_get_events(self, hass, start_date, end_date):
"""Get all tasks in a specific time frame."""
if self._id is None:
project_task_data = [
task
for task in self._api.state[TASKS]
if not self._project_id_whitelist
or task[PROJECT_ID] in self._project_id_whitelist
]
else:
project_data = await hass.async_add_executor_job(
self._api.projects.get_data, self._id
)
project_task_data = project_data[TASKS]
events = []
for task in project_task_data:
if task["due"] is None:
continue
due_date = _parse_due_date(task["due"])
if start_date < due_date < end_date:
if due_date.hour == 0 and due_date.minute == 0:
# If the due date has no time data, return just the date so that it
# will render correctly as an all day event on a calendar.
due_date_value = due_date.strftime("%Y-%m-%d")
else:
due_date_value = due_date.isoformat()
event = {
"uid": task["id"],
"title": task["content"],
"start": due_date_value,
"end": due_date_value,
"allDay": True,
"summary": task["content"],
}
events.append(event)
return events
def update(self):
"""Get the latest data."""
if self._id is None:
self._api.reset_state()
self._api.sync()
project_task_data = [
task
for task in self._api.state[TASKS]
if not self._project_id_whitelist
or task[PROJECT_ID] in self._project_id_whitelist
]
else:
project_task_data = self._api.projects.get_data(self._id)[TASKS]
# If we have no data, we can just return right away.
if not project_task_data:
_LOGGER.debug("No data for %s", self._name)
self.event = None
return
# Keep an updated list of all tasks in this project.
project_tasks = []
for task in project_task_data:
todoist_task = self.create_todoist_task(task)
if todoist_task is not None:
# A None task means it is invalid for this project
project_tasks.append(todoist_task)
if not project_tasks:
# We had no valid tasks
_LOGGER.debug("No valid tasks for %s", self._name)
self.event = None
return
        # Make sure the task collection is reset to prevent it
        # from endlessly accumulating the same tasks
self.all_project_tasks.clear()
# Organize the best tasks (so users can see all the tasks
# they have, organized)
while project_tasks:
best_task = self.select_best_task(project_tasks)
_LOGGER.debug("Found Todoist Task: %s", best_task[SUMMARY])
project_tasks.remove(best_task)
self.all_project_tasks.append(best_task)
self.event = self.all_project_tasks[0]
# Convert datetime to a string again
if self.event is not None:
if self.event[START] is not None:
self.event[START] = {
DATETIME: self.event[START].strftime(DATE_STR_FORMAT)
}
if self.event[END] is not None:
self.event[END] = {DATETIME: self.event[END].strftime(DATE_STR_FORMAT)}
else:
# Home Assistant gets cranky if a calendar event never ends
# Let's set our "due date" to tomorrow
self.event[END] = {
DATETIME: (datetime.utcnow() + timedelta(days=1)).strftime(
DATE_STR_FORMAT
)
}
_LOGGER.debug("Updated %s", self._name)
|
import logging
from typing import Any, Dict, Optional
from canary.api import Api
from requests import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.config_entries import CONN_CLASS_CLOUD_POLL, ConfigFlow, OptionsFlow
from homeassistant.const import CONF_PASSWORD, CONF_TIMEOUT, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from .const import CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS, DEFAULT_TIMEOUT
from .const import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
def validate_input(hass: HomeAssistantType, data: dict) -> bool:
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
# constructor does login call
Api(
data[CONF_USERNAME],
data[CONF_PASSWORD],
data.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
)
return True
class CanaryConfigFlow(ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Canary."""
VERSION = 1
CONNECTION_CLASS = CONN_CLASS_CLOUD_POLL
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return CanaryOptionsFlowHandler(config_entry)
async def async_step_import(
self, user_input: Optional[ConfigType] = None
) -> Dict[str, Any]:
"""Handle a flow initiated by configuration file."""
return await self.async_step_user(user_input)
async def async_step_user(
self, user_input: Optional[ConfigType] = None
) -> Dict[str, Any]:
"""Handle a flow initiated by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
errors = {}
default_username = ""
if user_input is not None:
if CONF_TIMEOUT not in user_input:
user_input[CONF_TIMEOUT] = DEFAULT_TIMEOUT
default_username = user_input[CONF_USERNAME]
try:
await self.hass.async_add_executor_job(
validate_input, self.hass, user_input
)
except (ConnectTimeout, HTTPError):
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
return self.async_abort(reason="unknown")
else:
return self.async_create_entry(
title=user_input[CONF_USERNAME],
data=user_input,
)
data_schema = {
vol.Required(CONF_USERNAME, default=default_username): str,
vol.Required(CONF_PASSWORD): str,
}
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(data_schema),
errors=errors or {},
)
class CanaryOptionsFlowHandler(OptionsFlow):
"""Handle Canary client options."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input: Optional[ConfigType] = None):
"""Manage Canary options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
options = {
vol.Optional(
CONF_FFMPEG_ARGUMENTS,
default=self.config_entry.options.get(
CONF_FFMPEG_ARGUMENTS, DEFAULT_FFMPEG_ARGUMENTS
),
): str,
vol.Optional(
CONF_TIMEOUT,
default=self.config_entry.options.get(CONF_TIMEOUT, DEFAULT_TIMEOUT),
): int,
}
return self.async_show_form(step_id="init", data_schema=vol.Schema(options))
|
import logging
import voluptuous as vol
from homeassistant.components import fan, mqtt
from homeassistant.components.fan import (
ATTR_SPEED,
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_OSCILLATE,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.const import (
CONF_DEVICE,
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_STATE,
CONF_UNIQUE_ID,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from . import (
ATTR_DISCOVERY_HASH,
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
DOMAIN,
PLATFORMS,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from .debug_info import log_messages
from .discovery import MQTT_DISCOVERY_NEW, clear_discovery_hash
_LOGGER = logging.getLogger(__name__)
CONF_STATE_VALUE_TEMPLATE = "state_value_template"
CONF_SPEED_STATE_TOPIC = "speed_state_topic"
CONF_SPEED_COMMAND_TOPIC = "speed_command_topic"
CONF_SPEED_VALUE_TEMPLATE = "speed_value_template"
CONF_OSCILLATION_STATE_TOPIC = "oscillation_state_topic"
CONF_OSCILLATION_COMMAND_TOPIC = "oscillation_command_topic"
CONF_OSCILLATION_VALUE_TEMPLATE = "oscillation_value_template"
CONF_PAYLOAD_OSCILLATION_ON = "payload_oscillation_on"
CONF_PAYLOAD_OSCILLATION_OFF = "payload_oscillation_off"
CONF_PAYLOAD_OFF_SPEED = "payload_off_speed"
CONF_PAYLOAD_LOW_SPEED = "payload_low_speed"
CONF_PAYLOAD_MEDIUM_SPEED = "payload_medium_speed"
CONF_PAYLOAD_HIGH_SPEED = "payload_high_speed"
CONF_SPEED_LIST = "speeds"
DEFAULT_NAME = "MQTT Fan"
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_OPTIMISTIC = False
OSCILLATE_ON_PAYLOAD = "oscillate_on"
OSCILLATE_OFF_PAYLOAD = "oscillate_off"
OSCILLATION = "oscillation"
PLATFORM_SCHEMA = (
mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_OSCILLATION_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_OSCILLATION_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_OSCILLATION_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_PAYLOAD_HIGH_SPEED, default=SPEED_HIGH): cv.string,
vol.Optional(CONF_PAYLOAD_LOW_SPEED, default=SPEED_LOW): cv.string,
vol.Optional(CONF_PAYLOAD_MEDIUM_SPEED, default=SPEED_MEDIUM): cv.string,
vol.Optional(CONF_PAYLOAD_OFF_SPEED, default=SPEED_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(
CONF_PAYLOAD_OSCILLATION_OFF, default=OSCILLATE_OFF_PAYLOAD
): cv.string,
vol.Optional(
CONF_PAYLOAD_OSCILLATION_ON, default=OSCILLATE_ON_PAYLOAD
): cv.string,
vol.Optional(CONF_SPEED_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_SPEED_LIST,
default=[SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH],
): cv.ensure_list,
vol.Optional(CONF_SPEED_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_SPEED_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_UNIQUE_ID): cv.string,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
)
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT fan through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, config, async_add_entities)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT fan dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add a MQTT fan."""
discovery_data = discovery_payload.discovery_data
try:
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(
hass, config, async_add_entities, config_entry, discovery_data
)
except Exception:
clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(fan.DOMAIN, "mqtt"), async_discover
)
async def _async_setup_entity(
hass, config, async_add_entities, config_entry=None, discovery_data=None
):
"""Set up the MQTT fan."""
async_add_entities([MqttFan(hass, config, config_entry, discovery_data)])
class MqttFan(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
FanEntity,
):
"""A MQTT fan component."""
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the MQTT fan."""
self.hass = hass
self._unique_id = config.get(CONF_UNIQUE_ID)
self._state = False
self._speed = None
self._oscillation = None
self._supported_features = 0
self._sub_state = None
self._topic = None
self._payload = None
self._templates = None
self._optimistic = None
self._optimistic_oscillation = None
self._optimistic_speed = None
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_data, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._config = config
self._topic = {
key: config.get(key)
for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_SPEED_STATE_TOPIC,
CONF_SPEED_COMMAND_TOPIC,
CONF_OSCILLATION_STATE_TOPIC,
CONF_OSCILLATION_COMMAND_TOPIC,
)
}
self._templates = {
CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
ATTR_SPEED: config.get(CONF_SPEED_VALUE_TEMPLATE),
OSCILLATION: config.get(CONF_OSCILLATION_VALUE_TEMPLATE),
}
self._payload = {
"STATE_ON": config[CONF_PAYLOAD_ON],
"STATE_OFF": config[CONF_PAYLOAD_OFF],
"OSCILLATE_ON_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_ON],
"OSCILLATE_OFF_PAYLOAD": config[CONF_PAYLOAD_OSCILLATION_OFF],
"SPEED_LOW": config[CONF_PAYLOAD_LOW_SPEED],
"SPEED_MEDIUM": config[CONF_PAYLOAD_MEDIUM_SPEED],
"SPEED_HIGH": config[CONF_PAYLOAD_HIGH_SPEED],
"SPEED_OFF": config[CONF_PAYLOAD_OFF_SPEED],
}
optimistic = config[CONF_OPTIMISTIC]
self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
self._optimistic_oscillation = (
optimistic or self._topic[CONF_OSCILLATION_STATE_TOPIC] is None
)
self._optimistic_speed = (
optimistic or self._topic[CONF_SPEED_STATE_TOPIC] is None
)
self._supported_features = 0
self._supported_features |= (
self._topic[CONF_OSCILLATION_COMMAND_TOPIC] is not None
and SUPPORT_OSCILLATE
)
self._supported_features |= (
self._topic[CONF_SPEED_COMMAND_TOPIC] is not None and SUPPORT_SET_SPEED
)
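        # Note on the bitwise expressions above: `<topic> is not None and FLAG`
        # evaluates to either False (0) or the feature flag, so OR-ing it into
        # _supported_features only sets the bit when that command topic is configured.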
for key, tpl in list(self._templates.items()):
if tpl is None:
self._templates[key] = lambda value: value
else:
tpl.hass = self.hass
self._templates[key] = tpl.async_render_with_possible_json_value
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
@callback
@log_messages(self.hass, self.entity_id)
def state_received(msg):
"""Handle new received MQTT message."""
payload = self._templates[CONF_STATE](msg.payload)
if payload == self._payload["STATE_ON"]:
self._state = True
elif payload == self._payload["STATE_OFF"]:
self._state = False
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
"topic": self._topic[CONF_STATE_TOPIC],
"msg_callback": state_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def speed_received(msg):
"""Handle new received MQTT message for the speed."""
payload = self._templates[ATTR_SPEED](msg.payload)
if payload == self._payload["SPEED_LOW"]:
self._speed = SPEED_LOW
elif payload == self._payload["SPEED_MEDIUM"]:
self._speed = SPEED_MEDIUM
elif payload == self._payload["SPEED_HIGH"]:
self._speed = SPEED_HIGH
elif payload == self._payload["SPEED_OFF"]:
self._speed = SPEED_OFF
self.async_write_ha_state()
if self._topic[CONF_SPEED_STATE_TOPIC] is not None:
topics[CONF_SPEED_STATE_TOPIC] = {
"topic": self._topic[CONF_SPEED_STATE_TOPIC],
"msg_callback": speed_received,
"qos": self._config[CONF_QOS],
}
self._speed = SPEED_OFF
@callback
@log_messages(self.hass, self.entity_id)
def oscillation_received(msg):
"""Handle new received MQTT message for the oscillation."""
payload = self._templates[OSCILLATION](msg.payload)
if payload == self._payload["OSCILLATE_ON_PAYLOAD"]:
self._oscillation = True
elif payload == self._payload["OSCILLATE_OFF_PAYLOAD"]:
self._oscillation = False
self.async_write_ha_state()
if self._topic[CONF_OSCILLATION_STATE_TOPIC] is not None:
topics[CONF_OSCILLATION_STATE_TOPIC] = {
"topic": self._topic[CONF_OSCILLATION_STATE_TOPIC],
"msg_callback": oscillation_received,
"qos": self._config[CONF_QOS],
}
self._oscillation = False
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state, topics
)
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
@property
def should_poll(self):
"""No polling needed for a MQTT fan."""
return False
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def name(self) -> str:
"""Get entity name."""
return self._config[CONF_NAME]
@property
def speed_list(self) -> list:
"""Get the list of available speeds."""
return self._config[CONF_SPEED_LIST]
@property
def supported_features(self) -> int:
"""Flag supported features."""
return self._supported_features
@property
def speed(self):
"""Return the current speed."""
return self._speed
@property
def oscillating(self):
"""Return the oscillation state."""
return self._oscillation
async def async_turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the entity.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass,
self._topic[CONF_COMMAND_TOPIC],
self._payload["STATE_ON"],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if speed:
await self.async_set_speed(speed)
if self._optimistic:
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the entity.
This method is a coroutine.
"""
mqtt.async_publish(
self.hass,
self._topic[CONF_COMMAND_TOPIC],
self._payload["STATE_OFF"],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
self._state = False
self.async_write_ha_state()
async def async_set_speed(self, speed: str) -> None:
"""Set the speed of the fan.
This method is a coroutine.
"""
if speed == SPEED_LOW:
mqtt_payload = self._payload["SPEED_LOW"]
elif speed == SPEED_MEDIUM:
mqtt_payload = self._payload["SPEED_MEDIUM"]
elif speed == SPEED_HIGH:
mqtt_payload = self._payload["SPEED_HIGH"]
elif speed == SPEED_OFF:
mqtt_payload = self._payload["SPEED_OFF"]
else:
mqtt_payload = speed
mqtt.async_publish(
self.hass,
self._topic[CONF_SPEED_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic_speed:
self._speed = speed
self.async_write_ha_state()
async def async_oscillate(self, oscillating: bool) -> None:
"""Set oscillation.
This method is a coroutine.
"""
if oscillating is False:
payload = self._payload["OSCILLATE_OFF_PAYLOAD"]
else:
payload = self._payload["OSCILLATE_ON_PAYLOAD"]
mqtt.async_publish(
self.hass,
self._topic[CONF_OSCILLATION_COMMAND_TOPIC],
payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic_oscillation:
self._oscillation = oscillating
self.async_write_ha_state()
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
|
import asyncio
import logging
import aiohttp
from aiohttp.hdrs import ACCEPT, AUTHORIZATION
import async_timeout
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONTENT_TYPE_JSON, HTTP_NOT_FOUND, HTTP_UNAUTHORIZED
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from . import DATA_TTN, TTN_ACCESS_KEY, TTN_APP_ID, TTN_DATA_STORAGE_URL
_LOGGER = logging.getLogger(__name__)
ATTR_DEVICE_ID = "device_id"
ATTR_RAW = "raw"
ATTR_TIME = "time"
DEFAULT_TIMEOUT = 10
CONF_DEVICE_ID = "device_id"
CONF_VALUES = "values"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_DEVICE_ID): cv.string,
vol.Required(CONF_VALUES): {cv.string: cv.string},
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up The Things Network Data storage sensors."""
ttn = hass.data.get(DATA_TTN)
device_id = config.get(CONF_DEVICE_ID)
values = config.get(CONF_VALUES)
app_id = ttn.get(TTN_APP_ID)
access_key = ttn.get(TTN_ACCESS_KEY)
ttn_data_storage = TtnDataStorage(hass, app_id, device_id, access_key, values)
success = await ttn_data_storage.async_update()
if not success:
return
devices = []
for value, unit_of_measurement in values.items():
devices.append(
TtnDataSensor(ttn_data_storage, device_id, value, unit_of_measurement)
)
async_add_entities(devices, True)
class TtnDataSensor(Entity):
"""Representation of a The Things Network Data Storage sensor."""
def __init__(self, ttn_data_storage, device_id, value, unit_of_measurement):
"""Initialize a The Things Network Data Storage sensor."""
self._ttn_data_storage = ttn_data_storage
self._state = None
self._device_id = device_id
self._unit_of_measurement = unit_of_measurement
self._value = value
self._name = f"{self._device_id} {self._value}"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the entity."""
if self._ttn_data_storage.data is not None:
try:
return round(self._state[self._value], 1)
except (KeyError, TypeError):
return None
return None
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
if self._ttn_data_storage.data is not None:
return {
ATTR_DEVICE_ID: self._device_id,
ATTR_RAW: self._state["raw"],
ATTR_TIME: self._state["time"],
}
async def async_update(self):
"""Get the current state."""
await self._ttn_data_storage.async_update()
self._state = self._ttn_data_storage.data
class TtnDataStorage:
"""Get the latest data from The Things Network Data Storage."""
def __init__(self, hass, app_id, device_id, access_key, values):
"""Initialize the data object."""
self.data = None
self._hass = hass
self._app_id = app_id
self._device_id = device_id
self._values = values
self._url = TTN_DATA_STORAGE_URL.format(
app_id=app_id, endpoint="api/v2/query", device_id=device_id
)
self._headers = {ACCEPT: CONTENT_TYPE_JSON, AUTHORIZATION: f"key {access_key}"}
async def async_update(self):
"""Get the current state from The Things Network Data Storage."""
try:
session = async_get_clientsession(self._hass)
with async_timeout.timeout(DEFAULT_TIMEOUT):
response = await session.get(self._url, headers=self._headers)
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Error while accessing: %s", self._url)
return None
status = response.status
if status == 204:
_LOGGER.error("The device is not available: %s", self._device_id)
return None
if status == HTTP_UNAUTHORIZED:
_LOGGER.error("Not authorized for Application ID: %s", self._app_id)
return None
if status == HTTP_NOT_FOUND:
_LOGGER.error("Application ID is not available: %s", self._app_id)
return None
data = await response.json()
self.data = data[-1]
for value in self._values.items():
if value[0] not in self.data:
_LOGGER.warning("Value not available: %s", value[0])
return response
|
from aioshelly import Block
from homeassistant.components.cover import (
ATTR_POSITION,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
SUPPORT_STOP,
CoverEntity,
)
from homeassistant.core import callback
from . import ShellyDeviceWrapper
from .const import DATA_CONFIG_ENTRY, DOMAIN
from .entity import ShellyBlockEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up cover for device."""
wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id]
blocks = [block for block in wrapper.device.blocks if block.type == "roller"]
if not blocks:
return
async_add_entities(ShellyCover(wrapper, block) for block in blocks)
class ShellyCover(ShellyBlockEntity, CoverEntity):
"""Switch that controls a cover block on Shelly devices."""
def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:
"""Initialize light."""
super().__init__(wrapper, block)
self.control_result = None
self._supported_features = SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP
if self.wrapper.device.settings["rollers"][0]["positioning"]:
self._supported_features |= SUPPORT_SET_POSITION
@property
def is_closed(self):
"""If cover is closed."""
if self.control_result:
return self.control_result["current_pos"] == 0
return self.block.rollerPos == 0
@property
def current_cover_position(self):
"""Position of the cover."""
if self.control_result:
return self.control_result["current_pos"]
return self.block.rollerPos
@property
def is_closing(self):
"""Return if the cover is closing."""
if self.control_result:
return self.control_result["state"] == "close"
return self.block.roller == "close"
@property
def is_opening(self):
"""Return if the cover is opening."""
if self.control_result:
return self.control_result["state"] == "open"
return self.block.roller == "open"
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_close_cover(self, **kwargs):
"""Close cover."""
self.control_result = await self.block.set_state(go="close")
self.async_write_ha_state()
async def async_open_cover(self, **kwargs):
"""Open cover."""
self.control_result = await self.block.set_state(go="open")
self.async_write_ha_state()
async def async_set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
self.control_result = await self.block.set_state(
go="to_pos", roller_pos=kwargs[ATTR_POSITION]
)
self.async_write_ha_state()
async def async_stop_cover(self, **_kwargs):
"""Stop the cover."""
self.control_result = await self.block.set_state(go="stop")
self.async_write_ha_state()
@callback
def _update_callback(self):
"""When device updates, clear control result that overrides state."""
self.control_result = None
super()._update_callback()
|
import logging
import random
import six
from jinja2 import Template
from kalliope.core import OrderListener
from kalliope.core.HookManager import HookManager
from kalliope.core.ConfigurationManager import SettingLoader, BrainLoader
from kalliope.core.Cortex import Cortex
from kalliope.core.Lifo.LifoManager import LifoManager
from kalliope.core.Models.MatchedSynapse import MatchedSynapse
from kalliope.core.NeuronExceptions import NeuronExceptions
from kalliope.core.OrderAnalyser import OrderAnalyser
from kalliope.core.Utils.Utils import Utils
logging.basicConfig()
logger = logging.getLogger("kalliope")
class InvalidParameterException(NeuronExceptions):
"""
Some Neuron parameters are invalid.
"""
def __init__(self, message):
# Call the base class constructor with the parameters it needs
super(InvalidParameterException, self).__init__(message)
class MissingParameterException(NeuronExceptions):
"""
Some Neuron parameters are missing.
"""
def __init__(self, message=None):
# Call the base class constructor with the parameters it needs
super(MissingParameterException, self).__init__(message)
self.message = message
class NoTemplateException(Exception):
"""
You must specify a say_template or a file_template
"""
pass
class TemplateFileNotFoundException(Exception):
"""
    Template file cannot be found. Check the provided path.
"""
pass
class TTSModuleNotFound(Exception):
"""
    TTS module cannot be found. It must be configured in the settings file.
"""
pass
class NeuronModule(object):
"""
    This abstract class is the base class for every Neuron.
    Each Neuron must inherit from this class.
"""
def __init__(self, **kwargs):
"""
        Class used by the neuron for talking.
        :param kwargs: Same parameters as the child. Can contain info about the TTS to use instead of the
        default one.
"""
# get the child who called the class
child_name = self.__class__.__name__
self.neuron_name = child_name
sl = SettingLoader()
self.settings = sl.settings
brain_loader = BrainLoader()
self.brain = brain_loader.brain
self.tts = self._get_tts_object(settings=self.settings)
# get templates if provided
        # Check if there is a template associated with the output message
self.say_template = kwargs.get('say_template', None)
        # check if there is a template file associated with the output message
self.file_template = kwargs.get('file_template', None)
# keep the generated message
self.tts_message = None
# if the current call is api one
self.is_api_call = kwargs.get('is_api_call', False)
        # boolean to know if the synapse is waiting for an answer
self.is_waiting_for_answer = False
        # the synapse name to add to the buffer
self.pending_synapse = None
# a dict of parameters the user ask to save in short term memory
self.kalliope_memory = kwargs.get('kalliope_memory', None)
        # parameters loaded from the order can be saved now
Cortex.save_parameter_from_order_in_memory(self.kalliope_memory)
def __str__(self):
        returned_string = ""
        returned_string += self.tts_message
        return returned_string
def serialize(self):
"""
        This method serializes this object in a proper way
:return: A dict of name and parameters
:rtype: Dict
"""
self.tts_message = Utils.encode_text_utf8(self.tts_message)
return {
'neuron_name': self.neuron_name,
'generated_message': self.tts_message
}
def say(self, message):
"""
        Use the TTS to speak the message out loud.
        A message can be a string, a list or a dict.
        If it's a string, simply use the TTS with the message.
        If it's a list, we randomly select a string from the list and give it to the TTS.
        If it's a dict, we use the template given in parameter to create a string that we give to the TTS.
        :param message: Can be a string or a dict or a list
.. raises:: TTSModuleNotFound
"""
logger.debug("[NeuronModule] Say() called with message: %s" % message)
tts_message = None
# we can save parameters from the neuron in memory
Cortex.save_neuron_parameter_in_memory(self.kalliope_memory, message)
if isinstance(message, str) or isinstance(message, six.text_type):
logger.debug("[NeuronModule] message is string")
tts_message = message
if isinstance(message, list):
logger.debug("[NeuronModule] message is list")
tts_message = random.choice(message)
if isinstance(message, dict):
logger.debug("[NeuronModule] message is dict")
tts_message = self._get_message_from_dict(message)
if message is None:
logger.debug("[NeuronModule] message is empty, try to load a template")
tts_message = self._get_message_from_dict(message)
if tts_message is not None:
logger.debug("[NeuronModule] tts_message to say: %s" % tts_message)
self.tts_message = tts_message
Utils.print_success(tts_message)
# save in kalliope memory the last tts message
Cortex.save("kalliope_last_tts_message", tts_message)
# process the audio only if the mute flag is false
if self.settings.options.mute:
logger.debug("[NeuronModule] mute is True, Kalliope is muted")
else:
logger.debug("[NeuronModule] mute is False, make Kalliope speaking")
HookManager.on_start_speaking()
# get the instance of the TTS module
tts_folder = None
if self.settings.resources:
tts_folder = self.settings.resources.tts_folder
tts_module_instance = Utils.get_dynamic_class_instantiation(package_name="tts",
module_name=self.tts.name,
parameters=self.tts.parameters,
resources_dir=tts_folder)
# generate the audio file and play it
tts_module_instance.say(tts_message)
HookManager.on_stop_speaking()
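    # Hedged usage sketch from a child neuron (values are illustrative only):
    #   self.say("hello")            # plain string, spoken as-is
    #   self.say(["hi", "hello"])    # one entry is picked at random
    #   self.say({"temp": 21})       # rendered through say_template/file_template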
def _get_message_from_dict(self, message_dict):
"""
        Generate a message that can be played by a TTS engine from a dict of variables and the Jinja template
:param message_dict: the dict of message
:return: The message to say
.. raises:: TemplateFileNotFoundException
"""
returned_message = None
# the user chooses a say_template option
if self.say_template is not None:
returned_message = self._get_say_template(self.say_template, message_dict)
# the user chooses a file_template option
        if self.file_template is not None:
returned_message = self._get_file_template(self.file_template, message_dict)
return returned_message
@staticmethod
def _get_say_template(list_say_template, message_dict):
if isinstance(list_say_template, list):
# then we pick randomly one template
list_say_template = random.choice(list_say_template)
t = Template(list_say_template)
return t.render(**message_dict)
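    # Sketch of the intended behaviour (hypothetical values): with the
    # say_template "the temperature is {{ temp }}" and message_dict
    # {"temp": 21}, the rendered TTS message would be "the temperature is 21".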
@classmethod
def _get_file_template(cls, file_template, message_dict):
real_file_template_path = Utils.get_real_file_path(file_template)
if real_file_template_path is None:
            raise TemplateFileNotFoundException("Template file %s not found in templates folder"
                                                % file_template)
# load the content of the file as template
t = Template(cls._get_content_of_file(real_file_template_path))
# add kalliope memory
final_message_dict = dict()
final_message_dict["kalliope_memory"] = Cortex.get_memory()
if message_dict:
final_message_dict.update(**message_dict)
returned_message = t.render(final_message_dict)
return returned_message
@staticmethod
def run_synapse_by_name(synapse_name, user_order=None, synapse_order=None, high_priority=False,
is_api_call=False, overriding_parameter_dict=None):
"""
        Add a synapse to execute to the LIFO list of synapses to process.
:param synapse_name: The name of the synapse to run
:param user_order: The user order
:param synapse_order: The synapse order
:param high_priority: If True, the synapse is executed before the end of the current synapse list
:param is_api_call: If true, the current call comes from the api
:param overriding_parameter_dict: dict of value to add to neuron parameters
"""
synapse = BrainLoader().brain.get_synapse_by_name(synapse_name)
matched_synapse = MatchedSynapse(matched_synapse=synapse,
matched_order=synapse_order,
user_order=user_order,
overriding_parameter=overriding_parameter_dict)
list_synapse_to_process = list()
list_synapse_to_process.append(matched_synapse)
# get the singleton
lifo_buffer = LifoManager.get_singleton_lifo()
lifo_buffer.add_synapse_list_to_lifo(list_synapse_to_process, high_priority=high_priority)
lifo_buffer.execute(is_api_call=is_api_call)
@staticmethod
def is_order_matching(order_said, order_match):
return OrderAnalyser().is_normal_matching(signal_order=order_match,
user_order=order_said)
@staticmethod
def _get_content_of_file(real_file_template_path):
"""
Return the content of a file in path <real_file_template_path>
:param real_file_template_path: path of the file to return the content
:return: file content str
"""
with open(real_file_template_path, 'r') as content_file:
return content_file.read()
@staticmethod
def get_audio_from_stt(callback):
"""
Call the default STT to get an audio sample and return it into the callback method
:param callback: A callback function
"""
HookManager.on_start_listening()
# call the order listener
ol = OrderListener(callback=callback)
ol.start()
ol.join()
        # wait until the STT engine has finished its job (otherwise the neurotransmitter neuron will be killed)
if ol.stt_instance is not None:
ol.stt_instance.join()
HookManager.on_stop_listening()
def get_neuron_name(self):
"""
        Return the name of the neuron that called the parent class
:return:
"""
return self.neuron_name
@staticmethod
def _get_tts_object(tts_name=None, override_parameter=None, settings=None):
"""
        Return a TTS model object.
        If no TTS name is provided, return the default TTS defined in the settings.
        If the TTS name is provided, get the default configuration for this TTS in settings and override each parameter
with parameters provided in override_parameter
:param tts_name: name of the TTS to load
:param override_parameter: dict of parameter to override the default configuration of the TTS
:param settings: current settings
:return: Tts model object
"""
# if the tts_name is not provided, we get the default tts from settings
if tts_name is None:
tts_name = settings.default_tts_name
        # create a tts object from the tts the user wants to use
tts_object = next((x for x in settings.ttss if x.name == tts_name), None)
if tts_object is None:
raise TTSModuleNotFound("[NeuronModule] The tts module name %s does not exist in settings file" % tts_name)
        if override_parameter is not None:  # the user wants to override the default TTS configuration
logger.debug("[NeuronModule] args for TTS plugin before update: %s" % str(tts_object.parameters))
for key, value in override_parameter.items():
tts_object.parameters[key] = value
logger.debug("[NeuronModule] args for TTS plugin after update: %s" % str(tts_object.parameters))
logger.debug("[NeuronModule] TTS args: %s" % tts_object)
return tts_object
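    # Illustration under assumed settings: with default_tts_name "pico2wave"
    # and override_parameter {"language": "fr-FR"}, the returned Tts object
    # keeps its configured parameters except that "language" is replaced.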
|
import asyncio
import contextlib
import logging
import time
from enum import Enum, unique
from pathlib import Path
from typing import MutableMapping
import discord
from redbot.core import commands
from redbot.core.i18n import Translator
log = logging.getLogger("red.cogs.Audio.task.callback")
_ = Translator("Audio", Path(__file__))
class CacheLevel:
__slots__ = ("value",)
def __init__(self, level=0):
if not isinstance(level, int):
raise TypeError(
f"Expected int parameter, received {level.__class__.__name__} instead."
)
elif level < 0:
level = 0
elif level > 0b11111:
level = 0b11111
self.value = level
def __eq__(self, other):
return isinstance(other, CacheLevel) and self.value == other.value
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self.value)
def __add__(self, other):
return CacheLevel(self.value + other.value)
def __radd__(self, other):
return CacheLevel(other.value + self.value)
def __sub__(self, other):
return CacheLevel(self.value - other.value)
def __rsub__(self, other):
return CacheLevel(other.value - self.value)
def __str__(self):
return "{0:b}".format(self.value)
def __format__(self, format_spec):
return "{r:{f}}".format(r=self.value, f=format_spec)
def __repr__(self):
return f"<CacheLevel value={self.value}>"
def is_subset(self, other):
"""Returns ``True`` if self has the same or fewer caching levels as other."""
return (self.value & other.value) == self.value
def is_superset(self, other):
"""Returns ``True`` if self has the same or more caching levels as other."""
return (self.value | other.value) == self.value
def is_strict_subset(self, other):
"""Returns ``True`` if the caching level on other are a strict subset of those on self."""
return self.is_subset(other) and self != other
def is_strict_superset(self, other):
"""Returns ``True`` if the caching level on
other are a strict superset of those on self."""
return self.is_superset(other) and self != other
__le__ = is_subset
__ge__ = is_superset
__lt__ = is_strict_subset
__gt__ = is_strict_superset
@classmethod
def all(cls):
"""A factory method that creates a :class:`CacheLevel` with max caching level."""
return cls(0b11111)
@classmethod
def none(cls):
"""A factory method that creates a :class:`CacheLevel` with no caching."""
return cls(0)
@classmethod
def set_spotify(cls):
"""A factory method that creates a :class:`CacheLevel` with Spotify caching level."""
return cls(0b00011)
@classmethod
def set_youtube(cls):
"""A factory method that creates a :class:`CacheLevel` with YouTube caching level."""
return cls(0b00100)
@classmethod
def set_lavalink(cls):
"""A factory method that creates a :class:`CacheLevel` with lavalink caching level."""
return cls(0b11000)
def _bit(self, index):
return bool((self.value >> index) & 1)
def _set(self, index, value):
if value is True:
self.value |= 1 << index
elif value is False:
self.value &= ~(1 << index)
else:
raise TypeError("Value to set for CacheLevel must be a bool.")
@property
def lavalink(self):
""":class:`bool`: Returns ``True`` if a user can deafen other users."""
return self._bit(4)
@lavalink.setter
def lavalink(self, value):
self._set(4, value)
@property
def youtube(self):
""":class:`bool`: Returns ``True`` if a user can move users between other voice
channels."""
return self._bit(2)
@youtube.setter
def youtube(self, value):
self._set(2, value)
@property
def spotify(self):
""":class:`bool`: Returns ``True`` if a user can use voice activation in voice channels."""
return self._bit(1)
@spotify.setter
def spotify(self, value):
self._set(1, value)
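# Illustrative usage sketch of the bit flags above (values follow the factory
# methods in this class):
#   level = CacheLevel.set_spotify() + CacheLevel.set_youtube()   # 0b00111
#   level.is_superset(CacheLevel.set_spotify())                   # True
#   level.lavalink                                                # False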
class Notifier:
def __init__(
self, ctx: commands.Context, message: discord.Message, updates: MutableMapping, **kwargs
):
self.context = ctx
self.message = message
self.updates = updates
self.color = None
self.last_msg_time = 0
self.cooldown = 5
async def notify_user(
self,
current: int = None,
total: int = None,
key: str = None,
seconds_key: str = None,
seconds: str = None,
):
"""This updates an existing message.
Based on the message found in :variable:`Notifier.updates` as per the `key` param
"""
if self.last_msg_time + self.cooldown > time.time() and not current == total:
return
if self.color is None:
self.color = await self.context.embed_colour()
embed2 = discord.Embed(
colour=self.color,
title=self.updates.get(key, "").format(num=current, total=total, seconds=seconds),
)
if seconds and seconds_key:
embed2.set_footer(text=self.updates.get(seconds_key, "").format(seconds=seconds))
try:
await self.message.edit(embed=embed2)
self.last_msg_time = int(time.time())
except discord.errors.NotFound:
pass
async def update_text(self, text: str):
embed2 = discord.Embed(colour=self.color, title=text)
try:
await self.message.edit(embed=embed2)
except discord.errors.NotFound:
pass
async def update_embed(self, embed: discord.Embed):
try:
await self.message.edit(embed=embed)
self.last_msg_time = int(time.time())
except discord.errors.NotFound:
pass
@unique
class PlaylistScope(Enum):
GLOBAL = "GLOBALPLAYLIST"
GUILD = "GUILDPLAYLIST"
USER = "USERPLAYLIST"
def __str__(self):
return "{0}".format(self.value)
@staticmethod
def list():
return list(map(lambda c: c.value, PlaylistScope))
def task_callback(task: asyncio.Task) -> None:
with contextlib.suppress(asyncio.CancelledError, asyncio.InvalidStateError):
if exc := task.exception():
log.exception(f"{task.get_name()} raised an Exception", exc_info=exc)
def has_internal_server():
async def pred(ctx: commands.Context):
external = await ctx.cog.config.use_external_lavalink()
return not external
return commands.check(pred)
|
from homeassistant.components.media_player import BrowseMedia
from homeassistant.components.media_player.const import (
MEDIA_CLASS_APP,
MEDIA_CLASS_CHANNEL,
MEDIA_CLASS_DIRECTORY,
MEDIA_TYPE_APP,
MEDIA_TYPE_APPS,
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_CHANNELS,
)
CONTENT_TYPE_MEDIA_CLASS = {
MEDIA_TYPE_APP: MEDIA_CLASS_APP,
MEDIA_TYPE_APPS: MEDIA_CLASS_APP,
MEDIA_TYPE_CHANNEL: MEDIA_CLASS_CHANNEL,
MEDIA_TYPE_CHANNELS: MEDIA_CLASS_CHANNEL,
}
CONTAINER_TYPES_SPECIFIC_MEDIA_CLASS = {
MEDIA_TYPE_APPS: MEDIA_CLASS_DIRECTORY,
MEDIA_TYPE_CHANNELS: MEDIA_CLASS_DIRECTORY,
}
PLAYABLE_MEDIA_TYPES = [
MEDIA_TYPE_APP,
MEDIA_TYPE_CHANNEL,
]
EXPANDABLE_MEDIA_TYPES = [
MEDIA_TYPE_APPS,
MEDIA_TYPE_CHANNELS,
]
def build_item_response(coordinator, payload):
"""Create response payload for the provided media query."""
search_id = payload["search_id"]
search_type = payload["search_type"]
thumbnail = None
title = None
media = None
children_media_class = None
if search_type == MEDIA_TYPE_APPS:
title = "Apps"
media = [
{"app_id": item.app_id, "title": item.name, "type": MEDIA_TYPE_APP}
for item in coordinator.data.apps
]
children_media_class = MEDIA_CLASS_APP
elif search_type == MEDIA_TYPE_CHANNELS:
title = "Channels"
media = [
{
"channel_number": item.number,
"title": item.name,
"type": MEDIA_TYPE_CHANNEL,
}
for item in coordinator.data.channels
]
children_media_class = MEDIA_CLASS_CHANNEL
if media is None:
return None
return BrowseMedia(
media_class=CONTAINER_TYPES_SPECIFIC_MEDIA_CLASS.get(
search_type, MEDIA_CLASS_DIRECTORY
),
media_content_id=search_id,
media_content_type=search_type,
title=title,
can_play=search_type in PLAYABLE_MEDIA_TYPES and search_id,
can_expand=True,
children=[item_payload(item, coordinator) for item in media],
children_media_class=children_media_class,
thumbnail=thumbnail,
)
def item_payload(item, coordinator):
"""
Create response payload for a single media item.
Used by async_browse_media.
"""
thumbnail = None
if "app_id" in item:
media_content_type = MEDIA_TYPE_APP
media_content_id = item["app_id"]
thumbnail = coordinator.roku.app_icon_url(item["app_id"])
elif "channel_number" in item:
media_content_type = MEDIA_TYPE_CHANNEL
media_content_id = item["channel_number"]
else:
media_content_type = item["type"]
media_content_id = ""
title = item["title"]
can_play = media_content_type in PLAYABLE_MEDIA_TYPES and media_content_id
can_expand = media_content_type in EXPANDABLE_MEDIA_TYPES
return BrowseMedia(
title=title,
media_class=CONTENT_TYPE_MEDIA_CLASS[media_content_type],
media_content_type=media_content_type,
media_content_id=media_content_id,
can_play=can_play,
can_expand=can_expand,
thumbnail=thumbnail,
)
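# Hypothetical example: item_payload({"app_id": "12", "title": "Netflix"},
# coordinator) would yield a BrowseMedia entry with media_content_type
# MEDIA_TYPE_APP, marked as playable, and the app icon URL as its thumbnail.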
def library_payload(coordinator):
"""
Create response payload to describe contents of a specific library.
Used by async_browse_media.
"""
library_info = BrowseMedia(
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id="library",
media_content_type="library",
title="Media Library",
can_play=False,
can_expand=True,
children=[],
)
library = {
MEDIA_TYPE_APPS: "Apps",
MEDIA_TYPE_CHANNELS: "Channels",
}
for item in [{"title": name, "type": type_} for type_, name in library.items()]:
if (
item["type"] == MEDIA_TYPE_CHANNELS
and coordinator.data.info.device_type != "tv"
):
continue
library_info.children.append(
item_payload(
{"title": item["title"], "type": item["type"]},
coordinator,
)
)
if all(
child.media_content_type == MEDIA_TYPE_APPS for child in library_info.children
):
library_info.children_media_class = MEDIA_CLASS_APP
elif all(
child.media_content_type == MEDIA_TYPE_CHANNELS
for child in library_info.children
):
library_info.children_media_class = MEDIA_CLASS_CHANNEL
return library_info
|
import pytest
from homeassistant import config_entries, data_entry_flow
from homeassistant.components.izone.const import DISPATCH_CONTROLLER_DISCOVERED, IZONE
from tests.async_mock import Mock, patch
@pytest.fixture
def mock_disco():
"""Mock discovery service."""
disco = Mock()
disco.pi_disco = Mock()
disco.pi_disco.controllers = {}
yield disco
def _mock_start_discovery(hass, mock_disco):
from homeassistant.helpers.dispatcher import async_dispatcher_send
    def do_discovered(*args):
async_dispatcher_send(hass, DISPATCH_CONTROLLER_DISCOVERED, True)
return mock_disco
    return do_discovered
async def test_not_found(hass, mock_disco):
"""Test not finding iZone controller."""
with patch(
"homeassistant.components.izone.config_flow.async_start_discovery_service"
) as start_disco, patch(
"homeassistant.components.izone.config_flow.async_stop_discovery_service",
return_value=None,
) as stop_disco:
start_disco.side_effect = _mock_start_discovery(hass, mock_disco)
result = await hass.config_entries.flow.async_init(
IZONE, context={"source": config_entries.SOURCE_USER}
)
# Confirmation form
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
await hass.async_block_till_done()
stop_disco.assert_called_once()
async def test_found(hass, mock_disco):
"""Test not finding iZone controller."""
mock_disco.pi_disco.controllers["blah"] = object()
with patch(
"homeassistant.components.izone.climate.async_setup_entry",
return_value=True,
) as mock_setup, patch(
"homeassistant.components.izone.config_flow.async_start_discovery_service"
) as start_disco, patch(
"homeassistant.components.izone.async_start_discovery_service",
return_value=None,
):
start_disco.side_effect = _mock_start_discovery(hass, mock_disco)
result = await hass.config_entries.flow.async_init(
IZONE, context={"source": config_entries.SOURCE_USER}
)
# Confirmation form
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
mock_setup.assert_called_once()
|
import random
from fnvhash import fnv1a_32
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.entity_registry import RegistryEntry
from homeassistant.helpers.storage import Store
from .util import get_aid_storage_filename_for_entry_id
AID_MANAGER_STORAGE_VERSION = 1
AID_MANAGER_SAVE_DELAY = 2
ALLOCATIONS_KEY = "allocations"
UNIQUE_IDS_KEY = "unique_ids"
INVALID_AIDS = (0, 1)
AID_MIN = 2
AID_MAX = 18446744073709551615
def get_system_unique_id(entity: RegistryEntry):
"""Determine the system wide unique_id for an entity."""
return f"{entity.platform}.{entity.domain}.{entity.unique_id}"
def _generate_aids(unique_id: str, entity_id: str) -> int:
"""Generate accessory aid."""
if unique_id:
# Use fnv1a_32 of the unique id as
        # fnv1a_32 has fewer collisions than
# adler32
yield fnv1a_32(unique_id.encode("utf-8"))
# If there is no unique id we use
# fnv1a_32 as it is unlikely to collide
yield fnv1a_32(entity_id.encode("utf-8"))
    # If called again, resort to random allocations.
    # Given the size of the range, it's unlikely we'll encounter duplicates,
    # but try a few times regardless.
for _ in range(5):
yield random.randrange(AID_MIN, AID_MAX)
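# Hedged illustration: for a registry entry, the first candidate aid is
# fnv1a_32 of its system unique id (e.g. "hue.light.abc123", a made-up value);
# only if every hash-based candidate collides does the caller fall back to the
# random values yielded above.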
class AccessoryAidStorage:
"""
Holds a map of entity ID to HomeKit ID.
    Will generate new IDs, ensure they are unique and store them to make sure they
persist over reboots.
"""
def __init__(self, hass: HomeAssistant, entry: ConfigEntry):
"""Create a new entity map store."""
self.hass = hass
self.allocations = {}
self.allocated_aids = set()
self._entry = entry
self.store = None
self._entity_registry = None
async def async_initialize(self):
"""Load the latest AID data."""
self._entity_registry = (
await self.hass.helpers.entity_registry.async_get_registry()
)
aidstore = get_aid_storage_filename_for_entry_id(self._entry)
self.store = Store(self.hass, AID_MANAGER_STORAGE_VERSION, aidstore)
raw_storage = await self.store.async_load()
if not raw_storage:
# There is no data about aid allocations yet
return
self.allocations = raw_storage.get(ALLOCATIONS_KEY, {})
self.allocated_aids = set(self.allocations.values())
def get_or_allocate_aid_for_entity_id(self, entity_id: str):
"""Generate a stable aid for an entity id."""
entity = self._entity_registry.async_get(entity_id)
if not entity:
return self._get_or_allocate_aid(None, entity_id)
sys_unique_id = get_system_unique_id(entity)
return self._get_or_allocate_aid(sys_unique_id, entity_id)
def _get_or_allocate_aid(self, unique_id: str, entity_id: str):
"""Allocate (and return) a new aid for an accessory."""
if unique_id and unique_id in self.allocations:
return self.allocations[unique_id]
if entity_id in self.allocations:
return self.allocations[entity_id]
for aid in _generate_aids(unique_id, entity_id):
if aid in INVALID_AIDS:
continue
if aid not in self.allocated_aids:
                # Prefer the unique_id over the entity_id
storage_key = unique_id or entity_id
self.allocations[storage_key] = aid
self.allocated_aids.add(aid)
self.async_schedule_save()
return aid
raise ValueError(
f"Unable to generate unique aid allocation for {entity_id} [{unique_id}]"
)
def delete_aid(self, storage_key: str):
"""Delete an aid allocation."""
if storage_key not in self.allocations:
return
aid = self.allocations.pop(storage_key)
self.allocated_aids.discard(aid)
self.async_schedule_save()
@callback
def async_schedule_save(self):
"""Schedule saving the entity map cache."""
self.store.async_delay_save(self._data_to_save, AID_MANAGER_SAVE_DELAY)
async def async_save(self):
"""Save the entity map cache."""
return await self.store.async_save(self._data_to_save())
@callback
def _data_to_save(self):
"""Return data of entity map to store in a file."""
return {ALLOCATIONS_KEY: self.allocations}
|
import json
from pathlib import Path
from typing import Any, Dict, Tuple
from .info_schemas import REPO_SCHEMA, update_mixin
from .log import log
class RepoJSONMixin:
INFO_FILE_NAME = "info.json"
def __init__(self, repo_folder: Path):
self._repo_folder = repo_folder
self.author: Tuple[str, ...]
self.install_msg: str
self.short: str
self.description: str
self._info_file = repo_folder / self.INFO_FILE_NAME
self._info: Dict[str, Any]
self._read_info_file()
def _read_info_file(self) -> None:
if self._info_file.exists():
try:
with self._info_file.open(encoding="utf-8") as f:
info = json.load(f)
except json.JSONDecodeError as e:
log.error(
"Invalid JSON information file at path: %s\nError: %s", self._info_file, str(e)
)
info = {}
else:
info = {}
if not isinstance(info, dict):
log.warning(
"Invalid top-level structure (expected dict, got %s)"
" in JSON information file at path: %s",
type(info).__name__,
self._info_file,
)
info = {}
self._info = info
update_mixin(self, REPO_SCHEMA)
|
from homeassistant.helpers.update_coordinator import CoordinatorEntity
class RiscoEntity(CoordinatorEntity):
"""Risco entity base class."""
def _get_data_from_coordinator(self):
raise NotImplementedError
def _refresh_from_coordinator(self):
self._get_data_from_coordinator()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.async_on_remove(
self.coordinator.async_add_listener(self._refresh_from_coordinator)
)
@property
def _risco(self):
"""Return the Risco API object."""
return self.coordinator.risco
|
from sqlalchemy import func
from flask import current_app
from lemur import database
from lemur.models import certificate_destination_associations
from lemur.destinations.models import Destination
from lemur.certificates.models import Certificate
from lemur.sources.service import add_aws_destination_to_sources
def create(label, plugin_name, options, description=None):
"""
    Creates a new destination that can then be used as a destination for certificates.
    :param label: Destination common name
    :param plugin_name:
    :param options:
    :param description:
    :rtype : Destination
    :return: New destination
"""
    # remove any sub-plugin objects before trying to save the JSON options
for option in options:
if "plugin" in option["type"]:
del option["value"]["plugin_object"]
destination = Destination(
label=label, options=options, plugin_name=plugin_name, description=description
)
current_app.logger.info("Destination: %s created", label)
    # also register the destination as a source (AWS destinations only), so that
    # new destinations do not end up missing from the sources list
if add_aws_destination_to_sources(destination):
current_app.logger.info("Source: %s created", label)
return database.create(destination)
def update(destination_id, label, plugin_name, options, description):
"""
Updates an existing destination.
:param destination_id: Lemur assigned ID
:param label: Destination common name
:param plugin_name:
:param options:
:param description:
:rtype : Destination
:return:
"""
destination = get(destination_id)
destination.label = label
destination.plugin_name = plugin_name
    # remove any sub-plugin objects before trying to save the JSON options
for option in options:
if "plugin" in option["type"]:
del option["value"]["plugin_object"]
destination.options = options
destination.description = description
return database.update(destination)
def delete(destination_id):
"""
    Deletes a destination.
:param destination_id: Lemur assigned ID
"""
database.delete(get(destination_id))
def get(destination_id):
"""
    Retrieves a destination by its Lemur assigned ID.
:param destination_id: Lemur assigned ID
:rtype : Destination
:return:
"""
return database.get(Destination, destination_id)
def get_by_label(label):
"""
Retrieves a destination by its label
:param label:
:return:
"""
return database.get(Destination, label, field="label")
def get_all():
"""
    Retrieves all destinations currently known by Lemur.
:return:
"""
query = database.session_query(Destination)
return database.find_all(query, Destination, {}).all()
def render(args):
filt = args.pop("filter")
certificate_id = args.pop("certificate_id", None)
if certificate_id:
query = database.session_query(Destination).join(
Certificate, Destination.certificate
)
query = query.filter(Certificate.id == certificate_id)
else:
query = database.session_query(Destination)
if filt:
terms = filt.split(";")
query = database.filter(query, Destination, terms)
return database.sort_and_page(query, Destination, args)
def stats(**kwargs):
"""
Helper that defines some useful statistics about destinations.
:param kwargs:
:return:
"""
items = (
database.db.session.query(
Destination.label,
func.count(certificate_destination_associations.c.certificate_id),
)
.join(certificate_destination_associations)
.group_by(Destination.label)
.all()
)
keys = []
values = []
for key, count in items:
keys.append(key)
values.append(count)
return {"labels": keys, "values": values}
|
import json
import logging
from absl import flags
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import aws_virtual_machine
from perfkitbenchmarker.providers.aws import s3
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
flags.DEFINE_string('dpb_emr_release_label', None,
'DEPRECATED use dpb_service.version.')
SPARK_SAMPLE_LOCATION = 'file:///usr/lib/spark/examples/jars/spark-examples.jar'
INVALID_STATES = ['TERMINATED_WITH_ERRORS', 'TERMINATED']
READY_CHECK_SLEEP = 30
READY_CHECK_TRIES = 60
READY_STATE = 'WAITING'
JOB_WAIT_SLEEP = 30
EMR_TIMEOUT = 14400
disk_to_hdfs_map = {
'st1': 'HDD',
'gp2': 'SSD'
}
class EMRRetryableException(Exception):
pass
class AwsDpbEmr(dpb_service.BaseDpbService):
"""Object representing a AWS EMR cluster.
Attributes:
cluster_id: ID of the cluster.
project: ID of the project in which the cluster is being launched.
dpb_service_type: Set to 'emr'.
cmd_prefix: Setting default prefix for the emr commands (region optional).
network: Dedicated network for the EMR cluster
storage_service: Region specific instance of S3 for bucket management.
bucket_to_delete: Cluster associated bucket to be cleaned up.
dpb_version: EMR version to use.
"""
CLOUD = aws.CLOUD
SERVICE_TYPE = 'emr'
PERSISTENT_FS_PREFIX = 's3://'
def __init__(self, dpb_service_spec):
super(AwsDpbEmr, self).__init__(dpb_service_spec)
self.dpb_service_type = AwsDpbEmr.SERVICE_TYPE
self.project = None
self.cmd_prefix = list(util.AWS_PREFIX)
if self.dpb_service_zone:
self.region = util.GetRegionFromZone(self.dpb_service_zone)
else:
raise errors.Setup.InvalidSetupError(
'dpb_service_zone must be provided, for provisioning.')
self.cmd_prefix += ['--region', self.region]
self.network = aws_network.AwsNetwork.GetNetworkFromNetworkSpec(
aws_network.AwsNetworkSpec(zone=self.dpb_service_zone))
self.storage_service = s3.S3Service()
self.storage_service.PrepareService(self.region)
self.bucket_to_delete = None
self.dpb_version = FLAGS.dpb_emr_release_label or self.dpb_version
if not self.dpb_version:
raise errors.Setup.InvalidSetupError(
'dpb_service.version must be provided.')
@staticmethod
def CheckPrerequisites(benchmark_config):
del benchmark_config # Unused
@property
def security_group_id(self):
"""Returns the security group ID of this Cluster."""
return self.network.regional_network.vpc.default_security_group_id
def _CreateLogBucket(self):
"""Create the s3 bucket for the EMR cluster's logs."""
log_bucket_name = 'pkb-{0}-emr'.format(FLAGS.run_uri)
self.storage_service.MakeBucket(log_bucket_name)
return 's3://{}'.format(log_bucket_name)
def _DeleteLogBucket(self):
"""Delete the s3 bucket holding the EMR cluster's logs.
This method is part of the Delete lifecycle of the resource.
"""
# TODO(saksena): Deprecate the use of FLAGS.run_uri and plumb as argument.
log_bucket_name = 'pkb-{0}-emr'.format(FLAGS.run_uri)
self.storage_service.DeleteBucket(log_bucket_name)
def _CreateDependencies(self):
"""Set up the ssh key."""
aws_virtual_machine.AwsKeyFileManager.ImportKeyfile(self.region)
def _Create(self):
"""Creates the cluster."""
name = 'pkb_' + FLAGS.run_uri
    # Set up EBS details if disk_spec is present in the config
ebs_configuration = None
if self.spec.worker_group.disk_spec:
# Make sure nothing we are ignoring is included in the disk spec
assert self.spec.worker_group.disk_spec.device_path is None
assert self.spec.worker_group.disk_spec.disk_number is None
assert self.spec.worker_group.disk_spec.iops is None
ebs_configuration = {'EbsBlockDeviceConfigs': [
{'VolumeSpecification': {
'SizeInGB': self.spec.worker_group.disk_spec.disk_size,
'VolumeType': self.spec.worker_group.disk_spec.disk_type},
'VolumesPerInstance': self.spec.worker_group.disk_count}]}
self.dpb_hdfs_type = disk_to_hdfs_map[
self.spec.worker_group.disk_spec.disk_type]
# Create the specification for the master and the worker nodes
instance_groups = []
core_instances = {'InstanceCount': self.spec.worker_count,
'InstanceGroupType': 'CORE',
'InstanceType':
self.spec.worker_group.vm_spec.machine_type}
if ebs_configuration:
core_instances.update({'EbsConfiguration': ebs_configuration})
master_instance = {'InstanceCount': 1,
'InstanceGroupType': 'MASTER',
'InstanceType':
self.spec.worker_group.vm_spec.machine_type}
if ebs_configuration:
master_instance.update({'EbsConfiguration': ebs_configuration})
instance_groups.append(core_instances)
instance_groups.append(master_instance)
# Create the log bucket to hold job's log output
# TODO(saksena): Deprecate aws_emr_loguri flag and move
# the log bucket creation to Create dependencies.
logs_bucket = self._CreateLogBucket()
# Spark SQL needs to access Hive
cmd = self.cmd_prefix + ['emr', 'create-cluster', '--name', name,
'--release-label', self.dpb_version,
'--use-default-roles',
'--instance-groups',
json.dumps(instance_groups),
'--application', 'Name=Spark',
'Name=Hadoop', 'Name=Hive',
'--log-uri', logs_bucket]
ec2_attributes = [
'KeyName=' + aws_virtual_machine.AwsKeyFileManager.GetKeyNameForRun(),
'SubnetId=' + self.network.subnet.id,
# Place all VMs in default security group for simplicity and speed of
# provisioning
'EmrManagedMasterSecurityGroup=' + self.security_group_id,
'EmrManagedSlaveSecurityGroup=' + self.security_group_id,
]
cmd += ['--ec2-attributes', ','.join(ec2_attributes)]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
self.cluster_id = result['ClusterId']
logging.info('Cluster created with id %s', self.cluster_id)
for tag_key, tag_value in util.MakeDefaultTags().items():
self._AddTag(tag_key, tag_value)
def _AddTag(self, key, value):
cmd = self.cmd_prefix + ['emr', 'add-tags',
'--resource-id', self.cluster_id,
'--tag',
'{}={}'.format(key, value)]
vm_util.IssueCommand(cmd)
def _Delete(self):
if self.cluster_id:
delete_cmd = self.cmd_prefix + ['emr',
'terminate-clusters',
'--cluster-ids',
self.cluster_id]
vm_util.IssueCommand(delete_cmd, raise_on_failure=False)
def _DeleteDependencies(self):
self._DeleteLogBucket()
aws_virtual_machine.AwsKeyFileManager.DeleteKeyfile(self.region)
def _Exists(self):
"""Check to see whether the cluster exists."""
if not self.cluster_id:
return False
cmd = self.cmd_prefix + ['emr',
'describe-cluster',
'--cluster-id',
self.cluster_id]
stdout, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
return False
result = json.loads(stdout)
if result['Cluster']['Status']['State'] in INVALID_STATES:
return False
else:
return True
def _IsReady(self):
"""Check to see if the cluster is ready."""
logging.info('Checking _Ready cluster: %s', self.cluster_id)
cmd = self.cmd_prefix + ['emr',
'describe-cluster', '--cluster-id',
self.cluster_id]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
    # TODO(saksena): Handle error outcomes when spinning up emr clusters
return result['Cluster']['Status']['State'] == READY_STATE
def _IsStepDone(self, step_id):
"""Determine whether the step is done.
Args:
step_id: The step id to query.
Returns:
      A dictionary describing the step if the step is complete,
None otherwise.
Raises:
JobSubmissionError if job fails.
"""
cmd = self.cmd_prefix + ['emr', 'describe-step', '--cluster-id',
self.cluster_id, '--step-id', step_id]
stdout, _, _ = vm_util.IssueCommand(cmd)
result = json.loads(stdout)
state = result['Step']['Status']['State']
if state == 'FAILED':
raise dpb_service.JobSubmissionError(
result['Step']['Status']['FailureDetails'])
if state == 'COMPLETED':
return result
else:
return None
def SubmitJob(self,
jarfile=None,
classname=None,
pyspark_file=None,
query_file=None,
job_poll_interval=5,
job_arguments=None,
job_files=None,
job_jars=None,
job_stdout_file=None,
job_type=None,
properties=None):
"""See base class."""
@vm_util.Retry(
timeout=EMR_TIMEOUT,
poll_interval=job_poll_interval,
fuzz=0,
retryable_exceptions=(EMRRetryableException,))
def WaitForStep(step_id):
result = self._IsStepDone(step_id)
if result is None:
raise EMRRetryableException('Step {0} not complete.'.format(step_id))
return result
if job_arguments:
# Escape commas in arguments
job_arguments = (arg.replace(',', '\\,') for arg in job_arguments)
all_properties = self.GetJobProperties()
all_properties.update(properties or {})
if job_type == 'hadoop':
if not (jarfile or classname):
raise ValueError('You must specify jarfile or classname.')
if jarfile and classname:
raise ValueError('You cannot specify both jarfile and classname.')
arg_list = []
if classname:
# EMR does not support passing classnames as jobs. Instead manually
# invoke `hadoop CLASSNAME` using command-runner.jar
jarfile = 'command-runner.jar'
arg_list = ['hadoop', classname]
# Order is important
arg_list += ['-D{}={}'.format(k, v) for k, v in all_properties.items()]
if job_arguments:
arg_list += job_arguments
arg_spec = 'Args=[' + ','.join(arg_list) + ']'
step_list = ['Jar=' + jarfile, arg_spec]
elif job_type == self.SPARK_JOB_TYPE:
arg_list = []
if job_files:
arg_list += ['--files', ','.join(job_files)]
if job_jars:
arg_list += ['--jars', ','.join(job_jars)]
for k, v in all_properties.items():
arg_list += ['--conf', '{}={}'.format(k, v)]
# jarfile must be last before args
arg_list += ['--class', classname, jarfile]
if job_arguments:
arg_list += job_arguments
arg_spec = '[' + ','.join(arg_list) + ']'
step_type_spec = 'Type=Spark'
step_list = [step_type_spec, 'Args=' + arg_spec]
elif job_type == self.PYSPARK_JOB_TYPE:
arg_list = []
if job_files:
arg_list += ['--files', ','.join(job_files)]
if job_jars:
arg_list += ['--jars', ','.join(job_jars)]
for k, v in all_properties.items():
arg_list += ['--conf', '{}={}'.format(k, v)]
# pyspark_file must be last before args
arg_list += [pyspark_file]
if job_arguments:
arg_list += job_arguments
arg_spec = 'Args=[{}]'.format(','.join(arg_list))
step_list = ['Type=Spark', arg_spec]
elif job_type == self.SPARKSQL_JOB_TYPE:
assert not job_arguments
arg_list = [query_file]
jar_spec = 'Jar="command-runner.jar"'
for k, v in all_properties.items():
arg_list += ['--conf', '{}={}'.format(k, v)]
arg_spec = 'Args=[spark-sql,-f,{}]'.format(','.join(arg_list))
step_list = [jar_spec, arg_spec]
step_string = ','.join(step_list)
step_cmd = self.cmd_prefix + ['emr',
'add-steps',
'--cluster-id',
self.cluster_id,
'--steps',
step_string]
stdout, _, _ = vm_util.IssueCommand(step_cmd)
result = json.loads(stdout)
step_id = result['StepIds'][0]
result = WaitForStep(step_id)
pending_time = result['Step']['Status']['Timeline']['CreationDateTime']
start_time = result['Step']['Status']['Timeline']['StartDateTime']
end_time = result['Step']['Status']['Timeline']['EndDateTime']
return dpb_service.JobResult(
run_time=end_time - start_time,
pending_time=start_time - pending_time)
def SetClusterProperty(self):
pass
def CreateBucket(self, source_bucket):
"""Create a bucket on S3 for use during the persistent data processing.
Args:
source_bucket: String, name of the bucket to create.
"""
self.storage_service.MakeBucket(source_bucket)
def DeleteBucket(self, source_bucket):
"""Delete a bucket on S3 used during the persistent data processing.
Args:
source_bucket: String, name of the bucket to delete.
"""
self.storage_service.DeleteBucket(source_bucket)
def distributed_copy(self, source_location, destination_location):
"""Method to copy data using a distributed job on the cluster."""
@vm_util.Retry(timeout=EMR_TIMEOUT,
poll_interval=5, fuzz=0)
def WaitForStep(step_id):
result = self._IsStepDone(step_id)
if result is None:
raise EMRRetryableException('Step {0} not complete.'.format(step_id))
return result
job_arguments = ['s3-dist-cp', '--s3Endpoint=s3.amazonaws.com']
job_arguments.append('--src={}'.format(source_location))
job_arguments.append('--dest={}'.format(destination_location))
arg_spec = '[' + ','.join(job_arguments) + ']'
step_type_spec = 'Type=CUSTOM_JAR'
step_name = 'Name="S3DistCp"'
step_action_on_failure = 'ActionOnFailure=CONTINUE'
jar_spec = 'Jar=command-runner.jar'
step_list = [step_type_spec, step_name, step_action_on_failure, jar_spec]
step_list.append('Args=' + arg_spec)
step_string = ','.join(step_list)
step_cmd = self.cmd_prefix + ['emr',
'add-steps',
'--cluster-id',
self.cluster_id,
'--steps',
step_string]
stdout, _, _ = vm_util.IssueCommand(step_cmd)
result = json.loads(stdout)
step_id = result['StepIds'][0]
metrics = {}
result = WaitForStep(step_id)
pending_time = result['Step']['Status']['Timeline']['CreationDateTime']
start_time = result['Step']['Status']['Timeline']['StartDateTime']
end_time = result['Step']['Status']['Timeline']['EndDateTime']
metrics[dpb_service.WAITING] = start_time - pending_time
metrics[dpb_service.RUNTIME] = end_time - start_time
step_state = result['Step']['Status']['State']
metrics[dpb_service.SUCCESS] = step_state == 'COMPLETED'
return metrics
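# Hedged sketch, illustrative only, of the --instance-groups JSON assembled in
# _Create() above. The machine type, worker count and EBS settings are made-up
# example values rather than defaults of this module.
def _example_instance_groups():
  """Return a sample instance-groups payload like the one passed to the CLI."""
  ebs_configuration = {'EbsBlockDeviceConfigs': [
      {'VolumeSpecification': {'SizeInGB': 500, 'VolumeType': 'gp2'},
       'VolumesPerInstance': 1}]}
  return [
      {'InstanceCount': 2,
       'InstanceGroupType': 'CORE',
       'InstanceType': 'm5.xlarge',
       'EbsConfiguration': ebs_configuration},
      {'InstanceCount': 1,
       'InstanceGroupType': 'MASTER',
       'InstanceType': 'm5.xlarge',
       'EbsConfiguration': ebs_configuration},
  ]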
|
import itertools
from absl import flags
PROVISION = 'provision'
PREPARE = 'prepare'
RUN = 'run'
CLEANUP = 'cleanup'
TEARDOWN = 'teardown'
STAGES = [PROVISION, PREPARE, RUN, CLEANUP, TEARDOWN]
_NEXT_STAGE = {PROVISION: PREPARE, PREPARE: RUN, RUN: CLEANUP,
CLEANUP: TEARDOWN}
_ALL = 'all'
_VALID_FLAG_VALUES = PROVISION, PREPARE, RUN, CLEANUP, TEARDOWN, _ALL
_SYNTACTIC_HELP = (
"A complete benchmark execution consists of {0} stages: {1}. Possible flag "
"values include an individual stage, a comma-separated list of stages, or "
"'all'. If a list of stages is provided, they must be in order without "
"skipping any stage.".format(len(STAGES), ', '.join(STAGES)))
class RunStageParser(flags.ListParser):
"""Parse a string containing PKB run stages.
See _SYNTACTIC_HELP for more information.
"""
def __init__(self, *args, **kwargs):
super(RunStageParser, self).__init__(*args, **kwargs)
self.syntactic_help = _SYNTACTIC_HELP
def parse(self, argument):
"""Parses a list of stages.
Args:
argument: string or list of strings.
Returns:
list of strings whose elements are chosen from STAGES.
Raises:
ValueError: If argument does not conform to the guidelines explained in
syntactic_help.
"""
stage_list = super(RunStageParser, self).parse(argument)
if not stage_list:
raise ValueError('Unable to parse {0}. Stage list cannot be '
'empty.'.format(repr(argument)))
invalid_items = set(stage_list).difference(_VALID_FLAG_VALUES)
if invalid_items:
raise ValueError(
'Unable to parse {0}. Unrecognized stages were found: {1}'.format(
repr(argument), ', '.join(sorted(invalid_items))))
if _ALL in stage_list:
if len(stage_list) > 1:
raise ValueError(
"Unable to parse {0}. If 'all' stages are specified, individual "
"stages cannot also be specified.".format(repr(argument)))
return list(STAGES)
previous_stage = stage_list[0]
for stage in itertools.islice(stage_list, 1, None):
expected_stage = _NEXT_STAGE.get(previous_stage)
if not expected_stage:
raise ValueError("Unable to parse {0}. '{1}' should be the last "
"stage.".format(repr(argument), previous_stage))
if stage != expected_stage:
raise ValueError(
"Unable to parse {0}. The stage after '{1}' should be '{2}', not "
"'{3}'.".format(repr(argument), previous_stage, expected_stage,
stage))
previous_stage = stage
return stage_list
flags.DEFINE(
RunStageParser(), 'run_stage', STAGES,
"The stage or stages of perfkitbenchmarker to run.",
flags.FLAGS, flags.ListSerializer(','))
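# Hedged usage sketch of the parsing rules documented in _SYNTACTIC_HELP; the
# flag values below are illustrative.
def _example_run_stage_parsing():
  """Show how RunStageParser expands 'all' and validates ordered stage lists."""
  parser = RunStageParser()
  assert parser.parse('all') == STAGES
  assert parser.parse('provision,prepare,run') == [PROVISION, PREPARE, RUN]
  # Skipping or reordering stages (e.g. 'prepare,teardown') raises ValueError.
  return parser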
|
import platform
import os
import diamond.collector
# Detect the architecture of the system
# and set the maximum counter value
# accordingly. Otherwise, counters rolling
# over would produce incorrect or
# negative values.
if platform.architecture()[0] == '64bit':
counter = (2 ** 64) - 1
else:
counter = (2 ** 32) - 1
class SoftInterruptCollector(diamond.collector.Collector):
PROC = '/proc/stat'
def get_default_config_help(self):
config_help = super(SoftInterruptCollector,
self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(SoftInterruptCollector, self).get_default_config()
config.update({
'path': 'softirq'
})
return config
def collect(self):
"""
Collect interrupt data
"""
if not os.access(self.PROC, os.R_OK):
return False
# Open PROC file
file = open(self.PROC, 'r')
# Get data
for line in file:
if not line.startswith('softirq'):
continue
data = line.split()
metric_name = 'total'
metric_value = int(data[1])
metric_value = int(self.derivative(
metric_name,
long(metric_value), counter))
self.publish(metric_name, metric_value)
for i in range(2, len(data)):
metric_name = str(i - 2)
metric_value = int(data[i])
metric_value = int(self.derivative(
metric_name,
long(metric_value), counter))
self.publish(metric_name, metric_value)
# Close file
file.close()
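# Hedged sketch of the parsing above applied to a sample /proc/stat softirq
# line; the counts are made up. Field 1 is published as 'total', the remaining
# fields under zero-based numeric metric names.
def _example_softirq_parse():
    data = 'softirq 229 0 29 0 64 18 0 2 41 0 75'.split()
    metrics = {'total': int(data[1])}
    for i in range(2, len(data)):
        metrics[str(i - 2)] = int(data[i])
    return metrics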
|
from typing import Any, Dict
from homematicip.aio.device import (
AsyncBrandSwitchMeasuring,
AsyncFullFlushSwitchMeasuring,
AsyncHeatingThermostat,
AsyncHeatingThermostatCompact,
AsyncLightSensor,
AsyncMotionDetectorIndoor,
AsyncMotionDetectorOutdoor,
AsyncMotionDetectorPushButton,
AsyncPassageDetector,
AsyncPlugableSwitchMeasuring,
AsyncPresenceDetectorIndoor,
AsyncRoomControlDeviceAnalog,
AsyncTemperatureHumiditySensorDisplay,
AsyncTemperatureHumiditySensorOutdoor,
AsyncTemperatureHumiditySensorWithoutDisplay,
AsyncWeatherSensor,
AsyncWeatherSensorPlus,
AsyncWeatherSensorPro,
)
from homematicip.base.enums import ValveState
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
LENGTH_MILLIMETERS,
LIGHT_LUX,
PERCENTAGE,
POWER_WATT,
SPEED_KILOMETERS_PER_HOUR,
TEMP_CELSIUS,
)
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN as HMIPC_DOMAIN, HomematicipGenericEntity
from .generic_entity import ATTR_IS_GROUP, ATTR_MODEL_TYPE
from .hap import HomematicipHAP
ATTR_CURRENT_ILLUMINATION = "current_illumination"
ATTR_LOWEST_ILLUMINATION = "lowest_illumination"
ATTR_HIGHEST_ILLUMINATION = "highest_illumination"
ATTR_LEFT_COUNTER = "left_counter"
ATTR_RIGHT_COUNTER = "right_counter"
ATTR_TEMPERATURE_OFFSET = "temperature_offset"
ATTR_WIND_DIRECTION = "wind_direction"
ATTR_WIND_DIRECTION_VARIATION = "wind_direction_variation_in_degree"
ILLUMINATION_DEVICE_ATTRIBUTES = {
"currentIllumination": ATTR_CURRENT_ILLUMINATION,
"lowestIllumination": ATTR_LOWEST_ILLUMINATION,
"highestIllumination": ATTR_HIGHEST_ILLUMINATION,
}
async def async_setup_entry(
hass: HomeAssistantType, config_entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the HomematicIP Cloud sensors from a config entry."""
hap = hass.data[HMIPC_DOMAIN][config_entry.unique_id]
entities = [HomematicipAccesspointStatus(hap)]
for device in hap.home.devices:
if isinstance(device, (AsyncHeatingThermostat, AsyncHeatingThermostatCompact)):
entities.append(HomematicipHeatingThermostat(hap, device))
entities.append(HomematicipTemperatureSensor(hap, device))
if isinstance(
device,
(
AsyncTemperatureHumiditySensorDisplay,
AsyncTemperatureHumiditySensorWithoutDisplay,
AsyncTemperatureHumiditySensorOutdoor,
AsyncWeatherSensor,
AsyncWeatherSensorPlus,
AsyncWeatherSensorPro,
),
):
entities.append(HomematicipTemperatureSensor(hap, device))
entities.append(HomematicipHumiditySensor(hap, device))
elif isinstance(device, (AsyncRoomControlDeviceAnalog,)):
entities.append(HomematicipTemperatureSensor(hap, device))
if isinstance(
device,
(
AsyncLightSensor,
AsyncMotionDetectorIndoor,
AsyncMotionDetectorOutdoor,
AsyncMotionDetectorPushButton,
AsyncPresenceDetectorIndoor,
AsyncWeatherSensor,
AsyncWeatherSensorPlus,
AsyncWeatherSensorPro,
),
):
entities.append(HomematicipIlluminanceSensor(hap, device))
if isinstance(
device,
(
AsyncPlugableSwitchMeasuring,
AsyncBrandSwitchMeasuring,
AsyncFullFlushSwitchMeasuring,
),
):
entities.append(HomematicipPowerSensor(hap, device))
if isinstance(
device, (AsyncWeatherSensor, AsyncWeatherSensorPlus, AsyncWeatherSensorPro)
):
entities.append(HomematicipWindspeedSensor(hap, device))
if isinstance(device, (AsyncWeatherSensorPlus, AsyncWeatherSensorPro)):
entities.append(HomematicipTodayRainSensor(hap, device))
if isinstance(device, AsyncPassageDetector):
entities.append(HomematicipPassageDetectorDeltaCounter(hap, device))
if entities:
async_add_entities(entities)
class HomematicipAccesspointStatus(HomematicipGenericEntity):
"""Representation of then HomeMaticIP access point."""
def __init__(self, hap: HomematicipHAP) -> None:
"""Initialize access point status entity."""
super().__init__(hap, device=hap.home, post="Duty Cycle")
@property
def device_info(self) -> Dict[str, Any]:
"""Return device specific attributes."""
# Adds a sensor to the existing HAP device
return {
"identifiers": {
# Serial numbers of Homematic IP device
(HMIPC_DOMAIN, self._home.id)
}
}
@property
def icon(self) -> str:
"""Return the icon of the access point entity."""
return "mdi:access-point-network"
@property
def state(self) -> float:
"""Return the state of the access point."""
return self._home.dutyCycle
@property
def available(self) -> bool:
"""Return if access point is available."""
return self._home.connected
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return PERCENTAGE
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the access point."""
state_attr = super().device_state_attributes
state_attr[ATTR_MODEL_TYPE] = "HmIP-HAP"
state_attr[ATTR_IS_GROUP] = False
return state_attr
class HomematicipHeatingThermostat(HomematicipGenericEntity):
"""Representation of the HomematicIP heating thermostat."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize heating thermostat device."""
super().__init__(hap, device, post="Heating")
@property
def icon(self) -> str:
"""Return the icon."""
if super().icon:
return super().icon
if self._device.valveState != ValveState.ADAPTION_DONE:
return "mdi:alert"
return "mdi:radiator"
@property
def state(self) -> int:
"""Return the state of the radiator valve."""
if self._device.valveState != ValveState.ADAPTION_DONE:
return self._device.valveState
return round(self._device.valvePosition * 100)
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return PERCENTAGE
class HomematicipHumiditySensor(HomematicipGenericEntity):
"""Representation of the HomematicIP humidity sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the thermometer device."""
super().__init__(hap, device, post="Humidity")
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_HUMIDITY
@property
def state(self) -> int:
"""Return the state."""
return self._device.humidity
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return PERCENTAGE
class HomematicipTemperatureSensor(HomematicipGenericEntity):
"""Representation of the HomematicIP thermometer."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the thermometer device."""
super().__init__(hap, device, post="Temperature")
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_TEMPERATURE
@property
def state(self) -> float:
"""Return the state."""
if hasattr(self._device, "valveActualTemperature"):
return self._device.valveActualTemperature
return self._device.actualTemperature
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return TEMP_CELSIUS
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the windspeed sensor."""
state_attr = super().device_state_attributes
temperature_offset = getattr(self._device, "temperatureOffset", None)
if temperature_offset:
state_attr[ATTR_TEMPERATURE_OFFSET] = temperature_offset
return state_attr
class HomematicipIlluminanceSensor(HomematicipGenericEntity):
"""Representation of the HomematicIP Illuminance sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the device."""
super().__init__(hap, device, post="Illuminance")
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_ILLUMINANCE
@property
def state(self) -> float:
"""Return the state."""
if hasattr(self._device, "averageIllumination"):
return self._device.averageIllumination
return self._device.illumination
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return LIGHT_LUX
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the wind speed sensor."""
state_attr = super().device_state_attributes
for attr, attr_key in ILLUMINATION_DEVICE_ATTRIBUTES.items():
attr_value = getattr(self._device, attr, None)
if attr_value:
state_attr[attr_key] = attr_value
return state_attr
class HomematicipPowerSensor(HomematicipGenericEntity):
"""Representation of the HomematicIP power measuring sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the device."""
super().__init__(hap, device, post="Power")
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_POWER
@property
def state(self) -> float:
"""Return the power consumption value."""
return self._device.currentPowerConsumption
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return POWER_WATT
class HomematicipWindspeedSensor(HomematicipGenericEntity):
"""Representation of the HomematicIP wind speed sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the windspeed sensor."""
super().__init__(hap, device, post="Windspeed")
@property
def state(self) -> float:
"""Return the wind speed value."""
return self._device.windSpeed
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return SPEED_KILOMETERS_PER_HOUR
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the wind speed sensor."""
state_attr = super().device_state_attributes
wind_direction = getattr(self._device, "windDirection", None)
if wind_direction is not None:
state_attr[ATTR_WIND_DIRECTION] = _get_wind_direction(wind_direction)
wind_direction_variation = getattr(self._device, "windDirectionVariation", None)
if wind_direction_variation:
state_attr[ATTR_WIND_DIRECTION_VARIATION] = wind_direction_variation
return state_attr
class HomematicipTodayRainSensor(HomematicipGenericEntity):
"""Representation of the HomematicIP rain counter of a day sensor."""
def __init__(self, hap: HomematicipHAP, device) -> None:
"""Initialize the device."""
super().__init__(hap, device, post="Today Rain")
@property
def state(self) -> float:
"""Return the today's rain value."""
return round(self._device.todayRainCounter, 2)
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return LENGTH_MILLIMETERS
class HomematicipPassageDetectorDeltaCounter(HomematicipGenericEntity):
"""Representation of the HomematicIP passage detector delta counter."""
@property
def state(self) -> int:
"""Return the passage detector delta counter value."""
return self._device.leftRightCounterDelta
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the delta counter."""
state_attr = super().device_state_attributes
state_attr[ATTR_LEFT_COUNTER] = self._device.leftCounter
state_attr[ATTR_RIGHT_COUNTER] = self._device.rightCounter
return state_attr
def _get_wind_direction(wind_direction_degree: float) -> str:
"""Convert wind direction degree to named direction."""
if 11.25 <= wind_direction_degree < 33.75:
return "NNE"
if 33.75 <= wind_direction_degree < 56.25:
return "NE"
if 56.25 <= wind_direction_degree < 78.75:
return "ENE"
if 78.75 <= wind_direction_degree < 101.25:
return "E"
if 101.25 <= wind_direction_degree < 123.75:
return "ESE"
if 123.75 <= wind_direction_degree < 146.25:
return "SE"
if 146.25 <= wind_direction_degree < 168.75:
return "SSE"
if 168.75 <= wind_direction_degree < 191.25:
return "S"
if 191.25 <= wind_direction_degree < 213.75:
return "SSW"
if 213.75 <= wind_direction_degree < 236.25:
return "SW"
if 236.25 <= wind_direction_degree < 258.75:
return "WSW"
if 258.75 <= wind_direction_degree < 281.25:
return "W"
if 281.25 <= wind_direction_degree < 303.75:
return "WNW"
if 303.75 <= wind_direction_degree < 326.25:
return "NW"
if 326.25 <= wind_direction_degree < 348.75:
return "NNW"
return "N"
|
import asyncio
import time
import pytest
from homeassistant.util.timeout import TimeoutManager
async def test_simple_global_timeout():
"""Test a simple global timeout."""
timeout = TimeoutManager()
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.1):
await asyncio.sleep(0.3)
async def test_simple_global_timeout_with_executor_job(hass):
"""Test a simple global timeout with executor job."""
timeout = TimeoutManager()
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.1):
await hass.async_add_executor_job(lambda: time.sleep(0.2))
async def test_simple_global_timeout_freeze():
"""Test a simple global timeout freeze."""
timeout = TimeoutManager()
async with timeout.async_timeout(0.2):
async with timeout.async_freeze():
await asyncio.sleep(0.3)
async def test_simple_zone_timeout_freeze_inside_executor_job(hass):
"""Test a simple zone timeout freeze inside an executor job."""
timeout = TimeoutManager()
def _some_sync_work():
with timeout.freeze("recorder"):
time.sleep(0.3)
async with timeout.async_timeout(1.0):
async with timeout.async_timeout(0.2, zone_name="recorder"):
await hass.async_add_executor_job(_some_sync_work)
async def test_simple_global_timeout_freeze_inside_executor_job(hass):
"""Test a simple global timeout freeze inside an executor job."""
timeout = TimeoutManager()
def _some_sync_work():
with timeout.freeze():
time.sleep(0.3)
async with timeout.async_timeout(0.2):
await hass.async_add_executor_job(_some_sync_work)
async def test_mix_global_timeout_freeze_and_zone_freeze_inside_executor_job(hass):
"""Test a simple global timeout freeze inside an executor job."""
timeout = TimeoutManager()
def _some_sync_work():
with timeout.freeze("recorder"):
time.sleep(0.3)
async with timeout.async_timeout(0.1):
async with timeout.async_timeout(0.2, zone_name="recorder"):
await hass.async_add_executor_job(_some_sync_work)
async def test_mix_global_timeout_freeze_and_zone_freeze_different_order(hass):
"""Test a simple global timeout freeze inside an executor job before timeout was set."""
timeout = TimeoutManager()
def _some_sync_work():
with timeout.freeze("recorder"):
time.sleep(0.4)
async with timeout.async_timeout(0.1):
hass.async_add_executor_job(_some_sync_work)
async with timeout.async_timeout(0.2, zone_name="recorder"):
await asyncio.sleep(0.3)
async def test_mix_global_timeout_freeze_and_zone_freeze_other_zone_inside_executor_job(
hass,
):
"""Test a simple global timeout freeze other zone inside an executor job."""
timeout = TimeoutManager()
def _some_sync_work():
with timeout.freeze("not_recorder"):
time.sleep(0.3)
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.1):
async with timeout.async_timeout(0.2, zone_name="recorder"):
async with timeout.async_timeout(0.2, zone_name="not_recorder"):
await hass.async_add_executor_job(_some_sync_work)
async def test_mix_global_timeout_freeze_and_zone_freeze_inside_executor_job_second_job_outside_zone_context(
hass,
):
"""Test a simple global timeout freeze inside an executor job with second job outside of zone context."""
timeout = TimeoutManager()
def _some_sync_work():
with timeout.freeze("recorder"):
time.sleep(0.3)
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.1):
async with timeout.async_timeout(0.2, zone_name="recorder"):
await hass.async_add_executor_job(_some_sync_work)
await hass.async_add_executor_job(lambda: time.sleep(0.2))
async def test_simple_global_timeout_freeze_with_executor_job(hass):
"""Test a simple global timeout freeze with executor job."""
timeout = TimeoutManager()
async with timeout.async_timeout(0.2):
async with timeout.async_freeze():
await hass.async_add_executor_job(lambda: time.sleep(0.3))
async def test_simple_global_timeout_freeze_reset():
"""Test a simple global timeout freeze reset."""
timeout = TimeoutManager()
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.2):
async with timeout.async_freeze():
await asyncio.sleep(0.1)
await asyncio.sleep(0.2)
async def test_simple_zone_timeout():
"""Test a simple zone timeout."""
timeout = TimeoutManager()
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.1, "test"):
await asyncio.sleep(0.3)
async def test_multiple_zone_timeout():
"""Test a simple zone timeout."""
timeout = TimeoutManager()
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.1, "test"):
async with timeout.async_timeout(0.5, "test"):
await asyncio.sleep(0.3)
async def test_different_zone_timeout():
"""Test a simple zone timeout."""
timeout = TimeoutManager()
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.1, "test"):
async with timeout.async_timeout(0.5, "other"):
await asyncio.sleep(0.3)
async def test_simple_zone_timeout_freeze():
"""Test a simple zone timeout freeze."""
timeout = TimeoutManager()
async with timeout.async_timeout(0.2, "test"):
async with timeout.async_freeze("test"):
await asyncio.sleep(0.3)
async def test_simple_zone_timeout_freeze_without_timeout():
"""Test a simple zone timeout freeze on a zone that does not have a timeout set."""
timeout = TimeoutManager()
async with timeout.async_timeout(0.1, "test"):
async with timeout.async_freeze("test"):
await asyncio.sleep(0.3)
async def test_simple_zone_timeout_freeze_reset():
"""Test a simple zone timeout freeze reset."""
timeout = TimeoutManager()
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.2, "test"):
async with timeout.async_freeze("test"):
await asyncio.sleep(0.1)
await asyncio.sleep(0.2, "test")
async def test_mix_zone_timeout_freeze_and_global_freeze():
"""Test a mix zone timeout freeze and global freeze."""
timeout = TimeoutManager()
async with timeout.async_timeout(0.2, "test"):
async with timeout.async_freeze("test"):
async with timeout.async_freeze():
await asyncio.sleep(0.3)
async def test_mix_global_and_zone_timeout_freeze_():
"""Test a mix zone timeout freeze and global freeze."""
timeout = TimeoutManager()
async with timeout.async_timeout(0.2, "test"):
async with timeout.async_freeze():
async with timeout.async_freeze("test"):
await asyncio.sleep(0.3)
async def test_mix_zone_timeout_freeze():
"""Test a mix zone timeout global freeze."""
timeout = TimeoutManager()
async with timeout.async_timeout(0.2, "test"):
async with timeout.async_freeze():
await asyncio.sleep(0.3)
async def test_mix_zone_timeout():
"""Test a mix zone timeout global."""
timeout = TimeoutManager()
async with timeout.async_timeout(0.1):
try:
async with timeout.async_timeout(0.2, "test"):
await asyncio.sleep(0.4)
except asyncio.TimeoutError:
pass
async def test_mix_zone_timeout_trigger_global():
"""Test a mix zone timeout global with trigger it."""
timeout = TimeoutManager()
with pytest.raises(asyncio.TimeoutError):
async with timeout.async_timeout(0.1):
try:
async with timeout.async_timeout(0.1, "test"):
await asyncio.sleep(0.3)
except asyncio.TimeoutError:
pass
await asyncio.sleep(0.3)
async def test_mix_zone_timeout_trigger_global_cool_down():
"""Test a mix zone timeout global with trigger it with cool_down."""
timeout = TimeoutManager()
async with timeout.async_timeout(0.1, cool_down=0.3):
try:
async with timeout.async_timeout(0.1, "test"):
await asyncio.sleep(0.3)
except asyncio.TimeoutError:
pass
await asyncio.sleep(0.2)
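# Hedged extra example following the same pattern as the tests above: a zone
# timeout that is never exceeded should not raise.
async def test_simple_zone_timeout_does_not_trigger():
    """Test that a zone timeout which is not exceeded passes cleanly."""
    timeout = TimeoutManager()
    async with timeout.async_timeout(0.5, "test"):
        await asyncio.sleep(0.1)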
|
from pscript import window
from ... import event
from ._canvas import CanvasWidget
class PlotWidget(CanvasWidget):
""" Widget to show a plot of x vs y values. Enough for simple
plotting tasks.
"""
DEFAULT_MIN_SIZE = 300, 200
xdata = event.TupleProp((), doc="""
A list of values for the x-axis. Set via the ``set_data()`` action.
""")
ydata = event.TupleProp((), doc="""
A list of values for the y-axis. Set via the ``set_data()`` action.
""")
@event.action
def set_data(self, xdata, ydata):
""" Set the xdata and ydata.
"""
xdata = [float(i) for i in xdata]
ydata = [float(i) for i in ydata]
if len(xdata) != len(ydata):
raise ValueError('xdata and ydata must be of equal length.')
self._mutate('xdata', xdata)
self._mutate('ydata', ydata)
yrange = event.FloatPairProp((0, 0), settable=True, doc="""
The range for the y-axis. If (0, 0) (default) it is determined
from the data.
""")
line_color = event.ColorProp('#5af', settable=True, doc="""
The color of the line. Set to the empty string to hide the line.
""")
marker_color = event.ColorProp('#5af', settable=True, doc="""
The color of the marker. Set to the empty string to hide the marker.
""")
line_width = event.FloatProp(2, settable=True, doc="""
The width of the line, in pixels.
""")
marker_size = event.FloatProp(6, settable=True, doc="""
The size of the marker, in pixels.
""")
xlabel = event.StringProp('', settable=True, doc="""
The label to show on the x-axis.
""")
ylabel = event.StringProp('', settable=True, doc="""
The label to show on the y-axis.
""")
def init(self):
super().init()
self._context = self.node.getContext('2d')
# create tick units
self._tick_units = []
for e in range(-10, 10):
for i in [10, 20, 25, 50]:
self._tick_units.append(i*10**e)
@event.reaction('xdata', 'ydata', 'yrange', 'line_color', 'line_width',
'marker_color', 'marker_size', 'xlabel', 'ylabel',
'title', 'size')
def update(self, *events):
window.requestAnimationFrame(self._update)
def _update(self):
xx, yy = self.xdata, self.ydata
yrange = self.yrange
lc, lw = self.line_color, self.line_width
mc, ms = self.marker_color, self.marker_size
title, xlabel, ylabel = self.title, self.xlabel, self.ylabel
# Prepare
ctx = self._context
w, h = self.node.clientWidth, self.node.clientHeight
# Get range
x1, x2 = min(xx), max(xx)
y1, y2 = min(yy), max(yy)
#
if xx:
x1 -= (x2-x1) * 0.02
x2 += (x2-x1) * 0.02
else:
x1, x2 = 0, 1
#
if yrange != (0, 0):
y1, y2 = yrange
elif yy:
y1 -= (y2-y1) * 0.02
y2 += (y2-y1) * 0.02
else:
y1, y2 = 0, 1
# Convert to screen coordinates
# 0.5 offset so we land on whole pixels with axis
lpad = rpad = bpad = tpad = 25.5
lpad += 30
if title:
tpad += 10
if xlabel:
bpad += 20
if ylabel:
lpad += 20
scale_x = (w-lpad-rpad) / (x2-x1)
scale_y = (h-bpad-tpad) / (y2-y1)
sxx = [lpad + (x-x1)*scale_x for x in xx]
syy = [bpad + (y-y1)*scale_y for y in yy]
# Define ticks
x_ticks = self._get_ticks(scale_x, x1, x2)
y_ticks = self._get_ticks(scale_y, y1, y2)
sx_ticks = [lpad + (x-x1)*scale_x for x in x_ticks]
sy_ticks = [bpad + (y-y1)*scale_y for y in y_ticks]
ctx.clearRect(0, 0, w, h)
# Draw inner background
ctx.fillStyle = 'white'
ctx.fillRect(lpad, tpad, w-lpad-rpad, h-bpad-tpad)
# Draw ticks
ctx.beginPath()
ctx.lineWidth= 1
ctx.strokeStyle = "#444"
for sx in sx_ticks:
ctx.moveTo(sx, h-bpad)
ctx.lineTo(sx, h-bpad+5)
for sy in sy_ticks:
ctx.moveTo(lpad, h-sy)
ctx.lineTo(lpad-5, h-sy)
ctx.stroke()
# Draw gridlines
ctx.beginPath()
ctx.lineWidth= 1
ctx.setLineDash([2, 2])
ctx.strokeStyle = "#ccc"
for sx in sx_ticks:
ctx.moveTo(sx, h-bpad)
ctx.lineTo(sx, tpad)
for sy in sy_ticks:
ctx.moveTo(lpad, h-sy)
ctx.lineTo(w-rpad, h-sy)
ctx.stroke()
ctx.setLineDash([])
# Draw tick labels
ctx.font = '11px verdana'
ctx.fillStyle = 'black'
ctx.textAlign = "center"
ctx.textBaseline = 'top'
for x, sx in zip(x_ticks, sx_ticks):
ctx.fillText(x, sx, h-bpad+8)
ctx.textAlign = "end"
ctx.textBaseline = 'middle'
for y, sy in zip(y_ticks, sy_ticks):
ctx.fillText(y, lpad-8, h-sy)
# Draw labels
ctx.textAlign = "center"
if title:
ctx.font = '20px verdana'
ctx.textBaseline = 'top'
ctx.fillText(title, w/2, 5)
if xlabel:
ctx.font = '16px verdana'
ctx.textBaseline = 'bottom'
ctx.fillText(xlabel, w/2, h-5)
if ylabel:
ctx.save()
ctx.translate(0, h/2)
ctx.rotate(-window.Math.PI/2)
ctx.textBaseline = 'top'
ctx.fillText(ylabel, 0, 5)
ctx.restore()
# Draw axis
ctx.beginPath()
ctx.lineWidth= 1
ctx.strokeStyle = "#444"
ctx.moveTo(lpad, tpad)
ctx.lineTo(lpad, h-bpad)
ctx.lineTo(w-rpad, h-bpad)
ctx.stroke()
# Draw line
if lc.alpha and lw:
ctx.beginPath()
ctx.lineWidth= lw
ctx.strokeStyle = lc.css
ctx.moveTo(sxx[0], h-syy[0])
for x, y in zip(sxx, syy):
ctx.lineTo(x, h-y)
ctx.stroke()
# Draw markers
if mc.alpha and ms:
ctx.fillStyle = mc.css
for x, y in zip(sxx, syy):
ctx.beginPath()
ctx.arc(x, h-y, ms/2, 0, 2*window.Math.PI)
ctx.fill()
def _get_ticks(self, scale, t1, t2, min_tick_dist=40):
# Get tick unit
for tick_unit in self._tick_units:
if tick_unit * scale >= min_tick_dist:
break
else:
return []
# Calculate tick values
first_tick = window.Math.ceil(t1 / tick_unit) * tick_unit
last_tick = window.Math.floor(t2 / tick_unit) * tick_unit
ticks = []
t = first_tick
while t <= last_tick:
ticks.append(t)
t += tick_unit
for i in range(len(ticks)):
t = ticks[i].toPrecision(4)
if '.' in t:
t = t.replace(window.RegExp("[0]+$"), "")
if t[-1] == '.':
t += '0'
ticks[i] = t
return ticks
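# Hedged sketch, in plain Python and illustrative only, of the tick-unit search
# performed by _get_ticks() above: pick the smallest "nice" unit whose
# on-screen spacing is at least min_tick_dist pixels. The scale value is a
# made-up example.
def _example_tick_unit(scale=2.0, min_tick_dist=40):
    units = [i * 10 ** e for e in range(-10, 10) for i in (10, 20, 25, 50)]
    for unit in units:
        if unit * scale >= min_tick_dist:
            return unit  # 20 for the example inputs: 20 * 2.0 >= 40
    return None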
|
import pytest
from keras.models import Sequential
from elephas.utils import serialization
def test_model_to_dict():
model = Sequential()
dict_model = serialization.model_to_dict(model)
    assert set(dict_model.keys()) == {'model', 'weights'}
def test_dict_to_model():
model = Sequential()
dict_model = serialization.model_to_dict(model)
recovered = serialization.dict_to_model(dict_model)
assert recovered.to_json() == model.to_json()
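# Hedged extra test; it assumes model_to_dict() stores model.get_weights()
# under the 'weights' key, consistent with the key assertion above.
def test_model_to_dict_weights_empty():
    """An empty Sequential model should serialize with an empty weights list."""
    model = Sequential()
    dict_model = serialization.model_to_dict(model)
    assert dict_model['weights'] == []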
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hostname(host):
assert 'instance' == host.check_output('hostname -s')
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
f = host.file('/etc/molecule/instance')
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
import diamond.collector
import os
class KVMCollector(diamond.collector.Collector):
PROC = '/sys/kernel/debug/kvm'
def get_default_config_help(self):
config_help = super(KVMCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(KVMCollector, self).get_default_config()
config.update({
'path': 'kvm',
})
return config
def collect(self):
if not os.path.isdir(self.PROC):
self.log.error('/sys/kernel/debug/kvm is missing. Did you' +
' "mount -t debugfs debugfs /sys/kernel/debug"?')
return {}
for filename in os.listdir(self.PROC):
filepath = os.path.abspath(os.path.join(self.PROC, filename))
fh = open(filepath, 'r')
metric_value = self.derivative(filename,
float(fh.readline()),
4294967295)
self.publish(filename, metric_value)
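# Hedged illustration of the wrap-around handling that derivative() is given
# the 4294967295 max_value for: when a 32-bit counter rolls over, the delta is
# computed across the maximum instead of going negative. The actual logic
# lives in diamond.collector; the numbers below are made up.
def _example_wraparound_delta(previous=4294967290.0, current=5.0,
                              max_value=4294967295):
    if current < previous:
        return (max_value - previous) + current
    return current - previous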
|
import os
import win32api
import win32con
import win32event
import win32service
import win32serviceutil
from cherrypy.process import wspbus, plugins
class ConsoleCtrlHandler(plugins.SimplePlugin):
"""A WSPBus plugin for handling Win32 console events (like Ctrl-C)."""
def __init__(self, bus):
self.is_set = False
plugins.SimplePlugin.__init__(self, bus)
def start(self):
if self.is_set:
self.bus.log('Handler for console events already set.', level=20)
return
result = win32api.SetConsoleCtrlHandler(self.handle, 1)
if result == 0:
self.bus.log('Could not SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Set handler for console events.', level=20)
self.is_set = True
def stop(self):
if not self.is_set:
self.bus.log('Handler for console events already off.', level=20)
return
try:
result = win32api.SetConsoleCtrlHandler(self.handle, 0)
except ValueError:
# "ValueError: The object has not been registered"
result = 1
if result == 0:
self.bus.log('Could not remove SetConsoleCtrlHandler (error %r)' %
win32api.GetLastError(), level=40)
else:
self.bus.log('Removed handler for console events.', level=20)
self.is_set = False
def handle(self, event):
"""Handle console control events (like Ctrl-C)."""
if event in (win32con.CTRL_C_EVENT, win32con.CTRL_LOGOFF_EVENT,
win32con.CTRL_BREAK_EVENT, win32con.CTRL_SHUTDOWN_EVENT,
win32con.CTRL_CLOSE_EVENT):
self.bus.log('Console event %s: shutting down bus' % event)
# Remove self immediately so repeated Ctrl-C doesn't re-call it.
try:
self.stop()
except ValueError:
pass
self.bus.exit()
# 'First to return True stops the calls'
return 1
return 0
class Win32Bus(wspbus.Bus):
"""A Web Site Process Bus implementation for Win32.
Instead of time.sleep, this bus blocks using native win32event objects.
"""
def __init__(self):
self.events = {}
wspbus.Bus.__init__(self)
def _get_state_event(self, state):
"""Return a win32event for the given state (creating it if needed)."""
try:
return self.events[state]
except KeyError:
event = win32event.CreateEvent(None, 0, 0,
'WSPBus %s Event (pid=%r)' %
(state.name, os.getpid()))
self.events[state] = event
return event
@property
def state(self):
return self._state
@state.setter
def state(self, value):
self._state = value
event = self._get_state_event(value)
win32event.PulseEvent(event)
def wait(self, state, interval=0.1, channel=None):
"""Wait for the given state(s), KeyboardInterrupt or SystemExit.
Since this class uses native win32event objects, the interval
argument is ignored.
"""
if isinstance(state, (tuple, list)):
# Don't wait for an event that beat us to the punch ;)
if self.state not in state:
events = tuple([self._get_state_event(s) for s in state])
win32event.WaitForMultipleObjects(
events, 0, win32event.INFINITE)
else:
# Don't wait for an event that beat us to the punch ;)
if self.state != state:
event = self._get_state_event(state)
win32event.WaitForSingleObject(event, win32event.INFINITE)
class _ControlCodes(dict):
"""Control codes used to "signal" a service via ControlService.
User-defined control codes are in the range 128-255. We generally use
the standard Python value for the Linux signal and add 128. Example:
>>> signal.SIGUSR1
10
control_codes['graceful'] = 128 + 10
"""
def key_for(self, obj):
"""For the given value, return its corresponding key."""
for key, val in self.items():
if val is obj:
return key
raise ValueError('The given object could not be found: %r' % obj)
control_codes = _ControlCodes({'graceful': 138})
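# Hedged usage sketch of the mapping above and its reverse lookup; 138 follows
# the documented "128 + Linux signal number" convention (SIGUSR1 == 10).
def _example_control_code_roundtrip():
    """Map the 'graceful' command to its code and back via key_for()."""
    code = control_codes['graceful']  # 138
    return control_codes.key_for(code)  # 'graceful'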
def signal_child(service, command):
if command == 'stop':
win32serviceutil.StopService(service)
elif command == 'restart':
win32serviceutil.RestartService(service)
else:
win32serviceutil.ControlService(service, control_codes[command])
class PyWebService(win32serviceutil.ServiceFramework):
"""Python Web Service."""
_svc_name_ = 'Python Web Service'
_svc_display_name_ = 'Python Web Service'
_svc_deps_ = None # sequence of service names on which this depends
_exe_name_ = 'pywebsvc'
_exe_args_ = None # Default to no arguments
    # Only exists on Windows 2000 or later, ignored on Windows NT
_svc_description_ = 'Python Web Service'
def SvcDoRun(self):
from cherrypy import process
process.bus.start()
process.bus.block()
def SvcStop(self):
from cherrypy import process
self.ReportServiceStatus(win32service.SERVICE_STOP_PENDING)
process.bus.exit()
def SvcOther(self, control):
from cherrypy import process
process.bus.publish(control_codes.key_for(control))
if __name__ == '__main__':
win32serviceutil.HandleCommandLine(PyWebService)
|
import asyncio
from typing import Dict, Optional
import aiohttp
import async_timeout
from sharkiqpy import SharkIqAuthError, get_ayla_api
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from .const import _LOGGER, DOMAIN # pylint:disable=unused-import
SHARKIQ_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect."""
ayla_api = get_ayla_api(
username=data[CONF_USERNAME],
password=data[CONF_PASSWORD],
websession=hass.helpers.aiohttp_client.async_get_clientsession(hass),
)
try:
with async_timeout.timeout(10):
_LOGGER.debug("Initialize connection to Ayla networks API")
await ayla_api.async_sign_in()
except (asyncio.TimeoutError, aiohttp.ClientError) as errors:
raise CannotConnect from errors
except SharkIqAuthError as error:
raise InvalidAuth from error
# Return info that you want to store in the config entry.
return {"title": data[CONF_USERNAME]}
class SharkIqConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Shark IQ."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def _async_validate_input(self, user_input):
"""Validate form input."""
errors = {}
info = None
# noinspection PyBroadException
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return info, errors
async def async_step_user(self, user_input: Optional[Dict] = None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
info, errors = await self._async_validate_input(user_input)
if info:
await self.async_set_unique_id(user_input[CONF_USERNAME])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=SHARKIQ_SCHEMA, errors=errors
)
async def async_step_reauth(self, user_input: Optional[dict] = None):
"""Handle re-auth if login is invalid."""
errors = {}
if user_input is not None:
_, errors = await self._async_validate_input(user_input)
if not errors:
for entry in self._async_current_entries():
if entry.unique_id == self.unique_id:
self.hass.config_entries.async_update_entry(
entry, data=user_input
)
return self.async_abort(reason="reauth_successful")
if errors["base"] != "invalid_auth":
return self.async_abort(reason=errors["base"])
return self.async_show_form(
step_id="reauth",
data_schema=SHARKIQ_SCHEMA,
errors=errors,
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
import functools
import logging
from unittest.mock import patch, MagicMock
import httpx
from vcr.request import Request as VcrRequest
from vcr.errors import CannotOverwriteExistingCassetteException
_logger = logging.getLogger(__name__)
def _transform_headers(httpx_reponse):
"""
Some headers can appear multiple times, like "Set-Cookie".
    Therefore transform every header key to a list of values.
"""
out = {}
for key, var in httpx_reponse.headers.raw:
decoded_key = key.decode("utf-8")
out.setdefault(decoded_key, [])
out[decoded_key].append(var.decode("utf-8"))
return out
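# Hedged illustration of the transformation above using stand-in objects; the
# header values are made up. Repeated raw headers such as Set-Cookie collapse
# into one key holding a list of values.
def _example_transform_headers():
    class _FakeHeaders:
        raw = [(b"set-cookie", b"a=1"), (b"set-cookie", b"b=2")]
    class _FakeResponse:
        headers = _FakeHeaders()
    return _transform_headers(_FakeResponse())  # {"set-cookie": ["a=1", "b=2"]}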
def _to_serialized_response(httpx_reponse):
return {
"status_code": httpx_reponse.status_code,
"http_version": httpx_reponse.http_version,
"headers": _transform_headers(httpx_reponse),
"content": httpx_reponse.content.decode("utf-8", "ignore"),
}
def _from_serialized_headers(headers):
"""
    httpx accepts headers as a list of (key, value) tuples.
"""
header_list = []
for key, values in headers.items():
for v in values:
header_list.append((key, v))
return header_list
@patch("httpx.Response.close", MagicMock())
@patch("httpx.Response.read", MagicMock())
def _from_serialized_response(request, serialized_response, history=None):
content = serialized_response.get("content").encode()
response = httpx.Response(
status_code=serialized_response.get("status_code"),
request=request,
http_version=serialized_response.get("http_version"),
headers=_from_serialized_headers(serialized_response.get("headers")),
content=content,
history=history or [],
)
response._content = content
return response
def _make_vcr_request(httpx_request, **kwargs):
body = httpx_request.read().decode("utf-8")
uri = str(httpx_request.url)
headers = dict(httpx_request.headers)
return VcrRequest(httpx_request.method, uri, body, headers)
def _shared_vcr_send(cassette, real_send, *args, **kwargs):
real_request = args[1]
vcr_request = _make_vcr_request(real_request, **kwargs)
if cassette.can_play_response_for(vcr_request):
return vcr_request, _play_responses(cassette, real_request, vcr_request, args[0], kwargs)
if cassette.write_protected and cassette.filter_request(vcr_request):
raise CannotOverwriteExistingCassetteException(cassette=cassette, failed_request=vcr_request)
_logger.info("%s not in cassette, sending to real server", vcr_request)
return vcr_request, None
def _record_responses(cassette, vcr_request, real_response):
for past_real_response in real_response.history:
past_vcr_request = _make_vcr_request(past_real_response.request)
cassette.append(past_vcr_request, _to_serialized_response(past_real_response))
if real_response.history:
        # If there was a redirection, keep the request which holds the
        # final redirect value
vcr_request = _make_vcr_request(real_response.request)
cassette.append(vcr_request, _to_serialized_response(real_response))
return real_response
def _play_responses(cassette, request, vcr_request, client, kwargs):
history = []
allow_redirects = kwargs.get("allow_redirects", True)
vcr_response = cassette.play_response(vcr_request)
response = _from_serialized_response(request, vcr_response)
while allow_redirects and 300 <= response.status_code <= 399:
next_url = response.headers.get("location")
if not next_url:
break
vcr_request = VcrRequest("GET", next_url, None, dict(response.headers))
vcr_request = cassette.find_requests_with_most_matches(vcr_request)[0][0]
history.append(response)
# add cookies from response to session cookie store
client.cookies.extract_cookies(response)
vcr_response = cassette.play_response(vcr_request)
response = _from_serialized_response(vcr_request, vcr_response, history)
return response
async def _async_vcr_send(cassette, real_send, *args, **kwargs):
vcr_request, response = _shared_vcr_send(cassette, real_send, *args, **kwargs)
if response:
# add cookies from response to session cookie store
args[0].cookies.extract_cookies(response)
return response
real_response = await real_send(*args, **kwargs)
return _record_responses(cassette, vcr_request, real_response)
def async_vcr_send(cassette, real_send):
@functools.wraps(real_send)
def _inner_send(*args, **kwargs):
return _async_vcr_send(cassette, real_send, *args, **kwargs)
return _inner_send
def _sync_vcr_send(cassette, real_send, *args, **kwargs):
vcr_request, response = _shared_vcr_send(cassette, real_send, *args, **kwargs)
if response:
# add cookies from response to session cookie store
args[0].cookies.extract_cookies(response)
return response
real_response = real_send(*args, **kwargs)
return _record_responses(cassette, vcr_request, real_response)
def sync_vcr_send(cassette, real_send):
@functools.wraps(real_send)
def _inner_send(*args, **kwargs):
return _sync_vcr_send(cassette, real_send, *args, **kwargs)
return _inner_send
|
import logging
from velbus.util import VelbusException
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_HEAT,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from . import VelbusEntity
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up Velbus binary sensor based on config_entry."""
cntrl = hass.data[DOMAIN][entry.entry_id]["cntrl"]
modules_data = hass.data[DOMAIN][entry.entry_id]["climate"]
entities = []
for address, channel in modules_data:
module = cntrl.get_module(address)
entities.append(VelbusClimate(module, channel))
async_add_entities(entities)
class VelbusClimate(VelbusEntity, ClimateEntity):
"""Representation of a Velbus thermostat."""
@property
def supported_features(self):
"""Return the list off supported features."""
return SUPPORT_TARGET_TEMPERATURE
@property
def temperature_unit(self):
"""Return the unit this state is expressed in."""
if self._module.get_unit(self._channel) == TEMP_CELSIUS:
return TEMP_CELSIUS
return TEMP_FAHRENHEIT
@property
def current_temperature(self):
"""Return the current temperature."""
return self._module.get_state(self._channel)
@property
def hvac_mode(self):
"""Return hvac operation ie. heat, cool mode.
Need to be one of HVAC_MODE_*.
"""
return HVAC_MODE_HEAT
@property
def hvac_modes(self):
"""Return the list of available hvac operation modes.
        Needs to be a subset of HVAC_MODES.
"""
return [HVAC_MODE_HEAT]
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._module.get_climate_target()
def set_temperature(self, **kwargs):
"""Set new target temperatures."""
temp = kwargs.get(ATTR_TEMPERATURE)
if temp is None:
return
try:
self._module.set_temp(temp)
except VelbusException as err:
_LOGGER.error("A Velbus error occurred: %s", err)
return
self.schedule_update_ha_state()
def set_hvac_mode(self, hvac_mode):
"""Set new target hvac mode."""
|
import logging
import opengarage
import voluptuous as vol
from homeassistant.components.cover import (
DEVICE_CLASS_GARAGE,
PLATFORM_SCHEMA,
SUPPORT_CLOSE,
SUPPORT_OPEN,
CoverEntity,
)
from homeassistant.const import (
CONF_COVERS,
CONF_HOST,
CONF_NAME,
CONF_PORT,
CONF_SSL,
CONF_VERIFY_SSL,
STATE_CLOSED,
STATE_CLOSING,
STATE_OPEN,
STATE_OPENING,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.device_registry import format_mac
_LOGGER = logging.getLogger(__name__)
ATTR_DISTANCE_SENSOR = "distance_sensor"
ATTR_DOOR_STATE = "door_state"
ATTR_SIGNAL_STRENGTH = "wifi_signal"
CONF_DEVICE_KEY = "device_key"
DEFAULT_NAME = "OpenGarage"
DEFAULT_PORT = 80
STATES_MAP = {0: STATE_CLOSED, 1: STATE_OPEN}
COVER_SCHEMA = vol.Schema(
{
vol.Required(CONF_DEVICE_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_COVERS): cv.schema_with_slug_keys(COVER_SCHEMA)}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the OpenGarage covers."""
covers = []
devices = config.get(CONF_COVERS)
for device_config in devices.values():
opengarage_url = (
f"{'https' if device_config[CONF_SSL] else 'http'}://"
f"{device_config.get(CONF_HOST)}:{device_config.get(CONF_PORT)}"
)
open_garage = opengarage.OpenGarage(
opengarage_url,
device_config[CONF_DEVICE_KEY],
device_config[CONF_VERIFY_SSL],
async_get_clientsession(hass),
)
status = await open_garage.update_state()
covers.append(
OpenGarageCover(
device_config.get(CONF_NAME), open_garage, format_mac(status["mac"])
)
)
async_add_entities(covers, True)
class OpenGarageCover(CoverEntity):
"""Representation of a OpenGarage cover."""
def __init__(self, name, open_garage, device_id):
"""Initialize the cover."""
self._name = name
self._open_garage = open_garage
self._state = None
self._state_before_move = None
self._device_state_attributes = {}
self._available = True
self._device_id = device_id
@property
def name(self):
"""Return the name of the cover."""
return self._name
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._device_state_attributes
@property
def is_closed(self):
"""Return if the cover is closed."""
if self._state is None:
return None
return self._state in [STATE_CLOSED, STATE_OPENING]
async def async_close_cover(self, **kwargs):
"""Close the cover."""
if self._state in [STATE_CLOSED, STATE_CLOSING]:
return
self._state_before_move = self._state
self._state = STATE_CLOSING
await self._push_button()
async def async_open_cover(self, **kwargs):
"""Open the cover."""
if self._state in [STATE_OPEN, STATE_OPENING]:
return
self._state_before_move = self._state
self._state = STATE_OPENING
await self._push_button()
async def async_update(self):
"""Get updated status from API."""
status = await self._open_garage.update_state()
if status is None:
_LOGGER.error("Unable to connect to OpenGarage device")
self._available = False
return
if self._name is None and status["name"] is not None:
self._name = status["name"]
state = STATES_MAP.get(status.get("door"))
if self._state_before_move is not None:
if self._state_before_move != state:
self._state = state
self._state_before_move = None
else:
self._state = state
_LOGGER.debug("%s status: %s", self._name, self._state)
if status.get("rssi") is not None:
self._device_state_attributes[ATTR_SIGNAL_STRENGTH] = status.get("rssi")
if status.get("dist") is not None:
self._device_state_attributes[ATTR_DISTANCE_SENSOR] = status.get("dist")
if self._state is not None:
self._device_state_attributes[ATTR_DOOR_STATE] = self._state
self._available = True
async def _push_button(self):
"""Send commands to API."""
result = await self._open_garage.push_button()
        if result is None:
            _LOGGER.error("Unable to connect to OpenGarage device")
        elif result == 1:
            return
        elif result == 2:
            _LOGGER.error("Unable to control %s: Device key is incorrect", self._name)
        elif result > 2:
            _LOGGER.error("Unable to control %s: Error code %s", self._name, result)
        self._state = self._state_before_move
        self._state_before_move = None
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return DEVICE_CLASS_GARAGE
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
@property
def unique_id(self):
"""Return a unique ID."""
return self._device_id
|
from django.conf import settings
from django.contrib.auth.backends import ModelBackend
from django.db.models.signals import pre_save
from django.dispatch.dispatcher import receiver
from weblate.auth.models import User
def try_get_user(username, list_all=False):
"""Wrapper to get User object for authentication."""
if list_all:
method = User.objects.filter
else:
method = User.objects.get
if "@" in username:
return method(email=username)
return method(username=username)
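# For illustration (the address below is purely hypothetical):
# try_get_user("jane@example.com") looks the user up by e-mail, while
# try_get_user("jane") looks it up by username; with list_all=True a
# queryset is returned instead of a single object.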
class WeblateUserBackend(ModelBackend):
"""Weblate authentication backend."""
def authenticate(self, request, username=None, password=None, **kwargs):
"""Prohibit login for anonymous user and allows to login by e-mail."""
if username == settings.ANONYMOUS_USER_NAME or username is None:
return None
try:
user = try_get_user(username)
if user.check_password(password):
return user
except (User.DoesNotExist, User.MultipleObjectsReturned):
pass
return None
def get_user(self, user_id):
try:
user = User.objects.select_related("profile").get(pk=user_id)
except User.DoesNotExist:
return None
return user if self.user_can_authenticate(user) else None
@receiver(pre_save, sender=User)
def disable_anon_user_password_save(sender, instance, **kwargs):
"""Block setting password for anonymous user."""
if instance.is_anonymous and instance.has_usable_password():
raise ValueError("Anonymous user can not have usable password!")
|
from datetime import timedelta
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.components.streamlabswater import DOMAIN as STREAMLABSWATER_DOMAIN
from homeassistant.util import Throttle
DEPENDS = ["streamlabswater"]
MIN_TIME_BETWEEN_LOCATION_UPDATES = timedelta(seconds=60)
ATTR_LOCATION_ID = "location_id"
NAME_AWAY_MODE = "Water Away Mode"
def setup_platform(hass, config, add_devices, discovery_info=None):
"""Set up the StreamLabsWater mode sensor."""
client = hass.data[STREAMLABSWATER_DOMAIN]["client"]
location_id = hass.data[STREAMLABSWATER_DOMAIN]["location_id"]
location_name = hass.data[STREAMLABSWATER_DOMAIN]["location_name"]
streamlabs_location_data = StreamlabsLocationData(location_id, client)
streamlabs_location_data.update()
add_devices([StreamlabsAwayMode(location_name, streamlabs_location_data)])
class StreamlabsLocationData:
"""Track and query location data."""
def __init__(self, location_id, client):
"""Initialize the location data."""
self._location_id = location_id
self._client = client
self._is_away = None
@Throttle(MIN_TIME_BETWEEN_LOCATION_UPDATES)
def update(self):
"""Query and store location data."""
location = self._client.get_location(self._location_id)
self._is_away = location["homeAway"] == "away"
def is_away(self):
"""Return whether away more is enabled."""
return self._is_away
class StreamlabsAwayMode(BinarySensorEntity):
"""Monitor the away mode state."""
def __init__(self, location_name, streamlabs_location_data):
"""Initialize the away mode device."""
self._location_name = location_name
self._streamlabs_location_data = streamlabs_location_data
self._is_away = None
@property
def name(self):
"""Return the name for away mode."""
return f"{self._location_name} {NAME_AWAY_MODE}"
@property
def is_on(self):
"""Return if away mode is on."""
return self._streamlabs_location_data.is_away()
def update(self):
"""Retrieve the latest location data and away mode state."""
self._streamlabs_location_data.update()
|
import asyncio
import logging
import async_timeout
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
BaseNotificationService,
)
from homeassistant.const import (
HTTP_ACCEPTED,
HTTP_CREATED,
HTTP_OK,
HTTP_TOO_MANY_REQUESTS,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.util.dt as dt_util
from .const import (
ATTR_APP_DATA,
ATTR_APP_ID,
ATTR_APP_VERSION,
ATTR_DEVICE_NAME,
ATTR_OS_VERSION,
ATTR_PUSH_RATE_LIMITS,
ATTR_PUSH_RATE_LIMITS_ERRORS,
ATTR_PUSH_RATE_LIMITS_MAXIMUM,
ATTR_PUSH_RATE_LIMITS_RESETS_AT,
ATTR_PUSH_RATE_LIMITS_SUCCESSFUL,
ATTR_PUSH_TOKEN,
ATTR_PUSH_URL,
DATA_CONFIG_ENTRIES,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
def push_registrations(hass):
"""Return a dictionary of push enabled registrations."""
targets = {}
for webhook_id, entry in hass.data[DOMAIN][DATA_CONFIG_ENTRIES].items():
data = entry.data
app_data = data[ATTR_APP_DATA]
if ATTR_PUSH_TOKEN in app_data and ATTR_PUSH_URL in app_data:
device_name = data[ATTR_DEVICE_NAME]
if device_name in targets:
_LOGGER.warning("Found duplicate device name %s", device_name)
continue
targets[device_name] = webhook_id
return targets
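# The resulting mapping is keyed by device name, for example (hypothetical
# values) {"Living Room Tablet": "<webhook_id>"}. Only registrations whose
# app data contains both a push token and a push URL are included.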
# pylint: disable=invalid-name
def log_rate_limits(hass, device_name, resp, level=logging.INFO):
"""Output rate limit log line at given level."""
if ATTR_PUSH_RATE_LIMITS not in resp:
return
rate_limits = resp[ATTR_PUSH_RATE_LIMITS]
resetsAt = rate_limits[ATTR_PUSH_RATE_LIMITS_RESETS_AT]
resetsAtTime = dt_util.parse_datetime(resetsAt) - dt_util.utcnow()
rate_limit_msg = (
"mobile_app push notification rate limits for %s: "
"%d sent, %d allowed, %d errors, "
"resets in %s"
)
_LOGGER.log(
level,
rate_limit_msg,
device_name,
rate_limits[ATTR_PUSH_RATE_LIMITS_SUCCESSFUL],
rate_limits[ATTR_PUSH_RATE_LIMITS_MAXIMUM],
rate_limits[ATTR_PUSH_RATE_LIMITS_ERRORS],
str(resetsAtTime).split(".")[0],
)
async def async_get_service(hass, config, discovery_info=None):
"""Get the mobile_app notification service."""
session = async_get_clientsession(hass)
return MobileAppNotificationService(session)
class MobileAppNotificationService(BaseNotificationService):
"""Implement the notification service for mobile_app."""
def __init__(self, session):
"""Initialize the service."""
self._session = session
@property
def targets(self):
"""Return a dictionary of registered targets."""
return push_registrations(self.hass)
async def async_send_message(self, message="", **kwargs):
"""Send a message to the Lambda APNS gateway."""
data = {ATTR_MESSAGE: message}
if kwargs.get(ATTR_TITLE) is not None:
# Remove default title from notifications.
if kwargs.get(ATTR_TITLE) != ATTR_TITLE_DEFAULT:
data[ATTR_TITLE] = kwargs.get(ATTR_TITLE)
targets = kwargs.get(ATTR_TARGET)
if not targets:
targets = push_registrations(self.hass).values()
if kwargs.get(ATTR_DATA) is not None:
data[ATTR_DATA] = kwargs.get(ATTR_DATA)
for target in targets:
entry = self.hass.data[DOMAIN][DATA_CONFIG_ENTRIES][target]
entry_data = entry.data
app_data = entry_data[ATTR_APP_DATA]
push_token = app_data[ATTR_PUSH_TOKEN]
push_url = app_data[ATTR_PUSH_URL]
data[ATTR_PUSH_TOKEN] = push_token
reg_info = {
ATTR_APP_ID: entry_data[ATTR_APP_ID],
ATTR_APP_VERSION: entry_data[ATTR_APP_VERSION],
}
if ATTR_OS_VERSION in entry_data:
reg_info[ATTR_OS_VERSION] = entry_data[ATTR_OS_VERSION]
data["registration_info"] = reg_info
try:
with async_timeout.timeout(10):
response = await self._session.post(push_url, json=data)
result = await response.json()
if response.status in [HTTP_OK, HTTP_CREATED, HTTP_ACCEPTED]:
log_rate_limits(self.hass, entry_data[ATTR_DEVICE_NAME], result)
continue
fallback_error = result.get("errorMessage", "Unknown error")
fallback_message = (
f"Internal server error, please try again later: {fallback_error}"
)
message = result.get("message", fallback_message)
if "message" in result:
if message[-1] not in [".", "?", "!"]:
message += "."
message += (
" This message is generated externally to Home Assistant."
)
if response.status == HTTP_TOO_MANY_REQUESTS:
_LOGGER.warning(message)
log_rate_limits(
self.hass, entry_data[ATTR_DEVICE_NAME], result, logging.WARNING
)
else:
_LOGGER.error(message)
except asyncio.TimeoutError:
_LOGGER.error("Timeout sending notification to %s", push_url)
|
from abc import ABC, abstractmethod
import pandas as pd
import numpy as np
from scattertext.Scalers import stretch_neg1_to_1
class CategoryProjectionBase(ABC):
    '''
    Base class for projections of categories (and terms) onto a two-dimensional space.
    '''
def _pseduo_init(self, category_corpus, category_counts, projection, x_dim=0, y_dim=1, term_projection=None):
self.category_corpus = category_corpus
self.category_counts = category_counts
self.x_dim = x_dim
self.y_dim = y_dim
self.projection = projection
self.term_projection = term_projection
def project_with_alternative_dimensions(self, x_dim, y_dim):
return CategoryProjection(self.category_corpus, self.category_counts, self.projection, x_dim, y_dim)
def project_with_alternate_axes(self, x_axis=None, y_axis=None):
# !!! Need to fix
if x_axis is None:
x_axis = self._get_x_axis()
if y_axis is None:
y_axis = self._get_y_axis()
return CategoryProjectionAlternateAxes(self.category_corpus,
self.category_counts,
self.projection,
self.get_category_embeddings(),
self.x_dim,
self.y_dim,
x_axis=x_axis,
y_axis=y_axis)
def get_pandas_projection(self):
        '''
        :return: pd.DataFrame, indexed by term, with 'x' and 'y' columns
        '''
return pd.DataFrame({'term': self.category_corpus.get_metadata(),
'x': self._get_x_axis(),
'y': self._get_y_axis()}).set_index('term')
def _get_x_axis(self):
return self.projection.T[self.x_dim]
def _get_y_axis(self):
return self.projection.T[self.y_dim]
def get_axes_labels(self, num_terms=5):
df = self.get_term_projection()
return {'right': list(df.sort_values(by='x', ascending=False).index[:num_terms]),
'left': list(df.sort_values(by='x', ascending=True).index[:num_terms]),
'top': list(df.sort_values(by='y', ascending=False).index[:num_terms]),
'bottom': list(df.sort_values(by='y', ascending=True).index[:num_terms])}
def get_nearest_terms(self, num_terms=5):
df = self.get_term_projection().apply(stretch_neg1_to_1)
return {
'top_right': ((df.x - 1) ** 2 + (df.y - 1) ** 2).sort_values().index[:num_terms].values,
'top': (df.x ** 2 + (df.y - 1) ** 2).sort_values().index[:num_terms].values,
'top_left': ((df.x + 1) ** 2 + (df.y - 1) ** 2).sort_values().index[:num_terms].values,
'right': ((df.x - 1) ** 2 + df.y ** 2).sort_values().index[:num_terms].values,
'left': ((df.x + 1) ** 2 + df.y ** 2).sort_values().index[:num_terms].values,
'bottom_right': ((df.x - 1) ** 2 + (df.y + 1) ** 2).sort_values().index[:num_terms].values,
'bottom': (df.x ** 2 + (df.y + 1) ** 2).sort_values().index[:num_terms].values,
'bottom_left': ((df.x + 1) ** 2 + (df.y + 1) ** 2).sort_values().index[:num_terms].values,
}
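    # For example, after stretching coordinates into [-1, 1], the 'top_right'
    # bucket above holds the num_terms terms whose (x, y) positions have the
    # smallest squared distance to the corner (1, 1).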
def get_term_projection(self):
if self.term_projection is None:
# np.ndarray(self.category_counts.values) * self._get_x_y_projection()
dim_term = np.matmul(self.category_counts.values, self._get_x_y_projection())
else:
dim_term = self.term_projection
df = pd.DataFrame(dim_term, index=self.category_corpus.get_terms(), columns=['x', 'y'])
return df
def _get_x_y_projection(self):
return np.array([self._get_x_axis(), self._get_y_axis()]).T
def get_projection(self):
return self.projection
@abstractmethod
def use_alternate_projection(self, projection):
pass
@abstractmethod
def get_category_embeddings(self):
pass
def get_corpus(self):
return self.category_corpus
class CategoryProjection(CategoryProjectionBase):
def __init__(self, category_corpus, category_counts, projection, x_dim=0, y_dim=1, term_projection=None):
self._pseduo_init(category_corpus, category_counts, projection, x_dim, y_dim, term_projection)
def get_category_embeddings(self):
return self.category_counts.values
def use_alternate_projection(self, projection):
return CategoryProjection(self.category_corpus, self.category_counts, projection, self.x_dim, self.y_dim)
class CategoryProjectionWithDoc2Vec(CategoryProjectionBase):
def __init__(self, category_corpus, category_counts, projection, x_dim=0, y_dim=1, doc2vec_model=None,
term_projection=None, ):
self.doc2vec_model = doc2vec_model
self._pseduo_init(category_corpus, category_counts, projection, x_dim, y_dim, term_projection)
def project_with_alternative_dimensions(self, x_dim, y_dim):
return CategoryProjectionWithDoc2Vec(self.category_corpus, self.category_counts, self.projection,
x_dim, y_dim,
doc2vec_model=self.doc2vec_model)
def get_category_embeddings(self):
return self.doc2vec_model.project()
def use_alternate_projection(self, projection):
return CategoryProjectionWithDoc2Vec(self.category_corpus, self.category_counts, projection,
self.x_dim, self.y_dim, doc2vec_model=self.doc2vec_model)
# !!! Need to fix
class CategoryProjectionAlternateAxes(CategoryProjectionBase):
def __init__(self, category_corpus, category_counts, projection, category_embeddings, x_dim=0, y_dim=1, x_axis=None,
y_axis=None):
self._pseduo_init(category_corpus, category_counts, projection, x_dim=x_dim, y_dim=y_dim)
self.x_axis_ = x_axis
self.y_axis_ = y_axis
self.category_embeddings_ = category_embeddings
def get_category_embeddings(self):
return self.category_embeddings_
def _get_x_axis(self):
return self.x_axis_
def _get_y_axis(self):
return self.y_axis_
def project_raw_corpus(category_corpus,
projection,
projection_type=CategoryProjection,
term_projection=None,
x_dim=0,
y_dim=1):
return projection_type(category_corpus,
category_corpus.get_term_freq_df(),
projection,
x_dim,
y_dim,
term_projection)
|
import numpy as np
def create_lut(cmap, n_colors=256, center=None):
"""Return a colormap suitable for setting as a LUT."""
from .._3d import _get_cmap
assert not (isinstance(cmap, str) and cmap == 'auto')
cmap = _get_cmap(cmap)
lut = np.round(cmap(np.linspace(0, 1, n_colors)) * 255.0).astype(np.int64)
return lut
def scale_sequential_lut(lut_table, fmin, fmid, fmax):
"""Scale a sequential colormap."""
assert fmin <= fmid <= fmax # guaranteed by calculate_lut
lut_table_new = lut_table.copy()
n_colors = lut_table.shape[0]
n_colors2 = n_colors // 2
if fmax == fmin:
fmid_idx = 0
else:
fmid_idx = np.clip(int(np.round(
n_colors * ((fmid - fmin) / (fmax - fmin))) - 1), 0, n_colors - 2)
n_left = fmid_idx + 1
n_right = n_colors - n_left
for i in range(4):
lut_table_new[:fmid_idx + 1, i] = np.interp(
np.linspace(0, n_colors2 - 1, n_left),
np.arange(n_colors), lut_table[:, i])
lut_table_new[fmid_idx + 1:, i] = np.interp(
np.linspace(n_colors - 1, n_colors2, n_right)[::-1],
np.arange(n_colors), lut_table[:, i])
return lut_table_new
def get_fill_colors(cols, n_fill):
"""Get the fill colors for the middle of divergent colormaps."""
steps = np.linalg.norm(np.diff(cols[:, :3].astype(float), axis=0),
axis=1)
ind = np.flatnonzero(steps[1:-1] > steps[[0, -1]].mean() * 3)
if ind.size > 0:
        # choose the two colors between which there is the largest step
        ind = ind[0] + 1
        fillcols = np.r_[np.tile(cols[ind, :], (n_fill // 2, 1)),
                         np.tile(cols[ind + 1, :],
                                 (n_fill - n_fill // 2, 1))]
else:
# choose a color from the middle of the colormap
fillcols = np.tile(cols[int(cols.shape[0] / 2), :], (n_fill, 1))
return fillcols
def calculate_lut(lut_table, alpha, fmin, fmid, fmax, center=None,
transparent=True):
u"""Transparent color map calculation.
A colormap may be sequential or divergent. When the colormap is
divergent indicate this by providing a value for 'center'. The
meanings of fmin, fmid and fmax are different for sequential and
divergent colormaps. A sequential colormap is characterised by::
[fmin, fmid, fmax]
where fmin and fmax define the edges of the colormap and fmid
will be the value mapped to the center of the originally chosen colormap.
A divergent colormap is characterised by::
[center-fmax, center-fmid, center-fmin, center,
center+fmin, center+fmid, center+fmax]
    i.e., values between center-fmin and center+fmin will not be shown
    while center-fmid will map to the fmid of the first half of the
    original colormap and center+fmid to the fmid of the second half.
Parameters
----------
    lut_table : Colormap
        Color map obtained from _process_mapdata.
alpha : float
Alpha value to apply globally to the overlay. Has no effect with mpl
backend.
fmin : float
Min value in colormap.
fmid : float
Intermediate value in colormap.
fmax : float
Max value in colormap.
center : float or None
If not None, center of a divergent colormap, changes the meaning of
fmin, fmax and fmid.
    transparent : bool
        If True, use a linear transparency between fmin and fmid and make
        values below fmin fully transparent (symmetrically for divergent
        colormaps).
Returns
-------
    lut_table : ndarray of float, shape (256, 4)
        RGBA color map with a transparency channel, scaled to [0, 1].
"""
if not fmin <= fmid <= fmax:
raise ValueError('Must have fmin (%s) <= fmid (%s) <= fmax (%s)'
% (fmin, fmid, fmax))
lut_table = create_lut(lut_table)
assert lut_table.dtype.kind == 'i'
divergent = center is not None
n_colors = lut_table.shape[0]
# Add transparency if needed
n_colors2 = n_colors // 2
if transparent:
if divergent:
N4 = np.full(4, n_colors // 4)
N4[[0, 3, 1, 2][:np.mod(n_colors, 4)]] += 1
assert N4.sum() == n_colors
lut_table[:, -1] = np.round(np.hstack([
np.full(N4[0], 255.),
np.linspace(0, 255, N4[1])[::-1],
np.linspace(0, 255, N4[2]),
np.full(N4[3], 255.)]))
else:
lut_table[:n_colors2, -1] = np.round(np.linspace(
0, 255, n_colors2))
lut_table[n_colors2:, -1] = 255
alpha = float(alpha)
if alpha < 1.0:
lut_table[:, -1] = np.round(lut_table[:, -1] * alpha)
if divergent:
if fmax == fmin:
lut_table = np.r_[
lut_table[:1],
get_fill_colors(
lut_table[n_colors2 - 3:n_colors2 + 3, :], n_colors - 2),
lut_table[-1:]]
else:
n_fill = int(round(fmin * n_colors2 / (fmax - fmin))) * 2
lut_table = np.r_[
scale_sequential_lut(lut_table[:n_colors2, :],
center - fmax, center - fmid,
center - fmin),
get_fill_colors(
lut_table[n_colors2 - 3:n_colors2 + 3, :], n_fill),
scale_sequential_lut(lut_table[n_colors2:, :][::-1],
center - fmax, center - fmid,
center - fmin)[::-1]]
else:
lut_table = scale_sequential_lut(lut_table, fmin, fmid, fmax)
n_colors = lut_table.shape[0]
if n_colors != 256:
lut = np.zeros((256, 4))
x = np.linspace(1, n_colors, 256)
for chan in range(4):
lut[:, chan] = np.interp(x,
np.arange(1, n_colors + 1),
lut_table[:, chan])
lut_table = lut
lut_table = lut_table.astype(np.float64) / 255.0
return lut_table
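# Minimal usage sketch (illustrative values; assumes a matplotlib colormap
# name such as 'hot' is acceptable to create_lut via _get_cmap):
#
#   lut = calculate_lut('hot', alpha=1.0, fmin=2.0, fmid=5.0, fmax=8.0)
#   # lut is a (256, 4) float array in [0, 1]; values at fmin start fully
#   # transparent and fmid maps to the middle of the original colormap.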
|
import pathlib
from PyQt5.QtCore import QLibraryInfo
from PyQt5.QtWebEngineWidgets import QWebEngineView, QWebEnginePage
from PyQt5.QtWidgets import QWidget
from qutebrowser.browser import inspector
from qutebrowser.browser.webengine import webenginesettings
from qutebrowser.misc import miscwidgets
from qutebrowser.utils import version
class WebEngineInspectorView(QWebEngineView):
"""The QWebEngineView used for the inspector.
    We don't use a qutebrowser WebEngineView because that has various
    customizations which don't apply to the inspector.
"""
def createWindow(self,
wintype: QWebEnginePage.WebWindowType) -> QWebEngineView:
"""Called by Qt when a page wants to create a new tab or window.
In case the user wants to open a resource in a new tab, we use the
createWindow handling of the main page to achieve that.
See WebEngineView.createWindow for details.
"""
return self.page().inspectedPage().view().createWindow(wintype)
class WebEngineInspector(inspector.AbstractWebInspector):
"""A web inspector for QtWebEngine with Qt API support."""
def __init__(self, splitter: miscwidgets.InspectorSplitter,
win_id: int,
parent: QWidget = None) -> None:
super().__init__(splitter, win_id, parent)
self._check_devtools_resources()
view = WebEngineInspectorView()
self._settings = webenginesettings.WebEngineSettings(view.settings())
self._set_widget(view)
def _check_devtools_resources(self) -> None:
"""Make sure that the devtools resources are available on Fedora.
Fedora packages devtools resources into its own package. If it's not
installed, we show a nice error instead of a blank inspector.
"""
dist = version.distribution()
if dist is None or dist.parsed != version.Distribution.fedora:
return
data_path = pathlib.Path(QLibraryInfo.location(QLibraryInfo.DataPath))
pak = data_path / 'resources' / 'qtwebengine_devtools_resources.pak'
if not pak.exists():
raise inspector.Error("QtWebEngine devtools resources not found, "
"please install the qt5-qtwebengine-devtools "
"Fedora package.")
def inspect(self, page: QWebEnginePage) -> None: # type: ignore[override]
inspector_page = self._widget.page()
inspector_page.setInspectedPage(page)
self._settings.update_for_url(inspector_page.requestedUrl())
def _needs_recreate(self) -> bool:
"""Recreate the inspector when detaching to a window.
WORKAROUND for what's likely an unknown Qt bug.
"""
return True
|
from copy import deepcopy
import pytest
from homeassistant.components.climate import (
DOMAIN as CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_TEMPERATURE,
)
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
)
from homeassistant.components.deconz.const import (
CONF_ALLOW_CLIP_SENSOR,
DOMAIN as DECONZ_DOMAIN,
)
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from homeassistant.const import ATTR_ENTITY_ID, ATTR_TEMPERATURE, STATE_OFF
from homeassistant.setup import async_setup_component
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.async_mock import patch
SENSORS = {
"1": {
"id": "Thermostat id",
"name": "Thermostat",
"type": "ZHAThermostat",
"state": {"on": True, "temperature": 2260, "valve": 30},
"config": {
"battery": 100,
"heatsetpoint": 2200,
"mode": "auto",
"offset": 10,
"reachable": True,
},
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"id": "CLIP thermostat id",
"name": "CLIP thermostat",
"type": "CLIPThermostat",
"state": {"on": True, "temperature": 2260, "valve": 30},
"config": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
}
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a gateway."""
assert (
await async_setup_component(
hass, CLIMATE_DOMAIN, {"climate": {"platform": DECONZ_DOMAIN}}
)
is True
)
assert DECONZ_DOMAIN not in hass.data
async def test_no_sensors(hass):
"""Test that no sensors in deconz results in no climate entities."""
await setup_deconz_integration(hass)
assert len(hass.states.async_all()) == 0
async def test_climate_devices(hass):
"""Test successful creation of sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 2
assert hass.states.get("climate.thermostat").state == HVAC_MODE_AUTO
assert hass.states.get("sensor.thermostat") is None
assert hass.states.get("sensor.thermostat_battery_level").state == "100"
assert hass.states.get("climate.presence_sensor") is None
assert hass.states.get("climate.clip_thermostat") is None
# Event signals thermostat configured off
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"config": {"mode": "off"},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == STATE_OFF
# Event signals thermostat state on
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"config": {"mode": "other"},
"state": {"on": True},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == HVAC_MODE_HEAT
# Event signals thermostat state off
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"on": False},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == STATE_OFF
# Verify service calls
thermostat_device = gateway.api.sensors["1"]
# Service set HVAC mode to auto
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_AUTO},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put", "/sensors/1/config", json={"mode": "auto"}
)
# Service set HVAC mode to heat
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_HEAT},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put", "/sensors/1/config", json={"mode": "heat"}
)
# Service set HVAC mode to off
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_OFF},
blocking=True,
)
set_callback.assert_called_with(
"put", "/sensors/1/config", json={"mode": "off"}
)
# Service set HVAC mode to unsupported value
with patch.object(
thermostat_device, "_request", return_value=True
) as set_callback, pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_HVAC_MODE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_HVAC_MODE: HVAC_MODE_COOL},
blocking=True,
)
# Service set temperature to 20
with patch.object(thermostat_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{ATTR_ENTITY_ID: "climate.thermostat", ATTR_TEMPERATURE: 20},
blocking=True,
)
set_callback.assert_called_with(
"put", "/sensors/1/config", json={"heatsetpoint": 2000.0}
)
# Service set temperature without providing temperature attribute
with patch.object(
thermostat_device, "_request", return_value=True
) as set_callback, pytest.raises(ValueError):
await hass.services.async_call(
CLIMATE_DOMAIN,
SERVICE_SET_TEMPERATURE,
{
ATTR_ENTITY_ID: "climate.thermostat",
ATTR_TARGET_TEMP_HIGH: 30,
ATTR_TARGET_TEMP_LOW: 10,
},
blocking=True,
)
await hass.config_entries.async_unload(config_entry.entry_id)
assert len(hass.states.async_all()) == 0
async def test_clip_climate_device(hass):
"""Test successful creation of sensor entities."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(
hass,
options={CONF_ALLOW_CLIP_SENSOR: True},
get_state_response=data,
)
assert len(hass.states.async_all()) == 3
assert hass.states.get("climate.thermostat").state == HVAC_MODE_AUTO
assert hass.states.get("sensor.thermostat") is None
assert hass.states.get("sensor.thermostat_battery_level").state == "100"
assert hass.states.get("climate.clip_thermostat").state == HVAC_MODE_HEAT
# Disallow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: False}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("climate.clip_thermostat") is None
# Allow clip sensors
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_CLIP_SENSOR: True}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 3
assert hass.states.get("climate.clip_thermostat").state == HVAC_MODE_HEAT
async def test_verify_state_update(hass):
"""Test that state update properly."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["sensors"] = deepcopy(SENSORS)
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert hass.states.get("climate.thermostat").state == HVAC_MODE_AUTO
state_changed_event = {
"t": "event",
"e": "changed",
"r": "sensors",
"id": "1",
"state": {"on": False},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("climate.thermostat").state == HVAC_MODE_AUTO
assert gateway.api.sensors["1"].changed_keys == {"state", "r", "t", "on", "e", "id"}
async def test_add_new_climate_device(hass):
"""Test that adding a new climate device works."""
config_entry = await setup_deconz_integration(hass)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 0
state_added_event = {
"t": "event",
"e": "added",
"r": "sensors",
"id": "1",
"sensor": deepcopy(SENSORS["1"]),
}
gateway.api.event_handler(state_added_event)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 2
assert hass.states.get("climate.thermostat").state == HVAC_MODE_AUTO
assert hass.states.get("sensor.thermostat_battery_level").state == "100"
|
import enum
from typing import Mapping, Optional, Tuple
from gi.repository import Gdk, GtkSource
from meld.conf import _
class MeldStyleScheme(enum.Enum):
base = "meld-base"
dark = "meld-dark"
style_scheme: Optional[GtkSource.StyleScheme] = None
base_style_scheme: Optional[GtkSource.StyleScheme] = None
def set_base_style_scheme(
new_style_scheme: GtkSource.StyleScheme) -> GtkSource.StyleScheme:
global base_style_scheme
global style_scheme
style_scheme = new_style_scheme
# Get our text background colour by checking the 'text' style of
# the user's selected style scheme, falling back to the GTK+ theme
# background if there is no style scheme background set.
style = style_scheme.get_style('text') if style_scheme else None
if style:
background = style.props.background
rgba = Gdk.RGBA()
rgba.parse(background)
else:
# This case will only be hit for GtkSourceView style schemes
# that don't set a text background, like the "Classic" scheme.
from meld.sourceview import MeldSourceView
stylecontext = MeldSourceView().get_style_context()
background_set, rgba = (
stylecontext.lookup_color('theme_bg_color'))
if not background_set:
rgba = Gdk.RGBA(1, 1, 1, 1)
# This heuristic is absolutely dire. I made it up. There's
# literally no basis to this.
use_dark = (rgba.red + rgba.green + rgba.blue) < 1.0
base_scheme_name = (
MeldStyleScheme.dark if use_dark else MeldStyleScheme.base)
manager = GtkSource.StyleSchemeManager.get_default()
base_style_scheme = manager.get_scheme(base_scheme_name.value)
base_schemes = (MeldStyleScheme.dark.value, MeldStyleScheme.base.value)
if style_scheme and style_scheme.props.id in base_schemes:
style_scheme = base_style_scheme
return base_style_scheme
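# As a worked example of the heuristic above: a dark background such as
# #002B36 parses to roughly (0.0, 0.17, 0.21), whose channel sum of about
# 0.38 is below 1.0, so the meld-dark base scheme would be selected.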
def colour_lookup_with_fallback(name: str, attribute: str) -> Gdk.RGBA:
style = style_scheme.get_style(name) if style_scheme else None
style_attr = getattr(style.props, attribute) if style else None
if not style or not style_attr:
try:
style = base_style_scheme.get_style(name)
style_attr = getattr(style.props, attribute)
except AttributeError:
pass
if not style_attr:
import sys
print(_(
"Couldn’t find colour scheme details for %s-%s; "
"this is a bad install") % (name, attribute), file=sys.stderr)
sys.exit(1)
colour = Gdk.RGBA()
colour.parse(style_attr)
return colour
ColourMap = Mapping[str, Gdk.RGBA]
def get_common_theme() -> Tuple[ColourMap, ColourMap]:
lookup = colour_lookup_with_fallback
fill_colours = {
"insert": lookup("meld:insert", "background"),
"delete": lookup("meld:insert", "background"),
"conflict": lookup("meld:conflict", "background"),
"replace": lookup("meld:replace", "background"),
"error": lookup("meld:error", "background"),
"focus-highlight": lookup("meld:current-line-highlight", "foreground"),
"current-chunk-highlight": lookup(
"meld:current-chunk-highlight", "background"),
"overscroll": lookup("meld:overscroll", "background"),
}
line_colours = {
"insert": lookup("meld:insert", "line-background"),
"delete": lookup("meld:insert", "line-background"),
"conflict": lookup("meld:conflict", "line-background"),
"replace": lookup("meld:replace", "line-background"),
"error": lookup("meld:error", "line-background"),
}
return fill_colours, line_colours
|
from datetime import date
from datetime import datetime
import json
class CJsonEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, datetime):
return obj.strftime('%Y-%m-%d %H:%M:%S')
elif isinstance(obj, date):
return obj.strftime('%Y-%m-%d')
else:
return json.JSONEncoder.default(self, obj)
def object_2_json(obj):
    '''
    Convert a Python dict or other data structure into a JSON string.
    '''
if obj is None:
obj = {}
return json.dumps(obj, cls=CJsonEncoder)
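# For illustration:
#   object_2_json({'when': datetime(2020, 1, 2, 3, 4, 5), 'day': date(2020, 1, 2)})
#   -> '{"when": "2020-01-02 03:04:05", "day": "2020-01-02"}'
# and object_2_json(None) returns '{}'.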
def json_2_dict(json_str):
    '''
    Convert a JSON string into a dict or list data structure.
    '''
try:
return json.loads(json_str)
except Exception:
return None
|
from homeassistant.helpers.deprecation import deprecated_substitute, get_deprecated
from tests.async_mock import MagicMock, patch
class MockBaseClass:
"""Mock base class for deprecated testing."""
@property
@deprecated_substitute("old_property")
def new_property(self):
"""Test property to fetch."""
raise NotImplementedError()
class MockDeprecatedClass(MockBaseClass):
"""Mock deprecated class object."""
@property
def old_property(self):
"""Test property to fetch."""
return True
class MockUpdatedClass(MockBaseClass):
"""Mock updated class object."""
@property
def new_property(self):
"""Test property to fetch."""
return True
@patch("logging.getLogger")
def test_deprecated_substitute_old_class(mock_get_logger):
"""Test deprecated class object."""
mock_logger = MagicMock()
mock_get_logger.return_value = mock_logger
mock_object = MockDeprecatedClass()
assert mock_object.new_property is True
assert mock_object.new_property is True
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
@patch("logging.getLogger")
def test_deprecated_substitute_new_class(mock_get_logger):
"""Test deprecated class object."""
mock_logger = MagicMock()
mock_get_logger.return_value = mock_logger
mock_object = MockUpdatedClass()
assert mock_object.new_property is True
assert mock_object.new_property is True
assert not mock_logger.warning.called
@patch("logging.getLogger")
def test_config_get_deprecated_old(mock_get_logger):
"""Test deprecated class object."""
mock_logger = MagicMock()
mock_get_logger.return_value = mock_logger
config = {"old_name": True}
assert get_deprecated(config, "new_name", "old_name") is True
assert mock_logger.warning.called
assert len(mock_logger.warning.mock_calls) == 1
@patch("logging.getLogger")
def test_config_get_deprecated_new(mock_get_logger):
"""Test deprecated class object."""
mock_logger = MagicMock()
mock_get_logger.return_value = mock_logger
config = {"new_name": True}
assert get_deprecated(config, "new_name", "old_name") is True
assert not mock_logger.warning.called
|
from PyQt5.QtCore import pyqtSlot, QSize
from PyQt5.QtWidgets import QProgressBar, QSizePolicy
from qutebrowser.config import stylesheet
from qutebrowser.utils import utils, usertypes
class Progress(QProgressBar):
"""The progress bar part of the status bar."""
STYLESHEET = """
QProgressBar {
border-radius: 0px;
border: 2px solid transparent;
background-color: transparent;
font: {{ conf.fonts.statusbar }};
}
QProgressBar::chunk {
background-color: {{ conf.colors.statusbar.progress.bg }};
}
"""
def __init__(self, parent=None):
super().__init__(parent)
stylesheet.set_register(self)
self.enabled = False
self.setSizePolicy(QSizePolicy.Fixed, QSizePolicy.Fixed)
self.setTextVisible(False)
self.hide()
def __repr__(self):
return utils.get_repr(self, value=self.value())
@pyqtSlot()
def on_load_started(self):
"""Clear old error and show progress, used as slot to loadStarted."""
self.setValue(0)
self.setVisible(self.enabled)
@pyqtSlot(int)
def on_load_progress(self, value):
"""Hide the statusbar when loading finished.
We use this instead of loadFinished because we sometimes get
loadStarted and loadProgress(100) without loadFinished from Qt.
WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
"""
self.setValue(value)
if value == 100:
self.hide()
def on_tab_changed(self, tab):
"""Set the correct value when the current tab changed."""
self.setValue(tab.progress())
if self.enabled and tab.load_status() == usertypes.LoadStatus.loading:
self.show()
else:
self.hide()
def sizeHint(self):
"""Set the height to the text height."""
width = super().sizeHint().width()
height = self.fontMetrics().height()
return QSize(width, height)
def minimumSizeHint(self):
return self.sizeHint()
|
from homeassistant.components.fan import (
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
)
from homeassistant.components.zwave import fan
from tests.mock.zwave import MockEntityValues, MockNode, MockValue, value_changed
def test_get_device_detects_fan(mock_openzwave):
"""Test get_device returns a zwave fan."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = fan.get_device(node=node, values=values, node_config={})
assert isinstance(device, fan.ZwaveFan)
assert device.supported_features == SUPPORT_SET_SPEED
assert device.speed_list == [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
def test_fan_turn_on(mock_openzwave):
"""Test turning on a zwave fan."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = fan.get_device(node=node, values=values, node_config={})
device.turn_on()
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 255
node.reset_mock()
device.turn_on(speed=SPEED_OFF)
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 0
node.reset_mock()
device.turn_on(speed=SPEED_LOW)
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 1
node.reset_mock()
device.turn_on(speed=SPEED_MEDIUM)
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 50
node.reset_mock()
device.turn_on(speed=SPEED_HIGH)
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 99
def test_fan_turn_off(mock_openzwave):
"""Test turning off a dimmable zwave fan."""
node = MockNode()
value = MockValue(data=46, node=node)
values = MockEntityValues(primary=value)
device = fan.get_device(node=node, values=values, node_config={})
device.turn_off()
assert node.set_dimmer.called
value_id, brightness = node.set_dimmer.mock_calls[0][1]
assert value_id == value.value_id
assert brightness == 0
def test_fan_value_changed(mock_openzwave):
"""Test value changed for zwave fan."""
node = MockNode()
value = MockValue(data=0, node=node)
values = MockEntityValues(primary=value)
device = fan.get_device(node=node, values=values, node_config={})
assert not device.is_on
value.data = 10
value_changed(value)
assert device.is_on
assert device.speed == SPEED_LOW
value.data = 50
value_changed(value)
assert device.is_on
assert device.speed == SPEED_MEDIUM
value.data = 90
value_changed(value)
assert device.is_on
assert device.speed == SPEED_HIGH
|
import logging
from PyTado.interface import Tado
import requests.exceptions
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from .const import CONF_FALLBACK, UNIQUE_ID
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_USERNAME): str, vol.Required(CONF_PASSWORD): str}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
try:
tado = await hass.async_add_executor_job(
Tado, data[CONF_USERNAME], data[CONF_PASSWORD]
)
tado_me = await hass.async_add_executor_job(tado.getMe)
except KeyError as ex:
raise InvalidAuth from ex
except RuntimeError as ex:
raise CannotConnect from ex
except requests.exceptions.HTTPError as ex:
if ex.response.status_code > 400 and ex.response.status_code < 500:
raise InvalidAuth from ex
raise CannotConnect from ex
if "homes" not in tado_me or len(tado_me["homes"]) == 0:
raise NoHomes
home = tado_me["homes"][0]
unique_id = str(home["id"])
name = home["name"]
return {"title": name, UNIQUE_ID: unique_id}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Tado."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
validated = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except NoHomes:
errors["base"] = "no_homes"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(validated[UNIQUE_ID])
self._abort_if_unique_id_configured()
return self.async_create_entry(
title=validated["title"], data=user_input
)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_homekit(self, homekit_info):
"""Handle HomeKit discovery."""
if self._async_current_entries():
            # We can see tado on the network and could prompt the user to
            # configure it, but the device will not reveal the account it is
            # bound to and a single account can have multiple tado devices.
            # We therefore avoid showing the device as discovered once one is
            # already configured; users can always add another one via "+".
return self.async_abort(reason="already_configured")
properties = {
key.lower(): value for (key, value) in homekit_info["properties"].items()
}
await self.async_set_unique_id(properties["id"])
return await self.async_step_user()
async def async_step_import(self, user_input):
"""Handle import."""
if self._username_already_configured(user_input):
return self.async_abort(reason="already_configured")
return await self.async_step_user(user_input)
def _username_already_configured(self, user_input):
"""See if we already have a username matching user input configured."""
existing_username = {
entry.data[CONF_USERNAME] for entry in self._async_current_entries()
}
return user_input[CONF_USERNAME] in existing_username
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for tado."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
data_schema = vol.Schema(
{
vol.Required(
CONF_FALLBACK, default=self.config_entry.options.get(CONF_FALLBACK)
): bool,
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
class NoHomes(exceptions.HomeAssistantError):
"""Error to indicate the account has no homes."""
|
try:
import colorama
colorama.init()
except ImportError:
pass
from plumbum import cli, colors
class Geet(cli.Application):
SUBCOMMAND_HELPMSG = False
DESCRIPTION = colors.yellow | """The l33t version control"""
PROGNAME = colors.green
VERSION = colors.blue | "1.7.2"
COLOR_USAGE = colors.magenta
COLOR_GROUPS = {"Meta-switches":colors.bold, "Switches":colors.skyblue1, "Subcommands":colors.yellow}
verbosity = cli.SwitchAttr("--verbosity", cli.Set("low", "high", "some-very-long-name", "to-test-wrap-around"),
help = colors.cyan | "sets the verbosity level of the geet tool. doesn't really do anything except for testing line-wrapping "
"in help " * 3)
verbositie = cli.SwitchAttr("--verbositie", cli.Set("low", "high", "some-very-long-name", "to-test-wrap-around"),
help = colors.hotpink | "sets the verbosity level of the geet tool. doesn't really do anything except for testing line-wrapping "
"in help " * 3)
@Geet.subcommand(colors.red | "commit")
class GeetCommit(cli.Application):
"""creates a new commit in the current branch"""
auto_add = cli.Flag("-a", help = "automatically add changed files")
message = cli.SwitchAttr("-m", str, mandatory = True, help = "sets the commit message")
def main(self):
print("committing...")
GeetCommit.unbind_switches("-v", "--version")
@Geet.subcommand("push")
class GeetPush(cli.Application):
"""pushes the current local branch to the remote one"""
tags = cli.Flag("--tags", help = "whether to push tags (default is False)")
def main(self, remote, branch = "master"):
print("pushing to %s/%s..." % (remote, branch))
if __name__ == "__main__":
Geet.run()
|
import subprocess
import sys
import logging
import re
import pytest
from PyQt5.QtCore import QProcess
from helpers import utils
ascii_locale = pytest.mark.skipif(sys.hexversion >= 0x03070000,
reason="Python >= 3.7 doesn't force ASCII "
"locale with LC_ALL=C")
def _base_args(config):
"""Get the arguments to pass with every invocation."""
args = ['--debug', '--json-logging', '--no-err-windows']
if config.webengine:
args += ['--backend', 'webengine']
else:
args += ['--backend', 'webkit']
if config.webengine:
args += utils.seccomp_args(qt_flag=True)
args.append('about:blank')
return args
@pytest.fixture
def temp_basedir_env(tmpdir, short_tmpdir):
"""Return a dict of environment variables that fakes --temp-basedir.
We can't run --basedir or --temp-basedir for some tests, so we mess with
XDG_*_DIR to get things relocated.
"""
data_dir = tmpdir / 'data'
config_dir = tmpdir / 'config'
runtime_dir = short_tmpdir / 'rt'
cache_dir = tmpdir / 'cache'
runtime_dir.ensure(dir=True)
runtime_dir.chmod(0o700)
lines = [
'[general]',
'quickstart-done = 1',
'backend-warning-shown = 1',
'webkit-warning-shown = 1',
]
state_file = data_dir / 'qutebrowser' / 'state'
state_file.write_text('\n'.join(lines), encoding='utf-8', ensure=True)
env = {
'XDG_DATA_HOME': str(data_dir),
'XDG_CONFIG_HOME': str(config_dir),
'XDG_RUNTIME_DIR': str(runtime_dir),
'XDG_CACHE_HOME': str(cache_dir),
}
return env
@pytest.mark.linux
@ascii_locale
def test_downloads_with_ascii_locale(request, server, tmpdir, quteproc_new):
"""Test downloads with LC_ALL=C set.
https://github.com/qutebrowser/qutebrowser/issues/908
https://github.com/qutebrowser/qutebrowser/issues/1726
"""
args = ['--temp-basedir'] + _base_args(request.config)
quteproc_new.start(args, env={'LC_ALL': 'C'})
quteproc_new.set_setting('downloads.location.directory', str(tmpdir))
# Test a normal download
quteproc_new.set_setting('downloads.location.prompt', 'false')
url = 'http://localhost:{port}/data/downloads/ä-issue908.bin'.format(
port=server.port)
quteproc_new.send_cmd(':download {}'.format(url))
quteproc_new.wait_for(category='downloads',
message='Download ?-issue908.bin finished')
# Test :prompt-open-download
quteproc_new.set_setting('downloads.location.prompt', 'true')
quteproc_new.send_cmd(':download {}'.format(url))
quteproc_new.send_cmd(':prompt-open-download "{}" -c pass'
.format(sys.executable))
quteproc_new.wait_for(category='downloads',
message='Download ä-issue908.bin finished')
quteproc_new.wait_for(category='misc',
message='Opening * with [*python*]')
assert len(tmpdir.listdir()) == 1
assert (tmpdir / '?-issue908.bin').exists()
@pytest.mark.linux
@pytest.mark.parametrize('url', ['/föö.html', 'file:///föö.html'])
@ascii_locale
def test_open_with_ascii_locale(request, server, tmpdir, quteproc_new, url):
"""Test opening non-ascii URL with LC_ALL=C set.
https://github.com/qutebrowser/qutebrowser/issues/1450
"""
args = ['--temp-basedir'] + _base_args(request.config)
quteproc_new.start(args, env={'LC_ALL': 'C'})
quteproc_new.set_setting('url.auto_search', 'never')
# Test opening a file whose name contains non-ascii characters.
# No exception thrown means test success.
quteproc_new.send_cmd(':open {}'.format(url))
if not request.config.webengine:
line = quteproc_new.wait_for(message="Error while loading *: Error "
"opening /*: No such file or directory")
line.expected = True
quteproc_new.wait_for(message="load status for <* tab_id=* "
"url='*/f%C3%B6%C3%B6.html'>: LoadStatus.error")
if request.config.webengine:
line = quteproc_new.wait_for(message='Load error: ERR_FILE_NOT_FOUND')
line.expected = True
@pytest.mark.linux
@ascii_locale
def test_open_command_line_with_ascii_locale(request, server, tmpdir,
quteproc_new):
"""Test opening file via command line with a non-ascii name with LC_ALL=C.
https://github.com/qutebrowser/qutebrowser/issues/1450
"""
# The file does not actually have to exist because the relevant checks will
# all be called. No exception thrown means test success.
args = (['--temp-basedir'] + _base_args(request.config) +
['/home/user/föö.html'])
quteproc_new.start(args, env={'LC_ALL': 'C'})
if not request.config.webengine:
line = quteproc_new.wait_for(message="Error while loading *: Error "
"opening /*: No such file or directory")
line.expected = True
quteproc_new.wait_for(message="load status for <* tab_id=* "
"url='*/f*.html'>: LoadStatus.error")
@pytest.mark.linux
def test_misconfigured_user_dirs(request, server, temp_basedir_env,
tmpdir, quteproc_new):
"""Test downloads with a misconfigured XDG_DOWNLOAD_DIR.
https://github.com/qutebrowser/qutebrowser/issues/866
https://github.com/qutebrowser/qutebrowser/issues/1269
"""
home = tmpdir / 'home'
home.ensure(dir=True)
temp_basedir_env['HOME'] = str(home)
assert temp_basedir_env['XDG_CONFIG_HOME'] == tmpdir / 'config'
(tmpdir / 'config' / 'user-dirs.dirs').write('XDG_DOWNLOAD_DIR="relative"',
ensure=True)
quteproc_new.start(_base_args(request.config), env=temp_basedir_env)
quteproc_new.set_setting('downloads.location.prompt', 'false')
url = 'http://localhost:{port}/data/downloads/download.bin'.format(
port=server.port)
quteproc_new.send_cmd(':download {}'.format(url))
line = quteproc_new.wait_for(
loglevel=logging.ERROR, category='message',
message='XDG_DOWNLOAD_DIR points to a relative path - please check '
'your ~/.config/user-dirs.dirs. The download is saved in your '
'home directory.')
line.expected = True
quteproc_new.wait_for(category='downloads',
message='Download download.bin finished')
assert (home / 'download.bin').exists()
def test_no_loglines(request, quteproc_new):
"""Test qute://log with --loglines=0."""
quteproc_new.start(args=['--temp-basedir', '--loglines=0'] +
_base_args(request.config))
quteproc_new.open_path('qute://log')
assert quteproc_new.get_content() == 'Log output was disabled.'
@pytest.mark.not_frozen
@pytest.mark.parametrize('level', ['1', '2'])
def test_optimize(request, quteproc_new, capfd, level):
quteproc_new.start(args=['--temp-basedir'] + _base_args(request.config),
env={'PYTHONOPTIMIZE': level})
if level == '2':
msg = ("Running on optimize level higher than 1, unexpected behavior "
"may occur.")
line = quteproc_new.wait_for(message=msg)
line.expected = True
# Waiting for quit to make sure no other warning is emitted
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
@pytest.mark.not_frozen
@pytest.mark.flaky # Fails sometimes with empty output...
def test_version(request):
"""Test invocation with --version argument."""
args = ['-m', 'qutebrowser', '--version'] + _base_args(request.config)
# can't use quteproc_new here because it's confused by
# early process termination
proc = QProcess()
proc.setProcessChannelMode(QProcess.SeparateChannels)
proc.start(sys.executable, args)
ok = proc.waitForStarted(2000)
assert ok
ok = proc.waitForFinished(10000)
stdout = bytes(proc.readAllStandardOutput()).decode('utf-8')
print(stdout)
stderr = bytes(proc.readAllStandardError()).decode('utf-8')
print(stderr)
assert ok
assert proc.exitStatus() == QProcess.NormalExit
match = re.search(r'^qutebrowser\s+v\d+(\.\d+)', stdout, re.MULTILINE)
assert match is not None
def test_qt_arg(request, quteproc_new, tmpdir):
"""Test --qt-arg."""
args = (['--temp-basedir', '--qt-arg', 'stylesheet',
str(tmpdir / 'does-not-exist')] + _base_args(request.config))
quteproc_new.start(args)
msg = 'QCss::Parser - Failed to load file "*does-not-exist"'
line = quteproc_new.wait_for(message=msg)
line.expected = True
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
@pytest.mark.linux
def test_webengine_download_suffix(request, quteproc_new, tmpdir):
"""Make sure QtWebEngine does not add a suffix to downloads."""
if not request.config.webengine:
pytest.skip()
download_dir = tmpdir / 'downloads'
download_dir.ensure(dir=True)
(tmpdir / 'user-dirs.dirs').write(
'XDG_DOWNLOAD_DIR={}'.format(download_dir))
env = {'XDG_CONFIG_HOME': str(tmpdir)}
args = (['--temp-basedir'] + _base_args(request.config))
quteproc_new.start(args, env=env)
quteproc_new.set_setting('downloads.location.prompt', 'false')
quteproc_new.set_setting('downloads.location.directory', str(download_dir))
quteproc_new.open_path('data/downloads/download.bin', wait=False)
quteproc_new.wait_for(category='downloads', message='Download * finished')
quteproc_new.open_path('data/downloads/download.bin', wait=False)
quteproc_new.wait_for(message='Entering mode KeyMode.yesno *')
quteproc_new.send_cmd(':prompt-accept yes')
quteproc_new.wait_for(category='downloads', message='Download * finished')
files = download_dir.listdir()
assert len(files) == 1
assert files[0].basename == 'download.bin'
def test_command_on_start(request, quteproc_new):
"""Make sure passing a command on start works.
See https://github.com/qutebrowser/qutebrowser/issues/2408
"""
args = (['--temp-basedir'] + _base_args(request.config) +
[':quickmark-add https://www.example.com/ example'])
quteproc_new.start(args)
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
@pytest.mark.parametrize('python', ['python2', 'python3.5'])
def test_launching_with_old_python(python):
try:
proc = subprocess.run(
[python, '-m', 'qutebrowser', '--no-err-windows'],
stderr=subprocess.PIPE,
check=False)
except FileNotFoundError:
pytest.skip(f"{python} not found")
assert proc.returncode == 1
error = "At least Python 3.6 is required to run qutebrowser"
assert proc.stderr.decode('ascii').startswith(error)
def test_initial_private_browsing(request, quteproc_new):
"""Make sure the initial window is private when the setting is set."""
args = (_base_args(request.config) +
['--temp-basedir', '-s', 'content.private_browsing', 'true'])
quteproc_new.start(args)
quteproc_new.compare_session("""
windows:
- private: True
tabs:
- history:
- url: about:blank
""")
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
def test_loading_empty_session(tmpdir, request, quteproc_new):
"""Make sure loading an empty session opens a window."""
session = tmpdir / 'session.yml'
session.write('windows: []')
args = _base_args(request.config) + ['--temp-basedir', '-r', str(session)]
quteproc_new.start(args)
quteproc_new.compare_session("""
windows:
- tabs:
- history:
- url: about:blank
""")
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
def test_qute_settings_persistence(short_tmpdir, request, quteproc_new):
"""Make sure settings from qute://settings are persistent."""
args = _base_args(request.config) + ['--basedir', str(short_tmpdir)]
quteproc_new.start(args)
quteproc_new.open_path('qute://settings/')
quteproc_new.send_cmd(':jseval --world main '
'cset("search.ignore_case", "always")')
quteproc_new.wait_for(message='No output or error')
quteproc_new.wait_for(category='config',
message='Config option changed: '
'search.ignore_case = always')
assert quteproc_new.get_setting('search.ignore_case') == 'always'
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
quteproc_new.start(args)
assert quteproc_new.get_setting('search.ignore_case') == 'always'
quteproc_new.send_cmd(':quit')
quteproc_new.wait_for_quit()
|
import unicodedata
import numpy as np
from .. import coding
from ..core.variable import Variable
# Special characters that are permitted in netCDF names except in the
# 0th position of the string
_specialchars = '_.@+- !"#$%&\\()*,:;<=>?[]^`{|}~'
# The following are reserved names in CDL and may not be used as names of
# variables, dimensions, or attributes
_reserved_names = {
"byte",
"char",
"short",
"ushort",
"int",
"uint",
"int64",
"uint64",
"float",
"real",
"double",
"bool",
"string",
}
# These data-types aren't supported by netCDF3, so they are automatically
# coerced instead as indicated by the "coerce_nc3_dtype" function
_nc3_dtype_coercions = {
"int64": "int32",
"uint64": "int32",
"uint32": "int32",
"uint16": "int16",
"uint8": "int8",
"bool": "int8",
}
# encode all strings as UTF-8
STRING_ENCODING = "utf-8"
def coerce_nc3_dtype(arr):
"""Coerce an array to a data type that can be stored in a netCDF-3 file
This function performs the dtype conversions as specified by the
``_nc3_dtype_coercions`` mapping:
int64 -> int32
uint64 -> int32
uint32 -> int32
uint16 -> int16
uint8 -> int8
bool -> int8
    Data is checked for equality, or equivalence (non-NaN values), using
    ``(cast_array == original_array).all()``.
"""
dtype = str(arr.dtype)
if dtype in _nc3_dtype_coercions:
new_dtype = _nc3_dtype_coercions[dtype]
# TODO: raise a warning whenever casting the data-type instead?
cast_arr = arr.astype(new_dtype)
if not (cast_arr == arr).all():
raise ValueError(
f"could not safely cast array from dtype {dtype} to {new_dtype}"
)
arr = cast_arr
return arr
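# Illustrative self-check (not part of the upstream module): bool data survives
# the cast to int8 unchanged, while an int64 value that overflows int32 is
# rejected, matching the coercion table documented above.
def _demo_coerce_nc3_dtype():
    assert coerce_nc3_dtype(np.array([True, False])).dtype == np.dtype("int8")
    try:
        coerce_nc3_dtype(np.array([2 ** 40], dtype="int64"))
    except ValueError:
        pass  # expected: 2**40 cannot be represented exactly as int32
    else:
        raise AssertionError("expected ValueError for an unsafe cast")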
def encode_nc3_attr_value(value):
if isinstance(value, bytes):
pass
elif isinstance(value, str):
value = value.encode(STRING_ENCODING)
else:
value = coerce_nc3_dtype(np.atleast_1d(value))
if value.ndim > 1:
raise ValueError("netCDF attributes must be 1-dimensional")
return value
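# Illustrative behaviour sketch (not part of the upstream module): str values
# are encoded to UTF-8 bytes, and numeric scalars become 1-d arrays with an
# nc3-compatible dtype, e.g.
#
#   encode_nc3_attr_value("metres")      -> b"metres"
#   encode_nc3_attr_value(np.uint64(3))  -> array([3], dtype=int32)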
def encode_nc3_attrs(attrs):
return {k: encode_nc3_attr_value(v) for k, v in attrs.items()}
def encode_nc3_variable(var):
for coder in [
coding.strings.EncodedStringCoder(allows_unicode=False),
coding.strings.CharacterArrayCoder(),
]:
var = coder.encode(var)
data = coerce_nc3_dtype(var.data)
attrs = encode_nc3_attrs(var.attrs)
return Variable(var.dims, data, attrs, var.encoding)
def _isalnumMUTF8(c):
"""Return True if the given UTF-8 encoded character is alphanumeric
or multibyte.
Input is not checked!
"""
return c.isalnum() or (len(c.encode("utf-8")) > 1)
def is_valid_nc3_name(s):
"""Test whether an object can be validly converted to a netCDF-3
dimension, variable or attribute name
Earlier versions of the netCDF C-library reference implementation
enforced a more restricted set of characters in creating new names,
but permitted reading names containing arbitrary bytes. This
specification extends the permitted characters in names to include
multi-byte UTF-8 encoded Unicode and additional printing characters
from the US-ASCII alphabet. The first character of a name must be
alphanumeric, a multi-byte UTF-8 character, or '_' (reserved for
special names with meaning to implementations, such as the
"_FillValue" attribute). Subsequent characters may also include
printing special characters, except for '/' which is not allowed in
names. Names that have trailing space characters are also not
permitted.
"""
if not isinstance(s, str):
return False
num_bytes = len(s.encode("utf-8"))
return (
(unicodedata.normalize("NFC", s) == s)
and (s not in _reserved_names)
        and (num_bytes >= 1)  # reject empty names before indexing s[-1]
and ("/" not in s)
and (s[-1] != " ")
and (_isalnumMUTF8(s[0]) or (s[0] == "_"))
and all(_isalnumMUTF8(c) or c in _specialchars for c in s)
)
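# Illustrative self-check (not part of the upstream module) of the naming rules
# described in the docstring above.
def _demo_is_valid_nc3_name():
    assert is_valid_nc3_name("_FillValue")
    assert is_valid_nc3_name("température")    # multi-byte UTF-8 is permitted
    assert not is_valid_nc3_name("bad/name")   # '/' is not allowed
    assert not is_valid_nc3_name("trailing ")  # trailing spaces are rejected
    assert not is_valid_nc3_name("float")      # reserved CDL name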
|
from datetime import timedelta
from pyirishrail.pyirishrail import IrishRailRTPI
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME, TIME_MINUTES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
ATTR_STATION = "Station"
ATTR_ORIGIN = "Origin"
ATTR_DESTINATION = "Destination"
ATTR_DIRECTION = "Direction"
ATTR_STOPS_AT = "Stops at"
ATTR_DUE_IN = "Due in"
ATTR_DUE_AT = "Due at"
ATTR_EXPECT_AT = "Expected at"
ATTR_NEXT_UP = "Later Train"
ATTR_TRAIN_TYPE = "Train type"
ATTRIBUTION = "Data provided by Irish Rail"
CONF_STATION = "station"
CONF_DESTINATION = "destination"
CONF_DIRECTION = "direction"
CONF_STOPS_AT = "stops_at"
DEFAULT_NAME = "Next Train"
ICON = "mdi:train"
SCAN_INTERVAL = timedelta(minutes=2)
TIME_STR_FORMAT = "%H:%M"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STATION): cv.string,
vol.Optional(CONF_DIRECTION): cv.string,
vol.Optional(CONF_DESTINATION): cv.string,
vol.Optional(CONF_STOPS_AT): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
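# Illustrative configuration.yaml entry accepted by the schema above (the
# platform key and station names are placeholders for this sketch; only
# `station` is required):
#
#   sensor:
#     - platform: irish_rail_transport
#       station: Howth Junction
#       direction: Southbound
#       destination: Dublin Connolly
#       stops_at: Tara St
#       name: Next southbound train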
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Irish Rail transport sensor."""
station = config.get(CONF_STATION)
direction = config.get(CONF_DIRECTION)
destination = config.get(CONF_DESTINATION)
stops_at = config.get(CONF_STOPS_AT)
name = config.get(CONF_NAME)
irish_rail = IrishRailRTPI()
data = IrishRailTransportData(irish_rail, station, direction, destination, stops_at)
add_entities(
[
IrishRailTransportSensor(
data, station, direction, destination, stops_at, name
)
],
True,
)
class IrishRailTransportSensor(Entity):
"""Implementation of an irish rail public transport sensor."""
def __init__(self, data, station, direction, destination, stops_at, name):
"""Initialize the sensor."""
self.data = data
self._station = station
self._direction = direction
        self._destination = destination
self._stops_at = stops_at
self._name = name
self._state = None
self._times = []
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._times:
next_up = "None"
if len(self._times) > 1:
next_up = (
f"{self._times[1][ATTR_ORIGIN]} to "
f"{self._times[1][ATTR_DESTINATION]} in "
f"{self._times[1][ATTR_DUE_IN]}"
)
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_STATION: self._station,
ATTR_ORIGIN: self._times[0][ATTR_ORIGIN],
ATTR_DESTINATION: self._times[0][ATTR_DESTINATION],
ATTR_DUE_IN: self._times[0][ATTR_DUE_IN],
ATTR_DUE_AT: self._times[0][ATTR_DUE_AT],
ATTR_EXPECT_AT: self._times[0][ATTR_EXPECT_AT],
ATTR_DIRECTION: self._times[0][ATTR_DIRECTION],
ATTR_STOPS_AT: self._times[0][ATTR_STOPS_AT],
ATTR_NEXT_UP: next_up,
ATTR_TRAIN_TYPE: self._times[0][ATTR_TRAIN_TYPE],
}
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return TIME_MINUTES
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
def update(self):
"""Get the latest data and update the states."""
self.data.update()
self._times = self.data.info
if self._times:
self._state = self._times[0][ATTR_DUE_IN]
else:
self._state = None
class IrishRailTransportData:
"""The Class for handling the data retrieval."""
def __init__(self, irish_rail, station, direction, destination, stops_at):
"""Initialize the data object."""
self._ir_api = irish_rail
self.station = station
self.direction = direction
self.destination = destination
self.stops_at = stops_at
self.info = self._empty_train_data()
def update(self):
"""Get the latest data from irishrail."""
trains = self._ir_api.get_station_by_name(
self.station,
direction=self.direction,
destination=self.destination,
stops_at=self.stops_at,
)
stops_at = self.stops_at if self.stops_at else ""
self.info = []
for train in trains:
train_data = {
ATTR_STATION: self.station,
ATTR_ORIGIN: train.get("origin"),
ATTR_DESTINATION: train.get("destination"),
ATTR_DUE_IN: train.get("due_in_mins"),
ATTR_DUE_AT: train.get("scheduled_arrival_time"),
ATTR_EXPECT_AT: train.get("expected_departure_time"),
ATTR_DIRECTION: train.get("direction"),
ATTR_STOPS_AT: stops_at,
ATTR_TRAIN_TYPE: train.get("type"),
}
self.info.append(train_data)
if not self.info:
self.info = self._empty_train_data()
def _empty_train_data(self):
"""Generate info for an empty train."""
dest = self.destination if self.destination else ""
direction = self.direction if self.direction else ""
stops_at = self.stops_at if self.stops_at else ""
return [
{
ATTR_STATION: self.station,
ATTR_ORIGIN: "",
ATTR_DESTINATION: dest,
ATTR_DUE_IN: "n/a",
ATTR_DUE_AT: "n/a",
ATTR_EXPECT_AT: "n/a",
ATTR_DIRECTION: direction,
ATTR_STOPS_AT: stops_at,
ATTR_TRAIN_TYPE: "",
}
]
|
from sqlalchemy import Column, Integer, String, text
from sqlalchemy.dialects.postgresql import JSON
from sqlalchemy.orm import relationship
from sqlalchemy_utils import ArrowType
from lemur.database import db
from lemur.plugins.base import plugins
from lemur.utils import Vault
class DnsProvider(db.Model):
__tablename__ = "dns_providers"
id = Column(Integer(), primary_key=True)
name = Column(String(length=256), unique=True, nullable=True)
description = Column(String(length=1024), nullable=True)
provider_type = Column(String(length=256), nullable=True)
credentials = Column(Vault, nullable=True)
api_endpoint = Column(String(length=256), nullable=True)
date_created = Column(ArrowType(), server_default=text("now()"), nullable=False)
status = Column(String(length=128), nullable=True)
options = Column(JSON, nullable=True)
domains = Column(JSON, nullable=True)
certificates = relationship(
"Certificate",
backref="dns_provider",
foreign_keys="Certificate.dns_provider_id",
lazy="dynamic",
)
def __init__(self, name, description, provider_type, credentials):
self.name = name
self.description = description
self.provider_type = provider_type
self.credentials = credentials
@property
def plugin(self):
return plugins.get(self.plugin_name)
def __repr__(self):
return "DnsProvider(name={name})".format(name=self.name)
|
from diamond.convertor import time
import unittest
class TestConvertor(unittest.TestCase):
def test_basic(self):
self.assertEquals(time.convert(60, 's', 's'), 60.0)
self.assertEquals(time.convert(60, 's', 'm'), 1.0)
self.assertEquals(time.convert(60000, 'ms', 'm'), 1.0)
self.assertEquals(time.convert(60000, 'MS', 'minutes'), 1.0)
self.assertEquals(time.convert(3600 * 1000 * 1.4, 'ms', 'h'), 1.4)
self.assertEquals(time.convert(86400 * 1000 * 2.5, 'ms', 'd'), 2.5)
self.assertEquals(time.convert(86400 * 1000 * 365 * 0.7, 'ms', 'y'),
0.7)
self.assertEquals(time.convert(1000, 'ms', 'us'), 1000000)
self.assertEquals(time.convert(1000, 'ms', 'ns'), 1000000000)
self.assertEquals(time.convert(1.5, 'y', 'ns'),
1.5 * 365 * 24 * 3600 * 1000 * 1000 * 1000)
def test_unrecognised_unit(self):
self.assertRaises(NotImplementedError, time.convert, 60, 's', 'months')
|