import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
import pytest
from mne.parallel import parallel_func
from mne.utils import (ProgressBar, array_split_idx, use_log_level,
modified_env, catch_logging)
def test_progressbar():
"""Test progressbar class."""
a = np.arange(10)
pbar = ProgressBar(a)
assert a is pbar.iterable
assert pbar.max_value == 10
pbar = ProgressBar(10)
assert pbar.max_value == 10
assert pbar.iterable is None
# Make sure that non-iterable input raises an error
def iter_func(a):
for ii in a:
pass
with pytest.raises(TypeError, match='not iterable'):
iter_func(pbar)
# Make sure different progress bars can be used
with catch_logging() as log, modified_env(MNE_TQDM='tqdm'), \
use_log_level('debug'), ProgressBar(np.arange(3)) as pbar:
for p in pbar:
pass
log = log.getvalue()
assert 'Using ProgressBar with tqdm\n' in log
with modified_env(MNE_TQDM='broken'), pytest.raises(ValueError):
ProgressBar(np.arange(3))
with modified_env(MNE_TQDM='tqdm.broken'), pytest.raises(AttributeError):
ProgressBar(np.arange(3))
# off
with catch_logging() as log, modified_env(MNE_TQDM='off'), \
use_log_level('debug'), ProgressBar(np.arange(3)) as pbar:
for p in pbar:
pass
log = log.getvalue()
assert 'Using ProgressBar with off\n' == log
def _identity(x):
return x
def test_progressbar_parallel_basic(capsys):
"""Test ProgressBar with parallel computing, basic version."""
assert capsys.readouterr().out == ''
parallel, p_fun, _ = parallel_func(_identity, total=10, n_jobs=1,
verbose=True)
with use_log_level(True):
out = parallel(p_fun(x) for x in range(10))
assert out == list(range(10))
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block(x, pb):
for ii in range(len(x)):
pb.update(ii + 1)
return x
def test_progressbar_parallel_advanced(capsys):
"""Test ProgressBar with parallel computing, advanced version."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
parallel, p_fun, _ = parallel_func(_identity_block, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr)) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(arr, 2))
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=10).sum()
assert sum_ == len(arr)
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
out = np.concatenate(out)
assert_array_equal(out, arr)
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
def _identity_block_wide(x, pb):
for ii in range(len(x)):
for jj in range(2):
pb.update(ii * 2 + jj + 1)
return x, pb.idx
def test_progressbar_parallel_more(capsys):
"""Test ProgressBar with parallel computing, advanced version."""
assert capsys.readouterr().out == ''
# This must be "1" because "capsys" won't get stdout properly otherwise
parallel, p_fun, _ = parallel_func(_identity_block_wide, n_jobs=1,
verbose=False)
arr = np.arange(10)
with use_log_level(True):
with ProgressBar(len(arr) * 2) as pb:
out = parallel(p_fun(x, pb.subset(pb_idx))
for pb_idx, x in array_split_idx(
arr, 2, n_per_split=2))
idxs = np.concatenate([o[1] for o in out])
assert_array_equal(idxs, np.arange(len(arr) * 2))
out = np.concatenate([o[0] for o in out])
assert op.isfile(pb._mmap_fname)
sum_ = np.memmap(pb._mmap_fname, dtype='bool', mode='r',
shape=len(arr) * 2).sum()
assert sum_ == len(arr) * 2
assert not op.isfile(pb._mmap_fname), '__exit__ not called?'
cap = capsys.readouterr()
out = cap.err
assert '100%' in out
|
from collections import Counter
import numpy as np
import pandas as pd
from .coding.times import CFDatetimeCoder, CFTimedeltaCoder
from .conventions import decode_cf
from .core import duck_array_ops
from .core.dataarray import DataArray
from .core.dtypes import get_fill_value
from .core.pycompat import dask_array_type
cdms2_ignored_attrs = {"name", "tileIndex"}
iris_forbidden_keys = {
"standard_name",
"long_name",
"units",
"bounds",
"axis",
"calendar",
"leap_month",
"leap_year",
"month_lengths",
"coordinates",
"grid_mapping",
"climatology",
"cell_methods",
"formula_terms",
"compress",
"missing_value",
"add_offset",
"scale_factor",
"valid_max",
"valid_min",
"valid_range",
"_FillValue",
}
cell_methods_strings = {
"point",
"sum",
"maximum",
"median",
"mid_range",
"minimum",
"mean",
"mode",
"standard_deviation",
"variance",
}
def encode(var):
return CFTimedeltaCoder().encode(CFDatetimeCoder().encode(var.variable))
def _filter_attrs(attrs, ignored_attrs):
"""Return attrs that are not in ignored_attrs"""
return {k: v for k, v in attrs.items() if k not in ignored_attrs}
def from_cdms2(variable):
"""Convert a cdms2 variable into an DataArray"""
values = np.asarray(variable)
name = variable.id
dims = variable.getAxisIds()
coords = {}
for axis in variable.getAxisList():
coords[axis.id] = DataArray(
np.asarray(axis),
dims=[axis.id],
attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs),
)
grid = variable.getGrid()
if grid is not None:
ids = [a.id for a in grid.getAxisList()]
for axis in grid.getLongitude(), grid.getLatitude():
if axis.id not in variable.getAxisIds():
coords[axis.id] = DataArray(
np.asarray(axis[:]),
dims=ids,
attrs=_filter_attrs(axis.attributes, cdms2_ignored_attrs),
)
attrs = _filter_attrs(variable.attributes, cdms2_ignored_attrs)
dataarray = DataArray(values, dims=dims, coords=coords, name=name, attrs=attrs)
return decode_cf(dataarray.to_dataset())[dataarray.name]
def to_cdms2(dataarray, copy=True):
"""Convert a DataArray into a cdms2 variable"""
# we don't want cdms2 to be a hard dependency
import cdms2
def set_cdms2_attrs(var, attrs):
for k, v in attrs.items():
setattr(var, k, v)
# 1D axes
axes = []
for dim in dataarray.dims:
coord = encode(dataarray.coords[dim])
axis = cdms2.createAxis(coord.values, id=dim)
set_cdms2_attrs(axis, coord.attrs)
axes.append(axis)
# Data
var = encode(dataarray)
cdms2_var = cdms2.createVariable(
var.values, axes=axes, id=dataarray.name, mask=pd.isnull(var.values), copy=copy
)
# Attributes
set_cdms2_attrs(cdms2_var, var.attrs)
# Curvilinear and unstructured grids
if dataarray.name not in dataarray.coords:
cdms2_axes = {}
for coord_name in set(dataarray.coords.keys()) - set(dataarray.dims):
coord_array = dataarray.coords[coord_name].to_cdms2()
cdms2_axis_cls = (
cdms2.coord.TransientAxis2D
if coord_array.ndim
else cdms2.auxcoord.TransientAuxAxis1D
)
cdms2_axis = cdms2_axis_cls(coord_array)
if cdms2_axis.isLongitude():
cdms2_axes["lon"] = cdms2_axis
elif cdms2_axis.isLatitude():
cdms2_axes["lat"] = cdms2_axis
if "lon" in cdms2_axes and "lat" in cdms2_axes:
if len(cdms2_axes["lon"].shape) == 2:
cdms2_grid = cdms2.hgrid.TransientCurveGrid(
cdms2_axes["lat"], cdms2_axes["lon"]
)
else:
cdms2_grid = cdms2.gengrid.AbstractGenericGrid(
cdms2_axes["lat"], cdms2_axes["lon"]
)
for axis in cdms2_grid.getAxisList():
cdms2_var.setAxis(cdms2_var.getAxisIds().index(axis.id), axis)
cdms2_var.setGrid(cdms2_grid)
return cdms2_var
def _pick_attrs(attrs, keys):
"""Return attrs with keys in keys list"""
return {k: v for k, v in attrs.items() if k in keys}
def _get_iris_args(attrs):
"""Converts the xarray attrs into args that can be passed into Iris"""
# iris.unit is deprecated in Iris v1.9
import cf_units
args = {"attributes": _filter_attrs(attrs, iris_forbidden_keys)}
args.update(_pick_attrs(attrs, ("standard_name", "long_name")))
unit_args = _pick_attrs(attrs, ("calendar",))
if "units" in attrs:
args["units"] = cf_units.Unit(attrs["units"], **unit_args)
return args
# TODO: Add converting bounds from xarray to Iris and back
def to_iris(dataarray):
"""Convert a DataArray into a Iris Cube"""
# Iris not a hard dependency
import iris
from iris.fileformats.netcdf import parse_cell_methods
dim_coords = []
aux_coords = []
for coord_name in dataarray.coords:
coord = encode(dataarray.coords[coord_name])
coord_args = _get_iris_args(coord.attrs)
coord_args["var_name"] = coord_name
axis = None
if coord.dims:
axis = dataarray.get_axis_num(coord.dims)
if coord_name in dataarray.dims:
try:
iris_coord = iris.coords.DimCoord(coord.values, **coord_args)
dim_coords.append((iris_coord, axis))
except ValueError:
iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
aux_coords.append((iris_coord, axis))
else:
iris_coord = iris.coords.AuxCoord(coord.values, **coord_args)
aux_coords.append((iris_coord, axis))
args = _get_iris_args(dataarray.attrs)
args["var_name"] = dataarray.name
args["dim_coords_and_dims"] = dim_coords
args["aux_coords_and_dims"] = aux_coords
if "cell_methods" in dataarray.attrs:
args["cell_methods"] = parse_cell_methods(dataarray.attrs["cell_methods"])
masked_data = duck_array_ops.masked_invalid(dataarray.data)
cube = iris.cube.Cube(masked_data, **args)
return cube
def _iris_obj_to_attrs(obj):
"""Return a dictionary of attrs when given a Iris object"""
attrs = {"standard_name": obj.standard_name, "long_name": obj.long_name}
if obj.units.calendar:
attrs["calendar"] = obj.units.calendar
if obj.units.origin != "1" and not obj.units.is_unknown():
attrs["units"] = obj.units.origin
attrs.update(obj.attributes)
return {k: v for k, v in attrs.items() if v is not None}
def _iris_cell_methods_to_str(cell_methods_obj):
"""Converts a Iris cell methods into a string"""
cell_methods = []
for cell_method in cell_methods_obj:
names = "".join(f"{n}: " for n in cell_method.coord_names)
intervals = " ".join(
f"interval: {interval}" for interval in cell_method.intervals
)
comments = " ".join(f"comment: {comment}" for comment in cell_method.comments)
extra = " ".join([intervals, comments]).strip()
if extra:
extra = f" ({extra})"
cell_methods.append(names + cell_method.method + extra)
return " ".join(cell_methods)
def _name(iris_obj, default="unknown"):
"""Mimicks `iris_obj.name()` but with different name resolution order.
Similar to iris_obj.name() method, but using iris_obj.var_name first to
enable roundtripping.
"""
return iris_obj.var_name or iris_obj.standard_name or iris_obj.long_name or default
def from_iris(cube):
"""Convert a Iris cube into an DataArray"""
import iris.exceptions
name = _name(cube)
if name == "unknown":
name = None
dims = []
for i in range(cube.ndim):
try:
dim_coord = cube.coord(dim_coords=True, dimensions=(i,))
dims.append(_name(dim_coord))
except iris.exceptions.CoordinateNotFoundError:
dims.append(f"dim_{i}")
if len(set(dims)) != len(dims):
duplicates = [k for k, v in Counter(dims).items() if v > 1]
raise ValueError(f"Duplicate coordinate name {duplicates}.")
coords = {}
for coord in cube.coords():
coord_attrs = _iris_obj_to_attrs(coord)
coord_dims = [dims[i] for i in cube.coord_dims(coord)]
if coord_dims:
coords[_name(coord)] = (coord_dims, coord.points, coord_attrs)
else:
coords[_name(coord)] = ((), coord.points.item(), coord_attrs)
array_attrs = _iris_obj_to_attrs(cube)
cell_methods = _iris_cell_methods_to_str(cube.cell_methods)
if cell_methods:
array_attrs["cell_methods"] = cell_methods
# Deal with iris 1.* and 2.*
cube_data = cube.core_data() if hasattr(cube, "core_data") else cube.data
# Deal with dask and numpy masked arrays
if isinstance(cube_data, dask_array_type):
from dask.array import ma as dask_ma
filled_data = dask_ma.filled(cube_data, get_fill_value(cube.dtype))
elif isinstance(cube_data, np.ma.MaskedArray):
filled_data = np.ma.filled(cube_data, get_fill_value(cube.dtype))
else:
filled_data = cube_data
dataarray = DataArray(
filled_data, coords=coords, name=name, attrs=array_attrs, dims=dims
)
decoded_ds = decode_cf(dataarray._to_temp_dataset())
return dataarray._from_temp_dataset(decoded_ds)
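# Hedged usage sketch (not part of this module): a DataArray -> Iris -> DataArray
# round trip with the helpers above. The variable name and attrs are illustrative
# and assume iris and cf_units are installed.
#
#     da = DataArray(np.arange(4.0), dims=["x"], name="temp",
#                    attrs={"units": "K", "long_name": "temperature"})
#     cube = to_iris(da)
#     roundtripped = from_iris(cube)
#     assert roundtripped.name == "temp"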
|
from typing import List
import voluptuous as vol
from homeassistant.components.device_automation import toggle_entity
from homeassistant.components.light import (
ATTR_FLASH,
FLASH_SHORT,
SUPPORT_FLASH,
VALID_BRIGHTNESS_PCT,
VALID_FLASH,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
CONF_DOMAIN,
CONF_TYPE,
SERVICE_TURN_ON,
)
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import config_validation as cv, entity_registry
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import ATTR_BRIGHTNESS_PCT, ATTR_BRIGHTNESS_STEP_PCT, DOMAIN, SUPPORT_BRIGHTNESS
TYPE_BRIGHTNESS_INCREASE = "brightness_increase"
TYPE_BRIGHTNESS_DECREASE = "brightness_decrease"
TYPE_FLASH = "flash"
ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_id,
vol.Required(CONF_DOMAIN): DOMAIN,
vol.Required(CONF_TYPE): vol.In(
toggle_entity.DEVICE_ACTION_TYPES
+ [TYPE_BRIGHTNESS_INCREASE, TYPE_BRIGHTNESS_DECREASE, TYPE_FLASH]
),
vol.Optional(ATTR_BRIGHTNESS_PCT): VALID_BRIGHTNESS_PCT,
vol.Optional(ATTR_FLASH): VALID_FLASH,
}
)
async def async_call_action_from_config(
hass: HomeAssistant,
config: ConfigType,
variables: TemplateVarsType,
context: Context,
) -> None:
"""Change state based on configuration."""
if (
config[CONF_TYPE] in toggle_entity.DEVICE_ACTION_TYPES
and config[CONF_TYPE] != toggle_entity.CONF_TURN_ON
):
await toggle_entity.async_call_action_from_config(
hass, config, variables, context, DOMAIN
)
return
data = {ATTR_ENTITY_ID: config[ATTR_ENTITY_ID]}
if config[CONF_TYPE] == TYPE_BRIGHTNESS_INCREASE:
data[ATTR_BRIGHTNESS_STEP_PCT] = 10
elif config[CONF_TYPE] == TYPE_BRIGHTNESS_DECREASE:
data[ATTR_BRIGHTNESS_STEP_PCT] = -10
elif ATTR_BRIGHTNESS_PCT in config:
data[ATTR_BRIGHTNESS_PCT] = config[ATTR_BRIGHTNESS_PCT]
if config[CONF_TYPE] == TYPE_FLASH:
if ATTR_FLASH in config:
data[ATTR_FLASH] = config[ATTR_FLASH]
else:
data[ATTR_FLASH] = FLASH_SHORT
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, data, blocking=True, context=context
)
async def async_get_actions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device actions."""
actions = await toggle_entity.async_get_actions(hass, device_id, DOMAIN)
registry = await entity_registry.async_get_registry(hass)
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
state = hass.states.get(entry.entity_id)
if state:
supported_features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
else:
supported_features = entry.supported_features
if supported_features & SUPPORT_BRIGHTNESS:
actions.extend(
(
{
CONF_TYPE: TYPE_BRIGHTNESS_INCREASE,
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
},
{
CONF_TYPE: TYPE_BRIGHTNESS_DECREASE,
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
},
)
)
if supported_features & SUPPORT_FLASH:
actions.extend(
(
{
CONF_TYPE: TYPE_FLASH,
"device_id": device_id,
"entity_id": entry.entity_id,
"domain": DOMAIN,
},
)
)
return actions
async def async_get_action_capabilities(hass: HomeAssistant, config: dict) -> dict:
"""List action capabilities."""
if config[CONF_TYPE] != toggle_entity.CONF_TURN_ON:
return {}
registry = await entity_registry.async_get_registry(hass)
entry = registry.async_get(config[ATTR_ENTITY_ID])
state = hass.states.get(config[ATTR_ENTITY_ID])
supported_features = 0
if state:
supported_features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
elif entry:
supported_features = entry.supported_features
extra_fields = {}
if supported_features & SUPPORT_BRIGHTNESS:
extra_fields[vol.Optional(ATTR_BRIGHTNESS_PCT)] = VALID_BRIGHTNESS_PCT
if supported_features & SUPPORT_FLASH:
extra_fields[vol.Optional(ATTR_FLASH)] = VALID_FLASH
return {"extra_fields": vol.Schema(extra_fields)} if extra_fields else {}
|
import speech_recognition as sr
from kalliope.core import Utils
from kalliope.stt.Utils import SpeechRecognition
class Wit(SpeechRecognition):
def __init__(self, callback=None, **kwargs):
"""
Start recording from the microphone and analyse the audio with the Wit.ai API
:param callback: The callback function to call to send the text
:param kwargs:
"""
# give the audio file path to process directly to the parent class if it exists
SpeechRecognition.__init__(self, kwargs.get('audio_file_path', None))
# callback function to call after the speech-to-text translation
self.main_controller_callback = callback
self.key = kwargs.get('key', None)
self.show_all = kwargs.get('show_all', False)
# start listening in the background
self.set_callback(self.wit_callback)
# start processing, record a sample from the microphone if no audio file path provided, else read the file
self.start_processing()
def wit_callback(self, recognizer, audio):
try:
captured_audio = recognizer.recognize_wit(audio,
key=self.key,
show_all=self.show_all)
Utils.print_success("Wit.ai Speech Recognition thinks you said %s" % captured_audio)
self._analyse_audio(captured_audio)
except sr.UnknownValueError:
Utils.print_warning("Wit.ai Speech Recognition could not understand audio")
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except sr.RequestError as e:
Utils.print_danger("Could not request results from Wit.ai Speech Recognition service; {0}".format(e))
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except AssertionError:
Utils.print_warning("No audio caught from microphone")
self._analyse_audio(audio_to_text=None)
def _analyse_audio(self, audio_to_text):
"""
Forward the captured text to the main controller callback, if one is set
:param audio_to_text: the captured audio
"""
if self.main_controller_callback is not None:
self.main_controller_callback(audio_to_text)
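# Minimal usage sketch (illustrative; the API key below is a placeholder):
#
#     def my_callback(text):
#         print("captured order:", text)
#
#     Wit(callback=my_callback, key="YOUR_WIT_AI_SERVER_TOKEN", show_all=False)
#
# Without an 'audio_file_path' kwarg the recogniser records from the microphone;
# passing 'audio_file_path' makes it transcribe that file instead.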
|
import pyvera as pv
from homeassistant.const import STATE_LOCKED, STATE_UNLOCKED
from homeassistant.core import HomeAssistant
from .common import ComponentFactory, new_simple_controller_config
from tests.async_mock import MagicMock
async def test_lock(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_device = MagicMock(spec=pv.VeraLock) # type: pv.VeraLock
vera_device.device_id = 1
vera_device.vera_device_id = vera_device.device_id
vera_device.name = "dev1"
vera_device.category = pv.CATEGORY_LOCK
vera_device.is_locked = MagicMock(return_value=False)
entity_id = "lock.dev1_1"
component_data = await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(devices=(vera_device,)),
)
update_callback = component_data.controller_data[0].update_callback
assert hass.states.get(entity_id).state == STATE_UNLOCKED
await hass.services.async_call(
"lock",
"lock",
{"entity_id": entity_id},
)
await hass.async_block_till_done()
vera_device.lock.assert_called()
vera_device.is_locked.return_value = True
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_LOCKED
await hass.services.async_call(
"lock",
"unlock",
{"entity_id": entity_id},
)
await hass.async_block_till_done()
vera_device.unlock.assert_called()
vera_device.is_locked.return_value = False
update_callback(vera_device)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_UNLOCKED
|
import errno
import os
import sys
import unittest
from itertools import dropwhile
from urwid.listbox import ListBox
from urwid.decoration import BoxAdapter
from urwid import vterm
from urwid import signals
from urwid.compat import B
class DummyCommand(object):
QUITSTRING = B('|||quit|||')
def __init__(self):
self.reader, self.writer = os.pipe()
def __call__(self):
# reset
stdout = getattr(sys.stdout, 'buffer', sys.stdout)
stdout.write(B('\x1bc'))
while True:
data = self.read(1024)
if self.QUITSTRING == data:
break
stdout.write(data)
stdout.flush()
def read(self, size):
while True:
try:
return os.read(self.reader, size)
except OSError as e:
if e.errno != errno.EINTR:
raise
def write(self, data):
os.write(self.writer, data)
def quit(self):
self.write(self.QUITSTRING)
class TermTest(unittest.TestCase):
def setUp(self):
self.command = DummyCommand()
self.term = vterm.Terminal(self.command)
self.resize(80, 24)
def tearDown(self):
self.command.quit()
def connect_signal(self, signal):
self._sig_response = None
def _set_signal_response(widget, *args, **kwargs):
self._sig_response = (args, kwargs)
self._set_signal_response = _set_signal_response
signals.connect_signal(self.term, signal, self._set_signal_response)
def expect_signal(self, *args, **kwargs):
self.assertEqual(self._sig_response, (args, kwargs))
def disconnect_signal(self, signal):
signals.disconnect_signal(self.term, signal, self._set_signal_response)
def caught_beep(self, obj):
self.beeped = True
def resize(self, width, height, soft=False):
self.termsize = (width, height)
if not soft:
self.term.render(self.termsize, focus=False)
def write(self, data):
data = B(data)
self.command.write(data.replace(B('\e'), B('\x1b')))
def flush(self):
self.write(chr(0x7f))
def read(self, raw=False, focus=False):
self.term.wait_and_feed()
rendered = self.term.render(self.termsize, focus=focus)
if raw:
is_empty = lambda c: c == (None, None, B(' '))
content = list(rendered.content())
lines = [list(dropwhile(is_empty, reversed(line)))
for line in content]
return [list(reversed(line)) for line in lines if len(line)]
else:
content = rendered.text
lines = [line.rstrip() for line in content]
return B('\n').join(lines).rstrip()
def expect(self, what, desc=None, raw=False, focus=False):
if not isinstance(what, list):
what = B(what)
got = self.read(raw=raw, focus=focus)
if desc is None:
desc = ''
else:
desc += '\n'
desc += 'Expected:\n%r\nGot:\n%r' % (what, got)
self.assertEqual(got, what, desc)
def test_simplestring(self):
self.write('hello world')
self.expect('hello world')
def test_linefeed(self):
self.write('hello\x0aworld')
self.expect('hello\nworld')
def test_linefeed2(self):
self.write('aa\b\b\eDbb')
self.expect('aa\nbb')
def test_carriage_return(self):
self.write('hello\x0dworld')
self.expect('world')
def test_insertlines(self):
self.write('\e[0;0flast\e[0;0f\e[10L\e[0;0ffirst\nsecond\n\e[11D')
self.expect('first\nsecond\n\n\n\n\n\n\n\n\nlast')
def test_deletelines(self):
self.write('1\n2\n3\n4\e[2;1f\e[2M')
self.expect('1\n4')
def test_nul(self):
self.write('a\0b')
self.expect('ab')
def test_movement(self):
self.write('\e[10;20H11\e[10;0f\e[20C\e[K')
self.expect('\n' * 9 + ' ' * 19 + '1')
self.write('\e[A\e[B\e[C\e[D\b\e[K')
self.expect('')
self.write('\e[50A2')
self.expect(' ' * 19 + '2')
self.write('\b\e[K\e[50B3')
self.expect('\n' * 23 + ' ' * 19 + '3')
self.write('\b\e[K' + '\eM' * 30 + '\e[100C4')
self.expect(' ' * 79 + '4')
self.write('\e[100D\e[K5')
self.expect('5')
def edgewall(self):
edgewall = '1-\e[1;%(x)df-2\e[%(y)d;1f3-\e[%(y)d;%(x)df-4\x0d'
self.write(edgewall % {'x': self.termsize[0] - 1,
'y': self.termsize[1] - 1})
def test_horizontal_resize(self):
self.resize(80, 24)
self.edgewall()
self.expect('1-' + ' ' * 76 + '-2' + '\n' * 22
+ '3-' + ' ' * 76 + '-4')
self.resize(78, 24, soft=True)
self.flush()
self.expect('1-' + '\n' * 22 + '3-')
self.resize(80, 24, soft=True)
self.flush()
self.expect('1-' + '\n' * 22 + '3-')
def test_vertical_resize(self):
self.resize(80, 24)
self.edgewall()
self.expect('1-' + ' ' * 76 + '-2' + '\n' * 22
+ '3-' + ' ' * 76 + '-4')
for y in range(23, 1, -1):
self.resize(80, y, soft=True)
self.write('\e[%df\e[J3-\e[%d;%df-4' % (y, y, 79))
desc = "try to rescale to 80x%d." % y
self.expect('\n' * (y - 2) + '3-' + ' ' * 76 + '-4', desc)
self.resize(80, 24, soft=True)
self.flush()
self.expect('1-' + ' ' * 76 + '-2' + '\n' * 22
+ '3-' + ' ' * 76 + '-4')
def write_movements(self, arg):
fmt = 'XXX\n\e[faaa\e[Bccc\e[Addd\e[Bfff\e[Cbbb\e[A\e[Deee'
self.write(fmt.replace('\e[', '\e['+arg))
def test_defargs(self):
self.write_movements('')
self.expect('aaa ddd eee\n ccc fff bbb')
def test_nullargs(self):
self.write_movements('0')
self.expect('aaa ddd eee\n ccc fff bbb')
def test_erase_line(self):
self.write('1234567890\e[5D\e[K\n1234567890\e[5D\e[1K\naaaaaaaaaaaaaaa\e[2Ka')
self.expect('12345\n 7890\n a')
def test_erase_display(self):
self.write('1234567890\e[5D\e[Ja')
self.expect('12345a')
self.write('98765\e[8D\e[1Jx')
self.expect(' x5a98765')
def test_scrolling_region_simple(self):
self.write('\e[10;20r\e[10f1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\e[faa')
self.expect('aa' + '\n' * 9 + '2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12')
def test_scrolling_region_reverse(self):
self.write('\e[2J\e[1;2r\e[5Baaa\r\eM\eM\eMbbb\nXXX')
self.expect('\n\nbbb\nXXX\n\naaa')
def test_scrolling_region_move(self):
self.write('\e[10;20r\e[2J\e[10Bfoo\rbar\rblah\rmooh\r\e[10Aone\r\eM\eMtwo\r\eM\eMthree\r\eM\eMa')
self.expect('ahree\n\n\n\n\n\n\n\n\n\nmooh')
def test_scrolling_twice(self):
self.write('\e[?6h\e[10;20r\e[2;5rtest')
self.expect('\ntest')
def test_cursor_scrolling_region(self):
self.write('\e[?6h\e[10;20r\e[10f1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\e[faa')
self.expect('\n' * 9 + 'aa\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12')
def test_scrolling_region_simple_with_focus(self):
self.write('\e[10;20r\e[10f1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\e[faa')
self.expect('aa' + '\n' * 9 + '2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12', focus=True)
def test_scrolling_region_reverse_with_focus(self):
self.write('\e[2J\e[1;2r\e[5Baaa\r\eM\eM\eMbbb\nXXX')
self.expect('\n\nbbb\nXXX\n\naaa', focus=True)
def test_scrolling_region_move_with_focus(self):
self.write('\e[10;20r\e[2J\e[10Bfoo\rbar\rblah\rmooh\r\e[10Aone\r\eM\eMtwo\r\eM\eMthree\r\eM\eMa')
self.expect('ahree\n\n\n\n\n\n\n\n\n\nmooh', focus=True)
def test_scrolling_twice_with_focus(self):
self.write('\e[?6h\e[10;20r\e[2;5rtest')
self.expect('\ntest', focus=True)
def test_cursor_scrolling_region_with_focus(self):
self.write('\e[?6h\e[10;20r\e[10f1\n2\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12\e[faa')
self.expect('\n' * 9 + 'aa\n3\n4\n5\n6\n7\n8\n9\n10\n11\n12', focus=True)
def test_relative_region_jump(self):
self.write('\e[21H---\e[10;20r\e[?6h\e[18Htest')
self.expect('\n' * 19 + 'test\n---')
def test_set_multiple_modes(self):
self.write('\e[?6;5htest')
self.expect('test')
self.assertTrue(self.term.term_modes.constrain_scrolling)
self.assertTrue(self.term.term_modes.reverse_video)
self.write('\e[?6;5l')
self.expect('test')
self.assertFalse(self.term.term_modes.constrain_scrolling)
self.assertFalse(self.term.term_modes.reverse_video)
def test_wrap_simple(self):
self.write('\e[?7h\e[1;%dHtt' % self.term.width)
self.expect(' ' * (self.term.width - 1) + 't\nt')
def test_wrap_backspace_tab(self):
self.write('\e[?7h\e[1;%dHt\b\b\t\ta' % self.term.width)
self.expect(' ' * (self.term.width - 1) + 'a')
def test_cursor_visibility(self):
self.write('\e[?25linvisible')
self.expect('invisible', focus=True)
self.assertEqual(self.term.term.cursor, None)
self.write('\rvisible\e[?25h\e[K')
self.expect('visible', focus=True)
self.assertNotEqual(self.term.term.cursor, None)
def test_get_utf8_len(self):
length = self.term.term.get_utf8_len(int("11110000", 2))
self.assertEqual(length, 3)
length = self.term.term.get_utf8_len(int("11000000", 2))
self.assertEqual(length, 1)
length = self.term.term.get_utf8_len(int("11111101", 2))
self.assertEqual(length, 5)
def test_encoding_unicode(self):
vterm.util._target_encoding = 'utf-8'
self.write('\e%G\xe2\x80\x94')
self.expect('\xe2\x80\x94')
def test_encoding_unicode_ascii(self):
vterm.util._target_encoding = 'ascii'
self.write('\e%G\xe2\x80\x94')
self.expect('?')
def test_encoding_wrong_unicode(self):
vterm.util._target_encoding = 'utf-8'
self.write('\e%G\xc0\x99')
self.expect('')
def test_encoding_vt100_graphics(self):
vterm.util._target_encoding = 'ascii'
self.write('\e)0\e(0\x0fg\x0eg\e)Bn\e)0g\e)B\e(B\x0fn')
self.expect([[
(None, '0', B('g')), (None, '0', B('g')),
(None, None, B('n')), (None, '0', B('g')),
(None, None, B('n'))
]], raw=True)
def test_ibmpc_mapping(self):
vterm.util._target_encoding = 'ascii'
self.write('\e[11m\x18\e[10m\x18')
self.expect([[(None, 'U', B('\x18'))]], raw=True)
self.write('\ec\e)U\x0e\x18\x0f\e[3h\x18\e[3l\x18')
self.expect([[(None, None, B('\x18'))]], raw=True)
self.write('\ec\e[11m\xdb\x18\e[10m\xdb')
self.expect([[
(None, 'U', B('\xdb')), (None, 'U', B('\x18')),
(None, None, B('\xdb'))
]], raw=True)
def test_set_title(self):
self._the_title = None
def _change_title(widget, title):
self._the_title = title
self.connect_signal('title')
self.write('\e]666parsed right?\e\\te\e]0;test title\007st1')
self.expect('test1')
self.expect_signal(B('test title'))
self.write('\e]3;stupid title\e\\\e[0G\e[2Ktest2')
self.expect('test2')
self.expect_signal(B('stupid title'))
self.disconnect_signal('title')
def test_set_leds(self):
self.connect_signal('leds')
self.write('\e[0qtest1')
self.expect('test1')
self.expect_signal('clear')
self.write('\e[3q\e[H\e[Ktest2')
self.expect('test2')
self.expect_signal('caps_lock')
self.disconnect_signal('leds')
def test_in_listbox(self):
listbox = ListBox([BoxAdapter(self.term, 80)])
rendered = listbox.render((80, 24))
|
import logging
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'ping'
BENCHMARK_CONFIG = """
ping:
description: Benchmarks ping latency over internal IP addresses
vm_groups:
vm_1:
vm_spec: *default_single_core
vm_2:
vm_spec: *default_single_core
"""
METRICS = ('Min Latency', 'Average Latency', 'Max Latency', 'Latency Std Dev')
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec): # pylint: disable=unused-argument
"""Install ping on the target vm.
Checks that there are exactly two vms specified.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
if len(benchmark_spec.vms) != 2:
raise ValueError(
'Ping benchmark requires exactly two machines, found {0}'
.format(len(benchmark_spec.vms)))
if vm_util.ShouldRunOnExternalIpAddress():
vms = benchmark_spec.vms
for vm in vms:
vm.AllowIcmp()
def Run(benchmark_spec):
"""Run ping on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
results = []
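# vms holds exactly two machines, so this iterates over (vms, reversed(vms)),
# i.e. pings in both directions: vm_1 -> vm_2 and vm_2 -> vm_1.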
for sending_vm, receiving_vm in vms, reversed(vms):
if vm_util.ShouldRunOnExternalIpAddress():
ip_type = vm_util.IpAddressMetadata.EXTERNAL
results = results + _RunPing(sending_vm,
receiving_vm,
receiving_vm.ip_address,
ip_type)
if vm_util.ShouldRunOnInternalIpAddress(sending_vm, receiving_vm):
ip_type = vm_util.IpAddressMetadata.INTERNAL
results = results + _RunPing(sending_vm,
receiving_vm,
receiving_vm.internal_ip,
ip_type)
return results
def _RunPing(sending_vm, receiving_vm, receiving_ip, ip_type):
"""Run ping using 'sending_vm' to connect to 'receiving_ip'.
Args:
sending_vm: The VM issuing the ping request.
receiving_vm: The VM receiving the ping. Needed for metadata.
receiving_ip: The IP address to be pinged.
ip_type: The type of 'receiving_ip', either
vm_util.IpAddressSubset.INTERNAL or
vm_util.IpAddressSubset.EXTERNAL.
Returns:
A list of samples, with one sample for each metric.
"""
if (ip_type == vm_util.IpAddressMetadata.INTERNAL and
not sending_vm.IsReachable(receiving_vm)):
logging.warning('%s is not reachable from %s', receiving_vm, sending_vm)
return []
logging.info('Ping results (ip_type = %s):', ip_type)
ping_cmd = 'ping -c 100 %s' % receiving_ip
stdout, _ = sending_vm.RemoteCommand(ping_cmd, should_log=True)
stats = re.findall('([0-9]*\\.[0-9]*)', stdout.splitlines()[-1])
assert len(stats) == len(METRICS), stats
results = []
metadata = {'ip_type': ip_type,
'receiving_zone': receiving_vm.zone,
'sending_zone': sending_vm.zone}
for i, metric in enumerate(METRICS):
results.append(sample.Sample(metric, float(stats[i]), 'ms', metadata))
return results
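# A minimal sketch (not part of the benchmark) of how the regex in _RunPing
# extracts the four METRICS from a typical Linux ping summary line; the sample
# line below is illustrative, not captured from a real run.
def _example_parse_ping_summary():
    summary = 'rtt min/avg/max/mdev = 0.041/0.048/0.061/0.008 ms'
    stats = re.findall('([0-9]*\\.[0-9]*)', summary)
    # stats == ['0.041', '0.048', '0.061', '0.008'], one value per METRICS entry
    return dict(zip(METRICS, (float(s) for s in stats)))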
def Cleanup(benchmark_spec): # pylint: disable=unused-argument
"""Cleanup ping on the target vm (by uninstalling).
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
pass
|
import logging
from rocketchat_API.APIExceptions.RocketExceptions import (
RocketAuthenticationException,
RocketConnectionException,
)
from rocketchat_API.rocketchat import RocketChat
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import (
CONF_PASSWORD,
CONF_ROOM,
CONF_URL,
CONF_USERNAME,
HTTP_OK,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# pylint: disable=no-value-for-parameter
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_URL): vol.Url(),
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_ROOM): cv.string,
}
)
def get_service(hass, config, discovery_info=None):
"""Return the notify service."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
url = config.get(CONF_URL)
room = config.get(CONF_ROOM)
try:
return RocketChatNotificationService(url, username, password, room)
except RocketConnectionException:
_LOGGER.warning("Unable to connect to Rocket.Chat server at %s", url)
except RocketAuthenticationException:
_LOGGER.warning("Rocket.Chat authentication failed for user %s", username)
_LOGGER.info("Please check your username/password")
return None
class RocketChatNotificationService(BaseNotificationService):
"""Implement the notification service for Rocket.Chat."""
def __init__(self, url, username, password, room):
"""Initialize the service."""
self._room = room
self._server = RocketChat(username, password, server_url=url)
def send_message(self, message="", **kwargs):
"""Send a message to Rocket.Chat."""
data = kwargs.get(ATTR_DATA) or {}
resp = self._server.chat_post_message(message, channel=self._room, **data)
if resp.status_code == HTTP_OK:
success = resp.json()["success"]
if not success:
_LOGGER.error("Unable to post Rocket.Chat message")
else:
_LOGGER.error(
"Incorrect status code when posting message: %d", resp.status_code
)
|
from __future__ import absolute_import
import unittest
import tempfile, gzip, os, os.path, gc, shutil
from .common_imports import (
etree, ElementTree, _str, _bytes,
SillyFileLike, LargeFileLike, HelperTestCase,
read_file, write_to_file, BytesIO, tmpfile
)
class _IOTestCaseBase(HelperTestCase):
"""(c)ElementTree compatibility for IO functions/methods
"""
etree = None
def setUp(self):
"""Setting up a minimal tree
"""
self.root = self.etree.Element('a')
self.root_str = self.etree.tostring(self.root)
self.tree = self.etree.ElementTree(self.root)
self._temp_dir = tempfile.mkdtemp()
def tearDown(self):
gc.collect()
shutil.rmtree(self._temp_dir)
def getTestFilePath(self, name):
return os.path.join(self._temp_dir, name)
def buildNodes(self, element, children, depth):
Element = self.etree.Element
if depth == 0:
return
for i in range(children):
new_element = Element('element_%s_%s' % (depth, i))
self.buildNodes(new_element, children, depth - 1)
element.append(new_element)
def test_tree_io(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
element = Element('top')
element.text = _str("qwrtioüöä\uAABB")
tree = ElementTree(element)
self.buildNodes(element, 10, 3)
with open(self.getTestFilePath('testdump.xml'), 'wb') as f:
tree.write(f, encoding='UTF-8')
with open(self.getTestFilePath('testdump.xml'), 'rb') as f:
tree = ElementTree(file=f)
with open(self.getTestFilePath('testdump2.xml'), 'wb') as f:
tree.write(f, encoding='UTF-8')
with open(self.getTestFilePath('testdump.xml'), 'rb') as f:
data1 = f.read()
with open(self.getTestFilePath('testdump2.xml'), 'rb') as f:
data2 = f.read()
self.assertEqual(data1, data2)
def test_tree_io_latin1(self):
Element = self.etree.Element
ElementTree = self.etree.ElementTree
element = Element('top')
element.text = _str("qwrtioüöäßá")
tree = ElementTree(element)
self.buildNodes(element, 10, 3)
with open(self.getTestFilePath('testdump.xml'), 'wb') as f:
tree.write(f, encoding='iso-8859-1')
with open(self.getTestFilePath('testdump.xml'), 'rb') as f:
tree = ElementTree(file=f)
with open(self.getTestFilePath('testdump2.xml'), 'wb') as f:
tree.write(f, encoding='iso-8859-1')
with open(self.getTestFilePath('testdump.xml'), 'rb') as f:
data1 = f.read()
with open(self.getTestFilePath('testdump2.xml'), 'rb') as f:
data2 = f.read()
self.assertEqual(data1, data2)
def test_write_filename(self):
# (c)ElementTree supports filename strings as write argument
with tmpfile(prefix="p", suffix=".xml") as filename:
self.tree.write(filename)
self.assertEqual(read_file(filename, 'rb').replace(b'\n', b''),
self.root_str)
def test_write_filename_special_percent(self):
# '%20' is a URL escaped space character.
before_test = os.listdir(tempfile.gettempdir())
def difference(filenames):
return sorted(
fn for fn in set(filenames).difference(before_test)
if fn.startswith('lxmltmp-')
)
with tmpfile(prefix="lxmltmp-p%20p", suffix=".xml") as filename:
try:
before_write = os.listdir(tempfile.gettempdir())
self.tree.write(filename)
after_write = os.listdir(tempfile.gettempdir())
self.assertEqual(read_file(filename, 'rb').replace(b'\n', b''),
self.root_str)
except (AssertionError, IOError, OSError):
print("Before write: %s, after write: %s" % (
difference(before_write), difference(after_write))
)
raise
def test_write_filename_special_plus(self):
# '+' is used as an escaped space character in URLs.
with tmpfile(prefix="p+", suffix=".xml") as filename:
self.tree.write(filename)
self.assertEqual(read_file(filename, 'rb').replace(b'\n', b''),
self.root_str)
def test_write_invalid_filename(self):
filename = os.path.join(
os.path.join('hopefullynonexistingpathname'),
'invalid_file.xml')
try:
self.tree.write(filename)
except IOError:
pass
else:
self.assertTrue(
False, "writing to an invalid file path should fail")
def test_module_parse_gzipobject(self):
# (c)ElementTree supports gzip instance as parse argument
with tmpfile(suffix=".xml.gz") as filename:
with gzip.open(filename, 'wb') as f:
f.write(self.root_str)
with gzip.open(filename, 'rb') as f_gz:
tree = self.etree.parse(f_gz)
self.assertEqual(self.etree.tostring(tree.getroot()), self.root_str)
def test_class_parse_filename(self):
# (c)ElementTree class ElementTree has a 'parse' method that returns
# the root of the tree
# parse from filename
with tmpfile(suffix=".xml") as filename:
write_to_file(filename, self.root_str, 'wb')
tree = self.etree.ElementTree()
root = tree.parse(filename)
self.assertEqual(self.etree.tostring(root), self.root_str)
def test_class_parse_filename_remove_previous(self):
with tmpfile(suffix=".xml") as filename:
write_to_file(filename, self.root_str, 'wb')
tree = self.etree.ElementTree()
root = tree.parse(filename)
# and now do it again; previous content should still be there
root2 = tree.parse(filename)
self.assertEqual('a', root.tag)
self.assertEqual('a', root2.tag)
# now remove all references to root2, and parse again
del root2
root3 = tree.parse(filename)
self.assertEqual('a', root.tag)
self.assertEqual('a', root3.tag)
# root2's memory should've been freed here
# XXX how to check?
def test_class_parse_fileobject(self):
# (c)ElementTree class ElementTree has a 'parse' method that returns
# the root of the tree
# parse from file object
handle, filename = tempfile.mkstemp(suffix=".xml")
try:
os.write(handle, self.root_str)
with open(filename, 'rb') as f:
tree = self.etree.ElementTree()
root = tree.parse(f)
self.assertEqual(self.etree.tostring(root), self.root_str)
finally:
os.close(handle)
os.remove(filename)
def test_class_parse_unnamed_fileobject(self):
# (c)ElementTree class ElementTree has a 'parse' method that returns
# the root of the tree
# parse from unnamed file object
f = SillyFileLike()
root = self.etree.ElementTree().parse(f)
self.assertTrue(root.tag.endswith('foo'))
def test_module_parse_large_fileobject(self):
# parse from unnamed file object
f = LargeFileLike()
tree = self.etree.parse(f)
root = tree.getroot()
self.assertTrue(root.tag.endswith('root'))
def test_module_parse_fileobject_error(self):
class LocalError(Exception):
pass
class TestFile:
def read(*args):
raise LocalError
f = TestFile()
self.assertRaises(LocalError, self.etree.parse, f)
def test_module_parse_fileobject_late_error(self):
class LocalError(Exception):
pass
class TestFile:
data = '<root>test</'
try:
next_char = iter(data).next
except AttributeError:
# Python 3
next_char = iter(data).__next__
counter = 0
def read(self, amount=None):
if amount is None:
while True:
self.read(1)
else:
try:
self.counter += 1
return _bytes(self.next_char())
except StopIteration:
raise LocalError
f = TestFile()
self.assertRaises(LocalError, self.etree.parse, f)
self.assertEqual(f.counter, len(f.data)+1)
def test_module_parse_fileobject_type_error(self):
class TestFile:
def read(*args):
return 1
f = TestFile()
try:
expect_exc = (TypeError, self.etree.ParseError)
except AttributeError:
expect_exc = TypeError
self.assertRaises(expect_exc, self.etree.parse, f)
def test_etree_parse_io_error(self):
# test both a plain ASCII directory name and one with characters beyond latin-1
dirnameEN = _str('Directory')
dirnameRU = _str('Каталог')
filename = _str('nosuchfile.xml')
dn = tempfile.mkdtemp(prefix=dirnameEN)
try:
self.assertRaises(IOError, self.etree.parse, os.path.join(dn, filename))
finally:
os.rmdir(dn)
dn = tempfile.mkdtemp(prefix=dirnameRU)
try:
self.assertRaises(IOError, self.etree.parse, os.path.join(dn, filename))
finally:
os.rmdir(dn)
def test_parse_utf8_bom(self):
utext = _str('Søk på nettet')
uxml = '<?xml version="1.0" encoding="UTF-8"?><p>%s</p>' % utext
bom = _bytes('\\xEF\\xBB\\xBF').decode(
"unicode_escape").encode("latin1")
self.assertEqual(3, len(bom))
f = tempfile.NamedTemporaryFile(delete=False)
try:
try:
f.write(bom)
f.write(uxml.encode("utf-8"))
finally:
f.close()
tree = self.etree.parse(f.name)
finally:
os.unlink(f.name)
self.assertEqual(utext, tree.getroot().text)
def test_iterparse_utf8_bom(self):
utext = _str('Søk på nettet')
uxml = '<?xml version="1.0" encoding="UTF-8"?><p>%s</p>' % utext
bom = _bytes('\\xEF\\xBB\\xBF').decode(
"unicode_escape").encode("latin1")
self.assertEqual(3, len(bom))
f = tempfile.NamedTemporaryFile(delete=False)
try:
try:
f.write(bom)
f.write(uxml.encode("utf-8"))
finally:
f.close()
elements = [el for _, el in self.etree.iterparse(f.name)]
self.assertEqual(1, len(elements))
root = elements[0]
finally:
os.unlink(f.name)
self.assertEqual(utext, root.text)
def test_iterparse_utf16_bom(self):
utext = _str('Søk på nettet')
uxml = '<?xml version="1.0" encoding="UTF-16"?><p>%s</p>' % utext
boms = _bytes('\\xFE\\xFF \\xFF\\xFE').decode(
"unicode_escape").encode("latin1")
self.assertEqual(5, len(boms))
xml = uxml.encode("utf-16")
self.assertTrue(xml[:2] in boms, repr(xml[:2]))
f = tempfile.NamedTemporaryFile(delete=False)
try:
try:
f.write(xml)
finally:
f.close()
elements = [el for _, el in self.etree.iterparse(f.name)]
self.assertEqual(1, len(elements))
root = elements[0]
finally:
os.unlink(f.name)
self.assertEqual(utext, root.text)
class ETreeIOTestCase(_IOTestCaseBase):
etree = etree
def test_write_compressed_text(self):
Element = self.etree.Element
SubElement = self.etree.SubElement
ElementTree = self.etree.ElementTree
text = _str("qwrtioüöä")
root = Element('root')
root.text = text
child = SubElement(root, 'sub')
child.text = 'TEXT'
child.tail = 'TAIL'
SubElement(root, 'sub').text = text
tree = ElementTree(root)
out = BytesIO()
tree.write(out, method='text', encoding='utf8', compression=9)
out.seek(0)
f = gzip.GzipFile(fileobj=out)
try:
result = f.read().decode('utf8')
finally:
f.close()
self.assertEqual(text+'TEXTTAIL'+text, result)
if ElementTree:
class ElementTreeIOTestCase(_IOTestCaseBase):
etree = ElementTree
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ETreeIOTestCase)])
if ElementTree:
suite.addTests([unittest.makeSuite(ElementTreeIOTestCase)])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
from django.test import TestCase
from translate.storage.placeables.strelem import StringElem
from translate.storage.xliff import xlifffile
from weblate.trans.tests.utils import get_test_file
from weblate.trans.util import rich_to_xliff_string, xliff_string_to_rich
TEST_X = get_test_file("placeholder-x.xliff")
TEST_MRK = get_test_file("placeholder-mrk.xliff")
class XliffPlaceholdersTest(TestCase):
def test_bidirectional_xliff_string(self):
cases = [
'foo <x id="INTERPOLATION" equiv-text="{{ angular }}"/> bar',
"",
"hello world",
"hello <p>world</p>",
]
for string in cases:
rich = xliff_string_to_rich(string)
self.assertTrue(isinstance(rich, list))
self.assertTrue(isinstance(rich[0], StringElem))
final_string = rich_to_xliff_string(rich)
self.assertEqual(string, final_string)
def test_xliff_roundtrip(self):
with open(TEST_X, "rb") as handle:
source = handle.read()
store = xlifffile.parsestring(source)
string = rich_to_xliff_string(store.units[0].rich_source)
self.assertEqual(
'T: <x id="INTERPOLATION" equiv-text="{{ angular }}"/>', string
)
store.units[0].rich_source = xliff_string_to_rich(string)
self.assertEqual(source, bytes(store))
def test_xliff_roundtrip_unknown(self):
with open(TEST_MRK, "rb") as handle:
source = handle.read()
store = xlifffile.parsestring(source)
string = rich_to_xliff_string(store.units[0].rich_source)
self.assertEqual('T: <mrk mtype="protected">%s</mrk>', string)
store.units[0].rich_source = xliff_string_to_rich(string)
self.assertEqual(source, bytes(store))
|
from unittest import TestCase
import pandas as pd
import numpy as np
from scattertext.termscoring.BM25Difference import BM25Difference
from scattertext.test.test_termDocMatrixFactory import build_hamlet_jz_corpus
class TestBM25Difference(TestCase):
@classmethod
def setUpClass(cls):
cls.corpus = build_hamlet_jz_corpus()
def test_get_scores(self):
result = BM25Difference(self.corpus).set_categories('hamlet').get_scores()
self.assertEqual(type(result), pd.Series)
np.testing.assert_array_equal(np.array(result.index), self.corpus.get_terms())
def test_get_name(self):
self.assertEqual(BM25Difference(self.corpus).set_categories('hamlet').get_name(), 'BM25 difference')
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
SERVICE_ALARM_TRIGGER,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
}
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state:
return
service_data = {ATTR_ENTITY_ID: state.entity_id}
if state.state == STATE_ALARM_ARMED_AWAY:
service = SERVICE_ALARM_ARM_AWAY
elif state.state == STATE_ALARM_ARMED_CUSTOM_BYPASS:
service = SERVICE_ALARM_ARM_CUSTOM_BYPASS
elif state.state == STATE_ALARM_ARMED_HOME:
service = SERVICE_ALARM_ARM_HOME
elif state.state == STATE_ALARM_ARMED_NIGHT:
service = SERVICE_ALARM_ARM_NIGHT
elif state.state == STATE_ALARM_DISARMED:
service = SERVICE_ALARM_DISARM
elif state.state == STATE_ALARM_TRIGGERED:
service = SERVICE_ALARM_TRIGGER
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Alarm control panel states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
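# Hedged usage sketch (entity id and context are illustrative): restoring a
# stored scene state through the helper above.
#
#     await async_reproduce_states(
#         hass,
#         [State("alarm_control_panel.home", STATE_ALARM_ARMED_AWAY)],
#         context=Context(),
#     )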
|
from homeassistant.components.cover import (
ATTR_POSITION,
ATTR_TILT_POSITION,
DOMAIN,
CoverEntity,
)
from . import FIBARO_DEVICES, FibaroDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Fibaro covers."""
if discovery_info is None:
return
add_entities(
[FibaroCover(device) for device in hass.data[FIBARO_DEVICES]["cover"]], True
)
class FibaroCover(FibaroDevice, CoverEntity):
"""Representation a Fibaro Cover."""
def __init__(self, fibaro_device):
"""Initialize the Vera device."""
super().__init__(fibaro_device)
self.entity_id = f"{DOMAIN}.{self.ha_id}"
@staticmethod
def bound(position):
"""Normalize the position."""
if position is None:
return None
position = int(position)
if position <= 5:
return 0
if position >= 95:
return 100
return position
@property
def current_cover_position(self):
"""Return current position of cover. 0 is closed, 100 is open."""
return self.bound(self.level)
@property
def current_cover_tilt_position(self):
"""Return the current tilt position for venetian blinds."""
return self.bound(self.level2)
def set_cover_position(self, **kwargs):
"""Move the cover to a specific position."""
self.set_level(kwargs.get(ATTR_POSITION))
def set_cover_tilt_position(self, **kwargs):
"""Move the cover to a specific position."""
self.set_level2(kwargs.get(ATTR_TILT_POSITION))
@property
def is_closed(self):
"""Return if the cover is closed."""
if self.current_cover_position is None:
return None
return self.current_cover_position == 0
def open_cover(self, **kwargs):
"""Open the cover."""
self.action("open")
def close_cover(self, **kwargs):
"""Close the cover."""
self.action("close")
def open_cover_tilt(self, **kwargs):
"""Open the cover tilt."""
self.set_level2(100)
def close_cover_tilt(self, **kwargs):
"""Close the cover."""
self.set_level2(0)
def stop_cover(self, **kwargs):
"""Stop the cover."""
self.action("stop")
|
import logging
from homeassistant.components.media_player import DOMAIN as MP_DOMAIN
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from .const import DISCOVERY_TASK, DOMAIN, PLAYER_DISCOVERY_UNSUB
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Logitech Squeezebox component."""
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Logitech Squeezebox from a config entry."""
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, MP_DOMAIN)
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
# Stop player discovery task for this config entry.
hass.data[DOMAIN][entry.entry_id][PLAYER_DISCOVERY_UNSUB]()
# Remove stored data for this config entry
hass.data[DOMAIN].pop(entry.entry_id)
# Stop server discovery task if this is the last config entry.
current_entries = hass.config_entries.async_entries(DOMAIN)
if len(current_entries) == 1 and current_entries[0] == entry:
_LOGGER.debug("Stopping server discovery task")
hass.data[DOMAIN][DISCOVERY_TASK].cancel()
hass.data[DOMAIN].pop(DISCOVERY_TASK)
return await hass.config_entries.async_forward_entry_unload(entry, MP_DOMAIN)
|
from datetime import datetime
from urllib.parse import urlparse
from django.conf import settings
from django.utils.html import escape
from django.utils.http import url_has_allowed_host_and_scheme
from django.utils.safestring import mark_safe
from django.utils.translation import gettext as _
import weblate
import weblate.screenshots.views
from weblate.configuration.views import CustomCSSView
from weblate.utils.site import get_site_domain, get_site_url
from weblate.wladmin.models import ConfigurationError
WEBLATE_URL = "https://weblate.org/"
DONATE_URL = "https://weblate.org/donate/"
CONTEXT_SETTINGS = [
"SITE_TITLE",
"OFFER_HOSTING",
"ENABLE_AVATARS",
"ENABLE_SHARING",
"MATOMO_SITE_ID",
"MATOMO_URL",
"GOOGLE_ANALYTICS_ID",
"ENABLE_HOOKS",
"REGISTRATION_OPEN",
"STATUS_URL",
"LEGAL_URL",
"FONTS_CDN_URL",
"AVATAR_URL_PREFIX",
"HIDE_VERSION",
# Hosted Weblate integration
"PAYMENT_ENABLED",
]
CONTEXT_APPS = ["billing", "legal", "gitexport"]
def add_error_logging_context(context):
if (
hasattr(settings, "ROLLBAR")
and "client_token" in settings.ROLLBAR
and "environment" in settings.ROLLBAR
):
context["rollbar_token"] = settings.ROLLBAR["client_token"]
context["rollbar_environment"] = settings.ROLLBAR["environment"]
else:
context["rollbar_token"] = None
context["rollbar_environment"] = None
if hasattr(settings, "RAVEN_CONFIG") and "public_dsn" in settings.RAVEN_CONFIG:
context["sentry_dsn"] = settings.RAVEN_CONFIG["public_dsn"]
else:
context["sentry_dsn"] = None
def add_settings_context(context):
for name in CONTEXT_SETTINGS:
context[name.lower()] = getattr(settings, name, None)
def add_optional_context(context):
for name in CONTEXT_APPS:
appname = f"weblate.{name}"
context[f"has_{name}"] = appname in settings.INSTALLED_APPS
def get_preconnect_list():
result = []
if settings.MATOMO_URL:
result.append(urlparse(settings.MATOMO_URL).hostname)
if settings.GOOGLE_ANALYTICS_ID:
result.append("www.google-analytics.com")
return result
def get_bread_image(path):
if path == "/":
return "dashboard.svg"
first = path.split("/", 2)[1]
if first in ("user", "accounts"):
return "account.svg"
if first == "checks":
return "alert.svg"
if first == "languages":
return "language.svg"
if first == "manage":
return "wrench.svg"
if first in ("about", "stats", "keys", "legal"):
return "weblate.svg"
if first in (
"glossaries",
"upload-glossaries",
"delete-glossaries",
"edit-glossaries",
):
return "glossary.svg"
return "project.svg"
def weblate_context(request):
"""Context processor to inject various useful variables into context."""
if url_has_allowed_host_and_scheme(request.GET.get("next", ""), allowed_hosts=None):
login_redirect_url = request.GET["next"]
else:
login_redirect_url = request.get_full_path()
# Load user translations if user is authenticated
watched_projects = None
if hasattr(request, "user") and request.user.is_authenticated:
watched_projects = request.user.watched_projects
if settings.OFFER_HOSTING:
description = _("Hosted Weblate, the place to localize your software project.")
else:
description = _(
"This site runs Weblate for localizing various software projects."
)
context = {
"cache_param": f"?v={weblate.GIT_VERSION}"
if not settings.COMPRESS_ENABLED
else "",
"version": weblate.VERSION,
"bread_image": get_bread_image(request.path),
"description": description,
"weblate_link": mark_safe(
'<a href="{}">weblate.org</a>'.format(escape(WEBLATE_URL))
),
"weblate_name_link": mark_safe(
'<a href="{}">Weblate</a>'.format(escape(WEBLATE_URL))
),
"weblate_version_link": mark_safe(
'<a href="{}">Weblate {}</a>'.format(
escape(WEBLATE_URL), "" if settings.HIDE_VERSION else weblate.VERSION
)
),
"donate_url": DONATE_URL,
"site_url": get_site_url(),
"site_domain": get_site_domain(),
"current_date": datetime.utcnow().strftime("%Y-%m-%d"),
"current_year": datetime.utcnow().strftime("%Y"),
"current_month": datetime.utcnow().strftime("%m"),
"login_redirect_url": login_redirect_url,
"has_ocr": weblate.screenshots.views.HAS_OCR,
"has_antispam": bool(settings.AKISMET_API_KEY),
"has_sentry": bool(settings.SENTRY_DSN),
"watched_projects": watched_projects,
"allow_index": False,
"configuration_errors": ConfigurationError.objects.filter(
ignored=False
).order_by("-timestamp"),
"preconnect_list": get_preconnect_list(),
"custom_css_hash": CustomCSSView.get_hash(request),
}
add_error_logging_context(context)
add_settings_context(context)
add_optional_context(context)
return context
|
import logging
import re
import aiohomekit
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import zeroconf
from homeassistant.core import callback
from homeassistant.helpers.device_registry import (
CONNECTION_NETWORK_MAC,
async_get_registry as async_get_device_registry,
)
from .connection import get_accessory_name, get_bridge_information
from .const import DOMAIN, KNOWN_DEVICES
HOMEKIT_DIR = ".homekit"
HOMEKIT_BRIDGE_DOMAIN = "homekit"
HOMEKIT_BRIDGE_SERIAL_NUMBER = "homekit.bridge"
HOMEKIT_BRIDGE_MODEL = "Home Assistant HomeKit Bridge"
PAIRING_FILE = "pairing.json"
MDNS_SUFFIX = "._hap._tcp.local."
PIN_FORMAT = re.compile(r"^(\d{3})-{0,1}(\d{2})-{0,1}(\d{3})$")
_LOGGER = logging.getLogger(__name__)
DISALLOWED_CODES = {
"00000000",
"11111111",
"22222222",
"33333333",
"44444444",
"55555555",
"66666666",
"77777777",
"88888888",
"99999999",
"12345678",
"87654321",
}
def normalize_hkid(hkid):
"""Normalize a hkid so that it is safe to compare with other normalized hkids."""
return hkid.lower()
@callback
def find_existing_host(hass, serial):
"""Return a set of the configured hosts."""
for entry in hass.config_entries.async_entries(DOMAIN):
if entry.data.get("AccessoryPairingID") == serial:
return entry
def ensure_pin_format(pin):
"""
Ensure a pin code is correctly formatted.
    Ensures a pin code is in the format 111-11-111, accepting codes with or
    without dashes. Raises MalformedPinError if the code is invalid or one of
    the disallowed trivial codes.
"""
match = PIN_FORMAT.search(pin.strip())
if not match:
raise aiohomekit.exceptions.MalformedPinError(f"Invalid PIN code f{pin}")
pin_without_dashes = "".join(match.groups())
if pin_without_dashes in DISALLOWED_CODES:
raise aiohomekit.exceptions.MalformedPinError(f"Invalid PIN code f{pin}")
return "-".join(match.groups())
@config_entries.HANDLERS.register(DOMAIN)
class HomekitControllerFlowHandler(config_entries.ConfigFlow):
"""Handle a HomeKit config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize the homekit_controller flow."""
self.model = None
self.hkid = None
self.name = None
self.devices = {}
self.controller = None
self.finish_pairing = None
async def _async_setup_controller(self):
"""Create the controller."""
zeroconf_instance = await zeroconf.async_get_instance(self.hass)
self.controller = aiohomekit.Controller(zeroconf_instance=zeroconf_instance)
async def async_step_user(self, user_input=None):
"""Handle a flow start."""
errors = {}
if user_input is not None:
key = user_input["device"]
self.hkid = self.devices[key].device_id
self.model = self.devices[key].info["md"]
self.name = key[: -len(MDNS_SUFFIX)] if key.endswith(MDNS_SUFFIX) else key
await self.async_set_unique_id(
normalize_hkid(self.hkid), raise_on_progress=False
)
return await self.async_step_pair()
if self.controller is None:
await self._async_setup_controller()
all_hosts = await self.controller.discover_ip()
self.devices = {}
for host in all_hosts:
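            # Bit 0 of the HAP status flags ("sf") is set while an accessory
            # is still unpaired; accessories without that bit are already
            # paired elsewhere and are skipped here.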
status_flags = int(host.info["sf"])
paired = not status_flags & 0x01
if paired:
continue
self.devices[host.info["name"]] = host
if not self.devices:
return self.async_abort(reason="no_devices")
return self.async_show_form(
step_id="user",
errors=errors,
data_schema=vol.Schema(
{vol.Required("device"): vol.In(self.devices.keys())}
),
)
async def async_step_unignore(self, user_input):
"""Rediscover a previously ignored discover."""
unique_id = user_input["unique_id"]
await self.async_set_unique_id(unique_id)
if self.controller is None:
await self._async_setup_controller()
devices = await self.controller.discover_ip(max_seconds=5)
for device in devices:
if normalize_hkid(device.device_id) != unique_id:
continue
record = device.info
return await self.async_step_zeroconf(
{
"host": record["address"],
"port": record["port"],
"hostname": record["name"],
"type": "_hap._tcp.local.",
"name": record["name"],
"properties": {
"md": record["md"],
"pv": record["pv"],
"id": unique_id,
"c#": record["c#"],
"s#": record["s#"],
"ff": record["ff"],
"ci": record["ci"],
"sf": record["sf"],
"sh": "",
},
}
)
return self.async_abort(reason="no_devices")
async def _hkid_is_homekit_bridge(self, hkid):
"""Determine if the device is a homekit bridge."""
dev_reg = await async_get_device_registry(self.hass)
device = dev_reg.async_get_device(
identifiers=set(), connections={(CONNECTION_NETWORK_MAC, hkid)}
)
if device is None:
return False
return device.model == HOMEKIT_BRIDGE_MODEL
async def async_step_zeroconf(self, discovery_info):
"""Handle a discovered HomeKit accessory.
This flow is triggered by the discovery component.
"""
# Normalize properties from discovery
# homekit_python has code to do this, but not in a form we can
# easily use, so do the bare minimum ourselves here instead.
properties = {
key.lower(): value for (key, value) in discovery_info["properties"].items()
}
if "id" not in properties:
_LOGGER.warning(
"HomeKit device %s: id not exposed, in violation of spec", properties
)
return self.async_abort(reason="invalid_properties")
# The hkid is a unique random number that looks like a pairing code.
# It changes if a device is factory reset.
hkid = properties["id"]
model = properties["md"]
name = discovery_info["name"].replace("._hap._tcp.local.", "")
status_flags = int(properties["sf"])
paired = not status_flags & 0x01
# The configuration number increases every time the characteristic map
# needs updating. Some devices use a slightly off-spec name so handle
# both cases.
try:
config_num = int(properties["c#"])
except KeyError:
_LOGGER.warning(
"HomeKit device %s: c# not exposed, in violation of spec", hkid
)
config_num = None
# If the device is already paired and known to us we should monitor c#
# (config_num) for changes. If it changes, we check for new entities
if paired and hkid in self.hass.data.get(KNOWN_DEVICES, {}):
conn = self.hass.data[KNOWN_DEVICES][hkid]
if conn.config_num != config_num:
_LOGGER.debug(
"HomeKit info %s: c# incremented, refreshing entities", hkid
)
self.hass.async_create_task(conn.async_refresh_entity_map(config_num))
return self.async_abort(reason="already_configured")
_LOGGER.debug("Discovered device %s (%s - %s)", name, model, hkid)
# Device isn't paired with us or anyone else.
# But we have a 'complete' config entry for it - that is probably
# invalid. Remove it automatically.
existing = find_existing_host(self.hass, hkid)
if not paired and existing:
await self.hass.config_entries.async_remove(existing.entry_id)
# Set unique-id and error out if it's already configured
await self.async_set_unique_id(normalize_hkid(hkid))
self._abort_if_unique_id_configured()
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context["hkid"] = hkid
if paired:
# Device is paired but not to us - ignore it
_LOGGER.debug("HomeKit device %s ignored as already paired", hkid)
return self.async_abort(reason="already_paired")
# Devices in HOMEKIT_IGNORE have native local integrations - users
# should be encouraged to use native integration and not confused
# by alternative HK API.
if await self._hkid_is_homekit_bridge(hkid):
return self.async_abort(reason="ignored_model")
self.name = name
self.model = model
self.hkid = hkid
# We want to show the pairing form - but don't call async_step_pair
# directly as it has side effects (will ask the device to show a
# pairing code)
return self._async_step_pair_show_form()
async def async_step_pair(self, pair_info=None):
"""Pair with a new HomeKit accessory."""
# If async_step_pair is called with no pairing code then we do the M1
# phase of pairing. If this is successful the device enters pairing
# mode.
# If it doesn't have a screen then the pin is static.
# If it has a display it will display a pin on that display. In
# this case the code is random. So we have to call the start_pairing
# API before the user can enter a pin. But equally we don't want to
# call start_pairing when the device is discovered, only when they
# click on 'Configure' in the UI.
# start_pairing will make the device show its pin and return a
# callable. We call the callable with the pin that the user has typed
# in.
errors = {}
if self.controller is None:
await self._async_setup_controller()
if pair_info and self.finish_pairing:
code = pair_info["pairing_code"]
try:
code = ensure_pin_format(code)
pairing = await self.finish_pairing(code)
return await self._entry_from_accessory(pairing)
except aiohomekit.exceptions.MalformedPinError:
# Library claimed pin was invalid before even making an API call
errors["pairing_code"] = "authentication_error"
except aiohomekit.AuthenticationError:
# PairSetup M4 - SRP proof failed
# PairSetup M6 - Ed25519 signature verification failed
# PairVerify M4 - Decryption failed
# PairVerify M4 - Device not recognised
# PairVerify M4 - Ed25519 signature verification failed
errors["pairing_code"] = "authentication_error"
self.finish_pairing = None
except aiohomekit.UnknownError:
# An error occurred on the device whilst performing this
# operation.
errors["pairing_code"] = "unknown_error"
self.finish_pairing = None
except aiohomekit.MaxPeersError:
# The device can't pair with any more accessories.
errors["pairing_code"] = "max_peers_error"
self.finish_pairing = None
except aiohomekit.AccessoryNotFoundError:
# Can no longer find the device on the network
return self.async_abort(reason="accessory_not_found_error")
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Pairing attempt failed with an unhandled exception")
self.finish_pairing = None
errors["pairing_code"] = "pairing_failed"
if not self.finish_pairing:
            # It's possible that the first try may have been busy, so
            # we always check to see if self.finish_pairing has been
            # set.
try:
discovery = await self.controller.find_ip_by_device_id(self.hkid)
self.finish_pairing = await discovery.start_pairing(self.hkid)
except aiohomekit.BusyError:
# Already performing a pair setup operation with a different
# controller
return await self.async_step_busy_error()
except aiohomekit.MaxTriesError:
# The accessory has received more than 100 unsuccessful auth
# attempts.
return await self.async_step_max_tries_error()
except aiohomekit.UnavailableError:
# The accessory is already paired - cannot try to pair again.
return self.async_abort(reason="already_paired")
except aiohomekit.AccessoryNotFoundError:
# Can no longer find the device on the network
return self.async_abort(reason="accessory_not_found_error")
except IndexError:
# TLV error, usually not in pairing mode
_LOGGER.exception("Pairing communication failed")
return await self.async_step_protocol_error()
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Pairing attempt failed with an unhandled exception")
errors["pairing_code"] = "pairing_failed"
return self._async_step_pair_show_form(errors)
async def async_step_busy_error(self, user_input=None):
"""Retry pairing after the accessory is busy."""
if user_input is not None:
return await self.async_step_pair()
return self.async_show_form(step_id="busy_error")
async def async_step_max_tries_error(self, user_input=None):
"""Retry pairing after the accessory has reached max tries."""
if user_input is not None:
return await self.async_step_pair()
return self.async_show_form(step_id="max_tries_error")
async def async_step_protocol_error(self, user_input=None):
"""Retry pairing after the accessory has a protocol error."""
if user_input is not None:
return await self.async_step_pair()
return self.async_show_form(step_id="protocol_error")
@callback
def _async_step_pair_show_form(self, errors=None):
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
placeholders = {"name": self.name}
self.context["title_placeholders"] = {"name": self.name}
return self.async_show_form(
step_id="pair",
errors=errors or {},
description_placeholders=placeholders,
data_schema=vol.Schema(
{vol.Required("pairing_code"): vol.All(str, vol.Strip)}
),
)
async def _entry_from_accessory(self, pairing):
"""Return a config entry from an initialized bridge."""
# The bulk of the pairing record is stored on the config entry.
# A specific exception is the 'accessories' key. This is more
# volatile. We do cache it, but not against the config entry.
# So copy the pairing data and mutate the copy.
pairing_data = pairing.pairing_data.copy()
# Use the accessories data from the pairing operation if it is
# available. Otherwise request a fresh copy from the API.
# This removes the 'accessories' key from pairing_data at
# the same time.
accessories = pairing_data.pop("accessories", None)
if not accessories:
accessories = await pairing.list_accessories_and_characteristics()
bridge_info = get_bridge_information(accessories)
name = get_accessory_name(bridge_info)
return self.async_create_entry(title=name, data=pairing_data)
|
import argparse
import numpy as np
import chainer
import chainer.functions as F
from chainer import iterators
from chainercv.datasets import directory_parsing_label_names
from chainercv.datasets import DirectoryParsingLabelDataset
from chainercv.links import FeaturePredictor
from chainercv.links import MobileNetV2
from chainercv.links import ResNet101
from chainercv.links import ResNet152
from chainercv.links import ResNet50
from chainercv.links import SEResNet101
from chainercv.links import SEResNet152
from chainercv.links import SEResNet50
from chainercv.links import SEResNeXt101
from chainercv.links import SEResNeXt50
from chainercv.links import VGG16
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
models = {
# model: (class, dataset -> pretrained_model, default batchsize,
# crop, resnet_arch)
'vgg16': (VGG16, {}, 32, 'center', None),
'resnet50': (ResNet50, {}, 32, 'center', 'fb'),
'resnet101': (ResNet101, {}, 32, 'center', 'fb'),
'resnet152': (ResNet152, {}, 32, 'center', 'fb'),
'se-resnet50': (SEResNet50, {}, 32, 'center', None),
'se-resnet101': (SEResNet101, {}, 32, 'center', None),
'se-resnet152': (SEResNet152, {}, 32, 'center', None),
'se-resnext50': (SEResNeXt50, {}, 32, 'center', None),
'se-resnext101': (SEResNeXt101, {}, 32, 'center', None),
'mobilenet_v2_1.0': (MobileNetV2, {}, 32, 'center', None),
'mobilenet_v2_1.4': (MobileNetV2, {}, 32, 'center', None)
}
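# For example, models['resnet50'] unpacks to (ResNet50, {}, 32, 'center', 'fb'):
# the link class, a dataset-to-pretrained-weights mapping, the default batch
# size, the default crop mode and the ResNet architecture.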
def setup(dataset, model, pretrained_model, batchsize, val, crop, resnet_arch):
dataset_name = dataset
if dataset_name == 'imagenet':
dataset = DirectoryParsingLabelDataset(val)
label_names = directory_parsing_label_names(val)
def eval_(out_values, rest_values):
pred_probs, = out_values
gt_labels, = rest_values
accuracy = F.accuracy(
np.array(list(pred_probs)), np.array(list(gt_labels))).data
print()
print('Top 1 Error {}'.format(1. - accuracy))
cls, pretrained_models, default_batchsize = models[model][:3]
if pretrained_model is None:
pretrained_model = pretrained_models.get(dataset_name, dataset_name)
if crop is None:
crop = models[model][3]
kwargs = {
'n_class': len(label_names),
'pretrained_model': pretrained_model,
}
if model in ['resnet50', 'resnet101', 'resnet152']:
if resnet_arch is None:
resnet_arch = models[model][4]
kwargs.update({'arch': resnet_arch})
extractor = cls(**kwargs)
model = FeaturePredictor(
extractor, crop_size=224, scale_size=256, crop=crop)
if batchsize is None:
batchsize = default_batchsize
return dataset, eval_, model, batchsize
def main():
parser = argparse.ArgumentParser(
description='Evaluating convnet from ILSVRC2012 dataset')
parser.add_argument('val', help='Path to root of the validation dataset')
parser.add_argument('--model', choices=sorted(models.keys()))
parser.add_argument('--pretrained-model')
parser.add_argument('--dataset', choices=('imagenet',))
parser.add_argument('--gpu', type=int, default=-1)
parser.add_argument('--batchsize', type=int)
parser.add_argument('--crop', choices=('center', '10'))
parser.add_argument('--resnet-arch')
args = parser.parse_args()
dataset, eval_, model, batchsize = setup(
args.dataset, args.model, args.pretrained_model, args.batchsize,
args.val, args.crop, args.resnet_arch)
if args.gpu >= 0:
chainer.cuda.get_device(args.gpu).use()
model.to_gpu()
iterator = iterators.MultiprocessIterator(
dataset, batchsize, repeat=False, shuffle=False,
n_processes=6, shared_mem=300000000)
print('Model has been prepared. Evaluation starts.')
in_values, out_values, rest_values = apply_to_iterator(
model.predict, iterator, hook=ProgressHook(len(dataset)))
del in_values
eval_(out_values, rest_values)
if __name__ == '__main__':
main()
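# Hypothetical invocation (the script name and paths are placeholders):
#   python eval_imagenet.py /data/ILSVRC2012/val --dataset imagenet \
#       --model resnet50 --gpu 0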
|
import pytest
from homeassistant.components import cloud
from homeassistant.components.cloud.const import DOMAIN
from homeassistant.components.cloud.prefs import STORAGE_KEY
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import Context
from homeassistant.exceptions import Unauthorized
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_constructor_loads_info_from_config(hass):
"""Test non-dev mode loads info from SERVERS constant."""
with patch("hass_nabucasa.Cloud.start"):
result = await async_setup_component(
hass,
"cloud",
{
"http": {},
"cloud": {
cloud.CONF_MODE: cloud.MODE_DEV,
"cognito_client_id": "test-cognito_client_id",
"user_pool_id": "test-user_pool_id",
"region": "test-region",
"relayer": "test-relayer",
"subscription_info_url": "http://test-subscription-info-url",
"cloudhook_create_url": "http://test-cloudhook_create_url",
"remote_api_url": "http://test-remote_api_url",
"alexa_access_token_url": "http://test-alexa-token-url",
"acme_directory_server": "http://test-acme-directory-server",
"google_actions_report_state_url": "http://test-google-actions-report-state-url",
},
},
)
assert result
cl = hass.data["cloud"]
assert cl.mode == cloud.MODE_DEV
assert cl.cognito_client_id == "test-cognito_client_id"
assert cl.user_pool_id == "test-user_pool_id"
assert cl.region == "test-region"
assert cl.relayer == "test-relayer"
assert cl.subscription_info_url == "http://test-subscription-info-url"
assert cl.cloudhook_create_url == "http://test-cloudhook_create_url"
assert cl.remote_api_url == "http://test-remote_api_url"
assert cl.alexa_access_token_url == "http://test-alexa-token-url"
assert cl.acme_directory_server == "http://test-acme-directory-server"
assert (
cl.google_actions_report_state_url
== "http://test-google-actions-report-state-url"
)
async def test_remote_services(hass, mock_cloud_fixture, hass_read_only_user):
"""Setup cloud component and test services."""
cloud = hass.data[DOMAIN]
assert hass.services.has_service(DOMAIN, "remote_connect")
assert hass.services.has_service(DOMAIN, "remote_disconnect")
with patch("hass_nabucasa.remote.RemoteUI.connect") as mock_connect:
await hass.services.async_call(DOMAIN, "remote_connect", blocking=True)
assert mock_connect.called
assert cloud.client.remote_autostart
with patch("hass_nabucasa.remote.RemoteUI.disconnect") as mock_disconnect:
await hass.services.async_call(DOMAIN, "remote_disconnect", blocking=True)
assert mock_disconnect.called
assert not cloud.client.remote_autostart
# Test admin access required
non_admin_context = Context(user_id=hass_read_only_user.id)
with patch("hass_nabucasa.remote.RemoteUI.connect") as mock_connect, pytest.raises(
Unauthorized
):
await hass.services.async_call(
DOMAIN, "remote_connect", blocking=True, context=non_admin_context
)
assert mock_connect.called is False
with patch(
"hass_nabucasa.remote.RemoteUI.disconnect"
) as mock_disconnect, pytest.raises(Unauthorized):
await hass.services.async_call(
DOMAIN, "remote_disconnect", blocking=True, context=non_admin_context
)
assert mock_disconnect.called is False
async def test_startup_shutdown_events(hass, mock_cloud_fixture):
"""Test if the cloud will start on startup event."""
with patch("hass_nabucasa.Cloud.stop") as mock_stop:
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP)
await hass.async_block_till_done()
assert mock_stop.called
async def test_setup_existing_cloud_user(hass, hass_storage):
"""Test setup with API push default data."""
user = await hass.auth.async_create_system_user("Cloud test")
hass_storage[STORAGE_KEY] = {"version": 1, "data": {"cloud_user": user.id}}
with patch("hass_nabucasa.Cloud.start"):
result = await async_setup_component(
hass,
"cloud",
{
"http": {},
"cloud": {
cloud.CONF_MODE: cloud.MODE_DEV,
"cognito_client_id": "test-cognito_client_id",
"user_pool_id": "test-user_pool_id",
"region": "test-region",
"relayer": "test-relayer",
},
},
)
assert result
assert hass_storage[STORAGE_KEY]["data"]["cloud_user"] == user.id
async def test_on_connect(hass, mock_cloud_fixture):
"""Test cloud on connect triggers."""
cl = hass.data["cloud"]
assert len(cl.iot._on_connect) == 3
assert len(hass.states.async_entity_ids("binary_sensor")) == 0
assert "async_setup" in str(cl.iot._on_connect[-1])
await cl.iot._on_connect[-1]()
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids("binary_sensor")) == 1
with patch("homeassistant.helpers.discovery.async_load_platform") as mock_load:
await cl.iot._on_connect[-1]()
await hass.async_block_till_done()
assert len(mock_load.mock_calls) == 0
async def test_remote_ui_url(hass, mock_cloud_fixture):
"""Test getting remote ui url."""
cl = hass.data["cloud"]
# Not logged in
with pytest.raises(cloud.CloudNotAvailable):
cloud.async_remote_ui_url(hass)
with patch.object(cloud, "async_is_logged_in", return_value=True):
# Remote not enabled
with pytest.raises(cloud.CloudNotAvailable):
cloud.async_remote_ui_url(hass)
await cl.client.prefs.async_update(remote_enabled=True)
# No instance domain
with pytest.raises(cloud.CloudNotAvailable):
cloud.async_remote_ui_url(hass)
cl.remote._instance_domain = "example.com"
assert cloud.async_remote_ui_url(hass) == "https://example.com"
|
from asyncio import gather
from copy import deepcopy
from functools import partial
from abodepy import Abode
from abodepy.exceptions import AbodeException
import abodepy.helpers.timeline as TIMELINE
from requests.exceptions import ConnectTimeout, HTTPError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_DATE,
ATTR_ENTITY_ID,
ATTR_TIME,
CONF_PASSWORD,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.entity import Entity
from .const import ATTRIBUTION, DEFAULT_CACHEDB, DOMAIN, LOGGER
CONF_POLLING = "polling"
SERVICE_SETTINGS = "change_setting"
SERVICE_CAPTURE_IMAGE = "capture_image"
SERVICE_TRIGGER_AUTOMATION = "trigger_automation"
ATTR_DEVICE_ID = "device_id"
ATTR_DEVICE_NAME = "device_name"
ATTR_DEVICE_TYPE = "device_type"
ATTR_EVENT_CODE = "event_code"
ATTR_EVENT_NAME = "event_name"
ATTR_EVENT_TYPE = "event_type"
ATTR_EVENT_UTC = "event_utc"
ATTR_SETTING = "setting"
ATTR_USER_NAME = "user_name"
ATTR_APP_TYPE = "app_type"
ATTR_EVENT_BY = "event_by"
ATTR_VALUE = "value"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_POLLING, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
CHANGE_SETTING_SCHEMA = vol.Schema(
{vol.Required(ATTR_SETTING): cv.string, vol.Required(ATTR_VALUE): cv.string}
)
CAPTURE_IMAGE_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
AUTOMATION_SCHEMA = vol.Schema({ATTR_ENTITY_ID: cv.entity_ids})
ABODE_PLATFORMS = [
"alarm_control_panel",
"binary_sensor",
"lock",
"switch",
"cover",
"camera",
"light",
"sensor",
]
class AbodeSystem:
"""Abode System class."""
def __init__(self, abode, polling):
"""Initialize the system."""
self.abode = abode
self.polling = polling
self.entity_ids = set()
self.logout_listener = None
async def async_setup(hass, config):
"""Set up Abode integration."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=deepcopy(conf)
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up Abode integration from a config entry."""
username = config_entry.data.get(CONF_USERNAME)
password = config_entry.data.get(CONF_PASSWORD)
polling = config_entry.data.get(CONF_POLLING)
try:
cache = hass.config.path(DEFAULT_CACHEDB)
abode = await hass.async_add_executor_job(
Abode, username, password, True, True, True, cache
)
hass.data[DOMAIN] = AbodeSystem(abode, polling)
except (AbodeException, ConnectTimeout, HTTPError) as ex:
LOGGER.error("Unable to connect to Abode: %s", str(ex))
raise ConfigEntryNotReady from ex
for platform in ABODE_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, platform)
)
await setup_hass_events(hass)
await hass.async_add_executor_job(setup_hass_services, hass)
await hass.async_add_executor_job(setup_abode_events, hass)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
hass.services.async_remove(DOMAIN, SERVICE_SETTINGS)
hass.services.async_remove(DOMAIN, SERVICE_CAPTURE_IMAGE)
hass.services.async_remove(DOMAIN, SERVICE_TRIGGER_AUTOMATION)
tasks = []
for platform in ABODE_PLATFORMS:
tasks.append(
hass.config_entries.async_forward_entry_unload(config_entry, platform)
)
await gather(*tasks)
await hass.async_add_executor_job(hass.data[DOMAIN].abode.events.stop)
await hass.async_add_executor_job(hass.data[DOMAIN].abode.logout)
hass.data[DOMAIN].logout_listener()
hass.data.pop(DOMAIN)
return True
def setup_hass_services(hass):
"""Home Assistant services."""
def change_setting(call):
"""Change an Abode system setting."""
setting = call.data.get(ATTR_SETTING)
value = call.data.get(ATTR_VALUE)
try:
hass.data[DOMAIN].abode.set_setting(setting, value)
except AbodeException as ex:
LOGGER.warning(ex)
def capture_image(call):
"""Capture a new image."""
entity_ids = call.data.get(ATTR_ENTITY_ID)
target_entities = [
entity_id
for entity_id in hass.data[DOMAIN].entity_ids
if entity_id in entity_ids
]
for entity_id in target_entities:
signal = f"abode_camera_capture_{entity_id}"
dispatcher_send(hass, signal)
def trigger_automation(call):
"""Trigger an Abode automation."""
entity_ids = call.data.get(ATTR_ENTITY_ID)
target_entities = [
entity_id
for entity_id in hass.data[DOMAIN].entity_ids
if entity_id in entity_ids
]
for entity_id in target_entities:
signal = f"abode_trigger_automation_{entity_id}"
dispatcher_send(hass, signal)
hass.services.register(
DOMAIN, SERVICE_SETTINGS, change_setting, schema=CHANGE_SETTING_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_CAPTURE_IMAGE, capture_image, schema=CAPTURE_IMAGE_SCHEMA
)
hass.services.register(
DOMAIN, SERVICE_TRIGGER_AUTOMATION, trigger_automation, schema=AUTOMATION_SCHEMA
)
async def setup_hass_events(hass):
"""Home Assistant start and stop callbacks."""
def logout(event):
"""Logout of Abode."""
if not hass.data[DOMAIN].polling:
hass.data[DOMAIN].abode.events.stop()
hass.data[DOMAIN].abode.logout()
LOGGER.info("Logged out of Abode")
if not hass.data[DOMAIN].polling:
await hass.async_add_executor_job(hass.data[DOMAIN].abode.events.start)
hass.data[DOMAIN].logout_listener = hass.bus.async_listen_once(
EVENT_HOMEASSISTANT_STOP, logout
)
def setup_abode_events(hass):
"""Event callbacks."""
def event_callback(event, event_json):
"""Handle an event callback from Abode."""
data = {
ATTR_DEVICE_ID: event_json.get(ATTR_DEVICE_ID, ""),
ATTR_DEVICE_NAME: event_json.get(ATTR_DEVICE_NAME, ""),
ATTR_DEVICE_TYPE: event_json.get(ATTR_DEVICE_TYPE, ""),
ATTR_EVENT_CODE: event_json.get(ATTR_EVENT_CODE, ""),
ATTR_EVENT_NAME: event_json.get(ATTR_EVENT_NAME, ""),
ATTR_EVENT_TYPE: event_json.get(ATTR_EVENT_TYPE, ""),
ATTR_EVENT_UTC: event_json.get(ATTR_EVENT_UTC, ""),
ATTR_USER_NAME: event_json.get(ATTR_USER_NAME, ""),
ATTR_APP_TYPE: event_json.get(ATTR_APP_TYPE, ""),
ATTR_EVENT_BY: event_json.get(ATTR_EVENT_BY, ""),
ATTR_DATE: event_json.get(ATTR_DATE, ""),
ATTR_TIME: event_json.get(ATTR_TIME, ""),
}
hass.bus.fire(event, data)
events = [
TIMELINE.ALARM_GROUP,
TIMELINE.ALARM_END_GROUP,
TIMELINE.PANEL_FAULT_GROUP,
TIMELINE.PANEL_RESTORE_GROUP,
TIMELINE.AUTOMATION_GROUP,
TIMELINE.DISARM_GROUP,
TIMELINE.ARM_GROUP,
TIMELINE.ARM_FAULT_GROUP,
TIMELINE.TEST_GROUP,
TIMELINE.CAPTURE_GROUP,
TIMELINE.DEVICE_GROUP,
]
for event in events:
hass.data[DOMAIN].abode.events.add_event_callback(
event, partial(event_callback, event)
)
class AbodeEntity(Entity):
"""Representation of an Abode entity."""
def __init__(self, data):
"""Initialize Abode entity."""
self._data = data
self._available = True
@property
def available(self):
"""Return the available state."""
return self._available
@property
def should_poll(self):
"""Return the polling state."""
return self._data.polling
async def async_added_to_hass(self):
"""Subscribe to Abode connection status updates."""
await self.hass.async_add_executor_job(
self._data.abode.events.add_connection_status_callback,
self.unique_id,
self._update_connection_status,
)
self.hass.data[DOMAIN].entity_ids.add(self.entity_id)
async def async_will_remove_from_hass(self):
"""Unsubscribe from Abode connection status updates."""
await self.hass.async_add_executor_job(
self._data.abode.events.remove_connection_status_callback, self.unique_id
)
def _update_connection_status(self):
"""Update the entity available property."""
self._available = self._data.abode.events.connected
self.schedule_update_ha_state()
class AbodeDevice(AbodeEntity):
"""Representation of an Abode device."""
def __init__(self, data, device):
"""Initialize Abode device."""
super().__init__(data)
self._device = device
async def async_added_to_hass(self):
"""Subscribe to device events."""
await super().async_added_to_hass()
await self.hass.async_add_executor_job(
self._data.abode.events.add_device_callback,
self._device.device_id,
self._update_callback,
)
async def async_will_remove_from_hass(self):
"""Unsubscribe from device events."""
await super().async_will_remove_from_hass()
await self.hass.async_add_executor_job(
self._data.abode.events.remove_all_device_callbacks, self._device.device_id
)
def update(self):
"""Update device state."""
self._device.refresh()
@property
def name(self):
"""Return the name of the device."""
return self._device.name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
ATTR_ATTRIBUTION: ATTRIBUTION,
"device_id": self._device.device_id,
"battery_low": self._device.battery_low,
"no_response": self._device.no_response,
"device_type": self._device.type,
}
@property
def unique_id(self):
"""Return a unique ID to use for this device."""
return self._device.device_uuid
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self._device.device_id)},
"manufacturer": "Abode",
"name": self._device.name,
"device_type": self._device.type,
}
def _update_callback(self, device):
"""Update the device state."""
self.schedule_update_ha_state()
class AbodeAutomation(AbodeEntity):
"""Representation of an Abode automation."""
def __init__(self, data, automation):
"""Initialize for Abode automation."""
super().__init__(data)
self._automation = automation
def update(self):
"""Update automation state."""
self._automation.refresh()
@property
def name(self):
"""Return the name of the automation."""
return self._automation.name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION, "type": "CUE automation"}
@property
def unique_id(self):
"""Return a unique ID to use for this automation."""
return self._automation.automation_id
|
from __future__ import with_statement
import atexit
import time
import traceback
from threading import Lock
from threading import RLock
from threading import Thread
try:
from queue import Empty, Queue
except ImportError:
from Queue import Empty, Queue
from .core import printerrlog
from .core import printlog
from .core import printlog_bold
class PmonException(Exception):
pass
class FatalProcessLaunch(PmonException):
"""
Exception to indicate that a process launch has failed in a fatal
manner (i.e. relaunch is unlikely to succeed)
"""
pass
# start/shutdown ################################################
_pmons = []
_pmon_counter = 0
_shutting_down = False
def start_process_monitor():
global _pmon_counter
if _shutting_down:
return None
_pmon_counter += 1
name = 'ProcessMonitor-%s' % _pmon_counter
process_monitor = ProcessMonitor(name)
with _shutdown_lock:
# prevent race condition with pmon_shutdown() being triggered
# as we are starting a ProcessMonitor (i.e. user hits ctrl-C
# during startup)
_pmons.append(process_monitor)
process_monitor.start()
return process_monitor
def shutdown_process_monitor(process_monitor):
"""
@param process_monitor: process monitor to kill
@type process_monitor: L{ProcessMonitor}
    @return: True if process_monitor was successfully shut down. False if
    it could not be shut down cleanly or if there is a problem with the
    process_monitor parameter. shutdown_process_monitor() does not throw
    any exceptions as this is shutdown-critical code.
@rtype: bool
"""
try:
if process_monitor is None or process_monitor.is_shutdown:
return False
process_monitor.shutdown()
process_monitor.join(20.0)
if process_monitor.isAlive():
return False
else:
return True
except Exception:
return False
_shutdown_lock = Lock()
def pmon_shutdown():
global _pmons
with _shutdown_lock:
if not _pmons:
return
for p in _pmons:
shutdown_process_monitor(p)
del _pmons[:]
atexit.register(pmon_shutdown)
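# A minimal lifecycle sketch (MyProcess is a hypothetical Process subclass):
#   pm = start_process_monitor()
#   pm.register(MyProcess(...))    # or register_core_proc() for core processes
#   pm.registrations_complete()    # pmon may now exit once all processes die
#   ...
#   shutdown_process_monitor(pm)   # or rely on the atexit hook above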
# ##############################################################
class Process(object):
"""
Basic process representation for L{ProcessMonitor}. Must be subclassed
to provide actual start()/stop() implementations.
"""
def __init__(self, package, name, args, env, respawn=False, required=False):
self.package = package
self.name = name
self.args = args
self.env = env
self.respawn = respawn
self.required = required
self.lock = Lock()
self.exit_code = None
# for keeping track of respawning
self.spawn_count = 0
def __str__(self):
return 'Process<%s>' % (self.name)
# NOTE: get_info() is going to have to be sufficient for
# generating respawn requests, so we must be complete about it
def get_info(self):
"""
Get all data about this process in dictionary form
@return: dictionary of all relevant process properties
@rtype: dict { str: val }
"""
info = {
'spawn_count': self.spawn_count,
'args': self.args,
'env': self.env,
'package': self.package,
'name': self.name,
'alive': self.is_alive(),
'respawn': self.respawn,
'required': self.required,
}
if self.exit_code is not None:
info['exit_code'] = self.exit_code
return info
def start(self):
self.spawn_count += 1
def is_alive(self):
return False
def stop(self, errors=[]):
"""
Stop the process. Record any significant error messages in the errors parameter
@param errors: error messages. stop() will record messages into this list.
@type errors: [str]
"""
pass
def get_exit_description(self):
if self.exit_code is not None:
if self.exit_code:
return 'process has died [exit code %s]' % self.exit_code
else:
# try not to scare users about process exit
return 'process has finished cleanly'
else:
return 'process has died'
class DeadProcess(Process):
"""
Container class to maintain information about a process that has died. This
container allows us to delete the actual Process but still maintain the metadata
"""
def __init__(self, p):
super(DeadProcess, self).__init__(p.package, p.name, p.args, p.env, p.respawn)
self.exit_code = p.exit_code
self.lock = None
self.spawn_count = p.spawn_count
self.info = p.get_info()
def get_info(self):
return self.info
def start(self):
raise Exception('cannot call start on a dead process!')
def is_alive(self):
return False
class ProcessListener(object):
"""
Listener class for L{ProcessMonitor}
"""
def process_died(self, process_name, exit_code):
"""
Notifies listener that process has died. This callback only
occurs for processes that die during normal process monitor
execution -- processes that are forcibly killed during
ProcessMonitor shutdown are not reported.
@param process_name: name of process
@type process_name: str
@param exit_code: exit code of process. If None, it means
that ProcessMonitor was unable to determine an exit code.
@type exit_code: int
"""
pass
class ProcessMonitor(Thread):
def __init__(self, name='ProcessMonitor'):
Thread.__init__(self, name=name)
self.procs = []
self.plock = RLock()
self.is_shutdown = False
self.done = False
self.setDaemon(True)
self.listeners = []
self.dead_list = []
# #885: ensure core procs
self.core_procs = []
# #642: flag to prevent process monitor exiting prematurely
self._registrations_complete = False
def add_process_listener(self, l):
"""
Listener for process events. MUST be called before
        ProcessMonitor is running. See the ProcessListener class.
@param l: listener instance
@type l: L{ProcessListener}
"""
self.listeners.append(l)
def register(self, p):
"""
Register process with L{ProcessMonitor}
@param p: Process
@type p: L{Process}
@raise PmonException: if process with same name is already registered
"""
e = None
with self.plock:
if self.has_process(p.name):
e = PmonException("cannot add process with duplicate name '%s'" % p.name)
elif self.is_shutdown:
e = PmonException('cannot add process [%s] after process monitor has been shut down' % p.name)
else:
self.procs.append(p)
if e:
raise e
def register_core_proc(self, p):
"""
        Register a core process with ProcessMonitor. Core processes
have special shutdown semantics. They are killed after all
other processes, in reverse order in which they are added.
        @param p: Process
@type p: L{Process}
@raise PmonException: if process with same name is already registered
"""
self.register(p)
self.core_procs.append(p)
def registrations_complete(self):
"""
Inform the process monitor that registrations are complete.
After the registrations_complete flag is set, process monitor
will exit if there are no processes left to monitor.
"""
self._registrations_complete = True
def unregister(self, p):
with self.plock:
self.procs.remove(p)
def has_process(self, name):
"""
        @return: True if the process is still being monitored. If False, the
        process has died or was never registered with the process monitor
@rtype: bool
"""
return len([p for p in self.procs if p.name == name]) > 0
def get_process(self, name):
"""
@return: process registered under \a name, or None
@rtype: L{Process}
"""
with self.plock:
v = [p for p in self.procs if p.name == name]
if v:
return v[0]
def kill_process(self, name):
"""
Kill process that matches name. NOTE: a killed process will
continue to show up as active until the process monitor thread
has caught that it has died.
@param name: Process name
@type name: str
@return: True if a process named name was removed from
process monitor. A process is considered killed if its stop()
method was called.
@rtype: bool
"""
def is_string_type(obj):
try:
return isinstance(obj, basestring)
except NameError:
return isinstance(obj, str)
if not is_string_type(name):
raise PmonException('kill_process takes in a process name but was given: %s' % name)
printlog('[%s] kill requested' % name)
with self.plock:
p = self.get_process(name)
if p:
try:
# no need to accumulate errors, so pass in []
p.stop([])
except Exception as e:
printerrlog('Exception: %s' % (str(e)))
return True
else:
return False
def shutdown(self):
"""
Shutdown the process monitor thread
"""
self.is_shutdown = True
def get_active_names(self):
"""
@return [str]: list of active process names
"""
with self.plock:
retval = [p.name for p in self.procs]
return retval
def get_process_names_with_spawn_count(self):
"""
        @return: Two lists. The first contains the names of active processes
        along with the number of times each has been spawned. The second
        contains dead process names and their spawn counts.
@rtype: [[(str, int),], [(str,int),]]
"""
with self.plock:
actives = [(p.name, p.spawn_count) for p in self.procs]
deads = [(p.name, p.spawn_count) for p in self.dead_list]
retval = [actives, deads]
return retval
def run(self):
"""
thread routine of the process monitor.
"""
try:
# don't let exceptions bomb thread, interferes with exit
try:
self._run()
except Exception:
traceback.print_exc()
finally:
self._post_run()
def _run(self):
"""
Internal run loop of ProcessMonitor
"""
plock = self.plock
dead = []
respawn = []
while not self.is_shutdown:
with plock: # copy self.procs
procs = self.procs[:]
if self.is_shutdown:
break
for p in procs:
try:
if not p.is_alive():
exit_code_str = p.get_exit_description()
if p.respawn:
printlog_bold('[%s] %s\nrespawning...' % (p.name, exit_code_str))
respawn.append(p)
elif p.required:
printerrlog('=' * 80 + 'REQUIRED process [%s] has died!\n%s\nInitiating shutdown!\n' % (p.name, exit_code_str) + '=' * 80)
self.is_shutdown = True
else:
if p.exit_code:
printerrlog('[%s] %s' % (p.name, exit_code_str))
else:
printlog_bold('[%s] %s' % (p.name, exit_code_str))
dead.append(p)
# no need for lock as we require listeners be
# added before process monitor is launched
for l in self.listeners:
l.process_died(p.name, p.exit_code)
except Exception:
traceback.print_exc()
# don't respawn as this is an internal error
dead.append(p)
if self.is_shutdown:
break # stop polling
for d in dead:
try:
self.unregister(d)
# stop process, don't accumulate errors
d.stop([])
# save process data to dead list
with plock:
self.dead_list.append(DeadProcess(d))
except Exception as e:
printerrlog('Exception: %s' % (str(e)))
# dead check is to make sure that ProcessMonitor at least
            # waits until it's had at least one process before exiting
if self._registrations_complete and dead and not self.procs and not respawn:
printlog('all processes on machine have died, roslaunch will exit')
self.is_shutdown = True
del dead[:]
for r in respawn:
try:
if self.is_shutdown:
break
printlog('[%s] restarting process' % r.name)
# stop process, don't accumulate errors
r.stop([])
r.start()
except Exception:
traceback.print_exc()
del respawn[:]
time.sleep(0.1) # yield thread
# moved this to finally block of _post_run
# self._post_run() #kill all processes
def _post_run(self):
# this is already true entering, but go ahead and make sure
self.is_shutdown = True
# killall processes on run exit
q = Queue()
q.join()
with self.plock:
# make copy of core_procs for threadsafe usage
core_procs = self.core_procs[:]
# enqueue all non-core procs in reverse order for parallel kill
# #526/885: ignore core procs
            for p in reversed(self.procs):
                if p not in core_procs:
                    q.put(p)
# use 10 workers
killers = []
for i in range(10):
t = _ProcessKiller(q, i)
killers.append(t)
t.start()
# wait for workers to finish
q.join()
shutdown_errors = []
# accumulate all the shutdown errors
for t in killers:
shutdown_errors.extend(t.errors)
del killers[:]
# #526/885: kill core procs last
# we don't want to parallelize this as the master has to be last
for p in reversed(core_procs):
_kill_process(p, shutdown_errors)
# delete everything except dead_list
with self.plock:
del core_procs[:]
del self.procs[:]
del self.core_procs[:]
self.done = True
if shutdown_errors:
printerrlog('Shutdown errors:\n' + '\n'.join([' * %s' % e for e in shutdown_errors]))
def _kill_process(p, errors):
"""
    Routine for killing Process p, with appropriate logging to screen and logfile
@param p: process to kill
@type p: Process
@param errors: list of error messages from killed process
@type errors: [str]
"""
try:
printlog('[%s] killing on exit' % p.name)
# we accumulate errors from each process so that we can print these at the end
p.stop(errors)
except Exception as e:
printerrlog('Exception: %s' % (str(e)))
class _ProcessKiller(Thread):
def __init__(self, q, i):
Thread.__init__(self, name='ProcessKiller-%s' % i)
self.q = q
self.errors = []
def run(self):
q = self.q
while not q.empty():
try:
p = q.get(False)
_kill_process(p, self.errors)
q.task_done()
except Empty:
pass
|
from flask import Flask, jsonify
from flasgger import Swagger
from flasgger_package import package_view
app = Flask(__name__)
swag = Swagger(app)
app.add_url_rule(
'/v1/decorated/<username>',
view_func=package_view
)
@app.route('/v2/decorated/<username>')
def package_view_2(username):
"""
This is the summary defined in yaml file
    The first line is the summary.
    All following lines until the hyphens are added to the description;
    the lines above the three hyphens do not need to be YAML compliant,
    but everything below the three hyphens should be.
---
tags:
- users
import: "flasgger_package/parameters.yml"
responses:
200:
description: A single user item
schema:
id: rec_username
properties:
username:
type: string
description: The name of the user
default: 'steve-harris 2'
"""
return jsonify({'username': username})
def test_swag(client, specs_data):
"""
    This test runs automatically in Travis CI.
:param client: Flask app test client
:param specs_data: {'url': {swag_specs}} for every spec in app
"""
for url, spec in specs_data.items():
assert 'rec_username' in spec['definitions']
assert 'users' in spec['paths'][
'/v1/decorated/{username}'
]['get']['tags']
if __name__ == "__main__":
app.run(debug=True)
|
import logging
from tesla_powerwall import MissingAttributeError, Powerwall, PowerwallUnreachableError
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_IP_ADDRESS
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({vol.Required(CONF_IP_ADDRESS): str})
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
power_wall = Powerwall(data[CONF_IP_ADDRESS])
try:
await hass.async_add_executor_job(power_wall.detect_and_pin_version)
site_info = await hass.async_add_executor_job(power_wall.get_site_info)
except PowerwallUnreachableError as err:
raise CannotConnect from err
except MissingAttributeError as err:
# Only log the exception without the traceback
_LOGGER.error(str(err))
raise WrongVersion from err
# Return info that you want to store in the config entry.
return {"title": site_info.site_name}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Tesla Powerwall."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except WrongVersion:
errors["base"] = "wrong_version"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(user_input[CONF_IP_ADDRESS])
self._abort_if_unique_id_configured()
return self.async_create_entry(title=info["title"], data=user_input)
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input):
"""Handle import."""
await self.async_set_unique_id(user_input[CONF_IP_ADDRESS])
self._abort_if_unique_id_configured()
return await self.async_step_user(user_input)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class WrongVersion(exceptions.HomeAssistantError):
"""Error to indicate the powerwall uses a software version we cannot interact with."""
|
import re
import voluptuous as vol
from homeassistant.const import CONF_NAME
from .const import DEFAULT_NAME
# Regex for address validation
PATTERN_ADDRESS = re.compile(
"^((?P<conn_id>\\w+)\\.)?s?(?P<seg_id>\\d+)\\.(?P<type>m|g)?(?P<id>\\d+)$"
)
def get_connection(connections, connection_id=None):
"""Return the connection object from list."""
if connection_id is None:
connection = connections[0]
else:
for connection in connections:
if connection.connection_id == connection_id:
break
else:
raise ValueError("Unknown connection_id.")
return connection
def has_unique_connection_names(connections):
"""Validate that all connection names are unique.
    Use 'pchk' as the default connection_name (or add a numeric suffix if
    'pchk' is already in use).
"""
for suffix, connection in enumerate(connections):
connection_name = connection.get(CONF_NAME)
if connection_name is None:
if suffix == 0:
connection[CONF_NAME] = DEFAULT_NAME
else:
connection[CONF_NAME] = f"{DEFAULT_NAME}{suffix:d}"
schema = vol.Schema(vol.Unique())
schema([connection.get(CONF_NAME) for connection in connections])
return connections
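# For illustration, three unnamed connections would be assigned
# DEFAULT_NAME, f"{DEFAULT_NAME}1" and f"{DEFAULT_NAME}2" before the
# uniqueness check runs.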
def is_address(value):
"""Validate the given address string.
Examples for S000M005 at myhome:
myhome.s000.m005
myhome.s0.m5
myhome.0.5 ("m" is implicit if missing)
    Examples for S000G011 at myhome:
myhome.0.g11
myhome.s0.g11
"""
matcher = PATTERN_ADDRESS.match(value)
if matcher:
is_group = matcher.group("type") == "g"
addr = (int(matcher.group("seg_id")), int(matcher.group("id")), is_group)
conn_id = matcher.group("conn_id")
return addr, conn_id
raise vol.error.Invalid("Not a valid address string.")
def is_relays_states_string(states_string):
"""Validate the given states string and return states list."""
if len(states_string) == 8:
states = []
for state_string in states_string:
if state_string == "1":
state = "ON"
elif state_string == "0":
state = "OFF"
elif state_string == "T":
state = "TOGGLE"
elif state_string == "-":
state = "NOCHANGE"
else:
raise vol.error.Invalid("Not a valid relay state string.")
states.append(state)
return states
raise vol.error.Invalid("Wrong length of relay state string.")
def is_key_lock_states_string(states_string):
"""Validate the given states string and returns states list."""
if len(states_string) == 8:
states = []
for state_string in states_string:
if state_string == "1":
state = "ON"
elif state_string == "0":
state = "OFF"
elif state_string == "T":
state = "TOGGLE"
elif state_string == "-":
state = "NOCHANGE"
else:
raise vol.error.Invalid("Not a valid key lock state string.")
states.append(state)
return states
raise vol.error.Invalid("Wrong length of key lock state string.")
|
import numpy as np
import pytest
import xarray as xr
from xarray.core import dtypes, merge
from xarray.core.merge import MergeError
from xarray.testing import assert_identical
from . import raises_regex
from .test_dataset import create_test_data
class TestMergeInternals:
def test_broadcast_dimension_size(self):
actual = merge.broadcast_dimension_size(
[xr.Variable("x", [1]), xr.Variable("y", [2, 1])]
)
assert actual == {"x": 1, "y": 2}
actual = merge.broadcast_dimension_size(
[xr.Variable(("x", "y"), [[1, 2]]), xr.Variable("y", [2, 1])]
)
assert actual == {"x": 1, "y": 2}
with pytest.raises(ValueError):
merge.broadcast_dimension_size(
[xr.Variable(("x", "y"), [[1, 2]]), xr.Variable("y", [2])]
)
class TestMergeFunction:
def test_merge_arrays(self):
data = create_test_data()
actual = xr.merge([data.var1, data.var2])
expected = data[["var1", "var2"]]
assert actual.identical(expected)
def test_merge_datasets(self):
data = create_test_data()
actual = xr.merge([data[["var1"]], data[["var2"]]])
expected = data[["var1", "var2"]]
assert actual.identical(expected)
actual = xr.merge([data, data])
assert actual.identical(data)
def test_merge_dataarray_unnamed(self):
data = xr.DataArray([1, 2], dims="x")
with raises_regex(ValueError, "without providing an explicit name"):
xr.merge([data])
def test_merge_arrays_attrs_default(self):
var1_attrs = {"a": 1, "b": 2}
var2_attrs = {"a": 1, "c": 3}
expected_attrs = {}
data = create_test_data()
data.var1.attrs = var1_attrs
data.var2.attrs = var2_attrs
actual = xr.merge([data.var1, data.var2])
expected = data[["var1", "var2"]]
expected.attrs = expected_attrs
assert actual.identical(expected)
@pytest.mark.parametrize(
"combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception",
[
(
"no_conflicts",
{"a": 1, "b": 2},
{"a": 1, "c": 3},
{"a": 1, "b": 2, "c": 3},
False,
),
("no_conflicts", {"a": 1, "b": 2}, {}, {"a": 1, "b": 2}, False),
("no_conflicts", {}, {"a": 1, "c": 3}, {"a": 1, "c": 3}, False),
(
"no_conflicts",
{"a": 1, "b": 2},
{"a": 4, "c": 3},
{"a": 1, "b": 2, "c": 3},
True,
),
("drop", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {}, False),
("identical", {"a": 1, "b": 2}, {"a": 1, "b": 2}, {"a": 1, "b": 2}, False),
("identical", {"a": 1, "b": 2}, {"a": 1, "c": 3}, {"a": 1, "b": 2}, True),
(
"override",
{"a": 1, "b": 2},
{"a": 4, "b": 5, "c": 3},
{"a": 1, "b": 2},
False,
),
],
)
def test_merge_arrays_attrs(
self, combine_attrs, var1_attrs, var2_attrs, expected_attrs, expect_exception
):
data = create_test_data()
data.var1.attrs = var1_attrs
data.var2.attrs = var2_attrs
if expect_exception:
with raises_regex(MergeError, "combine_attrs"):
actual = xr.merge([data.var1, data.var2], combine_attrs=combine_attrs)
else:
actual = xr.merge([data.var1, data.var2], combine_attrs=combine_attrs)
expected = data[["var1", "var2"]]
expected.attrs = expected_attrs
assert actual.identical(expected)
def test_merge_attrs_override_copy(self):
ds1 = xr.Dataset(attrs={"x": 0})
ds2 = xr.Dataset(attrs={"x": 1})
ds3 = xr.merge([ds1, ds2], combine_attrs="override")
ds3.attrs["x"] = 2
assert ds1.x == 0
def test_merge_dicts_simple(self):
actual = xr.merge([{"foo": 0}, {"bar": "one"}, {"baz": 3.5}])
expected = xr.Dataset({"foo": 0, "bar": "one", "baz": 3.5})
assert actual.identical(expected)
def test_merge_dicts_dims(self):
actual = xr.merge([{"y": ("x", [13])}, {"x": [12]}])
expected = xr.Dataset({"x": [12], "y": ("x", [13])})
assert actual.identical(expected)
def test_merge_error(self):
ds = xr.Dataset({"x": 0})
with pytest.raises(xr.MergeError):
xr.merge([ds, ds + 1])
def test_merge_alignment_error(self):
ds = xr.Dataset(coords={"x": [1, 2]})
other = xr.Dataset(coords={"x": [2, 3]})
with raises_regex(ValueError, "indexes .* not equal"):
xr.merge([ds, other], join="exact")
def test_merge_wrong_input_error(self):
with raises_regex(TypeError, "objects must be an iterable"):
xr.merge([1])
ds = xr.Dataset(coords={"x": [1, 2]})
with raises_regex(TypeError, "objects must be an iterable"):
xr.merge({"a": ds})
with raises_regex(TypeError, "objects must be an iterable"):
xr.merge([ds, 1])
def test_merge_no_conflicts_single_var(self):
ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
ds2 = xr.Dataset({"a": ("x", [2, 3]), "x": [1, 2]})
expected = xr.Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]})
assert expected.identical(xr.merge([ds1, ds2], compat="no_conflicts"))
assert expected.identical(xr.merge([ds2, ds1], compat="no_conflicts"))
assert ds1.identical(xr.merge([ds1, ds2], compat="no_conflicts", join="left"))
assert ds2.identical(xr.merge([ds1, ds2], compat="no_conflicts", join="right"))
expected = xr.Dataset({"a": ("x", [2]), "x": [1]})
assert expected.identical(
xr.merge([ds1, ds2], compat="no_conflicts", join="inner")
)
with pytest.raises(xr.MergeError):
ds3 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]})
xr.merge([ds1, ds3], compat="no_conflicts")
with pytest.raises(xr.MergeError):
ds3 = xr.Dataset({"a": ("y", [2, 3]), "y": [1, 2]})
xr.merge([ds1, ds3], compat="no_conflicts")
def test_merge_no_conflicts_multi_var(self):
data = create_test_data()
data1 = data.copy(deep=True)
data2 = data.copy(deep=True)
expected = data[["var1", "var2"]]
actual = xr.merge([data1.var1, data2.var2], compat="no_conflicts")
assert expected.identical(actual)
data1["var1"][:, :5] = np.nan
data2["var1"][:, 5:] = np.nan
data1["var2"][:4, :] = np.nan
data2["var2"][4:, :] = np.nan
del data2["var3"]
actual = xr.merge([data1, data2], compat="no_conflicts")
assert data.equals(actual)
def test_merge_no_conflicts_preserve_attrs(self):
data = xr.Dataset({"x": ([], 0, {"foo": "bar"})})
actual = xr.merge([data, data])
assert data.identical(actual)
def test_merge_no_conflicts_broadcast(self):
datasets = [xr.Dataset({"x": ("y", [0])}), xr.Dataset({"x": np.nan})]
actual = xr.merge(datasets)
expected = xr.Dataset({"x": ("y", [0])})
assert expected.identical(actual)
datasets = [xr.Dataset({"x": ("y", [np.nan])}), xr.Dataset({"x": 0})]
actual = xr.merge(datasets)
assert expected.identical(actual)
class TestMergeMethod:
def test_merge(self):
data = create_test_data()
ds1 = data[["var1"]]
ds2 = data[["var3"]]
expected = data[["var1", "var3"]]
actual = ds1.merge(ds2)
assert expected.identical(actual)
actual = ds2.merge(ds1)
assert expected.identical(actual)
actual = data.merge(data)
assert data.identical(actual)
actual = data.reset_coords(drop=True).merge(data)
assert data.identical(actual)
actual = data.merge(data.reset_coords(drop=True))
assert data.identical(actual)
with pytest.raises(ValueError):
ds1.merge(ds2.rename({"var3": "var1"}))
with raises_regex(ValueError, "should be coordinates or not"):
data.reset_coords().merge(data)
with raises_regex(ValueError, "should be coordinates or not"):
data.merge(data.reset_coords())
def test_merge_broadcast_equals(self):
ds1 = xr.Dataset({"x": 0})
ds2 = xr.Dataset({"x": ("y", [0, 0])})
actual = ds1.merge(ds2)
assert ds2.identical(actual)
actual = ds2.merge(ds1)
assert ds2.identical(actual)
actual = ds1.copy()
actual.update(ds2)
assert ds2.identical(actual)
ds1 = xr.Dataset({"x": np.nan})
ds2 = xr.Dataset({"x": ("y", [np.nan, np.nan])})
actual = ds1.merge(ds2)
assert ds2.identical(actual)
def test_merge_compat(self):
ds1 = xr.Dataset({"x": 0})
ds2 = xr.Dataset({"x": 1})
for compat in ["broadcast_equals", "equals", "identical", "no_conflicts"]:
with pytest.raises(xr.MergeError):
ds1.merge(ds2, compat=compat)
ds2 = xr.Dataset({"x": [0, 0]})
for compat in ["equals", "identical"]:
with raises_regex(ValueError, "should be coordinates or not"):
ds1.merge(ds2, compat=compat)
ds2 = xr.Dataset({"x": ((), 0, {"foo": "bar"})})
with pytest.raises(xr.MergeError):
ds1.merge(ds2, compat="identical")
with raises_regex(ValueError, "compat=.* invalid"):
ds1.merge(ds2, compat="foobar")
assert ds1.identical(ds1.merge(ds2, compat="override"))
def test_merge_auto_align(self):
ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
ds2 = xr.Dataset({"b": ("x", [3, 4]), "x": [1, 2]})
expected = xr.Dataset(
{"a": ("x", [1, 2, np.nan]), "b": ("x", [np.nan, 3, 4])}, {"x": [0, 1, 2]}
)
assert expected.identical(ds1.merge(ds2))
assert expected.identical(ds2.merge(ds1))
expected = expected.isel(x=slice(2))
assert expected.identical(ds1.merge(ds2, join="left"))
assert expected.identical(ds2.merge(ds1, join="right"))
expected = expected.isel(x=slice(1, 2))
assert expected.identical(ds1.merge(ds2, join="inner"))
assert expected.identical(ds2.merge(ds1, join="inner"))
@pytest.mark.parametrize("fill_value", [dtypes.NA, 2, 2.0, {"a": 2, "b": 1}])
def test_merge_fill_value(self, fill_value):
ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
ds2 = xr.Dataset({"b": ("x", [3, 4]), "x": [1, 2]})
if fill_value == dtypes.NA:
# if we supply the default, we expect the missing value for a
# float array
fill_value_a = fill_value_b = np.nan
elif isinstance(fill_value, dict):
fill_value_a = fill_value["a"]
fill_value_b = fill_value["b"]
else:
fill_value_a = fill_value_b = fill_value
expected = xr.Dataset(
{"a": ("x", [1, 2, fill_value_a]), "b": ("x", [fill_value_b, 3, 4])},
{"x": [0, 1, 2]},
)
assert expected.identical(ds1.merge(ds2, fill_value=fill_value))
assert expected.identical(ds2.merge(ds1, fill_value=fill_value))
assert expected.identical(xr.merge([ds1, ds2], fill_value=fill_value))
def test_merge_no_conflicts(self):
ds1 = xr.Dataset({"a": ("x", [1, 2]), "x": [0, 1]})
ds2 = xr.Dataset({"a": ("x", [2, 3]), "x": [1, 2]})
expected = xr.Dataset({"a": ("x", [1, 2, 3]), "x": [0, 1, 2]})
assert expected.identical(ds1.merge(ds2, compat="no_conflicts"))
assert expected.identical(ds2.merge(ds1, compat="no_conflicts"))
assert ds1.identical(ds1.merge(ds2, compat="no_conflicts", join="left"))
assert ds2.identical(ds1.merge(ds2, compat="no_conflicts", join="right"))
expected2 = xr.Dataset({"a": ("x", [2]), "x": [1]})
assert expected2.identical(ds1.merge(ds2, compat="no_conflicts", join="inner"))
with pytest.raises(xr.MergeError):
ds3 = xr.Dataset({"a": ("x", [99, 3]), "x": [1, 2]})
ds1.merge(ds3, compat="no_conflicts")
with pytest.raises(xr.MergeError):
ds3 = xr.Dataset({"a": ("y", [2, 3]), "y": [1, 2]})
ds1.merge(ds3, compat="no_conflicts")
def test_merge_dataarray(self):
ds = xr.Dataset({"a": 0})
da = xr.DataArray(data=1, name="b")
assert_identical(ds.merge(da), xr.merge([ds, da]))
|
import functools
import urllib.parse
from typing import MutableMapping
from PyQt5.QtCore import pyqtSignal, QObject, QTimer
from PyQt5.QtNetwork import (QNetworkAccessManager, QNetworkRequest,
QNetworkReply)
class HTTPRequest(QNetworkRequest):
"""A QNetworkRquest that follows (secure) redirects by default."""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.setAttribute(QNetworkRequest.RedirectPolicyAttribute,
QNetworkRequest.NoLessSafeRedirectPolicy)
class HTTPClient(QObject):
"""An HTTP client based on QNetworkAccessManager.
Intended for APIs, automatically decodes data.
Attributes:
_nam: The QNetworkAccessManager used.
_timers: A {QNetworkReply: QTimer} dict.
Signals:
success: Emitted when the operation succeeded.
arg: The received data.
error: Emitted when the request failed.
arg: The error message, as string.
"""
success = pyqtSignal(str)
error = pyqtSignal(str)
def __init__(self, parent=None):
super().__init__(parent)
self._nam = QNetworkAccessManager(self)
self._timers: MutableMapping[QNetworkReply, QTimer] = {}
def post(self, url, data=None):
"""Create a new POST request.
Args:
url: The URL to post to, as QUrl.
data: A dict of data to send.
"""
if data is None:
data = {}
encoded_data = urllib.parse.urlencode(data).encode('utf-8')
request = HTTPRequest(url)
request.setHeader(QNetworkRequest.ContentTypeHeader,
'application/x-www-form-urlencoded;charset=utf-8')
reply = self._nam.post(request, encoded_data)
self._handle_reply(reply)
def get(self, url):
"""Create a new GET request.
Emits success/error when done.
Args:
url: The URL to access, as QUrl.
"""
request = HTTPRequest(url)
reply = self._nam.get(request)
self._handle_reply(reply)
def _handle_reply(self, reply):
"""Handle a new QNetworkReply."""
if reply.isFinished():
self.on_reply_finished(reply)
else:
timer = QTimer(self)
timer.setInterval(10000)
timer.timeout.connect(reply.abort)
timer.start()
self._timers[reply] = timer
reply.finished.connect(functools.partial(
self.on_reply_finished, reply))
def on_reply_finished(self, reply):
"""Read the data and finish when the reply finished.
Args:
reply: The QNetworkReply which finished.
"""
        timer = self._timers.pop(reply, None)
if timer is not None:
timer.stop()
timer.deleteLater()
if reply.error() != QNetworkReply.NoError:
self.error.emit(reply.errorString())
return
try:
data = bytes(reply.readAll()).decode('utf-8')
except UnicodeDecodeError:
self.error.emit("Invalid UTF-8 data received in reply!")
return
self.success.emit(data)
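# Minimal usage sketch (not part of the original module): shows how the
# success/error signals documented on HTTPClient might be wired up. It assumes
# a running Qt event loop and that the example URL is reachable.
if __name__ == '__main__':
    import sys
    from PyQt5.QtCore import QCoreApplication, QUrl
    app = QCoreApplication(sys.argv)
    client = HTTPClient()
    client.success.connect(lambda data: (print(data), app.quit()))
    client.error.connect(lambda msg: (print('Error:', msg), app.quit()))
    client.get(QUrl('https://example.com/'))
    sys.exit(app.exec_())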
|
import logging
from datadog import initialize, statsd
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
CONF_PREFIX,
EVENT_LOGBOOK_ENTRY,
EVENT_STATE_CHANGED,
STATE_UNKNOWN,
)
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_RATE = "rate"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 8125
DEFAULT_PREFIX = "hass"
DEFAULT_RATE = 1
DOMAIN = "datadog"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PREFIX, default=DEFAULT_PREFIX): cv.string,
vol.Optional(CONF_RATE, default=DEFAULT_RATE): vol.All(
vol.Coerce(int), vol.Range(min=1)
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
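# Illustrative configuration.yaml snippet accepted by CONFIG_SCHEMA above
# (the values shown are the defaults defined in this module):
#
#   datadog:
#     host: localhost
#     port: 8125
#     prefix: hass
#     rate: 1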
def setup(hass, config):
"""Set up the Datadog component."""
conf = config[DOMAIN]
host = conf[CONF_HOST]
port = conf[CONF_PORT]
sample_rate = conf[CONF_RATE]
prefix = conf[CONF_PREFIX]
initialize(statsd_host=host, statsd_port=port)
def logbook_entry_listener(event):
"""Listen for logbook entries and send them as events."""
name = event.data.get("name")
message = event.data.get("message")
statsd.event(
title="Home Assistant",
text=f"%%% \n **{name}** {message} \n %%%",
tags=[
f"entity:{event.data.get('entity_id')}",
f"domain:{event.data.get('domain')}",
],
)
_LOGGER.debug("Sent event %s", event.data.get("entity_id"))
def state_changed_listener(event):
"""Listen for new messages on the bus and sends them to Datadog."""
state = event.data.get("new_state")
if state is None or state.state == STATE_UNKNOWN:
return
states = dict(state.attributes)
metric = f"{prefix}.{state.domain}"
tags = [f"entity:{state.entity_id}"]
for key, value in states.items():
if isinstance(value, (float, int)):
attribute = f"{metric}.{key.replace(' ', '_')}"
value = int(value) if isinstance(value, bool) else value
statsd.gauge(attribute, value, sample_rate=sample_rate, tags=tags)
_LOGGER.debug("Sent metric %s: %s (tags: %s)", attribute, value, tags)
try:
value = state_helper.state_as_number(state)
except ValueError:
_LOGGER.debug("Error sending %s: %s (tags: %s)", metric, state.state, tags)
return
statsd.gauge(metric, value, sample_rate=sample_rate, tags=tags)
_LOGGER.debug("Sent metric %s: %s (tags: %s)", metric, value, tags)
hass.bus.listen(EVENT_LOGBOOK_ENTRY, logbook_entry_listener)
hass.bus.listen(EVENT_STATE_CHANGED, state_changed_listener)
return True
|
import json
import logging
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import managed_memory_store
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import aws_network
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
REDIS_VERSION_MAPPING = {'redis_3_2': '3.2.10',
'redis_4_0': '4.0.10'}
class ElastiCacheRedis(managed_memory_store.BaseManagedMemoryStore):
"""Object representing a AWS Elasticache redis instance."""
CLOUD = aws.CLOUD
MEMORY_STORE = managed_memory_store.REDIS
def __init__(self, spec):
super(ElastiCacheRedis, self).__init__(spec)
self.subnet_group_name = 'subnet-%s' % self.name
self.version = REDIS_VERSION_MAPPING[spec.config.cloud_redis.redis_version]
self.node_type = FLAGS.cache_node_type
self.redis_region = FLAGS.redis_region
self.failover_zone = FLAGS.aws_elasticache_failover_zone
self.failover_subnet = None
self.failover_style = FLAGS.redis_failover_style
@staticmethod
def CheckPrerequisites(benchmark_config):
if (FLAGS.managed_memory_store_version and
FLAGS.managed_memory_store_version not in
managed_memory_store.REDIS_VERSIONS):
raise errors.Config.InvalidValue('Invalid Redis version.')
if FLAGS.redis_failover_style in [
managed_memory_store.Failover.FAILOVER_NONE,
managed_memory_store.Failover.FAILOVER_SAME_ZONE]:
if FLAGS.aws_elasticache_failover_zone:
raise errors.Config.InvalidValue(
'The aws_elasticache_failover_zone flag is ignored. '
'There is no need for a failover zone when there is no failover. '
'Same zone failover will fail over to the same zone.')
else:
if (not FLAGS.aws_elasticache_failover_zone or
FLAGS.aws_elasticache_failover_zone[:-1] != FLAGS.redis_region):
raise errors.Config.InvalidValue(
'Invalid failover zone. '
'A failover zone in %s must be specified. ' % FLAGS.redis_region)
def GetResourceMetadata(self):
"""Returns a dict containing metadata about the instance.
Returns:
dict mapping string property key to value.
"""
result = {
'cloud_redis_failover_style': self.failover_style,
'cloud_redis_version': self.version,
'cloud_redis_node_type': self.node_type,
'cloud_redis_region': self.redis_region,
'cloud_redis_primary_zone': self.spec.vms[0].zone,
'cloud_redis_failover_zone': self.failover_zone,
}
return result
def _CreateDependencies(self):
"""Create the subnet dependencies."""
subnet_id = self.spec.vms[0].network.subnet.id
cmd = ['aws', 'elasticache', 'create-cache-subnet-group',
'--region', self.redis_region,
'--cache-subnet-group-name', self.subnet_group_name,
'--cache-subnet-group-description', '"PKB redis benchmark subnet"',
'--subnet-ids', subnet_id]
if self.failover_style == (
managed_memory_store.Failover.FAILOVER_SAME_REGION):
regional_network = self.spec.vms[0].network.regional_network
vpc_id = regional_network.vpc.id
cidr = regional_network.vpc.NextSubnetCidrBlock()
self.failover_subnet = aws_network.AwsSubnet(
self.failover_zone, vpc_id, cidr_block=cidr)
self.failover_subnet.Create()
cmd += [self.failover_subnet.id]
vm_util.IssueCommand(cmd)
def _DeleteDependencies(self):
"""Delete the subnet dependencies."""
cmd = ['aws', 'elasticache', 'delete-cache-subnet-group',
'--region=%s' % self.redis_region,
'--cache-subnet-group-name=%s' % self.subnet_group_name]
vm_util.IssueCommand(cmd, raise_on_failure=False)
if self.failover_subnet:
self.failover_subnet.Delete()
def _Create(self):
"""Creates the cluster."""
cmd = ['aws', 'elasticache', 'create-replication-group',
'--engine', 'redis',
'--engine-version', self.version,
'--replication-group-id', self.name,
'--replication-group-description', self.name,
'--region', self.redis_region,
'--cache-node-type', self.node_type,
'--cache-subnet-group-name', self.subnet_group_name,
'--preferred-cache-cluster-a-zs', self.spec.vms[0].zone]
if self.failover_style == managed_memory_store.Failover.FAILOVER_SAME_REGION:
cmd += [self.failover_zone]
elif self.failover_style == managed_memory_store.Failover.FAILOVER_SAME_ZONE:
cmd += [self.spec.vms[0].zone]
if self.failover_style != managed_memory_store.Failover.FAILOVER_NONE:
cmd += ['--automatic-failover-enabled',
'--num-cache-clusters', '2']
cmd += ['--tags']
cmd += util.MakeFormattedDefaultTags()
_, stderr, _ = vm_util.IssueCommand(cmd, raise_on_failure=False)
if 'InsufficientCacheClusterCapacity' in stderr:
raise errors.Benchmarks.InsufficientCapacityCloudFailure(stderr)
def _Delete(self):
"""Deletes the cluster."""
cmd = ['aws', 'elasticache', 'delete-replication-group',
'--region', self.redis_region,
'--replication-group-id', self.name]
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _IsDeleting(self):
"""Returns True if cluster is being deleted and false otherwise."""
cluster_info = self.DescribeInstance()
return cluster_info.get('Status', '') == 'deleting'
def _IsReady(self):
"""Returns True if cluster is ready and false otherwise."""
cluster_info = self.DescribeInstance()
return cluster_info.get('Status', '') == 'available'
def _Exists(self):
"""Returns true if the cluster exists and is not being deleted."""
cluster_info = self.DescribeInstance()
return ('Status' in cluster_info and
cluster_info['Status'] not in ['deleting', 'create-failed'])
def DescribeInstance(self):
"""Calls describe on cluster.
Returns:
dict mapping string cluster_info property key to value.
"""
cmd = ['aws', 'elasticache', 'describe-replication-groups',
'--region', self.redis_region,
'--replication-group-id', self.name]
stdout, stderr, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
logging.info('Could not find cluster %s, %s', self.name, stderr)
return {}
for cluster_info in json.loads(stdout)['ReplicationGroups']:
if cluster_info['ReplicationGroupId'] == self.name:
return cluster_info
return {}
@vm_util.Retry(max_retries=5)
def _PopulateEndpoint(self):
"""Populates address and port information from cluster_info.
Raises:
errors.Resource.RetryableGetError:
Failed to retrieve information on cluster
"""
cluster_info = self.DescribeInstance()
if not cluster_info:
raise errors.Resource.RetryableGetError(
'Failed to retrieve information on %s', self.name)
primary_endpoint = cluster_info['NodeGroups'][0]['PrimaryEndpoint']
self._ip = primary_endpoint['Address']
self._port = primary_endpoint['Port']
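# Illustrative (abbreviated) shape of the `describe-replication-groups` output
# that DescribeInstance() and _PopulateEndpoint() parse above; the field names
# are taken from the lookups in this module, everything else is omitted:
#
#   {"ReplicationGroups": [{"ReplicationGroupId": "<name>",
#                           "Status": "available",
#                           "NodeGroups": [{"PrimaryEndpoint":
#                                           {"Address": "...", "Port": 6379}}]}]}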
|
import pypck
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import CONF_ADDRESS
from . import LcnDevice
from .const import BINSENSOR_PORTS, CONF_CONNECTIONS, CONF_SOURCE, DATA_LCN, SETPOINTS
from .helpers import get_connection
async def async_setup_platform(
hass, hass_config, async_add_entities, discovery_info=None
):
"""Set up the LCN binary sensor platform."""
if discovery_info is None:
return
devices = []
for config in discovery_info:
address, connection_id = config[CONF_ADDRESS]
addr = pypck.lcn_addr.LcnAddr(*address)
connections = hass.data[DATA_LCN][CONF_CONNECTIONS]
connection = get_connection(connections, connection_id)
address_connection = connection.get_address_conn(addr)
if config[CONF_SOURCE] in SETPOINTS:
device = LcnRegulatorLockSensor(config, address_connection)
elif config[CONF_SOURCE] in BINSENSOR_PORTS:
device = LcnBinarySensor(config, address_connection)
else: # in KEYS
device = LcnLockKeysSensor(config, address_connection)
devices.append(device)
async_add_entities(devices)
class LcnRegulatorLockSensor(LcnDevice, BinarySensorEntity):
"""Representation of a LCN binary sensor for regulator locks."""
def __init__(self, config, address_connection):
"""Initialize the LCN binary sensor."""
super().__init__(config, address_connection)
self.setpoint_variable = pypck.lcn_defs.Var[config[CONF_SOURCE]]
self._value = None
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(
self.setpoint_variable
)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._value
def input_received(self, input_obj):
"""Set sensor value when LCN input object (command) is received."""
if (
not isinstance(input_obj, pypck.inputs.ModStatusVar)
or input_obj.get_var() != self.setpoint_variable
):
return
self._value = input_obj.get_value().is_locked_regulator()
self.async_write_ha_state()
class LcnBinarySensor(LcnDevice, BinarySensorEntity):
"""Representation of a LCN binary sensor for binary sensor ports."""
def __init__(self, config, address_connection):
"""Initialize the LCN binary sensor."""
super().__init__(config, address_connection)
self.bin_sensor_port = pypck.lcn_defs.BinSensorPort[config[CONF_SOURCE]]
self._value = None
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(
self.bin_sensor_port
)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._value
def input_received(self, input_obj):
"""Set sensor value when LCN input object (command) is received."""
if not isinstance(input_obj, pypck.inputs.ModStatusBinSensors):
return
self._value = input_obj.get_state(self.bin_sensor_port.value)
self.async_write_ha_state()
class LcnLockKeysSensor(LcnDevice, BinarySensorEntity):
"""Representation of a LCN sensor for key locks."""
def __init__(self, config, address_connection):
"""Initialize the LCN sensor."""
super().__init__(config, address_connection)
self.source = pypck.lcn_defs.Key[config[CONF_SOURCE]]
self._value = None
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
await super().async_added_to_hass()
await self.address_connection.activate_status_request_handler(self.source)
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._value
def input_received(self, input_obj):
"""Set sensor value when LCN input object (command) is received."""
if (
not isinstance(input_obj, pypck.inputs.ModStatusKeyLocks)
or self.source not in pypck.lcn_defs.Key
):
return
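        # Key names look like "A1": the leading letter selects the key table
        # (ord("A") == 65, so "A" -> table 0) and the digit selects the key
        # within that table (1-based in the name, 0-based for get_state).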
table_id = ord(self.source.name[0]) - 65
key_id = int(self.source.name[1]) - 1
self._value = input_obj.get_state(table_id, key_id)
self.async_write_ha_state()
|
from homeassistant.helpers.entity import Entity
from . import DOMAIN, SENSORS
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Nextcloud sensors."""
if discovery_info is None:
return
sensors = []
for name in hass.data[DOMAIN]:
if name in SENSORS:
sensors.append(NextcloudSensor(name))
add_entities(sensors, True)
class NextcloudSensor(Entity):
"""Represents a Nextcloud sensor."""
def __init__(self, item):
"""Initialize the Nextcloud sensor."""
self._name = item
self._state = None
@property
def icon(self):
"""Return the icon for this sensor."""
return "mdi:cloud"
@property
def name(self):
"""Return the name for this sensor."""
return self._name
@property
def state(self):
"""Return the state for this sensor."""
return self._state
@property
def unique_id(self):
"""Return the unique ID for this sensor."""
return f"{self.hass.data[DOMAIN]['instance']}#{self._name}"
def update(self):
"""Update the sensor."""
self._state = self.hass.data[DOMAIN][self._name]
|
import sys
from distutils.version import LooseVersion
from os import path
import pandas as pd
import pytest
from bson.binary import Binary
from bson.objectid import ObjectId
from mock import create_autospec, sentinel, Mock, call
from six.moves import cPickle
from arctic._compression import compress, compressHC
from arctic.exceptions import UnsupportedPickleStoreVersion
from arctic.store._pickle_store import PickleStore
from arctic.store._version_store_utils import checksum
PANDAS_VERSION = LooseVersion(pd.__version__)
def test_write():
self = create_autospec(PickleStore)
version = {}
PickleStore.write(self, sentinel.arctic_lib, version, sentinel.symbol, 'item', sentinel.previous_version)
assert version['data'] == 'item'
def test_write_object():
arctic_lib = Mock()
self = create_autospec(PickleStore)
version = {'_id': ObjectId()}
PickleStore.write(self, arctic_lib, version, 'sentinel.symbol', sentinel.item, sentinel.previous_version)
assert 'data' not in version
assert version['blob'] == '__chunked__V2'
coll = arctic_lib.get_top_level_collection.return_value
assert coll.update_one.call_args_list == [call({'sha': checksum('sentinel.symbol', {'segment': 0, 'data': Binary(compress(cPickle.dumps(sentinel.item, cPickle.HIGHEST_PROTOCOL)))}),
'symbol': 'sentinel.symbol'},
{'$set': {'segment': 0, 'data': Binary(compress(cPickle.dumps(sentinel.item, cPickle.HIGHEST_PROTOCOL)), 0)},
'$addToSet': {'parent': version['_id']}}, upsert=True)]
def test_read():
self = create_autospec(PickleStore)
version = {'data': 'item'}
assert PickleStore.read(self, sentinel.arctic_lib, version, sentinel.symbol) == 'item'
def test_read_object_backwards_compat():
self = create_autospec(PickleStore)
version = {'blob': Binary(compressHC(cPickle.dumps(object)))}
assert PickleStore.read(self, sentinel.arctic_lib, version, sentinel.symbol) == object
def test_read_object_2():
self = create_autospec(PickleStore)
version = {'_id': sentinel._id,
'blob': '__chunked__'}
coll = Mock()
arctic_lib = Mock()
coll.find.return_value = [{'data': Binary(compressHC(cPickle.dumps(object))),
'symbol': 'sentinel.symbol',
'segment': 1}
]
arctic_lib.get_top_level_collection.return_value = coll
assert PickleStore.read(self, arctic_lib, version, sentinel.symbol) == object
assert coll.find.call_args_list == [call({'symbol': sentinel.symbol, 'parent': sentinel._id})]
def test_read_with_base_version_id():
self = create_autospec(PickleStore)
version = {'_id': sentinel._id,
'base_version_id': sentinel.base_version_id,
'blob': '__chunked__'}
coll = Mock()
arctic_lib = Mock()
coll.find.return_value = [{'data': Binary(compressHC(cPickle.dumps(object))),
'symbol': 'sentinel.symbol',
'segment': 1}
]
arctic_lib.get_top_level_collection.return_value = coll
assert PickleStore.read(self, arctic_lib, version, sentinel.symbol) == object
assert coll.find.call_args_list == [call({'symbol': sentinel.symbol, 'parent': sentinel.base_version_id})]
@pytest.mark.xfail(sys.version_info >= (3,),
reason="lz4 data written with python2 not compatible with python3")
def test_read_backward_compatibility():
"""Test backwards compatibility with a pickled file that's created with Python 2.7.3,
Numpy 1.7.1_ahl2 and Pandas 0.14.1
"""
fname = path.join(path.dirname(__file__), "data", "test-data.pkl")
# For newer versions; verify that unpickling fails when using cPickle
if PANDAS_VERSION >= LooseVersion("0.16.1"):
if sys.version_info[0] >= 3:
with pytest.raises(UnicodeDecodeError), open(fname) as fh:
cPickle.load(fh)
else:
with pytest.raises(TypeError), open(fname) as fh:
cPickle.load(fh)
# Verify that PickleStore() uses a backwards compatible unpickler.
store = PickleStore()
with open(fname) as fh:
# PickleStore compresses data with lz4
version = {'blob': compressHC(fh.read())}
df = store.read(sentinel.arctic_lib, version, sentinel.symbol)
expected = pd.DataFrame(range(4), pd.date_range(start="20150101", periods=4))
assert (df == expected).all().all()
def test_unpickle_highest_protocol():
"""Pandas version 0.14.1 fails to unpickle a pandas.Series() in compat mode if the
container has been pickled with HIGHEST_PROTOCOL.
"""
version = {
'blob': compressHC(cPickle.dumps(pd.Series(), protocol=cPickle.HIGHEST_PROTOCOL)),
}
store = PickleStore()
ps = store.read(sentinel.arctic_lib, version, sentinel.symbol)
expected = pd.Series()
assert (ps == expected).all()
def test_pickle_chunk_V1_read():
data = {'foo': b'abcdefghijklmnopqrstuvwxyz'}
version = {'_id': sentinel._id,
'blob': '__chunked__'}
coll = Mock()
arctic_lib = Mock()
datap = compressHC(cPickle.dumps(data, protocol=cPickle.HIGHEST_PROTOCOL))
data_1 = datap[0:5]
data_2 = datap[5:]
coll.find.return_value = [{'data': Binary(data_1),
'symbol': 'sentinel.symbol',
'segment': 0},
{'data': Binary(data_2),
'symbol': 'sentinel.symbol',
'segment': 1},
]
arctic_lib.get_top_level_collection.return_value = coll
ps = PickleStore()
assert(data == ps.read(arctic_lib, version, sentinel.symbol))
def test_pickle_store_future_version():
data = {'foo': b'abcdefghijklmnopqrstuvwxyz'}
version = {'_id': sentinel._id,
'blob': '__chunked__VERSION_ONE_MILLION'}
coll = Mock()
arctic_lib = Mock()
datap = compressHC(cPickle.dumps(data, protocol=cPickle.HIGHEST_PROTOCOL))
data_1 = datap[0:5]
data_2 = datap[5:]
coll.find.return_value = [{'data': Binary(data_1),
'symbol': 'sentinel.symbol',
'segment': 0},
{'data': Binary(data_2),
'symbol': 'sentinel.symbol',
'segment': 1},
]
arctic_lib.get_top_level_collection.return_value = coll
ps = PickleStore()
with pytest.raises(UnsupportedPickleStoreVersion) as e:
ps.read(arctic_lib, version, sentinel.symbol)
assert('unsupported version of pickle store' in str(e.value))
|
from math import cos, pi
from pygal._compat import is_str
from pygal.adapters import none_to_zero, positive
from pygal.graph.line import Line
from pygal.util import cached_property, compute_scale, cut, deg, truncate
from pygal.view import PolarLogView, PolarView
class Radar(Line):
"""Rada graph class"""
_adapters = [positive, none_to_zero]
def __init__(self, *args, **kwargs):
"""Init custom vars"""
self._rmax = None
super(Radar, self).__init__(*args, **kwargs)
def _fill(self, values):
"""Add extra values to fill the line"""
return values
@cached_property
def _values(self):
"""Getter for series values (flattened)"""
if self.interpolate:
return [
val[0] for serie in self.series for val in serie.interpolated
]
else:
return super(Line, self)._values
def _set_view(self):
"""Assign a view to current graph"""
if self.logarithmic:
view_class = PolarLogView
else:
view_class = PolarView
self.view = view_class(
self.width - self.margin_box.x, self.height - self.margin_box.y,
self._box
)
def _x_axis(self, draw_axes=True):
"""Override x axis to make it polar"""
if not self._x_labels or not self.show_x_labels:
return
axis = self.svg.node(
self.nodes['plot'],
class_="axis x web%s" %
(' always_show' if self.show_x_guides else '')
)
format_ = lambda x: '%f %f' % x
center = self.view((0, 0))
r = self._rmax
# Can't simply determine truncation
truncation = self.truncate_label or 25
for label, theta in self._x_labels:
major = label in self._x_labels_major
if not (self.show_minor_x_labels or major):
continue
guides = self.svg.node(axis, class_='guides')
end = self.view((r, theta))
self.svg.node(
guides,
'path',
d='M%s L%s' % (format_(center), format_(end)),
class_='%s%sline' %
('axis ' if label == "0" else '', 'major ' if major else '')
)
r_txt = (1 - self._box.__class__.margin) * self._box.ymax
pos_text = self.view((r_txt, theta))
text = self.svg.node(
guides,
'text',
x=pos_text[0],
y=pos_text[1],
class_='major' if major else ''
)
text.text = truncate(label, truncation)
if text.text != label:
self.svg.node(guides, 'title').text = label
else:
self.svg.node(
guides,
'title',
).text = self._x_format(theta)
angle = -theta + pi / 2
if cos(angle) < 0:
angle -= pi
text.attrib['transform'] = 'rotate(%f %s)' % (
self.x_label_rotation or deg(angle), format_(pos_text)
)
def _y_axis(self, draw_axes=True):
"""Override y axis to make it polar"""
if not self._y_labels or not self.show_y_labels:
return
axis = self.svg.node(self.nodes['plot'], class_="axis y web")
for label, r in reversed(self._y_labels):
major = r in self._y_labels_major
if not (self.show_minor_y_labels or major):
continue
guides = self.svg.node(
axis,
class_='%sguides' %
('logarithmic ' if self.logarithmic else '')
)
if self.show_y_guides:
self.svg.line(
guides, [self.view((r, theta)) for theta in self._x_pos],
close=True,
class_='%sguide line' % ('major ' if major else '')
)
x, y = self.view((r, self._x_pos[0]))
x -= 5
text = self.svg.node(
guides, 'text', x=x, y=y, class_='major' if major else ''
)
text.text = label
if self.y_label_rotation:
text.attrib[
'transform'
] = "rotate(%d %f %f)" % (self.y_label_rotation, x, y)
self.svg.node(
guides,
'title',
).text = self._y_format(r)
def _compute(self):
"""Compute r min max and labels position"""
delta = 2 * pi / self._len if self._len else 0
self._x_pos = [.5 * pi + i * delta for i in range(self._len + 1)]
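        # Angles start at pi / 2 (the top of the chart) and advance by
        # 2 * pi / len per value; the extra point wraps back to the starting
        # angle so the web can be closed.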
for serie in self.all_series:
serie.points = [(v, self._x_pos[i])
for i, v in enumerate(serie.values)]
if self.interpolate:
extended_x_pos = ([.5 * pi - delta] + self._x_pos)
extended_vals = (serie.values[-1:] + serie.values)
serie.interpolated = list(
map(
tuple,
map(
reversed,
self._interpolate(extended_x_pos, extended_vals)
)
)
)
# x labels space
self._box.margin *= 2
self._rmin = self.zero
self._rmax = self._max or 1
self._box.set_polar_box(self._rmin, self._rmax)
self._self_close = True
def _compute_y_labels(self):
y_pos = compute_scale(
self._rmin, self._rmax, self.logarithmic, self.order_min,
self.min_scale, self.max_scale / 2
)
if self.y_labels:
self._y_labels = []
for i, y_label in enumerate(self.y_labels):
if isinstance(y_label, dict):
pos = self._adapt(y_label.get('value'))
title = y_label.get('label', self._y_format(pos))
elif is_str(y_label):
pos = self._adapt(y_pos[i])
title = y_label
else:
pos = self._adapt(y_label)
title = self._y_format(pos)
self._y_labels.append((title, pos))
self._rmin = min(self._rmin, min(cut(self._y_labels, 1)))
self._rmax = max(self._rmax, max(cut(self._y_labels, 1)))
self._box.set_polar_box(self._rmin, self._rmax)
else:
self._y_labels = list(zip(map(self._y_format, y_pos), y_pos))
|
import pytest
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
@pytest.fixture(autouse=True)
def mock_ssdp():
"""Mock ssdp."""
with patch("homeassistant.components.ssdp.Scanner.async_scan"):
yield
@pytest.fixture(autouse=True)
def mock_updater():
"""Mock updater."""
with patch("homeassistant.components.updater.get_newest_version"):
yield
@pytest.fixture(autouse=True)
def recorder_url_mock():
"""Mock recorder url."""
with patch("homeassistant.components.recorder.DEFAULT_URL", "sqlite://"):
yield
async def test_setup(hass, mock_zeroconf):
"""Test setup."""
assert await async_setup_component(hass, "default_config", {"foo": "bar"})
|
import numpy as np
from keras import backend as K
from matchzoo import losses
def test_hinge_loss():
true_value = K.variable(np.array([[1.2], [1],
[1], [1]]))
pred_value = K.variable(np.array([[1.2], [0.1],
[0], [-0.3]]))
expected_loss = (0 + 1 - 0.3 + 0) / 2.0
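    # With the default num_neg=1, the predictions pair up as (pos=1.2, neg=0.1)
    # and (pos=0, neg=-0.3); the per-pair hinge terms with margin=1 are
    # max(0, 1 - (1.2 - 0.1)) = 0 and max(0, 1 - (0 - (-0.3))) = 0.7,
    # giving a mean loss of 0.35.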
loss = K.eval(losses.RankHingeLoss()(true_value, pred_value))
assert np.isclose(expected_loss, loss)
expected_loss = (2 + 0.1 - 1.2 + 2 - 0.3 + 0) / 2.0
loss = K.eval(losses.RankHingeLoss(margin=2)(true_value, pred_value))
assert np.isclose(expected_loss, loss)
true_value = K.variable(np.array([[1.2], [1], [0.8],
[1], [1], [0.8]]))
pred_value = K.variable(np.array([[1.2], [0.1], [-0.5],
[0], [0], [-0.3]]))
expected_loss = (0 + 1 - 0.15) / 2.0
loss = K.eval(losses.RankHingeLoss(num_neg=2, margin=1)(
true_value, pred_value))
assert np.isclose(expected_loss, loss)
def test_rank_crossentropy_loss():
losses.neg_num = 1
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
true_value = K.variable(np.array([[1.], [0.],
[0.], [1.]]))
pred_value = K.variable(np.array([[0.8], [0.1],
[0.8], [0.1]]))
expected_loss = (-np.log(softmax([0.8, 0.1])[0]) - np.log(
softmax([0.8, 0.1])[1])) / 2
loss = K.eval(losses.RankCrossEntropyLoss()(true_value, pred_value))
assert np.isclose(expected_loss, loss)
true_value = K.variable(np.array([[1.], [0.], [0.],
[0.], [1.], [0.]]))
pred_value = K.variable(np.array([[0.8], [0.1], [0.1],
[0.8], [0.1], [0.1]]))
expected_loss = (-np.log(softmax([0.8, 0.1, 0.1])[0]) - np.log(
softmax([0.8, 0.1, 0.1])[1])) / 2
loss = K.eval(losses.RankCrossEntropyLoss(num_neg=2)(
true_value, pred_value))
assert np.isclose(expected_loss, loss)
|
from PyQt5.QtCore import QUrl, pyqtSlot
from PyQt5.QtNetwork import QNetworkProxy, QNetworkProxyFactory
from qutebrowser.config import config, configtypes
from qutebrowser.utils import message, usertypes, urlutils, utils
from qutebrowser.misc import objects
from qutebrowser.browser.network import pac
application_factory = None
def init():
"""Set the application wide proxy factory."""
global application_factory
application_factory = ProxyFactory()
QNetworkProxyFactory.setApplicationProxyFactory(application_factory)
config.instance.changed.connect(_warn_for_pac)
_warn_for_pac()
@config.change_filter('content.proxy', function=True)
def _warn_for_pac():
"""Show a warning if PAC is used with QtWebEngine."""
proxy = config.val.content.proxy
if (isinstance(proxy, pac.PACFetcher) and
objects.backend == usertypes.Backend.QtWebEngine):
message.error("PAC support isn't implemented for QtWebEngine yet!")
@pyqtSlot()
def shutdown():
QNetworkProxyFactory.setApplicationProxyFactory(
None) # type: ignore[arg-type]
class ProxyFactory(QNetworkProxyFactory):
"""Factory for proxies to be used by qutebrowser."""
def get_error(self):
"""Check if proxy can't be resolved.
Return:
None if proxy is correct, otherwise an error message.
"""
proxy = config.val.content.proxy
if isinstance(proxy, pac.PACFetcher):
return proxy.fetch_error()
else:
return None
def _set_capabilities(self, proxy):
if proxy.type() == QNetworkProxy.NoProxy:
return
capabilities = proxy.capabilities()
lookup_cap = QNetworkProxy.HostNameLookupCapability
if config.val.content.proxy_dns_requests:
capabilities |= lookup_cap
else:
capabilities &= ~lookup_cap
proxy.setCapabilities(capabilities)
def queryProxy(self, query):
"""Get the QNetworkProxies for a query.
Args:
query: The QNetworkProxyQuery to get a proxy for.
Return:
A list of QNetworkProxy objects in order of preference.
"""
proxy = config.val.content.proxy
if proxy is configtypes.SYSTEM_PROXY:
# On Linux, use "export http_proxy=socks5://host:port" to manually
# set system proxy.
# ref. http://doc.qt.io/qt-5/qnetworkproxyfactory.html#systemProxyForQuery
proxies = QNetworkProxyFactory.systemProxyForQuery(query)
elif isinstance(proxy, pac.PACFetcher):
if objects.backend == usertypes.Backend.QtWebEngine:
# Looks like query.url() is always invalid on QtWebEngine...
proxy = urlutils.proxy_from_url(QUrl('direct://'))
assert not isinstance(proxy, pac.PACFetcher)
proxies = [proxy]
elif objects.backend == usertypes.Backend.QtWebKit:
proxies = proxy.resolve(query)
else:
raise utils.Unreachable(objects.backend)
else:
proxies = [proxy]
for proxy in proxies:
self._set_capabilities(proxy)
return proxies
|
import itertools
import pytest
from arctic.exceptions import ArcticSerializationException
from arctic.serialization.incremental import IncrementalPandasToRecArraySerializer
from arctic.serialization.numpy_records import DataFrameSerializer
from tests.unit.serialization.serialization_test_data import _mixed_test_data, is_test_data_serializable
_CHUNK_SIZE = 2 * 1024 * 1024 - 2048
NON_HOMOGENEOUS_DTYPE_PATCH_SIZE_ROWS = 50
_TEST_DATA = None
df_serializer = DataFrameSerializer()
def test_incremental_bad_init():
with pytest.raises(ArcticSerializationException):
IncrementalPandasToRecArraySerializer(df_serializer, 'hello world', chunk_size=_CHUNK_SIZE)
with pytest.raises(ArcticSerializationException):
IncrementalPandasToRecArraySerializer(df_serializer, 1234, chunk_size=_CHUNK_SIZE)
with pytest.raises(ArcticSerializationException):
IncrementalPandasToRecArraySerializer(df_serializer, _mixed_test_data()['small'][0], chunk_size=0)
with pytest.raises(ArcticSerializationException):
IncrementalPandasToRecArraySerializer(df_serializer, _mixed_test_data()['small'][0], chunk_size=-1)
with pytest.raises(ArcticSerializationException):
IncrementalPandasToRecArraySerializer(df_serializer, _mixed_test_data()['small'][0], chunk_size=_CHUNK_SIZE, string_max_len=-1)
def test_none_df():
with pytest.raises(ArcticSerializationException):
incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, None, chunk_size=_CHUNK_SIZE)
incr_ser.serialize()
with pytest.raises(ArcticSerializationException):
incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, None, chunk_size=_CHUNK_SIZE)
incr_ser.generator_bytes()
@pytest.mark.parametrize("input_df_descr", _mixed_test_data().keys())
def test_serialize_pandas_to_recarray(input_df_descr):
if not is_test_data_serializable(input_df_descr):
return
df = _mixed_test_data()[input_df_descr][0]
expectation = _mixed_test_data()[input_df_descr][1]
incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=_CHUNK_SIZE)
if not isinstance(expectation, tuple) and issubclass(expectation, Exception):
with pytest.raises(expectation):
[chunk for chunk, _, _, _ in incr_ser.generator_bytes()]
else:
incr_ser_data, incr_ser_dtype = incr_ser.serialize()
matching = expectation[0].tostring() == incr_ser_data.tostring()
assert matching
assert expectation[1] == incr_ser_dtype
@pytest.mark.parametrize("input_df_descr", _mixed_test_data().keys())
def test_serialize_incremental_pandas_to_recarray(input_df_descr):
if not is_test_data_serializable(input_df_descr):
return
df = _mixed_test_data()[input_df_descr][0]
expectation = _mixed_test_data()[input_df_descr][1]
incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=_CHUNK_SIZE)
if not isinstance(expectation, tuple) and issubclass(expectation, Exception):
with pytest.raises(expectation):
[chunk for chunk, _, _, _ in incr_ser.generator_bytes()]
else:
chunk_bytes = [chunk_b for chunk_b, _, _, _ in incr_ser.generator_bytes()]
matching = expectation[0].tostring() == b''.join(chunk_bytes)
assert matching
assert expectation[1] == incr_ser.dtype
@pytest.mark.parametrize("input_df_descr", _mixed_test_data().keys())
def test_serialize_incremental_chunk_size_pandas_to_recarray(input_df_descr):
if not is_test_data_serializable(input_df_descr):
return
df = _mixed_test_data()[input_df_descr][0]
expectation = _mixed_test_data()[input_df_descr][1]
if not isinstance(expectation, tuple) and issubclass(expectation, Exception):
for div in (1, 4, 8):
chunk_size = div * 8 * 1024 ** 2
with pytest.raises(expectation):
incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=chunk_size)
[chunk for chunk, _, _, _ in incr_ser.generator_bytes()]
return
for div in (1, 4, 8):
chunk_size = div * 8 * 1024 ** 2
if input_df_descr is not None and len(expectation) > 0:
row_size = int(expectation[0].dtype.itemsize)
chunk_size = NON_HOMOGENEOUS_DTYPE_PATCH_SIZE_ROWS * row_size / div
incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=chunk_size)
chunk_bytes = [chunk for chunk, _, _, _ in incr_ser.generator_bytes()]
matching = expectation[0].tostring() == b''.join(chunk_bytes)
assert matching
assert expectation[1] == incr_ser.dtype
@pytest.mark.parametrize("input_df_descr", _mixed_test_data().keys())
def test_shape(input_df_descr):
if not is_test_data_serializable(input_df_descr):
return
df = _mixed_test_data()[input_df_descr][0]
expectation = _mixed_test_data()[input_df_descr][1]
incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=_CHUNK_SIZE)
if not isinstance(expectation, tuple) and issubclass(expectation, Exception):
with pytest.raises(expectation):
[chunk for chunk, _, _, _ in incr_ser.shape]
else:
assert incr_ser.shape == expectation[0].shape
@pytest.mark.parametrize("from_idx, to_idx",
[(x, y) for (x, y) in itertools.product(range(-10, len(_mixed_test_data()['large'][0])+100, 500),
range(-10, len(_mixed_test_data()['large'][0])+100, 500))
if x <= y]
)
def test_generator_bytes_range(from_idx, to_idx):
# Tests also negative indexing
df = _mixed_test_data()['large'][0]
expectation = _mixed_test_data()['large'][1]
incr_ser = IncrementalPandasToRecArraySerializer(df_serializer, df, chunk_size=_CHUNK_SIZE)
chunk_bytes = [chunk_b for chunk_b, _, _, _ in incr_ser.generator_bytes(from_idx=from_idx, to_idx=to_idx)]
matching = expectation[0][from_idx:to_idx].tostring() == b''.join(chunk_bytes)
assert matching
assert expectation[1] == incr_ser.dtype
|
import logging
from typing import List, Optional
from bond_api import Action, Bond
_LOGGER = logging.getLogger(__name__)
class BondDevice:
"""Helper device class to hold ID and attributes together."""
def __init__(self, device_id: str, attrs: dict, props: dict):
"""Create a helper device from ID and attributes returned by API."""
self.device_id = device_id
self.props = props
self._attrs = attrs
def __repr__(self):
"""Return readable representation of a bond device."""
return {
"device_id": self.device_id,
"props": self.props,
"attrs": self._attrs,
}.__repr__()
@property
def name(self) -> str:
"""Get the name of this device."""
return self._attrs["name"]
@property
def type(self) -> str:
"""Get the type of this device."""
return self._attrs["type"]
@property
def trust_state(self) -> bool:
"""Check if Trust State is turned on."""
return self.props.get("trust_state", False)
def supports_speed(self) -> bool:
"""Return True if this device supports any of the speed related commands."""
actions: List[str] = self._attrs["actions"]
return bool([action for action in actions if action in [Action.SET_SPEED]])
def supports_direction(self) -> bool:
"""Return True if this device supports any of the direction related commands."""
actions: List[str] = self._attrs["actions"]
return bool([action for action in actions if action in [Action.SET_DIRECTION]])
def supports_light(self) -> bool:
"""Return True if this device supports any of the light related commands."""
actions: List[str] = self._attrs["actions"]
return bool(
[
action
for action in actions
if action in [Action.TURN_LIGHT_ON, Action.TURN_LIGHT_OFF]
]
)
def supports_set_brightness(self) -> bool:
"""Return True if this device supports setting a light brightness."""
actions: List[str] = self._attrs["actions"]
return bool([action for action in actions if action in [Action.SET_BRIGHTNESS]])
class BondHub:
"""Hub device representing Bond Bridge."""
def __init__(self, bond: Bond):
"""Initialize Bond Hub."""
self.bond: Bond = bond
self._version: Optional[dict] = None
self._devices: Optional[List[BondDevice]] = None
async def setup(self):
"""Read hub version information."""
self._version = await self.bond.version()
_LOGGER.debug("Bond reported the following version info: %s", self._version)
# Fetch all available devices using Bond API.
device_ids = await self.bond.devices()
self._devices = [
BondDevice(
device_id,
await self.bond.device(device_id),
await self.bond.device_properties(device_id),
)
for device_id in device_ids
]
_LOGGER.debug("Discovered Bond devices: %s", self._devices)
@property
def bond_id(self) -> str:
"""Return unique Bond ID for this hub."""
return self._version["bondid"]
@property
def target(self) -> str:
"""Return this hub model."""
return self._version.get("target")
@property
def fw_ver(self) -> str:
"""Return this hub firmware version."""
return self._version.get("fw_ver")
@property
def devices(self) -> List[BondDevice]:
"""Return a list of all devices controlled by this hub."""
return self._devices
@property
def is_bridge(self) -> bool:
"""Return if the Bond is a Bond Bridge."""
# If False, it means that it is a Smart by Bond product. Assumes that it is if the model is not available.
return self._version.get("model", "BD-").startswith("BD-")
|
from django.contrib.sites.models import Site
from django.db import models
from django.utils import timezone
from zinnia.settings import SEARCH_FIELDS
DRAFT = 0
HIDDEN = 1
PUBLISHED = 2
def tags_published():
"""
Return the published tags.
"""
from tagging.models import Tag
from zinnia.models.entry import Entry
tags_entry_published = Tag.objects.usage_for_queryset(
Entry.published.all())
# Need to do that until the issue #44 of django-tagging is fixed
return Tag.objects.filter(name__in=[t.name for t in tags_entry_published])
def entries_published(queryset):
"""
Return only the entries published.
"""
now = timezone.now()
return queryset.filter(
models.Q(start_publication__lte=now) |
models.Q(start_publication=None),
models.Q(end_publication__gt=now) |
models.Q(end_publication=None),
status=PUBLISHED, sites=Site.objects.get_current())
class EntryPublishedManager(models.Manager):
"""
Manager to retrieve published entries.
"""
def get_queryset(self):
"""
Return published entries.
"""
return entries_published(
super(EntryPublishedManager, self).get_queryset())
def on_site(self):
"""
Return entries published on current site.
"""
return super(EntryPublishedManager, self).get_queryset().filter(
sites=Site.objects.get_current())
def search(self, pattern):
"""
Top level search method on entries.
"""
try:
return self.advanced_search(pattern)
except Exception:
return self.basic_search(pattern)
def advanced_search(self, pattern):
"""
Advanced search on entries.
"""
from zinnia.search import advanced_search
return advanced_search(pattern)
def basic_search(self, pattern):
"""
Basic search on entries.
"""
lookup = None
for pattern in pattern.split():
query_part = models.Q()
for field in SEARCH_FIELDS:
query_part |= models.Q(**{'%s__icontains' % field: pattern})
if lookup is None:
lookup = query_part
else:
lookup |= query_part
return self.get_queryset().filter(lookup)
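    # Illustrative expansion (assuming SEARCH_FIELDS == ('title', 'content')):
    # basic_search('django blog') filters on
    #   (Q(title__icontains='django') | Q(content__icontains='django'))
    #   | (Q(title__icontains='blog') | Q(content__icontains='blog'))
    # i.e. any word matching any searchable field is enough.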
class EntryRelatedPublishedManager(models.Manager):
"""
Manager to retrieve objects associated with published entries.
"""
def get_queryset(self):
"""
Return a queryset containing published entries.
"""
now = timezone.now()
return super(
EntryRelatedPublishedManager, self).get_queryset().filter(
models.Q(entries__start_publication__lte=now) |
models.Q(entries__start_publication=None),
models.Q(entries__end_publication__gt=now) |
models.Q(entries__end_publication=None),
entries__status=PUBLISHED,
entries__sites=Site.objects.get_current()
).distinct()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from six.moves import range
from six.moves import zip
import tensorflow as tf
def verify_compatible_shapes(img1, img2):
"""Checks if two image tensors are compatible for metric computation.
This function checks if two sets of images have ranks at least 3, and if the
last three dimensions match.
Args:
img1: The first images tensor.
img2: The second images tensor.
Returns:
A tuple of the first tensor shape, the second tensor shape, and a list of
tf.Assert() implementing the checks.
Raises:
ValueError: when static shape check fails.
"""
shape1 = img1.get_shape().with_rank_at_least(3)
shape2 = img2.get_shape().with_rank_at_least(3)
shape1[-3:].assert_is_compatible_with(shape2[-3:])
if shape1.ndims is not None and shape2.ndims is not None:
for dim1, dim2 in zip(reversed(shape1[:-3]), reversed(shape2[:-3])):
if not (dim1 == 1 or dim2 == 1 or dim1.is_compatible_with(dim2)):
raise ValueError(
'Two images are not compatible: %s and %s' % (shape1, shape2))
# Now assign shape tensors.
shape1, shape2 = tf.shape_n([img1, img2])
checks = []
checks.append(tf.Assert(tf.greater_equal(tf.size(shape1), 3),
[shape1, shape2], summarize=10))
checks.append(tf.Assert(tf.reduce_all(tf.equal(shape1[-3:], shape2[-3:])),
[shape1, shape2], summarize=10))
return shape1, shape2, checks
_SSIM_K1 = 0.01
_SSIM_K2 = 0.03
def _ssim_helper(x, y, reducer, max_val, compensation=1.0):
r"""Helper function to SSIM.
SSIM estimates covariances with weighted sums, e.g., normalized Gaussian blur.
  Just as the unbiased covariance estimator uses a normalization factor of
  n-1 instead of n, naive covariance estimation with weighted sums is biased.
Suppose `reducer` is a weighted sum, then the mean estimators are
mu_x = \sum_i w_i x_i,
mu_y = \sum_i w_i y_i,
where w_i's are the weighted-sum weights, and covariance estimator is
cov_xy = \sum_i w_i (x_i - mu_x) (y_i - mu_y)
with assumption \sum_i w_i = 1. This covariance estimator is biased, since
E cov_xy = (1 - \sum_i w_i ** 2) Cov(X, Y).
For SSIM measure with unbiased covariance estimators, pass as `compensation`
argument (1 - \sum_i w_i ** 2).
Arguments:
x: first set of images.
    y: second set of images.
reducer: Function that computes 'local' averages from set of images.
      For the non-convolutional version, this is usually tf.reduce_mean(x, [1, 2]),
and for convolutional version, this is usually tf.nn.avg_pool or
tf.nn.conv2d with weighted-sum kernel.
max_val: The dynamic range (i.e., the difference between the maximum
possible allowed value and the minimum allowed value).
compensation: Compensation factor. See above.
Returns:
A pair containing the luminance measure and the contrast-structure measure.
"""
c1 = (_SSIM_K1 * max_val) ** 2
c2 = (_SSIM_K2 * max_val) ** 2
# SSIM luminance measure is
# (2 * mu_x * mu_y + c1) / (mu_x ** 2 + mu_y ** 2 + c1).
mean0 = reducer(x)
mean1 = reducer(y)
num0 = mean0 * mean1 * 2.0
den0 = tf.square(mean0) + tf.square(mean1)
luminance = (num0 + c1) / (den0 + c1)
# SSIM contrast-structure measure is
# (2 * cov_xy + c2) / (cov_xx + cov_yy + c2).
# Note that `reducer` is a weighted sum with weight w_k, \sum_i w_i = 1, then
# cov_xy = \sum_i w_i (x_i - mu_x) (y_i - mu_y)
# = \sum_i w_i x_i y_i - (\sum_i w_i x_i) (\sum_j w_j y_j).
num1 = reducer(x * y) * 2.0
den1 = reducer(tf.square(x) + tf.square(y))
c2 *= compensation
cs = (num1 - num0 + c2) / (den1 - den0 + c2)
# SSIM score is the product of the luminance and contrast-structure measures.
return luminance, cs
def f_special_gauss(size, sigma):
"""Function to mimic the 'fspecial' gaussian MATLAB function."""
size = tf.convert_to_tensor(size, tf.int32)
sigma = tf.convert_to_tensor(sigma)
coords = tf.cast(tf.range(size), sigma.dtype)
coords -= tf.cast(size - 1, sigma.dtype) / 2.0
g = tf.square(coords)
g *= -0.5 / tf.square(sigma)
g = tf.reshape(g, shape=[1, -1]) + tf.reshape(g, shape=[-1, 1])
g = tf.reshape(g, shape=[1, -1]) # For tf.nn.softmax().
g = tf.nn.softmax(g)
return tf.reshape(g, shape=[size, size, 1, 1])
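# Note on the softmax trick above: softmax of -0.5 * (x**2 + y**2) / sigma**2
# equals exp(-(x**2 + y**2) / (2 * sigma**2)) divided by the sum over all
# entries, i.e. a Gaussian kernel normalized to sum to 1, matching what
# MATLAB's fspecial('gaussian', size, sigma) produces.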
def _ssim_index_per_channel(
img1, img2, filter_size, filter_width, max_val=255.0):
"""Computes SSIM index between img1 and img2 per color channel.
This function matches the standard SSIM implementation found at:
https://ece.uwaterloo.ca/~z70wang/research/ssim/ssim_index.m
Details:
    - To reproduce it, an 11x11 Gaussian filter of width 1.5 is used.
- k1 = 0.01, k2 = 0.03 as in the original paper.
Args:
img1: First RGB image batch.
img2: Second RGB image batch.
filter_size: An integer, the filter size of the Gaussian kernel used.
filter_width: A float, the filter width of the Gaussian kernel used.
max_val: the dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
Returns:
A pair of tensors containing batch-wise and channel-wise SSIM and
contrast-structure measure. The shape is [..., channels].
"""
filter_size = tf.constant(filter_size, dtype=tf.int32)
filter_sigma = tf.constant(filter_width, dtype=img1.dtype)
shape1, shape2 = tf.shape_n([img1, img2])
filter_size = tf.reduce_min(
tf.concat([tf.expand_dims(filter_size, axis=0),
shape1[-3:-1],
shape2[-3:-1]],
axis=0))
kernel = f_special_gauss(filter_size, filter_sigma)
kernel = tf.tile(kernel, multiples=[1, 1, shape1[-1], 1])
# The correct compensation factor is `1.0 - tf.reduce_sum(tf.square(kernel))`,
# but to match MATLAB implementation of MS-SSIM, we use 1.0 instead.
compensation = 1.0
def reducer(x): # pylint: disable=invalid-name
shape = tf.shape(x)
x = tf.reshape(x, shape=tf.concat([[-1], shape[-3:]], 0))
y = tf.nn.depthwise_conv2d(x, kernel, strides=[1] * 4, padding='VALID')
return tf.reshape(y, tf.concat([shape[:-3], tf.shape(y)[1:]], 0))
luminance, cs = _ssim_helper(img1, img2, reducer, max_val, compensation)
# Average over the second and the third from the last: height, width.
axes = tf.constant([-3, -2], dtype=tf.int32)
ssim = tf.reduce_mean(luminance * cs, axes)
cs = tf.reduce_mean(cs, axes)
return ssim, cs
# This must be a tuple (not a list) because tuples are immutable and we don't
# want these to accidentally change.
_MSSSIM_WEIGHTS = (.0448, 0.2856, 0.3001, 0.2363, 0.1333)
def multiscale_ssim(
img1, img2, filter_size=11, filter_width=1.5, max_val=255.0):
"""Computes MS-SSIM with power factors from Wang paper."""
return _multiscale_ssim_helper(img1, img2,
filter_size=filter_size,
filter_width=filter_width,
max_val=max_val,
power_factors=_MSSSIM_WEIGHTS)
def multiscale_ssim_unweighted(
img1, img2, filter_size=11, filter_width=1.5, max_val=255.0):
"""Computes unweighted MS-SSIM with power factors from Zhao paper."""
return _multiscale_ssim_helper(img1, img2,
filter_size=filter_size,
filter_width=filter_width,
max_val=max_val,
power_factors=[1, 1, 1, 1, 1])
def _multiscale_ssim_helper(
img1, img2, filter_size, filter_width, power_factors, max_val=255.0):
"""Computes the MS-SSIM between img1 and img2.
This function assumes that `img1` and `img2` are image batches, i.e. the last
three dimensions are [row, col, channels].
Arguments:
img1: First RGB image batch.
img2: Second RGB image batch. Must have the same rank as img1.
filter_size: An integer, the filter size of the Gaussian kernel used.
filter_width: A float, the filter width of the Gaussian kernel used.
power_factors: iterable of weightings for each of the scales. The number of
scales used is the length of the list. Index 0 is the unscaled
resolution's weighting and each increasing scale corresponds to the image
being downsampled by 2.
max_val: the dynamic range of the images (i.e., the difference between the
      maximum and the minimum allowed values).
Returns:
A tensor containing batch-wise MS-SSIM measure. MS-SSIM has range [0, 1].
The shape is broadcast(img1.shape[:-3], img2.shape[:-3]).
"""
# Shape checking.
shape1 = img1.get_shape().with_rank_at_least(3)
shape2 = img2.get_shape().with_rank_at_least(3)
shape1[-3:].merge_with(shape2[-3:])
with tf.name_scope(None, 'MS-SSIM', [img1, img2]):
shape1, shape2, checks = verify_compatible_shapes(img1, img2)
with tf.control_dependencies(checks):
img1 = tf.identity(img1)
imgs = [img1, img2]
shapes = [shape1, shape2]
# img1 and img2 are assumed to be a (multi-dimensional) batch of
# 3-dimensional images (height, width, channels). `heads` contain the batch
# dimensions, and `tails` contain the image dimensions.
heads = [s[:-3] for s in shapes]
tails = [s[-3:] for s in shapes]
divisor = [1, 2, 2, 1]
divisor_tensor = tf.constant(divisor[1:], dtype=tf.int32)
def do_pad(images, remainder): # pylint: disable=invalid-name
padding = tf.expand_dims(remainder, -1)
padding = tf.pad(padding, [[1, 0], [1, 0]])
return [tf.pad(x, padding, mode='SYMMETRIC') for x in images]
mcs = []
for k in range(len(power_factors)):
with tf.name_scope(None, 'Scale%d' % k, imgs):
if k > 0:
# Avg pool takes rank 4 tensors. Flatten leading dimensions.
flat_imgs = [
tf.reshape(x, tf.concat([[-1], t], 0))
for x, t in zip(imgs, tails)
]
remainder = tails[0] % divisor_tensor
need_padding = tf.reduce_any(tf.not_equal(remainder, 0))
# pylint: disable=cell-var-from-loop
padded = tf.cond(need_padding,
lambda: do_pad(flat_imgs, remainder),
lambda: flat_imgs)
# pylint: enable=cell-var-from-loop
downscaled = [
tf.nn.avg_pool(
x, ksize=divisor, strides=divisor, padding='VALID')
for x in padded
]
tails = [x[1:] for x in tf.shape_n(downscaled)]
imgs = [
tf.reshape(x, tf.concat([h, t], 0))
for x, h, t in zip(downscaled, heads, tails)
]
# Overwrite previous ssim value since we only need the last one.
ssim, cs = _ssim_index_per_channel(
*imgs,
filter_size=filter_size, filter_width=filter_width,
max_val=max_val)
mcs.append(tf.nn.relu(cs))
    # In the MS-SSIM calculation we only use the luminance term l(p) at the
    # highest scale; since l(p) * cs(p) is ssim(p), drop the cs score for the
    # last scale and use the full ssim value there instead.
    mcs.pop()
mcs_and_ssim = tf.stack(mcs + [tf.nn.relu(ssim)], axis=-1)
# Take weighted geometric mean across the scale axis.
ms_ssim = tf.reduce_prod(tf.pow(mcs_and_ssim, power_factors), [-1])
ms_ssim = tf.reduce_mean(ms_ssim, [-1]) # Average over color channels.
return ms_ssim
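# Hedged usage sketch (comments only; assumes the TF1-style graph APIs used
# above, so the result would be evaluated inside a tf.Session):
#   img_a = tf.ones([2, 64, 64, 3]) * 128.0
#   img_b = tf.ones([2, 64, 64, 3]) * 96.0
#   score = multiscale_ssim(img_a, img_b, max_val=255.0)  # shape [2]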
|
from collections import OrderedDict, namedtuple
import io
import ipaddress
import logging
import os
import re
import secrets
import socket
import pyqrcode
import voluptuous as vol
from homeassistant.components import binary_sensor, fan, media_player, sensor
from homeassistant.const import (
ATTR_CODE,
ATTR_SUPPORTED_FEATURES,
CONF_NAME,
CONF_TYPE,
TEMP_CELSIUS,
)
from homeassistant.core import HomeAssistant, split_entity_id
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.storage import STORAGE_DIR
import homeassistant.util.temperature as temp_util
from .const import (
AUDIO_CODEC_COPY,
AUDIO_CODEC_OPUS,
CONF_AUDIO_CODEC,
CONF_AUDIO_MAP,
CONF_AUDIO_PACKET_SIZE,
CONF_FEATURE,
CONF_FEATURE_LIST,
CONF_LINKED_BATTERY_CHARGING_SENSOR,
CONF_LINKED_BATTERY_SENSOR,
CONF_LINKED_DOORBELL_SENSOR,
CONF_LINKED_HUMIDITY_SENSOR,
CONF_LINKED_MOTION_SENSOR,
CONF_LINKED_OBSTRUCTION_SENSOR,
CONF_LOW_BATTERY_THRESHOLD,
CONF_MAX_FPS,
CONF_MAX_HEIGHT,
CONF_MAX_WIDTH,
CONF_STREAM_ADDRESS,
CONF_STREAM_COUNT,
CONF_STREAM_SOURCE,
CONF_SUPPORT_AUDIO,
CONF_VIDEO_CODEC,
CONF_VIDEO_MAP,
CONF_VIDEO_PACKET_SIZE,
DEFAULT_AUDIO_CODEC,
DEFAULT_AUDIO_MAP,
DEFAULT_AUDIO_PACKET_SIZE,
DEFAULT_LOW_BATTERY_THRESHOLD,
DEFAULT_MAX_FPS,
DEFAULT_MAX_HEIGHT,
DEFAULT_MAX_WIDTH,
DEFAULT_STREAM_COUNT,
DEFAULT_SUPPORT_AUDIO,
DEFAULT_VIDEO_CODEC,
DEFAULT_VIDEO_MAP,
DEFAULT_VIDEO_PACKET_SIZE,
DOMAIN,
FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE,
FEATURE_PLAY_STOP,
FEATURE_TOGGLE_MUTE,
HOMEKIT_FILE,
HOMEKIT_PAIRING_QR,
HOMEKIT_PAIRING_QR_SECRET,
TYPE_FAUCET,
TYPE_OUTLET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_SWITCH,
TYPE_VALVE,
VIDEO_CODEC_COPY,
VIDEO_CODEC_H264_OMX,
VIDEO_CODEC_LIBX264,
)
_LOGGER = logging.getLogger(__name__)
MAX_PORT = 65535
VALID_VIDEO_CODECS = [VIDEO_CODEC_LIBX264, VIDEO_CODEC_H264_OMX, AUDIO_CODEC_COPY]
VALID_AUDIO_CODECS = [AUDIO_CODEC_OPUS, VIDEO_CODEC_COPY]
BASIC_INFO_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_LINKED_BATTERY_SENSOR): cv.entity_domain(sensor.DOMAIN),
vol.Optional(CONF_LINKED_BATTERY_CHARGING_SENSOR): cv.entity_domain(
binary_sensor.DOMAIN
),
vol.Optional(
CONF_LOW_BATTERY_THRESHOLD, default=DEFAULT_LOW_BATTERY_THRESHOLD
): cv.positive_int,
}
)
FEATURE_SCHEMA = BASIC_INFO_SCHEMA.extend(
{vol.Optional(CONF_FEATURE_LIST, default=None): cv.ensure_list}
)
CAMERA_SCHEMA = BASIC_INFO_SCHEMA.extend(
{
vol.Optional(CONF_STREAM_ADDRESS): vol.All(ipaddress.ip_address, cv.string),
vol.Optional(CONF_STREAM_SOURCE): cv.string,
vol.Optional(CONF_AUDIO_CODEC, default=DEFAULT_AUDIO_CODEC): vol.In(
VALID_AUDIO_CODECS
),
vol.Optional(CONF_SUPPORT_AUDIO, default=DEFAULT_SUPPORT_AUDIO): cv.boolean,
vol.Optional(CONF_MAX_WIDTH, default=DEFAULT_MAX_WIDTH): cv.positive_int,
vol.Optional(CONF_MAX_HEIGHT, default=DEFAULT_MAX_HEIGHT): cv.positive_int,
vol.Optional(CONF_MAX_FPS, default=DEFAULT_MAX_FPS): cv.positive_int,
vol.Optional(CONF_AUDIO_MAP, default=DEFAULT_AUDIO_MAP): cv.string,
vol.Optional(CONF_VIDEO_MAP, default=DEFAULT_VIDEO_MAP): cv.string,
vol.Optional(CONF_STREAM_COUNT, default=DEFAULT_STREAM_COUNT): vol.All(
vol.Coerce(int), vol.Range(min=1, max=10)
),
vol.Optional(CONF_VIDEO_CODEC, default=DEFAULT_VIDEO_CODEC): vol.In(
VALID_VIDEO_CODECS
),
vol.Optional(
CONF_AUDIO_PACKET_SIZE, default=DEFAULT_AUDIO_PACKET_SIZE
): cv.positive_int,
vol.Optional(
CONF_VIDEO_PACKET_SIZE, default=DEFAULT_VIDEO_PACKET_SIZE
): cv.positive_int,
vol.Optional(CONF_LINKED_MOTION_SENSOR): cv.entity_domain(binary_sensor.DOMAIN),
vol.Optional(CONF_LINKED_DOORBELL_SENSOR): cv.entity_domain(
binary_sensor.DOMAIN
),
}
)
HUMIDIFIER_SCHEMA = BASIC_INFO_SCHEMA.extend(
{vol.Optional(CONF_LINKED_HUMIDITY_SENSOR): cv.entity_domain(sensor.DOMAIN)}
)
COVER_SCHEMA = BASIC_INFO_SCHEMA.extend(
{
vol.Optional(CONF_LINKED_OBSTRUCTION_SENSOR): cv.entity_domain(
binary_sensor.DOMAIN
)
}
)
CODE_SCHEMA = BASIC_INFO_SCHEMA.extend(
{vol.Optional(ATTR_CODE, default=None): vol.Any(None, cv.string)}
)
MEDIA_PLAYER_SCHEMA = vol.Schema(
{
vol.Required(CONF_FEATURE): vol.All(
cv.string,
vol.In(
(
FEATURE_ON_OFF,
FEATURE_PLAY_PAUSE,
FEATURE_PLAY_STOP,
FEATURE_TOGGLE_MUTE,
)
),
)
}
)
SWITCH_TYPE_SCHEMA = BASIC_INFO_SCHEMA.extend(
{
vol.Optional(CONF_TYPE, default=TYPE_SWITCH): vol.All(
cv.string,
vol.In(
(
TYPE_FAUCET,
TYPE_OUTLET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_SWITCH,
TYPE_VALVE,
)
),
)
}
)
HOMEKIT_CHAR_TRANSLATIONS = {
0: " ", # nul
10: " ", # nl
13: " ", # cr
33: "-", # !
34: " ", # "
36: "-", # $
37: "-", # %
40: "-", # (
41: "-", # )
42: "-", # *
43: "-", # +
47: "-", # /
58: "-", # :
59: "-", # ;
60: "-", # <
61: "-", # =
62: "-", # >
63: "-", # ?
64: "-", # @
91: "-", # [
92: "-", # \
93: "-", # ]
94: "-", # ^
95: " ", # _
96: "-", # `
123: "-", # {
124: "-", # |
125: "-", # }
126: "-", # ~
127: "-", # del
}
def validate_entity_config(values):
"""Validate config entry for CONF_ENTITY."""
if not isinstance(values, dict):
raise vol.Invalid("expected a dictionary")
entities = {}
for entity_id, config in values.items():
entity = cv.entity_id(entity_id)
domain, _ = split_entity_id(entity)
if not isinstance(config, dict):
raise vol.Invalid(f"The configuration for {entity} must be a dictionary.")
if domain in ("alarm_control_panel", "lock"):
config = CODE_SCHEMA(config)
elif domain == media_player.const.DOMAIN:
config = FEATURE_SCHEMA(config)
feature_list = {}
for feature in config[CONF_FEATURE_LIST]:
params = MEDIA_PLAYER_SCHEMA(feature)
key = params.pop(CONF_FEATURE)
if key in feature_list:
raise vol.Invalid(f"A feature can be added only once for {entity}")
feature_list[key] = params
config[CONF_FEATURE_LIST] = feature_list
elif domain == "camera":
config = CAMERA_SCHEMA(config)
elif domain == "switch":
config = SWITCH_TYPE_SCHEMA(config)
elif domain == "humidifier":
config = HUMIDIFIER_SCHEMA(config)
elif domain == "cover":
config = COVER_SCHEMA(config)
else:
config = BASIC_INFO_SCHEMA(config)
entities[entity] = config
return entities
def get_media_player_features(state):
"""Determine features for media players."""
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
supported_modes = []
if features & (
media_player.const.SUPPORT_TURN_ON | media_player.const.SUPPORT_TURN_OFF
):
supported_modes.append(FEATURE_ON_OFF)
if features & (media_player.const.SUPPORT_PLAY | media_player.const.SUPPORT_PAUSE):
supported_modes.append(FEATURE_PLAY_PAUSE)
if features & (media_player.const.SUPPORT_PLAY | media_player.const.SUPPORT_STOP):
supported_modes.append(FEATURE_PLAY_STOP)
if features & media_player.const.SUPPORT_VOLUME_MUTE:
supported_modes.append(FEATURE_TOGGLE_MUTE)
return supported_modes
def validate_media_player_features(state, feature_list):
"""Validate features for media players."""
supported_modes = get_media_player_features(state)
if not supported_modes:
_LOGGER.error("%s does not support any media_player features", state.entity_id)
return False
if not feature_list:
# Auto detected
return True
error_list = []
for feature in feature_list:
if feature not in supported_modes:
error_list.append(feature)
if error_list:
_LOGGER.error(
"%s does not support media_player features: %s", state.entity_id, error_list
)
return False
return True
SpeedRange = namedtuple("SpeedRange", ("start", "target"))
SpeedRange.__doc__ += """ Maps Home Assistant speed \
values to percentage based HomeKit speeds.
start: Start of the range (inclusive).
target: Percentage to use to determine HomeKit percentages \
from HomeAssistant speed.
"""
class HomeKitSpeedMapping:
"""Supports conversion between Home Assistant and HomeKit fan speeds."""
def __init__(self, speed_list):
"""Initialize a new SpeedMapping object."""
if speed_list[0] != fan.SPEED_OFF:
_LOGGER.warning(
"%s does not contain the speed setting "
"%s as its first element. "
"Assuming that %s is equivalent to 'off'",
speed_list,
fan.SPEED_OFF,
speed_list[0],
)
self.speed_ranges = OrderedDict()
list_size = len(speed_list)
for index, speed in enumerate(speed_list):
# By dividing by list_size -1 the following
# desired attributes hold true:
# * index = 0 => 0%, equal to "off"
# * index = len(speed_list) - 1 => 100 %
# * all other indices are equally distributed
target = index * 100 / (list_size - 1)
start = index * 100 / list_size
self.speed_ranges[speed] = SpeedRange(start, target)
def speed_to_homekit(self, speed):
"""Map Home Assistant speed state to HomeKit speed."""
if speed is None:
return None
speed_range = self.speed_ranges[speed]
return round(speed_range.target)
def speed_to_states(self, speed):
"""Map HomeKit speed to Home Assistant speed state."""
for state, speed_range in reversed(self.speed_ranges.items()):
if speed_range.start <= speed:
return state
return list(self.speed_ranges)[0]
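# Illustrative usage sketch (not part of the original module); the speed list
# below is a hypothetical example:
#     mapping = HomeKitSpeedMapping(["off", "low", "medium", "high"])
#     mapping.speed_to_homekit("medium")  # -> 67 (index 2 of 3 -> 66.7%)
#     mapping.speed_to_states(30)         # -> "low" (30 falls in the 25-49 range)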
def show_setup_message(hass, entry_id, bridge_name, pincode, uri):
"""Display persistent notification with setup information."""
pin = pincode.decode()
_LOGGER.info("Pincode: %s", pin)
buffer = io.BytesIO()
url = pyqrcode.create(uri)
url.svg(buffer, scale=5, module_color="#000", background="#FFF")
pairing_secret = secrets.token_hex(32)
hass.data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR] = buffer.getvalue()
hass.data[DOMAIN][entry_id][HOMEKIT_PAIRING_QR_SECRET] = pairing_secret
message = (
f"To set up {bridge_name} in the Home App, "
f"scan the QR code or enter the following code:\n"
f"### {pin}\n"
f""
)
hass.components.persistent_notification.create(
message, "HomeKit Bridge Setup", entry_id
)
def dismiss_setup_message(hass, entry_id):
"""Dismiss persistent notification and remove QR code."""
hass.components.persistent_notification.dismiss(entry_id)
def convert_to_float(state):
"""Return float of state, catch errors."""
try:
return float(state)
except (ValueError, TypeError):
return None
def cleanup_name_for_homekit(name):
"""Ensure the name of the device will not crash homekit."""
#
# This is not a security measure.
#
# UNICODE_EMOJI is also not allowed but that
# likely isn't a problem
return name.translate(HOMEKIT_CHAR_TRANSLATIONS)
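# For example (illustrative, not exhaustive): problematic characters are mapped
# via the table above, so cleanup_name_for_homekit("Kitchen: Light (2)")
# returns "Kitchen- Light -2-".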
def temperature_to_homekit(temperature, unit):
"""Convert temperature to Celsius for HomeKit."""
return round(temp_util.convert(temperature, unit, TEMP_CELSIUS), 1)
def temperature_to_states(temperature, unit):
"""Convert temperature back from Celsius to Home Assistant unit."""
return round(temp_util.convert(temperature, TEMP_CELSIUS, unit) * 2) / 2
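# Worked examples (illustrative): temperature_to_homekit(71.6, TEMP_FAHRENHEIT)
# converts to Celsius and rounds to one decimal, giving 22.0, while
# temperature_to_states(22.3, TEMP_CELSIUS) rounds to the nearest half degree,
# giving 22.5.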
def density_to_air_quality(density):
"""Map PM2.5 density to HomeKit AirQuality level."""
if density <= 35:
return 1
if density <= 75:
return 2
if density <= 115:
return 3
if density <= 150:
return 4
return 5
def get_persist_filename_for_entry_id(entry_id: str):
"""Determine the filename of the homekit state file."""
return f"{DOMAIN}.{entry_id}.state"
def get_aid_storage_filename_for_entry_id(entry_id: str):
"""Determine the ilename of homekit aid storage file."""
return f"{DOMAIN}.{entry_id}.aids"
def get_persist_fullpath_for_entry_id(hass: HomeAssistant, entry_id: str):
"""Determine the path to the homekit state file."""
return hass.config.path(STORAGE_DIR, get_persist_filename_for_entry_id(entry_id))
def get_aid_storage_fullpath_for_entry_id(hass: HomeAssistant, entry_id: str):
"""Determine the path to the homekit aid storage file."""
return hass.config.path(
STORAGE_DIR, get_aid_storage_filename_for_entry_id(entry_id)
)
def format_sw_version(version):
"""Extract the version string in a format homekit can consume."""
match = re.search(r"([0-9]+)(\.[0-9]+)?(\.[0-9]+)?", str(version).replace("-", "."))
if match:
return match.group(0)
return None
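# For example (illustrative): format_sw_version("2.4.1-rc1") returns "2.4.1"
# because the "-" is normalised to "." before the numeric match, and
# format_sw_version(None) returns None since no digits are found.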
def migrate_filesystem_state_data_for_primary_imported_entry_id(
hass: HomeAssistant, entry_id: str
):
"""Migrate the old paths to the storage directory."""
legacy_persist_file_path = hass.config.path(HOMEKIT_FILE)
if os.path.exists(legacy_persist_file_path):
os.rename(
legacy_persist_file_path, get_persist_fullpath_for_entry_id(hass, entry_id)
)
legacy_aid_storage_path = hass.config.path(STORAGE_DIR, "homekit.aids")
if os.path.exists(legacy_aid_storage_path):
os.rename(
legacy_aid_storage_path,
get_aid_storage_fullpath_for_entry_id(hass, entry_id),
)
def remove_state_files_for_entry_id(hass: HomeAssistant, entry_id: str):
"""Remove the state files from disk."""
persist_file_path = get_persist_fullpath_for_entry_id(hass, entry_id)
aid_storage_path = get_aid_storage_fullpath_for_entry_id(hass, entry_id)
os.unlink(persist_file_path)
if os.path.exists(aid_storage_path):
os.unlink(aid_storage_path)
return True
def _get_test_socket():
"""Create a socket to test binding ports."""
test_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
test_socket.setblocking(False)
test_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
return test_socket
def port_is_available(port: int):
"""Check to see if a port is available."""
test_socket = _get_test_socket()
try:
test_socket.bind(("", port))
except OSError:
return False
return True
def find_next_available_port(start_port: int):
"""Find the next available port starting with the given port."""
test_socket = _get_test_socket()
    for port in range(start_port, MAX_PORT + 1):
try:
test_socket.bind(("", port))
return port
except OSError:
if port == MAX_PORT:
raise
continue
def pid_is_alive(pid):
"""Check to see if a process is alive."""
try:
os.kill(pid, 0)
return True
except OSError:
pass
return False
|
import locale
import os
import sys
from urllib.parse import urlparse
from django.core.cache import cache
from django.http import HttpResponseRedirect
from django.shortcuts import redirect
from django.shortcuts import render as django_render
from django.shortcuts import resolve_url
from django.utils.http import url_has_allowed_host_and_scheme
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from lxml import etree
from translate.misc.multistring import multistring
from translate.storage.placeables.lisa import parse_xliff, strelem_to_xml
from weblate.utils.data import data_dir
PLURAL_SEPARATOR = "\x1e\x1e"
LOCALE_SETUP = True
PRIORITY_CHOICES = (
(60, gettext_lazy("Very high")),
(80, gettext_lazy("High")),
(100, gettext_lazy("Medium")),
(120, gettext_lazy("Low")),
(140, gettext_lazy("Very low")),
)
# Initialize to sane locales for strxfrm
try:
locale.setlocale(locale.LC_ALL, ("C", "UTF-8"))
except locale.Error:
try:
locale.setlocale(locale.LC_ALL, ("en_US", "UTF-8"))
except locale.Error:
LOCALE_SETUP = False
def is_plural(text):
"""Check whether string is plural form."""
return text.find(PLURAL_SEPARATOR) != -1
def split_plural(text):
return text.split(PLURAL_SEPARATOR)
def join_plural(text):
return PLURAL_SEPARATOR.join(text)
def get_string(text):
"""Return correctly formatted string from ttkit unit data."""
# Check for null target (happens with XLIFF)
if text is None:
return ""
if isinstance(text, multistring):
return join_plural(get_string(str(item)) for item in text.strings)
if isinstance(text, str):
# Remove possible surrogates in the string. There doesn't seem to be
# a cheap way to detect this, so do the conversion in both cases. In
        # case of failure, this at least fails while parsing the file instead
        # of later when inserting the data into the database.
return text.encode("utf-16", "surrogatepass").decode("utf-16")
# We might get integer or float in some formats
return str(text)
def is_repo_link(val):
"""Check whether repository is just a link for other one."""
return val.startswith("weblate://")
def get_distinct_translations(units):
"""Return list of distinct translations.
It should be possible to use distinct('target') since Django 1.4, but it is not
supported with MySQL, so let's emulate that based on presumption we won't get too
many results.
"""
targets = {}
result = []
for unit in units:
if unit.target in targets:
continue
targets[unit.target] = 1
result.append(unit)
return result
def translation_percent(translated, total, zero_complete=True):
"""Return translation percentage."""
if total == 0:
return 100.0 if zero_complete else 0.0
if total is None:
return 0.0
perc = (1000 * translated // total) / 10.0
# Avoid displaying misleading rounded 0.0% or 100.0%
if perc == 0.0 and translated != 0:
return 0.1
if perc == 100.0 and translated < total:
return 99.9
return perc
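# Worked examples (illustrative): translation_percent(1, 10000) returns 0.1
# rather than a misleading 0.0, translation_percent(3, 7) returns 42.8
# (one decimal, floor-based), and translation_percent(0, 0) returns 100.0
# because an empty component counts as complete by default.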
def get_clean_env(extra=None):
"""Return cleaned up environment for subprocess execution."""
environ = {
"LANG": "C.UTF-8",
"LC_ALL": "C.UTF-8",
"HOME": data_dir("home"),
"PATH": "/bin:/usr/bin:/usr/local/bin",
}
if extra is not None:
environ.update(extra)
variables = (
# Keep PATH setup
"PATH",
# Keep Python search path
"PYTHONPATH",
# Keep linker configuration
"LD_LIBRARY_PATH",
"LD_PRELOAD",
# Needed by Git on Windows
"SystemRoot",
# Pass proxy configuration
"http_proxy",
"https_proxy",
"HTTPS_PROXY",
"NO_PROXY",
        # The following two are needed for OpenShift 3 deployments,
        # where nss_wrapper is used; see:
# https://docs.openshift.com/enterprise/3.2/creating_images/guidelines.html
"NSS_WRAPPER_GROUP",
"NSS_WRAPPER_PASSWD",
)
for var in variables:
if var in os.environ:
environ[var] = os.environ[var]
# Extend path to include virtualenv, avoid insert already existing ones to
# not break existing ordering (for example PATH injection used in tests)
venv_path = os.path.join(sys.exec_prefix, "bin")
if venv_path not in environ["PATH"]:
environ["PATH"] = "{}:{}".format(venv_path, environ["PATH"])
return environ
def cleanup_repo_url(url, text=None):
"""Remove credentials from repository URL."""
if text is None:
text = url
try:
parsed = urlparse(url)
except ValueError:
# The URL can not be parsed, so avoid stripping
return text
if parsed.username and parsed.password:
return text.replace(f"{parsed.username}:{parsed.password}@", "")
if parsed.username:
return text.replace(f"{parsed.username}@", "")
return text
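# For example (illustrative URL): cleanup_repo_url(
#     "https://user:[email protected]/weblate/weblate.git")
# returns "https://example.com/weblate/weblate.git".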
def redirect_param(location, params, *args, **kwargs):
"""Redirect to a URL with parameters."""
return HttpResponseRedirect(resolve_url(location, *args, **kwargs) + params)
def cleanup_path(path):
"""Remove leading ./ or / from path."""
if not path:
return path
# interpret absolute pathname as relative, remove drive letter or
# UNC path, redundant separators, "." and ".." components.
path = os.path.splitdrive(path)[1]
invalid_path_parts = ("", os.path.curdir, os.path.pardir)
path = os.path.sep.join(
x for x in path.split(os.path.sep) if x not in invalid_path_parts
)
return os.path.normpath(path)
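# For example (illustrative, assuming a POSIX path separator):
# cleanup_path("./docs/../po/cs.po") returns "docs/po/cs.po" because "." and
# ".." components are dropped rather than resolved.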
def get_project_description(project):
"""Return verbose description for project translation."""
    # Cache the count as it might be expensive to calculate (it pulls
    # all project stats) and there is no need to always have an
    # up-to-date count here.
cache_key = f"project-lang-count-{project.id}"
count = cache.get(cache_key)
if count is None:
count = project.stats.languages
cache.set(cache_key, count, 6 * 3600)
return _(
"{0} is translated into {1} languages using Weblate. "
"Join the translation or start translating your own project."
).format(project, count)
def render(request, template, context=None, status=None):
"""Wrapper around Django render to extend context."""
if context is None:
context = {}
if "project" in context and context["project"] is not None:
context["description"] = get_project_description(context["project"])
return django_render(request, template, context, status=status)
def path_separator(path):
"""Alway use / as path separator for consistency."""
if os.path.sep != "/":
return path.replace(os.path.sep, "/")
return path
def sort_unicode(choices, key):
"""Unicode aware sorting if available."""
return sorted(choices, key=lambda tup: locale.strxfrm(key(tup)))
def sort_choices(choices):
"""Sort choices alphabetically."""
return sort_unicode(choices, lambda tup: tup[1])
def sort_objects(objects):
"""Sort objects alphabetically."""
return sort_unicode(objects, str)
def redirect_next(next_url, fallback):
"""Redirect to next URL from request after validating it."""
if (
next_url is None
or not url_has_allowed_host_and_scheme(next_url, allowed_hosts=None)
or not next_url.startswith("/")
):
return redirect(fallback)
return HttpResponseRedirect(next_url)
def xliff_string_to_rich(string):
"""Convert XLIFF string to StringElement.
Transform a string containing XLIFF placeholders as XML into a rich content
(StringElement)
"""
if isinstance(string, list):
return [parse_xliff(s) for s in string]
return [parse_xliff(string)]
def rich_to_xliff_string(string_elements):
"""Convert StringElement to XLIFF string.
Transform rich content (StringElement) into a string with placeholder kept as XML
"""
# Create dummy root element
xml = etree.Element("e")
for string_element in string_elements:
# Inject placeable from translate-toolkit
strelem_to_xml(xml, string_element)
# Remove any possible namespace
for child in xml:
if child.tag.startswith("{"):
child.tag = child.tag[child.tag.index("}") + 1 :]
etree.cleanup_namespaces(xml)
# Convert to string
string_xml = etree.tostring(xml, encoding="unicode")
# Strip dummy root element
return string_xml[3:][:-4]
def get_state_css(unit):
"""Return state flags."""
flags = []
if unit.fuzzy:
flags.append("state-need-edit")
elif not unit.translated:
flags.append("state-empty")
elif unit.readonly:
flags.append("state-readonly")
elif unit.approved:
flags.append("state-approved")
elif unit.translated:
flags.append("state-translated")
if unit.has_failing_check:
flags.append("state-check")
if unit.dismissed_checks:
flags.append("state-dismissed-check")
if unit.has_comment:
flags.append("state-comment")
if unit.has_suggestion:
flags.append("state-suggest")
return flags
def check_upload_method_permissions(user, translation, method: str):
"""Check whether user has permission to perform upload method."""
if method == "source":
return (
translation.is_source
and user.has_perm("upload.perform", translation)
and hasattr(translation.component.file_format_cls, "update_bilingual")
)
if method in ("translate", "fuzzy"):
return user.has_perm("unit.edit", translation)
if method == "suggest":
return not translation.is_readonly and user.has_perm(
"suggestion.add", translation
)
if method == "approve":
return user.has_perm("unit.review", translation)
if method == "replace":
return translation.filename and user.has_perm("component.edit", translation)
raise ValueError(f"Invalid method: {method}")
|
import pytest
@pytest.mark.parametrize(['file_name', 'elem_id', 'source', 'input_text'], [
('textarea.html', 'qute-textarea', 'clipboard', 'qutebrowser'),
('textarea.html', 'qute-textarea', 'keypress', 'superqutebrowser'),
('input.html', 'qute-input', 'clipboard', 'amazingqutebrowser'),
('input.html', 'qute-input', 'keypress', 'awesomequtebrowser'),
pytest.param('autofocus.html', 'qute-input-autofocus', 'keypress',
'cutebrowser', marks=pytest.mark.flaky),
])
@pytest.mark.parametrize('zoom', [100, 125, 250])
def test_insert_mode(file_name, elem_id, source, input_text, zoom,
quteproc, request):
url_path = 'data/insert_mode_settings/html/{}'.format(file_name)
quteproc.open_path(url_path)
quteproc.send_cmd(':zoom {}'.format(zoom))
quteproc.send_cmd(':click-element --force-event id {}'.format(elem_id))
quteproc.wait_for(message='Entering mode KeyMode.insert (reason: *)')
quteproc.send_cmd(':debug-set-fake-clipboard')
if source == 'keypress':
quteproc.press_keys(input_text)
elif source == 'clipboard':
quteproc.send_cmd(':debug-set-fake-clipboard "{}"'.format(input_text))
quteproc.send_cmd(':insert-text {clipboard}')
else:
raise ValueError("Invalid source {!r}".format(source))
quteproc.wait_for_js('contents: {}'.format(input_text))
quteproc.send_cmd(':leave-mode')
@pytest.mark.parametrize('auto_load, background, insert_mode', [
(False, False, False), # auto_load disabled
(True, False, True), # enabled and foreground tab
(True, True, False), # background tab
])
@pytest.mark.flaky
def test_auto_load(quteproc, auto_load, background, insert_mode):
quteproc.set_setting('input.insert_mode.auto_load', str(auto_load))
url_path = 'data/insert_mode_settings/html/autofocus.html'
quteproc.open_path(url_path, new_bg_tab=background)
log_message = 'Entering mode KeyMode.insert (reason: *)'
if insert_mode:
quteproc.wait_for(message=log_message)
quteproc.send_cmd(':leave-mode')
else:
quteproc.ensure_not_logged(message=log_message)
def test_auto_leave_insert_mode(quteproc):
url_path = 'data/insert_mode_settings/html/autofocus.html'
quteproc.open_path(url_path)
quteproc.set_setting('input.insert_mode.auto_leave', 'true')
quteproc.send_cmd(':zoom 100')
quteproc.press_keys('abcd')
quteproc.send_cmd(':hint all')
quteproc.wait_for(message='hints: *')
# Select the disabled input box to leave insert mode
quteproc.send_cmd(':follow-hint s')
quteproc.wait_for(message='Clicked non-editable element!')
@pytest.mark.parametrize('leave_on_load', [True, False])
def test_auto_leave_insert_mode_reload(quteproc, leave_on_load):
url_path = 'data/hello.txt'
quteproc.open_path(url_path)
quteproc.set_setting('input.insert_mode.leave_on_load',
str(leave_on_load).lower())
quteproc.send_cmd(':enter-mode insert')
quteproc.wait_for(message='Entering mode KeyMode.insert (reason: *)')
quteproc.send_cmd(':reload')
if leave_on_load:
quteproc.wait_for(message='Leaving mode KeyMode.insert (reason: *)')
else:
quteproc.wait_for(
message='Ignoring leave_on_load request due to setting.')
|
import subprocess
import unittest.mock
from hangups.ui import notifier
NOTIFICATION = notifier.Notification(
'John Cleese', 'Cheese Shop', 'How about a little red Leicester?'
)
MOCK_DBUS = unittest.mock.patch(
'subprocess.check_output', autospec=True, return_value=b'(uint32 7,)\n'
)
MOCK_APPLE = unittest.mock.patch(
'subprocess.check_output', autospec=True, return_value=b''
)
def test_bell_notifier(capsys):
notifier.BellNotifier().send(NOTIFICATION)
assert capsys.readouterr() == ('\a', '')
def test_dbus_notifier():
with MOCK_DBUS as check_output:
notifier.DbusNotifier().send(NOTIFICATION)
check_output.assert_called_once_with([
'gdbus', 'call', '--session',
'--dest', 'org.freedesktop.Notifications',
'--object-path', '/org/freedesktop/Notifications',
'--method', 'org.freedesktop.Notifications.Notify',
'hangups', '0', '', 'John Cleese', 'How about a little red Leicester?',
'[]', '{}', ' -1'
], stderr=subprocess.STDOUT)
def test_dbus_notifier_replaces_id():
dbus_notifier = notifier.DbusNotifier()
with MOCK_DBUS as check_output:
dbus_notifier.send(NOTIFICATION)
assert check_output.call_args[0][0][10] == '0'
dbus_notifier.send(NOTIFICATION)
assert check_output.call_args[0][0][10] == '7'
def test_dbus_notifier_escaping():
evil_notification = notifier.Notification(
'<b>title</b> \\ \' "', None, '<b>message</b> \\ \' "'
)
with MOCK_DBUS as check_output:
notifier.DbusNotifier().send(evil_notification)
assert check_output.call_args[0][0][12:14] == [
'<b>title</b> \\\\ \\u0027 \\u0022',
'<b>message</b> \\\\ \\u0027 \\u0022',
]
def test_apple_notifier():
with MOCK_APPLE as check_output:
notifier.AppleNotifier().send(NOTIFICATION)
check_output.assert_called_once_with([
'osascript', '-e',
'display notification "How about a little red Leicester?" '
'with title "John Cleese" subtitle "Cheese Shop"'
], stderr=subprocess.STDOUT)
def test_apple_notifier_escaping():
evil_notification = notifier.Notification(
'title "', 'subtitle "', 'message "'
)
with MOCK_APPLE as check_output:
notifier.AppleNotifier().send(evil_notification)
assert check_output.call_args[0][0][2] == (
'display notification "message \\"" '
'with title "title \\"" subtitle "subtitle \\""'
)
def test_default_notifier():
default_notifier = notifier.DefaultNotifier()
# pylint: disable=protected-access
mock_send = unittest.mock.patch.object(
default_notifier._notifier, 'send', autospec=True
)
with mock_send as send:
default_notifier.send(NOTIFICATION)
send.assert_called_once_with(NOTIFICATION)
|
from typing import Any, Dict, Optional
from rpi_bad_power import new_under_voltage
from homeassistant import config_entries
from homeassistant.core import HomeAssistant
from homeassistant.helpers.config_entry_flow import DiscoveryFlowHandler
from .const import DOMAIN
async def _async_supported(hass: HomeAssistant) -> bool:
"""Return if the system supports under voltage detection."""
under_voltage = await hass.async_add_executor_job(new_under_voltage)
return under_voltage is not None
class RPiPowerFlow(DiscoveryFlowHandler, domain=DOMAIN):
"""Discovery flow handler."""
VERSION = 1
def __init__(self) -> None:
"""Set up config flow."""
super().__init__(
DOMAIN,
"Raspberry Pi Power Supply Checker",
_async_supported,
config_entries.CONN_CLASS_LOCAL_POLL,
)
async def async_step_onboarding(
self, data: Optional[Dict[str, Any]] = None
) -> Dict[str, Any]:
"""Handle a flow initialized by onboarding."""
has_devices = await self._discovery_function(self.hass)
if not has_devices:
return self.async_abort(reason="no_devices_found")
return self.async_create_entry(title=self._title, data={})
|
from homeassistant.components.lock import DOMAIN, LockEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import LOCKS, NEW_LIGHT
from .deconz_device import DeconzDevice
from .gateway import get_gateway_from_config_entry
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up locks for deCONZ component.
Locks are based on the same device class as lights in deCONZ.
"""
gateway = get_gateway_from_config_entry(hass, config_entry)
gateway.entities[DOMAIN] = set()
@callback
def async_add_lock(lights):
"""Add lock from deCONZ."""
entities = []
for light in lights:
if light.type in LOCKS and light.uniqueid not in gateway.entities[DOMAIN]:
entities.append(DeconzLock(light, gateway))
if entities:
async_add_entities(entities)
gateway.listeners.append(
async_dispatcher_connect(
hass, gateway.async_signal_new_device(NEW_LIGHT), async_add_lock
)
)
async_add_lock(gateway.api.lights.values())
class DeconzLock(DeconzDevice, LockEntity):
"""Representation of a deCONZ lock."""
TYPE = DOMAIN
@property
def is_locked(self):
"""Return true if lock is on."""
return self._device.state
async def async_lock(self, **kwargs):
"""Lock the lock."""
data = {"on": True}
await self._device.async_set_state(data)
async def async_unlock(self, **kwargs):
"""Unlock the lock."""
data = {"on": False}
await self._device.async_set_state(data)
|
import configparser
import enum
import os
import sys
import tempfile
from typing import List, Tuple
from gi.repository import Gio, GLib, Gtk
import meld.misc
from meld.conf import _
class RecentType(enum.Enum):
File = "File"
Folder = "Folder"
VersionControl = "Version control"
Merge = "Merge"
class RecentFiles:
mime_type = "application/x-meld-comparison"
recent_path = os.path.join(GLib.get_user_data_dir(), "meld")
recent_suffix = ".meldcmp"
# Recent data
app_name = "Meld"
def __init__(self):
self.recent_manager = Gtk.RecentManager.get_default()
self.recent_filter = Gtk.RecentFilter()
self.recent_filter.add_mime_type(self.mime_type)
self._stored_comparisons = {}
self.app_exec = os.path.abspath(sys.argv[0])
if not os.path.exists(self.recent_path):
os.makedirs(self.recent_path)
self._clean_recent_files()
self._update_recent_files()
self.recent_manager.connect("changed", self._update_recent_files)
def add(self, tab, flags=None):
"""Add a tab to our recently-used comparison list
The passed flags are currently ignored. In the future these are to be
used for extra initialisation not captured by the tab itself.
"""
recent_type, gfiles = tab.get_comparison()
# While Meld handles comparisons including None, recording these as
# recently-used comparisons just isn't that sane.
if None in gfiles:
return
uris = [f.get_uri() for f in gfiles]
names = [f.get_parse_name() for f in gfiles]
# If a (type, uris) comparison is already registered, then re-add
# the corresponding comparison file
comparison_key = (recent_type, tuple(uris))
if comparison_key in self._stored_comparisons:
gfile = Gio.File.new_for_uri(
self._stored_comparisons[comparison_key])
else:
recent_path = self._write_recent_file(recent_type, uris)
gfile = Gio.File.new_for_path(recent_path)
if len(uris) > 1:
display_name = " : ".join(meld.misc.shorten_names(*names))
else:
display_path = names[0]
userhome = os.path.expanduser("~")
if display_path.startswith(userhome):
# FIXME: What should we show on Windows?
display_path = "~" + display_path[len(userhome):]
display_name = _("Version control:") + " " + display_path
# FIXME: Should this be translatable? It's not actually used anywhere.
description = "{} comparison\n{}".format(
recent_type.value, ", ".join(uris))
recent_metadata = Gtk.RecentData()
recent_metadata.mime_type = self.mime_type
recent_metadata.app_name = self.app_name
recent_metadata.app_exec = "%s --comparison-file %%u" % self.app_exec
recent_metadata.display_name = display_name
recent_metadata.description = description
recent_metadata.is_private = True
self.recent_manager.add_full(gfile.get_uri(), recent_metadata)
def read(self, uri: str) -> Tuple[RecentType, List[Gio.File]]:
"""Read stored comparison from URI"""
comp_gfile = Gio.File.new_for_uri(uri)
comp_path = comp_gfile.get_path()
if not comp_gfile.query_exists(None) or not comp_path:
raise IOError("Recent comparison file does not exist")
try:
config = configparser.RawConfigParser()
config.read(comp_path)
assert (config.has_section("Comparison") and
config.has_option("Comparison", "type") and
config.has_option("Comparison", "uris"))
except (configparser.Error, AssertionError):
raise ValueError("Invalid recent comparison file")
try:
recent_type = RecentType(config.get("Comparison", "type"))
except ValueError:
raise ValueError("Invalid recent comparison file")
uris = config.get("Comparison", "uris").split(";")
gfiles = [Gio.File.new_for_uri(u) for u in uris]
return recent_type, gfiles
def _write_recent_file(self, recent_type: RecentType, uris):
# TODO: Use GKeyFile instead, and return a Gio.File. This is why we're
# using ';' to join comparison paths.
with tempfile.NamedTemporaryFile(
mode='w+t', prefix='recent-', suffix=self.recent_suffix,
dir=self.recent_path, delete=False) as f:
config = configparser.RawConfigParser()
config.add_section("Comparison")
config.set("Comparison", "type", recent_type.value)
config.set("Comparison", "uris", ";".join(uris))
config.write(f)
name = f.name
return name
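    # For reference (illustrative content, not from the original source): a
    # comparison file written above ends up looking roughly like
    #     [Comparison]
    #     type = File
    #     uris = file:///tmp/a;file:///tmp/b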
def _clean_recent_files(self):
# Remove from RecentManager any comparisons with no existing file
meld_items = self._filter_items(self.recent_filter,
self.recent_manager.get_items())
for item in meld_items:
if not item.exists():
self.recent_manager.remove_item(item.get_uri())
meld_items = [item for item in meld_items if item.exists()]
# Remove any comparison files that are not listed by RecentManager
item_uris = [item.get_uri() for item in meld_items]
item_paths = [
Gio.File.new_for_uri(uri).get_path() for uri in item_uris]
stored = [p for p in os.listdir(self.recent_path)
if p.endswith(self.recent_suffix)]
for path in stored:
file_path = os.path.abspath(os.path.join(self.recent_path, path))
if file_path not in item_paths:
try:
os.remove(file_path)
except OSError:
pass
def _update_recent_files(self, *args):
meld_items = self._filter_items(self.recent_filter,
self.recent_manager.get_items())
item_uris = [item.get_uri() for item in meld_items if item.exists()]
self._stored_comparisons = {}
for item_uri in item_uris:
try:
recent_type, gfiles = self.read(item_uri)
except (IOError, ValueError):
continue
# Store and look up comparisons by type and paths
gfile_uris = tuple(gfile.get_uri() for gfile in gfiles)
self._stored_comparisons[recent_type, gfile_uris] = item_uri
def _filter_items(self, recent_filter, items):
getters = {Gtk.RecentFilterFlags.URI: "uri",
Gtk.RecentFilterFlags.DISPLAY_NAME: "display_name",
Gtk.RecentFilterFlags.MIME_TYPE: "mime_type",
Gtk.RecentFilterFlags.APPLICATION: "applications",
Gtk.RecentFilterFlags.GROUP: "groups",
Gtk.RecentFilterFlags.AGE: "age"}
needed = recent_filter.get_needed()
attrs = [v for k, v in getters.items() if needed & k]
filtered_items = []
for i in items:
filter_data = {}
for attr in attrs:
filter_data[attr] = getattr(i, "get_" + attr)()
filter_info = Gtk.RecentFilterInfo()
filter_info.contains = needed
for f, v in filter_data.items():
# https://bugzilla.gnome.org/show_bug.cgi?id=695970
if isinstance(v, list):
continue
setattr(filter_info, f, v)
if recent_filter.filter(filter_info):
filtered_items.append(i)
return filtered_items
def __str__(self):
items = self.recent_manager.get_items()
descriptions = []
for i in self._filter_items(self.recent_filter, items):
descriptions.append("%s\n%s\n" % (i.get_display_name(),
i.get_uri_display()))
return "\n".join(descriptions)
recent_comparisons = RecentFiles()
|
import logging
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_BATTERY_CHARGING,
CONF_MONITORED_CONDITIONS,
CONF_SENSORS,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.util.dt import as_local
from .const import (
ATTRIBUTION,
DEVICE_BRAND,
DOMAIN as LOGI_CIRCLE_DOMAIN,
LOGI_SENSORS as SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a sensor for a Logi Circle device. Obsolete."""
_LOGGER.warning("Logi Circle no longer works with sensor platform configuration")
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a Logi Circle sensor based on a config entry."""
devices = await hass.data[LOGI_CIRCLE_DOMAIN].cameras
time_zone = str(hass.config.time_zone)
sensors = []
for sensor_type in entry.data.get(CONF_SENSORS).get(CONF_MONITORED_CONDITIONS):
for device in devices:
if device.supports_feature(sensor_type):
sensors.append(LogiSensor(device, time_zone, sensor_type))
async_add_entities(sensors, True)
class LogiSensor(Entity):
"""A sensor implementation for a Logi Circle camera."""
def __init__(self, camera, time_zone, sensor_type):
"""Initialize a sensor for Logi Circle camera."""
self._sensor_type = sensor_type
self._camera = camera
self._id = f"{self._camera.mac_address}-{self._sensor_type}"
self._icon = f"mdi:{SENSOR_TYPES.get(self._sensor_type)[2]}"
self._name = f"{self._camera.name} {SENSOR_TYPES.get(self._sensor_type)[0]}"
self._activity = {}
self._state = None
self._tz = time_zone
@property
def unique_id(self):
"""Return a unique ID."""
return self._id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_info(self):
"""Return information about the device."""
return {
"name": self._camera.name,
"identifiers": {(LOGI_CIRCLE_DOMAIN, self._camera.id)},
"model": self._camera.model_name,
"sw_version": self._camera.firmware,
"manufacturer": DEVICE_BRAND,
}
@property
def device_state_attributes(self):
"""Return the state attributes."""
state = {
ATTR_ATTRIBUTION: ATTRIBUTION,
"battery_saving_mode": (
STATE_ON if self._camera.battery_saving else STATE_OFF
),
"microphone_gain": self._camera.microphone_gain,
}
if self._sensor_type == "battery_level":
state[ATTR_BATTERY_CHARGING] = self._camera.charging
return state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
if self._sensor_type == "battery_level" and self._state is not None:
return icon_for_battery_level(
battery_level=int(self._state), charging=False
)
if self._sensor_type == "recording_mode" and self._state is not None:
return "mdi:eye" if self._state == STATE_ON else "mdi:eye-off"
if self._sensor_type == "streaming_mode" and self._state is not None:
return "mdi:camera" if self._state == STATE_ON else "mdi:camera-off"
return self._icon
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return SENSOR_TYPES.get(self._sensor_type)[1]
async def async_update(self):
"""Get the latest data and updates the state."""
_LOGGER.debug("Pulling data from %s sensor", self._name)
await self._camera.update()
if self._sensor_type == "last_activity_time":
last_activity = await self._camera.get_last_activity(force_refresh=True)
if last_activity is not None:
last_activity_time = as_local(last_activity.end_time_utc)
self._state = (
f"{last_activity_time.hour:0>2}:{last_activity_time.minute:0>2}"
)
else:
state = getattr(self._camera, self._sensor_type, None)
if isinstance(state, bool):
self._state = STATE_ON if state is True else STATE_OFF
else:
self._state = state
|
from homeassistant.components.switch import SwitchEntity
from . import IHC_CONTROLLER, IHC_INFO
from .const import CONF_OFF_ID, CONF_ON_ID
from .ihcdevice import IHCDevice
from .util import async_pulse, async_set_bool
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the IHC switch platform."""
if discovery_info is None:
return
devices = []
for name, device in discovery_info.items():
ihc_id = device["ihc_id"]
product_cfg = device["product_cfg"]
product = device["product"]
# Find controller that corresponds with device id
ctrl_id = device["ctrl_id"]
ihc_key = f"ihc{ctrl_id}"
info = hass.data[ihc_key][IHC_INFO]
ihc_controller = hass.data[ihc_key][IHC_CONTROLLER]
ihc_off_id = product_cfg.get(CONF_OFF_ID)
ihc_on_id = product_cfg.get(CONF_ON_ID)
switch = IHCSwitch(
ihc_controller, name, ihc_id, ihc_off_id, ihc_on_id, info, product
)
devices.append(switch)
add_entities(devices)
class IHCSwitch(IHCDevice, SwitchEntity):
"""Representation of an IHC switch."""
def __init__(
self,
ihc_controller,
name: str,
ihc_id: int,
ihc_off_id: int,
ihc_on_id: int,
info: bool,
product=None,
) -> None:
"""Initialize the IHC switch."""
super().__init__(ihc_controller, name, ihc_id, product)
self._ihc_off_id = ihc_off_id
self._ihc_on_id = ihc_on_id
self._state = False
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
if self._ihc_on_id:
await async_pulse(self.hass, self.ihc_controller, self._ihc_on_id)
else:
await async_set_bool(self.hass, self.ihc_controller, self.ihc_id, True)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
if self._ihc_off_id:
await async_pulse(self.hass, self.ihc_controller, self._ihc_off_id)
else:
await async_set_bool(self.hass, self.ihc_controller, self.ihc_id, False)
def on_ihc_change(self, ihc_id, value):
"""Handle IHC resource change."""
self._state = value
self.schedule_update_ha_state()
|
import sys
import os
from glob import glob
import platform
def running_under_virtualenv():
if hasattr(sys, 'real_prefix'):
return True
elif sys.prefix != getattr(sys, "base_prefix", sys.prefix):
return True
if os.getenv('VIRTUAL_ENV', False):
return True
return False
if os.environ.get('USE_SETUPTOOLS'):
from setuptools import setup
setup_kwargs = dict(zip_safe=0)
else:
from distutils.core import setup
setup_kwargs = dict()
if os.name == 'nt':
pgm_files = os.environ["ProgramFiles"]
base_files = os.path.join(pgm_files, 'diamond')
data_files = [
(base_files, ['LICENSE', 'version.txt']),
(os.path.join(base_files, 'user_scripts'), []),
(os.path.join(base_files, 'conf'), glob('conf/*.conf.*')),
(os.path.join(base_files, 'collectors'), glob('conf/collectors/*')),
(os.path.join(base_files, 'handlers'), glob('conf/handlers/*')),
]
    install_requires = ['configobj', 'psutil']
else:
data_files = [
('share/diamond', ['LICENSE', 'version.txt']),
('share/diamond/user_scripts', []),
]
distro = platform.dist()[0]
    distro_major_version = int(platform.dist()[1].split('.')[0] or 0)
if not distro:
if 'amzn' in platform.uname()[2]:
distro = 'centos'
if running_under_virtualenv():
data_files.append(('etc/diamond',
glob('conf/*.conf.*')))
data_files.append(('etc/diamond/collectors',
glob('conf/collectors/*')))
data_files.append(('etc/diamond/handlers',
glob('conf/handlers/*')))
else:
data_files.append(('/etc/diamond',
glob('conf/*.conf.*')))
data_files.append(('/etc/diamond/collectors',
glob('conf/collectors/*')))
data_files.append(('/etc/diamond/handlers',
glob('conf/handlers/*')))
data_files.append(('/var/log/diamond',
['.keep']))
if distro == 'Ubuntu':
if distro_major_version >= 16:
data_files.append(('/usr/lib/systemd/system',
['rpm/systemd/diamond.service']))
else:
data_files.append(('/etc/init',
['debian/diamond.upstart']))
if distro in ['centos', 'redhat', 'debian', 'fedora', 'oracle']:
data_files.append(('/etc/init.d',
['bin/init.d/diamond']))
if distro_major_version >= 7 and not distro == 'debian':
data_files.append(('/usr/lib/systemd/system',
['rpm/systemd/diamond.service']))
elif distro_major_version >= 6 and not distro == 'debian':
data_files.append(('/etc/init',
['rpm/upstart/diamond.conf']))
# Support packages being called differently on different distros
# Are we in a virtenv?
if running_under_virtualenv():
install_requires = ['configobj', 'psutil', ]
else:
if distro in ['debian', 'Ubuntu']:
install_requires = ['python-configobj', 'python-psutil', ]
# Default back to pip style requires
else:
install_requires = ['configobj', 'psutil', ]
def get_version():
"""
Read the version.txt file to get the new version string
Generate it if version.txt is not available. Generation
is required for pip installs
"""
try:
f = open('version.txt')
except IOError:
os.system("./version.sh > version.txt")
f = open('version.txt')
version = ''.join(f.readlines()).rstrip()
f.close()
return version
def pkgPath(root, path, rpath="/"):
"""
Package up a path recursively
"""
global data_files
if not os.path.exists(path):
return
files = []
for spath in os.listdir(path):
# Ignore test directories
if spath == 'test':
continue
subpath = os.path.join(path, spath)
spath = os.path.join(rpath, spath)
if os.path.isfile(subpath):
files.append(subpath)
if os.path.isdir(subpath):
pkgPath(root, subpath, spath)
data_files.append((root + rpath, files))
if os.name == 'nt':
pkgPath(os.path.join(base_files, 'collectors'), 'src/collectors', '\\')
else:
pkgPath('share/diamond/collectors', 'src/collectors')
version = get_version()
setup(
name='diamond',
version=version,
url='https://github.com/python-diamond/Diamond',
author='The Diamond Team',
author_email='[email protected]',
license='MIT License',
description='Smart data producer for graphite graphing package',
package_dir={'': 'src'},
packages=['diamond', 'diamond.handler', 'diamond.utils'],
scripts=['bin/diamond', 'bin/diamond-setup'],
data_files=data_files,
python_requires='==2.7',
install_requires=install_requires,
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2',
],
    **setup_kwargs
)
|
import pytest
from homeassistant.components import automation, zone
from homeassistant.const import ATTR_ENTITY_ID, ENTITY_MATCH_ALL, SERVICE_TURN_OFF
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service, mock_component
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
@pytest.fixture(autouse=True)
def setup_comp(hass):
"""Initialize components."""
mock_component(hass, "group")
hass.loop.run_until_complete(
async_setup_component(
hass,
zone.DOMAIN,
{
"zone": {
"name": "test",
"latitude": 32.880837,
"longitude": -117.237561,
"radius": 250,
}
},
)
)
async def test_if_fires_on_zone_enter(hass, calls):
"""Test for firing on zone enter."""
context = Context()
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.881011, "longitude": -117.234758}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "zone",
"entity_id": "test.entity",
"zone": "zone.test",
"event": "enter",
},
"action": {
"service": "test.automation",
"data_template": {
"some": "{{ trigger.%s }}"
% "}} - {{ trigger.".join(
(
"platform",
"entity_id",
"from_state.state",
"to_state.state",
"zone.name",
)
)
},
},
}
},
)
hass.states.async_set(
"test.entity",
"hello",
{"latitude": 32.880586, "longitude": -117.237564},
context=context,
)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].context.parent_id == context.id
assert "zone - test.entity - hello - hello - test" == calls[0].data["some"]
# Set out of zone again so we can trigger call
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.881011, "longitude": -117.234758}
)
await hass.async_block_till_done()
await hass.services.async_call(
automation.DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: ENTITY_MATCH_ALL},
blocking=True,
)
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.880586, "longitude": -117.237564}
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_for_enter_on_zone_leave(hass, calls):
"""Test for not firing on zone leave."""
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.880586, "longitude": -117.237564}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "zone",
"entity_id": "test.entity",
"zone": "zone.test",
"event": "enter",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.881011, "longitude": -117.234758}
)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_if_fires_on_zone_leave(hass, calls):
"""Test for firing on zone leave."""
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.880586, "longitude": -117.237564}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "zone",
"entity_id": "test.entity",
"zone": "zone.test",
"event": "leave",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.881011, "longitude": -117.234758}
)
await hass.async_block_till_done()
assert len(calls) == 1
async def test_if_not_fires_for_leave_on_zone_enter(hass, calls):
"""Test for not firing on zone enter."""
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.881011, "longitude": -117.234758}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {
"platform": "zone",
"entity_id": "test.entity",
"zone": "zone.test",
"event": "leave",
},
"action": {"service": "test.automation"},
}
},
)
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.880586, "longitude": -117.237564}
)
await hass.async_block_till_done()
assert len(calls) == 0
async def test_zone_condition(hass, calls):
"""Test for zone condition."""
hass.states.async_set(
"test.entity", "hello", {"latitude": 32.880586, "longitude": -117.237564}
)
await hass.async_block_till_done()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: {
"trigger": {"platform": "event", "event_type": "test_event"},
"condition": {
"condition": "zone",
"entity_id": "test.entity",
"zone": "zone.test",
},
"action": {"service": "test.automation"},
}
},
)
hass.bus.async_fire("test_event")
await hass.async_block_till_done()
assert len(calls) == 1
|
from plumbum import cli
class App(cli.Application):
#VERSION = "1.2.3"
#x = cli.SwitchAttr("--lala")
y = cli.Flag("-f")
def main(self, x, y):
pass
@App.subcommand("bar")
class Bar(cli.Application):
z = cli.Flag("-z")
def main(self, z, w):
pass
if __name__ == "__main__":
App.run()
|
import asyncio
from datetime import timedelta
import logging
import nuheat
import requests
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_DEVICES,
CONF_PASSWORD,
CONF_USERNAME,
HTTP_BAD_REQUEST,
HTTP_INTERNAL_SERVER_ERROR,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from .const import CONF_SERIAL_NUMBER, DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_DEVICES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the NuHeat component."""
hass.data.setdefault(DOMAIN, {})
conf = config.get(DOMAIN)
if not conf:
return True
for serial_number in conf[CONF_DEVICES]:
        # Since the API currently doesn't permit fetching the serial numbers
        # and they have to be specified, we create a separate config entry for
        # each serial number. This won't increase the number of HTTP requests,
        # as each thermostat has to be updated anyway.
        # It also lets us validate that the entered serial numbers are valid,
        # so we do not end up with a config entry where only half of the
        # devices work.
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data={
CONF_USERNAME: conf[CONF_USERNAME],
CONF_PASSWORD: conf[CONF_PASSWORD],
CONF_SERIAL_NUMBER: serial_number,
},
)
)
return True
def _get_thermostat(api, serial_number):
"""Authenticate and create the thermostat object."""
api.authenticate()
return api.get_thermostat(serial_number)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up NuHeat from a config entry."""
conf = entry.data
username = conf[CONF_USERNAME]
password = conf[CONF_PASSWORD]
serial_number = conf[CONF_SERIAL_NUMBER]
api = nuheat.NuHeat(username, password)
try:
thermostat = await hass.async_add_executor_job(
_get_thermostat, api, serial_number
)
except requests.exceptions.Timeout as ex:
raise ConfigEntryNotReady from ex
except requests.exceptions.HTTPError as ex:
if (
ex.response.status_code > HTTP_BAD_REQUEST
and ex.response.status_code < HTTP_INTERNAL_SERVER_ERROR
):
_LOGGER.error("Failed to login to nuheat: %s", ex)
return False
raise ConfigEntryNotReady from ex
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error("Failed to login to nuheat: %s", ex)
return False
async def _async_update_data():
"""Fetch data from API endpoint."""
await hass.async_add_executor_job(thermostat.get_data)
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=f"nuheat {serial_number}",
update_method=_async_update_data,
update_interval=timedelta(minutes=5),
)
hass.data[DOMAIN][entry.entry_id] = (thermostat, coordinator)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
from datetime import timedelta
from homeassistant.components import habitica
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
async def async_setup_platform(hass, config, async_add_devices, discovery_info=None):
"""Set up the habitica platform."""
if discovery_info is None:
return
name = discovery_info[habitica.CONF_NAME]
sensors = discovery_info[habitica.CONF_SENSORS]
sensor_data = HabitipyData(hass.data[habitica.DOMAIN][name])
await sensor_data.update()
async_add_devices(
[HabitipySensor(name, sensor, sensor_data) for sensor in sensors], True
)
class HabitipyData:
"""Habitica API user data cache."""
def __init__(self, api):
"""Habitica API user data cache."""
self.api = api
self.data = None
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def update(self):
"""Get a new fix from Habitica servers."""
self.data = await self.api.user.get()
class HabitipySensor(Entity):
"""A generic Habitica sensor."""
def __init__(self, name, sensor_name, updater):
"""Initialize a generic Habitica sensor."""
self._name = name
self._sensor_name = sensor_name
self._sensor_type = habitica.SENSORS_TYPES[sensor_name]
self._state = None
self._updater = updater
async def async_update(self):
"""Update Condition and Forecast."""
await self._updater.update()
data = self._updater.data
for element in self._sensor_type.path:
data = data[element]
self._state = data
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._sensor_type.icon
@property
def name(self):
"""Return the name of the sensor."""
return f"{habitica.DOMAIN}_{self._name}_{self._sensor_name}"
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._sensor_type.unit
|
import asyncio
import functools
import logging
import json
from aiohttp import ClientConnectionError, ClientResponse, RequestInfo, streams
from aiohttp import hdrs, CookieJar
from http.cookies import CookieError, Morsel, SimpleCookie
from aiohttp.helpers import strip_auth_from_url
from multidict import CIMultiDict, CIMultiDictProxy
from yarl import URL
from vcr.errors import CannotOverwriteExistingCassetteException
from vcr.request import Request
log = logging.getLogger(__name__)
class MockStream(asyncio.StreamReader, streams.AsyncStreamReaderMixin):
pass
class MockClientResponse(ClientResponse):
def __init__(self, method, url, request_info=None):
super().__init__(
method=method,
url=url,
writer=None,
continue100=None,
timer=None,
request_info=request_info,
traces=None,
loop=asyncio.get_event_loop(),
session=None,
)
async def json(self, *, encoding="utf-8", loads=json.loads, **kwargs): # NOQA: E999
stripped = self._body.strip()
if not stripped:
return None
return loads(stripped.decode(encoding))
async def text(self, encoding="utf-8", errors="strict"):
return self._body.decode(encoding, errors=errors)
async def read(self):
return self._body
def release(self):
pass
@property
def content(self):
s = MockStream()
s.feed_data(self._body)
s.feed_eof()
return s
def build_response(vcr_request, vcr_response, history):
request_info = RequestInfo(
url=URL(vcr_request.url),
method=vcr_request.method,
headers=_deserialize_headers(vcr_request.headers),
real_url=URL(vcr_request.url),
)
response = MockClientResponse(vcr_request.method, URL(vcr_response.get("url")), request_info=request_info)
response.status = vcr_response["status"]["code"]
response._body = vcr_response["body"].get("string", b"")
response.reason = vcr_response["status"]["message"]
response._headers = _deserialize_headers(vcr_response["headers"])
response._history = tuple(history)
# cookies
for hdr in response.headers.getall(hdrs.SET_COOKIE, ()):
try:
cookies = SimpleCookie(hdr)
for cookie_name, cookie in cookies.items():
expires = cookie.get("expires", "").strip()
if expires:
log.debug('Ignoring expiration date: %s="%s"', cookie_name, expires)
cookie["expires"] = ""
response.cookies.load(cookie.output(header="").strip())
except CookieError as exc:
log.warning("Can not load response cookies: %s", exc)
response.close()
return response
def _serialize_headers(headers):
"""Serialize CIMultiDictProxy to a pickle-able dict because proxy
objects forbid pickling:
https://github.com/aio-libs/multidict/issues/340
"""
# Mark strings as keys so 'istr' types don't show up in
# the cassettes as comments.
serialized_headers = {}
for k, v in headers.items():
serialized_headers.setdefault(str(k), []).append(v)
return serialized_headers
def _deserialize_headers(headers):
deserialized_headers = CIMultiDict()
for k, vs in headers.items():
if isinstance(vs, list):
for v in vs:
deserialized_headers.add(k, v)
else:
deserialized_headers.add(k, vs)
return CIMultiDictProxy(deserialized_headers)
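# Illustrative round trip (a sketch, not part of the vcr API): repeated header
# values serialize into a list and are re-expanded on deserialization.
#
#   >>> headers = CIMultiDict([("Set-Cookie", "a=1"), ("Set-Cookie", "b=2")])
#   >>> _serialize_headers(headers)
#   {'Set-Cookie': ['a=1', 'b=2']}
#   >>> _deserialize_headers(_serialize_headers(headers)).getall("Set-Cookie")
#   ['a=1', 'b=2']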
def play_responses(cassette, vcr_request):
history = []
vcr_response = cassette.play_response(vcr_request)
response = build_response(vcr_request, vcr_response, history)
# If we're following redirects, continue playing until we reach
# our final destination.
while 300 <= response.status <= 399:
if "location" not in response.headers:
break
next_url = URL(response.url).join(URL(response.headers["location"]))
# Make a stub VCR request that we can then use to look up the recorded
# VCR request saved to the cassette. This feels a little hacky and
# may have edge cases based on the headers we're providing (e.g. if
# there's a matcher that is used to filter by headers).
vcr_request = Request("GET", str(next_url), None, _serialize_headers(response.request_info.headers))
vcr_requests = cassette.find_requests_with_most_matches(vcr_request)
for vcr_request, *_ in vcr_requests:
if cassette.can_play_response_for(vcr_request):
break
# Tack on the response we saw from the redirect into the history
# list that is added on to the final response.
history.append(response)
vcr_response = cassette.play_response(vcr_request)
response = build_response(vcr_request, vcr_response, history)
return response
async def record_response(cassette, vcr_request, response):
"""Record a VCR request-response chain to the cassette."""
try:
body = {"string": (await response.read())}
# aiohttp raises a ClientConnectionError on reads when
# there is no body. We can use this to know to not write one.
except ClientConnectionError:
body = {}
vcr_response = {
"status": {"code": response.status, "message": response.reason},
"headers": _serialize_headers(response.headers),
"body": body, # NOQA: E999
"url": str(response.url),
}
cassette.append(vcr_request, vcr_response)
async def record_responses(cassette, vcr_request, response):
"""Because aiohttp follows redirects by default, we must support
them by default. This method is used to write individual
request-response chains that were implicitly followed to get
to the final destination.
"""
for past_response in response.history:
aiohttp_request = past_response.request_info
# No data because it's following a redirect.
past_request = Request(
aiohttp_request.method,
str(aiohttp_request.url),
None,
_serialize_headers(aiohttp_request.headers),
)
await record_response(cassette, past_request, past_response)
# If we're following redirects, then the last request-response
# we record is the one attached to the `response`.
if response.history:
aiohttp_request = response.request_info
vcr_request = Request(
aiohttp_request.method,
str(aiohttp_request.url),
None,
_serialize_headers(aiohttp_request.headers),
)
await record_response(cassette, vcr_request, response)
def _build_cookie_header(session, cookies, cookie_header, url):
url, _ = strip_auth_from_url(url)
all_cookies = session._cookie_jar.filter_cookies(url)
if cookies is not None:
tmp_cookie_jar = CookieJar()
tmp_cookie_jar.update_cookies(cookies)
req_cookies = tmp_cookie_jar.filter_cookies(url)
if req_cookies:
all_cookies.load(req_cookies)
if not all_cookies and not cookie_header:
return None
c = SimpleCookie()
if cookie_header:
c.load(cookie_header)
for name, value in all_cookies.items():
if isinstance(value, Morsel):
mrsl_val = value.get(value.key, Morsel())
mrsl_val.set(value.key, value.value, value.coded_value)
c[name] = mrsl_val
else:
c[name] = value
return c.output(header="", sep=";").strip()
def vcr_request(cassette, real_request):
@functools.wraps(real_request)
async def new_request(self, method, url, **kwargs):
headers = kwargs.get("headers")
auth = kwargs.get("auth")
headers = self._prepare_headers(headers)
data = kwargs.get("data", kwargs.get("json"))
params = kwargs.get("params")
cookies = kwargs.get("cookies")
if auth is not None:
headers["AUTHORIZATION"] = auth.encode()
request_url = URL(url)
if params:
for k, v in params.items():
params[k] = str(v)
request_url = URL(url).with_query(params)
c_header = headers.pop(hdrs.COOKIE, None)
cookie_header = _build_cookie_header(self, cookies, c_header, request_url)
if cookie_header:
headers[hdrs.COOKIE] = cookie_header
vcr_request = Request(method, str(request_url), data, _serialize_headers(headers))
if cassette.can_play_response_for(vcr_request):
log.info("Playing response for {} from cassette".format(vcr_request))
response = play_responses(cassette, vcr_request)
for redirect in response.history:
self._cookie_jar.update_cookies(redirect.cookies, redirect.url)
self._cookie_jar.update_cookies(response.cookies, response.url)
return response
if cassette.write_protected and cassette.filter_request(vcr_request):
raise CannotOverwriteExistingCassetteException(cassette=cassette, failed_request=vcr_request)
log.info("%s not in cassette, sending to real server", vcr_request)
response = await real_request(self, method, url, **kwargs) # NOQA: E999
await record_responses(cassette, vcr_request, response)
return response
return new_request
|
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_TRANSITION,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
import homeassistant.util.color as color_util
from .base_class import TradfriBaseClass, TradfriBaseDevice
from .const import (
ATTR_DIMMER,
ATTR_HUE,
ATTR_SAT,
ATTR_TRANSITION_TIME,
CONF_GATEWAY_ID,
CONF_IMPORT_GROUPS,
DEVICES,
DOMAIN,
GROUPS,
KEY_API,
SUPPORTED_GROUP_FEATURES,
SUPPORTED_LIGHT_FEATURES,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Load Tradfri lights based on a config entry."""
gateway_id = config_entry.data[CONF_GATEWAY_ID]
tradfri_data = hass.data[DOMAIN][config_entry.entry_id]
api = tradfri_data[KEY_API]
devices = tradfri_data[DEVICES]
lights = [dev for dev in devices if dev.has_light_control]
if lights:
async_add_entities(TradfriLight(light, api, gateway_id) for light in lights)
if config_entry.data[CONF_IMPORT_GROUPS]:
groups = tradfri_data[GROUPS]
if groups:
async_add_entities(TradfriGroup(group, api, gateway_id) for group in groups)
class TradfriGroup(TradfriBaseClass, LightEntity):
"""The platform class for light groups required by hass."""
def __init__(self, device, api, gateway_id):
"""Initialize a Group."""
super().__init__(device, api, gateway_id)
self._unique_id = f"group-{gateway_id}-{device.id}"
self._refresh(device)
@property
def should_poll(self):
"""Poll needed for tradfri groups."""
return True
async def async_update(self):
"""Fetch new state data for the group.
This method is required for groups to update properly.
"""
await self._api(self._device.update())
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORTED_GROUP_FEATURES
@property
def is_on(self):
"""Return true if group lights are on."""
return self._device.state
@property
def brightness(self):
"""Return the brightness of the group lights."""
return self._device.dimmer
async def async_turn_off(self, **kwargs):
"""Instruct the group lights to turn off."""
await self._api(self._device.set_state(0))
async def async_turn_on(self, **kwargs):
"""Instruct the group lights to turn on, or dim."""
keys = {}
if ATTR_TRANSITION in kwargs:
keys["transition_time"] = int(kwargs[ATTR_TRANSITION]) * 10
if ATTR_BRIGHTNESS in kwargs:
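            # Tradfri dimmers accept 0-254, while Home Assistant brightness
            # goes up to 255, so clamp the maximum value.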
if kwargs[ATTR_BRIGHTNESS] == 255:
kwargs[ATTR_BRIGHTNESS] = 254
await self._api(self._device.set_dimmer(kwargs[ATTR_BRIGHTNESS], **keys))
else:
await self._api(self._device.set_state(1))
class TradfriLight(TradfriBaseDevice, LightEntity):
"""The platform class required by Home Assistant."""
def __init__(self, device, api, gateway_id):
"""Initialize a Light."""
super().__init__(device, api, gateway_id)
self._unique_id = f"light-{gateway_id}-{device.id}"
self._hs_color = None
# Calculate supported features
_features = SUPPORTED_LIGHT_FEATURES
if device.light_control.can_set_dimmer:
_features |= SUPPORT_BRIGHTNESS
if device.light_control.can_set_color:
_features |= SUPPORT_COLOR
if device.light_control.can_set_temp:
_features |= SUPPORT_COLOR_TEMP
self._features = _features
self._refresh(device)
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return self._device_control.min_mireds
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return self._device_control.max_mireds
@property
def supported_features(self):
"""Flag supported features."""
return self._features
@property
def is_on(self):
"""Return true if light is on."""
return self._device_data.state
@property
def brightness(self):
"""Return the brightness of the light."""
return self._device_data.dimmer
@property
def color_temp(self):
"""Return the color temp value in mireds."""
return self._device_data.color_temp
@property
def hs_color(self):
"""HS color of the light."""
if self._device_control.can_set_color:
hsbxy = self._device_data.hsb_xy_color
hue = hsbxy[0] / (self._device_control.max_hue / 360)
sat = hsbxy[1] / (self._device_control.max_saturation / 100)
if hue is not None and sat is not None:
return hue, sat
async def async_turn_off(self, **kwargs):
"""Instruct the light to turn off."""
# This allows transitioning to off, but resets the brightness
# to 1 for the next set_state(True) command
transition_time = None
if ATTR_TRANSITION in kwargs:
transition_time = int(kwargs[ATTR_TRANSITION]) * 10
dimmer_data = {ATTR_DIMMER: 0, ATTR_TRANSITION_TIME: transition_time}
await self._api(self._device_control.set_dimmer(**dimmer_data))
else:
await self._api(self._device_control.set_state(False))
async def async_turn_on(self, **kwargs):
"""Instruct the light to turn on."""
transition_time = None
if ATTR_TRANSITION in kwargs:
transition_time = int(kwargs[ATTR_TRANSITION]) * 10
dimmer_command = None
if ATTR_BRIGHTNESS in kwargs:
brightness = kwargs[ATTR_BRIGHTNESS]
if brightness > 254:
brightness = 254
dimmer_data = {
ATTR_DIMMER: brightness,
ATTR_TRANSITION_TIME: transition_time,
}
dimmer_command = self._device_control.set_dimmer(**dimmer_data)
transition_time = None
else:
dimmer_command = self._device_control.set_state(True)
color_command = None
if ATTR_HS_COLOR in kwargs and self._device_control.can_set_color:
hue = int(kwargs[ATTR_HS_COLOR][0] * (self._device_control.max_hue / 360))
sat = int(
kwargs[ATTR_HS_COLOR][1] * (self._device_control.max_saturation / 100)
)
color_data = {
ATTR_HUE: hue,
ATTR_SAT: sat,
ATTR_TRANSITION_TIME: transition_time,
}
color_command = self._device_control.set_hsb(**color_data)
transition_time = None
temp_command = None
if ATTR_COLOR_TEMP in kwargs and (
self._device_control.can_set_temp or self._device_control.can_set_color
):
temp = kwargs[ATTR_COLOR_TEMP]
# White Spectrum bulb
if self._device_control.can_set_temp:
if temp > self.max_mireds:
temp = self.max_mireds
elif temp < self.min_mireds:
temp = self.min_mireds
temp_data = {
ATTR_COLOR_TEMP: temp,
ATTR_TRANSITION_TIME: transition_time,
}
temp_command = self._device_control.set_color_temp(**temp_data)
transition_time = None
# Color bulb (CWS)
# color_temp needs to be set with hue/saturation
elif self._device_control.can_set_color:
temp_k = color_util.color_temperature_mired_to_kelvin(temp)
hs_color = color_util.color_temperature_to_hs(temp_k)
hue = int(hs_color[0] * (self._device_control.max_hue / 360))
sat = int(hs_color[1] * (self._device_control.max_saturation / 100))
color_data = {
ATTR_HUE: hue,
ATTR_SAT: sat,
ATTR_TRANSITION_TIME: transition_time,
}
color_command = self._device_control.set_hsb(**color_data)
transition_time = None
# HSB can always be set, but color temp + brightness is bulb dependent
command = dimmer_command
if command is not None:
command += color_command
else:
command = color_command
if self._device_control.can_combine_commands:
await self._api(command + temp_command)
else:
if temp_command is not None:
await self._api(temp_command)
if command is not None:
await self._api(command)
def _refresh(self, device):
"""Refresh the light data."""
super()._refresh(device)
# Caching of LightControl and light object
self._device_control = device.light_control
self._device_data = device.light_control.lights[0]
|
import unittest
from six import with_metaclass
from kalliope.core.Models import Singleton
class MyClass(with_metaclass(Singleton, object)):
def __init__(self):
self.value = "test"
class TestSingleton(unittest.TestCase):
def setUp(self):
pass
def test_singleton(self):
obj1 = MyClass()
obj2 = MyClass()
self.assertEqual(id(obj1), id(obj2))
def test_drop_singleton(self):
obj1 = MyClass()
obj2 = MyClass()
# drop the singleton instance
Singleton._instances = {}
obj3 = MyClass()
self.assertEqual(id(obj1), id(obj2))
self.assertNotEqual(id(obj1), id(obj3))
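# Descriptive note: the Singleton metaclass keeps a class-to-instance map in
# Singleton._instances, so repeated MyClass() calls return the same object
# until that map is cleared, as test_drop_singleton exercises above.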
|
import hangups
from common import run_example
async def send_message(client, args):
request = hangups.hangouts_pb2.SendChatMessageRequest(
request_header=client.get_request_header(),
event_request_header=hangups.hangouts_pb2.EventRequestHeader(
conversation_id=hangups.hangouts_pb2.ConversationId(
id=args.conversation_id
),
client_generated_id=client.get_client_generated_id(),
),
message_content=hangups.hangouts_pb2.MessageContent(
segment=[
hangups.ChatMessageSegment(args.message_text).serialize()
],
),
)
await client.send_chat_message(request)
if __name__ == '__main__':
run_example(send_message, '--conversation-id', '--message-text')
|
import unittest
from unittest.mock import patch, Mock, MagicMock, mock_open
from flask import Flask
from lemur.plugins.lemur_sftp import plugin
from paramiko.ssh_exception import AuthenticationException
class TestSftp(unittest.TestCase):
def setUp(self):
self.sftp_destination = plugin.SFTPDestinationPlugin()
        # Create a new Flask application for the duration of the test. Under Python 3.8,
        # the application context must be pushed manually, otherwise tests run in a dev
        # environment fail with 'Working outside of application context'.
_app = Flask('lemur_test_sftp')
self.ctx = _app.app_context()
assert self.ctx
self.ctx.push()
def tearDown(self):
self.ctx.pop()
def test_failing_ssh_connection(self):
dst_path = '/var/non-existent'
files = {'first-file': 'data'}
options = [{'name': 'host', 'value': 'non-existent'}, {'name': 'port', 'value': '22'},
{'name': 'user', 'value': 'test_acme'}]
with self.assertRaises(AuthenticationException):
self.sftp_destination.upload_file(dst_path, files, options)
@patch("lemur.plugins.lemur_sftp.plugin.paramiko")
def test_upload_file_single_with_password(self, mock_paramiko):
dst_path = '/var/non-existent'
files = {'first-file': 'data'}
options = [{'name': 'host', 'value': 'non-existent'}, {'name': 'port', 'value': '22'},
{'name': 'user', 'value': 'test_acme'}, {'name': 'password', 'value': 'test_password'}]
mock_sftp = Mock()
mock_sftp.open = mock_open()
mock_ssh = mock_paramiko.SSHClient.return_value
mock_ssh.connect = MagicMock()
mock_ssh.open_sftp.return_value = mock_sftp
self.sftp_destination.upload_file(dst_path, files, options)
mock_sftp.open.assert_called_once_with('/var/non-existent/first-file', 'w')
handle = mock_sftp.open()
handle.write.assert_called_once_with('data')
mock_ssh.close.assert_called_once()
mock_ssh.connect.assert_called_with('non-existent', username='test_acme', port='22',
password='test_password')
@patch("lemur.plugins.lemur_sftp.plugin.paramiko")
def test_upload_file_multiple_with_key(self, mock_paramiko):
dst_path = '/var/non-existent'
files = {'first-file': 'data', 'second-file': 'data2'}
options = [{'name': 'host', 'value': 'non-existent'}, {'name': 'port', 'value': '22'},
{'name': 'user', 'value': 'test_acme'}, {'name': 'privateKeyPath', 'value': '/var/id_rsa'},
{'name': 'privateKeyPass', 'value': 'ssh-key-password'}]
mock_sftp = Mock()
mock_sftp.open = mock_open()
mock_paramiko.RSAKey.from_private_key_file.return_value = 'ssh-rsa test-key'
mock_ssh = mock_paramiko.SSHClient.return_value
mock_ssh.connect = MagicMock()
mock_ssh.open_sftp.return_value = mock_sftp
self.sftp_destination.upload_file(dst_path, files, options)
mock_sftp.open.assert_called_with('/var/non-existent/second-file', 'w')
handle = mock_sftp.open()
handle.write.assert_called_with('data2')
mock_ssh.close.assert_called_once()
mock_paramiko.RSAKey.from_private_key_file.assert_called_with('/var/id_rsa', 'ssh-key-password')
mock_ssh.connect.assert_called_with('non-existent', username='test_acme', port='22',
pkey='ssh-rsa test-key')
@patch("lemur.plugins.lemur_sftp.plugin.paramiko")
def test_upload_acme_token(self, mock_paramiko):
token_path = './well-known/acme-challenge/some-token-path'
token = 'token-data'
options = [{'name': 'host', 'value': 'non-existent'}, {'name': 'port', 'value': '22'},
{'name': 'user', 'value': 'test_acme'}, {'name': 'password', 'value': 'test_password'},
{'name': 'destinationPath', 'value': '/var/destination-path'}]
mock_sftp = Mock()
mock_sftp.open = mock_open()
mock_ssh = mock_paramiko.SSHClient.return_value
mock_ssh.connect = MagicMock()
mock_ssh.open_sftp.return_value = mock_sftp
self.sftp_destination.upload_acme_token(token_path, token, options)
mock_sftp.open.assert_called_once_with('/var/destination-path/some-token-path', 'w')
handle = mock_sftp.open()
handle.write.assert_called_once_with('token-data')
mock_ssh.close.assert_called_once()
mock_ssh.connect.assert_called_with('non-existent', username='test_acme', port='22',
password='test_password')
@patch("lemur.plugins.lemur_sftp.plugin.paramiko")
def test_delete_file_with_password(self, mock_paramiko):
dst_path = '/var/non-existent'
files = {'first-file': None}
options = [{'name': 'host', 'value': 'non-existent'}, {'name': 'port', 'value': '22'},
{'name': 'user', 'value': 'test_acme'}, {'name': 'password', 'value': 'test_password'}]
mock_sftp = Mock()
mock_ssh = mock_paramiko.SSHClient.return_value
mock_ssh.connect = MagicMock()
mock_ssh.open_sftp.return_value = mock_sftp
self.sftp_destination.delete_file(dst_path, files, options)
mock_sftp.remove.assert_called_once_with('/var/non-existent/first-file')
mock_ssh.close.assert_called_once()
mock_ssh.connect.assert_called_with('non-existent', username='test_acme', port='22',
password='test_password')
@patch("lemur.plugins.lemur_sftp.plugin.paramiko")
def test_delete_acme_token(self, mock_paramiko):
token_path = './well-known/acme-challenge/some-token-path'
options = [{'name': 'host', 'value': 'non-existent'}, {'name': 'port', 'value': '22'},
{'name': 'user', 'value': 'test_acme'}, {'name': 'password', 'value': 'test_password'},
{'name': 'destinationPath', 'value': '/var/destination-path'}]
mock_sftp = Mock()
mock_ssh = mock_paramiko.SSHClient.return_value
mock_ssh.connect = MagicMock()
mock_ssh.open_sftp.return_value = mock_sftp
self.sftp_destination.delete_acme_token(token_path, options)
mock_sftp.remove.assert_called_once_with('/var/destination-path/some-token-path')
mock_ssh.close.assert_called_once()
mock_ssh.connect.assert_called_with('non-existent', username='test_acme', port='22',
password='test_password')
|
from flask import Flask, jsonify
from flasgger import APISpec, Schema, Swagger, fields
from apispec.ext.marshmallow import MarshmallowPlugin
from apispec_webframeworks.flask import FlaskPlugin
# Create an APISpec
spec = APISpec(
    title='Flasgger Petstore',
version='1.0.10',
openapi_version='2.0',
plugins=[
FlaskPlugin(),
MarshmallowPlugin(),
],
)
app = Flask(__name__)
# Optional marshmallow support
class CategorySchema(Schema):
id = fields.Int()
name = fields.Str(required=True)
class PetSchema(Schema):
category = fields.Nested(CategorySchema, many=True)
name = fields.Str()
@app.route('/random')
def random_pet():
"""
A cute furry animal endpoint.
Get a random pet
---
description: Get a random pet
responses:
200:
description: A pet to be returned
schema:
$ref: '#/definitions/Pet'
"""
pet = {'category': [{'id': 1, 'name': 'rodent'}], 'name': 'Mickey'}
return jsonify(PetSchema().dump(pet).data)
template = spec.to_flasgger(
app,
definitions=[CategorySchema, PetSchema],
paths=[random_pet]
)
"""
optionally if using apispec.APISpec from original module
you can do:
from flasgger.utils import apispec_to_template
template = apispec_to_template(
app=app,
spec=spec,
definitions=[CategorySchema, PetSchema],
paths=[random_pet]
)
"""
# set the UIVERSION to 3
app.config['SWAGGER'] = {'uiversion': 3}
# start Flasgger using a template from apispec
swag = Swagger(app, template=template)
if __name__ == '__main__':
app.run(debug=True)
|
from datetime import timedelta
import logging
from homeassistant.components.camera import ATTR_ENTITY_ID, SUPPORT_ON_OFF, Camera
from homeassistant.components.ffmpeg import DATA_FFMPEG
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
STATE_OFF,
STATE_ON,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import (
ATTRIBUTION,
DEVICE_BRAND,
DOMAIN as LOGI_CIRCLE_DOMAIN,
LED_MODE_KEY,
RECORDING_MODE_KEY,
SIGNAL_LOGI_CIRCLE_RECONFIGURE,
SIGNAL_LOGI_CIRCLE_RECORD,
SIGNAL_LOGI_CIRCLE_SNAPSHOT,
)
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(seconds=60)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a Logi Circle Camera. Obsolete."""
_LOGGER.warning("Logi Circle no longer works with camera platform configuration")
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up a Logi Circle Camera based on a config entry."""
devices = await hass.data[LOGI_CIRCLE_DOMAIN].cameras
ffmpeg = hass.data[DATA_FFMPEG]
cameras = [LogiCam(device, entry, ffmpeg) for device in devices]
async_add_entities(cameras, True)
class LogiCam(Camera):
"""An implementation of a Logi Circle camera."""
def __init__(self, camera, device_info, ffmpeg):
"""Initialize Logi Circle camera."""
super().__init__()
self._camera = camera
self._name = self._camera.name
self._id = self._camera.mac_address
self._has_battery = self._camera.supports_feature("battery_level")
self._ffmpeg = ffmpeg
self._listeners = []
async def async_added_to_hass(self):
"""Connect camera methods to signals."""
def _dispatch_proxy(method):
"""Expand parameters & filter entity IDs."""
async def _call(params):
entity_ids = params.get(ATTR_ENTITY_ID)
filtered_params = {
k: v for k, v in params.items() if k != ATTR_ENTITY_ID
}
if entity_ids is None or self.entity_id in entity_ids:
await method(**filtered_params)
return _call
self._listeners.extend(
[
async_dispatcher_connect(
self.hass,
SIGNAL_LOGI_CIRCLE_RECONFIGURE,
_dispatch_proxy(self.set_config),
),
async_dispatcher_connect(
self.hass,
SIGNAL_LOGI_CIRCLE_SNAPSHOT,
_dispatch_proxy(self.livestream_snapshot),
),
async_dispatcher_connect(
self.hass,
SIGNAL_LOGI_CIRCLE_RECORD,
_dispatch_proxy(self.download_livestream),
),
]
)
async def async_will_remove_from_hass(self):
"""Disconnect dispatcher listeners when removed."""
for detach in self._listeners:
detach()
@property
def unique_id(self):
"""Return a unique ID."""
return self._id
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def supported_features(self):
"""Logi Circle camera's support turning on and off ("soft" switch)."""
return SUPPORT_ON_OFF
@property
def device_info(self):
"""Return information about the device."""
return {
"name": self._camera.name,
"identifiers": {(LOGI_CIRCLE_DOMAIN, self._camera.id)},
"model": self._camera.model_name,
"sw_version": self._camera.firmware,
"manufacturer": DEVICE_BRAND,
}
@property
def device_state_attributes(self):
"""Return the state attributes."""
state = {
ATTR_ATTRIBUTION: ATTRIBUTION,
"battery_saving_mode": (
STATE_ON if self._camera.battery_saving else STATE_OFF
),
"microphone_gain": self._camera.microphone_gain,
}
# Add battery attributes if camera is battery-powered
if self._has_battery:
state[ATTR_BATTERY_CHARGING] = self._camera.charging
state[ATTR_BATTERY_LEVEL] = self._camera.battery_level
return state
async def async_camera_image(self):
"""Return a still image from the camera."""
return await self._camera.live_stream.download_jpeg()
async def async_turn_off(self):
"""Disable streaming mode for this camera."""
await self._camera.set_config("streaming", False)
async def async_turn_on(self):
"""Enable streaming mode for this camera."""
await self._camera.set_config("streaming", True)
@property
def should_poll(self):
"""Update the image periodically."""
return True
async def set_config(self, mode, value):
"""Set an configuration property for the target camera."""
if mode == LED_MODE_KEY:
await self._camera.set_config("led", value)
if mode == RECORDING_MODE_KEY:
await self._camera.set_config("recording_disabled", not value)
async def download_livestream(self, filename, duration):
"""Download a recording from the camera's livestream."""
# Render filename from template.
filename.hass = self.hass
stream_file = filename.async_render(variables={ATTR_ENTITY_ID: self.entity_id})
# Respect configured allowed paths.
if not self.hass.config.is_allowed_path(stream_file):
_LOGGER.error("Can't write %s, no access to path!", stream_file)
return
await self._camera.live_stream.download_rtsp(
filename=stream_file,
duration=timedelta(seconds=duration),
ffmpeg_bin=self._ffmpeg.binary,
)
async def livestream_snapshot(self, filename):
"""Download a still frame from the camera's livestream."""
# Render filename from template.
filename.hass = self.hass
snapshot_file = filename.async_render(
variables={ATTR_ENTITY_ID: self.entity_id}
)
# Respect configured allowed paths.
if not self.hass.config.is_allowed_path(snapshot_file):
_LOGGER.error("Can't write %s, no access to path!", snapshot_file)
return
await self._camera.live_stream.download_jpeg(
filename=snapshot_file, refresh=True
)
async def async_update(self):
"""Update camera entity and refresh attributes."""
await self._camera.update()
|
import os
from setuptools import setup, Command
from datetime import date
# Fix for building on non-Windows systems
import codecs
try:
codecs.lookup('mbcs')
except LookupError:
    # 'mbcs' only exists on Windows; register a lookup that maps it to the
    # ascii codec so building on other platforms does not fail.
    ascii_codec = codecs.lookup('ascii')
    codecs.register(lambda name, enc=ascii_codec: enc if name == 'mbcs' else None)
class PyDocs(Command):
user_options = []
def initialize_options(self):
pass
def finalize_options(self):
pass
def run(self):
import subprocess
import sys
os.chdir('docs')
errno = subprocess.call(['make', 'html'])
sys.exit(errno)
class PyTest(Command):
user_options = [('cov', 'c', 'Produce coverage'),
('report', 'r', 'Produce html coverage report')]
def initialize_options(self):
self.cov = None
self.report = None
def finalize_options(self):
pass
def run(self):
        import subprocess
        import sys
        proc = [sys.executable, '-m', 'pytest']
        if self.cov or self.report:
            proc += ['--cov', '--cov-config=.coveragerc']
if self.report:
proc += ['--cov-report=html']
errno = subprocess.call(proc)
raise SystemExit(errno)
setup(cmdclass = {'test':PyTest, 'docs':PyDocs})
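# Illustrative invocations of the custom commands registered above:
#   python setup.py docs           # build the Sphinx HTML docs from ./docs
#   python setup.py test --cov     # run pytest with coverage enabled
#   python setup.py test --report  # run pytest and write an HTML coverage report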
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import MagicMock
from mock import patch
from mock import call
from diamond.collector import Collector
from tokumx import TokuMXCollector
##########################################################################
def run_only_if_pymongo_is_available(func):
try:
import pymongo
except ImportError:
pymongo = None
pred = lambda: pymongo is not None
return run_only(func, pred)
class TestTokuMXCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('TokuMXCollector', {
'host': 'localhost:27017',
'databases': '^db',
})
self.collector = TokuMXCollector(config, None)
self.connection = MagicMock()
def test_import(self):
self.assertTrue(TokuMXCollector)
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_server_stats(self,
publish_mock,
connector_mock):
data = {'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_has_calls(
[call('serverStatus'), call('engineStatus')], any_order=False)
self.assertPublishedMany(publish_mock, {
'more_keys.nested_key': 1,
'key': 2
})
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_db_stats(self,
publish_mock,
connector_mock):
data = {'db_keys': {'db_nested_key': 1}, 'dbkey': 2, 'dbstring': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection['db1'].command.assert_called_once_with('dbStats')
metrics = {
'db_keys.db_nested_key': 1,
'dbkey': 2
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_publish_stats_with_long_type(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_has_calls(
[call('serverStatus'), call('engineStatus')], any_order=False)
self.assertPublishedMany(publish_mock, {
'more_keys': 1,
'key': 2
})
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_ignore_unneeded_databases(self,
publish_mock,
connector_mock):
self._annotate_connection(connector_mock, {})
self.collector.collect()
assert call('baddb') not in self.connection.__getitem__.call_args_list
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_ignore_unneeded_collections(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.connection['db1'].collection_names.return_value = ['collection1',
'tmp.mr.tmp1']
self.connection['db1'].command.return_value = {'key': 2,
'string': 'str'}
self.collector.collect()
self.connection.db.command.assert_has_calls(
[call('serverStatus'), call('engineStatus')], any_order=False)
self.connection['db1'].collection_names.assert_called_once_with()
self.connection['db1'].command.assert_any_call('dbStats')
self.connection['db1'].command.assert_any_call('collstats',
'collection1')
assert call('collstats', 'tmp.mr.tmp1') not in self.connection['db1'].command.call_args_list # NOQA
metrics = {
'databases.db1.collection1.key': 2,
}
self.assertPublishedMany(publish_mock, metrics)
def _annotate_connection(self, connector_mock, data):
connector_mock.return_value = self.connection
self.connection.db.command.return_value = data
self.connection.database_names.return_value = ['db1', 'baddb']
class TestMongoMultiHostDBCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('TokuMXCollector', {
'hosts': ['localhost:27017', 'localhost:27057'],
'databases': '^db',
})
self.collector = TokuMXCollector(config, None)
self.connection = MagicMock()
def test_import(self):
self.assertTrue(TokuMXCollector)
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_server_stats(self,
publish_mock,
connector_mock):
data = {'more_keys': {'nested_key': 1}, 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_called_with('engineStatus')
self.assertPublishedMany(publish_mock, {
'localhost_27017.more_keys.nested_key': 1,
'localhost_27057.more_keys.nested_key': 1,
'localhost_27017.key': 2,
'localhost_27057.key': 2
})
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_publish_nested_keys_for_db_stats(self,
publish_mock,
connector_mock):
data = {'db_keys': {'db_nested_key': 1}, 'dbkey': 2, 'dbstring': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection['db1'].command.assert_called_with('dbStats')
metrics = {
'localhost_27017.db_keys.db_nested_key': 1,
'localhost_27057.db_keys.db_nested_key': 1,
'localhost_27017.dbkey': 2,
'localhost_27057.dbkey': 2
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_publish_stats_with_long_type(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.collector.collect()
self.connection.db.command.assert_called_with('engineStatus')
self.assertPublishedMany(publish_mock, {
'localhost_27017.more_keys': 1,
'localhost_27057.more_keys': 1,
'localhost_27017.key': 2,
'localhost_27057.key': 2
})
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_ignore_unneeded_databases(self,
publish_mock,
connector_mock):
self._annotate_connection(connector_mock, {})
self.collector.collect()
assert call('baddb') not in self.connection.__getitem__.call_args_list
@run_only_if_pymongo_is_available
@patch('pymongo.Connection')
@patch.object(Collector, 'publish')
def test_should_ignore_unneeded_collections(self,
publish_mock,
connector_mock):
data = {'more_keys': long(1), 'key': 2, 'string': 'str'}
self._annotate_connection(connector_mock, data)
self.connection['db1'].collection_names.return_value = ['collection1',
'tmp.mr.tmp1']
self.connection['db1'].command.return_value = {'key': 2,
'string': 'str'}
self.collector.collect()
self.connection.db.command.assert_has_calls(
[call('serverStatus'), call('engineStatus')], any_order=False)
self.connection['db1'].collection_names.assert_called_with()
self.connection['db1'].command.assert_any_call('dbStats')
self.connection['db1'].command.assert_any_call('collstats',
'collection1')
assert call('collstats', 'tmp.mr.tmp1') not in self.connection['db1'].command.call_args_list # NOQA
metrics = {
'localhost_27017.databases.db1.collection1.key': 2,
'localhost_27057.databases.db1.collection1.key': 2,
}
self.assertPublishedMany(publish_mock, metrics)
def _annotate_connection(self, connector_mock, data):
connector_mock.return_value = self.connection
self.connection.db.command.return_value = data
self.connection.database_names.return_value = ['db1', 'baddb']
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import typing
from pathlib import Path
import uuid
import matchzoo as mz
from matchzoo.engine.base_model import BaseModel
from .callback import Callback
class SaveModel(Callback):
"""
Save trained model.
    For each trained model, a UUID is generated as the `model_id` and the model
    is saved under `dir_path/model_id`. A `model_id` key is also inserted into
    the result, making it visible in the return value of the `tune` method.
:param dir_path: Path to save the models to. (default:
`matchzoo.USER_TUNED_MODELS_DIR`)
"""
def __init__(
self,
dir_path: typing.Union[str, Path] = mz.USER_TUNED_MODELS_DIR
):
"""Init."""
        # Coerce to Path so that a plain string argument also works.
        self._dir_path = Path(dir_path)
def on_run_end(self, tuner, model: BaseModel, result: dict):
"""Save model on run end."""
model_id = str(uuid.uuid4())
model.save(self._dir_path.joinpath(model_id))
result['model_id'] = model_id
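# Minimal usage sketch (hypothetical tuner wiring; only the hook defined above
# is required):
#   callback = SaveModel(dir_path=Path('/tmp/tuned_models'))
#   ...
#   callback.on_run_end(tuner, trained_model, result)
#   result['model_id']  # UUID string; model saved at /tmp/tuned_models/<model_id>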
|
from homeassistant import config_entries
from homeassistant.components.coolmaster.const import AVAILABLE_MODES, DOMAIN
from tests.async_mock import patch
def _flow_data():
options = {"host": "1.1.1.1"}
for mode in AVAILABLE_MODES:
options[mode] = True
return options
async def test_form(hass):
"""Test we get the form."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.coolmaster.config_flow.CoolMasterNet.status",
return_value={"test_id": "test_unit"},
), patch(
"homeassistant.components.coolmaster.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.coolmaster.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], _flow_data()
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "1.1.1.1"
assert result2["data"] == {
"host": "1.1.1.1",
"port": 10102,
"supported_modes": AVAILABLE_MODES,
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_timeout(hass):
"""Test we handle a connection timeout."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.coolmaster.config_flow.CoolMasterNet.status",
side_effect=TimeoutError(),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], _flow_data()
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_connection_refused(hass):
"""Test we handle a connection error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.coolmaster.config_flow.CoolMasterNet.status",
side_effect=ConnectionRefusedError(),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], _flow_data()
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_form_no_units(hass):
"""Test we handle no units found."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch(
"homeassistant.components.coolmaster.config_flow.CoolMasterNet.status",
return_value={},
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], _flow_data()
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "no_units"}
|
from regenmaschine import Client
from regenmaschine.errors import RainMachineError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_IP_ADDRESS, CONF_PASSWORD, CONF_PORT, CONF_SSL
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client, config_validation as cv
from .const import ( # pylint: disable=unused-import
CONF_ZONE_RUN_TIME,
DEFAULT_PORT,
DEFAULT_ZONE_RUN,
DOMAIN,
)
class RainMachineFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a RainMachine config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the config flow."""
self.data_schema = vol.Schema(
{
vol.Required(CONF_IP_ADDRESS): str,
vol.Required(CONF_PASSWORD): str,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): int,
}
)
async def _show_form(self, errors=None):
"""Show the form to the user."""
return self.async_show_form(
step_id="user",
data_schema=self.data_schema,
errors=errors if errors else {},
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Define the config flow to handle options."""
return RainMachineOptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return await self._show_form()
await self.async_set_unique_id(user_input[CONF_IP_ADDRESS])
self._abort_if_unique_id_configured()
websession = aiohttp_client.async_get_clientsession(self.hass)
client = Client(session=websession)
try:
await client.load_local(
user_input[CONF_IP_ADDRESS],
user_input[CONF_PASSWORD],
port=user_input[CONF_PORT],
ssl=user_input.get(CONF_SSL, True),
)
except RainMachineError:
return await self._show_form({CONF_PASSWORD: "invalid_auth"})
# Unfortunately, RainMachine doesn't provide a way to refresh the
# access token without using the IP address and password, so we have to
# store it:
return self.async_create_entry(
title=user_input[CONF_IP_ADDRESS],
data={
CONF_IP_ADDRESS: user_input[CONF_IP_ADDRESS],
CONF_PASSWORD: user_input[CONF_PASSWORD],
CONF_PORT: user_input[CONF_PORT],
CONF_SSL: user_input.get(CONF_SSL, True),
CONF_ZONE_RUN_TIME: user_input.get(
CONF_ZONE_RUN_TIME, DEFAULT_ZONE_RUN
),
},
)
class RainMachineOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a RainMachine options flow."""
def __init__(self, config_entry):
"""Initialize."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_ZONE_RUN_TIME,
default=self.config_entry.options.get(CONF_ZONE_RUN_TIME),
): cv.positive_int
}
),
)
|
from dataclasses import dataclass
import logging
from miio import AirQualityMonitor, DeviceException # pylint: disable=import-error
from miio.gateway import (
GATEWAY_MODEL_AC_V1,
GATEWAY_MODEL_AC_V2,
GATEWAY_MODEL_AC_V3,
GATEWAY_MODEL_EU,
DeviceType,
GatewayException,
)
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_TOKEN,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
LIGHT_LUX,
PERCENTAGE,
PRESSURE_HPA,
TEMP_CELSIUS,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from .config_flow import CONF_FLOW_TYPE, CONF_GATEWAY
from .const import DOMAIN
from .gateway import XiaomiGatewayDevice
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Miio Sensor"
DATA_KEY = "sensor.xiaomi_miio"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
ATTR_POWER = "power"
ATTR_CHARGING = "charging"
ATTR_BATTERY_LEVEL = "battery_level"
ATTR_DISPLAY_CLOCK = "display_clock"
ATTR_NIGHT_MODE = "night_mode"
ATTR_NIGHT_TIME_BEGIN = "night_time_begin"
ATTR_NIGHT_TIME_END = "night_time_end"
ATTR_SENSOR_STATE = "sensor_state"
ATTR_MODEL = "model"
SUCCESS = ["ok"]
@dataclass
class SensorType:
"""Class that holds device specific info for a xiaomi aqara sensor."""
unit: str = None
icon: str = None
device_class: str = None
GATEWAY_SENSOR_TYPES = {
"temperature": SensorType(
unit=TEMP_CELSIUS, icon=None, device_class=DEVICE_CLASS_TEMPERATURE
),
"humidity": SensorType(
unit=PERCENTAGE, icon=None, device_class=DEVICE_CLASS_HUMIDITY
),
"pressure": SensorType(
unit=PRESSURE_HPA, icon=None, device_class=DEVICE_CLASS_PRESSURE
),
}
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Xiaomi sensor from a config entry."""
entities = []
if config_entry.data[CONF_FLOW_TYPE] == CONF_GATEWAY:
gateway = hass.data[DOMAIN][config_entry.entry_id]
# Gateway illuminance sensor
if gateway.model not in [
GATEWAY_MODEL_AC_V1,
GATEWAY_MODEL_AC_V2,
GATEWAY_MODEL_AC_V3,
GATEWAY_MODEL_EU,
]:
entities.append(
XiaomiGatewayIlluminanceSensor(
gateway, config_entry.title, config_entry.unique_id
)
)
# Gateway sub devices
sub_devices = gateway.devices
for sub_device in sub_devices.values():
sensor_variables = None
if sub_device.type == DeviceType.SensorHT:
sensor_variables = ["temperature", "humidity"]
if sub_device.type == DeviceType.AqaraHT:
sensor_variables = ["temperature", "humidity", "pressure"]
if sensor_variables is not None:
entities.extend(
[
XiaomiGatewaySensor(sub_device, config_entry, variable)
for variable in sensor_variables
]
)
async_add_entities(entities, update_before_add=True)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the sensor from config."""
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config[CONF_HOST]
token = config[CONF_TOKEN]
name = config[CONF_NAME]
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
try:
air_quality_monitor = AirQualityMonitor(host, token)
device_info = await hass.async_add_executor_job(air_quality_monitor.info)
model = device_info.model
unique_id = f"{model}-{device_info.mac_address}"
_LOGGER.info(
"%s %s %s detected",
model,
device_info.firmware_version,
device_info.hardware_version,
)
device = XiaomiAirQualityMonitor(name, air_quality_monitor, model, unique_id)
except DeviceException as ex:
raise PlatformNotReady from ex
hass.data[DATA_KEY][host] = device
async_add_entities([device], update_before_add=True)
class XiaomiAirQualityMonitor(Entity):
"""Representation of a Xiaomi Air Quality Monitor."""
def __init__(self, name, device, model, unique_id):
"""Initialize the entity."""
self._name = name
self._device = device
self._model = model
self._unique_id = unique_id
self._icon = "mdi:cloud"
self._unit_of_measurement = "AQI"
self._available = None
self._state = None
self._state_attrs = {
ATTR_POWER: None,
ATTR_BATTERY_LEVEL: None,
ATTR_CHARGING: None,
ATTR_DISPLAY_CLOCK: None,
ATTR_NIGHT_MODE: None,
ATTR_NIGHT_TIME_BEGIN: None,
ATTR_NIGHT_TIME_END: None,
ATTR_SENSOR_STATE: None,
ATTR_MODEL: self._model,
}
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of this entity, if any."""
return self._name
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use for device if any."""
return self._icon
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._state_attrs
async def async_update(self):
"""Fetch state from the miio device."""
try:
state = await self.hass.async_add_executor_job(self._device.status)
_LOGGER.debug("Got new state: %s", state)
self._available = True
self._state = state.aqi
self._state_attrs.update(
{
ATTR_POWER: state.power,
ATTR_CHARGING: state.usb_power,
ATTR_BATTERY_LEVEL: state.battery,
ATTR_DISPLAY_CLOCK: state.display_clock,
ATTR_NIGHT_MODE: state.night_mode,
ATTR_NIGHT_TIME_BEGIN: state.night_time_begin,
ATTR_NIGHT_TIME_END: state.night_time_end,
ATTR_SENSOR_STATE: state.sensor_state,
}
)
except DeviceException as ex:
if self._available:
self._available = False
_LOGGER.error("Got exception while fetching the state: %s", ex)
class XiaomiGatewaySensor(XiaomiGatewayDevice):
"""Representation of a XiaomiGatewaySensor."""
def __init__(self, sub_device, entry, data_key):
"""Initialize the XiaomiSensor."""
super().__init__(sub_device, entry)
self._data_key = data_key
self._unique_id = f"{sub_device.sid}-{data_key}"
self._name = f"{data_key} ({sub_device.sid})".capitalize()
@property
def icon(self):
"""Return the icon to use in the frontend."""
return GATEWAY_SENSOR_TYPES[self._data_key].icon
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return GATEWAY_SENSOR_TYPES[self._data_key].unit
@property
def device_class(self):
"""Return the device class of this entity."""
return GATEWAY_SENSOR_TYPES[self._data_key].device_class
@property
def state(self):
"""Return the state of the sensor."""
return self._sub_device.status[self._data_key]
class XiaomiGatewayIlluminanceSensor(Entity):
"""Representation of the gateway device's illuminance sensor."""
def __init__(self, gateway_device, gateway_name, gateway_device_id):
"""Initialize the entity."""
self._gateway = gateway_device
self._name = f"{gateway_name} Illuminance"
self._gateway_device_id = gateway_device_id
self._unique_id = f"{gateway_device_id}-illuminance"
self._available = False
self._state = None
@property
def unique_id(self):
"""Return an unique ID."""
return self._unique_id
@property
def device_info(self):
"""Return the device info of the gateway."""
return {
"identifiers": {(DOMAIN, self._gateway_device_id)},
}
@property
def name(self):
"""Return the name of this entity, if any."""
return self._name
@property
def available(self):
"""Return true when state is known."""
return self._available
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity."""
return LIGHT_LUX
@property
def device_class(self):
"""Return the device class of this entity."""
return DEVICE_CLASS_ILLUMINANCE
@property
def state(self):
"""Return the state of the device."""
return self._state
async def async_update(self):
"""Fetch state from the device."""
try:
self._state = await self.hass.async_add_executor_job(
self._gateway.get_illumination
)
self._available = True
except GatewayException as ex:
if self._available:
self._available = False
_LOGGER.error(
"Got exception while fetching the gateway illuminance state: %s", ex
)
|
from datetime import timedelta
from typing import Callable, List, Optional, cast
import pyvera as veraApi
from homeassistant.components.sensor import DOMAIN as PLATFORM_DOMAIN, ENTITY_ID_FORMAT
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import LIGHT_LUX, PERCENTAGE, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.core import HomeAssistant
from homeassistant.helpers.entity import Entity
from homeassistant.util import convert
from . import VeraDevice
from .common import ControllerData, get_controller_data
SCAN_INTERVAL = timedelta(seconds=5)
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up the sensor config entry."""
controller_data = get_controller_data(hass, entry)
async_add_entities(
[
VeraSensor(device, controller_data)
for device in controller_data.devices.get(PLATFORM_DOMAIN)
]
)
class VeraSensor(VeraDevice[veraApi.VeraSensor], Entity):
"""Representation of a Vera Sensor."""
def __init__(
self, vera_device: veraApi.VeraSensor, controller_data: ControllerData
):
"""Initialize the sensor."""
self.current_value = None
self._temperature_units = None
self.last_changed_time = None
VeraDevice.__init__(self, vera_device, controller_data)
self.entity_id = ENTITY_ID_FORMAT.format(self.vera_id)
@property
def state(self) -> str:
"""Return the name of the sensor."""
return self.current_value
@property
def unit_of_measurement(self) -> Optional[str]:
"""Return the unit of measurement of this entity, if any."""
if self.vera_device.category == veraApi.CATEGORY_TEMPERATURE_SENSOR:
return self._temperature_units
if self.vera_device.category == veraApi.CATEGORY_LIGHT_SENSOR:
return LIGHT_LUX
if self.vera_device.category == veraApi.CATEGORY_UV_SENSOR:
return "level"
if self.vera_device.category == veraApi.CATEGORY_HUMIDITY_SENSOR:
return PERCENTAGE
if self.vera_device.category == veraApi.CATEGORY_POWER_METER:
return "watts"
def update(self) -> None:
"""Update the state."""
if self.vera_device.category == veraApi.CATEGORY_TEMPERATURE_SENSOR:
self.current_value = self.vera_device.temperature
vera_temp_units = self.vera_device.vera_controller.temperature_units
if vera_temp_units == "F":
self._temperature_units = TEMP_FAHRENHEIT
else:
self._temperature_units = TEMP_CELSIUS
elif self.vera_device.category == veraApi.CATEGORY_LIGHT_SENSOR:
self.current_value = self.vera_device.light
elif self.vera_device.category == veraApi.CATEGORY_UV_SENSOR:
self.current_value = self.vera_device.light
elif self.vera_device.category == veraApi.CATEGORY_HUMIDITY_SENSOR:
self.current_value = self.vera_device.humidity
elif self.vera_device.category == veraApi.CATEGORY_SCENE_CONTROLLER:
controller = cast(veraApi.VeraSceneController, self.vera_device)
value = controller.get_last_scene_id(True)
time = controller.get_last_scene_time(True)
if time == self.last_changed_time:
self.current_value = None
else:
self.current_value = value
self.last_changed_time = time
elif self.vera_device.category == veraApi.CATEGORY_POWER_METER:
power = convert(self.vera_device.power, float, 0)
self.current_value = int(round(power, 0))
elif self.vera_device.is_trippable:
tripped = self.vera_device.is_tripped
self.current_value = "Tripped" if tripped else "Not Tripped"
else:
self.current_value = "Unknown"
|
from pyramid import testing
from paasta_tools import marathon_tools
from paasta_tools.api import settings
from paasta_tools.api.views.marathon_dashboard import marathon_dashboard
from paasta_tools.utils import SystemPaastaConfig
def test_list_instances():
settings.cluster = "fake_cluster"
system_paasta_config_dict = {
"marathon_servers": [
{
"user": "fake_user",
"password": "fake_password",
"url": ["http://marathon:8080"],
},
{
"user": "fake_user",
"password": "fake_password",
"url": ["http://marathon1:8080"],
},
{
"user": "fake_user",
"password": "fake_password",
"url": ["http://marathon2:8080"],
},
],
"dashboard_links": {
"testcluster": {
"Marathon RO": [
"http://accessible-marathon",
"http://accessible-marathon1",
"http://accessible-marathon2",
]
}
},
}
system_paasta_config = SystemPaastaConfig(
config=system_paasta_config_dict, directory="unused"
)
marathon_servers = marathon_tools.get_marathon_servers(system_paasta_config)
settings.marathon_clients = marathon_tools.get_marathon_clients(
marathon_servers=marathon_servers, cached=False
)
request = testing.DummyRequest()
settings.system_paasta_config = system_paasta_config
response = marathon_dashboard(request)
expected_output = {settings.cluster: []}
assert response == expected_output
|
from babelfish import LanguageReverseConverter, language_converters
class Addic7edConverter(LanguageReverseConverter):
def __init__(self):
self.name_converter = language_converters['name']
self.from_addic7ed = {u'Català': ('cat',), 'Chinese (Simplified)': ('zho',), 'Chinese (Traditional)': ('zho',),
'Euskera': ('eus',), 'Galego': ('glg',), 'Greek': ('ell',), 'Malay': ('msa',),
'Portuguese (Brazilian)': ('por', 'BR'), 'Serbian (Cyrillic)': ('srp', None, 'Cyrl'),
'Serbian (Latin)': ('srp',), 'Spanish (Latin America)': ('spa',),
'Spanish (Spain)': ('spa',)}
self.to_addic7ed = {('cat',): 'Català', ('zho',): 'Chinese (Simplified)', ('eus',): 'Euskera',
('glg',): 'Galego', ('ell',): 'Greek', ('msa',): 'Malay',
('por', 'BR'): 'Portuguese (Brazilian)', ('srp', None, 'Cyrl'): 'Serbian (Cyrillic)'}
self.codes = self.name_converter.codes | set(self.from_addic7ed.keys())
def convert(self, alpha3, country=None, script=None):
if (alpha3, country, script) in self.to_addic7ed:
return self.to_addic7ed[(alpha3, country, script)]
if (alpha3, country) in self.to_addic7ed:
return self.to_addic7ed[(alpha3, country)]
if (alpha3,) in self.to_addic7ed:
return self.to_addic7ed[(alpha3,)]
return self.name_converter.convert(alpha3, country, script)
def reverse(self, addic7ed):
if addic7ed in self.from_addic7ed:
return self.from_addic7ed[addic7ed]
return self.name_converter.reverse(addic7ed)
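# Illustrative round trips (values taken from the mappings above):
#   Addic7edConverter().reverse('Portuguese (Brazilian)')  ->  ('por', 'BR')
#   Addic7edConverter().convert('por', 'BR')               ->  'Portuguese (Brazilian)'
# Anything not covered by the custom tables falls back to the 'name' converter.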
|
from gi.repository import Gio, GObject, GtkSource, Pango
import meld.conf
import meld.filters
class MeldSettings(GObject.GObject):
"""Handler for settings that can't easily be bound to object properties"""
__gsignals__ = {
'file-filters-changed': (GObject.SignalFlags.RUN_FIRST, None, ()),
'text-filters-changed': (GObject.SignalFlags.RUN_FIRST, None, ()),
'changed': (GObject.SignalFlags.RUN_FIRST, None, (str,)),
}
def __init__(self):
super().__init__()
self.on_setting_changed(settings, 'filename-filters')
self.on_setting_changed(settings, 'text-filters')
self.on_setting_changed(settings, 'use-system-font')
self.style_scheme = self._style_scheme_from_gsettings()
settings.connect('changed', self.on_setting_changed)
def on_setting_changed(self, settings, key):
if key == 'filename-filters':
self.file_filters = self._filters_from_gsetting(
'filename-filters', meld.filters.FilterEntry.SHELL)
self.emit('file-filters-changed')
elif key == 'text-filters':
self.text_filters = self._filters_from_gsetting(
'text-filters', meld.filters.FilterEntry.REGEX)
self.emit('text-filters-changed')
elif key in ('use-system-font', 'custom-font'):
self.font = self._current_font_from_gsetting()
self.emit('changed', 'font')
        elif key == 'style-scheme':
self.style_scheme = self._style_scheme_from_gsettings()
self.emit('changed', 'style-scheme')
def _style_scheme_from_gsettings(self):
from meld.style import set_base_style_scheme
manager = GtkSource.StyleSchemeManager.get_default()
scheme = manager.get_scheme(settings.get_string('style-scheme'))
set_base_style_scheme(scheme)
return scheme
def _filters_from_gsetting(self, key, filt_type):
filter_params = settings.get_value(key)
filters = [
meld.filters.FilterEntry.new_from_gsetting(params, filt_type)
for params in filter_params
]
return filters
def _current_font_from_gsetting(self, *args):
if settings.get_boolean('use-system-font'):
font_string = interface_settings.get_string('monospace-font-name')
else:
font_string = settings.get_string('custom-font')
return Pango.FontDescription(font_string)
def load_settings_schema(schema_id):
if meld.conf.DATADIR_IS_UNINSTALLED:
schema_source = Gio.SettingsSchemaSource.new_from_directory(
str(meld.conf.DATADIR),
Gio.SettingsSchemaSource.get_default(),
False,
)
schema = schema_source.lookup(schema_id, False)
settings = Gio.Settings.new_full(
schema=schema, backend=None, path=None)
else:
settings = Gio.Settings.new(schema_id)
return settings
def create_settings():
global settings, interface_settings, _meldsettings
settings = load_settings_schema(meld.conf.SETTINGS_SCHEMA_ID)
interface_settings = Gio.Settings.new('org.gnome.desktop.interface')
_meldsettings = MeldSettings()
def bind_settings(obj):
global settings
bind_flags = (
Gio.SettingsBindFlags.DEFAULT | Gio.SettingsBindFlags.NO_SENSITIVITY)
for binding in getattr(obj, '__gsettings_bindings__', ()):
settings_id, property_id = binding
settings.bind(settings_id, obj, property_id, bind_flags)
bind_flags = (
Gio.SettingsBindFlags.GET | Gio.SettingsBindFlags.NO_SENSITIVITY)
for binding in getattr(obj, '__gsettings_bindings_view__', ()):
settings_id, property_id = binding
settings.bind(settings_id, obj, property_id, bind_flags)
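# Sketch of the declaration bind_settings() expects (class and key names below
# are hypothetical): a widget lists (settings key, property name) pairs and the
# helper wires them to GSettings with the flags chosen above.
#
#     class SomeView(Gtk.TextView):
#         __gsettings_bindings__ = (
#             ('indent-width', 'indent-width'),
#         )
#
#     bind_settings(some_view_instance)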
def get_meld_settings() -> MeldSettings:
return _meldsettings
settings = None
interface_settings = None
_meldsettings = None
|
import pytest
from homeassistant.components.plex.const import DOMAIN
from .const import DEFAULT_DATA, DEFAULT_OPTIONS
from .mock_classes import MockGDM, MockPlexAccount, MockPlexServer
from tests.async_mock import patch
from tests.common import MockConfigEntry
@pytest.fixture(name="entry")
def mock_config_entry():
"""Return the default mocked config entry."""
return MockConfigEntry(
domain=DOMAIN,
data=DEFAULT_DATA,
options=DEFAULT_OPTIONS,
unique_id=DEFAULT_DATA["server_id"],
)
@pytest.fixture
def mock_plex_account():
"""Mock the PlexAccount class and return the used instance."""
plex_account = MockPlexAccount()
with patch("plexapi.myplex.MyPlexAccount", return_value=plex_account):
yield plex_account
@pytest.fixture
def mock_websocket():
"""Mock the PlexWebsocket class."""
with patch("homeassistant.components.plex.PlexWebsocket", autospec=True) as ws:
yield ws
@pytest.fixture
def setup_plex_server(hass, entry, mock_plex_account, mock_websocket):
"""Set up and return a mocked Plex server instance."""
async def _wrapper(**kwargs):
"""Wrap the fixture to allow passing arguments to the MockPlexServer instance."""
config_entry = kwargs.get("config_entry", entry)
disable_gdm = kwargs.pop("disable_gdm", True)
plex_server = MockPlexServer(**kwargs)
with patch("plexapi.server.PlexServer", return_value=plex_server), patch(
"homeassistant.components.plex.GDM",
return_value=MockGDM(disabled=disable_gdm),
):
config_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
return plex_server
return _wrapper
@pytest.fixture
async def mock_plex_server(entry, setup_plex_server):
"""Init from a config entry and return a mocked PlexServer instance."""
return await setup_plex_server(config_entry=entry)
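# Usage sketch (hypothetical test, not part of this conftest): the factory
# fixture lets a test tweak the mocked server before setup, e.g.
#
#     async def test_something(hass, setup_plex_server):
#         plex_server = await setup_plex_server(disable_gdm=False)
#         ...
#
# while tests that only need the default server can depend on the
# mock_plex_server fixture directly.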
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import json
import logging
import posixpath
import re
import time
from absl import flags
import jinja2
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import units
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import fio
import six
from six.moves import range
PKB_FIO_LOG_FILE_NAME = 'pkb_fio_avg'
LOCAL_JOB_FILE_SUFFIX = '_fio.job' # used with vm_util.PrependTempDir()
REMOTE_JOB_FILE_PATH = posixpath.join(vm_util.VM_TMP_DIR, 'fio.job')
DEFAULT_TEMP_FILE_NAME = 'fio-temp-file'
MOUNT_POINT = '/scratch'
# This dictionary maps scenario names to dictionaries of fio settings.
SCENARIOS = {
'sequential_write': {
'name': 'sequential_write',
'rwkind': 'write',
'blocksize': '512k'
},
'sequential_read': {
'name': 'sequential_read',
'rwkind': 'read',
'blocksize': '512k'
},
'random_write': {
'name': 'random_write',
'rwkind': 'randwrite',
'blocksize': '4k'
},
'random_read': {
'name': 'random_read',
'rwkind': 'randread',
'blocksize': '4k'
},
'random_read_write': {
'name': 'random_read_write',
'rwkind': 'randrw',
'blocksize': '4k'
},
'sequential_trim': {
'name': 'sequential_trim',
'rwkind': 'trim',
'blocksize': '512k'
},
'rand_trim': {
'name': 'rand_trim',
'rwkind': 'randtrim',
'blocksize': '4k'
}
}
FLAGS = flags.FLAGS
# Modes for --fio_target_mode
AGAINST_FILE_WITH_FILL_MODE = 'against_file_with_fill'
AGAINST_FILE_WITHOUT_FILL_MODE = 'against_file_without_fill'
AGAINST_DEVICE_WITH_FILL_MODE = 'against_device_with_fill'
AGAINST_DEVICE_WITHOUT_FILL_MODE = 'against_device_without_fill'
AGAINST_DEVICE_MODES = {AGAINST_DEVICE_WITH_FILL_MODE,
AGAINST_DEVICE_WITHOUT_FILL_MODE}
FILL_TARGET_MODES = {AGAINST_DEVICE_WITH_FILL_MODE,
AGAINST_FILE_WITH_FILL_MODE}
flags.DEFINE_string('fio_jobfile', None,
'Job file that fio will use. If not given, use a job file '
'bundled with PKB. Cannot use with '
'--fio_generate_scenarios.')
flags.DEFINE_list('fio_generate_scenarios', [],
'Generate a job file with the given scenarios. Special '
'scenario \'all\' generates all scenarios. Available '
'scenarios are sequential_write, sequential_read, '
'random_write, and random_read. Cannot use with '
'--fio_jobfile.')
flags.DEFINE_enum('fio_target_mode', AGAINST_FILE_WITHOUT_FILL_MODE,
[AGAINST_DEVICE_WITH_FILL_MODE,
AGAINST_DEVICE_WITHOUT_FILL_MODE,
AGAINST_FILE_WITH_FILL_MODE,
AGAINST_FILE_WITHOUT_FILL_MODE],
'Whether to run against a raw device or a file, and whether '
'to prefill.')
flags.DEFINE_string('fio_fill_size', '100%',
'The amount of device to fill in prepare stage. '
'The valid value can either be an integer, which '
'represents the number of bytes to fill or a '
'percentage, which represents the percentage '
'of the device. A filesystem will be unmounted before '
'filling and remounted afterwards. Only valid when '
'--fio_target_mode is against_device_with_fill or '
'against_file_with_fill.')
flag_util.DEFINE_integerlist('fio_io_depths', flag_util.IntegerList([1]),
'IO queue depths to run on. Can specify a single '
'number, like --fio_io_depths=1, a range, like '
'--fio_io_depths=1-4, or a list, like '
'--fio_io_depths=1-4,6-8',
on_nonincreasing=flag_util.IntegerListParser.WARN,
module_name=__name__)
flag_util.DEFINE_integerlist('fio_num_jobs', flag_util.IntegerList([1]),
'Number of concurrent fio jobs to run.',
on_nonincreasing=flag_util.IntegerListParser.WARN,
module_name=__name__)
flags.DEFINE_integer('fio_working_set_size', None,
'The size of the working set, in GB. If not given, use '
'the full size of the device. If using '
'--fio_generate_scenarios and not running against a raw '
'device, you must pass --fio_working_set_size.',
lower_bound=0)
flag_util.DEFINE_units('fio_blocksize', None,
'The block size for fio operations. Default is given by '
'the scenario when using --generate_scenarios. This '
'flag does not apply when using --fio_jobfile.',
convertible_to=units.byte)
flags.DEFINE_integer('fio_runtime', 600,
'The number of seconds to run each fio job for.',
lower_bound=1)
flags.DEFINE_list('fio_parameters', ['randrepeat=0'],
'Parameters to apply to all PKB generated fio jobs. Each '
'member of the list should be of the form "param=value".')
flags.DEFINE_boolean('fio_lat_log', False,
'Whether to collect a latency log of the fio jobs.')
flags.DEFINE_boolean('fio_bw_log', False,
'Whether to collect a bandwidth log of the fio jobs.')
flags.DEFINE_boolean('fio_iops_log', False,
'Whether to collect an IOPS log of the fio jobs.')
flags.DEFINE_integer('fio_log_avg_msec', 1000,
'By default, this will average each log entry in the '
'fio latency, bandwidth, and iops logs over the specified '
'period of time in milliseconds. If set to 0, fio will '
'log an entry for every IO that completes, this can grow '
'very quickly in size and can cause performance overhead.',
lower_bound=0)
flags.DEFINE_boolean('fio_hist_log', False,
'Whether to collect clat histogram.')
flags.DEFINE_integer('fio_log_hist_msec', 1000,
'Same as fio_log_avg_msec, but logs entries for '
'completion latency histograms. If set to 0, histogram '
'logging is disabled.')
flags.DEFINE_boolean( # TODO(user): Add support for simultaneous read.
'fio_write_against_multiple_clients', False,
'Whether to run fio against multiple nfs. Only applicable '
'when running fio against network mounts and rw=write.')
flags.DEFINE_integer('fio_command_timeout_sec', None,
'Timeout for fio commands in seconds.')
FLAGS_IGNORED_FOR_CUSTOM_JOBFILE = {
'fio_generate_scenarios', 'fio_io_depths', 'fio_runtime',
'fio_blocksize', 'fio_num_jobs', 'fio_parameters'}
def AgainstDevice():
"""Check whether we're running against a device or a file.
Returns:
True if running against a device, False if running against a file.
"""
return FLAGS.fio_target_mode in AGAINST_DEVICE_MODES
def FillTarget():
"""Check whether we should pre-fill our target or not.
Returns:
True if we should pre-fill our target, False if not.
"""
return FLAGS.fio_target_mode in FILL_TARGET_MODES
def FillDevice(vm, disk, fill_size, exec_path):
"""Fill the given disk on the given vm up to fill_size.
Args:
vm: a linux_virtual_machine.BaseLinuxMixin object.
disk: a disk.BaseDisk attached to the given vm.
fill_size: amount of device to fill, in fio format.
exec_path: string path to the fio executable
"""
command = (('%s --filename=%s --ioengine=libaio '
'--name=fill-device --blocksize=512k --iodepth=64 '
'--rw=write --direct=1 --size=%s') %
(exec_path, disk.GetDevicePath(), fill_size))
vm.RobustRemoteCommand(command)
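# For illustration (paths and values are hypothetical), FillDevice() ends up
# running a command of this shape on the VM:
#
#   /path/to/fio --filename=/dev/sdb --ioengine=libaio --name=fill-device \
#       --blocksize=512k --iodepth=64 --rw=write --direct=1 --size=100%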
BENCHMARK_NAME = 'fio'
BENCHMARK_CONFIG = """
fio:
description: Runs fio in sequential, random, read and write modes.
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: null
"""
JOB_FILE_TEMPLATE = """
[global]
ioengine=libaio
invalidate=1
direct=1
runtime={{runtime}}
time_based
filename={{filename}}
do_verify=0
verify_fatal=0
group_reporting=1
{%- for parameter in parameters %}
{{parameter}}
{%- endfor %}
{%- for scenario in scenarios %}
{%- for numjob in numjobs %}
{%- for iodepth in iodepths %}
[{{scenario['name']}}-io-depth-{{iodepth}}-num-jobs-{{numjob}}]
stonewall
rw={{scenario['rwkind']}}
{%- if scenario['rwmixread'] is defined %}
rwmixread={{scenario['rwmixread']}}
{%- endif%}
blocksize={{scenario['blocksize']}}
iodepth={{iodepth}}
{%- if scenario['size'] is defined %}
size={{scenario['size']}}
{%- else %}
size={{size}}
{%- endif%}
numjobs={{numjob}}
{%- endfor %}
{%- endfor %}
{%- endfor %}
"""
SECONDS_PER_MINUTE = 60
def GenerateJobFileString(filename, scenario_strings,
io_depths, num_jobs, working_set_size,
block_size, runtime, parameters):
"""Make a string with our fio job file.
Args:
filename: the file or disk we pre-filled, if any.
scenario_strings: list of strings with names in SCENARIOS.
io_depths: iterable of integers. The IO queue depths to test.
num_jobs: iterable of integers. The number of fio processes to test.
working_set_size: int or None. If int, the size of the working set in GB.
block_size: Quantity or None. If quantity, the block size to use.
runtime: int. The number of seconds to run each job.
parameters: list. Other fio parameters to be applied to all jobs.
Returns:
The contents of a fio job file, as a string.
"""
if 'all' in scenario_strings:
scenarios = six.itervalues(SCENARIOS)
else:
for name in scenario_strings:
if name not in SCENARIOS:
logging.error('Unknown scenario name %s', name)
scenarios = (SCENARIOS[name] for name in scenario_strings)
size_string = str(working_set_size) + 'G' if working_set_size else '100%'
if block_size is not None:
# If we don't make a copy here, this will modify the global
# SCENARIOS variable.
scenarios = [scenario.copy() for scenario in scenarios]
for scenario in scenarios:
scenario['blocksize'] = str(int(block_size.m_as(units.byte))) + 'B'
job_file_template = jinja2.Template(JOB_FILE_TEMPLATE,
undefined=jinja2.StrictUndefined)
return str(job_file_template.render(
runtime=runtime,
filename=filename,
size=size_string,
scenarios=scenarios,
iodepths=io_depths,
numjobs=num_jobs,
parameters=parameters))
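# A rough sketch of what the rendered job file looks like for a single
# scenario (sequential_read), iodepth 1 and numjobs 1, with a 10 GB working
# set and the default parameters (values are illustrative):
#
#   [global]
#   ioengine=libaio
#   invalidate=1
#   direct=1
#   runtime=600
#   time_based
#   filename=fio-temp-file
#   do_verify=0
#   verify_fatal=0
#   group_reporting=1
#   randrepeat=0
#
#   [sequential_read-io-depth-1-num-jobs-1]
#   stonewall
#   rw=read
#   blocksize=512k
#   iodepth=1
#   size=10G
#   numjobs=1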
FILENAME_PARAM_REGEXP = re.compile(r'filename\s*=.*$', re.MULTILINE)
def ProcessedJobFileString(fio_jobfile_contents, remove_filename):
"""Modify the fio job if requested.
Args:
fio_jobfile_contents: the contents of a fio job file.
remove_filename: bool. If true, remove the filename parameter from
the job file.
Returns:
The job file as a string, possibly without filename parameters.
"""
if remove_filename:
return FILENAME_PARAM_REGEXP.sub('', fio_jobfile_contents)
else:
return fio_jobfile_contents
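# Example of the filename stripping (a sketch): with remove_filename=True,
#
#   ProcessedJobFileString('filename=/dev/sdb\nrw=read\n', True)
#
# returns '\nrw=read\n', because FILENAME_PARAM_REGEXP removes every
# 'filename=...' line so fio falls back to the --filename flag passed on the
# command line when running against a raw device.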
def GetOrGenerateJobFileString(job_file_path, scenario_strings,
against_device, disk, io_depths,
num_jobs, working_set_size, block_size,
runtime, parameters, job_file_contents):
"""Get the contents of the fio job file we're working with.
This will either read the user's job file, if given, or generate a
new one.
Args:
job_file_path: string or None. The path to the user's job file, if
provided.
scenario_strings: list of strings or None. The workload scenarios
to generate.
against_device: bool. True if testing against a raw device, False
if testing against a filesystem.
disk: the disk.BaseDisk object to test against.
io_depths: iterable of integers. The IO queue depths to test.
num_jobs: iterable of integers. The number of fio processes to test.
working_set_size: int or None. If int, the size of the working set
in GB.
block_size: Quantity or None. If Quantity, the block size to use.
runtime: int. The number of seconds to run each job.
parameters: list. Other fio parameters to apply to all jobs.
job_file_contents: string contents of fio job.
Returns:
A string containing a fio job file.
"""
user_job_file_string = GetFileAsString(job_file_path)
use_user_jobfile = job_file_path or not scenario_strings
if use_user_jobfile:
remove_filename = against_device
return ProcessedJobFileString(user_job_file_string or job_file_contents,
remove_filename)
else:
if against_device:
filename = disk.GetDevicePath()
else:
# Since we pass --directory to fio, we must use relative file
# paths or get an error.
filename = DEFAULT_TEMP_FILE_NAME
return GenerateJobFileString(filename, scenario_strings, io_depths,
num_jobs, working_set_size, block_size,
runtime, parameters)
NEED_SIZE_MESSAGE = ('You must specify the working set size when using '
'generated scenarios with a filesystem.')
def WarnOnBadFlags():
"""Warn the user if they pass bad flag combinations."""
if FLAGS.fio_jobfile:
ignored_flags = {'--' + flag_name
for flag_name in FLAGS_IGNORED_FOR_CUSTOM_JOBFILE
if FLAGS[flag_name].present}
if ignored_flags:
logging.warning('Fio job file specified. Ignoring options "%s"',
', '.join(ignored_flags))
if (FLAGS.fio_jobfile is None and
FLAGS.fio_generate_scenarios and
not FLAGS.fio_working_set_size and
not AgainstDevice()):
logging.error(NEED_SIZE_MESSAGE)
raise errors.Benchmarks.PrepareException(NEED_SIZE_MESSAGE)
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS.fio_target_mode != AGAINST_FILE_WITHOUT_FILL_MODE:
disk_spec = config['vm_groups']['default']['disk_spec']
for cloud in disk_spec:
disk_spec[cloud]['mount_point'] = None
return config
def GetLogFlags(log_file_base):
"""Gets fio log files."""
collect_logs = FLAGS.fio_lat_log or FLAGS.fio_bw_log or FLAGS.fio_iops_log
fio_log_flags = [(FLAGS.fio_lat_log, '--write_lat_log=%(filename)s',),
(FLAGS.fio_bw_log, '--write_bw_log=%(filename)s',),
(FLAGS.fio_iops_log, '--write_iops_log=%(filename)s',),
(FLAGS.fio_hist_log, '--write_hist_log=%(filename)s',),
(collect_logs, '--log_avg_msec=%(interval)d',)]
fio_command_flags = ' '.join([flag for given, flag in fio_log_flags if given])
if FLAGS.fio_hist_log:
fio_command_flags = ' '.join([
fio_command_flags, '--log_hist_msec=%(hist_interval)d'])
return fio_command_flags % {'filename': log_file_base,
'interval': FLAGS.fio_log_avg_msec,
'hist_interval': FLAGS.fio_log_hist_msec}
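# Illustration (flag values hypothetical): with --fio_lat_log and the default
# --fio_log_avg_msec=1000, GetLogFlags('pkb_fio_avg_1234') returns
#
#   '--write_lat_log=pkb_fio_avg_1234 --log_avg_msec=1000'
#
# and adding --fio_hist_log would also emit --write_hist_log=pkb_fio_avg_1234
# plus '--log_hist_msec=1000'.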
def CheckPrerequisites(benchmark_config):
"""Perform flag checks."""
del benchmark_config # unused
WarnOnBadFlags()
def Prepare(benchmark_spec):
exec_path = fio.GetFioExec()
vms = benchmark_spec.vms
vm_util.RunThreaded(lambda vm: PrepareWithExec(vm, exec_path), vms)
def GetFileAsString(file_path):
if not file_path:
return None
with open(data.ResourcePath(file_path), 'r') as jobfile:
return jobfile.read()
def PrepareWithExec(vm, exec_path):
"""Prepare the virtual machine to run FIO.
This includes installing fio, bc, and libaio1 and pre-filling the
attached disk. We also make sure the job file is always located
at the same path on the local machine.
Args:
vm: The virtual machine to prepare the benchmark on.
exec_path: string path to the fio executable
"""
logging.info('FIO prepare on %s', vm)
vm.Install('fio')
# Choose a disk or file name and optionally fill it
disk = vm.scratch_disks[0]
if FillTarget():
logging.info('Fill device %s on %s', disk.GetDevicePath(), vm)
FillDevice(vm, disk, FLAGS.fio_fill_size, exec_path)
# We only need to format and mount if the target mode is against
# file with fill because 1) if we're running against the device, we
# don't want it mounted and 2) if we're running against a file
# without fill, it was never unmounted (see GetConfig()).
if FLAGS.fio_target_mode == AGAINST_FILE_WITH_FILL_MODE:
disk.mount_point = FLAGS.scratch_dir or MOUNT_POINT
disk_spec = vm.disk_specs[0]
vm.FormatDisk(disk.GetDevicePath(), disk_spec.disk_type)
vm.MountDisk(disk.GetDevicePath(), disk.mount_point,
disk_spec.disk_type, disk.mount_options, disk.fstab_options)
if FLAGS.fio_write_against_multiple_clients:
vm.RemoteCommand('sudo rm -rf %s/%s' % (disk.mount_point, vm.name))
vm.RemoteCommand('sudo mkdir -p %s/%s' % (disk.mount_point, vm.name))
def Run(benchmark_spec):
"""Spawn fio on vm(s) and gather results."""
fio_exe = fio.GetFioExec()
default_job_file_contents = GetFileAsString(data.ResourcePath('fio.job'))
vms = benchmark_spec.vms
samples = []
path = REMOTE_JOB_FILE_PATH
samples_list = vm_util.RunThreaded(
lambda vm: RunWithExec(vm, fio_exe, path, default_job_file_contents), vms)
for i, _ in enumerate(samples_list):
for item in samples_list[i]:
item.metadata['machine_instance'] = i
samples.extend(samples_list[i])
if FLAGS.fio_write_against_multiple_clients:
metrics = collections.defaultdict(list)
    for item in samples:
      # example metric: 'filestore-bandwidth:write:bandwidth'
      metrics[item.metric.split(':', 1)[-1]].append(item.value)
    if not metrics:
      return samples
samples.append(
sample.Sample('Total_write_throughput', sum(metrics['write:bandwidth']),
'KB/s'))
samples.append(
sample.Sample('Total_write_iops', sum(metrics['write:iops']), 'ops'))
earliest_start = min(metrics['start_time'])
latest_start = max(metrics['start_time'])
earliest_end = min(metrics['end_time'])
latest_end = max(metrics['end_time'])
# Invalid run if the start and end times don't overlap 95%.
nonoverlap_percent = (latest_end - earliest_end + latest_start -
earliest_start) / (
earliest_end - latest_start)
valid_run = (nonoverlap_percent < 0.05)
for item in samples:
item.metadata['valid_run'] = valid_run
item.metadata['nonoverlap_percentage'] = nonoverlap_percent
for item in samples:
item.metadata['fio_target_mode'] = FLAGS.fio_target_mode
item.metadata['fio_fill_size'] = FLAGS.fio_fill_size
return samples
def RunWithExec(vm, exec_path, remote_job_file_path, job_file_contents):
"""Spawn fio and gather the results.
Args:
vm: vm to run the benchmark on.
exec_path: string path to the fio executable.
remote_job_file_path: path, on the vm, to the location of the job file.
job_file_contents: string contents of the fio job file.
Returns:
A list of sample.Sample objects.
"""
logging.info('FIO running on %s', vm)
disk = vm.scratch_disks[0]
mount_point = disk.mount_point
if FLAGS.fio_write_against_multiple_clients:
mount_point = '%s/%s' % (disk.mount_point, vm.name)
logging.info('FIO mount point changed to %s', mount_point)
job_file_string = GetOrGenerateJobFileString(
FLAGS.fio_jobfile,
FLAGS.fio_generate_scenarios,
AgainstDevice(),
disk,
FLAGS.fio_io_depths,
FLAGS.fio_num_jobs,
FLAGS.fio_working_set_size,
FLAGS.fio_blocksize,
FLAGS.fio_runtime,
FLAGS.fio_parameters,
job_file_contents)
job_file_path = vm_util.PrependTempDir(vm.name + LOCAL_JOB_FILE_SUFFIX)
with open(job_file_path, 'w') as job_file:
job_file.write(job_file_string)
logging.info('Wrote fio job file at %s', job_file_path)
logging.info(job_file_string)
vm.PushFile(job_file_path, remote_job_file_path)
if AgainstDevice():
fio_command = '%s --output-format=json --filename=%s %s' % (
exec_path, disk.GetDevicePath(), remote_job_file_path)
else:
fio_command = '%s --output-format=json --directory=%s %s' % (
exec_path, mount_point, remote_job_file_path)
collect_logs = any([FLAGS.fio_lat_log, FLAGS.fio_bw_log, FLAGS.fio_iops_log,
FLAGS.fio_hist_log])
log_file_base = ''
if collect_logs:
log_file_base = '%s_%s' % (PKB_FIO_LOG_FILE_NAME, str(time.time()))
fio_command = ' '.join([fio_command, GetLogFlags(log_file_base)])
# TODO(user): This only gives results at the end of a job run
# so the program pauses here with no feedback to the user.
# This is a pretty lousy experience.
logging.info('FIO Results:')
start_time = time.time()
stdout, _ = vm.RobustRemoteCommand(
fio_command, should_log=True, timeout=FLAGS.fio_command_timeout_sec)
end_time = time.time()
bin_vals = []
if collect_logs:
vm.PullFile(vm_util.GetTempDir(), '%s*.log' % log_file_base)
if FLAGS.fio_hist_log:
num_logs = int(vm.RemoteCommand(
'ls %s_clat_hist.*.log | wc -l' % log_file_base)[0])
bin_vals += [fio.ComputeHistogramBinVals(
vm, '%s_clat_hist.%s.log' % (
log_file_base, idx + 1)) for idx in range(num_logs)]
samples = fio.ParseResults(job_file_string, json.loads(stdout),
log_file_base=log_file_base, bin_vals=bin_vals)
samples.append(
sample.Sample('start_time', start_time, 'sec', samples[0].metadata))
samples.append(
sample.Sample('end_time', end_time, 'sec', samples[0].metadata))
return samples
def Cleanup(benchmark_spec):
"""Uninstall packages required for fio and remove benchmark files.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vm = benchmark_spec.vms[0]
  logging.info('FIO cleanup on %s', vm)
vm.RemoveFile(REMOTE_JOB_FILE_PATH)
if not AgainstDevice() and not FLAGS.fio_jobfile:
# If the user supplies their own job file, then they have to clean
# up after themselves, because we don't know their temp file name.
vm.RemoveFile(posixpath.join(vm.GetScratchDir(), DEFAULT_TEMP_FILE_NAME))
|
import os
import sys
import subprocess
from flexx.util.testing import run_tests_if_main, raises, skipif
import flexx
# https://docs.pytest.org/en/latest/skipping.html
pytestmark = skipif(
'__pypy__' in sys.builtin_module_names and os.getenv('TRAVIS', '') == 'true',
reason='These import tests are slow on pypy')
# minimum that will be imported when importing flexx
PROJECT_MODULE = flexx
MIN_MODULES = ['flexx', 'flexx.util', 'flexx._config']
PROJECT_NAME = 'flexx'
## Generic code
def loaded_modules(import_module, depth=None, all_modules=False):
""" Import the given module in subprocess and return loaded modules
Import a certain module in a clean subprocess and return the
projects modules that are subsequently loaded. The given depth
indicates the module level (i.e. depth=1 will only yield 'X.app'
but not 'X.app.backends').
"""
project_dir = os.path.dirname(os.path.dirname(PROJECT_MODULE.__file__))
# Get the loaded modules in a clean interpreter
code = "import sys, %s; print(', '.join(sys.modules))" % import_module
res = subprocess.check_output([sys.executable, '-c', code], cwd=project_dir,
stderr=subprocess.STDOUT).decode()
loaded_modules = [name.strip() for name in res.split(',')]
# Tweaks for legacy Python
loaded_modules = [name.replace('flexx_legacy', 'flexx') for name in loaded_modules]
if 'flexx.sys' in loaded_modules:
loaded_modules.remove('flexx.sys')
if all_modules:
return loaded_modules
# Select project modules at the given depth
project_modules = set()
for m in loaded_modules:
if m.startswith(PROJECT_NAME) and '__future__' not in m:
if depth:
parts = m.split('.')
m = '.'.join(parts[:depth])
project_modules.add(m)
return project_modules
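# Truncation sketch: with depth=2, a loaded module such as 'flexx.app.backends'
# is reported as 'flexx.app', while depth=1 would collapse it to just 'flexx'.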
def test_import_nothing():
""" Not importing anything should not import any project modules. """
modnames = loaded_modules('os', 2)
assert modnames == set()
def test_import_project():
""" Importing project should only pull in the minimal submodules. """
modnames = loaded_modules(PROJECT_NAME, 2)
assert modnames == set(MIN_MODULES)
def test_import_project_fail():
raises(Exception, loaded_modules, PROJECT_NAME + '.foobarxx')
## below it's project specific
def test_import_flexx_util():
modnames = loaded_modules('flexx.util', 2)
assert modnames == set(MIN_MODULES + ['flexx.util'])
def test_import_flexx_event():
modnames = loaded_modules('flexx.event', 2)
assert modnames == set(MIN_MODULES + ['flexx.event'])
def test_import_flexx_app():
modnames = loaded_modules('flexx.app', 2)
assert modnames == set(MIN_MODULES + ['flexx.app', 'flexx.util', 'flexx.event'])
def test_import_flexx_ui():
modnames = loaded_modules('flexx.ui', 2)
assert modnames == set(MIN_MODULES + ['flexx.app', 'flexx.util', 'flexx.event', 'flexx.ui'])
def test_import_deps():
# These do not depend on tornado
deps = set() # no, not set(['tornado']) :)
assert deps.difference(loaded_modules('flexx.util', 2, True)) == deps
assert deps.difference(loaded_modules('flexx.event', 2, True)) == deps
# But app and ui do
assert deps.difference(loaded_modules('flexx.app', 2, True)) == set()
run_tests_if_main()
|
import unittest
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import voc_semantic_segmentation_label_names
from chainercv.datasets import VOCSemanticSegmentationDataset
from chainercv.utils import assert_is_semantic_segmentation_dataset
@testing.parameterize(
{'split': 'train'},
{'split': 'val'},
{'split': 'trainval'}
)
class TestVOCSemanticSegmentationDataset(unittest.TestCase):
def setUp(self):
self.dataset = VOCSemanticSegmentationDataset(split=self.split)
@attr.slow
def test_voc_semantic_segmentation_dataset(self):
assert_is_semantic_segmentation_dataset(
self.dataset,
len(voc_semantic_segmentation_label_names),
n_example=10)
testing.run_module(__name__, __file__)
|
import logging
from pyhap.const import CATEGORY_LIGHTBULB
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import callback
from homeassistant.util.color import (
color_temperature_mired_to_kelvin,
color_temperature_to_hs,
)
from .accessories import TYPES, HomeAccessory
from .const import (
CHAR_BRIGHTNESS,
CHAR_COLOR_TEMPERATURE,
CHAR_HUE,
CHAR_ON,
CHAR_SATURATION,
PROP_MAX_VALUE,
PROP_MIN_VALUE,
SERV_LIGHTBULB,
)
_LOGGER = logging.getLogger(__name__)
RGB_COLOR = "rgb_color"
@TYPES.register("Light")
class Light(HomeAccessory):
"""Generate a Light accessory for a light entity.
Currently supports: state, brightness, color temperature, rgb_color.
"""
def __init__(self, *args):
"""Initialize a new Light accessory object."""
super().__init__(*args, category=CATEGORY_LIGHTBULB)
self.chars = []
state = self.hass.states.get(self.entity_id)
self._features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if self._features & SUPPORT_BRIGHTNESS:
self.chars.append(CHAR_BRIGHTNESS)
if self._features & SUPPORT_COLOR:
self.chars.append(CHAR_HUE)
self.chars.append(CHAR_SATURATION)
elif self._features & SUPPORT_COLOR_TEMP:
            # The ColorTemperature and Hue characteristics should not both be
            # exposed. HomeKit tracks the two states separately, which causes
            # "source of truth" problems.
self.chars.append(CHAR_COLOR_TEMPERATURE)
serv_light = self.add_preload_service(SERV_LIGHTBULB, self.chars)
self.char_on = serv_light.configure_char(CHAR_ON, value=0)
if CHAR_BRIGHTNESS in self.chars:
            # The initial value is set to 100 because 0 is a special value (off); 100 is
            # an arbitrary non-zero value. It is updated immediately by async_update_state
            # to the correct initial value.
self.char_brightness = serv_light.configure_char(CHAR_BRIGHTNESS, value=100)
if CHAR_COLOR_TEMPERATURE in self.chars:
min_mireds = self.hass.states.get(self.entity_id).attributes.get(
ATTR_MIN_MIREDS, 153
)
max_mireds = self.hass.states.get(self.entity_id).attributes.get(
ATTR_MAX_MIREDS, 500
)
self.char_color_temperature = serv_light.configure_char(
CHAR_COLOR_TEMPERATURE,
value=min_mireds,
properties={PROP_MIN_VALUE: min_mireds, PROP_MAX_VALUE: max_mireds},
)
if CHAR_HUE in self.chars:
self.char_hue = serv_light.configure_char(CHAR_HUE, value=0)
if CHAR_SATURATION in self.chars:
self.char_saturation = serv_light.configure_char(CHAR_SATURATION, value=75)
self.async_update_state(state)
serv_light.setter_callback = self._set_chars
def _set_chars(self, char_values):
_LOGGER.debug("Light _set_chars: %s", char_values)
events = []
service = SERVICE_TURN_ON
params = {ATTR_ENTITY_ID: self.entity_id}
if CHAR_ON in char_values:
if not char_values[CHAR_ON]:
service = SERVICE_TURN_OFF
events.append(f"Set state to {char_values[CHAR_ON]}")
if CHAR_BRIGHTNESS in char_values:
if char_values[CHAR_BRIGHTNESS] == 0:
events[-1] = "Set state to 0"
service = SERVICE_TURN_OFF
else:
params[ATTR_BRIGHTNESS_PCT] = char_values[CHAR_BRIGHTNESS]
events.append(f"brightness at {char_values[CHAR_BRIGHTNESS]}%")
if CHAR_COLOR_TEMPERATURE in char_values:
params[ATTR_COLOR_TEMP] = char_values[CHAR_COLOR_TEMPERATURE]
events.append(f"color temperature at {char_values[CHAR_COLOR_TEMPERATURE]}")
if (
self._features & SUPPORT_COLOR
and CHAR_HUE in char_values
and CHAR_SATURATION in char_values
):
color = (char_values[CHAR_HUE], char_values[CHAR_SATURATION])
_LOGGER.debug("%s: Set hs_color to %s", self.entity_id, color)
params[ATTR_HS_COLOR] = color
events.append(f"set color at {color}")
self.call_service(DOMAIN, service, params, ", ".join(events))
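    # Illustration (hypothetical HomeKit write): a char_values payload of
    #   {CHAR_ON: 1, CHAR_BRIGHTNESS: 50}
    # results in a single light.turn_on call with brightness_pct=50, while
    #   {CHAR_ON: 1, CHAR_BRIGHTNESS: 0}
    # is coalesced into a turn_off call.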
@callback
def async_update_state(self, new_state):
"""Update light after state change."""
# Handle State
state = new_state.state
if state == STATE_ON and self.char_on.value != 1:
self.char_on.set_value(1)
elif state == STATE_OFF and self.char_on.value != 0:
self.char_on.set_value(0)
# Handle Brightness
if CHAR_BRIGHTNESS in self.chars:
brightness = new_state.attributes.get(ATTR_BRIGHTNESS)
if isinstance(brightness, (int, float)):
brightness = round(brightness / 255 * 100, 0)
                # The homeassistant component might report its brightness as 0 but not
                # be off. However, 0 is a special value in homekit. When you turn on a
                # homekit accessory it will try to restore the last brightness state,
                # which will be the last value saved by char_brightness.set_value.
                # But if it is set to 0, HomeKit will update the brightness to 100 as
                # it thinks 0 is off.
                #
                # Therefore, if the brightness is 0 and the device is still on,
                # the brightness is mapped to 1; otherwise the update is ignored in
                # order to avoid this incorrect behavior.
if brightness == 0 and state == STATE_ON:
brightness = 1
if self.char_brightness.value != brightness:
self.char_brightness.set_value(brightness)
# Handle color temperature
if CHAR_COLOR_TEMPERATURE in self.chars:
color_temperature = new_state.attributes.get(ATTR_COLOR_TEMP)
if isinstance(color_temperature, (int, float)):
color_temperature = round(color_temperature, 0)
if self.char_color_temperature.value != color_temperature:
self.char_color_temperature.set_value(color_temperature)
# Handle Color
if CHAR_SATURATION in self.chars and CHAR_HUE in self.chars:
if ATTR_HS_COLOR in new_state.attributes:
hue, saturation = new_state.attributes[ATTR_HS_COLOR]
elif ATTR_COLOR_TEMP in new_state.attributes:
hue, saturation = color_temperature_to_hs(
color_temperature_mired_to_kelvin(
new_state.attributes[ATTR_COLOR_TEMP]
)
)
else:
hue, saturation = None, None
if isinstance(hue, (int, float)) and isinstance(saturation, (int, float)):
hue = round(hue, 0)
saturation = round(saturation, 0)
if hue != self.char_hue.value:
self.char_hue.set_value(hue)
if saturation != self.char_saturation.value:
self.char_saturation.set_value(saturation)
|
from collections import UserList
from pylatex.utils import dumps_list
from contextlib import contextmanager
from .latex_object import LatexObject
from .command import Command, Arguments
class Container(LatexObject, UserList):
"""A base class that groups multiple LaTeX classes.
This class should be subclassed when a LaTeX class has content that is of
variable length. It subclasses UserList, so it holds a list of elements
that can simply be accessed by using normal list functionality, like
indexing or appending.
"""
content_separator = '%\n'
def __init__(self, *, data=None):
r"""
Args
----
data: list, `~.LatexObject` or something that can be converted to a \
string
The content with which the container is initialized
"""
if data is None:
data = []
elif not isinstance(data, list):
# If the data is not already a list make it a list, otherwise list
# operations will not work
data = [data]
self.data = data
self.real_data = data # Always the data of this instance
super().__init__()
@property
def _repr_attributes(self):
return super()._repr_attributes + ['real_data']
def dumps_content(self, **kwargs):
r"""Represent the container as a string in LaTeX syntax.
Args
----
\*\*kwargs:
Arguments that can be passed to `~.dumps_list`
Returns
-------
string:
A LaTeX string representing the container
"""
return dumps_list(self, escape=self.escape,
token=self.content_separator, **kwargs)
def _propagate_packages(self):
"""Make sure packages get propagated."""
for item in self.data:
if isinstance(item, LatexObject):
if isinstance(item, Container):
item._propagate_packages()
for p in item.packages:
self.packages.add(p)
def dumps_packages(self):
r"""Represent the packages needed as a string in LaTeX syntax.
Returns
-------
string:
A LaTeX string representing the packages of the container
"""
self._propagate_packages()
return super().dumps_packages()
@contextmanager
def create(self, child):
"""Add a LaTeX object to current container, context-manager style.
Args
----
child: `~.Container`
An object to be added to the current container
"""
prev_data = self.data
        self.data = child.data  # This way, appends go to the child
yield child # allows with ... as to be used as well
self.data = prev_data
self.append(child)
class Environment(Container):
r"""A base class for LaTeX environments.
This class implements the basics of a LaTeX environment. A LaTeX
environment looks like this:
.. code-block:: latex
\begin{environment_name}
Some content that is in the environment
\end{environment_name}
The text that is used in the place of environment_name is by default the
name of the class in lowercase.
However, this default can be overridden in 2 ways:
1. setting the _latex_name class variable when declaring the class
2. setting the _latex_name attribute when initialising object
"""
#: Set to true if this full container should be equivalent to an empty
#: string if it has no content.
omit_if_empty = False
def __init__(self, *, options=None, arguments=None, start_arguments=None,
**kwargs):
r"""
Args
----
options: str or list or `~.Options`
Options to be added to the ``\begin`` command
arguments: str or list or `~.Arguments`
Arguments to be added to the ``\begin`` command
start_arguments: str or list or `~.Arguments`
Arguments to be added before the options
"""
self.options = options
self.arguments = arguments
self.start_arguments = start_arguments
super().__init__(**kwargs)
def dumps(self):
"""Represent the environment as a string in LaTeX syntax.
Returns
-------
str
A LaTeX string representing the environment.
"""
content = self.dumps_content()
if not content.strip() and self.omit_if_empty:
return ''
string = ''
# Something other than None needs to be used as extra arguments, that
# way the options end up behind the latex_name argument.
if self.arguments is None:
extra_arguments = Arguments()
else:
extra_arguments = self.arguments
begin = Command('begin', self.start_arguments, self.options,
extra_arguments=extra_arguments)
begin.arguments._positional_args.insert(0, self.latex_name)
string += begin.dumps() + self.content_separator
string += content + self.content_separator
string += Command('end', self.latex_name).dumps()
return string
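# Rendering sketch: for a hypothetical ``class Center(Environment)`` holding
# the text 'abc', dumps() produces roughly
#
#   \begin{center}%
#   abc%
#   \end{center}
#
# i.e. a \begin/\end pair around the content, joined by content_separator.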
class Fragment(Container):
r"""A LaTeX fragment container class for fragmented document construction.
This only provides logical wrapping of the items. The final document will
look the same as if all items would not have been part of a container.
    A common use case of this is to generate a ``.tex`` snippet containing more
    than one LaTeX item without any extra container around it. This snippet can
    then be included in another ``.tex`` file using ``\input{snippet.tex}``.
"""
def __init__(self, **kwargs):
"""
Args
----
"""
super().__init__(**kwargs)
def dumps(self):
"""Represent the fragment as a string in LaTeX syntax.
Returns
-------
str
"""
return self.dumps_content()
class ContainerCommand(Container):
r"""A base class for a container command (A command which contains data).
Container command example:
.. code-block:: latex
\CommandName[options]{arguments}{
data
}
"""
omit_if_empty = False
def __init__(self, arguments=None, options=None, *, data=None, **kwargs):
r"""
Args
----
arguments: str or `list`
The arguments for the container command
options: str, list or `~.Options`
            The options for the container command
        data: str or `~.LatexObject`
            The data to place inside the container command
"""
self.arguments = arguments
self.options = options
super().__init__(data=data, **kwargs)
def dumps(self):
r"""Convert the container to a string in latex syntax."""
content = self.dumps_content()
if not content.strip() and self.omit_if_empty:
return ''
string = ''
start = Command(self.latex_name, arguments=self.arguments,
options=self.options)
string += start.dumps() + '{%\n'
if content != '':
string += content + '%\n}'
else:
string += '}'
return string
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.architectures import resnet5
from compare_gan.gans import consts
import gin
import tensorflow as tf
class ResNetInitTest(tf.test.TestCase):
def setUp(self):
super(ResNetInitTest, self).setUp()
gin.clear_config()
def testInitializersOldDefault(self):
valid_initalizer = [
"kernel/Initializer/random_normal",
"bias/Initializer/Const",
# truncated_normal is the old default for conv2d.
"kernel/Initializer/truncated_normal",
"bias/Initializer/Const",
"beta/Initializer/zeros",
"gamma/Initializer/ones",
]
valid_op_names = "/({}):0$".format("|".join(valid_initalizer))
with tf.Graph().as_default():
z = tf.zeros((2, 128))
fake_image = resnet5.Generator(image_shape=(128, 128, 3))(
z, y=None, is_training=True)
resnet5.Discriminator()(fake_image, y=None, is_training=True)
for var in tf.trainable_variables():
op_name = var.initializer.inputs[1].name
self.assertRegex(op_name, valid_op_names)
def testInitializersRandomNormal(self):
gin.bind_parameter("weights.initializer", consts.NORMAL_INIT)
valid_initalizer = [
"kernel/Initializer/random_normal",
"bias/Initializer/Const",
"kernel/Initializer/random_normal",
"bias/Initializer/Const",
"beta/Initializer/zeros",
"gamma/Initializer/ones",
]
valid_op_names = "/({}):0$".format("|".join(valid_initalizer))
with tf.Graph().as_default():
z = tf.zeros((2, 128))
fake_image = resnet5.Generator(image_shape=(128, 128, 3))(
z, y=None, is_training=True)
resnet5.Discriminator()(fake_image, y=None, is_training=True)
for var in tf.trainable_variables():
op_name = var.initializer.inputs[1].name
self.assertRegex(op_name, valid_op_names)
def testInitializersTruncatedNormal(self):
gin.bind_parameter("weights.initializer", consts.TRUNCATED_INIT)
valid_initalizer = [
"kernel/Initializer/truncated_normal",
"bias/Initializer/Const",
"kernel/Initializer/truncated_normal",
"bias/Initializer/Const",
"beta/Initializer/zeros",
"gamma/Initializer/ones",
]
valid_op_names = "/({}):0$".format("|".join(valid_initalizer))
with tf.Graph().as_default():
z = tf.zeros((2, 128))
fake_image = resnet5.Generator(image_shape=(128, 128, 3))(
z, y=None, is_training=True)
resnet5.Discriminator()(fake_image, y=None, is_training=True)
for var in tf.trainable_variables():
op_name = var.initializer.inputs[1].name
self.assertRegex(op_name, valid_op_names)
def testGeneratorInitializersOrthogonal(self):
gin.bind_parameter("weights.initializer", consts.ORTHOGONAL_INIT)
valid_initalizer = [
"kernel/Initializer/mul_1",
"bias/Initializer/Const",
"kernel/Initializer/mul_1",
"bias/Initializer/Const",
"beta/Initializer/zeros",
"gamma/Initializer/ones",
]
valid_op_names = "/({}):0$".format("|".join(valid_initalizer))
with tf.Graph().as_default():
z = tf.zeros((2, 128))
fake_image = resnet5.Generator(image_shape=(128, 128, 3))(
z, y=None, is_training=True)
resnet5.Discriminator()(fake_image, y=None, is_training=True)
for var in tf.trainable_variables():
op_name = var.initializer.inputs[1].name
self.assertRegex(op_name, valid_op_names)
if __name__ == "__main__":
tf.test.main()
|
import logging
import aiohttp
from pyjuicenet import Api, TokenError
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_ACCESS_TOKEN
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import DOMAIN # pylint:disable=unused-import
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema({vol.Required(CONF_ACCESS_TOKEN): str})
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
session = async_get_clientsession(hass)
juicenet = Api(data[CONF_ACCESS_TOKEN], session)
try:
await juicenet.get_devices()
except TokenError as error:
_LOGGER.error("Token Error %s", error)
raise InvalidAuth from error
except aiohttp.ClientError as error:
_LOGGER.error("Error connecting %s", error)
raise CannotConnect from error
# Return info that you want to store in the config entry.
return {"title": "JuiceNet"}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for JuiceNet."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
await self.async_set_unique_id(user_input[CONF_ACCESS_TOKEN])
self._abort_if_unique_id_configured()
try:
info = await validate_input(self.hass, user_input)
return self.async_create_entry(title=info["title"], data=user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_import(self, user_input):
"""Handle import."""
return await self.async_step_user(user_input)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
import cherrypy
from cherrypy._cpcompat import text_or_bytes
from cherrypy.lib import reprconf
def _if_filename_register_autoreload(ob):
"""Register for autoreload if ob is a string (presumed filename)."""
    is_filename = isinstance(ob, text_or_bytes)
    if is_filename:
        cherrypy.engine.autoreload.files.add(ob)
def merge(base, other):
"""Merge one app config (from a dict, file, or filename) into another.
If the given config is a filename, it will be appended to
the list of files to monitor for "autoreload" changes.
"""
_if_filename_register_autoreload(other)
# Load other into base
for section, value_map in reprconf.Parser.load(other).items():
if not isinstance(value_map, dict):
raise ValueError(
'Application config must include section headers, but the '
"config you tried to merge doesn't have any sections. "
'Wrap your config in another dict with paths as section '
"headers, for example: {'/': config}.")
base.setdefault(section, {}).update(value_map)
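# Sketch of merging an in-memory app config (paths as section headers):
#
#     base = {}
#     merge(base, {'/': {'tools.gzip.on': True}})
#     # base == {'/': {'tools.gzip.on': True}}
#
# Passing a filename instead would also register that file for autoreload.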
class Config(reprconf.Config):
"""The 'global' configuration data for the entire CherryPy process."""
def update(self, config):
"""Update self from a dict, file or filename."""
_if_filename_register_autoreload(config)
super(Config, self).update(config)
def _apply(self, config):
"""Update self from a dict."""
if isinstance(config.get('global'), dict):
if len(config) > 1:
cherrypy.checker.global_config_contained_paths = True
config = config['global']
if 'tools.staticdir.dir' in config:
config['tools.staticdir.section'] = 'global'
super(Config, self)._apply(config)
@staticmethod
def __call__(**kwargs):
"""Decorate for page handlers to set _cp_config."""
def tool_decorator(f):
_Vars(f).setdefault('_cp_config', {}).update(kwargs)
return f
return tool_decorator
class _Vars(object):
"""Adapter allowing setting a default attribute on a function or class."""
def __init__(self, target):
self.target = target
def setdefault(self, key, default):
if not hasattr(self.target, key):
setattr(self.target, key, default)
return getattr(self.target, key)
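# Decorator sketch: because cherrypy.config is an instance of this Config
# class, its __call__ can attach per-handler settings, e.g.
#
#     @cherrypy.config(**{'tools.gzip.on': True})
#     def index(self):
#         return 'hello'
#
# which stores the kwargs on index._cp_config via _Vars.setdefault().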
# Sphinx begin config.environments
Config.environments = environments = {
'staging': {
'engine.autoreload.on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': False,
'request.show_mismatched_params': False,
},
'production': {
'engine.autoreload.on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': False,
'request.show_mismatched_params': False,
'log.screen': False,
},
'embedded': {
# For use with CherryPy embedded in another deployment stack.
'engine.autoreload.on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': False,
'request.show_mismatched_params': False,
'log.screen': False,
'engine.SIGHUP': None,
'engine.SIGTERM': None,
},
'test_suite': {
'engine.autoreload.on': False,
'checker.on': False,
'tools.log_headers.on': False,
'request.show_tracebacks': True,
'request.show_mismatched_params': True,
'log.screen': False,
},
}
# Sphinx end config.environments
def _server_namespace_handler(k, v):
"""Config handler for the "server" namespace."""
atoms = k.split('.', 1)
if len(atoms) > 1:
# Special-case config keys of the form 'server.servername.socket_port'
# to configure additional HTTP servers.
if not hasattr(cherrypy, 'servers'):
cherrypy.servers = {}
servername, k = atoms
if servername not in cherrypy.servers:
from cherrypy import _cpserver
cherrypy.servers[servername] = _cpserver.Server()
# On by default, but 'on = False' can unsubscribe it (see below).
cherrypy.servers[servername].subscribe()
if k == 'on':
if v:
cherrypy.servers[servername].subscribe()
else:
cherrypy.servers[servername].unsubscribe()
else:
setattr(cherrypy.servers[servername], k, v)
else:
setattr(cherrypy.server, k, v)
Config.namespaces['server'] = _server_namespace_handler
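# Config sketch (the server name '2' is arbitrary): keys of the form
#
#     server.2.socket_port = 8081
#     server.2.socket_host = '0.0.0.0'
#
# create and subscribe an additional _cpserver.Server named '2', while plain
# 'server.socket_port' keys still configure the default server.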
def _engine_namespace_handler(k, v):
"""Config handler for the "engine" namespace."""
engine = cherrypy.engine
if k in {'SIGHUP', 'SIGTERM'}:
engine.subscribe(k, v)
return
if '.' in k:
plugin, attrname = k.split('.', 1)
plugin = getattr(engine, plugin)
op = 'subscribe' if v else 'unsubscribe'
sub_unsub = getattr(plugin, op, None)
if attrname == 'on' and callable(sub_unsub):
sub_unsub()
return
setattr(plugin, attrname, v)
else:
setattr(engine, k, v)
Config.namespaces['engine'] = _engine_namespace_handler
def _tree_namespace_handler(k, v):
"""Namespace handler for the 'tree' config namespace."""
if isinstance(v, dict):
for script_name, app in v.items():
cherrypy.tree.graft(app, script_name)
msg = 'Mounted: %s on %s' % (app, script_name or '/')
cherrypy.engine.log(msg)
else:
cherrypy.tree.graft(v, v.script_name)
cherrypy.engine.log('Mounted: %s on %s' % (v, v.script_name or '/'))
Config.namespaces['tree'] = _tree_namespace_handler
|
import sys
import mne
def run():
"""Run command."""
parser = mne.commands.utils.get_optparser(
__file__, usage='mne show_info <file>')
options, args = parser.parse_args()
if len(args) != 1:
parser.print_help()
sys.exit(1)
fname = args[0]
if not fname.endswith('.fif'):
raise ValueError('%s does not seem to be a .fif file.' % fname)
info = mne.io.read_info(fname)
print("File : %s" % fname)
print(info)
mne.utils.run_command_if_main()
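# Command-line sketch (file name hypothetical):
#
#   $ mne show_info sample_audvis_raw.fif
#
# prints the measurement info contained in the given .fif file.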
|
import logging
from pynanoleaf import Nanoleaf, Unavailable
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN
import homeassistant.helpers.config_validation as cv
from homeassistant.util import color as color_util
from homeassistant.util.color import (
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
from homeassistant.util.json import load_json, save_json
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Nanoleaf"
DATA_NANOLEAF = "nanoleaf"
CONFIG_FILE = ".nanoleaf.conf"
ICON = "mdi:triangle-outline"
SUPPORT_NANOLEAF = (
SUPPORT_BRIGHTNESS
| SUPPORT_COLOR_TEMP
| SUPPORT_EFFECT
| SUPPORT_COLOR
| SUPPORT_TRANSITION
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Nanoleaf light."""
if DATA_NANOLEAF not in hass.data:
hass.data[DATA_NANOLEAF] = {}
token = ""
if discovery_info is not None:
host = discovery_info["host"]
name = discovery_info["hostname"]
# if device already exists via config, skip discovery setup
if host in hass.data[DATA_NANOLEAF]:
return
_LOGGER.info("Discovered a new Nanoleaf: %s", discovery_info)
conf = load_json(hass.config.path(CONFIG_FILE))
if conf.get(host, {}).get("token"):
token = conf[host]["token"]
else:
host = config[CONF_HOST]
name = config[CONF_NAME]
token = config[CONF_TOKEN]
nanoleaf_light = Nanoleaf(host)
if not token:
token = nanoleaf_light.request_token()
if not token:
_LOGGER.error(
"Could not generate the auth token, did you press "
"and hold the power button on %s"
"for 5-7 seconds?",
name,
)
return
conf = load_json(hass.config.path(CONFIG_FILE))
conf[host] = {"token": token}
save_json(hass.config.path(CONFIG_FILE), conf)
nanoleaf_light.token = token
try:
nanoleaf_light.available
except Unavailable:
_LOGGER.error("Could not connect to Nanoleaf Light: %s on %s", name, host)
return
hass.data[DATA_NANOLEAF][host] = nanoleaf_light
add_entities([NanoleafLight(nanoleaf_light, name)], True)
class NanoleafLight(LightEntity):
"""Representation of a Nanoleaf Light."""
def __init__(self, light, name):
"""Initialize an Nanoleaf light."""
self._available = True
self._brightness = None
self._color_temp = None
self._effect = None
self._effects_list = None
self._light = light
self._name = name
self._hs_color = None
self._state = None
@property
def available(self):
"""Return availability."""
return self._available
@property
def brightness(self):
"""Return the brightness of the light."""
if self._brightness is not None:
return int(self._brightness * 2.55)
return None
@property
def color_temp(self):
"""Return the current color temperature."""
if self._color_temp is not None:
return color_util.color_temperature_kelvin_to_mired(self._color_temp)
return None
@property
def effect(self):
"""Return the current effect."""
return self._effect
@property
def effect_list(self):
"""Return the list of supported effects."""
return self._effects_list
@property
def min_mireds(self):
"""Return the coldest color_temp that this light supports."""
return 154
@property
def max_mireds(self):
"""Return the warmest color_temp that this light supports."""
return 833
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def hs_color(self):
"""Return the color in HS."""
return self._hs_color
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_NANOLEAF
def turn_on(self, **kwargs):
"""Instruct the light to turn on."""
brightness = kwargs.get(ATTR_BRIGHTNESS)
hs_color = kwargs.get(ATTR_HS_COLOR)
color_temp_mired = kwargs.get(ATTR_COLOR_TEMP)
effect = kwargs.get(ATTR_EFFECT)
transition = kwargs.get(ATTR_TRANSITION)
if hs_color:
hue, saturation = hs_color
self._light.hue = int(hue)
self._light.saturation = int(saturation)
if color_temp_mired:
self._light.color_temperature = mired_to_kelvin(color_temp_mired)
if transition:
if brightness: # tune to the required brightness in n seconds
self._light.brightness_transition(
int(brightness / 2.55), int(transition)
)
else: # If brightness is not specified, assume full brightness
self._light.brightness_transition(100, int(transition))
else: # If no transition is occurring, turn on the light
self._light.on = True
if brightness:
self._light.brightness = int(brightness / 2.55)
if effect:
if effect not in self._effects_list:
raise ValueError(
f"Attempting to apply effect not in the effect list: '{effect}'"
)
self._light.effect = effect
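    # Scaling sketch: Home Assistant uses 0-255 for brightness while the
    # Nanoleaf API uses 0-100, so turn_on(brightness=128) (without a
    # transition) sends int(128 / 2.55) == 50 to the panel, and a panel
    # brightness of 50 is reported back as int(50 * 2.55) == 127.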
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
transition = kwargs.get(ATTR_TRANSITION)
if transition:
self._light.brightness_transition(0, int(transition))
else:
self._light.on = False
def update(self):
"""Fetch new state data for this light."""
try:
self._available = self._light.available
self._brightness = self._light.brightness
self._effects_list = self._light.effects
            # The Nanoleaf API returns a non-existent effect named "*Solid*" when
            # the light is set to a solid color. This causes various issues with
            # scening (see https://github.com/home-assistant/core/issues/36359).
            # Until this is fixed at the library level, ensure the effect exists
            # before saving it to the light properties.
self._effect = (
self._light.effect if self._light.effect in self._effects_list else None
)
if self._effect is None:
self._color_temp = self._light.color_temperature
self._hs_color = self._light.hue, self._light.saturation
else:
self._color_temp = None
self._hs_color = None
self._state = self._light.on
except Unavailable as err:
_LOGGER.error("Could not update status for %s (%s)", self.name, err)
self._available = False
|
import os
import pytest
import serial.tools.list_ports
import zigpy.config
from homeassistant import setup
from homeassistant.components.zha import config_flow
from homeassistant.components.zha.core.const import CONF_RADIO_TYPE, DOMAIN, RadioType
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_SOURCE
from homeassistant.data_entry_flow import RESULT_TYPE_CREATE_ENTRY, RESULT_TYPE_FORM
from tests.async_mock import AsyncMock, MagicMock, patch, sentinel
from tests.common import MockConfigEntry
def com_port():
"""Mock of a serial port."""
port = serial.tools.list_ports_common.ListPortInfo()
port.serial_number = "1234"
port.manufacturer = "Virtual serial port"
port.device = "/dev/ttyUSB1234"
port.description = "Some serial port"
return port
@patch("serial.tools.list_ports.comports", MagicMock(return_value=[com_port()]))
@patch(
"homeassistant.components.zha.config_flow.detect_radios",
return_value={CONF_RADIO_TYPE: "test_radio"},
)
async def test_user_flow(detect_mock, hass):
"""Test user flow -- radio detected."""
port = com_port()
port_select = f"{port}, s/n: {port.serial_number} - {port.manufacturer}"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data={zigpy.config.CONF_DEVICE_PATH: port_select},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"].startswith(port.description)
assert result["data"] == {CONF_RADIO_TYPE: "test_radio"}
assert detect_mock.await_count == 1
assert detect_mock.await_args[0][0] == port.device
@patch("serial.tools.list_ports.comports", MagicMock(return_value=[com_port()]))
@patch(
"homeassistant.components.zha.config_flow.detect_radios",
return_value=None,
)
async def test_user_flow_not_detected(detect_mock, hass):
"""Test user flow, radio not detected."""
port = com_port()
port_select = f"{port}, s/n: {port.serial_number} - {port.manufacturer}"
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data={zigpy.config.CONF_DEVICE_PATH: port_select},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "pick_radio"
assert detect_mock.await_count == 1
assert detect_mock.await_args[0][0] == port.device
async def test_user_flow_show_form(hass):
"""Test user step form."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_user_flow_manual(hass):
"""Test user flow manual entry."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: SOURCE_USER},
data={zigpy.config.CONF_DEVICE_PATH: config_flow.CONF_MANUAL_PATH},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "pick_radio"
@pytest.mark.parametrize("radio_type", RadioType.list())
async def test_pick_radio_flow(hass, radio_type):
"""Test radio picker."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: "pick_radio"}, data={CONF_RADIO_TYPE: radio_type}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "port_config"
async def test_user_flow_existing_config_entry(hass):
"""Test if config entry already exists."""
MockConfigEntry(domain=DOMAIN, data={"usb_path": "/dev/ttyUSB1"}).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={CONF_SOURCE: SOURCE_USER}
)
assert result["type"] == "abort"
@patch("zigpy_cc.zigbee.application.ControllerApplication.probe", return_value=False)
@patch(
"zigpy_deconz.zigbee.application.ControllerApplication.probe", return_value=False
)
@patch(
"zigpy_zigate.zigbee.application.ControllerApplication.probe", return_value=False
)
@patch("zigpy_xbee.zigbee.application.ControllerApplication.probe", return_value=False)
async def test_probe_radios(xbee_probe, zigate_probe, deconz_probe, cc_probe, hass):
"""Test detect radios."""
app_ctrl_cls = MagicMock()
app_ctrl_cls.SCHEMA_DEVICE = zigpy.config.SCHEMA_DEVICE
app_ctrl_cls.probe = AsyncMock(side_effect=(True, False))
p1 = patch(
"bellows.zigbee.application.ControllerApplication.probe",
side_effect=(True, False),
)
with p1 as probe_mock:
res = await config_flow.detect_radios("/dev/null")
assert probe_mock.await_count == 1
assert res[CONF_RADIO_TYPE] == "ezsp"
assert zigpy.config.CONF_DEVICE in res
assert (
res[zigpy.config.CONF_DEVICE][zigpy.config.CONF_DEVICE_PATH] == "/dev/null"
)
res = await config_flow.detect_radios("/dev/null")
assert res is None
assert xbee_probe.await_count == 1
assert zigate_probe.await_count == 1
assert deconz_probe.await_count == 1
assert cc_probe.await_count == 1
@patch("bellows.zigbee.application.ControllerApplication.probe", return_value=False)
async def test_user_port_config_fail(probe_mock, hass):
"""Test port config flow."""
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: "pick_radio"},
data={CONF_RADIO_TYPE: RadioType.ezsp.description},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB33"},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "port_config"
assert result["errors"]["base"] == "cannot_connect"
assert probe_mock.await_count == 1
@patch("homeassistant.components.zha.async_setup_entry", AsyncMock(return_value=True))
@patch("bellows.zigbee.application.ControllerApplication.probe", return_value=True)
async def test_user_port_config(probe_mock, hass):
"""Test port config."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={CONF_SOURCE: "pick_radio"},
data={CONF_RADIO_TYPE: RadioType.ezsp.description},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input={zigpy.config.CONF_DEVICE_PATH: "/dev/ttyUSB33"},
)
assert result["type"] == "create_entry"
assert result["title"].startswith("/dev/ttyUSB33")
assert (
result["data"][zigpy.config.CONF_DEVICE][zigpy.config.CONF_DEVICE_PATH]
== "/dev/ttyUSB33"
)
assert result["data"][CONF_RADIO_TYPE] == "ezsp"
assert probe_mock.await_count == 1
def test_get_serial_by_id_no_dir():
"""Test serial by id conversion if there's no /dev/serial/by-id."""
p1 = patch("os.path.isdir", MagicMock(return_value=False))
p2 = patch("os.scandir")
with p1 as is_dir_mock, p2 as scan_mock:
res = config_flow.get_serial_by_id(sentinel.path)
assert res is sentinel.path
assert is_dir_mock.call_count == 1
assert scan_mock.call_count == 0
def test_get_serial_by_id():
"""Test serial by id conversion."""
p1 = patch("os.path.isdir", MagicMock(return_value=True))
p2 = patch("os.scandir")
def _realpath(path):
if path is sentinel.matched_link:
return sentinel.path
return sentinel.serial_link_path
p3 = patch("os.path.realpath", side_effect=_realpath)
with p1 as is_dir_mock, p2 as scan_mock, p3:
res = config_flow.get_serial_by_id(sentinel.path)
assert res is sentinel.path
assert is_dir_mock.call_count == 1
assert scan_mock.call_count == 1
entry1 = MagicMock(spec_set=os.DirEntry)
entry1.is_symlink.return_value = True
entry1.path = sentinel.some_path
entry2 = MagicMock(spec_set=os.DirEntry)
entry2.is_symlink.return_value = False
entry2.path = sentinel.other_path
entry3 = MagicMock(spec_set=os.DirEntry)
entry3.is_symlink.return_value = True
entry3.path = sentinel.matched_link
scan_mock.return_value = [entry1, entry2, entry3]
res = config_flow.get_serial_by_id(sentinel.path)
assert res is sentinel.matched_link
assert is_dir_mock.call_count == 2
assert scan_mock.call_count == 2
|
from unittest.mock import patch
from glances_api import Glances
from homeassistant import data_entry_flow
from homeassistant.components import glances
from homeassistant.const import CONF_SCAN_INTERVAL
from tests.common import MockConfigEntry, mock_coro
NAME = "Glances"
HOST = "0.0.0.0"
USERNAME = "username"
PASSWORD = "password"
PORT = 61208
VERSION = 3
SCAN_INTERVAL = 10
DEMO_USER_INPUT = {
"name": NAME,
"host": HOST,
"username": USERNAME,
"password": PASSWORD,
"version": VERSION,
"port": PORT,
"ssl": False,
"verify_ssl": True,
}
async def test_form(hass):
"""Test config entry configured successfully."""
result = await hass.config_entries.flow.async_init(
glances.DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
with patch("glances_api.Glances"), patch.object(
Glances, "get_data", return_value=mock_coro()
):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=DEMO_USER_INPUT
)
assert result["type"] == "create_entry"
assert result["title"] == NAME
assert result["data"] == DEMO_USER_INPUT
async def test_form_cannot_connect(hass):
"""Test to return error if we cannot connect."""
with patch("glances_api.Glances"):
result = await hass.config_entries.flow.async_init(
glances.DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=DEMO_USER_INPUT
)
assert result["type"] == "form"
assert result["errors"] == {"base": "cannot_connect"}
async def test_form_wrong_version(hass):
"""Test to check if wrong version is entered."""
user_input = DEMO_USER_INPUT.copy()
user_input.update(version=1)
result = await hass.config_entries.flow.async_init(
glances.DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=user_input
)
assert result["type"] == "form"
assert result["errors"] == {"version": "wrong_version"}
async def test_form_already_configured(hass):
"""Test host is already configured."""
entry = MockConfigEntry(
domain=glances.DOMAIN, data=DEMO_USER_INPUT, options={CONF_SCAN_INTERVAL: 60}
)
entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
glances.DOMAIN, context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input=DEMO_USER_INPUT
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_options(hass):
"""Test options for Glances."""
entry = MockConfigEntry(
domain=glances.DOMAIN, data=DEMO_USER_INPUT, options={CONF_SCAN_INTERVAL: 60}
)
entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={glances.CONF_SCAN_INTERVAL: 10}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
glances.CONF_SCAN_INTERVAL: 10,
}
|
import asyncio
import logging
from typing import Any, Dict, Optional, Set
from homeassistant.const import EVENT_COMPONENT_LOADED
from homeassistant.core import Event, callback
from homeassistant.loader import (
Integration,
async_get_config_flows,
async_get_integration,
bind_hass,
)
from homeassistant.util.json import load_json
from .typing import HomeAssistantType
_LOGGER = logging.getLogger(__name__)
TRANSLATION_LOAD_LOCK = "translation_load_lock"
TRANSLATION_FLATTEN_CACHE = "translation_flatten_cache"
def recursive_flatten(prefix: Any, data: Dict) -> Dict[str, Any]:
"""Return a flattened representation of dict data."""
output = {}
for key, value in data.items():
if isinstance(value, dict):
output.update(recursive_flatten(f"{prefix}{key}.", value))
else:
output[f"{prefix}{key}"] = value
return output
def flatten(data: Dict) -> Dict[str, Any]:
"""Return a flattened representation of dict data."""
return recursive_flatten("", data)
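# Illustrative sketch (not part of the original module): the helpers above turn
# nested translation dicts into dotted keys. The sample data below is made up.
def _flatten_example() -> Dict[str, Any]:
    """Show the flattened key shape produced by flatten()."""
    nested = {"config": {"step": {"user": {"title": "Example"}}}}
    # Returns {"config.step.user.title": "Example"}
    return flatten(nested)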
@callback
def component_translation_path(
component: str, language: str, integration: Integration
) -> Optional[str]:
"""Return the translation json file location for a component.
For component:
- components/hue/translations/nl.json
For platform:
- components/hue/translations/light.nl.json
If component is just a single file, will return None.
"""
parts = component.split(".")
domain = parts[-1]
is_platform = len(parts) == 2
# If it's a component that is just one file, we don't support translations
# Example custom_components/my_component.py
if integration.file_path.name != domain:
return None
if is_platform:
filename = f"{parts[0]}.{language}.json"
else:
filename = f"{language}.json"
translation_path = integration.file_path / "translations"
return str(translation_path / filename)
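# Illustrative examples (assumed, not from the original module) of the paths the
# helper above resolves for a hypothetical "hue" integration:
#   component="hue",       language="nl" -> <hue>/translations/nl.json
#   component="light.hue", language="nl" -> <hue>/translations/light.nl.json
# A single-file custom component (custom_components/my_component.py) yields None.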
def load_translations_files(
translation_files: Dict[str, str]
) -> Dict[str, Dict[str, Any]]:
"""Load and parse translation.json files."""
loaded = {}
for component, translation_file in translation_files.items():
loaded_json = load_json(translation_file)
if not isinstance(loaded_json, dict):
_LOGGER.warning(
"Translation file is unexpected type %s. Expected dict for %s",
type(loaded_json),
translation_file,
)
continue
loaded[component] = loaded_json
return loaded
def merge_resources(
translation_strings: Dict[str, Dict[str, Any]],
components: Set[str],
category: str,
) -> Dict[str, Dict[str, Any]]:
"""Build and merge the resources response for the given components and platforms."""
# Build response
resources: Dict[str, Dict[str, Any]] = {}
for component in components:
if "." not in component:
domain = component
else:
domain = component.split(".", 1)[0]
domain_resources = resources.setdefault(domain, {})
# Integrations are able to provide translations for their entities under other
# integrations if they don't have an existing device class. This is done by
# using a custom device class prefixed with their domain and two underscores.
        # These translations live in platform-specific files in the integration
        # folder, with names like `strings.sensor.json`.
# We are going to merge the translations for the custom device classes into
# the translations of sensor.
new_value = translation_strings[component].get(category)
if new_value is None:
continue
cur_value = domain_resources.get(category)
        # If it doesn't exist yet, set the value.
if cur_value is None:
domain_resources[category] = new_value
        # If it exists and is a list, append.
elif isinstance(cur_value, list):
cur_value.append(new_value)
        # If it exists and is a dict, make it a list with 2 entries.
else:
domain_resources[category] = [cur_value, new_value]
# Merge all the lists
for domain, domain_resources in list(resources.items()):
if not isinstance(domain_resources.get(category), list):
continue
merged = {}
for entry in domain_resources[category]:
if isinstance(entry, dict):
merged.update(entry)
else:
_LOGGER.error(
"An integration providing translations for %s provided invalid data: %s",
domain,
entry,
)
domain_resources[category] = merged
return {"component": resources}
def build_resources(
translation_strings: Dict[str, Dict[str, Any]],
components: Set[str],
category: str,
) -> Dict[str, Dict[str, Any]]:
"""Build the resources response for the given components."""
# Build response
resources: Dict[str, Dict[str, Any]] = {}
for component in components:
new_value = translation_strings[component].get(category)
if new_value is None:
continue
resources[component] = {category: new_value}
return {"component": resources}
async def async_get_component_strings(
hass: HomeAssistantType, language: str, components: Set[str]
) -> Dict[str, Any]:
"""Load translations."""
domains = list({loaded.split(".")[-1] for loaded in components})
integrations = dict(
zip(
domains,
await asyncio.gather(
*[async_get_integration(hass, domain) for domain in domains]
),
)
)
translations: Dict[str, Any] = {}
# Determine paths of missing components/platforms
files_to_load = {}
for loaded in components:
parts = loaded.split(".")
domain = parts[-1]
integration = integrations[domain]
path = component_translation_path(loaded, language, integration)
# No translation available
if path is None:
translations[loaded] = {}
else:
files_to_load[loaded] = path
if not files_to_load:
return translations
# Load files
load_translations_job = hass.async_add_executor_job(
load_translations_files, files_to_load
)
assert load_translations_job is not None
loaded_translations = await load_translations_job
    # Translations that are missing "title" get the integration name filled in.
for loaded, loaded_translation in loaded_translations.items():
if "." in loaded:
continue
if "title" not in loaded_translation:
loaded_translation["title"] = integrations[loaded].name
translations.update(loaded_translations)
return translations
class FlatCache:
"""Cache for flattened translations."""
def __init__(self, hass: HomeAssistantType) -> None:
"""Initialize the cache."""
self.hass = hass
self.cache: Dict[str, Dict[str, Dict[str, str]]] = {}
@callback
def async_setup(self) -> None:
"""Initialize the cache clear listeners."""
self.hass.bus.async_listen(EVENT_COMPONENT_LOADED, self._async_component_loaded)
@callback
def _async_component_loaded(self, event: Event) -> None:
"""Clear cache when a new component is loaded."""
self.cache = {}
@callback
def async_get_cache(self, language: str, category: str) -> Optional[Dict[str, str]]:
"""Get cache."""
return self.cache.setdefault(language, {}).get(category)
@callback
def async_set_cache(
self, language: str, category: str, data: Dict[str, str]
) -> None:
"""Set cache."""
self.cache.setdefault(language, {})[category] = data
@bind_hass
async def async_get_translations(
hass: HomeAssistantType,
language: str,
category: str,
integration: Optional[str] = None,
config_flow: Optional[bool] = None,
) -> Dict[str, Any]:
"""Return all backend translations.
    If an integration is specified, load translations only for that one.
    Otherwise default to loaded integrations combined with config flow
    integrations if config_flow is true.
"""
lock = hass.data.get(TRANSLATION_LOAD_LOCK)
if lock is None:
lock = hass.data[TRANSLATION_LOAD_LOCK] = asyncio.Lock()
if integration is not None:
components = {integration}
elif config_flow:
# When it's a config flow, we're going to merge the cached loaded component results
# with the integrations that have not been loaded yet. We merge this at the end.
# We can't cache with config flow, as we can't monitor it during runtime.
components = (await async_get_config_flows(hass)) - hass.config.components
else:
# Only 'state' supports merging, so remove platforms from selection
if category == "state":
components = set(hass.config.components)
else:
components = {
component
for component in hass.config.components
if "." not in component
}
async with lock:
use_cache = integration is None and not config_flow
if use_cache:
cache = hass.data.get(TRANSLATION_FLATTEN_CACHE)
if cache is None:
cache = hass.data[TRANSLATION_FLATTEN_CACHE] = FlatCache(hass)
cache.async_setup()
cached_translations = cache.async_get_cache(language, category)
if cached_translations is not None:
return cached_translations
tasks = [async_get_component_strings(hass, language, components)]
# Fetch the English resources, as a fallback for missing keys
if language != "en":
tasks.append(async_get_component_strings(hass, "en", components))
_LOGGER.debug(
"Cache miss for %s, %s: %s", language, category, ", ".join(components)
)
results = await asyncio.gather(*tasks)
if category == "state":
resource_func = merge_resources
else:
resource_func = build_resources
resources = flatten(resource_func(results[0], components, category))
if language != "en":
base_resources = flatten(resource_func(results[1], components, category))
resources = {**base_resources, **resources}
# The cache must be set while holding the lock
if use_cache:
assert cache is not None
cache.async_set_cache(language, category, resources)
if config_flow:
loaded_comp_resources = await async_get_translations(hass, language, category)
resources.update(loaded_comp_resources)
return resources
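# Illustrative usage sketch (hypothetical integration, not from the original module):
#   translations = await async_get_translations(hass, "en", "config", integration="hue")
# Flattened keys then look like "component.hue.config.step.user.title".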
|
import unittest
import random
import sys
import numpy
import dedupe
class RandomPairsTest(unittest.TestCase):
def test_random_pair(self):
random.seed(123)
if sys.version_info < (3, 0):
target = [(0, 3), (0, 4), (2, 4), (0, 5), (6, 8)]
else:
target = [(0, 4), (2, 3), (0, 6), (3, 6), (0, 7)]
random_pairs = list(dedupe.core.randomPairs(10, 5))
assert random_pairs == target
random.seed(123)
if sys.version_info < (3, 0):
target = [(265, 3429)]
else:
target = [(357, 8322)]
random_pairs = list(dedupe.core.randomPairs(10**4, 1))
assert random_pairs == target
random_pairs = list(dedupe.core.randomPairs(10**10, 1))
def test_random_pair_match(self):
assert len(list(dedupe.core.randomPairsMatch(100, 100, 100))) == 100
assert len(list(dedupe.core.randomPairsMatch(10, 10, 99))) == 99
random.seed(123)
random.seed(123)
if sys.version_info < (3, 0):
target = [(0, 5), (0, 8), (4, 0), (1, 0), (9, 0),
(0, 3), (5, 3), (3, 3), (8, 5), (1, 5)]
else:
target = [(0, 6), (3, 4), (1, 1), (9, 8), (5, 2),
(1, 3), (0, 4), (4, 8), (6, 8), (7, 1)]
pairs = list(dedupe.core.randomPairsMatch(10, 10, 10))
assert pairs == target
pairs = list(dedupe.core.randomPairsMatch(10, 10, 0))
assert pairs == []
class ScoreDuplicates(unittest.TestCase):
def setUp(self):
random.seed(123)
long_string = 'asa;sasdfjasdio;fio;asdnfasdvnvao;asduifvnavjasdfasdfasfasasdfasdfasdfasdfasdfsdfasgnuavpidcvaspdivnaspdivninasduinguipghauipsdfnvaspfighapsdifnasdifnasdpighuignpaguinpgiasidfjasdfjsdofgiongag' # noqa: E501
self.records = iter([((long_string, {'name': 'Margret', 'age': '32'}),
('2', {'name': 'Marga', 'age': '33'})),
(('2', {'name': 'Marga', 'age': '33'}),
('3', {'name': 'Maria', 'age': '19'})),
(('4', {'name': 'Maria', 'age': '19'}),
('5', {'name': 'Monica', 'age': '39'})),
(('6', {'name': 'Monica', 'age': '39'}),
('7', {'name': 'Mira', 'age': '47'})),
(('8', {'name': 'Mira', 'age': '47'}),
('9', {'name': 'Mona', 'age': '9'})),
])
deduper = dedupe.Dedupe([{'field': "name", 'type': 'String'}])
self.data_model = deduper.data_model
self.classifier = deduper.classifier
self.classifier.weights = [-1.0302742719650269]
self.classifier.bias = 4.76
score_dtype = [('pairs', '<U192', 2), ('score', 'f4')]
self.desired_scored_pairs = numpy.array([((long_string, '2'), 0.96),
(['2', '3'], 0.96),
(['4', '5'], 0.78),
(['6', '7'], 0.72),
(['8', '9'], 0.84)],
dtype=score_dtype)
def test_score_duplicates(self):
scores = dedupe.core.scoreDuplicates(self.records,
self.data_model,
self.classifier,
2)
numpy.testing.assert_equal(scores['pairs'],
self.desired_scored_pairs['pairs'])
numpy.testing.assert_allclose(scores['score'],
self.desired_scored_pairs['score'], 2)
def test_score_duplicates_with_zeros(self):
self.classifier.weights = [-1000]
self.classifier.bias = 1000
self.records = iter([(('1', {'name': 'ABCD'}),
('2', {'name': 'EFGH'})),
(('3', {'name': 'IJKL'}),
('4', {'name': 'IJKL'}))
])
scores = dedupe.core.scoreDuplicates(self.records,
self.data_model,
self.classifier,
2)
score_dtype = [('pairs', '<U1', 2), ('score', 'f4')]
self.desired_scored_pairs = numpy.array([(['1', '2'], 0),
(['3', '4'], 1)],
dtype=score_dtype)
numpy.testing.assert_equal(scores['pairs'],
self.desired_scored_pairs['pairs'])
numpy.testing.assert_allclose(scores['score'],
self.desired_scored_pairs['score'], 2)
class FieldDistances(unittest.TestCase):
def test_exact_comparator(self):
deduper = dedupe.Dedupe([{'field': 'name',
'type': 'Exact'}
])
record_pairs = (({'name': 'Shmoo'}, {'name': 'Shmee'}),
({'name': 'Shmoo'}, {'name': 'Shmoo'}))
numpy.testing.assert_array_almost_equal(deduper.data_model.distances(record_pairs),
numpy.array([[0.0],
[1.0]]),
3)
def test_comparator(self):
deduper = dedupe.Dedupe([{'field': 'type',
'type': 'Categorical',
'categories': ['a', 'b', 'c']}])
record_pairs = (({'type': 'a'},
{'type': 'b'}),
({'type': 'a'},
{'type': 'c'}))
numpy.testing.assert_array_almost_equal(deduper.data_model.distances(record_pairs),
numpy.array([[0, 0, 1, 0, 0],
[0, 0, 0, 1, 0]]),
3)
def test_comparator_interaction(self):
deduper = dedupe.Dedupe([{'field': 'type',
'variable name': 'type',
'type': 'Categorical',
'categories': ['a', 'b']},
{'type': 'Interaction',
'interaction variables': ['type', 'name']},
{'field': 'name',
'variable name': 'name',
'type': 'Exact'}])
record_pairs = (({'name': 'steven', 'type': 'a'},
{'name': 'steven', 'type': 'b'}),
({'name': 'steven', 'type': 'b'},
{'name': 'steven', 'type': 'b'}))
numpy.testing.assert_array_almost_equal(deduper.data_model.distances(record_pairs),
numpy.array([[0, 1, 1, 0, 1],
[1, 0, 1, 1, 0]]), 3)
class Unique(unittest.TestCase):
def test_unique(self):
target = ([{1: 1, 2: 2}, {3: 3, 4: 4}],
[{3: 3, 4: 4}, {1: 1, 2: 2}])
assert dedupe.core.unique(
[{1: 1, 2: 2}, {3: 3, 4: 4}, {1: 1, 2: 2}]) in target
if __name__ == "__main__":
unittest.main()
|
import json
import logging
from pathlib import Path
from typing import TYPE_CHECKING, Mapping, Optional, Union
import aiohttp
from redbot.core import Config
from redbot.core.bot import Red
from redbot.core.commands import Cog
from redbot.core.i18n import Translator
from ..errors import YouTubeApiError
if TYPE_CHECKING:
from .. import Audio
log = logging.getLogger("red.cogs.Audio.api.YouTube")
_ = Translator("Audio", Path(__file__))
SEARCH_ENDPOINT = "https://www.googleapis.com/youtube/v3/search"
class YouTubeWrapper:
"""Wrapper for the YouTube Data API."""
def __init__(
self, bot: Red, config: Config, session: aiohttp.ClientSession, cog: Union["Audio", Cog]
):
self.bot = bot
self.config = config
self.session = session
self.api_key: Optional[str] = None
self._token: Mapping[str, str] = {}
self.cog = cog
    async def update_token(self, new_token: Mapping[str, str]):
        """Update the cached YouTube API token."""
        self._token = new_token
async def _get_api_key(
self,
) -> str:
"""Get the stored youtube token."""
if not self._token:
self._token = await self.bot.get_shared_api_tokens("youtube")
self.api_key = self._token.get("api_key", "")
return self.api_key if self.api_key is not None else ""
async def get_call(self, query: str) -> Optional[str]:
"""Make a Get call to youtube data api."""
params = {
"q": query,
"part": "id",
"key": await self._get_api_key(),
"maxResults": 1,
"type": "video",
}
async with self.session.request("GET", SEARCH_ENDPOINT, params=params) as r:
if r.status == 400:
if r.reason == "Bad Request":
raise YouTubeApiError(
_(
"Your YouTube Data API token is invalid.\n"
"Check the YouTube API key again and follow the instructions "
"at `{prefix}audioset youtubeapi`."
)
)
return None
elif r.status == 404:
return None
elif r.status == 403:
if r.reason in ["Forbidden", "quotaExceeded"]:
raise YouTubeApiError(
_(
"YouTube API error code: 403\nYour YouTube API key may have "
"reached the account's query limit for today. Please check "
"<https://developers.google.com/youtube/v3/getting-started#quota> "
"for more information."
)
)
return None
else:
search_response = await r.json(loads=json.loads)
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
return f"https://www.youtube.com/watch?v={search_result['id']['videoId']}"
return None
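# Illustrative usage sketch (hypothetical query, not part of the original cog):
#   url = await youtube_api.get_call("rick astley never gonna give you up")
#   # -> "https://www.youtube.com/watch?v=<videoId>" on success, None otherwise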
|
from __future__ import division, print_function, absolute_import
import numpy
import pickle
import time
import tensorflow as tf
import tflearn
from tflearn.layers.conv import conv_2d, max_pool_2d
from src.training_data import CACHE_PATH, METADATA_PATH, load_training_tiles, equalize_data, \
format_as_onehot_arrays, has_ways_in_center
MODEL_METADATA_FILENAME = 'model_metadata.pickle'
def train_on_cached_data(neural_net_type, number_of_epochs):
"""Load tiled/cached training data in batches, and train the neural net."""
    with open(CACHE_PATH + METADATA_PATH, 'rb') as infile:  # pickle needs binary mode
training_info = pickle.load(infile)
bands = training_info['bands']
tile_size = training_info['tile_size']
training_images = []
onehot_training_labels = []
model = None
    # out of every 10,000 tiles, usually only 100+ have a road through the middle;
    # because we want half on / half off, most images get discarded
EQUALIZATION_BATCH_SIZE = 10000
# the number of times to pull EQUALIZATION_BATCH_SIZE images from disk
NUMBER_OF_BATCHES = 50
for x in range(0, NUMBER_OF_BATCHES):
new_label_paths = load_training_tiles(EQUALIZATION_BATCH_SIZE)
print("Got batch of {} labels".format(len(new_label_paths)))
new_training_images, new_onehot_training_labels = format_as_onehot_arrays(new_label_paths)
equal_count_way_list, equal_count_tile_list = equalize_data(new_onehot_training_labels,
new_training_images, False)
        training_images.extend(equal_count_tile_list)
        onehot_training_labels.extend(equal_count_way_list)
        # once we have 100 training images, train on a mini batch
if len(training_images) >= 100:
# continue training the model with the new data set
model = train_with_data(onehot_training_labels, training_images, neural_net_type, bands,
tile_size, number_of_epochs, model)
training_images = []
onehot_training_labels = []
save_model(model, neural_net_type, bands, tile_size)
return model
def train_with_data(onehot_training_labels, training_images,
neural_net_type, band_list, tile_size, number_of_epochs, model):
"""Package data for tensorflow and analyze."""
npy_training_labels = numpy.asarray(onehot_training_labels)
# normalize 0-255 values to 0-1
norm_training_images = numpy.array([img_loc_tuple[0] for img_loc_tuple in training_images])
norm_train_images = norm_training_images.astype(numpy.float32)
norm_train_images = numpy.multiply(norm_train_images, 1.0 / 255.0)
with tf.Graph().as_default():
if not model:
on_band_count = 0
for b in band_list:
if b == 1:
on_band_count += 1
model = model_for_type(neural_net_type, tile_size, on_band_count)
model.fit(norm_train_images,
npy_training_labels,
n_epoch=number_of_epochs,
shuffle=False,
validation_set=.1,
show_metric=True,
run_id=time.strftime("%Y%m%d-%H%M%S"))
return model
def model_for_type(neural_net_type, tile_size, on_band_count):
"""The neural_net_type can be: one_layer_relu,
one_layer_relu_conv,
two_layer_relu_conv."""
network = tflearn.input_data(shape=[None, tile_size, tile_size, on_band_count])
# NN architectures mirror ch. 3 of www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
if neural_net_type == 'one_layer_relu':
network = tflearn.fully_connected(network, 64, activation='relu')
elif neural_net_type == 'one_layer_relu_conv':
network = conv_2d(network, 64, 12, strides=4, activation='relu')
network = max_pool_2d(network, 3)
elif neural_net_type == 'two_layer_relu_conv':
network = conv_2d(network, 64, 12, strides=4, activation='relu')
network = max_pool_2d(network, 3)
network = conv_2d(network, 128, 4, activation='relu')
    else:
        # Fail fast rather than silently building a model with no hidden layers
        raise ValueError("Unknown neural net type: {}".format(neural_net_type))
# classify as road or not road
softmax = tflearn.fully_connected(network, 2, activation='softmax')
# hyperparameters based on www.cs.toronto.edu/~vmnih/docs/Mnih_Volodymyr_PhD_Thesis.pdf
momentum = tflearn.optimizers.Momentum(
learning_rate=.005, momentum=0.9,
lr_decay=0.0002, name='Momentum')
net = tflearn.regression(softmax, optimizer=momentum, loss='categorical_crossentropy')
return tflearn.DNN(net, tensorboard_verbose=0)
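# Illustrative usage sketch (hypothetical values, not part of the original module):
#   model = model_for_type('one_layer_relu_conv', tile_size=64, on_band_count=3)
#   model.fit(norm_train_images, npy_training_labels, n_epoch=5)
# tile_size and on_band_count must match the metadata of the cached training data.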
def save_model(model, neural_net_type, bands, tile_size):
"""Save a DeepOSM tflearn model and its metadata. """
model.save(CACHE_PATH + 'model.pickle')
# dump the training metadata to disk, for later loading model from disk
training_info = {'neural_net_type': neural_net_type,
'bands': bands,
'tile_size': tile_size}
    with open(CACHE_PATH + MODEL_METADATA_FILENAME, 'wb') as outfile:  # pickle needs binary mode
pickle.dump(training_info, outfile)
def load_model(neural_net_type, tile_size, on_band_count):
"""Load the TensorFlow model serialized at path."""
model = model_for_type(neural_net_type, tile_size, on_band_count)
model.load(CACHE_PATH + 'model.pickle')
return model
def list_findings(labels, test_images, model):
"""Return lists of predicted false negative/positive labels/data."""
npy_test_images = numpy.array([img_loc_tuple[0] for img_loc_tuple in test_images])
npy_test_images = npy_test_images.astype(numpy.float32)
npy_test_images = numpy.multiply(npy_test_images, 1.0 / 255.0)
false_pos = []
fp_images = []
index = 0
for x in range(0, len(npy_test_images) - 100, 100):
images = npy_test_images[x:x + 100]
image_tuples = test_images[x:x + 100]
index, false_pos, fp_images = sort_findings(model,
image_tuples,
images,
labels,
false_pos,
fp_images,
index)
images = npy_test_images[index:]
image_tuples = test_images[index:]
index, false_pos, fp_images = sort_findings(model,
image_tuples,
images,
labels,
false_pos,
fp_images,
index)
return false_pos, fp_images
def sort_findings(model, image_tuples, test_images, labels, false_positives, fp_images, index):
"""False positive if model says road doesn't exist, but OpenStreetMap says it does.
False negative if model says road exists, but OpenStreetMap doesn't list it.
"""
pred_index = 0
for p in model.predict(test_images):
label = labels[index][0]
if has_ways_in_center(label, 1) and p[0] > .5:
false_positives.append(p)
fp_images.append(image_tuples[pred_index])
# elif not has_ways_in_center(label, 16) and p[0] <= .5:
# false_negatives.append(p)
# fn_images.append(image_tuples[pred_index])
pred_index += 1
index += 1
return index, false_positives, fp_images
def predictions_for_tiles(test_images, model):
"""Batch predictions on the test image set, to avoid a memory spike."""
npy_test_images = numpy.array([img_loc_tuple[0] for img_loc_tuple in test_images])
test_images = npy_test_images.astype(numpy.float32)
test_images = numpy.multiply(test_images, 1.0 / 255.0)
all_predictions = []
for x in range(0, len(test_images) - 100, 100):
for p in model.predict(test_images[x:x + 100]):
all_predictions.append(p)
for p in model.predict(test_images[len(all_predictions):]):
all_predictions.append(p)
assert len(all_predictions) == len(test_images)
return all_predictions
|
import logging
from openwrt_luci_rpc import OpenWrtRpc
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_SSL = False
DEFAULT_VERIFY_SSL = True
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a Luci scanner."""
scanner = LuciDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class LuciDeviceScanner(DeviceScanner):
"""This class scans for devices connected to an OpenWrt router."""
def __init__(self, config):
"""Initialize the scanner."""
self.router = OpenWrtRpc(
config[CONF_HOST],
config[CONF_USERNAME],
config[CONF_PASSWORD],
config[CONF_SSL],
config[CONF_VERIFY_SSL],
)
self.last_results = {}
self.success_init = self.router.is_logged_in()
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device.mac for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
name = next(
(result.hostname for result in self.last_results if result.mac == device),
None,
)
return name
def get_extra_attributes(self, device):
"""
Get extra attributes of a device.
Some known extra attributes that may be returned in the device tuple
include MAC address (mac), network device (dev), IP address
(ip), reachable status (reachable), associated router
(host), hostname if known (hostname) among others.
"""
device = next(
(result for result in self.last_results if result.mac == device), None
)
return device._asdict()
def _update_info(self):
"""Check the Luci router for devices."""
result = self.router.get_all_connected_devices(only_reachable=True)
_LOGGER.debug("Luci get_all_connected_devices returned: %s", result)
last_results = []
for device in result:
if (
not hasattr(self.router.router.owrt_version, "release")
or not self.router.router.owrt_version.release
or self.router.router.owrt_version.release[0] < 19
or device.reachable
):
last_results.append(device)
self.last_results = last_results
|
import flatbuffers
class AuthCryptosignChallenge(object):
__slots__ = ['_tab']
@classmethod
def GetRootAsAuthCryptosignChallenge(cls, buf, offset):
n = flatbuffers.encode.Get(flatbuffers.packer.uoffset, buf, offset)
x = AuthCryptosignChallenge()
x.Init(buf, n + offset)
return x
# AuthCryptosignChallenge
def Init(self, buf, pos):
self._tab = flatbuffers.table.Table(buf, pos)
# AuthCryptosignChallenge
def ChannelBinding(self):
o = flatbuffers.number_types.UOffsetTFlags.py_type(self._tab.Offset(4))
if o != 0:
return self._tab.Get(flatbuffers.number_types.Uint8Flags, o + self._tab.Pos)
return 0
def AuthCryptosignChallengeStart(builder): builder.StartObject(1)
def AuthCryptosignChallengeAddChannelBinding(builder, channelBinding): builder.PrependUint8Slot(0, channelBinding, 0)
def AuthCryptosignChallengeEnd(builder): return builder.EndObject()
|
import os.path
from weblate.formats.tests.test_formats import AutoFormatTest
from weblate.formats.txt import AppStoreFormat
from weblate.trans.tests.utils import get_test_file
APPSTORE_FILE = get_test_file("short_description.txt")
class AppStoreFormatTest(AutoFormatTest):
FORMAT = AppStoreFormat
FILE = APPSTORE_FILE
MIME = "text/plain"
EXT = "txt"
COUNT = 1
MASK = "market/*"
EXPECTED_PATH = "market/cs_CZ"
FIND = "Hello world"
FIND_CONTEXT = "short_description.txt:1"
FIND_MATCH = "Hello world"
MATCH = None
BASE = APPSTORE_FILE
EXPECTED_FLAGS = "max-length:80"
def parse_file(self, filename):
return self.FORMAT(os.path.dirname(filename))
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from compare_gan.architectures import arch_ops
import numpy as np
import tensorflow as tf
class ArchOpsTest(tf.test.TestCase):
def testBatchNorm(self):
with tf.Graph().as_default():
# 4 images with resolution 2x1 and 3 channels.
x1 = tf.constant([[[5, 7, 2]], [[5, 8, 8]]], dtype=tf.float32)
x2 = tf.constant([[[1, 2, 0]], [[4, 0, 4]]], dtype=tf.float32)
x3 = tf.constant([[[6, 2, 6]], [[5, 0, 5]]], dtype=tf.float32)
x4 = tf.constant([[[2, 4, 2]], [[6, 4, 1]]], dtype=tf.float32)
x = tf.stack([x1, x2, x3, x4])
self.assertAllEqual(x.shape.as_list(), [4, 2, 1, 3])
core_bn = tf.layers.batch_normalization(x, training=True)
contrib_bn = tf.contrib.layers.batch_norm(x, is_training=True)
custom_bn = arch_ops.batch_norm(x, is_training=True)
with self.session() as sess:
sess.run(tf.global_variables_initializer())
core_bn, contrib_bn, custom_bn = sess.run(
[core_bn, contrib_bn, custom_bn])
tf.logging.info("core_bn: %s", core_bn[0])
tf.logging.info("contrib_bn: %s", contrib_bn[0])
tf.logging.info("custom_bn: %s", custom_bn[0])
self.assertAllClose(core_bn, contrib_bn)
self.assertAllClose(custom_bn, contrib_bn)
expected_values = np.asarray(
[[[[0.4375205, 1.30336881, -0.58830315]],
[[0.4375205, 1.66291881, 1.76490951]]],
[[[-1.89592218, -0.49438119, -1.37270737]],
[[-0.14584017, -1.21348119, 0.19610107]]],
[[[1.02088118, -0.49438119, 0.98050523]],
[[0.4375205, -1.21348119, 0.58830321]]],
[[[-1.31256151, 0.22471881, -0.58830315]],
[[1.02088118, 0.22471881, -0.98050523]]]],
dtype=np.float32)
self.assertAllClose(custom_bn, expected_values)
  def testAccumulatedMomentsDuringTraining(self):
with tf.Graph().as_default():
mean_in = tf.placeholder(tf.float32, shape=[2])
variance_in = tf.placeholder(tf.float32, shape=[2])
mean, variance = arch_ops._accumulated_moments_for_inference(
mean=mean_in, variance=variance_in, is_training=True)
variables_by_name = {v.op.name: v for v in tf.global_variables()}
tf.logging.error(variables_by_name)
accu_mean = variables_by_name["accu/accu_mean"]
accu_variance = variables_by_name["accu/accu_variance"]
accu_counter = variables_by_name["accu/accu_counter"]
with self.session() as sess:
sess.run(tf.global_variables_initializer())
m1, v1 = sess.run(
[mean, variance],
feed_dict={mean_in: [1.0, 2.0], variance_in: [3.0, 4.0]})
self.assertAllClose(m1, [1.0, 2.0])
self.assertAllClose(v1, [3.0, 4.0])
m2, v2 = sess.run(
[mean, variance],
feed_dict={mean_in: [5.0, 6.0], variance_in: [7.0, 8.0]})
self.assertAllClose(m2, [5.0, 6.0])
self.assertAllClose(v2, [7.0, 8.0])
am, av, ac = sess.run([accu_mean, accu_variance, accu_counter])
self.assertAllClose(am, [0.0, 0.0])
self.assertAllClose(av, [0.0, 0.0])
self.assertAllClose([ac], [0.0])
  def testAccumulatedMomentsDuringEval(self):
with tf.Graph().as_default():
mean_in = tf.placeholder(tf.float32, shape=[2])
variance_in = tf.placeholder(tf.float32, shape=[2])
mean, variance = arch_ops._accumulated_moments_for_inference(
mean=mean_in, variance=variance_in, is_training=False)
variables_by_name = {v.op.name: v for v in tf.global_variables()}
tf.logging.error(variables_by_name)
accu_mean = variables_by_name["accu/accu_mean"]
accu_variance = variables_by_name["accu/accu_variance"]
accu_counter = variables_by_name["accu/accu_counter"]
update_accus = variables_by_name["accu/update_accus"]
with self.session() as sess:
sess.run(tf.global_variables_initializer())
# Fill accumulators.
sess.run(tf.assign(update_accus, 1))
m1, v1 = sess.run(
[mean, variance],
feed_dict={mean_in: [1.0, 2.0], variance_in: [3.0, 4.0]})
self.assertAllClose(m1, [1.0, 2.0])
self.assertAllClose(v1, [3.0, 4.0])
m2, v2 = sess.run(
[mean, variance],
feed_dict={mean_in: [5.0, 6.0], variance_in: [7.0, 8.0]})
self.assertAllClose(m2, [3.0, 4.0])
self.assertAllClose(v2, [5.0, 6.0])
# Check accumulators.
am, av, ac = sess.run([accu_mean, accu_variance, accu_counter])
self.assertAllClose(am, [6.0, 8.0])
self.assertAllClose(av, [10.0, 12.0])
self.assertAllClose([ac], [2.0])
# Use accumulators.
sess.run(tf.assign(update_accus, 0))
m3, v3 = sess.run(
[mean, variance],
feed_dict={mean_in: [2.0, 2.0], variance_in: [3.0, 3.0]})
self.assertAllClose(m3, [3.0, 4.0])
self.assertAllClose(v3, [5.0, 6.0])
am, av, ac = sess.run([accu_mean, accu_variance, accu_counter])
self.assertAllClose(am, [6.0, 8.0])
self.assertAllClose(av, [10.0, 12.0])
self.assertAllClose([ac], [2.0])
if __name__ == "__main__":
tf.test.main()
|
import asyncio
import inspect
from functools import update_wrapper
from markupsafe import Markup
from .environment import TemplateModule
from .runtime import LoopContext
from .utils import concat
from .utils import internalcode
from .utils import missing
async def concat_async(async_gen):
rv = []
async def collect():
async for event in async_gen:
rv.append(event)
await collect()
return concat(rv)
async def generate_async(self, *args, **kwargs):
vars = dict(*args, **kwargs)
try:
async for event in self.root_render_func(self.new_context(vars)):
yield event
except Exception:
yield self.environment.handle_exception()
def wrap_generate_func(original_generate):
def _convert_generator(self, loop, args, kwargs):
async_gen = self.generate_async(*args, **kwargs)
try:
while 1:
yield loop.run_until_complete(async_gen.__anext__())
except StopAsyncIteration:
pass
def generate(self, *args, **kwargs):
if not self.environment.is_async:
return original_generate(self, *args, **kwargs)
return _convert_generator(self, asyncio.get_event_loop(), args, kwargs)
return update_wrapper(generate, original_generate)
async def render_async(self, *args, **kwargs):
if not self.environment.is_async:
raise RuntimeError("The environment was not created with async mode enabled.")
vars = dict(*args, **kwargs)
ctx = self.new_context(vars)
try:
return await concat_async(self.root_render_func(ctx))
except Exception:
return self.environment.handle_exception()
def wrap_render_func(original_render):
def render(self, *args, **kwargs):
if not self.environment.is_async:
return original_render(self, *args, **kwargs)
loop = asyncio.get_event_loop()
return loop.run_until_complete(self.render_async(*args, **kwargs))
return update_wrapper(render, original_render)
def wrap_block_reference_call(original_call):
@internalcode
async def async_call(self):
rv = await concat_async(self._stack[self._depth](self._context))
if self._context.eval_ctx.autoescape:
rv = Markup(rv)
return rv
@internalcode
def __call__(self):
if not self._context.environment.is_async:
return original_call(self)
return async_call(self)
return update_wrapper(__call__, original_call)
def wrap_macro_invoke(original_invoke):
@internalcode
async def async_invoke(self, arguments, autoescape):
rv = await self._func(*arguments)
if autoescape:
rv = Markup(rv)
return rv
@internalcode
def _invoke(self, arguments, autoescape):
if not self._environment.is_async:
return original_invoke(self, arguments, autoescape)
return async_invoke(self, arguments, autoescape)
return update_wrapper(_invoke, original_invoke)
@internalcode
async def get_default_module_async(self):
if self._module is not None:
return self._module
self._module = rv = await self.make_module_async()
return rv
def wrap_default_module(original_default_module):
@internalcode
def _get_default_module(self):
if self.environment.is_async:
raise RuntimeError("Template module attribute is unavailable in async mode")
return original_default_module(self)
return _get_default_module
async def make_module_async(self, vars=None, shared=False, locals=None):
context = self.new_context(vars, shared, locals)
body_stream = []
async for item in self.root_render_func(context):
body_stream.append(item)
return TemplateModule(self, context, body_stream)
def patch_template():
from . import Template
Template.generate = wrap_generate_func(Template.generate)
Template.generate_async = update_wrapper(generate_async, Template.generate_async)
Template.render_async = update_wrapper(render_async, Template.render_async)
Template.render = wrap_render_func(Template.render)
Template._get_default_module = wrap_default_module(Template._get_default_module)
Template._get_default_module_async = get_default_module_async
Template.make_module_async = update_wrapper(
make_module_async, Template.make_module_async
)
def patch_runtime():
from .runtime import BlockReference, Macro
BlockReference.__call__ = wrap_block_reference_call(BlockReference.__call__)
Macro._invoke = wrap_macro_invoke(Macro._invoke)
def patch_filters():
from .filters import FILTERS
from .asyncfilters import ASYNC_FILTERS
FILTERS.update(ASYNC_FILTERS)
def patch_all():
patch_template()
patch_runtime()
patch_filters()
async def auto_await(value):
if inspect.isawaitable(value):
return await value
return value
async def auto_aiter(iterable):
if hasattr(iterable, "__aiter__"):
async for item in iterable:
yield item
return
for item in iterable:
yield item
class AsyncLoopContext(LoopContext):
_to_iterator = staticmethod(auto_aiter)
@property
async def length(self):
if self._length is not None:
return self._length
try:
self._length = len(self._iterable)
except TypeError:
iterable = [x async for x in self._iterator]
self._iterator = self._to_iterator(iterable)
self._length = len(iterable) + self.index + (self._after is not missing)
return self._length
@property
async def revindex0(self):
return await self.length - self.index
@property
async def revindex(self):
return await self.length - self.index0
async def _peek_next(self):
if self._after is not missing:
return self._after
try:
self._after = await self._iterator.__anext__()
except StopAsyncIteration:
self._after = missing
return self._after
@property
async def last(self):
return await self._peek_next() is missing
@property
async def nextitem(self):
rv = await self._peek_next()
if rv is missing:
return self._undefined("there is no next item")
return rv
def __aiter__(self):
return self
async def __anext__(self):
if self._after is not missing:
rv = self._after
self._after = missing
else:
rv = await self._iterator.__anext__()
self.index0 += 1
self._before = self._current
self._current = rv
return rv, self
patch_all()
|
from django.db import migrations
def alter_role(apps, schema_editor):
if schema_editor.connection.vendor != "postgresql":
return
settings = schema_editor.connection.settings_dict
template = "ALTER ROLE {} SET {{}} = {{}}".format(
schema_editor.quote_name(settings.get("ALTER_ROLE", settings["USER"]))
)
schema_editor.execute(template.format("timezone", "UTC"))
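# Illustrative sketch (hypothetical settings, not part of the original migration):
# with USER="weblate" and no ALTER_ROLE override, the statement executed is
#   ALTER ROLE "weblate" SET timezone = UTC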
class Migration(migrations.Migration):
dependencies = []
operations = [
migrations.RunPython(
alter_role, migrations.RunPython.noop, elidable=False, atomic=False
)
]
|
from datetime import timedelta
import logging
from pydanfossair.commands import ReadCommand
from pydanfossair.danfossclient import DanfossClient
import voluptuous as vol
from homeassistant.const import CONF_HOST
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DANFOSS_AIR_PLATFORMS = ["sensor", "binary_sensor", "switch"]
DOMAIN = "danfoss_air"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=60)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_HOST): cv.string})}, extra=vol.ALLOW_EXTRA
)
def setup(hass, config):
"""Set up the Danfoss Air component."""
conf = config[DOMAIN]
hass.data[DOMAIN] = DanfossAir(conf[CONF_HOST])
for platform in DANFOSS_AIR_PLATFORMS:
discovery.load_platform(hass, platform, DOMAIN, {}, config)
return True
class DanfossAir:
"""Handle all communication with Danfoss Air CCM unit."""
def __init__(self, host):
"""Initialize the Danfoss Air CCM connection."""
self._data = {}
self._client = DanfossClient(host)
def get_value(self, item):
"""Get value for sensor."""
return self._data.get(item)
def update_state(self, command, state_command):
"""Send update command to Danfoss Air CCM."""
self._data[state_command] = self._client.command(command)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Use the data from Danfoss Air API."""
_LOGGER.debug("Fetching data from Danfoss Air CCM module")
self._data[ReadCommand.exhaustTemperature] = self._client.command(
ReadCommand.exhaustTemperature
)
self._data[ReadCommand.outdoorTemperature] = self._client.command(
ReadCommand.outdoorTemperature
)
self._data[ReadCommand.supplyTemperature] = self._client.command(
ReadCommand.supplyTemperature
)
self._data[ReadCommand.extractTemperature] = self._client.command(
ReadCommand.extractTemperature
)
self._data[ReadCommand.humidity] = round(
self._client.command(ReadCommand.humidity), 2
)
self._data[ReadCommand.filterPercent] = round(
self._client.command(ReadCommand.filterPercent), 2
)
self._data[ReadCommand.bypass] = self._client.command(ReadCommand.bypass)
self._data[ReadCommand.fan_step] = self._client.command(ReadCommand.fan_step)
self._data[ReadCommand.supply_fan_speed] = self._client.command(
ReadCommand.supply_fan_speed
)
self._data[ReadCommand.exhaust_fan_speed] = self._client.command(
ReadCommand.exhaust_fan_speed
)
self._data[ReadCommand.away_mode] = self._client.command(ReadCommand.away_mode)
self._data[ReadCommand.boost] = self._client.command(ReadCommand.boost)
self._data[ReadCommand.battery_percent] = self._client.command(
ReadCommand.battery_percent
)
self._data[ReadCommand.bypass] = self._client.command(ReadCommand.bypass)
self._data[ReadCommand.automatic_bypass] = self._client.command(
ReadCommand.automatic_bypass
)
_LOGGER.debug("Done fetching data from Danfoss Air CCM module")
|
import logging
from typing import Optional
from homeassistant.components.geo_location import GeolocationEvent
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_TIME,
CONF_UNIT_SYSTEM_IMPERIAL,
LENGTH_KILOMETERS,
LENGTH_MILES,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity_registry import async_get_registry
from homeassistant.util.unit_system import IMPERIAL_SYSTEM
from .const import DOMAIN, FEED
_LOGGER = logging.getLogger(__name__)
ATTR_DEPTH = "depth"
ATTR_EXTERNAL_ID = "external_id"
ATTR_LOCALITY = "locality"
ATTR_MAGNITUDE = "magnitude"
ATTR_MMI = "mmi"
ATTR_PUBLICATION_DATE = "publication_date"
ATTR_QUALITY = "quality"
# An update of this entity does not make a web request; it only uses internal data.
PARALLEL_UPDATES = 0
SOURCE = "geonetnz_quakes"
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the GeoNet NZ Quakes Feed platform."""
manager = hass.data[DOMAIN][FEED][entry.entry_id]
@callback
def async_add_geolocation(feed_manager, integration_id, external_id):
"""Add gelocation entity from feed."""
new_entity = GeonetnzQuakesEvent(feed_manager, integration_id, external_id)
_LOGGER.debug("Adding geolocation %s", new_entity)
async_add_entities([new_entity], True)
manager.listeners.append(
async_dispatcher_connect(
hass, manager.async_event_new_entity(), async_add_geolocation
)
)
# Do not wait for update here so that the setup can be completed and because an
# update will fetch data from the feed via HTTP and then process that data.
hass.async_create_task(manager.async_update())
_LOGGER.debug("Geolocation setup done")
class GeonetnzQuakesEvent(GeolocationEvent):
"""This represents an external event with GeoNet NZ Quakes feed data."""
def __init__(self, feed_manager, integration_id, external_id):
"""Initialize entity with data from feed entry."""
self._feed_manager = feed_manager
self._integration_id = integration_id
self._external_id = external_id
self._title = None
self._distance = None
self._latitude = None
self._longitude = None
self._attribution = None
self._depth = None
self._locality = None
self._magnitude = None
self._mmi = None
self._quality = None
self._time = None
self._remove_signal_delete = None
self._remove_signal_update = None
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self._remove_signal_delete = async_dispatcher_connect(
self.hass,
f"geonetnz_quakes_delete_{self._external_id}",
self._delete_callback,
)
self._remove_signal_update = async_dispatcher_connect(
self.hass,
f"geonetnz_quakes_update_{self._external_id}",
self._update_callback,
)
async def async_will_remove_from_hass(self) -> None:
"""Call when entity will be removed from hass."""
self._remove_signal_delete()
self._remove_signal_update()
# Remove from entity registry.
entity_registry = await async_get_registry(self.hass)
if self.entity_id in entity_registry.entities:
entity_registry.async_remove(self.entity_id)
@callback
def _delete_callback(self):
"""Remove this entity."""
self.hass.async_create_task(self.async_remove())
@callback
def _update_callback(self):
"""Call update method."""
self.async_schedule_update_ha_state(True)
@property
def should_poll(self):
"""No polling needed for GeoNet NZ Quakes feed location events."""
return False
async def async_update(self):
"""Update this entity from the data held in the feed manager."""
_LOGGER.debug("Updating %s", self._external_id)
feed_entry = self._feed_manager.get_entry(self._external_id)
if feed_entry:
self._update_from_feed(feed_entry)
def _update_from_feed(self, feed_entry):
"""Update the internal state from the provided feed entry."""
self._title = feed_entry.title
# Convert distance if not metric system.
if self.hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL:
self._distance = IMPERIAL_SYSTEM.length(
feed_entry.distance_to_home, LENGTH_KILOMETERS
)
else:
self._distance = feed_entry.distance_to_home
self._latitude = feed_entry.coordinates[0]
self._longitude = feed_entry.coordinates[1]
self._attribution = feed_entry.attribution
self._depth = feed_entry.depth
self._locality = feed_entry.locality
self._magnitude = feed_entry.magnitude
self._mmi = feed_entry.mmi
self._quality = feed_entry.quality
self._time = feed_entry.time
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID containing latitude/longitude and external id."""
return f"{self._integration_id}_{self._external_id}"
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return "mdi:pulse"
@property
def source(self) -> str:
"""Return source value of this external event."""
return SOURCE
@property
def name(self) -> Optional[str]:
"""Return the name of the entity."""
return self._title
@property
def distance(self) -> Optional[float]:
"""Return distance value of this external event."""
return self._distance
@property
def latitude(self) -> Optional[float]:
"""Return latitude value of this external event."""
return self._latitude
@property
def longitude(self) -> Optional[float]:
"""Return longitude value of this external event."""
return self._longitude
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
if self.hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL:
return LENGTH_MILES
return LENGTH_KILOMETERS
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = {}
for key, value in (
(ATTR_EXTERNAL_ID, self._external_id),
(ATTR_ATTRIBUTION, self._attribution),
(ATTR_DEPTH, self._depth),
(ATTR_LOCALITY, self._locality),
(ATTR_MAGNITUDE, self._magnitude),
(ATTR_MMI, self._mmi),
(ATTR_QUALITY, self._quality),
(ATTR_TIME, self._time),
):
if value or isinstance(value, bool):
attributes[key] = value
return attributes
|
from homeassistant.components.abode import DOMAIN as ABODE_DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def setup_platform(hass, platform):
"""Set up the Abode platform."""
mock_entry = MockConfigEntry(
domain=ABODE_DOMAIN,
data={CONF_USERNAME: "[email protected]", CONF_PASSWORD: "password"},
)
mock_entry.add_to_hass(hass)
with patch("homeassistant.components.abode.ABODE_PLATFORMS", [platform]), patch(
"abodepy.event_controller.sio"
), patch("abodepy.utils.save_cache"):
assert await async_setup_component(hass, ABODE_DOMAIN, {})
await hass.async_block_till_done()
return mock_entry
|
from utils import WriterTC
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.ureports.html_writer import *
class HTMLWriterTC(TestCase, WriterTC):
def setUp(self):
self.writer = HTMLWriter(1)
# Section tests ###########################################################
section_base = '''<div>
<h1>Section title</h1>
<p>Section\'s description.
Blabla bla</p></div>
'''
section_nested = '''<div>\n<h1>Section title</h1>\n<p>Section\'s description.\nBlabla bla</p><div>\n<h2>Subsection</h2>\n<p>Sub section description</p></div>\n</div>\n'''
# List tests ##############################################################
list_base = '''<ul>\n<li>item1</li>\n<li>item2</li>\n<li>item3</li>\n<li>item4</li>\n</ul>\n'''
nested_list = '''<ul>
<li><p>blabla<ul>
<li>1</li>
<li>2</li>
<li>3</li>
</ul>
</p></li>
<li>an other point</li>
</ul>
'''
# Table tests #############################################################
table_base = '''<table>\n<tr class="odd">\n<td>head1</td>\n<td>head2</td>\n</tr>\n<tr class="even">\n<td>cell1</td>\n<td>cell2</td>\n</tr>\n</table>\n'''
field_table = '''<table class="field" id="mytable">\n<tr class="odd">\n<td>f1</td>\n<td>v1</td>\n</tr>\n<tr class="even">\n<td>f22</td>\n<td>v22</td>\n</tr>\n<tr class="odd">\n<td>f333</td>\n<td>v333</td>\n</tr>\n</table>\n'''
advanced_table = '''<table class="whatever" id="mytable">\n<tr class="header">\n<th>field</th>\n<th>value</th>\n</tr>\n<tr class="even">\n<td>f1</td>\n<td>v1</td>\n</tr>\n<tr class="odd">\n<td>f22</td>\n<td>v22</td>\n</tr>\n<tr class="even">\n<td>f333</td>\n<td>v333</td>\n</tr>\n<tr class="odd">\n<td> <a href="http://www.perdu.com">toi perdu ?</a></td>\n<td> </td>\n</tr>\n</table>\n'''
# VerbatimText tests ######################################################
verbatim_base = '''<pre>blablabla</pre>'''
if __name__ == '__main__':
unittest_main()
|
from homeassistant.const import ATTR_ENTITY_ID, ATTR_NAME
from homeassistant.core import callback
from . import ATTR_SOURCE, DOMAIN, EVENT_AUTOMATION_TRIGGERED
@callback
def async_describe_events(hass, async_describe_event): # type: ignore
"""Describe logbook events."""
@callback
def async_describe_logbook_event(event): # type: ignore
"""Describe a logbook event."""
data = event.data
message = "has been triggered"
if ATTR_SOURCE in data:
message = f"{message} by {data[ATTR_SOURCE]}"
return {
"name": data.get(ATTR_NAME),
"message": message,
"source": data.get(ATTR_SOURCE),
"entity_id": data.get(ATTR_ENTITY_ID),
}
async_describe_event(
DOMAIN, EVENT_AUTOMATION_TRIGGERED, async_describe_logbook_event
)
|
from datetime import timedelta
import logging
import requests
from homeassistant.components import nest
from homeassistant.components.camera import PLATFORM_SCHEMA, SUPPORT_ON_OFF, Camera
from homeassistant.util.dt import utcnow
_LOGGER = logging.getLogger(__name__)
NEST_BRAND = "Nest"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a Nest Cam.
No longer in use.
"""
async def async_setup_legacy_entry(hass, entry, async_add_entities):
"""Set up a Nest sensor based on a config entry."""
camera_devices = await hass.async_add_executor_job(
hass.data[nest.DATA_NEST].cameras
)
cameras = [NestCamera(structure, device) for structure, device in camera_devices]
async_add_entities(cameras, True)
class NestCamera(Camera):
"""Representation of a Nest Camera."""
def __init__(self, structure, device):
"""Initialize a Nest Camera."""
super().__init__()
self.structure = structure
self.device = device
self._location = None
self._name = None
self._online = None
self._is_streaming = None
self._is_video_history_enabled = False
        # Assume no Nest Aware subscription by default; update() adjusts this.
self._time_between_snapshots = timedelta(seconds=30)
self._last_image = None
self._next_snapshot_at = None
@property
def name(self):
"""Return the name of the nest, if any."""
return self._name
@property
def unique_id(self):
"""Return the serial number."""
return self.device.device_id
@property
def device_info(self):
"""Return information about the device."""
return {
"identifiers": {(nest.DOMAIN, self.device.device_id)},
"name": self.device.name_long,
"manufacturer": "Nest Labs",
"model": "Camera",
}
@property
def should_poll(self):
"""Nest camera should poll periodically."""
return True
@property
def is_recording(self):
"""Return true if the device is recording."""
return self._is_streaming
@property
def brand(self):
"""Return the brand of the camera."""
return NEST_BRAND
@property
def supported_features(self):
"""Nest Cam support turn on and off."""
return SUPPORT_ON_OFF
@property
def is_on(self):
"""Return true if on."""
return self._online and self._is_streaming
def turn_off(self):
"""Turn off camera."""
_LOGGER.debug("Turn off camera %s", self._name)
        # The Nest API call happens in the is_streaming setter;
        # device.is_streaming will not reflect the change until the camera
        # has finished processing the request.
self.device.is_streaming = False
def turn_on(self):
"""Turn on camera."""
if not self._online:
_LOGGER.error("Camera %s is offline", self._name)
return
_LOGGER.debug("Turn on camera %s", self._name)
        # The Nest API call happens in the is_streaming setter;
        # device.is_streaming will not reflect the change until the camera
        # has finished processing the request.
self.device.is_streaming = True
def update(self):
"""Cache value from Python-nest."""
self._location = self.device.where
self._name = self.device.name
self._online = self.device.online
self._is_streaming = self.device.is_streaming
self._is_video_history_enabled = self.device.is_video_history_enabled
        if self._is_video_history_enabled:
            # Nest Aware subscription: up to 10 snapshots/min
            self._time_between_snapshots = timedelta(seconds=6)
        else:
            # Otherwise, limit to 2 snapshots/min
            self._time_between_snapshots = timedelta(seconds=30)
def _ready_for_snapshot(self, now):
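        # Rate-limit snapshots: only fetch a new image once the previously
        # scheduled snapshot time has passed.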
return self._next_snapshot_at is None or now > self._next_snapshot_at
def camera_image(self):
"""Return a still image response from the camera."""
now = utcnow()
if self._ready_for_snapshot(now):
url = self.device.snapshot_url
try:
response = requests.get(url)
except requests.exceptions.RequestException as error:
_LOGGER.error("Error getting camera image: %s", error)
return None
self._next_snapshot_at = now + self._time_between_snapshots
self._last_image = response.content
return self._last_image
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from test import run_only
from mock import Mock
from mock import patch
from diamond.collector import Collector
from xen_collector import XENCollector
###############################################################################
def run_only_if_libvirt_is_available(func):
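    # Decorator: skip the wrapped test unless the libvirt bindings can be imported.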
try:
import libvirt
except ImportError:
libvirt = None
pred = lambda: libvirt is not None
return run_only(func, pred)
class TestXENCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('XENCollector', {
})
self.collector = XENCollector(config, None)
def test_import(self):
self.assertTrue(XENCollector)
@run_only_if_libvirt_is_available
@patch('os.statvfs')
@patch('libvirt.openReadOnly')
@patch.object(Collector, 'publish')
def test_centos6(self, publish_mock, libvirt_mock, os_mock):
class info:
def __init__(self, id):
self.id = id
def info(self):
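                # Mimics libvirt's virDomain.info():
                # [state, maxMem (KiB), memory (KiB), nrVirtCpu, cpuTime (ns)]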
                if self.id == 0:
                    return [1, 49420888, 49420888, 8, 911232000000000]
                if self.id == 1:
                    return [1, 2097152, 2097152, 2, 310676150000000]
                if self.id == 2:
                    return [1, 2097152, 2097152, 2, 100375300000000]
                if self.id == 3:
                    return [1, 10485760, 10485760, 2, 335312040000000]
                if self.id == 4:
                    return [1, 10485760, 10485760, 2, 351313480000000]
libvirt_m = Mock()
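        # getInfo() mirrors libvirt's connection getInfo():
        # [model, memory (MB), CPUs, MHz, NUMA nodes, sockets, cores/socket, threads/core]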
libvirt_m.getInfo.return_value = ['x86_64', 48262, 8, 1200, 2, 1, 4, 1]
libvirt_m.listDomainsID.return_value = [0, 2, 1, 4, 3]
def lookupByIdMock(id):
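            # Stand-in for the connection's lookupByID(): return a fake domain
            # whose info() corresponds to the requested domain id.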
lookup = info(id)
return lookup
libvirt_m.lookupByID = lookupByIdMock
libvirt_mock.return_value = libvirt_m
statsvfs_mock = Mock()
statsvfs_mock.f_bavail = 74492145
statsvfs_mock.f_frsize = 4096
os_mock.return_value = statsvfs_mock
self.collector.collect()
metrics = {
'TotalCores': 8.000000,
'InstalledMem': 48262.000000,
'MemAllocated': 24576.000000,
'MemFree': 23686.000000,
'DiskFree': 297968580.000000,
'FreeCores': 0.000000,
'AllocatedCores': 8.000000,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
import contextlib
import asynctest
import mock
import pytest
from paasta_tools import hacheck
@contextlib.contextmanager
def mock_ClientSession(**fake_session_kwargs):
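    # Replace aiohttp.ClientSession with a fake whose async context manager yields
    # a preconfigured MagicMock, so the tests make no real HTTP requests.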
fake_session = asynctest.MagicMock(name="session", **fake_session_kwargs)
class FakeClientSession:
def __init__(self, *args, **kwargs):
...
async def __aenter__(*args):
return fake_session
async def __aexit__(*args):
pass
with mock.patch("aiohttp.ClientSession", new=FakeClientSession, autospec=False):
yield
@pytest.mark.asyncio
async def test_get_spool():
fake_response = mock.Mock(
status=503,
text=asynctest.CoroutineMock(
return_value="Service service in down state since 1435694078.778886 "
"until 1435694178.780000: Drained by Paasta"
),
)
fake_task = mock.Mock(host="fake_host", ports=[54321])
with mock_ClientSession(
get=asynctest.Mock(
return_value=asynctest.MagicMock(
__aenter__=asynctest.CoroutineMock(return_value=fake_response)
)
)
):
actual = await hacheck.get_spool(fake_task)
expected = {
"service": "service",
"state": "down",
"reason": "Drained by Paasta",
"since": 1435694078.778886,
"until": 1435694178.780000,
}
assert actual == expected
@pytest.mark.asyncio
async def test_get_spool_handles_no_ports():
actual = await hacheck.get_spool(None)
assert actual is None
|