import logging
from typing import List
import eiscp
from eiscp import eISCP
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
DOMAIN,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_HOST,
CONF_NAME,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_SOURCES = "sources"
CONF_MAX_VOLUME = "max_volume"
CONF_RECEIVER_MAX_VOLUME = "receiver_max_volume"
DEFAULT_NAME = "Onkyo Receiver"
SUPPORTED_MAX_VOLUME = 100
DEFAULT_RECEIVER_MAX_VOLUME = 80
SUPPORT_ONKYO = (
SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_VOLUME_STEP
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
)
SUPPORT_ONKYO_WO_VOLUME = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
)
KNOWN_HOSTS: List[str] = []
DEFAULT_SOURCES = {
"tv": "TV",
"bd": "Bluray",
"game": "Game",
"aux1": "Aux1",
"video1": "Video 1",
"video2": "Video 2",
"video3": "Video 3",
"video4": "Video 4",
"video5": "Video 5",
"video6": "Video 6",
"video7": "Video 7",
"fm": "Radio",
}
DEFAULT_PLAYABLE_SOURCES = ("fm", "am", "tuner")
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MAX_VOLUME, default=SUPPORTED_MAX_VOLUME): vol.All(
vol.Coerce(int), vol.Range(min=1, max=100)
),
vol.Optional(
CONF_RECEIVER_MAX_VOLUME, default=DEFAULT_RECEIVER_MAX_VOLUME
): cv.positive_int,
vol.Optional(CONF_SOURCES, default=DEFAULT_SOURCES): {cv.string: cv.string},
}
)
TIMEOUT_MESSAGE = "Timeout waiting for response."
ATTR_HDMI_OUTPUT = "hdmi_output"
ATTR_PRESET = "preset"
ACCEPTED_VALUES = [
"no",
"analog",
"yes",
"out",
"out-sub",
"sub",
"hdbaset",
"both",
"up",
]
ONKYO_SELECT_OUTPUT_SCHEMA = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): cv.entity_ids,
vol.Required(ATTR_HDMI_OUTPUT): vol.In(ACCEPTED_VALUES),
}
)
SERVICE_SELECT_HDMI_OUTPUT = "onkyo_select_hdmi_output"
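# Example service call accepted by ONKYO_SELECT_OUTPUT_SCHEMA (entity id is
# illustrative; the service is registered under the media_player domain below):
#   service: media_player.onkyo_select_hdmi_output
#   data:
#     entity_id: media_player.onkyo_receiver
#     hdmi_output: out-sub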
def determine_zones(receiver):
"""Determine what zones are available for the receiver."""
out = {"zone2": False, "zone3": False}
try:
_LOGGER.debug("Checking for zone 2 capability")
receiver.raw("ZPWQSTN")
out["zone2"] = True
except ValueError as error:
if str(error) != TIMEOUT_MESSAGE:
raise error
_LOGGER.debug("Zone 2 timed out, assuming no functionality")
try:
_LOGGER.debug("Checking for zone 3 capability")
receiver.raw("PW3QSTN")
out["zone3"] = True
except ValueError as error:
if str(error) != TIMEOUT_MESSAGE:
raise error
_LOGGER.debug("Zone 3 timed out, assuming no functionality")
except AssertionError:
_LOGGER.error("Zone 3 detection failed")
return out
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Onkyo platform."""
host = config.get(CONF_HOST)
hosts = []
def service_handle(service):
"""Handle for services."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
devices = [d for d in hosts if d.entity_id in entity_ids]
for device in devices:
if service.service == SERVICE_SELECT_HDMI_OUTPUT:
device.select_output(service.data.get(ATTR_HDMI_OUTPUT))
hass.services.register(
DOMAIN,
SERVICE_SELECT_HDMI_OUTPUT,
service_handle,
schema=ONKYO_SELECT_OUTPUT_SCHEMA,
)
if CONF_HOST in config and host not in KNOWN_HOSTS:
try:
receiver = eiscp.eISCP(host)
hosts.append(
OnkyoDevice(
receiver,
config.get(CONF_SOURCES),
name=config.get(CONF_NAME),
max_volume=config.get(CONF_MAX_VOLUME),
receiver_max_volume=config.get(CONF_RECEIVER_MAX_VOLUME),
)
)
KNOWN_HOSTS.append(host)
zones = determine_zones(receiver)
# Add Zone2 if available
if zones["zone2"]:
_LOGGER.debug("Setting up zone 2")
hosts.append(
OnkyoDeviceZone(
"2",
receiver,
config.get(CONF_SOURCES),
name=f"{config[CONF_NAME]} Zone 2",
max_volume=config.get(CONF_MAX_VOLUME),
receiver_max_volume=config.get(CONF_RECEIVER_MAX_VOLUME),
)
)
# Add Zone3 if available
if zones["zone3"]:
_LOGGER.debug("Setting up zone 3")
hosts.append(
OnkyoDeviceZone(
"3",
receiver,
config.get(CONF_SOURCES),
name=f"{config[CONF_NAME]} Zone 3",
max_volume=config.get(CONF_MAX_VOLUME),
receiver_max_volume=config.get(CONF_RECEIVER_MAX_VOLUME),
)
)
except OSError:
_LOGGER.error("Unable to connect to receiver at %s", host)
else:
for receiver in eISCP.discover():
if receiver.host not in KNOWN_HOSTS:
hosts.append(OnkyoDevice(receiver, config.get(CONF_SOURCES)))
KNOWN_HOSTS.append(receiver.host)
add_entities(hosts, True)
class OnkyoDevice(MediaPlayerEntity):
"""Representation of an Onkyo device."""
def __init__(
self,
receiver,
sources,
name=None,
max_volume=SUPPORTED_MAX_VOLUME,
receiver_max_volume=DEFAULT_RECEIVER_MAX_VOLUME,
):
"""Initialize the Onkyo Receiver."""
self._receiver = receiver
self._muted = False
self._volume = 0
self._pwstate = STATE_OFF
self._name = (
name or f"{receiver.info['model_name']}_{receiver.info['identifier']}"
)
self._max_volume = max_volume
self._receiver_max_volume = receiver_max_volume
self._current_source = None
self._source_list = list(sources.values())
self._source_mapping = sources
self._reverse_mapping = {value: key for key, value in sources.items()}
self._attributes = {}
self._hdmi_out_supported = True
def command(self, command):
"""Run an eiscp command and catch connection errors."""
try:
result = self._receiver.command(command)
except (ValueError, OSError, AttributeError, AssertionError):
if self._receiver.command_socket:
self._receiver.command_socket = None
_LOGGER.debug("Resetting connection to %s", self._name)
else:
_LOGGER.info("%s is disconnected. Attempting to reconnect", self._name)
return False
_LOGGER.debug("Result for %s: %s", command, result)
return result
def update(self):
"""Get the latest state from the device."""
status = self.command("system-power query")
if not status:
return
if status[1] == "on":
self._pwstate = STATE_ON
else:
self._pwstate = STATE_OFF
return
volume_raw = self.command("volume query")
mute_raw = self.command("audio-muting query")
current_source_raw = self.command("input-selector query")
# If the following command is sent to a device with only one HDMI out,
# the display shows 'Not Available'.
# We avoid this by checking if HDMI out is supported
if self._hdmi_out_supported:
hdmi_out_raw = self.command("hdmi-output-selector query")
else:
hdmi_out_raw = []
preset_raw = self.command("preset query")
if not (volume_raw and mute_raw and current_source_raw):
return
# eiscp can return string or tuple. Make everything tuples.
if isinstance(current_source_raw[1], str):
current_source_tuples = (current_source_raw[0], (current_source_raw[1],))
else:
current_source_tuples = current_source_raw
for source in current_source_tuples[1]:
if source in self._source_mapping:
self._current_source = self._source_mapping[source]
break
self._current_source = "_".join(current_source_tuples[1])
if preset_raw and self._current_source.lower() == "radio":
self._attributes[ATTR_PRESET] = preset_raw[1]
elif ATTR_PRESET in self._attributes:
del self._attributes[ATTR_PRESET]
self._muted = bool(mute_raw[1] == "on")
# AMP_VOL/MAX_RECEIVER_VOL*(MAX_VOL/100)
self._volume = (
volume_raw[1] / self._receiver_max_volume * (self._max_volume / 100)
)
if not hdmi_out_raw:
return
self._attributes["video_out"] = ",".join(hdmi_out_raw[1])
if hdmi_out_raw[1] == "N/A":
self._hdmi_out_supported = False
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._pwstate
@property
def volume_level(self):
"""Return the volume level of the media player (0..1)."""
return self._volume
@property
def is_volume_muted(self):
"""Return boolean indicating mute status."""
return self._muted
@property
def supported_features(self):
"""Return media player features that are supported."""
return SUPPORT_ONKYO
@property
def source(self):
"""Return the current input source of the device."""
return self._current_source
@property
def source_list(self):
"""List of available input sources."""
return self._source_list
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return self._attributes
def turn_off(self):
"""Turn the media player off."""
self.command("system-power standby")
def set_volume_level(self, volume):
"""
Set volume level, input is range 0..1.
However full volume on the amp is usually far too loud so allow the user to specify the upper range
with CONF_MAX_VOLUME. we change as per max_volume set by user. This means that if max volume is 80 then full
volume in HA will give 80% volume on the receiver. Then we convert
that to the correct scale for the receiver.
"""
# HA_VOL * (MAX VOL / 100) * MAX_RECEIVER_VOL
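        # Worked example (illustrative values): volume=0.5, max_volume=80,
        # receiver_max_volume=80 -> int(0.5 * 0.8 * 80) = 32 sent to the receiver.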
self.command(
f"volume {int(volume * (self._max_volume / 100) * self._receiver_max_volume)}"
)
def volume_up(self):
"""Increase volume by 1 step."""
self.command("volume level-up")
def volume_down(self):
"""Decrease volume by 1 step."""
self.command("volume level-down")
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
if mute:
self.command("audio-muting on")
else:
self.command("audio-muting off")
def turn_on(self):
"""Turn the media player on."""
self.command("system-power on")
def select_source(self, source):
"""Set the input source."""
if source in self._source_list:
source = self._reverse_mapping[source]
self.command(f"input-selector {source}")
def play_media(self, media_type, media_id, **kwargs):
"""Play radio station by preset number."""
source = self._reverse_mapping[self._current_source]
if media_type.lower() == "radio" and source in DEFAULT_PLAYABLE_SOURCES:
self.command(f"preset {media_id}")
def select_output(self, output):
"""Set hdmi-out."""
self.command(f"hdmi-output-selector={output}")
class OnkyoDeviceZone(OnkyoDevice):
"""Representation of an Onkyo device's extra zone."""
def __init__(
self,
zone,
receiver,
sources,
name=None,
max_volume=SUPPORTED_MAX_VOLUME,
receiver_max_volume=DEFAULT_RECEIVER_MAX_VOLUME,
):
"""Initialize the Zone with the zone identifier."""
self._zone = zone
self._supports_volume = True
super().__init__(receiver, sources, name, max_volume, receiver_max_volume)
def update(self):
"""Get the latest state from the device."""
status = self.command(f"zone{self._zone}.power=query")
if not status:
return
if status[1] == "on":
self._pwstate = STATE_ON
else:
self._pwstate = STATE_OFF
return
volume_raw = self.command(f"zone{self._zone}.volume=query")
mute_raw = self.command(f"zone{self._zone}.muting=query")
current_source_raw = self.command(f"zone{self._zone}.selector=query")
preset_raw = self.command(f"zone{self._zone}.preset=query")
# If we received a source value, but not a volume value
# it's likely this zone permanently does not support volume.
if current_source_raw and not volume_raw:
self._supports_volume = False
if not (volume_raw and mute_raw and current_source_raw):
return
# It's possible for some players to have zones set to HDMI with
# no sound control. In this case, the string `N/A` is returned.
self._supports_volume = isinstance(volume_raw[1], (float, int))
# eiscp can return string or tuple. Make everything tuples.
if isinstance(current_source_raw[1], str):
current_source_tuples = (current_source_raw[0], (current_source_raw[1],))
else:
current_source_tuples = current_source_raw
for source in current_source_tuples[1]:
if source in self._source_mapping:
self._current_source = self._source_mapping[source]
break
self._current_source = "_".join(current_source_tuples[1])
self._muted = bool(mute_raw[1] == "on")
if preset_raw and self._current_source.lower() == "radio":
self._attributes[ATTR_PRESET] = preset_raw[1]
elif ATTR_PRESET in self._attributes:
del self._attributes[ATTR_PRESET]
if self._supports_volume:
# AMP_VOL/MAX_RECEIVER_VOL*(MAX_VOL/100)
self._volume = (
volume_raw[1] / self._receiver_max_volume * (self._max_volume / 100)
)
@property
def supported_features(self):
"""Return media player features that are supported."""
if self._supports_volume:
return SUPPORT_ONKYO
return SUPPORT_ONKYO_WO_VOLUME
def turn_off(self):
"""Turn the media player off."""
self.command(f"zone{self._zone}.power=standby")
def set_volume_level(self, volume):
"""
Set volume level, input is range 0..1.
However full volume on the amp is usually far too loud so allow the user to specify the upper range
with CONF_MAX_VOLUME. we change as per max_volume set by user. This means that if max volume is 80 then full
volume in HA will give 80% volume on the receiver. Then we convert
that to the correct scale for the receiver.
"""
# HA_VOL * (MAX VOL / 100) * MAX_RECEIVER_VOL
self.command(
f"zone{self._zone}.volume={int(volume * (self._max_volume / 100) * self._receiver_max_volume)}"
)
def volume_up(self):
"""Increase volume by 1 step."""
self.command(f"zone{self._zone}.volume=level-up")
def volume_down(self):
"""Decrease volume by 1 step."""
self.command(f"zone{self._zone}.volume=level-down")
def mute_volume(self, mute):
"""Mute (true) or unmute (false) media player."""
if mute:
self.command(f"zone{self._zone}.muting=on")
else:
self.command(f"zone{self._zone}.muting=off")
def turn_on(self):
"""Turn the media player on."""
self.command(f"zone{self._zone}.power=on")
def select_source(self, source):
"""Set the input source."""
if source in self._source_list:
source = self._reverse_mapping[source]
self.command(f"zone{self._zone}.selector={source}")
|
import asyncio
import logging
from types import MappingProxyType
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_NAME,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_PROFILE,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
VALID_STATES = {STATE_ON, STATE_OFF}
ATTR_GROUP = [
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_WHITE_VALUE,
ATTR_TRANSITION,
]
COLOR_GROUP = [
ATTR_HS_COLOR,
ATTR_COLOR_TEMP,
ATTR_RGB_COLOR,
ATTR_XY_COLOR,
# The following color attributes are deprecated
ATTR_PROFILE,
ATTR_COLOR_NAME,
ATTR_KELVIN,
]
DEPRECATED_GROUP = [
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_NAME,
ATTR_FLASH,
ATTR_KELVIN,
ATTR_PROFILE,
ATTR_TRANSITION,
]
DEPRECATION_WARNING = (
"The use of other attributes than device state attributes is deprecated and will be removed in a future release. "
"Invalid attributes are %s. Read the logs for further details: https://www.home-assistant.io/integrations/scene/"
)
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if state.state not in VALID_STATES:
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Warn if deprecated attributes are used
deprecated_attrs = [attr for attr in state.attributes if attr in DEPRECATED_GROUP]
if deprecated_attrs:
_LOGGER.warning(DEPRECATION_WARNING, deprecated_attrs)
# Return if we are already at the right state.
if cur_state.state == state.state and all(
check_attr_equal(cur_state.attributes, state.attributes, attr)
for attr in ATTR_GROUP + COLOR_GROUP
):
return
service_data: Dict[str, Any] = {ATTR_ENTITY_ID: state.entity_id}
if reproduce_options is not None and ATTR_TRANSITION in reproduce_options:
service_data[ATTR_TRANSITION] = reproduce_options[ATTR_TRANSITION]
if state.state == STATE_ON:
service = SERVICE_TURN_ON
for attr in ATTR_GROUP:
# All attributes that are not colors
if attr in state.attributes:
service_data[attr] = state.attributes[attr]
for color_attr in COLOR_GROUP:
# Choose the first color that is specified
if color_attr in state.attributes:
service_data[color_attr] = state.attributes[color_attr]
break
elif state.state == STATE_OFF:
service = SERVICE_TURN_OFF
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Light states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
def check_attr_equal(
attr1: MappingProxyType, attr2: MappingProxyType, attr_str: str
) -> bool:
"""Return true if the given attributes are equal."""
return attr1.get(attr_str) == attr2.get(attr_str)
|
import os.path as op
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_allclose,
assert_equal, assert_array_less)
from scipy import fftpack
from mne import read_events, Epochs, make_fixed_length_events
from mne.io import read_raw_fif
from mne.time_frequency._stockwell import (tfr_stockwell, _st,
_precompute_st_windows,
_check_input_st,
_st_power_itc)
from mne.time_frequency import AverageTFR, tfr_array_stockwell
from mne.utils import run_tests_if_main
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
def test_stockwell_ctf():
"""Test that Stockwell can be calculated on CTF data."""
raw = read_raw_fif(raw_ctf_fname)
raw.apply_gradient_compensation(3)
events = make_fixed_length_events(raw, duration=0.5)
evoked = Epochs(raw, events, tmin=-0.2, tmax=0.3, decim=10,
preload=True, verbose='error').average()
tfr_stockwell(evoked, verbose='error') # smoke test
def test_stockwell_check_input():
"""Test input checker for stockwell."""
# check for data size equal and unequal to a power of 2
for last_dim in (127, 128):
data = np.zeros((2, 10, last_dim))
with pytest.warns(None): # n_fft sometimes
x_in, n_fft, zero_pad = _check_input_st(data, None)
assert_equal(x_in.shape, (2, 10, 128))
assert_equal(n_fft, 128)
assert_equal(zero_pad, 128 - last_dim)
def test_stockwell_st_no_zero_pad():
"""Test stockwell power itc."""
data = np.zeros((20, 128))
start_f = 1
stop_f = 10
sfreq = 30
width = 2
W = _precompute_st_windows(data.shape[-1], start_f, stop_f, sfreq, width)
_st_power_itc(data, 10, True, 0, 1, W)
def test_stockwell_core():
"""Test stockwell transform."""
# adapted from
# http://vcs.ynic.york.ac.uk/docs/naf/intro/concepts/timefreq.html
sfreq = 1000.0 # make things easy to understand
dur = 0.5
onset, offset = 0.175, 0.275
n_samp = int(sfreq * dur)
t = np.arange(n_samp) / sfreq # make an array for time
pulse_freq = 15.
pulse = np.cos(2. * np.pi * pulse_freq * t)
pulse[0:int(onset * sfreq)] = 0. # Zero before our desired pulse
pulse[int(offset * sfreq):] = 0. # and zero after our desired pulse
width = 0.5
freqs = fftpack.fftfreq(len(pulse), 1. / sfreq)
fmin, fmax = 1.0, 100.0
start_f, stop_f = [np.abs(freqs - f).argmin() for f in (fmin, fmax)]
W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)
st_pulse = _st(pulse, start_f, W)
st_pulse = np.abs(st_pulse) ** 2
assert_equal(st_pulse.shape[-1], len(pulse))
st_max_freq = freqs[st_pulse.max(axis=1).argmax(axis=0)] # max freq
assert_allclose(st_max_freq, pulse_freq, atol=1.0)
assert (onset < t[st_pulse.max(axis=0).argmax(axis=0)] < offset)
# test inversion to FFT, by averaging local spectra, see eq. 5 in
# Moukadem, A., Bouguila, Z., Ould Abdeslam, D. and Alain Dieterlen.
# "Stockwell transform optimization applied on the detection of split in
# heart sounds."
width = 1.0
start_f, stop_f = 0, len(pulse)
W = _precompute_st_windows(n_samp, start_f, stop_f, sfreq, width)
y = _st(pulse, start_f, W)
# invert stockwell
y_inv = fftpack.ifft(np.sum(y, axis=1)).real
assert_array_almost_equal(pulse, y_inv)
def test_stockwell_api():
"""Test stockwell functions."""
raw = read_raw_fif(raw_fname)
event_id, tmin, tmax = 1, -0.2, 0.5
event_name = op.join(base_dir, 'test-eve.fif')
events = read_events(event_name)
epochs = Epochs(raw, events, # XXX pick 2 has epochs of zeros.
event_id, tmin, tmax, picks=[0, 1, 3])
for fmin, fmax in [(None, 50), (5, 50), (5, None)]:
with pytest.warns(RuntimeWarning, match='padding'):
power, itc = tfr_stockwell(epochs, fmin=fmin, fmax=fmax,
return_itc=True)
if fmax is not None:
assert (power.freqs.max() <= fmax)
with pytest.warns(RuntimeWarning, match='padding'):
power_evoked = tfr_stockwell(epochs.average(), fmin=fmin,
fmax=fmax, return_itc=False)
# for multitaper these don't necessarily match, but they seem to
# for stockwell... if this fails, this maybe could be changed
# just to check the shape
assert_array_almost_equal(power_evoked.data, power.data)
assert (isinstance(power, AverageTFR))
assert (isinstance(itc, AverageTFR))
assert_equal(power.data.shape, itc.data.shape)
assert (itc.data.min() >= 0.0)
assert (itc.data.max() <= 1.0)
        assert (np.log(power.data.max()) * 20 <= 0.0)
with pytest.raises(TypeError, match='ndarray'):
tfr_array_stockwell('foo', 1000.)
data = np.random.RandomState(0).randn(1, 1024)
with pytest.raises(ValueError, match='3D with shape'):
tfr_array_stockwell(data, 1000.)
data = data[np.newaxis]
power, itc, freqs = tfr_array_stockwell(data, 1000., return_itc=True)
assert_allclose(itc, np.ones_like(itc))
assert power.shape == (1, len(freqs), data.shape[-1])
assert_array_less(0, power)
run_tests_if_main()
|
from threading import Thread, Event
import requests
import time
import logging
logger = logging.getLogger(__name__)
class PandoraStatsPoller(Thread):
def __init__(self, port):
super(PandoraStatsPoller, self).__init__()
self._stop_run = Event()
self.buffer = []
self.port = port
def run(self):
last_ts = int(time.time() - 1)
while not self._stop_run.is_set():
curr_ts = int(time.time())
if curr_ts > last_ts:
last_ts = curr_ts
try:
pandora_stat = requests.get(
"http://localhost:{port}/debug/vars".format(port=self.port), timeout=0.9
).json()
data = {
'ts': last_ts - 1,
'metrics': {
'instances': pandora_stat.get("engine_ActiveRequests"),
'reqps': pandora_stat.get("engine_ReqPS"),
}
}
except (requests.ConnectionError, requests.HTTPError, requests.exceptions.Timeout, ValueError):
logger.debug("Pandora expvar http interface is unavailable", exc_info=True)
data = {
'ts': last_ts - 1,
'metrics': {
'instances': 0,
'reqps': 0
}
}
self.buffer.append(data)
else:
time.sleep(0.2)
def stop(self):
self._stop_run.set()
def get_data(self):
result, self.buffer = self.buffer, []
return result
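# Note: the poller above assumes pandora's expvar endpoint returns a flat JSON
# object containing the "engine_ActiveRequests" and "engine_ReqPS" counters,
# e.g. {"engine_ActiveRequests": 10, "engine_ReqPS": 250.0, ...} (sample payload
# is illustrative); any other fields are ignored.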
class PandoraStatsReader(object):
    # TODO: maybe make stats collection asynchronous
def __init__(self, expvar, port):
self.closed = False
self.expvar = expvar
self.port = port
self.poller = PandoraStatsPoller(port)
self.started = False
def __next__(self):
if not self.expvar:
if self.closed:
raise StopIteration
return [{
'ts': int(time.time() - 1),
'metrics': {
'instances': 0,
'reqps': 0
}
}]
else:
if self.closed:
raise StopIteration()
elif not self.started:
self.poller.start()
self.started = True
return self.poller.get_data()
def close(self):
self.closed = True
self.poller.stop()
self.poller.join()
def __iter__(self):
return self
|
from django.test import TestCase
from zinnia.templating import append_position
from zinnia.templating import loop_template_list
class TemplateTestCase(TestCase):
"""Tests cases for template"""
def test_loop_template_list(self):
template = 'zinnia/template.html'
self.assertEqual(
loop_template_list(
(1, 1), None, None, template, {}),
['zinnia/template-1.html',
'zinnia/template_1.html',
template])
self.assertEqual(
loop_template_list(
(10, 1), None, None, template,
{'default': {10: 'default_template.html'}}),
['default_template.html',
'zinnia/template-10.html',
'zinnia/template_1.html',
template])
self.assertEqual(
loop_template_list(
(10, 1), 'object', 'str', template,
{'default': {10: 'default_template.html'},
'str': {10: 'str_template.html'},
'object': {10: 'object_template.html'},
'str-object': {10: 'str_object_template.html'}}),
['str_object_template.html',
'object_template.html',
'str_template.html',
'default_template.html',
'zinnia/template-10.html',
'zinnia/template_1.html',
template])
def test_append_position(self):
self.assertEqual(
append_position('template.html', 1),
'template1.html')
self.assertEqual(
append_position('template', 1),
'template1')
self.assertEqual(
append_position('/path/template.html', 1),
'/path/template1.html')
self.assertEqual(
append_position('/path/template.html', 1, '-'),
'/path/template-1.html')
|
from .base_classes import Command, Container, Environment
from .package import Package
class Alignat(Environment):
"""Class that represents a aligned equation environment."""
    #: Alignat environments cause compile errors when they do not contain items.
    #: This is why they are omitted entirely when empty.
omit_if_empty = True
packages = [Package('amsmath')]
def __init__(self, aligns=2, numbering=True, escape=None):
"""
Parameters
----------
aligns : int
number of alignments
numbering : bool
Whether to number equations
escape : bool
if True, will escape strings
"""
self.aligns = aligns
self.numbering = numbering
self.escape = escape
if not numbering:
self._star_latex_name = True
super().__init__(start_arguments=[str(int(aligns))])
class Math(Container):
"""A class representing a math environment."""
packages = [Package('amsmath')]
content_separator = ' '
def __init__(self, *, inline=False, data=None, escape=None):
r"""
Args
----
data: list
Content of the math container.
inline: bool
If the math should be displayed inline or not.
escape : bool
if True, will escape strings
"""
self.inline = inline
self.escape = escape
super().__init__(data=data)
def dumps(self):
"""Return a LaTeX formatted string representing the object.
Returns
-------
str
"""
if self.inline:
return '$' + self.dumps_content() + '$'
return '\\[%\n' + self.dumps_content() + '%\n\\]'
class VectorName(Command):
"""A class representing a named vector."""
_repr_attributes_mapping = {
'name': 'arguments',
}
def __init__(self, name):
"""
Args
----
name: str
Name of the vector
"""
super().__init__('mathbf', arguments=name)
class Matrix(Environment):
"""A class representing a matrix."""
packages = [Package('amsmath')]
_repr_attributes_mapping = {
'alignment': 'arguments',
}
def __init__(self, matrix, *, mtype='p', alignment=None):
r"""
Args
----
matrix: `numpy.ndarray` instance
The matrix to display
mtype: str
What kind of brackets are used around the matrix. The different
options and their corresponding brackets are:
p = ( ), b = [ ], B = { }, v = \| \|, V = \|\| \|\|
alignment: str
How to align the content of the cells in the matrix. This is ``c``
by default.
References
----------
* https://en.wikibooks.org/wiki/LaTeX/Mathematics#Matrices_and_arrays
"""
import numpy # noqa, Sanity check if numpy is installed
self.matrix = matrix
self.latex_name = mtype + 'matrix'
self._mtype = mtype
if alignment is not None:
self.latex_name += '*'
super().__init__(arguments=alignment)
def dumps_content(self):
"""Return a string representing the matrix in LaTeX syntax.
Returns
-------
str
"""
import numpy as np
string = ''
shape = self.matrix.shape
for (y, x), value in np.ndenumerate(self.matrix):
if x:
string += '&'
string += str(value)
if x == shape[1] - 1 and y != shape[0] - 1:
string += r'\\' + '%\n'
super().dumps_content()
return string
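# Minimal usage sketch for the classes above (illustrative only; assumes numpy
# is installed and describes the intent rather than the exact LaTeX output):
#   import numpy as np
#   Math(data=[VectorName('v'), '=', Matrix(np.array([[1], [2]]), mtype='b')])
# VectorName renders the name in bold via \mathbf, and mtype='b' selects the
# bmatrix environment, i.e. square brackets around the column vector.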
|
from goalzero import exceptions
from homeassistant.components.goalzero.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from . import (
CONF_CONFIG_FLOW,
CONF_DATA,
CONF_HOST,
CONF_NAME,
NAME,
_create_mocked_yeti,
_patch_config_flow_yeti,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
def _flow_next(hass, flow_id):
return next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == flow_id
)
def _patch_setup():
return patch(
"homeassistant.components.goalzero.async_setup_entry",
return_value=True,
)
async def test_flow_user(hass):
"""Test user initialized flow."""
mocked_yeti = await _create_mocked_yeti()
with _patch_config_flow_yeti(mocked_yeti), _patch_setup():
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_USER},
)
result = await hass.config_entries.flow.async_configure(
result["flow_id"],
user_input=CONF_CONFIG_FLOW,
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == NAME
assert result["data"] == CONF_DATA
async def test_flow_user_already_configured(hass):
"""Test user initialized flow with duplicate server."""
entry = MockConfigEntry(
domain=DOMAIN,
data={CONF_HOST: "1.2.3.4", CONF_NAME: "Yeti"},
)
entry.add_to_hass(hass)
service_info = {
"host": "1.2.3.4",
"name": "Yeti",
}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=service_info
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_flow_user_cannot_connect(hass):
"""Test user initialized flow with unreachable server."""
mocked_yeti = await _create_mocked_yeti(True)
with _patch_config_flow_yeti(mocked_yeti) as yetimock:
yetimock.side_effect = exceptions.ConnectError
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONF_CONFIG_FLOW
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "cannot_connect"}
async def test_flow_user_invalid_host(hass):
"""Test user initialized flow with invalid server."""
mocked_yeti = await _create_mocked_yeti(True)
with _patch_config_flow_yeti(mocked_yeti) as yetimock:
yetimock.side_effect = exceptions.InvalidHost
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONF_CONFIG_FLOW
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "invalid_host"}
async def test_flow_user_unknown_error(hass):
"""Test user initialized flow with unreachable server."""
mocked_yeti = await _create_mocked_yeti(True)
with _patch_config_flow_yeti(mocked_yeti) as yetimock:
yetimock.side_effect = Exception
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONF_CONFIG_FLOW
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "unknown"}
|
import os
import subprocess
import threading
import time
import unittest
from absl import flags
import mock
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from tests import pkb_common_test_case
import psutil
FLAGS = flags.FLAGS
class ShouldRunOnInternalIpAddressTestCase(
pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(ShouldRunOnInternalIpAddressTestCase, self).setUp()
self.sending_vm = mock.MagicMock()
self.receiving_vm = mock.MagicMock()
def _RunTest(self, expectation, ip_addresses, is_reachable=True):
FLAGS.ip_addresses = ip_addresses
self.sending_vm.IsReachable.return_value = is_reachable
self.assertEqual(
expectation,
vm_util.ShouldRunOnInternalIpAddress(
self.sending_vm, self.receiving_vm))
def testExternal_Reachable(self):
self._RunTest(False, vm_util.IpAddressSubset.EXTERNAL, True)
def testExternal_Unreachable(self):
self._RunTest(False, vm_util.IpAddressSubset.EXTERNAL, False)
def testInternal_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.INTERNAL, True)
def testInternal_Unreachable(self):
self._RunTest(True, vm_util.IpAddressSubset.INTERNAL, False)
def testBoth_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.BOTH, True)
def testBoth_Unreachable(self):
self._RunTest(True, vm_util.IpAddressSubset.BOTH, False)
def testReachable_Reachable(self):
self._RunTest(True, vm_util.IpAddressSubset.REACHABLE, True)
def testReachable_Unreachable(self):
self._RunTest(
False, vm_util.IpAddressSubset.REACHABLE, False)
def HaveSleepSubprocess():
"""Checks if the current process has a sleep subprocess."""
for child in psutil.Process(os.getpid()).children(recursive=True):
if 'sleep' in child.cmdline():
return True
return False
class WaitUntilSleepTimer(threading.Thread):
"""Timer that waits for a sleep subprocess to appear.
This is intended for specific tests that want to trigger timer
expiry as soon as it detects that a subprocess is executing a
"sleep" command.
It assumes that the test driver is not parallelizing the tests using
this method since that may lead to inconsistent results.
TODO(klausw): If that's an issue, could add a unique fractional part
to the sleep command args to distinguish them.
"""
def __init__(self, interval, function):
threading.Thread.__init__(self)
self.end_time = time.time() + interval
self.function = function
self.finished = threading.Event()
self.have_sleep = threading.Event()
def WaitForSleep():
while not self.finished.is_set():
if HaveSleepSubprocess():
self.have_sleep.set()
break
time.sleep(0) # yield to other Python threads
threading.Thread(target=WaitForSleep).run()
def cancel(self):
self.finished.set()
def run(self):
while time.time() < self.end_time and not self.have_sleep.is_set():
time.sleep(0) # yield to other Python threads
if not self.finished.is_set():
self.function()
self.finished.set()
class IssueCommandTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(IssueCommandTestCase, self).setUp()
FLAGS.time_commands = True
def testTimeoutNotReached(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '0s'])
self.assertEqual(retcode, 0)
@mock.patch('threading.Timer', new=WaitUntilSleepTimer)
def testTimeoutReachedThrows(self):
with self.assertRaises(errors.VmUtil.IssueCommandTimeoutError):
_, _, _ = vm_util.IssueCommand(['sleep', '2s'], timeout=1,
raise_on_failure=False)
self.assertFalse(HaveSleepSubprocess())
@mock.patch('threading.Timer', new=WaitUntilSleepTimer)
def testTimeoutReached(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '2s'], timeout=1,
raise_on_failure=False,
raise_on_timeout=False)
self.assertEqual(retcode, -9)
self.assertFalse(HaveSleepSubprocess())
def testNoTimeout(self):
_, _, retcode = vm_util.IssueCommand(['sleep', '0s'], timeout=None)
self.assertEqual(retcode, 0)
def testNoTimeout_ExceptionRaised(self):
with mock.patch('subprocess.Popen', spec=subprocess.Popen) as mock_popen:
mock_popen.return_value.wait.side_effect = KeyboardInterrupt()
with self.assertRaises(KeyboardInterrupt):
vm_util.IssueCommand(['sleep', '2s'], timeout=None)
self.assertFalse(HaveSleepSubprocess())
def testRaiseOnFailureSuppressed_NoException(self):
def _SuppressFailure(stdout, stderr, retcode):
del stdout # unused
del stderr # unused
self.assertNotEqual(
retcode, 0,
'_SuppressFailure should not have been called for retcode=0.')
return True
stdout, stderr, retcode = vm_util.IssueCommand(
['cat', 'non_existent_file'],
suppress_failure=_SuppressFailure)
    # Ideally our command would produce stdout that we could verify is preserved,
    # but that's hard with the way IssueCommand creates local files for getting
    # results from subprocess.Popen().
self.assertEqual(stdout, '')
# suppressed from
# cat: non_existent_file: No such file or directory
self.assertEqual(stderr, '')
# suppressed from 1
self.assertEqual(retcode, 0)
def testRaiseOnFailureUnsuppressed_ExceptionRaised(self):
def _DoNotSuppressFailure(stdout, stderr, retcode):
del stdout # unused
del stderr # unused
self.assertNotEqual(
retcode, 0,
'_DoNotSuppressFailure should not have been called for retcode=0.')
return False
with self.assertRaises(errors.VmUtil.IssueCommandError) as cm:
vm_util.IssueCommand(['cat', 'non_existent_file'],
raise_on_failure=True,
suppress_failure=_DoNotSuppressFailure)
self.assertIn('cat: non_existent_file: No such file or directory',
str(cm.exception))
def testRaiseOnFailureWithNoSuppression_ExceptionRaised(self):
with self.assertRaises(errors.VmUtil.IssueCommandError) as cm:
vm_util.IssueCommand(['cat', 'non_existent_file'],
raise_on_failure=True,
suppress_failure=None)
self.assertIn('cat: non_existent_file: No such file or directory',
str(cm.exception))
class VmUtilTest(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(VmUtilTest, self).setUp()
self.mock_vm = mock.Mock()
def testReplaceTextUsesCorrectCommand(self):
"""Test of vm_util.ReplaceText()."""
vm_util.ReplaceText(
self.mock_vm, 'current', 'new', 'test_file', regex_char='|')
self.mock_vm.RemoteCommand.assert_called_with(
'sed -i -r "s|current|new|" test_file')
def testDictionaryToEnvString(self):
self.assertEqual('', vm_util.DictionaryToEnvString({}))
test_dict = {'a': 'b', 'c': 'd'}
self.assertEqual('a=b c=d', vm_util.DictionaryToEnvString(test_dict))
self.assertEqual('a=b;c=d', vm_util.DictionaryToEnvString(test_dict, ';'))
if __name__ == '__main__':
unittest.main()
|
from html import escape
from aiohttp import web
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import HTTP_OK
import homeassistant.helpers.config_validation as cv
CONTENT_TYPE_XML = "text/xml"
DOMAIN = "rss_feed_template"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
cv.match_all: vol.Schema(
{
vol.Optional("requires_api_password", default=True): cv.boolean,
vol.Optional("title"): cv.template,
vol.Required("items"): vol.All(
cv.ensure_list,
[
{
vol.Optional("title"): cv.template,
vol.Optional("description"): cv.template,
}
],
),
}
)
}
)
},
extra=vol.ALLOW_EXTRA,
)
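# Example configuration.yaml entry matching CONFIG_SCHEMA above (feed name,
# title and templates are illustrative):
#
#   rss_feed_template:
#     garage_status:
#       requires_api_password: false
#       title: "Home status"
#       items:
#         - title: "Garage door"
#           description: "{{ states('cover.garage_door') }}"
#
# This feed would be served at /api/rss_template/garage_status.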
def setup(hass, config):
"""Set up the RSS feed template component."""
for (feeduri, feedconfig) in config[DOMAIN].items():
url = "/api/rss_template/%s" % feeduri
requires_auth = feedconfig.get("requires_api_password")
title = feedconfig.get("title")
if title is not None:
title.hass = hass
items = feedconfig.get("items")
for item in items:
if "title" in item:
item["title"].hass = hass
if "description" in item:
item["description"].hass = hass
rss_view = RssView(url, requires_auth, title, items)
hass.http.register_view(rss_view)
return True
class RssView(HomeAssistantView):
"""Export states and other values as RSS."""
requires_auth = True
url = None
name = "rss_template"
_title = None
_items = None
def __init__(self, url, requires_auth, title, items):
"""Initialize the rss view."""
self.url = url
self.requires_auth = requires_auth
self._title = title
self._items = items
async def get(self, request, entity_id=None):
"""Generate the RSS view XML."""
response = '<?xml version="1.0" encoding="utf-8"?>\n\n'
response += "<rss>\n"
if self._title is not None:
response += " <title>%s</title>\n" % escape(
self._title.async_render(parse_result=False)
)
for item in self._items:
response += " <item>\n"
if "title" in item:
response += " <title>"
response += escape(item["title"].async_render(parse_result=False))
response += "</title>\n"
if "description" in item:
response += " <description>"
response += escape(item["description"].async_render(parse_result=False))
response += "</description>\n"
response += " </item>\n"
response += "</rss>\n"
return web.Response(
body=response, content_type=CONTENT_TYPE_XML, status=HTTP_OK
)
|
from __future__ import absolute_import, division
import copy
import warnings
import numpy as np
from numpy import dot, zeros, eye
import scipy.linalg as linalg
from filterpy.common import pretty_str
class HInfinityFilter(object):
"""
H-Infinity filter. You are responsible for setting the
various state variables to reasonable values; the defaults below will
not give you a functional filter.
Parameters
----------
dim_x : int
Number of state variables for the Kalman filter. For example, if
you are tracking the position and velocity of an object in two
dimensions, dim_x would be 4.
        This is used to set the default size of `P`, `Q`, and `u`.
    dim_z : int
        Number of measurement inputs. For example, if the sensor
provides you with position in (x, y), `dim_z` would be 2.
dim_u : int
Number of control inputs for the Gu part of the prediction step.
gamma : float
.. warning::
I do not believe this code is correct. DO NOT USE THIS.
In particular, note that predict does not update the covariance
matrix.
"""
def __init__(self, dim_x, dim_z, dim_u, gamma):
warnings.warn("This code is likely incorrect. DO NOT USE.", DeprecationWarning)
self.dim_x = dim_x
self.dim_z = dim_z
self.dim_u = dim_u
self.gamma = gamma
self.x = zeros((dim_x, 1)) # state
self.B = 0 # control transition matrix
self.F = eye(dim_x) # state transition matrix
self.H = zeros((dim_z, dim_x)) # Measurement function
self.P = eye(dim_x) # Uncertainty covariance.
self.Q = eye(dim_x)
self._V_inv = zeros((dim_z, dim_z)) # inverse measurement noise
self._V = zeros((dim_z, dim_z)) # measurement noise
self.W = zeros((dim_x, dim_x)) # process uncertainty
# gain and residual are computed during the innovation step. We
# save them so that in case you want to inspect them for various
# purposes
self.K = 0 # H-infinity gain
self.y = zeros((dim_z, 1))
self.z = zeros((dim_z, 1))
# identity matrix. Do not alter this.
self._I = np.eye(dim_x)
def update(self, z):
"""
Add a new measurement `z` to the H-Infinity filter. If `z` is None,
nothing is changed.
Parameters
----------
z : ndarray
measurement for this update.
"""
if z is None:
return
# rename for readability and a tiny extra bit of speed
I = self._I
gamma = self.gamma
Q = self.Q
H = self.H
P = self.P
x = self.x
V_inv = self._V_inv
F = self.F
W = self.W
# common subexpression H.T * V^-1
HTVI = dot(H.T, V_inv)
L = linalg.inv(I - gamma * dot(Q, P) + dot(HTVI, H).dot(P))
# common subexpression P*L
PL = dot(P, L)
K = dot(F, PL).dot(HTVI)
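        # Putting the pieces together, the H-infinity gain computed here is
        #   K = F P (I - gamma*Q*P + H^T V^-1 H P)^-1 H^T V^-1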
self.y = z - dot(H, x)
# x = x + Ky
# predict new x with residual scaled by the H-Infinity gain
self.x = self.x + dot(K, self.y)
self.P = dot(F, PL).dot(F.T) + W
# force P to be symmetric
self.P = (self.P + self.P.T) / 2
# pylint: disable=bare-except
try:
self.z = np.copy(z)
except:
self.z = copy.deepcopy(z)
def predict(self, u=0):
"""
Predict next position.
Parameters
----------
u : ndarray
Optional control vector. If non-zero, it is multiplied by `B`
to create the control input into the system.
"""
# x = Fx + Bu
self.x = dot(self.F, self.x) + dot(self.B, u)
    def batch_filter(self, Zs, update_first=False, saver=None):
""" Batch processes a sequences of measurements.
Parameters
----------
Zs : list-like
list of measurements at each time step `self.dt` Missing
measurements must be represented by 'None'.
update_first : bool, default=False, optional,
controls whether the order of operations is update followed by
predict, or predict followed by update.
saver : filterpy.common.Saver, optional
filterpy.common.Saver object. If provided, saver.save() will be
called after every epoch
Returns
-------
means: ndarray ((n, dim_x, 1))
array of the state for each time step. Each entry is an np.array.
In other words `means[k,:]` is the state at step `k`.
covariance: ndarray((n, dim_x, dim_x))
array of the covariances for each time step. In other words
`covariance[k, :, :]` is the covariance at step `k`.
"""
n = np.size(Zs, 0)
# mean estimates from H-Infinity Filter
means = zeros((n, self.dim_x, 1))
# state covariances from H-Infinity Filter
covariances = zeros((n, self.dim_x, self.dim_x))
if update_first:
for i, z in enumerate(Zs):
self.update(z)
means[i, :] = self.x
covariances[i, :, :] = self.P
self.predict()
if saver is not None:
saver.save()
else:
for i, z in enumerate(Zs):
self.predict()
self.update(z)
means[i, :] = self.x
covariances[i, :, :] = self.P
if saver is not None:
saver.save()
return (means, covariances)
def get_prediction(self, u=0):
""" Predicts the next state of the filter and returns it. Does not
alter the state of the filter.
Parameters
----------
u : ndarray
optional control input
Returns
-------
x : ndarray
State vector of the prediction.
"""
return dot(self.F, self.x) + dot(self.B, u)
def residual_of(self, z):
""" returns the residual for the given measurement (z). Does not alter
the state of the filter.
"""
return z - dot(self.H, self.x)
def measurement_of_state(self, x):
""" Helper function that converts a state into a measurement.
Parameters
----------
x : ndarray
H-Infinity state vector
Returns
-------
z : ndarray
measurement corresponding to the given state
"""
return dot(self.H, x)
@property
def V(self):
""" measurement noise matrix"""
return self._V
@V.setter
def V(self, value):
""" measurement noise matrix"""
if np.isscalar(value):
self._V = np.array([[value]], dtype=float)
else:
self._V = value
self._V_inv = linalg.inv(self._V)
def __repr__(self):
return '\n'.join([
'HInfinityFilter object',
pretty_str('dim_x', self.dim_x),
pretty_str('dim_z', self.dim_z),
pretty_str('dim_u', self.dim_u),
            pretty_str('gamma', self.gamma),
pretty_str('x', self.x),
pretty_str('P', self.P),
pretty_str('F', self.F),
pretty_str('Q', self.Q),
pretty_str('V', self.V),
pretty_str('W', self.W),
pretty_str('K', self.K),
pretty_str('y', self.y),
])
|
from homeassistant.components.group import GroupIntegrationRegistry
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_TRIGGERED,
STATE_OFF,
)
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
@callback
def async_describe_on_off_states(
hass: HomeAssistantType, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
registry.on_off_states(
{
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_TRIGGERED,
},
STATE_OFF,
)
|
import copy
from datetime import datetime, timedelta
import logging
import re
import caldav
import voluptuous as vol
from homeassistant.components.calendar import (
ENTITY_ID_FORMAT,
PLATFORM_SCHEMA,
CalendarEventDevice,
calculate_offset,
get_date,
is_offset_reached,
)
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_URL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.util import Throttle, dt
_LOGGER = logging.getLogger(__name__)
CONF_CALENDARS = "calendars"
CONF_CUSTOM_CALENDARS = "custom_calendars"
CONF_CALENDAR = "calendar"
CONF_SEARCH = "search"
CONF_DAYS = "days"
OFFSET = "!!"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
# pylint: disable=no-value-for-parameter
vol.Required(CONF_URL): vol.Url(),
vol.Optional(CONF_CALENDARS, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Inclusive(CONF_USERNAME, "authentication"): cv.string,
vol.Inclusive(CONF_PASSWORD, "authentication"): cv.string,
vol.Optional(CONF_CUSTOM_CALENDARS, default=[]): vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_CALENDAR): cv.string,
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SEARCH): cv.string,
}
)
],
),
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Optional(CONF_DAYS, default=1): cv.positive_int,
}
)
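# Example configuration.yaml entry matching PLATFORM_SCHEMA above (host,
# credentials and filters are illustrative; the platform key is assumed to be
# "caldav"):
#
#   calendar:
#     - platform: caldav
#       url: https://nextcloud.example.com/remote.php/dav
#       username: !secret caldav_username
#       password: !secret caldav_password
#       calendars:
#         - Personal
#       custom_calendars:
#         - name: Work
#           calendar: Personal
#           search: "Meeting.*"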
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
def setup_platform(hass, config, add_entities, disc_info=None):
"""Set up the WebDav Calendar platform."""
url = config[CONF_URL]
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
days = config[CONF_DAYS]
client = caldav.DAVClient(
url, None, username, password, ssl_verify_cert=config[CONF_VERIFY_SSL]
)
calendars = client.principal().calendars()
calendar_devices = []
for calendar in list(calendars):
# If a calendar name was given in the configuration,
# ignore all the others
if config[CONF_CALENDARS] and calendar.name not in config[CONF_CALENDARS]:
_LOGGER.debug("Ignoring calendar '%s'", calendar.name)
continue
# Create additional calendars based on custom filtering rules
for cust_calendar in config[CONF_CUSTOM_CALENDARS]:
# Check that the base calendar matches
if cust_calendar[CONF_CALENDAR] != calendar.name:
continue
name = cust_calendar[CONF_NAME]
device_id = f"{cust_calendar[CONF_CALENDAR]} {cust_calendar[CONF_NAME]}"
entity_id = generate_entity_id(ENTITY_ID_FORMAT, device_id, hass=hass)
calendar_devices.append(
WebDavCalendarEventDevice(
name, calendar, entity_id, days, True, cust_calendar[CONF_SEARCH]
)
)
# Create a default calendar if there was no custom one
if not config[CONF_CUSTOM_CALENDARS]:
name = calendar.name
device_id = calendar.name
entity_id = generate_entity_id(ENTITY_ID_FORMAT, device_id, hass=hass)
calendar_devices.append(
WebDavCalendarEventDevice(name, calendar, entity_id, days)
)
add_entities(calendar_devices, True)
class WebDavCalendarEventDevice(CalendarEventDevice):
"""A device for getting the next Task from a WebDav Calendar."""
def __init__(self, name, calendar, entity_id, days, all_day=False, search=None):
"""Create the WebDav Calendar Event Device."""
self.data = WebDavCalendarData(calendar, days, all_day, search)
self.entity_id = entity_id
self._event = None
self._name = name
self._offset_reached = False
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return {"offset_reached": self._offset_reached}
@property
def event(self):
"""Return the next upcoming event."""
return self._event
@property
def name(self):
"""Return the name of the entity."""
return self._name
async def async_get_events(self, hass, start_date, end_date):
"""Get all events in a specific time frame."""
return await self.data.async_get_events(hass, start_date, end_date)
def update(self):
"""Update event data."""
self.data.update()
event = copy.deepcopy(self.data.event)
if event is None:
self._event = event
return
event = calculate_offset(event, OFFSET)
self._offset_reached = is_offset_reached(event)
self._event = event
class WebDavCalendarData:
"""Class to utilize the calendar dav client object to get next event."""
def __init__(self, calendar, days, include_all_day, search):
"""Set up how we are going to search the WebDav calendar."""
self.calendar = calendar
self.days = days
self.include_all_day = include_all_day
self.search = search
self.event = None
async def async_get_events(self, hass, start_date, end_date):
"""Get all events in a specific time frame."""
# Get event list from the current calendar
vevent_list = await hass.async_add_executor_job(
self.calendar.date_search, start_date, end_date
)
event_list = []
for event in vevent_list:
vevent = event.instance.vevent
uid = None
if hasattr(vevent, "uid"):
uid = vevent.uid.value
data = {
"uid": uid,
"summary": vevent.summary.value,
"start": self.get_hass_date(vevent.dtstart.value),
"end": self.get_hass_date(self.get_end_date(vevent)),
"location": self.get_attr_value(vevent, "location"),
"description": self.get_attr_value(vevent, "description"),
}
data["start"] = get_date(data["start"]).isoformat()
data["end"] = get_date(data["end"]).isoformat()
event_list.append(data)
return event_list
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data."""
start_of_today = dt.start_of_local_day()
start_of_tomorrow = dt.start_of_local_day() + timedelta(days=self.days)
# We have to retrieve the results for the whole day as the server
# won't return events that have already started
results = self.calendar.date_search(start_of_today, start_of_tomorrow)
# Create new events for each recurrence of an event that happens today.
# For recurring events, some servers return the original event with recurrence rules
# and they would not be properly parsed using their original start/end dates.
new_events = []
for event in results:
vevent = event.instance.vevent
for start_dt in vevent.getrruleset() or []:
_start_of_today = start_of_today
_start_of_tomorrow = start_of_tomorrow
if self.is_all_day(vevent):
start_dt = start_dt.date()
_start_of_today = _start_of_today.date()
_start_of_tomorrow = _start_of_tomorrow.date()
if _start_of_today <= start_dt < _start_of_tomorrow:
new_event = event.copy()
new_vevent = new_event.instance.vevent
if hasattr(new_vevent, "dtend"):
dur = new_vevent.dtend.value - new_vevent.dtstart.value
new_vevent.dtend.value = start_dt + dur
new_vevent.dtstart.value = start_dt
new_events.append(new_event)
elif _start_of_tomorrow <= start_dt:
break
vevents = [event.instance.vevent for event in results + new_events]
# dtstart can be a date or datetime depending if the event lasts a
# whole day. Convert everything to datetime to be able to sort it
vevents.sort(key=lambda x: self.to_datetime(x.dtstart.value))
vevent = next(
(
vevent
for vevent in vevents
if (
self.is_matching(vevent, self.search)
and (not self.is_all_day(vevent) or self.include_all_day)
and not self.is_over(vevent)
)
),
None,
)
# If no matching event could be found
if vevent is None:
_LOGGER.debug(
"No matching event found in the %d results for %s",
len(vevents),
self.calendar.name,
)
self.event = None
return
# Populate the entity attributes with the event values
self.event = {
"summary": vevent.summary.value,
"start": self.get_hass_date(vevent.dtstart.value),
"end": self.get_hass_date(self.get_end_date(vevent)),
"location": self.get_attr_value(vevent, "location"),
"description": self.get_attr_value(vevent, "description"),
}
@staticmethod
def is_matching(vevent, search):
"""Return if the event matches the filter criteria."""
if search is None:
return True
pattern = re.compile(search)
return (
hasattr(vevent, "summary")
and pattern.match(vevent.summary.value)
or hasattr(vevent, "location")
and pattern.match(vevent.location.value)
or hasattr(vevent, "description")
and pattern.match(vevent.description.value)
)
@staticmethod
def is_all_day(vevent):
"""Return if the event last the whole day."""
return not isinstance(vevent.dtstart.value, datetime)
@staticmethod
def is_over(vevent):
"""Return if the event is over."""
return dt.now() >= WebDavCalendarData.to_datetime(
WebDavCalendarData.get_end_date(vevent)
)
@staticmethod
def get_hass_date(obj):
"""Return if the event matches."""
if isinstance(obj, datetime):
return {"dateTime": obj.isoformat()}
return {"date": obj.isoformat()}
@staticmethod
def to_datetime(obj):
"""Return a datetime."""
if isinstance(obj, datetime):
if obj.tzinfo is None:
                # A floating value, not bound to any time zone in particular;
                # it represents the same time regardless of which time zone
                # is currently being observed.
return obj.replace(tzinfo=dt.DEFAULT_TIME_ZONE)
return obj
return dt.as_local(dt.dt.datetime.combine(obj, dt.dt.time.min))
@staticmethod
def get_attr_value(obj, attribute):
"""Return the value of the attribute if defined."""
if hasattr(obj, attribute):
return getattr(obj, attribute).value
return None
@staticmethod
def get_end_date(obj):
"""Return the end datetime as determined by dtend or duration."""
if hasattr(obj, "dtend"):
enddate = obj.dtend.value
elif hasattr(obj, "duration"):
enddate = obj.dtstart.value + obj.duration.value
else:
enddate = obj.dtstart.value + timedelta(days=1)
return enddate
|
import errno
import voluptuous as vol
from wiffi import WiffiTcpServer
from homeassistant import config_entries
from homeassistant.const import CONF_PORT, CONF_TIMEOUT
from homeassistant.core import callback
from .const import ( # pylint: disable=unused-import
DEFAULT_PORT,
DEFAULT_TIMEOUT,
DOMAIN,
)
class WiffiFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Wiffi server setup config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Create Wiffi server setup option flow."""
return OptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow.
        Called after the wiffi integration has been selected in the 'add
        integration' UI. In that case user_input is None and the config flow
        form is shown. This function is also called when the form has been
        submitted; user_input then contains a dict with the entered values.
"""
if user_input is None:
return self._async_show_form()
# received input from form or configuration.yaml
try:
# try to start server to check whether port is in use
server = WiffiTcpServer(user_input[CONF_PORT])
await server.start_server()
await server.close_server()
return self.async_create_entry(
title=f"Port {user_input[CONF_PORT]}", data=user_input
)
except OSError as exc:
if exc.errno == errno.EADDRINUSE:
return self.async_abort(reason="addr_in_use")
return self.async_abort(reason="start_server_failed")
@callback
def _async_show_form(self, errors=None):
"""Show the config flow form to the user."""
data_schema = {vol.Required(CONF_PORT, default=DEFAULT_PORT): int}
return self.async_show_form(
step_id="user", data_schema=vol.Schema(data_schema), errors=errors or {}
)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Wiffi server setup option flow."""
def __init__(self, config_entry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(
CONF_TIMEOUT,
default=self.config_entry.options.get(
CONF_TIMEOUT, DEFAULT_TIMEOUT
),
): int,
}
),
)
|
from __future__ import division
import unittest
import numpy as np
import PIL.Image
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.transforms import resize
from chainercv.utils import generate_random_bbox
from chainercv.utils import mask_to_bbox
from chainercv.utils import scale_mask
@testing.parameterize(
{'mask': np.array(
[[[False, False],
[False, True]]]),
'expected': np.array(
[[[False, False, False, False],
[False, False, False, False],
[False, False, True, True],
[False, False, True, True]]])
}
)
class TestScaleMaskSimple(unittest.TestCase):
def check(self, mask, expected):
in_type = type(mask)
bbox = mask_to_bbox(mask)
size = 4
out_mask = scale_mask(mask, bbox, size)
self.assertIsInstance(out_mask, in_type)
self.assertEqual(out_mask.dtype, np.bool)
np.testing.assert_equal(
cuda.to_cpu(out_mask),
cuda.to_cpu(expected))
def test_scale_mask_simple_cpu(self):
self.check(self.mask, self.expected)
@attr.gpu
def test_scale_mask_simple_gpu(self):
self.check(cuda.to_gpu(self.mask), cuda.to_gpu(self.expected))
class TestScaleMaskCompareResize(unittest.TestCase):
def test(self):
H = 80
W = 90
n_inst = 10
mask = np.zeros((n_inst, H, W), dtype=np.bool)
bbox = generate_random_bbox(n_inst, (H, W), 10, 30).astype(np.int32)
for i, bb in enumerate(bbox):
y_min, x_min, y_max, x_max = bb
m = np.random.randint(0, 2, size=(y_max - y_min, x_max - x_min))
m[5, 5] = 1 # At least one element is one
mask[i, y_min:y_max, x_min:x_max] = m
bbox = mask_to_bbox(mask)
size = H * 2
out_H = size
out_W = W * 2
out_mask = scale_mask(mask, bbox, size)
expected = resize(
mask.astype(np.float32), (out_H, out_W),
interpolation=PIL.Image.NEAREST).astype(np.bool)
np.testing.assert_equal(out_mask, expected)
testing.run_module(__name__, __file__)
|
import numpy as np
import unittest
import chainer
from chainer.backends import cuda
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.faster_rcnn import RegionProposalNetwork
@testing.parameterize(*(testing.product({
'B': [1],
'train': [True, False],
'scales': [None, 1.0, 2.0, [1.0]],
}) + testing.product({
'B': [2],
'train': [True, False],
'scales': [None, 1.0, 2.0, [1.0, 2.0]],
})))
class TestRegionProposalNetwork(unittest.TestCase):
def setUp(self):
feat_stride = 4
C = 16
H = 8
W = 12
self.proposal_creator_params = {
'n_train_post_nms': 10,
'n_test_post_nms': 5}
self.ratios = [0.25, 4]
self.anchor_scales = [2, 4]
self.link = RegionProposalNetwork(
in_channels=C, mid_channels=24,
ratios=self.ratios, anchor_scales=self.anchor_scales,
feat_stride=feat_stride,
proposal_creator_params=self.proposal_creator_params
)
self.x = np.random.uniform(size=(self.B, C, H, W)).astype(np.float32)
self.img_size = (H * feat_stride, W * feat_stride)
def _check_call(self, x, img_size, scales):
_, _, H, W = x.shape
with chainer.using_config('train', self.train):
rpn_locs, rpn_scores, rois, roi_indices, anchor = self.link(
chainer.Variable(x), img_size, scales)
self.assertIsInstance(rpn_locs, chainer.Variable)
self.assertIsInstance(rpn_locs.array, type(x))
self.assertIsInstance(rpn_scores, chainer.Variable)
self.assertIsInstance(rpn_scores.array, type(x))
A = len(self.ratios) * len(self.anchor_scales)
self.assertEqual(rpn_locs.shape, (self.B, H * W * A, 4))
self.assertEqual(rpn_scores.shape, (self.B, H * W * A, 2))
if self.train:
roi_size = self.proposal_creator_params[
'n_train_post_nms']
else:
roi_size = self.proposal_creator_params[
'n_test_post_nms']
self.assertIsInstance(rois, type(x))
self.assertIsInstance(roi_indices, type(x))
self.assertLessEqual(rois.shape[0], self.B * roi_size)
self.assertLessEqual(roi_indices.shape[0], self.B * roi_size)
        # Depending on the randomly generated bounding boxes, this may not hold.
if roi_indices.shape[0] == self.B * roi_size:
for i in range(self.B):
s = slice(i * roi_size, (i + 1) * roi_size)
np.testing.assert_equal(
cuda.to_cpu(roi_indices[s]),
i * np.ones((roi_size,), dtype=np.int32))
self.assertIsInstance(anchor, type(x))
self.assertEqual(anchor.shape, (A * H * W, 4))
def test_call_cpu(self):
self._check_call(self.x, self.img_size, self.scales)
@attr.gpu
def test_call_gpu(self):
self.link.to_gpu()
self._check_call(
chainer.backends.cuda.to_gpu(self.x), self.img_size, self.scales)
testing.run_module(__name__, __file__)
|
from django.conf import settings
from django.db import models
class ContributorAgreementManager(models.Manager):
def has_agreed(self, user, component):
cache_key = (user.pk, component.pk)
if cache_key not in user.cla_cache:
user.cla_cache[cache_key] = self.filter(
component=component, user=user
).exists()
return user.cla_cache[cache_key]
def create(self, user, component, **kwargs):
user.cla_cache[(user.pk, component.pk)] = True
return super().create(user=user, component=component, **kwargs)
def order(self):
return self.order_by("component__project__name", "component__name")
class ContributorAgreement(models.Model):
user = models.ForeignKey(
settings.AUTH_USER_MODEL, on_delete=models.deletion.CASCADE
)
component = models.ForeignKey("Component", on_delete=models.deletion.CASCADE)
timestamp = models.DateTimeField(auto_now=True)
objects = ContributorAgreementManager()
class Meta:
unique_together = [("user", "component")]
verbose_name = "contributor agreement"
verbose_name_plural = "contributor agreements"
def __str__(self):
return f"{self.user.username}:{self.component}"
|
from datetime import timedelta
import logging
from pyaftership.tracker import Tracking
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_API_KEY, CONF_NAME, HTTP_OK
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Information provided by AfterShip"
ATTR_TRACKINGS = "trackings"
BASE = "https://track.aftership.com/"
CONF_SLUG = "slug"
CONF_TITLE = "title"
CONF_TRACKING_NUMBER = "tracking_number"
DEFAULT_NAME = "aftership"
UPDATE_TOPIC = f"{DOMAIN}_update"
ICON = "mdi:package-variant-closed"
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
SERVICE_ADD_TRACKING = "add_tracking"
SERVICE_REMOVE_TRACKING = "remove_tracking"
ADD_TRACKING_SERVICE_SCHEMA = vol.Schema(
{
vol.Required(CONF_TRACKING_NUMBER): cv.string,
vol.Optional(CONF_TITLE): cv.string,
vol.Optional(CONF_SLUG): cv.string,
}
)
REMOVE_TRACKING_SERVICE_SCHEMA = vol.Schema(
{vol.Required(CONF_SLUG): cv.string, vol.Required(CONF_TRACKING_NUMBER): cv.string}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the AfterShip sensor platform."""
apikey = config[CONF_API_KEY]
name = config[CONF_NAME]
session = async_get_clientsession(hass)
aftership = Tracking(hass.loop, session, apikey)
await aftership.get_trackings()
if not aftership.meta or aftership.meta["code"] != HTTP_OK:
_LOGGER.error(
"No tracking data found. Check API key is correct: %s", aftership.meta
)
return
instance = AfterShipSensor(aftership, name)
async_add_entities([instance], True)
async def handle_add_tracking(call):
"""Call when a user adds a new Aftership tracking from Home Assistant."""
title = call.data.get(CONF_TITLE)
slug = call.data.get(CONF_SLUG)
tracking_number = call.data[CONF_TRACKING_NUMBER]
await aftership.add_package_tracking(tracking_number, title, slug)
async_dispatcher_send(hass, UPDATE_TOPIC)
hass.services.async_register(
DOMAIN,
SERVICE_ADD_TRACKING,
handle_add_tracking,
schema=ADD_TRACKING_SERVICE_SCHEMA,
)
async def handle_remove_tracking(call):
"""Call when a user removes an Aftership tracking from Home Assistant."""
slug = call.data[CONF_SLUG]
tracking_number = call.data[CONF_TRACKING_NUMBER]
await aftership.remove_package_tracking(slug, tracking_number)
async_dispatcher_send(hass, UPDATE_TOPIC)
hass.services.async_register(
DOMAIN,
SERVICE_REMOVE_TRACKING,
handle_remove_tracking,
schema=REMOVE_TRACKING_SERVICE_SCHEMA,
)
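    # Illustrative service calls (assuming DOMAIN resolves to "aftership";
    # the tracking number, title and slug values below are made up):
    #   aftership.add_tracking    -> tracking_number: "1Z999AA10123456784", title: "Shoes", slug: "ups"
    #   aftership.remove_tracking -> tracking_number: "1Z999AA10123456784", slug: "ups"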
class AfterShipSensor(Entity):
"""Representation of a AfterShip sensor."""
def __init__(self, aftership, name):
"""Initialize the sensor."""
self._attributes = {}
self._name = name
self._state = None
self.aftership = aftership
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return "packages"
@property
def device_state_attributes(self):
"""Return attributes for the sensor."""
return self._attributes
@property
def icon(self):
"""Icon to use in the frontend."""
return ICON
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
self.hass.helpers.dispatcher.async_dispatcher_connect(
UPDATE_TOPIC, self._force_update
)
)
async def _force_update(self):
"""Force update of data."""
await self.async_update(no_throttle=True)
self.async_write_ha_state()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self, **kwargs):
"""Get the latest data from the AfterShip API."""
await self.aftership.get_trackings()
if not self.aftership.meta:
_LOGGER.error("Unknown errors when querying")
return
if self.aftership.meta["code"] != HTTP_OK:
_LOGGER.error(
"Errors when querying AfterShip. %s", str(self.aftership.meta)
)
return
status_to_ignore = {"delivered"}
status_counts = {}
trackings = []
not_delivered_count = 0
for track in self.aftership.trackings["trackings"]:
status = track["tag"].lower()
name = (
track["tracking_number"] if track["title"] is None else track["title"]
)
last_checkpoint = (
"Shipment pending"
if track["tag"] == "Pending"
else track["checkpoints"][-1]
)
status_counts[status] = status_counts.get(status, 0) + 1
trackings.append(
{
"name": name,
"tracking_number": track["tracking_number"],
"slug": track["slug"],
"link": f"{BASE}{track['slug']}/{track['tracking_number']}",
"last_update": track["updated_at"],
"expected_delivery": track["expected_delivery"],
"status": track["tag"],
"last_checkpoint": last_checkpoint,
}
)
if status not in status_to_ignore:
not_delivered_count += 1
else:
_LOGGER.debug("Ignoring %s as it has status: %s", name, status)
self._attributes = {
ATTR_ATTRIBUTION: ATTRIBUTION,
**status_counts,
ATTR_TRACKINGS: trackings,
}
self._state = not_delivered_count
|
from ast import literal_eval
from nikola.plugin_categories import ShortcodePlugin
from nikola.utils import req_missing, load_data
try:
import pygal
except ImportError:
pygal = None
_site = None
class ChartShortcode(ShortcodePlugin):
"""Plugin for chart shortcode."""
name = "chart"
def handler(self, chart_type, **_options):
"""Generate chart using Pygal."""
if pygal is None:
msg = req_missing(
['pygal'], 'use the Chart directive', optional=True)
return '<div class="text-error">{0}</div>'.format(msg)
options = {}
chart_data = []
_options.pop('post', None)
_options.pop('site')
data = _options.pop('data')
for line in data.splitlines():
line = line.strip()
if line:
chart_data.append(literal_eval('({0})'.format(line)))
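        # For example (an illustrative line, not taken from any real post), a line such as
        #   'Firefox', [None, None, 0, 16.6, 25]
        # is turned by literal_eval into the tuple ('Firefox', [None, None, 0, 16.6, 25]),
        # i.e. a (label, series) pair that chart.add() consumes below.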
if 'data_file' in _options:
options = load_data(_options['data_file'])
_options.pop('data_file')
if not chart_data: # If there is data in the document, it wins
for k, v in options.pop('data', {}).items():
chart_data.append((k, v))
options.update(_options)
style_name = options.pop('style', 'BlueStyle')
if '(' in style_name: # Parametric style
style = eval('pygal.style.' + style_name)
else:
style = getattr(pygal.style, style_name)
for k, v in options.items():
try:
options[k] = literal_eval(v)
except Exception:
options[k] = v
chart = pygal
for o in chart_type.split('.'):
chart = getattr(chart, o)
chart = chart(style=style)
if _site and _site.invariant:
chart.no_prefix = True
chart.config(**options)
for label, series in chart_data:
chart.add(label, series)
return chart.render().decode('utf8')
|
import pytest
import voluptuous as vol
from homeassistant.components.ecobee.util import ecobee_date, ecobee_time
def test_ecobee_date_with_valid_input():
"""Test that the date function returns the expected result."""
test_input = "2019-09-27"
assert ecobee_date(test_input) == test_input
def test_ecobee_date_with_invalid_input():
"""Test that the date function raises the expected exception."""
test_input = "20190927"
with pytest.raises(vol.Invalid):
ecobee_date(test_input)
def test_ecobee_time_with_valid_input():
"""Test that the time function returns the expected result."""
test_input = "20:55:15"
assert ecobee_time(test_input) == test_input
def test_ecobee_time_with_invalid_input():
"""Test that the time function raises the expected exception."""
test_input = "20:55"
with pytest.raises(vol.Invalid):
ecobee_time(test_input)
|
import copy
import datetime
import tempfile
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker.dpb_service import BaseDpbService
from perfkitbenchmarker.providers.aws import aws_dpb_emr
from perfkitbenchmarker.providers.gcp import gcp_dpb_dataflow
from perfkitbenchmarker.providers.gcp import gcp_dpb_dataproc
BENCHMARK_NAME = 'dpb_wordcount_benchmark'
BENCHMARK_CONFIG = """
dpb_wordcount_benchmark:
description: Run word count on dataflow and dataproc
dpb_service:
service_type: dataproc
worker_group:
vm_spec:
GCP:
machine_type: n1-standard-1
boot_disk_size: 500
AWS:
machine_type: m3.medium
disk_spec:
GCP:
disk_type: nodisk
AWS:
disk_size: 500
disk_type: gp2
worker_count: 2
"""
WORD_COUNT_CONFIGURATION = dict(
[
(dpb_service.DATAPROC, (gcp_dpb_dataproc.SPARK_SAMPLE_LOCATION,
'org.apache.spark.examples.JavaWordCount',
BaseDpbService.SPARK_JOB_TYPE)),
(dpb_service.DATAFLOW, (None,
'org.example.WordCount',
BaseDpbService.DATAFLOW_JOB_TYPE)),
(dpb_service.EMR, (aws_dpb_emr.SPARK_SAMPLE_LOCATION,
'org.apache.spark.examples.JavaWordCount',
BaseDpbService.SPARK_JOB_TYPE))
]
)
flags.DEFINE_string('dpb_wordcount_input', None, 'Input for word count')
flags.DEFINE_enum('dpb_wordcount_fs', BaseDpbService.GCS_FS,
[BaseDpbService.GCS_FS, BaseDpbService.S3_FS],
'File System to use for the job output')
flags.DEFINE_string('dpb_wordcount_out_base', None,
'Base directory for word count output')
FLAGS = flags.FLAGS
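# Illustrative invocation (a sketch; the bucket path is hypothetical, the flag
# names are the ones defined above, and --benchmarks is the standard PKB selector):
#   ./pkb.py --benchmarks=dpb_wordcount_benchmark \
#       --dpb_wordcount_fs=gs --dpb_wordcount_input=my-bucket/input/words.txt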
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
if (FLAGS.dpb_wordcount_input is None and
FLAGS.dpb_wordcount_fs != BaseDpbService.GCS_FS):
raise errors.Config.InvalidValue('Invalid default input directory.')
# Get handle to the dpb service
dpb_service_class = dpb_service.GetDpbServiceClass(
benchmark_config.dpb_service.service_type)
dpb_service_class.CheckPrerequisites(benchmark_config)
def Prepare(benchmark_spec):
pass
def Run(benchmark_spec):
# Configuring input location for the word count job
if FLAGS.dpb_wordcount_input is None:
input_location = gcp_dpb_dataflow.DATAFLOW_WC_INPUT
else:
input_location = '{}://{}'.format(FLAGS.dpb_wordcount_fs,
FLAGS.dpb_wordcount_input)
# Get handle to the dpb service
dpb_service_instance = benchmark_spec.dpb_service
# Create a file handle to contain the response from running the job on
# the dpb service
stdout_file = tempfile.NamedTemporaryFile(suffix='.stdout',
prefix='dpb_wordcount_benchmark',
delete=False)
stdout_file.close()
  # Select the submit-job parameters for the specific dpb service being benchmarked
job_arguments = []
jarfile, classname, job_type = _GetJobArguments(
dpb_service_instance.SERVICE_TYPE)
if FLAGS.dpb_job_classname:
classname = FLAGS.dpb_job_classname
if dpb_service_instance.SERVICE_TYPE == dpb_service.DATAFLOW:
jarfile = FLAGS.dpb_job_jarfile
job_arguments.append('--stagingLocation={}'.format(
FLAGS.dpb_dataflow_staging_location))
job_arguments.append('--runner={}'.format(FLAGS.dpb_dataflow_runner))
job_arguments.append('--inputFile={}'.format(input_location))
if not FLAGS.dpb_wordcount_out_base:
base_out = FLAGS.dpb_dataflow_staging_location
else:
base_out = 'gs://{}'.format(FLAGS.dpb_wordcount_out_base)
job_arguments.append('--output={}/output/'.format(base_out))
else:
job_arguments = [input_location]
# TODO (saksena): Finalize more stats to gather
results = []
metadata = copy.copy(dpb_service_instance.GetMetadata())
metadata.update({'input_location': input_location})
start = datetime.datetime.now()
dpb_service_instance.SubmitJob(
jarfile=jarfile,
classname=classname,
job_arguments=job_arguments,
job_stdout_file=stdout_file,
job_type=job_type)
end_time = datetime.datetime.now()
run_time = (end_time - start).total_seconds()
results.append(sample.Sample('run_time', run_time, 'seconds', metadata))
return results
def Cleanup(benchmark_spec):
pass
def _GetJobArguments(dpb_service_type):
"""Returns the arguments for word count job based on runtime service."""
if dpb_service_type not in WORD_COUNT_CONFIGURATION:
raise NotImplementedError
else:
return WORD_COUNT_CONFIGURATION[dpb_service_type]
|
from pylatex.base_classes import Environment, CommandBase, Arguments
from pylatex.package import Package
from pylatex import Document, Section, UnsafeCommand
from pylatex.utils import NoEscape
class ExampleEnvironment(Environment):
"""
A class representing a custom LaTeX environment.
This class represents a custom LaTeX environment named
``exampleEnvironment``.
"""
_latex_name = 'exampleEnvironment'
packages = [Package('mdframed')]
class ExampleCommand(CommandBase):
"""
A class representing a custom LaTeX command.
This class represents a custom LaTeX command named
``exampleCommand``.
"""
_latex_name = 'exampleCommand'
packages = [Package('color')]
# Create a new document
doc = Document()
with doc.create(Section('Custom commands')):
doc.append(NoEscape(
r"""
The following is a demonstration of a custom \LaTeX{}
command with a couple of parameters.
"""))
# Define the new command
new_comm = UnsafeCommand('newcommand', r'\exampleCommand', options=3,
extra_arguments=r'\color{#1} #2 #3 \color{black}')
doc.append(new_comm)
# Use our newly created command with different arguments
doc.append(ExampleCommand(arguments=Arguments('blue', 'Hello', 'World!')))
doc.append(ExampleCommand(arguments=Arguments('green', 'Hello', 'World!')))
doc.append(ExampleCommand(arguments=Arguments('red', 'Hello', 'World!')))
with doc.create(Section('Custom environments')):
doc.append(NoEscape(
r"""
The following is a demonstration of a custom \LaTeX{}
environment using the mdframed package.
"""))
# Define a style for our box
mdf_style_definition = UnsafeCommand('mdfdefinestyle',
arguments=['my_style',
('linecolor=#1,'
'linewidth=#2,'
'leftmargin=1cm,'
'leftmargin=1cm')])
# Define the new environment using the style definition above
new_env = UnsafeCommand('newenvironment', 'exampleEnvironment', options=2,
extra_arguments=[
mdf_style_definition.dumps() +
r'\begin{mdframed}[style=my_style]',
r'\end{mdframed}'])
doc.append(new_env)
# Usage of the newly created environment
with doc.create(
ExampleEnvironment(arguments=Arguments('red', 3))) as environment:
environment.append('This is the actual content')
# Generate pdf
doc.generate_pdf('own_commands_ex', clean_tex=False)
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.vacuum import DOMAIN, STATE_CLEANING, STATE_DOCKED
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a vacuum."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "cleaning",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "docked",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set("vacuum.entity", STATE_DOCKED)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "vacuum.entity",
"type": "cleaning",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"cleaning - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "vacuum.entity",
"type": "docked",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"docked - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}}"
)
},
},
},
]
},
)
# Fake that the entity is cleaning
hass.states.async_set("vacuum.entity", STATE_CLEANING)
await hass.async_block_till_done()
assert len(calls) == 1
assert (
calls[0].data["some"] == "cleaning - device - vacuum.entity - docked - cleaning"
)
# Fake that the entity is docked
hass.states.async_set("vacuum.entity", STATE_DOCKED)
await hass.async_block_till_done()
assert len(calls) == 2
assert (
calls[1].data["some"] == "docked - device - vacuum.entity - cleaning - docked"
)
|
import pytest
import cerberus
from cerberus.base import UnconcernedValidator
from cerberus.tests import assert_fail, assert_success
from cerberus.tests.conftest import sample_schema
def test_contextual_data_preservation():
class InheritedValidator(cerberus.Validator):
def __init__(self, *args, **kwargs):
if 'working_dir' in kwargs:
self.working_dir = kwargs['working_dir']
super().__init__(*args, **kwargs)
def _check_with_test(self, field, value):
if self.working_dir:
return True
assert 'test' in InheritedValidator.checkers
v = InheritedValidator(
{'test': {'type': 'list', 'itemsrules': {'check_with': 'test'}}},
working_dir='/tmp',
)
assert_success({'test': ['foo']}, validator=v)
def test_docstring_parsing():
class CustomValidator(cerberus.Validator):
def _validate_foo(self, argument, field, value):
""" {'type': 'zap'} """
pass
def _validate_bar(self, value):
"""
            Test the barrenness of a value.
The rule's arguments are validated against this schema:
{'type': 'boolean'}
"""
pass
assert 'foo' in CustomValidator.validation_rules
assert 'bar' in CustomValidator.validation_rules
def test_check_with_method():
# https://github.com/pyeve/cerberus/issues/265
class MyValidator(cerberus.Validator):
def _check_with_oddity(self, field, value):
if not value & 1:
self._error(field, "Must be an odd number")
v = MyValidator(schema={'amount': {'check_with': 'oddity'}})
assert_success(document={'amount': 1}, validator=v)
assert_fail(
document={'amount': 2},
validator=v,
error=('amount', (), cerberus.errors.CUSTOM, None, ('Must be an odd number',)),
)
@pytest.mark.parametrize(
'cls',
(
UnconcernedValidator,
cerberus.validator_factory('NonvalidatingValidator', validated_schema=False),
),
)
def test_schema_validation_can_be_disabled(cls):
v = cls(schema=sample_schema)
assert v.validate(document={'an_integer': 1})
assert not v.validate(document={'an_integer': 'a'})
v.schema['an_integer']['tüpe'] = 'int'
with pytest.raises(RuntimeError):
v.validate(document={'an_integer': 1})
v.schema['an_integer'].pop('tüpe')
def test_custom_datatype_rule():
class MyValidator(cerberus.Validator):
types_mapping = cerberus.Validator.types_mapping.copy()
types_mapping['number'] = cerberus.TypeDefinition('number', (int,), ())
def _validate_min_number(self, min_number, field, value):
""" {'type': 'number'} """
if value < min_number:
self._error(field, 'Below the min')
schema = {'test_field': {'min_number': 1, 'type': 'number'}}
validator = MyValidator(schema)
assert_fail(
{'test_field': 0.0},
validator=validator,
error=('test_field', ('test_field', 'type'), cerberus.errors.TYPE, ('number',)),
)
assert_fail(
{'test_field': 0},
validator=validator,
error=('test_field', (), cerberus.errors.CUSTOM, None, ('Below the min',)),
)
assert validator.errors == {'test_field': ['Below the min']}
|
import hmac
from typing import Any, Dict, Optional, cast
import voluptuous as vol
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from .. import AuthManager
from ..models import Credentials, User, UserMeta
AUTH_PROVIDER_TYPE = "legacy_api_password"
CONF_API_PASSWORD = "api_password"
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
{vol.Required(CONF_API_PASSWORD): cv.string}, extra=vol.PREVENT_EXTRA
)
LEGACY_USER_NAME = "Legacy API password user"
class InvalidAuthError(HomeAssistantError):
"""Raised when submitting invalid authentication."""
async def async_validate_password(hass: HomeAssistant, password: str) -> Optional[User]:
"""Return a user if password is valid. None if not."""
auth = cast(AuthManager, hass.auth) # type: ignore
providers = auth.get_auth_providers(AUTH_PROVIDER_TYPE)
if not providers:
raise ValueError("Legacy API password provider not found")
try:
provider = cast(LegacyApiPasswordAuthProvider, providers[0])
provider.async_validate_login(password)
return await auth.async_get_or_create_user(
await provider.async_get_or_create_credentials({})
)
except InvalidAuthError:
return None
@AUTH_PROVIDERS.register(AUTH_PROVIDER_TYPE)
class LegacyApiPasswordAuthProvider(AuthProvider):
"""An auth provider support legacy api_password."""
DEFAULT_TITLE = "Legacy API Password"
@property
def api_password(self) -> str:
"""Return api_password."""
return str(self.config[CONF_API_PASSWORD])
async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
"""Return a flow to login."""
return LegacyLoginFlow(self)
@callback
def async_validate_login(self, password: str) -> None:
"""Validate password."""
api_password = str(self.config[CONF_API_PASSWORD])
if not hmac.compare_digest(
api_password.encode("utf-8"), password.encode("utf-8")
):
raise InvalidAuthError
async def async_get_or_create_credentials(
self, flow_result: Dict[str, str]
) -> Credentials:
"""Return credentials for this login."""
credentials = await self.async_credentials()
if credentials:
return credentials[0]
return self.async_create_credentials({})
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
"""
Return info for the user.
Will be used to populate info when creating a new user.
"""
return UserMeta(name=LEGACY_USER_NAME, is_active=True)
class LegacyLoginFlow(LoginFlow):
"""Handler for the login flow."""
async def async_step_init(
self, user_input: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""Handle the step of the form."""
errors = {}
if user_input is not None:
try:
cast(
LegacyApiPasswordAuthProvider, self._auth_provider
).async_validate_login(user_input["password"])
except InvalidAuthError:
errors["base"] = "invalid_auth"
if not errors:
return await self.async_finish({})
return self.async_show_form(
step_id="init", data_schema=vol.Schema({"password": str}), errors=errors
)
|
from ipaddress import IPv4Address, IPv4Network, IPv6Address, IPv6Network, ip_network
from typing import Any, Dict, List, Optional, Union, cast
import voluptuous as vol
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from . import AUTH_PROVIDER_SCHEMA, AUTH_PROVIDERS, AuthProvider, LoginFlow
from ..models import Credentials, UserMeta
IPAddress = Union[IPv4Address, IPv6Address]
IPNetwork = Union[IPv4Network, IPv6Network]
CONF_TRUSTED_NETWORKS = "trusted_networks"
CONF_TRUSTED_USERS = "trusted_users"
CONF_GROUP = "group"
CONF_ALLOW_BYPASS_LOGIN = "allow_bypass_login"
CONFIG_SCHEMA = AUTH_PROVIDER_SCHEMA.extend(
{
vol.Required(CONF_TRUSTED_NETWORKS): vol.All(cv.ensure_list, [ip_network]),
vol.Optional(CONF_TRUSTED_USERS, default={}): vol.Schema(
# we only validate the format of user_id or group_id
{
ip_network: vol.All(
cv.ensure_list,
[
vol.Or(
cv.uuid4_hex,
vol.Schema({vol.Required(CONF_GROUP): cv.uuid4_hex}),
)
],
)
}
),
vol.Optional(CONF_ALLOW_BYPASS_LOGIN, default=False): cv.boolean,
},
extra=vol.PREVENT_EXTRA,
)
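# Illustrative configuration.yaml snippet (a sketch only; the network, user id and
# group id values are made up, and both ids must be uuid4 hex per the schema above):
#   auth_providers:
#     - type: trusted_networks
#       trusted_networks:
#         - 192.168.0.0/24
#       trusted_users:
#         192.168.0.0/24:
#           - a1b2c3d4e5f64a7b8c9d0e1f2a3b4c5d
#           - group: b2c3d4e5f6a74b8c9d0e1f2a3b4c5d6e
#       allow_bypass_login: true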
class InvalidAuthError(HomeAssistantError):
"""Raised when try to access from untrusted networks."""
class InvalidUserError(HomeAssistantError):
"""Raised when try to login as invalid user."""
@AUTH_PROVIDERS.register("trusted_networks")
class TrustedNetworksAuthProvider(AuthProvider):
"""Trusted Networks auth provider.
    Allow passwordless access from trusted networks.
"""
DEFAULT_TITLE = "Trusted Networks"
@property
def trusted_networks(self) -> List[IPNetwork]:
"""Return trusted networks."""
return cast(List[IPNetwork], self.config[CONF_TRUSTED_NETWORKS])
@property
def trusted_users(self) -> Dict[IPNetwork, Any]:
"""Return trusted users per network."""
return cast(Dict[IPNetwork, Any], self.config[CONF_TRUSTED_USERS])
@property
def support_mfa(self) -> bool:
"""Trusted Networks auth provider does not support MFA."""
return False
async def async_login_flow(self, context: Optional[Dict]) -> LoginFlow:
"""Return a flow to login."""
assert context is not None
ip_addr = cast(IPAddress, context.get("ip_address"))
users = await self.store.async_get_users()
available_users = [
user for user in users if not user.system_generated and user.is_active
]
for ip_net, user_or_group_list in self.trusted_users.items():
if ip_addr in ip_net:
user_list = [
user_id
for user_id in user_or_group_list
if isinstance(user_id, str)
]
group_list = [
group[CONF_GROUP]
for group in user_or_group_list
if isinstance(group, dict)
]
flattened_group_list = [
group for sublist in group_list for group in sublist
]
available_users = [
user
for user in available_users
if (
user.id in user_list
or any(
[group.id in flattened_group_list for group in user.groups]
)
)
]
break
return TrustedNetworksLoginFlow(
self,
ip_addr,
{user.id: user.name for user in available_users},
self.config[CONF_ALLOW_BYPASS_LOGIN],
)
async def async_get_or_create_credentials(
self, flow_result: Dict[str, str]
) -> Credentials:
"""Get credentials based on the flow result."""
user_id = flow_result["user"]
users = await self.store.async_get_users()
for user in users:
if not user.system_generated and user.is_active and user.id == user_id:
for credential in await self.async_credentials():
if credential.data["user_id"] == user_id:
return credential
cred = self.async_create_credentials({"user_id": user_id})
await self.store.async_link_user(user, cred)
return cred
        # We only allow login as an existing user
raise InvalidUserError
async def async_user_meta_for_credentials(
self, credentials: Credentials
) -> UserMeta:
"""Return extra user metadata for credentials.
        Trusted network auth provider should never create a new user.
"""
raise NotImplementedError
@callback
def async_validate_access(self, ip_addr: IPAddress) -> None:
"""Make sure the access from trusted networks.
Raise InvalidAuthError if not.
Raise InvalidAuthError if trusted_networks is not configured.
"""
if not self.trusted_networks:
raise InvalidAuthError("trusted_networks is not configured")
if not any(
ip_addr in trusted_network for trusted_network in self.trusted_networks
):
raise InvalidAuthError("Not in trusted_networks")
class TrustedNetworksLoginFlow(LoginFlow):
"""Handler for the login flow."""
def __init__(
self,
auth_provider: TrustedNetworksAuthProvider,
ip_addr: IPAddress,
available_users: Dict[str, Optional[str]],
allow_bypass_login: bool,
) -> None:
"""Initialize the login flow."""
super().__init__(auth_provider)
self._available_users = available_users
self._ip_address = ip_addr
self._allow_bypass_login = allow_bypass_login
async def async_step_init(
self, user_input: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""Handle the step of the form."""
try:
cast(
TrustedNetworksAuthProvider, self._auth_provider
).async_validate_access(self._ip_address)
except InvalidAuthError:
return self.async_abort(reason="not_allowed")
if user_input is not None:
return await self.async_finish(user_input)
if self._allow_bypass_login and len(self._available_users) == 1:
return await self.async_finish(
{"user": next(iter(self._available_users.keys()))}
)
return self.async_show_form(
step_id="init",
data_schema=vol.Schema({"user": vol.In(self._available_users)}),
)
|
import os
import unittest
import platform
import urwid
from urwid.compat import PYTHON3
class EventLoopTestMixin(object):
def test_event_loop(self):
rd, wr = os.pipe()
evl = self.evl
out = []
def step1():
out.append("writing")
os.write(wr, "hi".encode('ascii'))
def step2():
out.append(os.read(rd, 2).decode('ascii'))
raise urwid.ExitMainLoop
handle = evl.alarm(0, step1)
handle = evl.watch_file(rd, step2)
evl.run()
self.assertEqual(out, ["writing", "hi"])
def test_remove_alarm(self):
evl = self.evl
handle = evl.alarm(50, lambda: None)
def step1():
self.assertTrue(evl.remove_alarm(handle))
self.assertFalse(evl.remove_alarm(handle))
raise urwid.ExitMainLoop
evl.alarm(0, step1)
evl.run()
def test_remove_watch_file(self):
evl = self.evl
fd_r, fd_w = os.pipe()
try:
handle = evl.watch_file(fd_r, lambda: None)
def step1():
self.assertTrue(evl.remove_watch_file(handle))
self.assertFalse(evl.remove_watch_file(handle))
raise urwid.ExitMainLoop
evl.alarm(0, step1)
evl.run()
finally:
os.close(fd_r)
os.close(fd_w)
_expected_idle_handle = 1
def test_run(self):
evl = self.evl
out = []
rd, wr = os.pipe()
self.assertEqual(os.write(wr, "data".encode('ascii')), 4)
def say_hello():
out.append("hello")
def say_waiting():
out.append("waiting")
def exit_clean():
out.append("clean exit")
raise urwid.ExitMainLoop
def exit_error():
1/0
handle = evl.alarm(0.01, exit_clean)
handle = evl.alarm(0.005, say_hello)
idle_handle = evl.enter_idle(say_waiting)
if self._expected_idle_handle is not None:
self.assertEqual(idle_handle, 1)
evl.run()
self.assertTrue("hello" in out, out)
self.assertTrue("clean exit" in out, out)
handle = evl.watch_file(rd, exit_clean)
del out[:]
evl.run()
self.assertEqual(out, ["clean exit"])
self.assertTrue(evl.remove_watch_file(handle))
handle = evl.alarm(0, exit_error)
self.assertRaises(ZeroDivisionError, evl.run)
handle = evl.watch_file(rd, exit_error)
self.assertRaises(ZeroDivisionError, evl.run)
class SelectEventLoopTest(unittest.TestCase, EventLoopTestMixin):
def setUp(self):
self.evl = urwid.SelectEventLoop()
try:
import gi.repository
except ImportError:
pass
else:
class GLibEventLoopTest(unittest.TestCase, EventLoopTestMixin):
def setUp(self):
self.evl = urwid.GLibEventLoop()
def test_error(self):
evl = self.evl
evl.alarm(0.5, lambda: 1 / 0) # Simulate error in event loop
self.assertRaises(ZeroDivisionError, evl.run)
try:
import tornado
except ImportError:
pass
else:
class TornadoEventLoopTest(unittest.TestCase, EventLoopTestMixin):
def setUp(self):
from tornado.ioloop import IOLoop
self.evl = urwid.TornadoEventLoop(IOLoop())
try:
import twisted
except ImportError:
pass
else:
class TwistedEventLoopTest(unittest.TestCase, EventLoopTestMixin):
def setUp(self):
self.evl = urwid.TwistedEventLoop()
# can't restart twisted reactor, so use shortened tests
def test_event_loop(self):
pass
def test_remove_alarm(self):
pass
def test_remove_watch_file(self):
pass
def test_run(self):
evl = self.evl
out = []
rd, wr = os.pipe()
self.assertEqual(os.write(wr, "data".encode('ascii')), 4)
def step2():
out.append(os.read(rd, 2).decode('ascii'))
def say_hello():
out.append("hello")
def say_waiting():
out.append("waiting")
def test_remove_alarm():
handle = evl.alarm(50, lambda: None)
self.assertTrue(evl.remove_alarm(handle))
self.assertFalse(evl.remove_alarm(handle))
out.append("remove_alarm ok")
def test_remove_watch_file():
fd_r, fd_w = os.pipe()
try:
handle = evl.watch_file(fd_r, lambda: None)
self.assertTrue(evl.remove_watch_file(handle))
self.assertFalse(evl.remove_watch_file(handle))
finally:
os.close(fd_r)
os.close(fd_w)
out.append("remove_watch_file ok")
def exit_clean():
out.append("clean exit")
raise urwid.ExitMainLoop
def exit_error():
1/0
handle = evl.watch_file(rd, step2)
handle = evl.alarm(0.1, exit_clean)
handle = evl.alarm(0.05, say_hello)
handle = evl.alarm(0.06, test_remove_alarm)
handle = evl.alarm(0.07, test_remove_watch_file)
self.assertEqual(evl.enter_idle(say_waiting), 1)
evl.run()
self.assertTrue("da" in out, out)
self.assertTrue("ta" in out, out)
self.assertTrue("hello" in out, out)
self.assertTrue("remove_alarm ok" in out, out)
self.assertTrue("clean exit" in out, out)
def test_error(self):
evl = self.evl
evl.alarm(0.5, lambda: 1 / 0) # Simulate error in event loop
self.assertRaises(ZeroDivisionError, evl.run)
try:
import asyncio
except ImportError:
pass
else:
if not hasattr(asyncio, 'ensure_future'):
#-- Python < 3.4.4 (e.g. Debian Jessie)
asyncio.ensure_future = getattr(asyncio, 'async')
class AsyncioEventLoopTest(unittest.TestCase, EventLoopTestMixin):
def setUp(self):
self.evl = urwid.AsyncioEventLoop()
_expected_idle_handle = None
def test_error(self):
evl = self.evl
evl.alarm(0.5, lambda: 1 / 0) # Simulate error in event loop
self.assertRaises(ZeroDivisionError, evl.run)
def test_coroutine_error(self):
evl = self.evl
@asyncio.coroutine
def error_coro():
result = 1 / 0 # Simulate error in coroutine
yield result
asyncio.ensure_future(error_coro())
self.assertRaises(ZeroDivisionError, evl.run)
try:
import trio
except ImportError:
pass
else:
class TrioEventLoopTest(unittest.TestCase, EventLoopTestMixin):
def setUp(self):
self.evl = urwid.TrioEventLoop()
_expected_idle_handle = None
def test_error(self):
evl = self.evl
evl.alarm(0.5, lambda: 1 / 0) # Simulate error in event loop
self.assertRaises(ZeroDivisionError, evl.run)
|
from __future__ import print_function
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from netstat import NetstatCollector
################################################################################
class TestNetstatCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NetstatCollector', {
})
self.collector = NetstatCollector(config, None)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
NetstatCollector.PROC_TCP = self.getFixturePath('proc_net_tcp')
self.collector.collect()
metrics = {
'LISTEN': 9
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
print(publish_mock)
self.assertPublishedMany(publish_mock, metrics)
################################################################################
if __name__ == "__main__":
unittest.main()
|
import secrets
from typing import Dict
from aiohttp.web import Request, Response
import emoji
from nacl.secret import SecretBox
import voluptuous as vol
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.const import CONF_WEBHOOK_ID, HTTP_CREATED
from homeassistant.helpers import config_validation as cv
from homeassistant.util import slugify
from .const import (
ATTR_APP_DATA,
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_APP_VERSION,
ATTR_DEVICE_ID,
ATTR_DEVICE_NAME,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_OS_NAME,
ATTR_OS_VERSION,
ATTR_SUPPORTS_ENCRYPTION,
CONF_CLOUDHOOK_URL,
CONF_REMOTE_UI_URL,
CONF_SECRET,
CONF_USER_ID,
DOMAIN,
)
from .helpers import supports_encryption
class RegistrationsView(HomeAssistantView):
"""A view that accepts registration requests."""
url = "/api/mobile_app/registrations"
name = "api:mobile_app:register"
@RequestDataValidator(
vol.Schema(
{
vol.Optional(ATTR_APP_DATA, default={}): dict,
vol.Required(ATTR_APP_ID): cv.string,
vol.Required(ATTR_APP_NAME): cv.string,
vol.Required(ATTR_APP_VERSION): cv.string,
vol.Required(ATTR_DEVICE_NAME): cv.string,
vol.Required(ATTR_MANUFACTURER): cv.string,
vol.Required(ATTR_MODEL): cv.string,
vol.Optional(ATTR_DEVICE_ID): cv.string, # Added in 0.104
vol.Required(ATTR_OS_NAME): cv.string,
vol.Optional(ATTR_OS_VERSION): cv.string,
vol.Required(ATTR_SUPPORTS_ENCRYPTION, default=False): cv.boolean,
},
# To allow future apps to send more data
extra=vol.REMOVE_EXTRA,
)
)
async def post(self, request: Request, data: Dict) -> Response:
"""Handle the POST request for registration."""
hass = request.app["hass"]
webhook_id = secrets.token_hex()
if hass.components.cloud.async_active_subscription():
data[
CONF_CLOUDHOOK_URL
] = await hass.components.cloud.async_create_cloudhook(webhook_id)
data[CONF_WEBHOOK_ID] = webhook_id
if data[ATTR_SUPPORTS_ENCRYPTION] and supports_encryption():
data[CONF_SECRET] = secrets.token_hex(SecretBox.KEY_SIZE)
data[CONF_USER_ID] = request["hass_user"].id
if slugify(data[ATTR_DEVICE_NAME], separator=""):
# if slug is not empty and would not only be underscores
# use DEVICE_NAME
pass
elif emoji.emoji_count(data[ATTR_DEVICE_NAME]):
            # The slug is empty but the device name contains emoji;
            # use the descriptive name of the first emoji
data[ATTR_DEVICE_NAME] = emoji.demojize(
emoji.emoji_lis(data[ATTR_DEVICE_NAME])[0]["emoji"]
).replace(":", "")
else:
# Fallback to DEVICE_ID
data[ATTR_DEVICE_NAME] = data[ATTR_DEVICE_ID]
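        # Illustrative outcomes (device names made up): "Paulus' Pixel" slugifies to
        # something non-empty and is kept as-is; a name of "📱" slugifies to "" but
        # contains an emoji, so it becomes "mobile_phone"; a name of "!!!" falls back
        # to the device id sent by the app.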
await hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, data=data, context={"source": "registration"}
)
)
remote_ui_url = None
try:
remote_ui_url = hass.components.cloud.async_remote_ui_url()
except hass.components.cloud.CloudNotAvailable:
pass
return self.json(
{
CONF_CLOUDHOOK_URL: data.get(CONF_CLOUDHOOK_URL),
CONF_REMOTE_UI_URL: remote_ui_url,
CONF_SECRET: data.get(CONF_SECRET),
CONF_WEBHOOK_ID: data[CONF_WEBHOOK_ID],
},
status_code=HTTP_CREATED,
)
|
from __future__ import print_function
import logging
import optparse
import pymongo
from .utils import do_db_auth, setup_logging
from ..arctic import Arctic
from ..hooks import get_mongodb_uri
logger = logging.getLogger(__name__)
def main():
usage = """usage: %prog [options]
Deletes the named library from a user's database.
Example:
%prog --host=hostname --library=arctic_jblackburn.my_library
"""
setup_logging()
parser = optparse.OptionParser(usage=usage)
parser.add_option("--host", default='localhost', help="Hostname, or clustername. Default: localhost")
parser.add_option("--library", help="The name of the library. e.g. 'arctic_jblackburn.lib'")
(opts, _) = parser.parse_args()
if not opts.library:
parser.error('Must specify the full path of the library e.g. arctic_jblackburn.lib!')
print("Deleting: %s on mongo %s" % (opts.library, opts.host))
c = pymongo.MongoClient(get_mongodb_uri(opts.host))
db_name = opts.library[:opts.library.index('.')] if '.' in opts.library else None
do_db_auth(opts.host, c, db_name)
store = Arctic(c)
store.delete_library(opts.library)
logger.info("Library %s deleted" % opts.library)
if __name__ == '__main__':
main()
|
import homeassistant.components.notify as notify_comp
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component
async def test_setup_full(hass):
"""Test valid configuration."""
await async_setup_component(
hass,
"homematic",
{"homematic": {"hosts": {"ccu2": {"host": "127.0.0.1"}}}},
)
with assert_setup_component(1) as handle_config:
assert await async_setup_component(
hass,
"notify",
{
"notify": {
"name": "test",
"platform": "homematic",
"address": "NEQXXXXXXX",
"channel": 2,
"param": "SUBMIT",
"value": "1,1,108000,2",
"interface": "my-interface",
}
},
)
assert handle_config[notify_comp.DOMAIN]
async def test_setup_without_optional(hass):
"""Test valid configuration without optional."""
await async_setup_component(
hass,
"homematic",
{"homematic": {"hosts": {"ccu2": {"host": "127.0.0.1"}}}},
)
with assert_setup_component(1) as handle_config:
assert await async_setup_component(
hass,
"notify",
{
"notify": {
"name": "test",
"platform": "homematic",
"address": "NEQXXXXXXX",
"channel": 2,
"param": "SUBMIT",
"value": "1,1,108000,2",
}
},
)
assert handle_config[notify_comp.DOMAIN]
async def test_bad_config(hass):
"""Test invalid configuration."""
config = {notify_comp.DOMAIN: {"name": "test", "platform": "homematic"}}
with assert_setup_component(0) as handle_config:
assert await async_setup_component(hass, notify_comp.DOMAIN, config)
assert not handle_config[notify_comp.DOMAIN]
|
import json
from io import StringIO
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import CommandError
from django.urls import reverse
from jsonschema import validate
from weblate_schemas import load_schema
from weblate.lang.models import Language
from weblate.memory.machine import WeblateMemory
from weblate.memory.models import Memory
from weblate.memory.tasks import handle_unit_translation_change, import_memory
from weblate.memory.utils import CATEGORY_FILE
from weblate.trans.tests.test_views import FixtureTestCase
from weblate.trans.tests.utils import get_test_file
def add_document():
Memory.objects.create(
source_language=Language.objects.get(code="en"),
target_language=Language.objects.get(code="cs"),
source="Hello",
target="Ahoj",
origin="test",
from_file=True,
shared=False,
)
class MemoryModelTest(FixtureTestCase):
@classmethod
def _databases_support_transactions(cls):
        # This is a workaround for MySQL, as the FULL TEXT index does not work
# well inside a transaction, so we avoid using transactions for
# tests. Otherwise we end up with no matches for the query.
# See https://dev.mysql.com/doc/refman/5.6/en/innodb-fulltext-index.html
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.mysql":
return False
return super()._databases_support_transactions()
def test_machine(self):
add_document()
unit = self.get_unit()
machine_translation = WeblateMemory()
self.assertEqual(
machine_translation.translate(unit, search="Hello"),
[
{
"quality": 100,
"service": "Weblate Translation Memory",
"origin": "File: test",
"source": "Hello",
"text": "Ahoj",
}
],
)
def test_machine_batch(self):
add_document()
unit = self.get_unit()
machine_translation = WeblateMemory()
unit.source = "Hello"
machine_translation.batch_translate([unit])
self.assertEqual(unit.machinery, {"best": 100, "translation": "Ahoj"})
def test_import_tmx_command(self):
call_command("import_memory", get_test_file("memory.tmx"))
self.assertEqual(Memory.objects.count(), 2)
def test_import_tmx2_command(self):
call_command("import_memory", get_test_file("memory2.tmx"))
self.assertEqual(Memory.objects.count(), 1)
def test_import_map(self):
call_command(
"import_memory", get_test_file("memory.tmx"), language_map="en_US:en"
)
self.assertEqual(Memory.objects.count(), 2)
def test_dump_command(self):
add_document()
output = StringIO()
call_command("dump_memory", stdout=output)
data = json.loads(output.getvalue())
validate(data, load_schema("weblate-memory.schema.json"))
self.assertEqual(
data,
[
{
"source_language": "en",
"target_language": "cs",
"source": "Hello",
"target": "Ahoj",
"origin": "test",
"category": CATEGORY_FILE,
}
],
)
def test_import_invalid_command(self):
with self.assertRaises(CommandError):
call_command("import_memory", get_test_file("cs.po"))
self.assertEqual(Memory.objects.count(), 0)
def test_import_json_command(self):
call_command("import_memory", get_test_file("memory.json"))
self.assertEqual(Memory.objects.count(), 1)
def test_import_broken_json_command(self):
with self.assertRaises(CommandError):
call_command("import_memory", get_test_file("memory-broken.json"))
self.assertEqual(Memory.objects.count(), 0)
def test_import_empty_json_command(self):
with self.assertRaises(CommandError):
call_command("import_memory", get_test_file("memory-empty.json"))
self.assertEqual(Memory.objects.count(), 0)
def test_import_project(self):
import_memory(self.project.id)
self.assertEqual(Memory.objects.count(), 4)
import_memory(self.project.id)
self.assertEqual(Memory.objects.count(), 4)
def test_import_unit(self):
unit = self.get_unit()
handle_unit_translation_change(unit.id, self.user.id)
self.assertEqual(Memory.objects.count(), 3)
handle_unit_translation_change(unit.id, self.user.id)
self.assertEqual(Memory.objects.count(), 3)
class MemoryViewTest(FixtureTestCase):
def upload_file(self, name, prefix: str = "", **kwargs):
with open(get_test_file(name), "rb") as handle:
return self.client.post(
reverse(f"{prefix}memory-upload", **kwargs),
{"file": handle},
follow=True,
)
def test_memory(
self, match="Number of your entries", fail=False, prefix: str = "", **kwargs
):
# Test wipe without confirmation
response = self.client.get(reverse(f"{prefix}memory-delete", **kwargs))
self.assertRedirects(response, reverse(f"{prefix}memory", **kwargs))
response = self.client.post(reverse(f"{prefix}memory-delete", **kwargs))
self.assertRedirects(response, reverse(f"{prefix}memory", **kwargs))
# Test list
response = self.client.get(reverse(f"{prefix}memory", **kwargs))
self.assertContains(response, match)
# Test upload
response = self.upload_file("memory.tmx", prefix=prefix, **kwargs)
if fail:
self.assertContains(response, "Permission Denied", status_code=403)
else:
self.assertContains(response, "File processed")
# Test download
response = self.client.get(reverse(f"{prefix}memory-download", **kwargs))
validate(response.json(), load_schema("weblate-memory.schema.json"))
# Test download
response = self.client.get(
reverse(f"{prefix}memory-download", **kwargs), {"format": "tmx"}
)
self.assertContains(response, "<tmx")
response = self.client.get(
reverse(f"{prefix}memory-download", **kwargs),
{"format": "tmx", "origin": "memory.tmx"},
)
self.assertContains(response, "<tmx")
response = self.client.get(
reverse(f"{prefix}memory-download", **kwargs), {"format": "json"}
)
validate(response.json(), load_schema("weblate-memory.schema.json"))
# Test wipe
count = Memory.objects.count()
response = self.client.post(
reverse(f"{prefix}memory-delete", **kwargs),
{"confirm": "1", "origin": "invalid"},
follow=True,
)
if fail:
self.assertContains(response, "Permission Denied", status_code=403)
else:
self.assertContains(response, "Entries deleted")
self.assertEqual(count, Memory.objects.count())
response = self.client.post(
reverse(f"{prefix}memory-delete", **kwargs),
{"confirm": "1"},
follow=True,
)
self.assertContains(response, "Entries deleted")
self.assertGreater(count, Memory.objects.count())
# Test invalid upload
response = self.upload_file("cs.json", **kwargs)
if fail:
self.assertContains(response, "Permission Denied", status_code=403)
else:
self.assertContains(response, "Failed to parse JSON file")
# Test invalid upload
response = self.upload_file("memory-broken.json", **kwargs)
if fail:
self.assertContains(response, "Permission Denied", status_code=403)
else:
self.assertContains(response, "Failed to parse JSON file")
# Test invalid upload
response = self.upload_file("memory-invalid.json", **kwargs)
if fail:
self.assertContains(response, "Permission Denied", status_code=403)
else:
self.assertContains(response, "Failed to parse JSON file")
def test_memory_project(self):
self.test_memory("Number of entries for Test", True, kwargs=self.kw_project)
def test_memory_project_superuser(self):
self.user.is_superuser = True
self.user.save()
self.test_memory("Number of entries for Test", False, kwargs=self.kw_project)
def test_global_memory_superuser(self):
self.user.is_superuser = True
self.user.save()
self.test_memory("Number of uploaded shared entries", False, prefix="manage-")
# Download all entries
response = self.client.get(
reverse("manage-memory-download"),
{"format": "json", "kind": "all"},
)
validate(response.json(), load_schema("weblate-memory.schema.json"))
# Download shared entries
response = self.client.get(
reverse("manage-memory-download"),
{"format": "json", "kind": "shared"},
)
validate(response.json(), load_schema("weblate-memory.schema.json"))
|
from datetime import timedelta
import logging
import threading
import aiohttp
from amcrest import AmcrestError, Http, LoginError
import voluptuous as vol
from homeassistant.auth.permissions.const import POLICY_CONTROL
from homeassistant.components.binary_sensor import DOMAIN as BINARY_SENSOR
from homeassistant.components.camera import DOMAIN as CAMERA
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_AUTHENTICATION,
CONF_BINARY_SENSORS,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SCAN_INTERVAL,
CONF_SENSORS,
CONF_USERNAME,
ENTITY_MATCH_ALL,
ENTITY_MATCH_NONE,
HTTP_BASIC_AUTHENTICATION,
)
from homeassistant.exceptions import Unauthorized, UnknownUser
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_send, dispatcher_send
from homeassistant.helpers.event import track_time_interval
from homeassistant.helpers.service import async_extract_entity_ids
from .binary_sensor import BINARY_POLLED_SENSORS, BINARY_SENSORS, check_binary_sensors
from .camera import CAMERA_SERVICES, STREAM_SOURCE_LIST
from .const import (
CAMERAS,
COMM_RETRIES,
COMM_TIMEOUT,
DATA_AMCREST,
DEVICES,
DOMAIN,
SENSOR_EVENT_CODE,
SERVICE_EVENT,
SERVICE_UPDATE,
)
from .helpers import service_signal
from .sensor import SENSORS
_LOGGER = logging.getLogger(__name__)
CONF_RESOLUTION = "resolution"
CONF_STREAM_SOURCE = "stream_source"
CONF_FFMPEG_ARGUMENTS = "ffmpeg_arguments"
CONF_CONTROL_LIGHT = "control_light"
DEFAULT_NAME = "Amcrest Camera"
DEFAULT_PORT = 80
DEFAULT_RESOLUTION = "high"
DEFAULT_ARGUMENTS = "-pred 1"
MAX_ERRORS = 5
RECHECK_INTERVAL = timedelta(minutes=1)
NOTIFICATION_ID = "amcrest_notification"
NOTIFICATION_TITLE = "Amcrest Camera Setup"
RESOLUTION_LIST = {"high": 0, "low": 1}
SCAN_INTERVAL = timedelta(seconds=10)
AUTHENTICATION_LIST = {"basic": "basic"}
def _has_unique_names(devices):
names = [device[CONF_NAME] for device in devices]
vol.Schema(vol.Unique())(names)
return devices
AMCREST_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_AUTHENTICATION, default=HTTP_BASIC_AUTHENTICATION): vol.All(
vol.In(AUTHENTICATION_LIST)
),
vol.Optional(CONF_RESOLUTION, default=DEFAULT_RESOLUTION): vol.All(
vol.In(RESOLUTION_LIST)
),
vol.Optional(CONF_STREAM_SOURCE, default=STREAM_SOURCE_LIST[0]): vol.All(
vol.In(STREAM_SOURCE_LIST)
),
vol.Optional(CONF_FFMPEG_ARGUMENTS, default=DEFAULT_ARGUMENTS): cv.string,
vol.Optional(CONF_SCAN_INTERVAL, default=SCAN_INTERVAL): cv.time_period,
vol.Optional(CONF_BINARY_SENSORS): vol.All(
cv.ensure_list, [vol.In(BINARY_SENSORS)], vol.Unique(), check_binary_sensors
),
vol.Optional(CONF_SENSORS): vol.All(
cv.ensure_list, [vol.In(SENSORS)], vol.Unique()
),
vol.Optional(CONF_CONTROL_LIGHT, default=True): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.All(cv.ensure_list, [AMCREST_SCHEMA], _has_unique_names)},
extra=vol.ALLOW_EXTRA,
)
class AmcrestChecker(Http):
"""amcrest.Http wrapper for catching errors."""
def __init__(self, hass, name, host, port, user, password):
"""Initialize."""
self._hass = hass
self._wrap_name = name
self._wrap_errors = 0
self._wrap_lock = threading.Lock()
self._wrap_login_err = False
self._wrap_event_flag = threading.Event()
self._wrap_event_flag.set()
self._unsub_recheck = None
super().__init__(
host,
port,
user,
password,
retries_connection=COMM_RETRIES,
timeout_protocol=COMM_TIMEOUT,
)
@property
def available(self):
"""Return if camera's API is responding."""
return self._wrap_errors <= MAX_ERRORS and not self._wrap_login_err
@property
def available_flag(self):
"""Return threading event flag that indicates if camera's API is responding."""
return self._wrap_event_flag
def _start_recovery(self):
self._wrap_event_flag.clear()
dispatcher_send(self._hass, service_signal(SERVICE_UPDATE, self._wrap_name))
self._unsub_recheck = track_time_interval(
self._hass, self._wrap_test_online, RECHECK_INTERVAL
)
def command(self, *args, **kwargs):
"""amcrest.Http.command wrapper to catch errors."""
try:
ret = super().command(*args, **kwargs)
except LoginError as ex:
with self._wrap_lock:
was_online = self.available
was_login_err = self._wrap_login_err
self._wrap_login_err = True
if not was_login_err:
_LOGGER.error("%s camera offline: Login error: %s", self._wrap_name, ex)
if was_online:
self._start_recovery()
raise
except AmcrestError:
with self._wrap_lock:
was_online = self.available
errs = self._wrap_errors = self._wrap_errors + 1
offline = not self.available
_LOGGER.debug("%s camera errs: %i", self._wrap_name, errs)
if was_online and offline:
_LOGGER.error("%s camera offline: Too many errors", self._wrap_name)
self._start_recovery()
raise
with self._wrap_lock:
was_offline = not self.available
self._wrap_errors = 0
self._wrap_login_err = False
if was_offline:
self._unsub_recheck()
self._unsub_recheck = None
_LOGGER.error("%s camera back online", self._wrap_name)
self._wrap_event_flag.set()
dispatcher_send(self._hass, service_signal(SERVICE_UPDATE, self._wrap_name))
return ret
def _wrap_test_online(self, now):
"""Test if camera is back online."""
_LOGGER.debug("Testing if %s back online", self._wrap_name)
        try:
            # Accessing the current_time property issues a command; if it
            # succeeds, the command() wrapper above resets the error state.
            self.current_time
        except AmcrestError:
            pass
def _monitor_events(hass, name, api, event_codes):
    """Continuously poll the camera for events and dispatch them as signals."""
    event_codes = ",".join(event_codes)
while True:
api.available_flag.wait()
try:
for code, start in api.event_actions(event_codes, retries=5):
signal = service_signal(SERVICE_EVENT, name, code)
_LOGGER.debug("Sending signal: '%s': %s", signal, start)
dispatcher_send(hass, signal, start)
except AmcrestError as error:
_LOGGER.warning(
"Error while processing events from %s camera: %r", name, error
)
def _start_event_monitor(hass, name, api, event_codes):
    """Start a daemon thread that monitors the camera's event stream."""
    thread = threading.Thread(
target=_monitor_events,
name=f"Amcrest {name}",
args=(hass, name, api, event_codes),
daemon=True,
)
thread.start()
def setup(hass, config):
"""Set up the Amcrest IP Camera component."""
hass.data.setdefault(DATA_AMCREST, {DEVICES: {}, CAMERAS: []})
for device in config[DOMAIN]:
name = device[CONF_NAME]
username = device[CONF_USERNAME]
password = device[CONF_PASSWORD]
api = AmcrestChecker(
hass, name, device[CONF_HOST], device[CONF_PORT], username, password
)
ffmpeg_arguments = device[CONF_FFMPEG_ARGUMENTS]
resolution = RESOLUTION_LIST[device[CONF_RESOLUTION]]
binary_sensors = device.get(CONF_BINARY_SENSORS)
sensors = device.get(CONF_SENSORS)
stream_source = device[CONF_STREAM_SOURCE]
control_light = device.get(CONF_CONTROL_LIGHT)
# currently aiohttp only works with basic authentication
# only valid for mjpeg streaming
if device[CONF_AUTHENTICATION] == HTTP_BASIC_AUTHENTICATION:
authentication = aiohttp.BasicAuth(username, password)
else:
authentication = None
hass.data[DATA_AMCREST][DEVICES][name] = AmcrestDevice(
api,
authentication,
ffmpeg_arguments,
stream_source,
resolution,
control_light,
)
discovery.load_platform(hass, CAMERA, DOMAIN, {CONF_NAME: name}, config)
if binary_sensors:
discovery.load_platform(
hass,
BINARY_SENSOR,
DOMAIN,
{CONF_NAME: name, CONF_BINARY_SENSORS: binary_sensors},
config,
)
event_codes = [
BINARY_SENSORS[sensor_type][SENSOR_EVENT_CODE]
for sensor_type in binary_sensors
if sensor_type not in BINARY_POLLED_SENSORS
]
if event_codes:
_start_event_monitor(hass, name, api, event_codes)
if sensors:
discovery.load_platform(
hass, SENSOR, DOMAIN, {CONF_NAME: name, CONF_SENSORS: sensors}, config
)
if not hass.data[DATA_AMCREST][DEVICES]:
return False
def have_permission(user, entity_id):
return not user or user.permissions.check_entity(entity_id, POLICY_CONTROL)
async def async_extract_from_service(call):
if call.context.user_id:
user = await hass.auth.async_get_user(call.context.user_id)
if user is None:
raise UnknownUser(context=call.context)
else:
user = None
if call.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_ALL:
# Return all entity_ids user has permission to control.
return [
entity_id
for entity_id in hass.data[DATA_AMCREST][CAMERAS]
if have_permission(user, entity_id)
]
if call.data.get(ATTR_ENTITY_ID) == ENTITY_MATCH_NONE:
return []
call_ids = await async_extract_entity_ids(hass, call)
entity_ids = []
for entity_id in hass.data[DATA_AMCREST][CAMERAS]:
if entity_id not in call_ids:
continue
if not have_permission(user, entity_id):
raise Unauthorized(
context=call.context, entity_id=entity_id, permission=POLICY_CONTROL
)
entity_ids.append(entity_id)
return entity_ids
async def async_service_handler(call):
args = []
for arg in CAMERA_SERVICES[call.service][2]:
args.append(call.data[arg])
for entity_id in await async_extract_from_service(call):
async_dispatcher_send(hass, service_signal(call.service, entity_id), *args)
for service, params in CAMERA_SERVICES.items():
hass.services.register(DOMAIN, service, async_service_handler, params[0])
return True
class AmcrestDevice:
"""Representation of a base Amcrest discovery device."""
def __init__(
self,
api,
authentication,
ffmpeg_arguments,
stream_source,
resolution,
control_light,
):
"""Initialize the entity."""
self.api = api
self.authentication = authentication
self.ffmpeg_arguments = ffmpeg_arguments
self.stream_source = stream_source
self.resolution = resolution
self.control_light = control_light
|
import asyncio
import json
import logging
from serial import SerialException
import serial_asyncio
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_VALUE_TEMPLATE, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_SERIAL_PORT = "serial_port"
CONF_BAUDRATE = "baudrate"
CONF_BYTESIZE = "bytesize"
CONF_PARITY = "parity"
CONF_STOPBITS = "stopbits"
CONF_XONXOFF = "xonxoff"
CONF_RTSCTS = "rtscts"
CONF_DSRDTR = "dsrdtr"
DEFAULT_NAME = "Serial Sensor"
DEFAULT_BAUDRATE = 9600
DEFAULT_BYTESIZE = serial_asyncio.serial.EIGHTBITS
DEFAULT_PARITY = serial_asyncio.serial.PARITY_NONE
DEFAULT_STOPBITS = serial_asyncio.serial.STOPBITS_ONE
DEFAULT_XONXOFF = False
DEFAULT_RTSCTS = False
DEFAULT_DSRDTR = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SERIAL_PORT): cv.string,
vol.Optional(CONF_BAUDRATE, default=DEFAULT_BAUDRATE): cv.positive_int,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_VALUE_TEMPLATE): cv.template,
vol.Optional(CONF_BYTESIZE, default=DEFAULT_BYTESIZE): vol.In(
[
serial_asyncio.serial.FIVEBITS,
serial_asyncio.serial.SIXBITS,
serial_asyncio.serial.SEVENBITS,
serial_asyncio.serial.EIGHTBITS,
]
),
vol.Optional(CONF_PARITY, default=DEFAULT_PARITY): vol.In(
[
serial_asyncio.serial.PARITY_NONE,
serial_asyncio.serial.PARITY_EVEN,
serial_asyncio.serial.PARITY_ODD,
serial_asyncio.serial.PARITY_MARK,
serial_asyncio.serial.PARITY_SPACE,
]
),
vol.Optional(CONF_STOPBITS, default=DEFAULT_STOPBITS): vol.In(
[
serial_asyncio.serial.STOPBITS_ONE,
serial_asyncio.serial.STOPBITS_ONE_POINT_FIVE,
serial_asyncio.serial.STOPBITS_TWO,
]
),
vol.Optional(CONF_XONXOFF, default=DEFAULT_XONXOFF): cv.boolean,
vol.Optional(CONF_RTSCTS, default=DEFAULT_RTSCTS): cv.boolean,
vol.Optional(CONF_DSRDTR, default=DEFAULT_DSRDTR): cv.boolean,
}
)
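# Illustrative configuration.yaml entry for this sensor platform, derived from
# PLATFORM_SCHEMA above (only serial_port is required; the remaining keys use
# the defaults defined here). The device path is a placeholder:
#
# sensor:
#   - platform: serial
#     serial_port: /dev/ttyUSB0
#     baudrate: 9600
#     name: Serial Sensor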
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Serial sensor platform."""
name = config.get(CONF_NAME)
port = config.get(CONF_SERIAL_PORT)
baudrate = config.get(CONF_BAUDRATE)
bytesize = config.get(CONF_BYTESIZE)
parity = config.get(CONF_PARITY)
stopbits = config.get(CONF_STOPBITS)
xonxoff = config.get(CONF_XONXOFF)
rtscts = config.get(CONF_RTSCTS)
dsrdtr = config.get(CONF_DSRDTR)
value_template = config.get(CONF_VALUE_TEMPLATE)
if value_template is not None:
value_template.hass = hass
sensor = SerialSensor(
name,
port,
baudrate,
bytesize,
parity,
stopbits,
xonxoff,
rtscts,
dsrdtr,
value_template,
)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, sensor.stop_serial_read)
async_add_entities([sensor], True)
class SerialSensor(Entity):
"""Representation of a Serial sensor."""
def __init__(
self,
name,
port,
baudrate,
bytesize,
parity,
stopbits,
xonxoff,
rtscts,
dsrdtr,
value_template,
):
"""Initialize the Serial sensor."""
self._name = name
self._state = None
self._port = port
self._baudrate = baudrate
self._bytesize = bytesize
self._parity = parity
self._stopbits = stopbits
self._xonxoff = xonxoff
self._rtscts = rtscts
self._dsrdtr = dsrdtr
self._serial_loop_task = None
self._template = value_template
self._attributes = None
async def async_added_to_hass(self):
"""Handle when an entity is about to be added to Home Assistant."""
self._serial_loop_task = self.hass.loop.create_task(
self.serial_read(
self._port,
self._baudrate,
self._bytesize,
self._parity,
self._stopbits,
self._xonxoff,
self._rtscts,
self._dsrdtr,
)
)
async def serial_read(
self,
device,
baudrate,
bytesize,
parity,
stopbits,
xonxoff,
rtscts,
dsrdtr,
**kwargs,
):
"""Read the data from the port."""
logged_error = False
while True:
try:
reader, _ = await serial_asyncio.open_serial_connection(
url=device,
baudrate=baudrate,
bytesize=bytesize,
parity=parity,
stopbits=stopbits,
xonxoff=xonxoff,
rtscts=rtscts,
dsrdtr=dsrdtr,
**kwargs,
)
except SerialException as exc:
if not logged_error:
_LOGGER.exception(
"Unable to connect to the serial device %s: %s. Will retry",
device,
exc,
)
logged_error = True
await self._handle_error()
else:
_LOGGER.info("Serial device %s connected", device)
while True:
try:
line = await reader.readline()
except SerialException as exc:
_LOGGER.exception(
"Error while reading serial device %s: %s", device, exc
)
await self._handle_error()
break
else:
line = line.decode("utf-8").strip()
try:
data = json.loads(line)
except ValueError:
pass
else:
if isinstance(data, dict):
self._attributes = data
if self._template is not None:
line = self._template.async_render_with_possible_json_value(
line
)
_LOGGER.debug("Received: %s", line)
self._state = line
self.async_write_ha_state()
async def _handle_error(self):
"""Handle error for serial connection."""
self._state = None
self._attributes = None
self.async_write_ha_state()
await asyncio.sleep(5)
@callback
def stop_serial_read(self, event):
"""Close resources."""
if self._serial_loop_task:
self._serial_loop_task.cancel()
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the attributes of the entity (if any JSON present)."""
return self._attributes
@property
def state(self):
"""Return the state of the sensor."""
return self._state
|
from typing import List
from typing import Mapping
from typing import Optional
import service_configuration_lib
from paasta_tools.kubernetes_tools import sanitised_cr_name
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfigDict
from paasta_tools.utils import BranchDictV2
from paasta_tools.utils import deep_merge_dictionaries
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_service_instance_config
from paasta_tools.utils import load_v2_deployments_json
class KafkaClusterDeploymentConfigDict(LongRunningServiceConfigDict, total=False):
replicas: int
class KafkaClusterDeploymentConfig(LongRunningServiceConfig):
config_dict: KafkaClusterDeploymentConfigDict
config_filename_prefix = "kafkacluster"
def __init__(
self,
service: str,
cluster: str,
instance: str,
config_dict: KafkaClusterDeploymentConfigDict,
branch_dict: Optional[BranchDictV2],
soa_dir: str = DEFAULT_SOA_DIR,
) -> None:
super().__init__(
cluster=cluster,
instance=instance,
service=service,
soa_dir=soa_dir,
config_dict=config_dict,
branch_dict=branch_dict,
)
def get_instances(self, with_limit: bool = True) -> int:
return self.config_dict.get("replicas", 1)
def validate(
self,
params: List[str] = [
"cpus",
"security",
"dependencies_reference",
"deploy_group",
],
) -> List[str]:
# Use InstanceConfig to validate shared config keys like cpus and mem
# TODO: add mem back to this list once we fix PAASTA-15582 and
# move to using the same units as flink/marathon etc.
error_msgs = super().validate(params=params)
if error_msgs:
name = self.get_instance()
return [f"{name}: {msg}" for msg in error_msgs]
else:
return []
def load_kafkacluster_instance_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
) -> KafkaClusterDeploymentConfig:
"""Read a service instance's configuration for KafkaCluster.
If a branch isn't specified for a config, the 'branch' key defaults to
paasta-${cluster}.${instance}.
:param service: The service name
:param instance: The instance of the service to retrieve
:param cluster: The cluster to read the configuration for
:param load_deployments: A boolean indicating if the corresponding deployments.json for this service
should also be loaded
:param soa_dir: The SOA configuration directory to read from
    :returns: A KafkaClusterDeploymentConfig built from the service instance's configuration"""
general_config = service_configuration_lib.read_service_configuration(
service, soa_dir=soa_dir
)
instance_config = load_service_instance_config(
service, instance, "kafkacluster", cluster, soa_dir=soa_dir
)
general_config = deep_merge_dictionaries(
overrides=instance_config, defaults=general_config
)
branch_dict: Optional[BranchDictV2] = None
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = KafkaClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
return KafkaClusterDeploymentConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=general_config,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
# TODO: read this from CRD in service configs
def cr_id(service: str, instance: str) -> Mapping[str, str]:
return dict(
group="yelp.com",
version="v1alpha1",
namespace="paasta-kafkaclusters",
plural="kafkaclusters",
name=sanitised_cr_name(service, instance),
)
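# Illustrative usage sketch (service, instance and cluster names are
# hypothetical); it only exercises the public helpers defined above:
#
#     config = load_kafkacluster_instance_config(
#         service="my-service", instance="main", cluster="my-cluster"
#     )
#     config.get_instances()        # value of "replicas", defaulting to 1
#     cr_id("my-service", "main")   # custom-resource identifier mapping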
|
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from . import ElkEntity, create_elk_entities
from .const import DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Elk light platform."""
elk_data = hass.data[DOMAIN][config_entry.entry_id]
entities = []
elk = elk_data["elk"]
create_elk_entities(elk_data, elk.lights, "plc", ElkLight, entities)
async_add_entities(entities, True)
class ElkLight(ElkEntity, LightEntity):
"""Representation of an Elk lighting device."""
def __init__(self, element, elk, elk_data):
"""Initialize the Elk light."""
super().__init__(element, elk, elk_data)
self._brightness = self._element.status
@property
def brightness(self):
"""Get the brightness."""
return self._brightness
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def is_on(self) -> bool:
"""Get the current brightness."""
return self._brightness != 0
    def _element_changed(self, element, changeset):
        # The Elk reports a status of 1 when the light is fully on; treat that
        # as 100% and scale the 0-100 range to Home Assistant's 0-255 range.
        status = self._element.status if self._element.status != 1 else 100
        self._brightness = round(status * 2.55)
async def async_turn_on(self, **kwargs):
"""Turn on the light."""
self._element.level(round(kwargs.get(ATTR_BRIGHTNESS, 255) / 2.55))
async def async_turn_off(self, **kwargs):
"""Turn off the light."""
self._element.level(0)
|
import unittest
import numpy as np
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.models import JunctionTree
from pgmpy.tests import help_functions as hf
class TestJunctionTreeCreation(unittest.TestCase):
def setUp(self):
self.graph = JunctionTree()
def test_add_single_node(self):
self.graph.add_node(("a", "b"))
self.assertListEqual(list(self.graph.nodes()), [("a", "b")])
def test_add_single_node_raises_error(self):
self.assertRaises(TypeError, self.graph.add_node, "a")
def test_add_multiple_nodes(self):
self.graph.add_nodes_from([("a", "b"), ("b", "c")])
self.assertListEqual(
hf.recursive_sorted(self.graph.nodes()), [["a", "b"], ["b", "c"]]
)
def test_add_single_edge(self):
self.graph.add_edge(("a", "b"), ("b", "c"))
self.assertListEqual(
hf.recursive_sorted(self.graph.nodes()), [["a", "b"], ["b", "c"]]
)
self.assertListEqual(
sorted([node for edge in self.graph.edges() for node in edge]),
[("a", "b"), ("b", "c")],
)
def test_add_single_edge_raises_error(self):
self.assertRaises(ValueError, self.graph.add_edge, ("a", "b"), ("c", "d"))
def test_add_cyclic_path_raises_error(self):
self.graph.add_edge(("a", "b"), ("b", "c"))
self.graph.add_edge(("b", "c"), ("c", "d"))
self.assertRaises(ValueError, self.graph.add_edge, ("c", "d"), ("a", "b"))
def tearDown(self):
del self.graph
class TestJunctionTreeMethods(unittest.TestCase):
def setUp(self):
self.factor1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
self.factor2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.factor3 = DiscreteFactor(["d", "e"], [2, 2], np.random.rand(4))
self.factor4 = DiscreteFactor(["e", "f"], [2, 2], np.random.rand(4))
self.factor5 = DiscreteFactor(["a", "b", "e"], [2, 2, 2], np.random.rand(8))
self.graph1 = JunctionTree()
self.graph1.add_edge(("a", "b"), ("b", "c"))
self.graph1.add_factors(self.factor1, self.factor2)
self.graph2 = JunctionTree()
self.graph2.add_nodes_from([("a", "b"), ("b", "c"), ("d", "e")])
self.graph2.add_edge(("a", "b"), ("b", "c"))
self.graph2.add_factors(self.factor1, self.factor2, self.factor3)
self.graph3 = JunctionTree()
self.graph3.add_edges_from([(("a", "b"), ("b", "c")), (("d", "e"), ("e", "f"))])
self.graph3.add_factors(self.factor1, self.factor2, self.factor3, self.factor4)
self.graph4 = JunctionTree()
self.graph4.add_edges_from(
[
(("a", "b", "e"), ("b", "c")),
(("a", "b", "e"), ("e", "f")),
(("d", "e"), ("e", "f")),
]
)
self.graph4.add_factors(self.factor5, self.factor2, self.factor3, self.factor4)
def test_check_model(self):
self.assertRaises(ValueError, self.graph2.check_model)
self.assertRaises(ValueError, self.graph3.check_model)
self.assertTrue(self.graph1.check_model())
self.assertTrue(self.graph4.check_model())
def tearDown(self):
del self.factor1
del self.factor2
del self.factor3
del self.factor4
del self.factor5
del self.graph1
del self.graph2
del self.graph3
del self.graph4
class TestJunctionTreeCopy(unittest.TestCase):
def setUp(self):
self.graph = JunctionTree()
def test_copy_with_nodes(self):
self.graph.add_nodes_from([("a", "b", "c"), ("a", "b"), ("a", "c")])
self.graph.add_edges_from(
[(("a", "b", "c"), ("a", "b")), (("a", "b", "c"), ("a", "c"))]
)
graph_copy = self.graph.copy()
self.graph.remove_edge(("a", "b", "c"), ("a", "c"))
self.assertFalse(self.graph.has_edge(("a", "b", "c"), ("a", "c")))
self.assertTrue(graph_copy.has_edge(("a", "b", "c"), ("a", "c")))
self.graph.remove_node(("a", "c"))
self.assertFalse(self.graph.has_node(("a", "c")))
self.assertTrue(graph_copy.has_node(("a", "c")))
self.graph.add_node(("c", "d"))
self.assertTrue(self.graph.has_node(("c", "d")))
self.assertFalse(graph_copy.has_node(("c", "d")))
def test_copy_with_factors(self):
self.graph.add_edges_from([[("a", "b"), ("b", "c")]])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_factors(phi1, phi2)
graph_copy = self.graph.copy()
self.assertIsInstance(graph_copy, JunctionTree)
self.assertIsNot(self.graph, graph_copy)
self.assertEqual(
hf.recursive_sorted(self.graph.nodes()),
hf.recursive_sorted(graph_copy.nodes()),
)
self.assertEqual(
hf.recursive_sorted(self.graph.edges()),
hf.recursive_sorted(graph_copy.edges()),
)
self.assertTrue(graph_copy.check_model())
self.assertEqual(self.graph.get_factors(), graph_copy.get_factors())
self.graph.remove_factors(phi1, phi2)
self.assertTrue(
phi1 not in self.graph.factors and phi2 not in self.graph.factors
)
self.assertTrue(phi1 in graph_copy.factors and phi2 in graph_copy.factors)
self.graph.add_factors(phi1, phi2)
self.graph.factors[0] = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
self.assertNotEqual(self.graph.get_factors()[0], graph_copy.get_factors()[0])
self.assertNotEqual(self.graph.factors, graph_copy.factors)
def test_copy_with_factorchanges(self):
self.graph.add_edges_from([[("a", "b"), ("b", "c")]])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_factors(phi1, phi2)
graph_copy = self.graph.copy()
self.graph.factors[0].reduce([("a", 0)])
self.assertNotEqual(
self.graph.factors[0].scope(), graph_copy.factors[0].scope()
)
self.assertNotEqual(self.graph, graph_copy)
self.graph.factors[1].marginalize(["b"])
self.assertNotEqual(
self.graph.factors[1].scope(), graph_copy.factors[1].scope()
)
self.assertNotEqual(self.graph, graph_copy)
def tearDown(self):
del self.graph
|
import logging
import lw12
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_TRANSITION,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_EFFECT,
SUPPORT_TRANSITION,
LightEntity,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "LW-12 FC"
DEFAULT_PORT = 5000
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
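# Illustrative configuration.yaml entry based on PLATFORM_SCHEMA above; the
# "lw12wifi" platform key is an assumption and the host is a placeholder:
#
# light:
#   - platform: lw12wifi
#     host: 192.168.1.20
#     port: 5000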
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up LW-12 WiFi LED Controller platform."""
# Assign configuration variables.
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
# Add devices
lw12_light = lw12.LW12Controller(host, port)
add_entities([LW12WiFi(name, lw12_light)])
class LW12WiFi(LightEntity):
"""LW-12 WiFi LED Controller."""
def __init__(self, name, lw12_light):
"""Initialise LW-12 WiFi LED Controller.
:param name: Friendly name for this platform to use.
:param lw12_light: Instance of the LW12 controller.
"""
self._light = lw12_light
self._name = name
self._state = None
self._effect = None
self._rgb_color = [255, 255, 255]
self._brightness = 255
# Setup feature list
self._supported_features = (
SUPPORT_BRIGHTNESS | SUPPORT_EFFECT | SUPPORT_COLOR | SUPPORT_TRANSITION
)
@property
def name(self):
"""Return the display name of the controlled light."""
return self._name
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def hs_color(self):
"""Read back the hue-saturation of the light."""
return color_util.color_RGB_to_hs(*self._rgb_color)
@property
def effect(self):
"""Return current light effect."""
if self._effect is None:
return None
return self._effect.replace("_", " ").title()
@property
def is_on(self):
"""Return true if light is on."""
return self._state
@property
def supported_features(self):
"""Return a list of supported features."""
return self._supported_features
@property
def effect_list(self):
"""Return a list of available effects.
Use the Enum element name for display.
"""
return [effect.name.replace("_", " ").title() for effect in lw12.LW12_EFFECT]
@property
def assumed_state(self) -> bool:
"""Return True if unable to access real state of the entity."""
return True
@property
    def should_poll(self) -> bool:
"""Return False to not poll the state of this entity."""
return False
def turn_on(self, **kwargs):
"""Instruct the light to turn on."""
self._light.light_on()
if ATTR_HS_COLOR in kwargs:
self._rgb_color = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
self._light.set_color(*self._rgb_color)
self._effect = None
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs.get(ATTR_BRIGHTNESS)
brightness = int(self._brightness / 255 * 100)
self._light.set_light_option(lw12.LW12_LIGHT.BRIGHTNESS, brightness)
if ATTR_EFFECT in kwargs:
self._effect = kwargs[ATTR_EFFECT].replace(" ", "_").upper()
# Check if a known and supported effect was selected.
if self._effect in [eff.name for eff in lw12.LW12_EFFECT]:
# Selected effect is supported and will be applied.
self._light.set_effect(lw12.LW12_EFFECT[self._effect])
else:
# Unknown effect was set, recover by disabling the effect
# mode and log an error.
_LOGGER.error("Unknown effect selected: %s", self._effect)
self._effect = None
if ATTR_TRANSITION in kwargs:
transition_speed = int(kwargs[ATTR_TRANSITION])
self._light.set_light_option(lw12.LW12_LIGHT.FLASH, transition_speed)
self._state = True
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._light.light_off()
self._state = False
|
import struct
import time
import io
import cherrypy
from cherrypy._cpcompat import text_or_bytes
from cherrypy.lib import file_generator
from cherrypy.lib import is_closable_iterator
from cherrypy.lib import set_vary_header
_COMPRESSION_LEVEL_FAST = 1
_COMPRESSION_LEVEL_BEST = 9
def decode(encoding=None, default_encoding='utf-8'):
"""Replace or extend the list of charsets used to decode a request entity.
Either argument may be a single string or a list of strings.
encoding
If not None, restricts the set of charsets attempted while decoding
a request entity to the given set (even if a different charset is
given in the Content-Type request header).
default_encoding
Only in effect if the 'encoding' argument is not given.
If given, the set of charsets attempted while decoding a request
entity is *extended* with the given value(s).
"""
body = cherrypy.request.body
if encoding is not None:
if not isinstance(encoding, list):
encoding = [encoding]
body.attempt_charsets = encoding
elif default_encoding:
if not isinstance(default_encoding, list):
default_encoding = [default_encoding]
body.attempt_charsets = body.attempt_charsets + default_encoding
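# Minimal usage sketch: decode() above is normally exposed as the "decode"
# tool and enabled through application config rather than called directly.
# The keys follow CherryPy's tools.<name>.<param> convention; treat the exact
# key names as an assumption:
#
#     config = {
#         '/': {
#             'tools.decode.on': True,
#             'tools.decode.default_encoding': ['utf-8', 'latin-1'],
#         },
#     }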
class UTF8StreamEncoder:
def __init__(self, iterator):
self._iterator = iterator
def __iter__(self):
return self
def next(self):
return self.__next__()
def __next__(self):
res = next(self._iterator)
if isinstance(res, str):
res = res.encode('utf-8')
return res
def close(self):
if is_closable_iterator(self._iterator):
self._iterator.close()
def __getattr__(self, attr):
if attr.startswith('__'):
raise AttributeError(self, attr)
return getattr(self._iterator, attr)
class ResponseEncoder:
default_encoding = 'utf-8'
failmsg = 'Response body could not be encoded with %r.'
encoding = None
errors = 'strict'
text_only = True
add_charset = True
debug = False
def __init__(self, **kwargs):
for k, v in kwargs.items():
setattr(self, k, v)
self.attempted_charsets = set()
request = cherrypy.serving.request
if request.handler is not None:
# Replace request.handler with self
if self.debug:
cherrypy.log('Replacing request.handler', 'TOOLS.ENCODE')
self.oldhandler = request.handler
request.handler = self
def encode_stream(self, encoding):
"""Encode a streaming response body.
Use a generator wrapper, and just pray it works as the stream is
being written out.
"""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
def encoder(body):
for chunk in body:
if isinstance(chunk, str):
chunk = chunk.encode(encoding, self.errors)
yield chunk
self.body = encoder(self.body)
return True
def encode_string(self, encoding):
"""Encode a buffered response body."""
if encoding in self.attempted_charsets:
return False
self.attempted_charsets.add(encoding)
body = []
for chunk in self.body:
if isinstance(chunk, str):
try:
chunk = chunk.encode(encoding, self.errors)
except (LookupError, UnicodeError):
return False
body.append(chunk)
self.body = body
return True
def find_acceptable_charset(self):
request = cherrypy.serving.request
response = cherrypy.serving.response
if self.debug:
cherrypy.log('response.stream %r' %
response.stream, 'TOOLS.ENCODE')
if response.stream:
encoder = self.encode_stream
else:
encoder = self.encode_string
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
# Encoded strings may be of different lengths from their
# unicode equivalents, and even from each other. For example:
# >>> t = u"\u7007\u3040"
# >>> len(t)
# 2
# >>> len(t.encode("UTF-8"))
# 6
# >>> len(t.encode("utf7"))
# 8
del response.headers['Content-Length']
# Parse the Accept-Charset request header, and try to provide one
# of the requested charsets (in order of user preference).
encs = request.headers.elements('Accept-Charset')
charsets = [enc.value.lower() for enc in encs]
if self.debug:
cherrypy.log('charsets %s' % repr(charsets), 'TOOLS.ENCODE')
if self.encoding is not None:
# If specified, force this encoding to be used, or fail.
encoding = self.encoding.lower()
if self.debug:
cherrypy.log('Specified encoding %r' %
encoding, 'TOOLS.ENCODE')
if (not charsets) or '*' in charsets or encoding in charsets:
if self.debug:
cherrypy.log('Attempting encoding %r' %
encoding, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
else:
if not encs:
if self.debug:
cherrypy.log('Attempting default encoding %r' %
self.default_encoding, 'TOOLS.ENCODE')
# Any character-set is acceptable.
if encoder(self.default_encoding):
return self.default_encoding
else:
raise cherrypy.HTTPError(500, self.failmsg %
self.default_encoding)
else:
for element in encs:
if element.qvalue > 0:
if element.value == '*':
# Matches any charset. Try our default.
if self.debug:
cherrypy.log('Attempting default encoding due '
'to %r' % element, 'TOOLS.ENCODE')
if encoder(self.default_encoding):
return self.default_encoding
else:
encoding = element.value
if self.debug:
cherrypy.log('Attempting encoding %s (qvalue >'
'0)' % element, 'TOOLS.ENCODE')
if encoder(encoding):
return encoding
if '*' not in charsets:
# If no "*" is present in an Accept-Charset field, then all
# character sets not explicitly mentioned get a quality
# value of 0, except for ISO-8859-1, which gets a quality
# value of 1 if not explicitly mentioned.
iso = 'iso-8859-1'
if iso not in charsets:
if self.debug:
cherrypy.log('Attempting ISO-8859-1 encoding',
'TOOLS.ENCODE')
if encoder(iso):
return iso
# No suitable encoding found.
ac = request.headers.get('Accept-Charset')
if ac is None:
msg = 'Your client did not send an Accept-Charset header.'
else:
msg = 'Your client sent this Accept-Charset header: %s.' % ac
_charsets = ', '.join(sorted(self.attempted_charsets))
msg += ' We tried these charsets: %s.' % (_charsets,)
raise cherrypy.HTTPError(406, msg)
def __call__(self, *args, **kwargs):
response = cherrypy.serving.response
self.body = self.oldhandler(*args, **kwargs)
self.body = prepare_iter(self.body)
ct = response.headers.elements('Content-Type')
if self.debug:
cherrypy.log('Content-Type: %r' % [str(h)
for h in ct], 'TOOLS.ENCODE')
if ct and self.add_charset:
ct = ct[0]
if self.text_only:
if ct.value.lower().startswith('text/'):
if self.debug:
cherrypy.log(
'Content-Type %s starts with "text/"' % ct,
'TOOLS.ENCODE')
do_find = True
else:
if self.debug:
cherrypy.log('Not finding because Content-Type %s '
'does not start with "text/"' % ct,
'TOOLS.ENCODE')
do_find = False
else:
if self.debug:
cherrypy.log('Finding because not text_only',
'TOOLS.ENCODE')
do_find = True
if do_find:
# Set "charset=..." param on response Content-Type header
ct.params['charset'] = self.find_acceptable_charset()
if self.debug:
cherrypy.log('Setting Content-Type %s' % ct,
'TOOLS.ENCODE')
response.headers['Content-Type'] = str(ct)
return self.body
def prepare_iter(value):
"""
Ensure response body is iterable and resolves to False when empty.
"""
if isinstance(value, text_or_bytes):
# strings get wrapped in a list because iterating over a single
# item list is much faster than iterating over every character
# in a long string.
if value:
value = [value]
else:
# [''] doesn't evaluate to False, so replace it with [].
value = []
    # Don't use isinstance here; checking against io.IOBase (an ABC) takes
    # roughly 1000 times as long as, say, isinstance(value, str).
elif hasattr(value, 'read'):
value = file_generator(value)
elif value is None:
value = []
return value
# GZIP
def compress(body, compress_level):
"""Compress 'body' at the given compress_level."""
import zlib
# See https://tools.ietf.org/html/rfc1952
yield b'\x1f\x8b' # ID1 and ID2: gzip marker
yield b'\x08' # CM: compression method
yield b'\x00' # FLG: none set
# MTIME: 4 bytes
yield struct.pack('<L', int(time.time()) & int('FFFFFFFF', 16))
# RFC 1952, section 2.3.1:
#
# XFL (eXtra FLags)
# These flags are available for use by specific compression
# methods. The "deflate" method (CM = 8) sets these flags as
# follows:
#
# XFL = 2 - compressor used maximum compression,
# slowest algorithm
# XFL = 4 - compressor used fastest algorithm
if compress_level == _COMPRESSION_LEVEL_BEST:
yield b'\x02' # XFL: max compression, slowest algo
elif compress_level == _COMPRESSION_LEVEL_FAST:
yield b'\x04' # XFL: min compression, fastest algo
else:
yield b'\x00' # XFL: compression unset/tradeoff
yield b'\xff' # OS: unknown
crc = zlib.crc32(b'')
size = 0
zobj = zlib.compressobj(compress_level,
zlib.DEFLATED, -zlib.MAX_WBITS,
zlib.DEF_MEM_LEVEL, 0)
for line in body:
size += len(line)
crc = zlib.crc32(line, crc)
yield zobj.compress(line)
yield zobj.flush()
# CRC32: 4 bytes
yield struct.pack('<L', crc & int('FFFFFFFF', 16))
# ISIZE: 4 bytes
yield struct.pack('<L', size & int('FFFFFFFF', 16))
def decompress(body):
import gzip
zbuf = io.BytesIO()
zbuf.write(body)
zbuf.seek(0)
zfile = gzip.GzipFile(mode='rb', fileobj=zbuf)
data = zfile.read()
zfile.close()
return data
def gzip(compress_level=5, mime_types=['text/html', 'text/plain'],
debug=False):
"""Try to gzip the response body if Content-Type in mime_types.
cherrypy.response.headers['Content-Type'] must be set to one of the
values in the mime_types arg before calling this function.
    The provided list of mime-types must be in one of the following forms:
* `type/subtype`
* `type/*`
* `type/*+subtype`
No compression is performed if any of the following hold:
* The client sends no Accept-Encoding request header
* No 'gzip' or 'x-gzip' is present in the Accept-Encoding header
* No 'gzip' or 'x-gzip' with a qvalue > 0 is present
* The 'identity' value is given with a qvalue > 0.
"""
request = cherrypy.serving.request
response = cherrypy.serving.response
set_vary_header(response, 'Accept-Encoding')
if not response.body:
# Response body is empty (might be a 304 for instance)
if debug:
cherrypy.log('No response body', context='TOOLS.GZIP')
return
# If returning cached content (which should already have been gzipped),
# don't re-zip.
if getattr(request, 'cached', False):
if debug:
cherrypy.log('Not gzipping cached response', context='TOOLS.GZIP')
return
acceptable = request.headers.elements('Accept-Encoding')
if not acceptable:
# If no Accept-Encoding field is present in a request,
# the server MAY assume that the client will accept any
# content coding. In this case, if "identity" is one of
# the available content-codings, then the server SHOULD use
# the "identity" content-coding, unless it has additional
# information that a different content-coding is meaningful
# to the client.
if debug:
cherrypy.log('No Accept-Encoding', context='TOOLS.GZIP')
return
ct = response.headers.get('Content-Type', '').split(';')[0]
for coding in acceptable:
if coding.value == 'identity' and coding.qvalue != 0:
if debug:
cherrypy.log('Non-zero identity qvalue: %s' % coding,
context='TOOLS.GZIP')
return
if coding.value in ('gzip', 'x-gzip'):
if coding.qvalue == 0:
if debug:
cherrypy.log('Zero gzip qvalue: %s' % coding,
context='TOOLS.GZIP')
return
if ct not in mime_types:
# If the list of provided mime-types contains tokens
# such as 'text/*' or 'application/*+xml',
# we go through them and find the most appropriate one
# based on the given content-type.
        # The pattern matching only cares about the most common
        # cases, as stated above, and doesn't support extra
        # parameters.
found = False
if '/' in ct:
ct_media_type, ct_sub_type = ct.split('/')
for mime_type in mime_types:
if '/' in mime_type:
media_type, sub_type = mime_type.split('/')
if ct_media_type == media_type:
if sub_type == '*':
found = True
break
elif '+' in sub_type and '+' in ct_sub_type:
ct_left, ct_right = ct_sub_type.split('+')
left, right = sub_type.split('+')
if left == '*' and ct_right == right:
found = True
break
if not found:
if debug:
cherrypy.log('Content-Type %s not in mime_types %r' %
(ct, mime_types), context='TOOLS.GZIP')
return
if debug:
cherrypy.log('Gzipping', context='TOOLS.GZIP')
# Return a generator that compresses the page
response.headers['Content-Encoding'] = 'gzip'
response.body = compress(response.body, compress_level)
if 'Content-Length' in response.headers:
# Delete Content-Length header so finalize() recalcs it.
del response.headers['Content-Length']
return
if debug:
cherrypy.log('No acceptable encoding found.', context='GZIP')
cherrypy.HTTPError(406, 'identity, gzip').set_response()
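# Minimal usage sketch for the gzip tool above, enabled via application
# config. The keys follow CherryPy's tools.<name>.<param> convention; treat
# the exact key names as an assumption:
#
#     config = {
#         '/': {
#             'tools.gzip.on': True,
#             'tools.gzip.compress_level': 5,
#             'tools.gzip.mime_types': ['text/html', 'text/plain'],
#         },
#     }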
|
from typing import Any, Dict
from synology_dsm.api.surveillance_station import SynoSurveillanceStation
from synology_dsm.api.surveillance_station.camera import SynoCamera
from homeassistant.components.camera import SUPPORT_STREAM, Camera
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from . import SynoApi, SynologyDSMEntity
from .const import (
DOMAIN,
ENTITY_CLASS,
ENTITY_ENABLE,
ENTITY_ICON,
ENTITY_NAME,
ENTITY_UNIT,
SYNO_API,
)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Synology NAS binary sensor."""
api = hass.data[DOMAIN][entry.unique_id][SYNO_API]
if SynoSurveillanceStation.CAMERA_API_KEY not in api.dsm.apis:
return
surveillance_station = api.surveillance_station
await hass.async_add_executor_job(surveillance_station.update)
cameras = surveillance_station.get_all_cameras()
entities = [SynoDSMCamera(api, camera) for camera in cameras]
async_add_entities(entities)
class SynoDSMCamera(SynologyDSMEntity, Camera):
"""Representation a Synology camera."""
def __init__(self, api: SynoApi, camera: SynoCamera):
"""Initialize a Synology camera."""
super().__init__(
api,
f"{SynoSurveillanceStation.CAMERA_API_KEY}:{camera.id}",
{
ENTITY_NAME: camera.name,
ENTITY_CLASS: None,
ENTITY_ICON: None,
ENTITY_ENABLE: True,
ENTITY_UNIT: None,
},
)
self._camera = camera
@property
    def device_info(self) -> Dict[str, Any]:
"""Return the device information."""
return {
"identifiers": {(DOMAIN, self._api.information.serial, self._camera.id)},
"name": self._camera.name,
"model": self._camera.model,
"via_device": (
DOMAIN,
self._api.information.serial,
SynoSurveillanceStation.INFO_API_KEY,
),
}
@property
def available(self) -> bool:
"""Return the availability of the camera."""
return self._camera.is_enabled
@property
def supported_features(self) -> int:
"""Return supported features of this camera."""
return SUPPORT_STREAM
@property
def is_recording(self):
"""Return true if the device is recording."""
return self._camera.is_recording
@property
def motion_detection_enabled(self):
"""Return the camera motion detection status."""
return self._camera.is_motion_detection_enabled
def camera_image(self) -> bytes:
"""Return bytes of camera image."""
if not self.available:
return None
return self._api.surveillance_station.get_camera_image(self._camera.id)
async def stream_source(self) -> str:
"""Return the source of the stream."""
if not self.available:
return None
return self._camera.live_view.rtsp
def enable_motion_detection(self):
"""Enable motion detection in the camera."""
self._api.surveillance_station.enable_motion_detection(self._camera.id)
def disable_motion_detection(self):
"""Disable motion detection in camera."""
self._api.surveillance_station.disable_motion_detection(self._camera.id)
|
import pytest
import requests
import requests_mock
import homeassistant.components.facebox.image_processing as fb
import homeassistant.components.image_processing as ip
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_NAME,
CONF_FRIENDLY_NAME,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
HTTP_BAD_REQUEST,
HTTP_OK,
HTTP_UNAUTHORIZED,
STATE_UNKNOWN,
)
from homeassistant.core import callback
from homeassistant.setup import async_setup_component
from tests.async_mock import Mock, mock_open, patch
MOCK_IP = "192.168.0.1"
MOCK_PORT = "8080"
# Mock data returned by the facebox API.
MOCK_BOX_ID = "b893cc4f7fd6"
MOCK_ERROR_NO_FACE = "No face found"
MOCK_FACE = {
"confidence": 0.5812028911604818,
"id": "john.jpg",
"matched": True,
"name": "John Lennon",
"rect": {"height": 75, "left": 63, "top": 262, "width": 74},
}
MOCK_FILE_PATH = "/images/mock.jpg"
MOCK_HEALTH = {
"success": True,
"hostname": "b893cc4f7fd6",
"metadata": {"boxname": "facebox", "build": "development"},
"errors": [],
}
MOCK_JSON = {"facesCount": 1, "success": True, "faces": [MOCK_FACE]}
MOCK_NAME = "mock_name"
MOCK_USERNAME = "mock_username"
MOCK_PASSWORD = "mock_password"
# Faces data after parsing.
PARSED_FACES = [
{
fb.FACEBOX_NAME: "John Lennon",
fb.ATTR_IMAGE_ID: "john.jpg",
fb.ATTR_CONFIDENCE: 58.12,
fb.ATTR_MATCHED: True,
fb.ATTR_BOUNDING_BOX: {"height": 75, "left": 63, "top": 262, "width": 74},
}
]
MATCHED_FACES = {"John Lennon": 58.12}
VALID_ENTITY_ID = "image_processing.facebox_demo_camera"
VALID_CONFIG = {
ip.DOMAIN: {
"platform": "facebox",
CONF_IP_ADDRESS: MOCK_IP,
CONF_PORT: MOCK_PORT,
ip.CONF_SOURCE: {ip.CONF_ENTITY_ID: "camera.demo_camera"},
},
"camera": {"platform": "demo"},
}
@pytest.fixture
def mock_healthybox():
"""Mock fb.check_box_health."""
check_box_health = (
"homeassistant.components.facebox.image_processing.check_box_health"
)
with patch(check_box_health, return_value=MOCK_BOX_ID) as _mock_healthybox:
yield _mock_healthybox
@pytest.fixture
def mock_isfile():
"""Mock os.path.isfile."""
with patch(
"homeassistant.components.facebox.image_processing.cv.isfile", return_value=True
) as _mock_isfile:
yield _mock_isfile
@pytest.fixture
def mock_image():
"""Return a mock camera image."""
with patch(
"homeassistant.components.demo.camera.DemoCamera.camera_image",
return_value=b"Test",
) as image:
yield image
@pytest.fixture
def mock_open_file():
"""Mock open."""
mopen = mock_open()
with patch(
"homeassistant.components.facebox.image_processing.open", mopen, create=True
) as _mock_open:
yield _mock_open
def test_check_box_health(caplog):
"""Test check box health."""
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/healthz"
mock_req.get(url, status_code=HTTP_OK, json=MOCK_HEALTH)
assert fb.check_box_health(url, "user", "pass") == MOCK_BOX_ID
mock_req.get(url, status_code=HTTP_UNAUTHORIZED)
assert fb.check_box_health(url, None, None) is None
assert "AuthenticationError on facebox" in caplog.text
mock_req.get(url, exc=requests.exceptions.ConnectTimeout)
fb.check_box_health(url, None, None)
assert "ConnectionError: Is facebox running?" in caplog.text
def test_encode_image():
"""Test that binary data is encoded correctly."""
assert fb.encode_image(b"test") == "dGVzdA=="
def test_get_matched_faces():
"""Test that matched_faces are parsed correctly."""
assert fb.get_matched_faces(PARSED_FACES) == MATCHED_FACES
def test_parse_faces():
"""Test parsing of raw face data, and generation of matched_faces."""
assert fb.parse_faces(MOCK_JSON["faces"]) == PARSED_FACES
@patch("os.access", Mock(return_value=False))
def test_valid_file_path():
"""Test that an invalid file_path is caught."""
assert not fb.valid_file_path("test_path")
async def test_setup_platform(hass, mock_healthybox):
"""Set up platform with one entity."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
async def test_setup_platform_with_auth(hass, mock_healthybox):
"""Set up platform with one entity and auth."""
valid_config_auth = VALID_CONFIG.copy()
valid_config_auth[ip.DOMAIN][CONF_USERNAME] = MOCK_USERNAME
valid_config_auth[ip.DOMAIN][CONF_PASSWORD] = MOCK_PASSWORD
await async_setup_component(hass, ip.DOMAIN, valid_config_auth)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
async def test_process_image(hass, mock_healthybox, mock_image):
"""Test successful processing of an image."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
face_events = []
@callback
def mock_face_event(event):
"""Mock event."""
face_events.append(event)
hass.bus.async_listen("image_processing.detect_face", mock_face_event)
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/check"
mock_req.post(url, json=MOCK_JSON)
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
state = hass.states.get(VALID_ENTITY_ID)
assert state.state == "1"
assert state.attributes.get("matched_faces") == MATCHED_FACES
assert state.attributes.get("total_matched_faces") == 1
PARSED_FACES[0][ATTR_ENTITY_ID] = VALID_ENTITY_ID # Update.
assert state.attributes.get("faces") == PARSED_FACES
assert state.attributes.get(CONF_FRIENDLY_NAME) == "facebox demo_camera"
assert len(face_events) == 1
assert face_events[0].data[ATTR_NAME] == PARSED_FACES[0][ATTR_NAME]
assert (
face_events[0].data[fb.ATTR_CONFIDENCE] == PARSED_FACES[0][fb.ATTR_CONFIDENCE]
)
assert face_events[0].data[ATTR_ENTITY_ID] == VALID_ENTITY_ID
assert face_events[0].data[fb.ATTR_IMAGE_ID] == PARSED_FACES[0][fb.ATTR_IMAGE_ID]
assert (
face_events[0].data[fb.ATTR_BOUNDING_BOX]
== PARSED_FACES[0][fb.ATTR_BOUNDING_BOX]
)
async def test_process_image_errors(hass, mock_healthybox, mock_image, caplog):
"""Test process_image errors."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
# Test connection error.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/check"
mock_req.register_uri("POST", url, exc=requests.exceptions.ConnectTimeout)
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
assert "ConnectionError: Is facebox running?" in caplog.text
state = hass.states.get(VALID_ENTITY_ID)
assert state.state == STATE_UNKNOWN
assert state.attributes.get("faces") == []
assert state.attributes.get("matched_faces") == {}
# Now test with bad auth.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/check"
mock_req.register_uri("POST", url, status_code=HTTP_UNAUTHORIZED)
data = {ATTR_ENTITY_ID: VALID_ENTITY_ID}
await hass.services.async_call(ip.DOMAIN, ip.SERVICE_SCAN, service_data=data)
await hass.async_block_till_done()
assert "AuthenticationError on facebox" in caplog.text
async def test_teach_service(
hass, mock_healthybox, mock_image, mock_isfile, mock_open_file, caplog
):
"""Test teaching of facebox."""
await async_setup_component(hass, ip.DOMAIN, VALID_CONFIG)
await hass.async_block_till_done()
assert hass.states.get(VALID_ENTITY_ID)
# Patch out 'is_allowed_path' as the mock files aren't allowed
hass.config.is_allowed_path = Mock(return_value=True)
# Test successful teach.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/teach"
mock_req.post(url, status_code=HTTP_OK)
data = {
ATTR_ENTITY_ID: VALID_ENTITY_ID,
ATTR_NAME: MOCK_NAME,
fb.FILE_PATH: MOCK_FILE_PATH,
}
await hass.services.async_call(
fb.DOMAIN, fb.SERVICE_TEACH_FACE, service_data=data
)
await hass.async_block_till_done()
# Now test with bad auth.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/teach"
mock_req.post(url, status_code=HTTP_UNAUTHORIZED)
data = {
ATTR_ENTITY_ID: VALID_ENTITY_ID,
ATTR_NAME: MOCK_NAME,
fb.FILE_PATH: MOCK_FILE_PATH,
}
await hass.services.async_call(
fb.DOMAIN, fb.SERVICE_TEACH_FACE, service_data=data
)
await hass.async_block_till_done()
assert "AuthenticationError on facebox" in caplog.text
# Now test the failed teaching.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/teach"
mock_req.post(url, status_code=HTTP_BAD_REQUEST, text=MOCK_ERROR_NO_FACE)
data = {
ATTR_ENTITY_ID: VALID_ENTITY_ID,
ATTR_NAME: MOCK_NAME,
fb.FILE_PATH: MOCK_FILE_PATH,
}
await hass.services.async_call(
fb.DOMAIN, fb.SERVICE_TEACH_FACE, service_data=data
)
await hass.async_block_till_done()
assert MOCK_ERROR_NO_FACE in caplog.text
# Now test connection error.
with requests_mock.Mocker() as mock_req:
url = f"http://{MOCK_IP}:{MOCK_PORT}/facebox/teach"
mock_req.post(url, exc=requests.exceptions.ConnectTimeout)
data = {
ATTR_ENTITY_ID: VALID_ENTITY_ID,
ATTR_NAME: MOCK_NAME,
fb.FILE_PATH: MOCK_FILE_PATH,
}
await hass.services.async_call(
fb.DOMAIN, fb.SERVICE_TEACH_FACE, service_data=data
)
await hass.async_block_till_done()
assert "ConnectionError: Is facebox running?" in caplog.text
async def test_setup_platform_with_name(hass, mock_healthybox):
"""Set up platform with one entity and a name."""
named_entity_id = f"image_processing.{MOCK_NAME}"
valid_config_named = VALID_CONFIG.copy()
valid_config_named[ip.DOMAIN][ip.CONF_SOURCE][ip.CONF_NAME] = MOCK_NAME
await async_setup_component(hass, ip.DOMAIN, valid_config_named)
await hass.async_block_till_done()
assert hass.states.get(named_entity_id)
state = hass.states.get(named_entity_id)
assert state.attributes.get(CONF_FRIENDLY_NAME) == MOCK_NAME
|
from typing import Dict
from .model import Config, Integration
BASE = """
# This file is generated by script/hassfest/codeowners.py
# People marked here will be automatically requested for a review
# when the code that they own is touched.
# https://github.com/blog/2392-introducing-code-owners
# Home Assistant Core
setup.py @home-assistant/core
homeassistant/*.py @home-assistant/core
homeassistant/helpers/* @home-assistant/core
homeassistant/util/* @home-assistant/core
# Other code
homeassistant/scripts/check_config.py @kellerza
# Integrations
""".strip()
INDIVIDUAL_FILES = """
# Individual files
homeassistant/components/demo/weather @fabaff
"""
def generate_and_validate(integrations: Dict[str, Integration]):
"""Generate CODEOWNERS."""
parts = [BASE]
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.manifest:
continue
codeowners = integration.manifest["codeowners"]
if not codeowners:
continue
for owner in codeowners:
if not owner.startswith("@"):
integration.add_error(
"codeowners", "Code owners need to be valid GitHub handles."
)
parts.append(f"homeassistant/components/{domain}/* {' '.join(codeowners)}")
parts.append(f"\n{INDIVIDUAL_FILES.strip()}")
return "\n".join(parts)
def validate(integrations: Dict[str, Integration], config: Config):
"""Validate CODEOWNERS."""
codeowners_path = config.root / "CODEOWNERS"
config.cache["codeowners"] = content = generate_and_validate(integrations)
if config.specific_integrations:
return
with open(str(codeowners_path)) as fp:
if fp.read().strip() != content:
config.add_error(
"codeowners",
"File CODEOWNERS is not up to date. Run python3 -m script.hassfest",
fixable=True,
)
return
def generate(integrations: Dict[str, Integration], config: Config):
"""Generate CODEOWNERS."""
codeowners_path = config.root / "CODEOWNERS"
with open(str(codeowners_path), "w") as fp:
fp.write(f"{config.cache['codeowners']}\n")
|
import logging
from kalliope import Utils, BrainLoader
from kalliope.core import NeuronModule
from kalliope.core.NeuronModule import MissingParameterException
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Brain(NeuronModule):
def __init__(self, **kwargs):
super(Brain, self).__init__(**kwargs)
self.synapse_name = kwargs.get('synapse_name', None)
self.enabled = kwargs.get('enabled', None)
if self._is_parameters_ok():
self.say(self._update_brain())
def _is_parameters_ok(self):
"""
Check if received parameters are ok to perform operations in the neuron
:return: true if parameters are ok, raise an exception otherwise
.. raises:: MissingParameterException
"""
if self.synapse_name is None or self.synapse_name == "":
raise MissingParameterException("[Brain neuron] You must specify a 'synapse_name'")
if self.enabled is None or self.enabled == "":
raise MissingParameterException("[Brain neuron] You must specify a 'enabled' boolean")
self.enabled = Utils.str_to_bool(self.enabled)
return True
def _update_brain(self):
new_status = "unknown"
brain = BrainLoader().brain
if self.enabled:
if brain.enable_synapse_by_name(self.synapse_name):
new_status = "enabled"
else:
if brain.disable_synapse_by_name(self.synapse_name):
new_status = "disabled"
message = {
"synapse_name": self.synapse_name,
"status": new_status
}
return message
|
import logging
from pyoppleio.OppleLightDevice import OppleLightDevice
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.const import CONF_HOST, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.util.color import (
color_temperature_kelvin_to_mired as kelvin_to_mired,
color_temperature_mired_to_kelvin as mired_to_kelvin,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "opple light"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
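# Illustrative configuration.yaml entry based on PLATFORM_SCHEMA above; the
# "opple" platform key is an assumption and the host is a placeholder:
#
# light:
#   - platform: opple
#     host: 192.168.1.30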
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Opple light platform."""
name = config[CONF_NAME]
host = config[CONF_HOST]
entity = OppleLight(name, host)
add_entities([entity])
_LOGGER.debug("Init light %s %s", host, entity.unique_id)
class OppleLight(LightEntity):
"""Opple light device."""
def __init__(self, name, host):
"""Initialize an Opple light."""
self._device = OppleLightDevice(host)
self._name = name
self._is_on = None
self._brightness = None
self._color_temp = None
@property
def available(self):
"""Return True if light is available."""
return self._device.is_online
@property
def unique_id(self):
"""Return unique ID for light."""
return self._device.mac
@property
def name(self):
"""Return the display name of this light."""
return self._name
@property
def is_on(self):
"""Return true if light is on."""
return self._is_on
@property
def brightness(self):
"""Return the brightness of the light."""
return self._brightness
@property
def color_temp(self):
"""Return the color temperature of this light."""
return kelvin_to_mired(self._color_temp)
@property
def min_mireds(self):
"""Return minimum supported color temperature."""
return 175
@property
def max_mireds(self):
"""Return maximum supported color temperature."""
return 333
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP
def turn_on(self, **kwargs):
"""Instruct the light to turn on."""
_LOGGER.debug("Turn on light %s %s", self._device.ip, kwargs)
if not self.is_on:
self._device.power_on = True
if ATTR_BRIGHTNESS in kwargs and self.brightness != kwargs[ATTR_BRIGHTNESS]:
self._device.brightness = kwargs[ATTR_BRIGHTNESS]
if ATTR_COLOR_TEMP in kwargs and self.color_temp != kwargs[ATTR_COLOR_TEMP]:
color_temp = mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
self._device.color_temperature = color_temp
def turn_off(self, **kwargs):
"""Instruct the light to turn off."""
self._device.power_on = False
_LOGGER.debug("Turn off light %s", self._device.ip)
def update(self):
"""Synchronize state with light."""
prev_available = self.available
self._device.update()
if (
prev_available == self.available
and self._is_on == self._device.power_on
and self._brightness == self._device.brightness
and self._color_temp == self._device.color_temperature
):
return
if not self.available:
_LOGGER.debug("Light %s is offline", self._device.ip)
return
self._is_on = self._device.power_on
self._brightness = self._device.brightness
self._color_temp = self._device.color_temperature
if not self.is_on:
_LOGGER.debug("Update light %s success: power off", self._device.ip)
else:
_LOGGER.debug(
"Update light %s success: power on brightness %s "
"color temperature %s",
self._device.ip,
self._brightness,
self._color_temp,
)
|
from homeassistant import config_entries, setup
from homeassistant.components.profiler.const import DOMAIN
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_form_user(hass):
"""Test we can setup by the user."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.profiler.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.profiler.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Profiler"
assert result2["data"] == {}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_form_user_only_once(hass):
"""Test we can setup by the user only once."""
MockConfigEntry(domain=DOMAIN).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "abort"
assert result["reason"] == "single_instance_allowed"
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from . import (
ATTR_DATE,
ATTR_DATETIME,
ATTR_TIME,
CONF_HAS_DATE,
CONF_HAS_TIME,
DOMAIN,
SERVICE_SET_DATETIME,
)
_LOGGER = logging.getLogger(__name__)
def is_valid_datetime(string: str) -> bool:
"""Test if string dt is a valid datetime."""
try:
return dt_util.parse_datetime(string) is not None
except ValueError:
return False
def is_valid_date(string: str) -> bool:
"""Test if string dt is a valid date."""
return dt_util.parse_date(string) is not None
def is_valid_time(string: str) -> bool:
"""Test if string dt is a valid time."""
return dt_util.parse_time(string) is not None
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
if not (
(
is_valid_datetime(state.state)
and cur_state.attributes.get(CONF_HAS_DATE)
and cur_state.attributes.get(CONF_HAS_TIME)
)
or (
is_valid_date(state.state)
and cur_state.attributes.get(CONF_HAS_DATE)
and not cur_state.attributes.get(CONF_HAS_TIME)
)
or (
is_valid_time(state.state)
and cur_state.attributes.get(CONF_HAS_TIME)
and not cur_state.attributes.get(CONF_HAS_DATE)
)
):
_LOGGER.warning(
"Invalid state specified for %s: %s", state.entity_id, state.state
)
return
# Return if we are already at the right state.
if cur_state.state == state.state:
return
service = SERVICE_SET_DATETIME
service_data = {ATTR_ENTITY_ID: state.entity_id}
has_time = cur_state.attributes.get(CONF_HAS_TIME)
has_date = cur_state.attributes.get(CONF_HAS_DATE)
if has_time and has_date:
service_data[ATTR_DATETIME] = state.state
elif has_time:
service_data[ATTR_TIME] = state.state
elif has_date:
service_data[ATTR_DATE] = state.state
else:
_LOGGER.warning("input_datetime needs either has_date or has_time or both")
return
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Input datetime states."""
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
|
from __future__ import division
import numpy as np
import random
import six
from chainercv import utils
def random_distort(
img,
brightness_delta=32,
contrast_low=0.5, contrast_high=1.5,
saturation_low=0.5, saturation_high=1.5,
hue_delta=18):
"""A color related data augmentation used in SSD.
This function is a combination of four augmentation methods:
brightness, contrast, saturation and hue.
* brightness: Adding a random offset to the intensity of the image.
* contrast: Multiplying the intensity of the image by a random scale.
* saturation: Multiplying the saturation of the image by a random scale.
    * hue: Adding a random offset to the hue of the image.
This data augmentation is used in training of
Single Shot Multibox Detector [#]_.
Note that this function requires :mod:`cv2`.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Args:
img (~numpy.ndarray): An image array to be augmented. This is in
CHW and RGB format.
        brightness_delta (float): The offset for brightness will be
            drawn from :math:`[-brightness\_delta, brightness\_delta]`.
The default value is :obj:`32`.
contrast_low (float): The scale for contrast will be
drawn from :math:`[contrast\_low, contrast\_high]`.
The default value is :obj:`0.5`.
contrast_high (float): See :obj:`contrast_low`.
The default value is :obj:`1.5`.
saturation_low (float): The scale for saturation will be
drawn from :math:`[saturation\_low, saturation\_high]`.
The default value is :obj:`0.5`.
saturation_high (float): See :obj:`saturation_low`.
The default value is :obj:`1.5`.
hue_delta (float): The offset for hue will be
drawn from :math:`[-hue\_delta, hue\_delta]`.
The default value is :obj:`18`.
Returns:
An image in CHW and RGB format.
"""
import cv2
cv_img = img[::-1].transpose((1, 2, 0)).astype(np.uint8)
def convert(img, alpha=1, beta=0):
img = img.astype(float) * alpha + beta
img[img < 0] = 0
img[img > 255] = 255
return img.astype(np.uint8)
def brightness(cv_img, delta):
if random.randrange(2):
return convert(
cv_img,
beta=random.uniform(-delta, delta))
else:
return cv_img
def contrast(cv_img, low, high):
if random.randrange(2):
return convert(
cv_img,
alpha=random.uniform(low, high))
else:
return cv_img
def saturation(cv_img, low, high):
if random.randrange(2):
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2HSV)
cv_img[:, :, 1] = convert(
cv_img[:, :, 1],
alpha=random.uniform(low, high))
return cv2.cvtColor(cv_img, cv2.COLOR_HSV2BGR)
else:
return cv_img
def hue(cv_img, delta):
if random.randrange(2):
cv_img = cv2.cvtColor(cv_img, cv2.COLOR_BGR2HSV)
cv_img[:, :, 0] = (
cv_img[:, :, 0].astype(int) +
random.randint(-delta, delta)) % 180
return cv2.cvtColor(cv_img, cv2.COLOR_HSV2BGR)
else:
return cv_img
cv_img = brightness(cv_img, brightness_delta)
if random.randrange(2):
cv_img = contrast(cv_img, contrast_low, contrast_high)
cv_img = saturation(cv_img, saturation_low, saturation_high)
cv_img = hue(cv_img, hue_delta)
else:
cv_img = saturation(cv_img, saturation_low, saturation_high)
cv_img = hue(cv_img, hue_delta)
cv_img = contrast(cv_img, contrast_low, contrast_high)
return cv_img.astype(np.float32).transpose((2, 0, 1))[::-1]
def random_crop_with_bbox_constraints(
img, bbox, min_scale=0.3, max_scale=1,
max_aspect_ratio=2, constraints=None,
max_trial=50, return_param=False):
"""Crop an image randomly with bounding box constraints.
This data augmentation is used in training of
Single Shot Multibox Detector [#]_. More details can be found in
data augmentation section of the original paper.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Args:
img (~numpy.ndarray): An image array to be cropped. This is in
CHW format.
bbox (~numpy.ndarray): Bounding boxes used for constraints.
The shape is :math:`(R, 4)`.
:math:`R` is the number of bounding boxes.
min_scale (float): The minimum ratio between a cropped
region and the original image. The default value is :obj:`0.3`.
max_scale (float): The maximum ratio between a cropped
region and the original image. The default value is :obj:`1`.
max_aspect_ratio (float): The maximum aspect ratio of cropped region.
The default value is :obj:`2`.
        constraints (iterable of tuples): An iterable of constraints.
Each constraint should be :obj:`(min_iou, max_iou)` format.
If you set :obj:`min_iou` or :obj:`max_iou` to :obj:`None`,
it means not limited.
If this argument is not specified, :obj:`((0.1, None), (0.3, None),
(0.5, None), (0.7, None), (0.9, None), (None, 1))` will be used.
max_trial (int): The maximum number of trials to be conducted
for each constraint. If this function
can not find any region that satisfies the constraint in
:math:`max\_trial` trials, this function skips the constraint.
The default value is :obj:`50`.
return_param (bool): If :obj:`True`, this function returns
information of intermediate values.
Returns:
~numpy.ndarray or (~numpy.ndarray, dict):
If :obj:`return_param = False`,
returns an array :obj:`img` that is cropped from the input
array.
If :obj:`return_param = True`,
returns a tuple whose elements are :obj:`img, param`.
:obj:`param` is a dictionary of intermediate parameters whose
contents are listed below with key, value-type and the description
of the value.
* **constraint** (*tuple*): The chosen constraint.
* **y_slice** (*slice*): A slice in vertical direction used to crop \
the input image.
* **x_slice** (*slice*): A slice in horizontal direction used to crop \
the input image.
"""
if constraints is None:
constraints = (
(0.1, None),
(0.3, None),
(0.5, None),
(0.7, None),
(0.9, None),
(None, 1),
)
_, H, W = img.shape
params = [{
'constraint': None, 'y_slice': slice(0, H), 'x_slice': slice(0, W)}]
if len(bbox) == 0:
constraints = []
for min_iou, max_iou in constraints:
if min_iou is None:
min_iou = 0
if max_iou is None:
max_iou = 1
for _ in six.moves.range(max_trial):
scale = random.uniform(min_scale, max_scale)
aspect_ratio = random.uniform(
max(1 / max_aspect_ratio, scale * scale),
min(max_aspect_ratio, 1 / (scale * scale)))
crop_h = int(H * scale / np.sqrt(aspect_ratio))
crop_w = int(W * scale * np.sqrt(aspect_ratio))
crop_t = random.randrange(H - crop_h)
crop_l = random.randrange(W - crop_w)
crop_bb = np.array((
crop_t, crop_l, crop_t + crop_h, crop_l + crop_w))
iou = utils.bbox_iou(bbox, crop_bb[np.newaxis])
if min_iou <= iou.min() and iou.max() <= max_iou:
params.append({
'constraint': (min_iou, max_iou),
'y_slice': slice(crop_t, crop_t + crop_h),
'x_slice': slice(crop_l, crop_l + crop_w)})
break
param = random.choice(params)
img = img[:, param['y_slice'], param['x_slice']]
if return_param:
return img, param
else:
return img
def resize_with_random_interpolation(img, size, return_param=False):
"""Resize an image with a randomly selected interpolation method.
This function is similar to :func:`chainercv.transforms.resize`, but
this chooses the interpolation method randomly.
This data augmentation is used in training of
Single Shot Multibox Detector [#]_.
Note that this function requires :mod:`cv2`.
.. [#] Wei Liu, Dragomir Anguelov, Dumitru Erhan, Christian Szegedy,
Scott Reed, Cheng-Yang Fu, Alexander C. Berg.
SSD: Single Shot MultiBox Detector. ECCV 2016.
Args:
img (~numpy.ndarray): An array to be transformed.
This is in CHW format and the type should be :obj:`numpy.float32`.
size (tuple): This is a tuple of length 2. Its elements are
ordered as (height, width).
return_param (bool): Returns information of interpolation.
Returns:
~numpy.ndarray or (~numpy.ndarray, dict):
If :obj:`return_param = False`,
        returns an array :obj:`img` that is the result of resizing.
If :obj:`return_param = True`,
returns a tuple whose elements are :obj:`img, param`.
:obj:`param` is a dictionary of intermediate parameters whose
contents are listed below with key, value-type and the description
of the value.
        * **interpolation**: The chosen interpolation method.
"""
import cv2
cv_img = img.transpose((1, 2, 0))
inters = (
cv2.INTER_LINEAR,
cv2.INTER_AREA,
cv2.INTER_NEAREST,
cv2.INTER_CUBIC,
cv2.INTER_LANCZOS4,
)
inter = random.choice(inters)
H, W = size
cv_img = cv2.resize(cv_img, (W, H), interpolation=inter)
    # If input is a grayscale image, cv2 returns a two-dimensional array.
if len(cv_img.shape) == 2:
cv_img = cv_img[:, :, np.newaxis]
img = cv_img.astype(np.float32).transpose((2, 0, 1))
if return_param:
return img, {'interpolation': inter}
else:
return img
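# The block below is an illustrative smoke test, not part of the original
# module: it exercises the three SSD augmentations defined above on a
# synthetic CHW RGB image and one synthetic bounding box. It assumes
# :mod:`cv2` is installed, as the functions themselves require.
if __name__ == '__main__':
    _img = np.random.uniform(0, 255, size=(3, 240, 320)).astype(np.float32)
    # chainercv bounding boxes are (y_min, x_min, y_max, x_max)
    _bbox = np.array([[30, 40, 120, 200]], dtype=np.float32)
    _distorted = random_distort(_img)
    _cropped, _param = random_crop_with_bbox_constraints(
        _distorted, _bbox, return_param=True)
    # The returned y_slice/x_slice would normally be applied to the bounding
    # boxes as well; that step is omitted in this sketch.
    _resized = resize_with_random_interpolation(_cropped, (300, 300))
    print('distorted shape:', _distorted.shape)
    print('chosen constraint:', _param['constraint'])
    print('resized shape:', _resized.shape)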
|
_DEV_NOTES = """
Overview of classes:
* PyComponent and JsComponent: the base classes for creating Python/JS components.
* JSModule: represents a module in JS that corresponds to a Python module.
* Asset: represents an asset.
* Bundle: an Asset subclass to represent a collection of JSModules in one asset.
* AssetStore: one instance of this class is used to provide all client
assets in this process (JS, CSS, images, etc.). It also keeps track
of modules.
* SessionAssets: base class for Session that implements the assets/data part.
* Session: object that handles connection between Python and JS. Has a
websocket, and optionally a reference to the runtime.
* WebSocket: tornado WS handler.
* AppManager: keeps track of what apps are registered. Has functionality
to instantiate apps and connect the websocket to them.
* Server: handles http requests. Uses manager to create new app
instances or get the page for a pending session. Hosts assets by using
the global asset store.
* Flexx class (in _clientcore.py): more or less the JS side of a session.
"""
import logging
logger = logging.getLogger(__name__)
del logging
# flake8: noqa
from ._app import App, manager
from ._asset import Asset, Bundle
from ._component2 import BaseAppComponent, LocalComponent, ProxyComponent
from ._component2 import PyComponent, JsComponent, StubComponent
from ._component2 import get_component_classes, LocalProperty
from ._funcs import run, start, stop
from ._funcs import init_notebook, serve, launch, export
from ._server import create_server, current_server
from ._session import Session
from ._modules import JSModule
from ._assetstore import assets
from ._clientcore import serializer
# Resolve cyclic dependencies, and explicit exports to help cx_Freeze
# from . import _tornadoserver -- no, we don't want Tornado unless really needed
from . import _component2
_component2.manager = manager
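# Minimal illustrative sketch (not part of this package) of how the pieces
# described in _DEV_NOTES fit together: a PyComponent is wrapped in an App,
# launched in a runtime (which creates a Session over a websocket), and the
# event loop is started. Only names re-exported above are used; the component
# and its behaviour are placeholders.
if __name__ == '__main__':
    class HelloApp(PyComponent):
        def init(self):
            print('session started for', self.__class__.__name__)
    app = App(HelloApp)
    app.launch('browser')  # or app.serve() to only register it with the server
    run()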
|
import pandas as pd
import xarray as xr
from . import randn, requires_dask
try:
import dask # noqa: F401
except ImportError:
pass
def make_bench_data(shape, frac_nan, chunks):
vals = randn(shape, frac_nan)
coords = {"time": pd.date_range("2000-01-01", freq="D", periods=shape[0])}
da = xr.DataArray(vals, dims=("time", "x", "y"), coords=coords)
if chunks is not None:
da = da.chunk(chunks)
return da
def time_interpolate_na(shape, chunks, method, limit):
if chunks is not None:
requires_dask()
da = make_bench_data(shape, 0.1, chunks=chunks)
    actual = da.interpolate_na(dim="time", method=method, limit=limit)
if chunks is not None:
actual = actual.compute()
time_interpolate_na.param_names = ["shape", "chunks", "method", "limit"]
time_interpolate_na.params = (
[(3650, 200, 400), (100, 25, 25)],
[None, {"x": 25, "y": 25}],
["linear", "spline", "quadratic", "cubic"],
[None, 3],
)
def time_ffill(shape, chunks, limit):
da = make_bench_data(shape, 0.1, chunks=chunks)
actual = da.ffill(dim="time", limit=limit)
if chunks is not None:
actual = actual.compute()
time_ffill.param_names = ["shape", "chunks", "limit"]
time_ffill.params = (
[(3650, 200, 400), (100, 25, 25)],
[None, {"x": 25, "y": 25}],
[None, 3],
)
def time_bfill(shape, chunks, limit):
da = make_bench_data(shape, 0.1, chunks=chunks)
actual = da.bfill(dim="time", limit=limit)
if chunks is not None:
actual = actual.compute()
time_bfill.param_names = ["shape", "chunks", "limit"]
time_bfill.params = (
[(3650, 200, 400), (100, 25, 25)],
[None, {"x": 25, "y": 25}],
[None, 3],
)
|
import os
import pytest
from homeassistant import config as hass_config
from homeassistant.components.filesize import DOMAIN
from homeassistant.components.filesize.sensor import CONF_FILE_PATHS
from homeassistant.const import SERVICE_RELOAD
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
TEST_DIR = os.path.join(os.path.dirname(__file__))
TEST_FILE = os.path.join(TEST_DIR, "mock_file_test_filesize.txt")
def create_file(path):
"""Create a test file."""
with open(path, "w") as test_file:
test_file.write("test")
@pytest.fixture(autouse=True)
def remove_file():
"""Remove test file."""
yield
if os.path.isfile(TEST_FILE):
os.remove(TEST_FILE)
async def test_invalid_path(hass):
"""Test that an invalid path is caught."""
config = {"sensor": {"platform": "filesize", CONF_FILE_PATHS: ["invalid_path"]}}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 0
async def test_valid_path(hass):
"""Test for a valid path."""
create_file(TEST_FILE)
config = {"sensor": {"platform": "filesize", CONF_FILE_PATHS: [TEST_FILE]}}
hass.config.allowlist_external_dirs = {TEST_DIR}
assert await async_setup_component(hass, "sensor", config)
await hass.async_block_till_done()
assert len(hass.states.async_entity_ids()) == 1
state = hass.states.get("sensor.mock_file_test_filesize_txt")
assert state.state == "0.0"
assert state.attributes.get("bytes") == 4
async def test_reload(hass, tmpdir):
"""Verify we can reload filesize sensors."""
testfile = f"{tmpdir}/file"
await hass.async_add_executor_job(create_file, testfile)
with patch.object(hass.config, "is_allowed_path", return_value=True):
await async_setup_component(
hass,
"sensor",
{
"sensor": {
"platform": "filesize",
"file_paths": [testfile],
}
},
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 1
assert hass.states.get("sensor.file")
yaml_path = os.path.join(
_get_fixtures_base_path(),
"fixtures",
"filesize/configuration.yaml",
)
with patch.object(hass_config, "YAML_CONFIG_FILE", yaml_path), patch.object(
hass.config, "is_allowed_path", return_value=True
):
await hass.services.async_call(
DOMAIN,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert hass.states.get("sensor.file") is None
def _get_fixtures_base_path():
return os.path.dirname(os.path.dirname(os.path.dirname(__file__)))
|
from datetime import datetime as dt
from sqlalchemy import (
Integer,
ForeignKey,
String,
DefaultClause,
func,
Column,
Text,
Boolean,
)
from sqlalchemy.orm import relationship
from sqlalchemy_utils import JSONType
from sqlalchemy_utils.types.arrow import ArrowType
from lemur.certificates.models import get_sequence
from lemur.common import defaults, utils
from lemur.database import db
from lemur.models import (
pending_cert_source_associations,
pending_cert_destination_associations,
pending_cert_notification_associations,
pending_cert_replacement_associations,
pending_cert_role_associations,
)
from lemur.utils import Vault
def get_or_increase_name(name, serial):
certificates = PendingCertificate.query.filter(
PendingCertificate.name.ilike("{0}%".format(name))
).all()
if not certificates:
return name
serial_name = "{0}-{1}".format(name, hex(int(serial))[2:].upper())
certificates = PendingCertificate.query.filter(
PendingCertificate.name.ilike("{0}%".format(serial_name))
).all()
if not certificates:
return serial_name
ends = [0]
root, end = get_sequence(serial_name)
for cert in certificates:
root, end = get_sequence(cert.name)
if end:
ends.append(end)
return "{0}-{1}".format(root, max(ends) + 1)
class PendingCertificate(db.Model):
__tablename__ = "pending_certs"
id = Column(Integer, primary_key=True)
external_id = Column(String(128))
owner = Column(String(128), nullable=False)
name = Column(String(256), unique=True)
description = Column(String(1024))
notify = Column(Boolean, default=True)
number_attempts = Column(Integer)
rename = Column(Boolean, default=True)
resolved = Column(Boolean, default=False)
resolved_cert_id = Column(Integer, nullable=True)
cn = Column(String(128))
csr = Column(Text(), nullable=False)
chain = Column(Text())
private_key = Column(Vault, nullable=True)
date_created = Column(ArrowType, DefaultClause(func.now()), nullable=False)
dns_provider_id = Column(
Integer, ForeignKey("dns_providers.id", ondelete="CASCADE")
)
status = Column(Text(), nullable=True)
last_updated = Column(
ArrowType, DefaultClause(func.now()), onupdate=func.now(), nullable=False
)
rotation = Column(Boolean, default=False)
user_id = Column(Integer, ForeignKey("users.id"))
authority_id = Column(Integer, ForeignKey("authorities.id", ondelete="CASCADE"))
root_authority_id = Column(
Integer, ForeignKey("authorities.id", ondelete="CASCADE")
)
rotation_policy_id = Column(Integer, ForeignKey("rotation_policies.id"))
notifications = relationship(
"Notification",
secondary=pending_cert_notification_associations,
backref="pending_cert",
passive_deletes=True,
)
destinations = relationship(
"Destination",
secondary=pending_cert_destination_associations,
backref="pending_cert",
passive_deletes=True,
)
sources = relationship(
"Source",
secondary=pending_cert_source_associations,
backref="pending_cert",
passive_deletes=True,
)
roles = relationship(
"Role",
secondary=pending_cert_role_associations,
backref="pending_cert",
passive_deletes=True,
)
replaces = relationship(
"Certificate",
secondary=pending_cert_replacement_associations,
backref="pending_cert",
passive_deletes=True,
)
options = Column(JSONType)
rotation_policy = relationship("RotationPolicy")
sensitive_fields = ("private_key",)
def __init__(self, **kwargs):
self.csr = kwargs.get("csr")
self.private_key = kwargs.get("private_key", "")
if self.private_key:
# If the request does not send private key, the key exists but the value is None
self.private_key = self.private_key.strip()
self.external_id = kwargs.get("external_id")
# when destinations are appended they require a valid name.
if kwargs.get("name"):
self.name = get_or_increase_name(defaults.text_to_slug(kwargs["name"]), 0)
self.rename = False
else:
# TODO: Fix auto-generated name, it should be renamed on creation
self.name = get_or_increase_name(
defaults.certificate_name(
kwargs["common_name"],
kwargs["authority"].name,
dt.now(),
dt.now(),
False,
),
self.external_id,
)
self.rename = True
self.cn = defaults.common_name(utils.parse_csr(self.csr))
self.owner = kwargs["owner"]
self.number_attempts = 0
if kwargs.get("chain"):
self.chain = kwargs["chain"].strip()
self.notify = kwargs.get("notify", True)
self.destinations = kwargs.get("destinations", [])
self.notifications = kwargs.get("notifications", [])
self.description = kwargs.get("description")
self.roles = list(set(kwargs.get("roles", [])))
self.replaces = kwargs.get("replaces", [])
self.rotation = kwargs.get("rotation")
self.rotation_policy = kwargs.get("rotation_policy")
try:
self.dns_provider_id = kwargs.get("dns_provider").id
except (AttributeError, KeyError, TypeError, Exception):
pass
|
from lemur.plugins.bases import NotificationPlugin
class TestNotificationPlugin(NotificationPlugin):
title = "Test"
slug = "test-notification"
description = "Enables testing"
author = "Kevin Glisson"
author_url = "https://github.com/netflix/lemur.git"
def __init__(self, *args, **kwargs):
super(TestNotificationPlugin, self).__init__(*args, **kwargs)
@staticmethod
def send(notification_type, message, targets, options, **kwargs):
return
|
from __future__ import division
from itertools import chain
from pygal import colors
from pygal.colors import darken, is_foreground_light, lighten
class Style(object):
"""Styling class containing colors for the css generation"""
plot_background = 'rgba(255, 255, 255, 1)'
background = 'rgba(249, 249, 249, 1)'
value_background = 'rgba(229, 229, 229, 1)'
foreground = 'rgba(0, 0, 0, .87)'
foreground_strong = 'rgba(0, 0, 0, 1)'
foreground_subtle = 'rgba(0, 0, 0, .54)'
# Monospaced font is highly encouraged
font_family = ('Consolas, "Liberation Mono", Menlo, Courier, monospace')
label_font_family = None
major_label_font_family = None
value_font_family = None
value_label_font_family = None
tooltip_font_family = None
title_font_family = None
legend_font_family = None
no_data_font_family = None
label_font_size = 10
major_label_font_size = 10
value_font_size = 16
value_label_font_size = 10
tooltip_font_size = 14
title_font_size = 16
legend_font_size = 14
no_data_font_size = 64
# Guide line dash array style
guide_stroke_dasharray = '4,4'
major_guide_stroke_dasharray = '6,6'
guide_stroke_color = 'black'
major_guide_stroke_color = 'black'
opacity = '.7'
opacity_hover = '.8'
stroke_opacity = '.8'
stroke_width = '1'
stroke_opacity_hover = '.9'
stroke_width_hover = '4'
dot_opacity = '1'
transition = '150ms'
colors = (
'#F44336', # 0
'#3F51B5', # 4
'#009688', # 8
'#FFC107', # 13
'#FF5722', # 15
'#9C27B0', # 2
'#03A9F4', # 6
'#8BC34A', # 10
'#FF9800', # 14
'#E91E63', # 1
'#2196F3', # 5
'#4CAF50', # 9
'#FFEB3B', # 12
'#673AB7', # 3
'#00BCD4', # 7
        '#CDDC39', # 11
'#9E9E9E', # 17
'#607D8B', # 18
)
value_colors = ()
ci_colors = ()
def __init__(self, **kwargs):
"""Create the style"""
self.__dict__.update(kwargs)
self._google_fonts = set()
if self.font_family.startswith('googlefont:'):
self.font_family = self.font_family.replace('googlefont:', '')
self._google_fonts.add(self.font_family.split(',')[0].strip())
for name in dir(self):
if name.endswith('_font_family'):
fn = getattr(self, name)
if fn is None:
setattr(self, name, self.font_family)
elif fn.startswith('googlefont:'):
setattr(self, name, fn.replace('googlefont:', ''))
self._google_fonts.add(
getattr(self, name).split(',')[0].strip()
)
def get_colors(self, prefix, len_):
"""Get the css color list"""
def color(tupl):
"""Make a color css"""
return ((
'%s.color-{0}, %s.color-{0} a:visited {{\n'
' stroke: {1};\n'
' fill: {1};\n'
'}}\n'
) % (prefix, prefix)).format(*tupl)
def value_color(tupl):
"""Make a value color css"""
return ((
'%s .text-overlay .color-{0} text {{\n'
' fill: {1};\n'
'}}\n'
) % (prefix, )).format(*tupl)
def ci_color(tupl):
"""Make a value color css"""
if not tupl[1]:
return ''
return (('%s .color-{0} .ci {{\n'
' stroke: {1};\n'
'}}\n') % (prefix, )).format(*tupl)
if len(self.colors) < len_:
missing = len_ - len(self.colors)
cycles = 1 + missing // len(self.colors)
colors = []
for i in range(0, cycles + 1):
for color_ in self.colors:
colors.append(darken(color_, 33 * i / cycles))
if len(colors) >= len_:
break
else:
continue
break
else:
colors = self.colors[:len_]
# Auto compute foreground value color when color is missing
value_colors = []
for i in range(len_):
if i < len(self.value_colors) and self.value_colors[i] is not None:
value_colors.append(self.value_colors[i])
else:
value_colors.append(
'white' if is_foreground_light(colors[i]) else 'black'
)
return '\n'.join(
chain(
map(color, enumerate(colors)),
map(value_color, enumerate(value_colors)),
map(ci_color, enumerate(self.ci_colors))
)
)
def to_dict(self):
"""Convert instance to a serializable mapping."""
config = {}
for attr in dir(self):
if not attr.startswith('_'):
value = getattr(self, attr)
if not hasattr(value, '__call__'):
config[attr] = value
return config
DefaultStyle = Style
class DarkStyle(Style):
"""A dark style (old default)"""
background = 'black'
plot_background = '#111'
foreground = '#999'
foreground_strong = '#eee'
foreground_subtle = '#555'
opacity = '.8'
opacity_hover = '.4'
transition = '250ms'
colors = (
'#ff5995', '#b6e354', '#feed6c', '#8cedff', '#9e6ffe', '#899ca1',
'#f8f8f2', '#bf4646', '#516083', '#f92672', '#82b414', '#fd971f',
'#56c2d6', '#808384', '#8c54fe', '#465457'
)
class LightStyle(Style):
"""A light style"""
background = 'white'
plot_background = 'rgba(0, 0, 255, 0.1)'
foreground = 'rgba(0, 0, 0, 0.7)'
foreground_strong = 'rgba(0, 0, 0, 0.9)'
foreground_subtle = 'rgba(0, 0, 0, 0.5)'
colors = (
'#242424', '#9f6767', '#92ac68', '#d0d293', '#9aacc3', '#bb77a4',
'#77bbb5', '#777777'
)
class NeonStyle(DarkStyle):
"""Similar to DarkStyle but with more opacity and effects"""
opacity = '.1'
opacity_hover = '.75'
transition = '1s ease-out'
class CleanStyle(Style):
"""A rather clean style"""
background = 'transparent'
plot_background = 'rgba(240, 240, 240, 0.7)'
foreground = 'rgba(0, 0, 0, 0.9)'
foreground_strong = 'rgba(0, 0, 0, 0.9)'
foreground_subtle = 'rgba(0, 0, 0, 0.5)'
colors = (
'rgb(12,55,149)', 'rgb(117,38,65)', 'rgb(228,127,0)', 'rgb(159,170,0)',
'rgb(149,12,12)'
)
class DarkSolarizedStyle(Style):
"""Dark solarized popular theme"""
background = '#073642'
plot_background = '#002b36'
foreground = '#839496'
foreground_strong = '#fdf6e3'
foreground_subtle = '#657b83'
opacity = '.66'
opacity_hover = '.9'
transition = '500ms ease-in'
colors = (
'#b58900', '#cb4b16', '#dc322f', '#d33682', '#6c71c4', '#268bd2',
'#2aa198', '#859900'
)
class LightSolarizedStyle(DarkSolarizedStyle):
"""Light solarized popular theme"""
background = '#fdf6e3'
plot_background = '#eee8d5'
foreground = '#657b83'
foreground_strong = '#073642'
foreground_subtle = '#073642'
class RedBlueStyle(Style):
"""A red and blue theme"""
background = lighten('#e6e7e9', 7)
plot_background = lighten('#e6e7e9', 10)
foreground = 'rgba(0, 0, 0, 0.9)'
foreground_strong = 'rgba(0, 0, 0, 0.9)'
foreground_subtle = 'rgba(0, 0, 0, 0.5)'
opacity = '.6'
opacity_hover = '.9'
colors = (
'#d94e4c', '#e5884f', '#39929a', lighten('#d94e4c', 10),
darken('#39929a', 15), lighten('#e5884f', 17), darken('#d94e4c', 10),
'#234547'
)
class LightColorizedStyle(Style):
"""A light colorized style"""
background = '#f8f8f8'
plot_background = lighten('#f8f8f8', 3)
foreground = '#333'
foreground_strong = '#666'
    foreground_subtle = 'rgba(0, 0, 0, 0.5)'
opacity = '.5'
opacity_hover = '.9'
transition = '250ms ease-in'
colors = (
'#fe9592', '#534f4c', '#3ac2c0', '#a2a7a1', darken('#fe9592', 15),
lighten('#534f4c', 15), lighten('#3ac2c0', 15), lighten('#a2a7a1', 15),
lighten('#fe9592', 15), darken('#3ac2c0', 10)
)
class DarkColorizedStyle(Style):
"""A dark colorized style"""
background = darken('#3a2d3f', 5)
plot_background = lighten('#3a2d3f', 2)
foreground = 'rgba(255, 255, 255, 0.9)'
foreground_strong = 'rgba(255, 255, 255, 0.9)'
    foreground_subtle = 'rgba(255, 255, 255, 0.5)'
opacity = '.2'
opacity_hover = '.7'
transition = '250ms ease-in'
colors = (
'#c900fe', '#01b8fe', '#59f500', '#ff00e4', '#f9fa00',
darken('#c900fe', 20), darken('#01b8fe', 15), darken('#59f500', 20),
darken('#ff00e4', 15), lighten('#f9fa00', 20)
)
class TurquoiseStyle(Style):
"""A turquoise style"""
background = darken('#1b8088', 15)
plot_background = darken('#1b8088', 17)
foreground = 'rgba(255, 255, 255, 0.9)'
foreground_strong = 'rgba(255, 255, 255, 0.9)'
    foreground_subtle = 'rgba(255, 255, 255, 0.5)'
opacity = '.5'
opacity_hover = '.9'
transition = '250ms ease-in'
colors = (
'#93d2d9', '#ef940f', '#8C6243', '#fff', darken('#93d2d9', 20),
lighten('#ef940f', 15), lighten('#8c6243', 15), '#1b8088'
)
class LightGreenStyle(Style):
"""A light green style"""
background = lighten('#f3f3f3', 3)
plot_background = '#fff'
foreground = '#333333'
foreground_strong = '#666'
foreground_subtle = '#222222'
opacity = '.5'
opacity_hover = '.9'
transition = '250ms ease-in'
colors = (
'#7dcf30', '#247fab', lighten('#7dcf30', 10), '#ccc',
darken('#7dcf30', 15), '#ddd', lighten('#247fab', 10),
darken('#247fab', 15)
)
class DarkGreenStyle(Style):
"""A dark green style"""
background = darken('#251e01', 3)
plot_background = darken('#251e01', 1)
foreground = 'rgba(255, 255, 255, 0.9)'
foreground_strong = 'rgba(255, 255, 255, 0.9)'
foreground_subtle = 'rgba(255, 255, 255, 0.6)'
opacity = '.6'
opacity_hover = '.9'
transition = '250ms ease-in'
colors = (
'#adde09', '#6e8c06', '#4a5e04', '#fcd202', '#C1E34D',
lighten('#fcd202', 25)
)
class DarkGreenBlueStyle(Style):
"""A dark green and blue style"""
background = '#000'
plot_background = lighten('#000', 8)
foreground = 'rgba(255, 255, 255, 0.9)'
foreground_strong = 'rgba(255, 255, 255, 0.9)'
foreground_subtle = 'rgba(255, 255, 255, 0.6)'
opacity = '.55'
opacity_hover = '.9'
transition = '250ms ease-in'
colors = (
lighten('#34B8F7', 15), '#7dcf30', '#247fab', darken('#7dcf30', 10),
lighten('#247fab', 10), lighten('#7dcf30', 10), darken('#247fab', 10),
'#fff'
)
class BlueStyle(Style):
"""A blue style"""
background = darken('#f8f8f8', 3)
plot_background = '#f8f8f8'
foreground = 'rgba(0, 0, 0, 0.9)'
foreground_strong = 'rgba(0, 0, 0, 0.9)'
foreground_subtle = 'rgba(0, 0, 0, 0.6)'
opacity = '.5'
opacity_hover = '.9'
transition = '250ms ease-in'
colors = (
'#00b2f0', '#43d9be', '#0662ab', darken('#00b2f0', 20),
lighten('#43d9be', 20), lighten('#7dcf30', 10), darken('#0662ab', 15),
'#ffd541', '#7dcf30', lighten('#00b2f0', 15), darken('#ffd541', 20)
)
class SolidColorStyle(Style):
"""A light style with strong colors"""
background = '#FFFFFF'
plot_background = '#FFFFFF'
foreground = '#000000'
foreground_strong = '#000000'
foreground_subtle = '#828282'
opacity = '.8'
opacity_hover = '.9'
transition = '400ms ease-in'
colors = (
'#FF9900', '#DC3912', '#4674D1', '#109618', '#990099', '#0099C6',
'#DD4477', '#74B217', '#B82E2E', '#316395', '#994499'
)
styles = {
'default': DefaultStyle,
'dark': DarkStyle,
'light': LightStyle,
'neon': NeonStyle,
'clean': CleanStyle,
'light_red_blue': RedBlueStyle,
'dark_solarized': DarkSolarizedStyle,
'light_solarized': LightSolarizedStyle,
'dark_colorized': DarkColorizedStyle,
'light_colorized': LightColorizedStyle,
'turquoise': TurquoiseStyle,
'green': LightGreenStyle,
'dark_green': DarkGreenStyle,
'dark_green_blue': DarkGreenBlueStyle,
'blue': BlueStyle,
'solid_color': SolidColorStyle
}
class ParametricStyleBase(Style):
"""Parametric Style base class for all the parametric operations"""
_op = None
def __init__(self, color, step=10, max_=None, base_style=None, **kwargs):
"""
Initialization of the parametric style.
This takes several parameters:
        * a `step` which corresponds to how many colors will be needed
* a `max_` which defines the maximum amplitude of the color effect
* a `base_style` which will be taken as default for everything
except colors
* any keyword arguments setting other style parameters
"""
if self._op is None:
            raise RuntimeError('ParametricStyle is not instantiable')
defaults = {}
if base_style is not None:
if isinstance(base_style, type):
base_style = base_style()
defaults.update(base_style.to_dict())
defaults.update(kwargs)
super(ParametricStyleBase, self).__init__(**defaults)
if max_ is None:
violency = {
'darken': 50,
'lighten': 50,
'saturate': 100,
'desaturate': 100,
'rotate': 360
}
max_ = violency[self._op]
def modifier(index):
percent = max_ * index / (step - 1)
return getattr(colors, self._op)(color, percent)
self.colors = list(map(modifier, range(0, max(2, step))))
class LightenStyle(ParametricStyleBase):
"""Create a style by lightening the given color"""
_op = 'lighten'
class DarkenStyle(ParametricStyleBase):
"""Create a style by darkening the given color"""
_op = 'darken'
class SaturateStyle(ParametricStyleBase):
"""Create a style by saturating the given color"""
_op = 'saturate'
class DesaturateStyle(ParametricStyleBase):
"""Create a style by desaturating the given color"""
_op = 'desaturate'
class RotateStyle(ParametricStyleBase):
"""Create a style by rotating the given color"""
_op = 'rotate'
parametric_styles = {
'lighten': LightenStyle,
'darken': DarkenStyle,
'saturate': SaturateStyle,
'desaturate': DesaturateStyle,
'rotate': RotateStyle
}
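# Illustrative usage (not part of pygal): derive a five-color style from a
# single base color with LightenStyle, reusing DarkStyle for everything but
# the colors, then print the generated CSS color rules for a hypothetical
# '.mychart' selector. Base color, step and selector are placeholders.
if __name__ == '__main__':
    custom_style = LightenStyle(
        '#336699', step=5, base_style=DarkStyle, font_family='monospace')
    print(custom_style.colors)
    print(custom_style.get_colors('.mychart', 5))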
|
import numpy as np
import six
from chainercv.utils.testing.assertions.assert_is_bbox import assert_is_bbox
def assert_is_detection_link(link, n_fg_class):
"""Checks if a link satisfies detection link APIs.
This function checks if a given link satisfies detection link APIs
or not.
    If the link does not satisfy the APIs, this function raises an
:class:`AssertionError`.
Args:
link: A link to be checked.
n_fg_class (int): The number of foreground classes.
"""
imgs = [
np.random.randint(0, 256, size=(3, 480, 640)).astype(np.float32),
np.random.randint(0, 256, size=(3, 480, 320)).astype(np.float32)]
result = link.predict(imgs)
assert len(result) == 3, \
'Link must return three elements: bboxes, labels and scores.'
bboxes, labels, scores = result
assert len(bboxes) == len(imgs), \
'The length of bboxes must be same as that of imgs.'
assert len(labels) == len(imgs), \
'The length of labels must be same as that of imgs.'
assert len(scores) == len(imgs), \
'The length of scores must be same as that of imgs.'
for bbox, label, score in six.moves.zip(bboxes, labels, scores):
assert_is_bbox(bbox)
assert isinstance(label, np.ndarray), \
'label must be a numpy.ndarray.'
assert label.dtype == np.int32, \
'The type of label must be numpy.int32.'
assert label.shape[1:] == (), \
'The shape of label must be (*,).'
assert len(label) == len(bbox), \
'The length of label must be same as that of bbox.'
if len(label) > 0:
assert label.min() >= 0 and label.max() < n_fg_class, \
'The value of label must be in [0, n_fg_class - 1].'
assert isinstance(score, np.ndarray), \
'score must be a numpy.ndarray.'
assert score.dtype == np.float32, \
'The type of score must be numpy.float32.'
assert score.shape[1:] == (), \
'The shape of score must be (*,).'
assert len(score) == len(bbox), \
'The length of score must be same as that of bbox.'
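# Illustrative self-check (not part of chainercv): a minimal object whose
# ``predict`` method returns per-image float32 bounding boxes, int32 labels
# and float32 scores should satisfy the detection link API checked above.
# The box coordinates, label and score are arbitrary placeholders.
if __name__ == '__main__':
    class _DummyDetectionLink(object):
        def predict(self, imgs):
            bboxes, labels, scores = [], [], []
            for _ in imgs:
                bboxes.append(
                    np.array([[10., 20., 100., 150.]], dtype=np.float32))
                labels.append(np.array([0], dtype=np.int32))
                scores.append(np.array([0.9], dtype=np.float32))
            return bboxes, labels, scores
    assert_is_detection_link(_DummyDetectionLink(), n_fg_class=1)
    print('dummy detection link passed the API check')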
|
from typing import Any
from typing import Mapping
from kazoo.client import KazooClient
from pyramid.view import view_config
from paasta_tools.api import settings
from paasta_tools.deployd.queue import ZKDelayDeadlineQueue
@view_config(route_name="deploy_queue.list", request_method="GET", renderer="json")
def list_deploy_queue(request) -> Mapping[str, Any]:
zk_client = KazooClient(hosts=settings.system_paasta_config.get_zk_hosts())
zk_client.start()
queue = ZKDelayDeadlineQueue(client=zk_client)
available_service_instances = queue.get_available_service_instances(
fetch_service_instances=True
)
unavailable_service_instances = queue.get_unavailable_service_instances(
fetch_service_instances=True
)
available_service_instance_dicts = [
service_instance._asdict()
for _, service_instance in available_service_instances
]
unavailable_service_instance_dicts = [
service_instance._asdict()
for _, __, service_instance in unavailable_service_instances
]
return {
"available_service_instances": available_service_instance_dicts,
"unavailable_service_instances": unavailable_service_instance_dicts,
}
|
from flask import Flask
from flask import jsonify
from flasgger import Swagger
from flasgger import swag_from
app = Flask(__name__)
swag = Swagger(app)
@app.route("/example")
@swag_from({
"responses": {
400: {
"description": "Invalid action"
},
401: {
"description": "Login required"
}
},
"tags": ["Tag 1", "Tag 2"]
})
def view():
"""
A test view
---
responses:
200:
description: OK
tags: [Tag 3, Tag 4]
"""
return jsonify(hello="world")
def test_swag(client, specs_data):
example_spec = specs_data["/apispec_1.json"]["paths"]["/example"]["get"]
assert "400" in example_spec["responses"]
assert "401" in example_spec["responses"]
assert "200" in example_spec["responses"]
assert "Tag 1" in example_spec["tags"]
assert "Tag 2" in example_spec["tags"]
assert "Tag 3" in example_spec["tags"]
assert "Tag 4" in example_spec["tags"]
if __name__ == "__main__":
app.run(debug=True)
|
def boot(application, config):
if config and config['origins']:
try:
from flask.ext.cors import CORS
for i in config.keys():
application.config['CORS_%s' % i.upper()] = config[i]
CORS(application)
except Exception as e:
raise Exception('Failed to init cors support %s' % e)
|
from itertools import groupby
import numpy as np
import pandas as pd
def aggregate_returns(returns, convert_to):
"""
Aggregates returns by day, week, month, or year.
"""
def cumulate_returns(x):
return np.exp(np.log(1 + x).cumsum())[-1] - 1
if convert_to == 'weekly':
return returns.groupby(
[lambda x: x.year,
lambda x: x.month,
lambda x: x.isocalendar()[1]]).apply(cumulate_returns)
elif convert_to == 'monthly':
return returns.groupby(
[lambda x: x.year, lambda x: x.month]).apply(cumulate_returns)
elif convert_to == 'yearly':
return returns.groupby(
[lambda x: x.year]).apply(cumulate_returns)
else:
        raise ValueError('convert_to must be weekly, monthly or yearly')
def create_cagr(equity, periods=252):
"""
Calculates the Compound Annual Growth Rate (CAGR)
for the portfolio, by determining the number of years
and then creating a compound annualised rate based
on the total return.
Parameters:
equity - A pandas Series representing the equity curve.
periods - Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc.
"""
years = len(equity) / float(periods)
return (equity[-1] ** (1.0 / years)) - 1.0
def create_sharpe_ratio(returns, periods=252):
"""
Create the Sharpe ratio for the strategy, based on a
benchmark of zero (i.e. no risk-free rate information).
Parameters:
returns - A pandas Series representing period percentage returns.
periods - Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc.
"""
return np.sqrt(periods) * (np.mean(returns)) / np.std(returns)
def create_sortino_ratio(returns, periods=252):
"""
Create the Sortino ratio for the strategy, based on a
benchmark of zero (i.e. no risk-free rate information).
Parameters:
returns - A pandas Series representing period percentage returns.
periods - Daily (252), Hourly (252*6.5), Minutely(252*6.5*60) etc.
"""
return np.sqrt(periods) * (np.mean(returns)) / np.std(returns[returns < 0])
def create_drawdowns(returns):
"""
Calculate the largest peak-to-trough drawdown of the equity curve
    as well as the duration of the drawdown. Requires that the
    input is a pandas Series of cumulative returns.
    Parameters:
    returns - A pandas Series representing the cumulative returns (equity) curve.
Returns:
drawdown, drawdown_max, duration
"""
# Calculate the cumulative returns curve
# and set up the High Water Mark
idx = returns.index
hwm = np.zeros(len(idx))
# Create the high water mark
for t in range(1, len(idx)):
hwm[t] = max(hwm[t - 1], returns.iloc[t])
# Calculate the drawdown and duration statistics
perf = pd.DataFrame(index=idx)
perf["Drawdown"] = (hwm - returns) / hwm
perf["Drawdown"].iloc[0] = 0.0
perf["DurationCheck"] = np.where(perf["Drawdown"] == 0, 0, 1)
duration = max(
sum(1 for i in g if i == 1)
for k, g in groupby(perf["DurationCheck"])
)
return perf["Drawdown"], np.max(perf["Drawdown"]), duration
|
from contextlib import contextmanager
from typing import (
TYPE_CHECKING,
Any,
Dict,
Hashable,
Iterator,
Mapping,
Sequence,
Set,
Tuple,
Union,
cast,
)
import pandas as pd
from . import formatting, indexing
from .indexes import Indexes
from .merge import merge_coordinates_without_align, merge_coords
from .utils import Frozen, ReprObject, either_dict_or_kwargs
from .variable import Variable
if TYPE_CHECKING:
from .dataarray import DataArray
from .dataset import Dataset
# Used as the key corresponding to a DataArray's variable when converting
# arbitrary DataArray objects to datasets
_THIS_ARRAY = ReprObject("<this-array>")
class Coordinates(Mapping[Hashable, "DataArray"]):
__slots__ = ()
def __getitem__(self, key: Hashable) -> "DataArray":
raise NotImplementedError()
def __setitem__(self, key: Hashable, value: Any) -> None:
self.update({key: value})
@property
def _names(self) -> Set[Hashable]:
raise NotImplementedError()
@property
def dims(self) -> Union[Mapping[Hashable, int], Tuple[Hashable, ...]]:
raise NotImplementedError()
@property
def indexes(self) -> Indexes:
return self._data.indexes # type: ignore
@property
def variables(self):
raise NotImplementedError()
def _update_coords(self, coords, indexes):
raise NotImplementedError()
def __iter__(self) -> Iterator["Hashable"]:
# needs to be in the same order as the dataset variables
for k in self.variables:
if k in self._names:
yield k
def __len__(self) -> int:
return len(self._names)
def __contains__(self, key: Hashable) -> bool:
return key in self._names
def __repr__(self) -> str:
return formatting.coords_repr(self)
def to_dataset(self) -> "Dataset":
raise NotImplementedError()
def to_index(self, ordered_dims: Sequence[Hashable] = None) -> pd.Index:
"""Convert all index coordinates into a :py:class:`pandas.Index`.
Parameters
----------
ordered_dims : sequence of hashable, optional
Possibly reordered version of this object's dimensions indicating
the order in which dimensions should appear on the result.
Returns
-------
pandas.Index
Index subclass corresponding to the outer-product of all dimension
            coordinates. This will be a MultiIndex if this object has more
            than one dimension.
"""
if ordered_dims is None:
ordered_dims = list(self.dims)
elif set(ordered_dims) != set(self.dims):
raise ValueError(
"ordered_dims must match dims, but does not: "
"{} vs {}".format(ordered_dims, self.dims)
)
if len(ordered_dims) == 0:
raise ValueError("no valid index for a 0-dimensional object")
elif len(ordered_dims) == 1:
(dim,) = ordered_dims
return self._data.get_index(dim) # type: ignore
else:
indexes = [self._data.get_index(k) for k in ordered_dims] # type: ignore
names = list(ordered_dims)
return pd.MultiIndex.from_product(indexes, names=names)
def update(self, other: Mapping[Hashable, Any]) -> None:
other_vars = getattr(other, "variables", other)
coords, indexes = merge_coords(
[self.variables, other_vars], priority_arg=1, indexes=self.indexes
)
self._update_coords(coords, indexes)
def _merge_raw(self, other):
"""For use with binary arithmetic."""
if other is None:
variables = dict(self.variables)
indexes = dict(self.indexes)
else:
variables, indexes = merge_coordinates_without_align([self, other])
return variables, indexes
@contextmanager
def _merge_inplace(self, other):
"""For use with in-place binary arithmetic."""
if other is None:
yield
else:
# don't include indexes in prioritized, because we didn't align
# first and we want indexes to be checked
prioritized = {
k: (v, None) for k, v in self.variables.items() if k not in self.indexes
}
variables, indexes = merge_coordinates_without_align(
[self, other], prioritized
)
yield
self._update_coords(variables, indexes)
def merge(self, other: "Coordinates") -> "Dataset":
"""Merge two sets of coordinates to create a new Dataset
The method implements the logic used for joining coordinates in the
result of a binary operation performed on xarray objects:
- If two index coordinates conflict (are not equal), an exception is
raised. You must align your data before passing it to this method.
- If an index coordinate and a non-index coordinate conflict, the non-
index coordinate is dropped.
- If two non-index coordinates conflict, both are dropped.
Parameters
----------
other : DatasetCoordinates or DataArrayCoordinates
The coordinates from another dataset or data array.
Returns
-------
merged : Dataset
A new Dataset with merged coordinates.
"""
from .dataset import Dataset
if other is None:
return self.to_dataset()
if not isinstance(other, Coordinates):
other = Dataset(coords=other).coords
coords, indexes = merge_coordinates_without_align([self, other])
coord_names = set(coords)
merged = Dataset._construct_direct(
variables=coords, coord_names=coord_names, indexes=indexes
)
return merged
class DatasetCoordinates(Coordinates):
"""Dictionary like container for Dataset coordinates.
Essentially an immutable dictionary with keys given by the array's
dimensions and the values given by the corresponding xarray.Coordinate
objects.
"""
__slots__ = ("_data",)
def __init__(self, dataset: "Dataset"):
self._data = dataset
@property
def _names(self) -> Set[Hashable]:
return self._data._coord_names
@property
def dims(self) -> Mapping[Hashable, int]:
return self._data.dims
@property
def variables(self) -> Mapping[Hashable, Variable]:
return Frozen(
{k: v for k, v in self._data.variables.items() if k in self._names}
)
def __getitem__(self, key: Hashable) -> "DataArray":
if key in self._data.data_vars:
raise KeyError(key)
return cast("DataArray", self._data[key])
def to_dataset(self) -> "Dataset":
"""Convert these coordinates into a new Dataset"""
names = [name for name in self._data._variables if name in self._names]
return self._data._copy_listed(names)
def _update_coords(
self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]
) -> None:
from .dataset import calculate_dimensions
variables = self._data._variables.copy()
variables.update(coords)
# check for inconsistent state *before* modifying anything in-place
dims = calculate_dimensions(variables)
new_coord_names = set(coords)
for dim, size in dims.items():
if dim in variables:
new_coord_names.add(dim)
self._data._variables = variables
self._data._coord_names.update(new_coord_names)
self._data._dims = dims
# TODO(shoyer): once ._indexes is always populated by a dict, modify
# it to update inplace instead.
original_indexes = dict(self._data.indexes)
original_indexes.update(indexes)
self._data._indexes = original_indexes
def __delitem__(self, key: Hashable) -> None:
if key in self:
del self._data[key]
else:
raise KeyError(f"{key!r} is not a coordinate variable.")
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return [
key
for key in self._data._ipython_key_completions_()
if key not in self._data.data_vars
]
class DataArrayCoordinates(Coordinates):
"""Dictionary like container for DataArray coordinates.
Essentially a dict with keys given by the array's
dimensions and the values given by corresponding DataArray objects.
"""
__slots__ = ("_data",)
def __init__(self, dataarray: "DataArray"):
self._data = dataarray
@property
def dims(self) -> Tuple[Hashable, ...]:
return self._data.dims
@property
def _names(self) -> Set[Hashable]:
return set(self._data._coords)
def __getitem__(self, key: Hashable) -> "DataArray":
return self._data._getitem_coord(key)
def _update_coords(
self, coords: Dict[Hashable, Variable], indexes: Mapping[Hashable, pd.Index]
) -> None:
from .dataset import calculate_dimensions
coords_plus_data = coords.copy()
coords_plus_data[_THIS_ARRAY] = self._data.variable
dims = calculate_dimensions(coords_plus_data)
if not set(dims) <= set(self.dims):
raise ValueError(
"cannot add coordinates with new dimensions to a DataArray"
)
self._data._coords = coords
# TODO(shoyer): once ._indexes is always populated by a dict, modify
# it to update inplace instead.
original_indexes = dict(self._data.indexes)
original_indexes.update(indexes)
self._data._indexes = original_indexes
@property
def variables(self):
return Frozen(self._data._coords)
def to_dataset(self) -> "Dataset":
from .dataset import Dataset
coords = {k: v.copy(deep=False) for k, v in self._data._coords.items()}
return Dataset._construct_direct(coords, set(coords))
def __delitem__(self, key: Hashable) -> None:
if key in self:
del self._data._coords[key]
if self._data._indexes is not None and key in self._data._indexes:
del self._data._indexes[key]
else:
raise KeyError(f"{key!r} is not a coordinate variable.")
def _ipython_key_completions_(self):
"""Provide method for the key-autocompletions in IPython. """
return self._data._ipython_key_completions_()
class LevelCoordinatesSource(Mapping[Hashable, Any]):
"""Iterator for MultiIndex level coordinates.
Used for attribute style lookup with AttrAccessMixin. Not returned directly
by any public methods.
"""
__slots__ = ("_data",)
def __init__(self, data_object: "Union[DataArray, Dataset]"):
self._data = data_object
def __getitem__(self, key):
# not necessary -- everything here can already be found in coords.
raise KeyError()
def __iter__(self) -> Iterator[Hashable]:
return iter(self._data._level_coords)
def __len__(self) -> int:
return len(self._data._level_coords)
def assert_coordinate_consistent(
obj: Union["DataArray", "Dataset"], coords: Mapping[Hashable, Variable]
) -> None:
"""Make sure the dimension coordinate of obj is consistent with coords.
obj: DataArray or Dataset
coords: Dict-like of variables
"""
for k in obj.dims:
# make sure there are no conflict in dimension coordinates
if k in coords and k in obj.coords:
if not coords[k].equals(obj[k].variable):
raise IndexError(
"dimension coordinate {!r} conflicts between "
"indexed and indexing objects:\n{}\nvs.\n{}".format(
k, obj[k], coords[k]
)
)
def remap_label_indexers(
obj: Union["DataArray", "Dataset"],
indexers: Mapping[Hashable, Any] = None,
method: str = None,
tolerance=None,
**indexers_kwargs: Any,
) -> Tuple[dict, dict]: # TODO more precise return type after annotations in indexing
"""Remap indexers from obj.coords.
    If an indexer is an instance of DataArray and it has a coordinate, then this
    coordinate will be attached to pos_indexers.
Returns
-------
pos_indexers: Same type of indexers.
np.ndarray or Variable or DataArray
new_indexes: mapping of new dimensional-coordinate.
"""
from .dataarray import DataArray
indexers = either_dict_or_kwargs(indexers, indexers_kwargs, "remap_label_indexers")
v_indexers = {
k: v.variable.data if isinstance(v, DataArray) else v
for k, v in indexers.items()
}
pos_indexers, new_indexes = indexing.remap_label_indexers(
obj, v_indexers, method=method, tolerance=tolerance
)
# attach indexer's coordinate to pos_indexers
for k, v in indexers.items():
if isinstance(v, Variable):
pos_indexers[k] = Variable(v.dims, pos_indexers[k])
elif isinstance(v, DataArray):
# drop coordinates found in indexers since .sel() already
# ensures alignments
coords = {k: var for k, var in v._coords.items() if k not in indexers}
pos_indexers[k] = DataArray(pos_indexers[k], coords=coords, dims=v.dims)
return pos_indexers, new_indexes
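# Minimal illustration (not part of xarray itself) of the public behaviour
# implemented above: ``Coordinates.to_index`` builds the outer product of the
# requested dimension coordinates, and ``Coordinates.merge`` combines two
# coordinate sets into a new Dataset. Requires xarray to be importable; the
# coordinate names and values are placeholders.
if __name__ == "__main__":
    import xarray as xr
    ds = xr.Dataset(coords={"x": [0, 1], "y": ["a", "b"]})
    other = xr.Dataset(coords={"x": [0, 1], "z": [10, 20]})
    print(ds.coords.to_index(["y", "x"]))
    print(ds.coords.merge(other.coords))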
|
import os
import pytest
from PyQt5.QtCore import Qt
from qutebrowser.mainwindow import prompt as promptmod
from qutebrowser.utils import usertypes
class TestFileCompletion:
@pytest.fixture
def get_prompt(self, qtbot, config_stub, key_config_stub):
"""Get a function to display a prompt with a path."""
config_stub.val.bindings.default = {}
def _get_prompt_func(path):
question = usertypes.Question()
question.title = "test"
question.default = path
prompt = promptmod.DownloadFilenamePrompt(question)
qtbot.add_widget(prompt)
with qtbot.wait_signal(prompt._file_model.directoryLoaded):
pass
assert prompt._lineedit.text() == path
return prompt
return _get_prompt_func
@pytest.mark.parametrize('steps, where, subfolder', [
(1, 'next', '..'),
(1, 'prev', 'c'),
(2, 'next', 'a'),
(2, 'prev', 'b'),
])
def test_simple_completion(self, tmpdir, get_prompt, steps, where,
subfolder):
"""Simply trying to tab through items."""
testdir = tmpdir / 'test'
for directory in 'abc':
(testdir / directory).ensure(dir=True)
prompt = get_prompt(str(testdir) + os.sep)
for _ in range(steps):
prompt.item_focus(where)
assert prompt._lineedit.text() == str(testdir / subfolder)
def test_backspacing_path(self, qtbot, tmpdir, get_prompt):
"""When we start deleting a path we want to see the subdir."""
testdir = tmpdir / 'test'
for directory in ['bar', 'foo']:
(testdir / directory).ensure(dir=True)
prompt = get_prompt(str(testdir / 'foo') + os.sep)
# Deleting /f[oo/]
with qtbot.wait_signal(prompt._file_model.directoryLoaded):
for _ in range(3):
qtbot.keyPress(prompt._lineedit, Qt.Key_Backspace)
# foo should get completed from f
prompt.item_focus('next')
assert prompt._lineedit.text() == str(testdir / 'foo')
# Deleting /[foo]
for _ in range(3):
qtbot.keyPress(prompt._lineedit, Qt.Key_Backspace)
# We should now show / again, so tabbing twice gives us .. -> bar
prompt.item_focus('next')
prompt.item_focus('next')
assert prompt._lineedit.text() == str(testdir / 'bar')
@pytest.mark.linux
def test_root_path(self, get_prompt):
"""With / as path, show root contents."""
prompt = get_prompt('/')
assert prompt._file_model.rootPath() == '/'
|
import logging
import os.path
from gensim import interfaces, matutils
import dictionary # for constructing word->id mappings
logger = logging.getLogger('gensim.corpora.dmlcorpus')
class DmlConfig:
"""
DmlConfig contains parameters necessary for the abstraction of a 'corpus of
articles' (see the `DmlCorpus` class).
Articles may come from different sources (=different locations on disk/network,
different file formats etc.), so the main purpose of DmlConfig is to keep all
sources in one place.
Apart from glueing sources together, DmlConfig also decides where to store
output files and which articles to accept for the corpus (= an additional filter
over the sources).
"""
def __init__(self, configId, resultDir, acceptLangs=None):
self.resultDir = resultDir # output files will be stored in this directory
self.configId = configId
self.sources = {} # all article sources; see sources.DmlSource class for an example of source
if acceptLangs is None: # which languages to accept
acceptLangs = {'any'} # if not specified, accept all languages (including unknown/unspecified)
self.acceptLangs = set(acceptLangs)
logger.info('initialized %s', self)
def resultFile(self, fname):
return os.path.join(self.resultDir, self.configId + '_' + fname)
def acceptArticle(self, metadata):
lang = metadata.get('language', 'unk')
if 'any' not in self.acceptLangs and lang not in self.acceptLangs:
return False
return True
def addSource(self, source):
sourceId = str(source)
assert sourceId not in self.sources, "source %s already present in the config!" % sourceId
self.sources[sourceId] = source
def __str__(self):
return ("DmlConfig(id=%s, sources=[%s], acceptLangs=[%s])" %
(self.configId, ', '.join(self.sources.iterkeys()), ', '.join(self.acceptLangs)))
# endclass DmlConfig
class DmlCorpus(interfaces.CorpusABC):
"""
DmlCorpus implements a collection of articles. It is initialized via a DmlConfig
object, which holds information about where to look for the articles and how
to process them.
Apart from being a regular corpus (bag-of-words iterable with a `len()` method),
DmlCorpus has methods for building a dictionary (mapping between words and
their ids).
"""
def __init__(self):
self.documents = []
self.config = None
self.dictionary = dictionary.Dictionary()
def __len__(self):
return len(self.documents)
def __iter__(self):
"""
The function that defines a corpus -- iterating over the corpus yields
bag-of-words vectors, one for each document.
A bag-of-words vector is simply a list of ``(tokenId, tokenCount)`` 2-tuples.
"""
for docNo, (sourceId, docUri) in enumerate(self.documents):
source = self.config.sources[sourceId]
contents = source.getContent(docUri)
words = [source.normalizeWord(word) for word in source.tokenize(contents)]
yield self.dictionary.doc2bow(words, allowUpdate=False)
def buildDictionary(self):
"""
Populate dictionary mapping and statistics.
This is done by sequentially retrieving the article fulltexts, splitting
them into tokens and converting tokens to their ids (creating new ids as
necessary).
"""
logger.info("creating dictionary from %i articles", len(self.documents))
self.dictionary = dictionary.Dictionary()
numPositions = 0
for docNo, (sourceId, docUri) in enumerate(self.documents):
if docNo % 1000 == 0:
logger.info("PROGRESS: at document #%i/%i (%s, %s)", docNo, len(self.documents), sourceId, docUri)
source = self.config.sources[sourceId]
contents = source.getContent(docUri)
words = [source.normalizeWord(word) for word in source.tokenize(contents)]
numPositions += len(words)
# convert to bag-of-words, but ignore the result -- here we only care about updating token ids
_ = self.dictionary.doc2bow(words, allowUpdate=True) # noqa:F841
logger.info(
"built %s from %i documents (total %i corpus positions)",
self.dictionary, len(self.documents), numPositions
)
def processConfig(self, config, shuffle=False):
"""
Parse the directories specified in the config, looking for suitable articles.
This updates the self.documents var, which keeps a list of (source id,
article uri) 2-tuples. Each tuple is a unique identifier of one article.
Note that some articles are ignored based on config settings (for example
if the article's language doesn't match any language specified in the
config etc.).
"""
self.config = config
self.documents = []
logger.info("processing config %s", config)
for sourceId, source in config.sources.iteritems():
logger.info("processing source '%s'", sourceId)
accepted = []
for articleUri in source.findArticles():
meta = source.getMeta(articleUri) # retrieve metadata (= dictionary of key->value)
if config.acceptArticle(meta): # do additional filtering on articles, based on the article's metadata
accepted.append((sourceId, articleUri))
logger.info("accepted %i articles for source '%s'", len(accepted), sourceId)
self.documents.extend(accepted)
if not self.documents:
logger.warning('no articles at all found from the config; something went wrong!')
if shuffle:
logger.info("shuffling %i documents for random order", len(self.documents))
import random
random.shuffle(self.documents)
logger.info("accepted total of %i articles for %s", len(self.documents), str(config))
def saveDictionary(self, fname):
logger.info("saving dictionary mapping to %s", fname)
fout = open(fname, 'w')
for tokenId, token in self.dictionary.id2token.iteritems():
fout.write("%i\t%s\n" % (tokenId, token))
fout.close()
@staticmethod
def loadDictionary(fname):
result = {}
for lineNo, line in enumerate(open(fname)):
pair = line[:-1].split('\t')
if len(pair) != 2:
continue
wordId, word = pair
result[int(wordId)] = word
return result
def saveDocuments(self, fname):
logger.info("saving documents mapping to %s", fname)
fout = open(fname, 'w')
for docNo, docId in enumerate(self.documents):
sourceId, docUri = docId
intId, pathId = docUri
fout.write("%i\t%s\n" % (docNo, repr(docId)))
fout.close()
def saveAsText(self):
"""
Store the corpus to disk, in a human-readable text format.
This actually saves multiple files:
1. Pure document-term co-occurence frequency counts, as a Matrix Market file.
2. Token to integer mapping, as a text file.
3. Document to document URI mapping, as a text file.
The exact filesystem paths and filenames are determined from the config.
"""
self.saveDictionary(self.config.resultFile('wordids.txt'))
self.saveDocuments(self.config.resultFile('docids.txt'))
matutils.MmWriter.writeCorpus(self.config.resultFile('bow.mm'), self)
def articleDir(self, docNo):
"""
Return absolute normalized path on filesystem to article no. `docNo`.
"""
sourceId, (_, outPath) = self.documents[docNo]
source = self.config.sources[sourceId]
return os.path.join(source.baseDir, outPath)
def getMeta(self, docNo):
"""
Return metadata for article no. `docNo`.
"""
sourceId, uri = self.documents[docNo]
source = self.config.sources[sourceId]
return source.getMeta(uri)
# endclass DmlCorpus
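# Hedged usage sketch (illustration only, not part of the original module).
# `source` is assumed to be any object exposing the DmlSource-like interface
# used above (findArticles, getMeta, getContent, tokenize, normalizeWord);
# the config id, result directory and 'eng' language code are placeholders.
def buildCorpusExample(source, resultDir):
    config = DmlConfig('example', resultDir=resultDir, acceptLangs=['eng'])
    config.addSource(source)
    corpus = DmlCorpus()
    corpus.processConfig(config)  # collect (sourceId, articleUri) pairs
    corpus.buildDictionary()      # build the word <-> id mapping
    corpus.saveAsText()           # write wordids.txt, docids.txt and bow.mm
    return corpus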
|
import logging
from http.client import (
HTTPConnection,
HTTPSConnection,
HTTPResponse,
)
from io import BytesIO
from vcr.request import Request
from vcr.errors import CannotOverwriteExistingCassetteException
from . import compat
log = logging.getLogger(__name__)
class VCRFakeSocket:
"""
A socket that doesn't do anything!
Used when playing back cassettes, when there
is no actual open socket.
"""
def close(self):
pass
def settimeout(self, *args, **kwargs):
pass
def fileno(self):
"""
This is kinda crappy. requests will watch
this descriptor and make sure it's not closed.
Return file descriptor 0 since that's stdin.
"""
return 0 # wonder how bad this is....
def parse_headers(header_list):
"""
    Convert headers from our serialized dict (one list of values per header
    name) into an HTTPMessage
"""
header_string = b""
for key, values in header_list.items():
for v in values:
header_string += key.encode("utf-8") + b":" + v.encode("utf-8") + b"\r\n"
return compat.get_httpmessage(header_string)
def serialize_headers(response):
out = {}
for key, values in compat.get_headers(response.msg):
out.setdefault(key, [])
out[key].extend(values)
return out
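def _demo_header_round_trip():
    # Hedged illustration (not part of the vcr API): the serialized format
    # used by the helpers above maps each header name to a list of values,
    # which parse_headers() turns back into an HTTPMessage.
    serialized = {"Content-Type": ["application/json"], "Set-Cookie": ["a=1", "b=2"]}
    message = parse_headers(serialized)
    # HTTPMessage keeps repeated headers, so both cookie values survive.
    return message.get_all("Set-Cookie")  # -> ['a=1', 'b=2']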
class VCRHTTPResponse(HTTPResponse):
"""
Stub response class that gets returned instead of a HTTPResponse
"""
def __init__(self, recorded_response):
self.fp = None
self.recorded_response = recorded_response
self.reason = recorded_response["status"]["message"]
self.status = self.code = recorded_response["status"]["code"]
self.version = None
self._content = BytesIO(self.recorded_response["body"]["string"])
self._closed = False
headers = self.recorded_response["headers"]
# Since we are loading a response that has already been serialized, our
# response is no longer chunked. That means we don't want any
# libraries trying to process a chunked response. By removing the
# transfer-encoding: chunked header, this should cause the downstream
# libraries to process this as a non-chunked response.
te_key = [h for h in headers.keys() if h.upper() == "TRANSFER-ENCODING"]
if te_key:
del headers[te_key[0]]
self.headers = self.msg = parse_headers(headers)
self.length = compat.get_header(self.msg, "content-length") or None
@property
def closed(self):
        # in python3, I can't change the value of self.closed. So I'm
        # twiddling self._closed and using this property to shadow the real
        # self.closed from the superclass
return self._closed
def read(self, *args, **kwargs):
return self._content.read(*args, **kwargs)
def readall(self):
return self._content.readall()
def readinto(self, *args, **kwargs):
return self._content.readinto(*args, **kwargs)
def readline(self, *args, **kwargs):
return self._content.readline(*args, **kwargs)
def readlines(self, *args, **kwargs):
return self._content.readlines(*args, **kwargs)
def seekable(self):
return self._content.seekable()
def tell(self):
return self._content.tell()
def isatty(self):
return self._content.isatty()
def seek(self, *args, **kwargs):
return self._content.seek(*args, **kwargs)
def close(self):
self._closed = True
return True
def getcode(self):
return self.status
def isclosed(self):
return self.closed
def info(self):
return parse_headers(self.recorded_response["headers"])
def getheaders(self):
message = parse_headers(self.recorded_response["headers"])
return list(compat.get_header_items(message))
def getheader(self, header, default=None):
values = [v for (k, v) in self.getheaders() if k.lower() == header.lower()]
if values:
return ", ".join(values)
else:
return default
def readable(self):
return self._content.readable()
class VCRConnection:
# A reference to the cassette that's currently being patched in
cassette = None
def _port_postfix(self):
"""
Returns empty string for the default port and ':port' otherwise
"""
port = self.real_connection.port
default_port = {"https": 443, "http": 80}[self._protocol]
return ":{}".format(port) if port != default_port else ""
def _uri(self, url):
"""Returns request absolute URI"""
if url and not url.startswith("/"):
# Then this must be a proxy request.
return url
uri = "{}://{}{}{}".format(self._protocol, self.real_connection.host, self._port_postfix(), url)
log.debug("Absolute URI: %s", uri)
return uri
def _url(self, uri):
"""Returns request selector url from absolute URI"""
prefix = "{}://{}{}".format(self._protocol, self.real_connection.host, self._port_postfix())
return uri.replace(prefix, "", 1)
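    # Hedged illustration of the two helpers above (host and port made up):
    # with real_connection.host == "example.com", port == 8080 and _protocol
    # "http" (8080 is not the default port, so it is kept in the authority):
    #   _uri("/v1/ping") -> "http://example.com:8080/v1/ping"
    #   _url("http://example.com:8080/v1/ping") -> "/v1/ping"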
def request(self, method, url, body=None, headers=None, *args, **kwargs):
"""Persist the request metadata in self._vcr_request"""
self._vcr_request = Request(method=method, uri=self._uri(url), body=body, headers=headers or {})
log.debug("Got {}".format(self._vcr_request))
# Note: The request may not actually be finished at this point, so
# I'm not sending the actual request until getresponse(). This
# allows me to compare the entire length of the response to see if it
# exists in the cassette.
self._sock = VCRFakeSocket()
def putrequest(self, method, url, *args, **kwargs):
"""
httplib gives you more than one way to do it. This is a way
to start building up a request. Usually followed by a bunch
of putheader() calls.
"""
self._vcr_request = Request(method=method, uri=self._uri(url), body="", headers={})
log.debug("Got {}".format(self._vcr_request))
def putheader(self, header, *values):
self._vcr_request.headers[header] = values
def send(self, data):
"""
This method is called after request(), to add additional data to the
body of the request. So if that happens, let's just append the data
onto the most recent request in the cassette.
"""
self._vcr_request.body = self._vcr_request.body + data if self._vcr_request.body else data
def close(self):
# Note: the real connection will only close if it's open, so
# no need to check that here.
self.real_connection.close()
def endheaders(self, message_body=None):
"""
Normally, this would actually send the request to the server.
We are not sending the request until getting the response,
so bypass this part and just append the message body, if any.
"""
if message_body is not None:
self._vcr_request.body = message_body
def getresponse(self, _=False, **kwargs):
"""Retrieve the response"""
# Check to see if the cassette has a response for this request. If so,
# then return it
if self.cassette.can_play_response_for(self._vcr_request):
log.info("Playing response for {} from cassette".format(self._vcr_request))
response = self.cassette.play_response(self._vcr_request)
return VCRHTTPResponse(response)
else:
if self.cassette.write_protected and self.cassette.filter_request(self._vcr_request):
raise CannotOverwriteExistingCassetteException(
cassette=self.cassette, failed_request=self._vcr_request
)
# Otherwise, we should send the request, then get the response
# and return it.
log.info("{} not in cassette, sending to real server".format(self._vcr_request))
# This is imported here to avoid circular import.
# TODO(@IvanMalison): Refactor to allow normal import.
from vcr.patch import force_reset
with force_reset():
self.real_connection.request(
method=self._vcr_request.method,
url=self._url(self._vcr_request.uri),
body=self._vcr_request.body,
headers=self._vcr_request.headers,
)
# get the response
response = self.real_connection.getresponse()
# put the response into the cassette
response = {
"status": {"code": response.status, "message": response.reason},
"headers": serialize_headers(response),
"body": {"string": response.read()},
}
self.cassette.append(self._vcr_request, response)
return VCRHTTPResponse(response)
def set_debuglevel(self, *args, **kwargs):
self.real_connection.set_debuglevel(*args, **kwargs)
def connect(self, *args, **kwargs):
"""
httplib2 uses this. Connects to the server I'm assuming.
Only pass to the baseclass if we don't have a recorded response
and are not write-protected.
"""
if hasattr(self, "_vcr_request") and self.cassette.can_play_response_for(self._vcr_request):
# We already have a response we are going to play, don't
# actually connect
return
if self.cassette.write_protected:
# Cassette is write-protected, don't actually connect
return
from vcr.patch import force_reset
with force_reset():
return self.real_connection.connect(*args, **kwargs)
self._sock = VCRFakeSocket()
@property
def sock(self):
if self.real_connection.sock:
return self.real_connection.sock
return self._sock
@sock.setter
def sock(self, value):
if self.real_connection.sock:
self.real_connection.sock = value
def __init__(self, *args, **kwargs):
kwargs.pop("strict", None) # apparently this is gone in py3
# need to temporarily reset here because the real connection
# inherits from the thing that we are mocking out. Take out
# the reset if you want to see what I mean :)
from vcr.patch import force_reset
with force_reset():
self.real_connection = self._baseclass(*args, **kwargs)
self._sock = None
def __setattr__(self, name, value):
"""
We need to define this because any attributes that are set on the
        VCRConnection need to be propagated to the real connection.
For example, urllib3 will set certain attributes on the connection,
such as 'ssl_version'. These attributes need to get set on the real
connection to have the correct and expected behavior.
TODO: Separately setting the attribute on the two instances is not
ideal. We should switch to a proxying implementation.
"""
try:
setattr(self.real_connection, name, value)
except AttributeError:
# raised if real_connection has not been set yet, such as when
# we're setting the real_connection itself for the first time
pass
super().__setattr__(name, value)
def __getattr__(self, name):
"""
Send requests for weird attributes up to the real connection
        (counterpart to __setattr__ above)
"""
if self.__dict__.get("real_connection"):
# check in case real_connection has not been set yet, such as when
# we're setting the real_connection itself for the first time
return getattr(self.real_connection, name)
return super().__getattr__(name)
for k, v in HTTPConnection.__dict__.items():
if isinstance(v, staticmethod):
setattr(VCRConnection, k, v)
class VCRHTTPConnection(VCRConnection):
"""A Mocked class for HTTP requests"""
_baseclass = HTTPConnection
_protocol = "http"
class VCRHTTPSConnection(VCRConnection):
"""A Mocked class for HTTPS requests"""
_baseclass = HTTPSConnection
_protocol = "https"
is_verified = True
|
import logging
import os
import re
import threading
import requests
import voluptuous as vol
from homeassistant.const import HTTP_OK
import homeassistant.helpers.config_validation as cv
from homeassistant.util import sanitize_filename
_LOGGER = logging.getLogger(__name__)
ATTR_FILENAME = "filename"
ATTR_SUBDIR = "subdir"
ATTR_URL = "url"
ATTR_OVERWRITE = "overwrite"
CONF_DOWNLOAD_DIR = "download_dir"
DOMAIN = "downloader"
DOWNLOAD_FAILED_EVENT = "download_failed"
DOWNLOAD_COMPLETED_EVENT = "download_completed"
SERVICE_DOWNLOAD_FILE = "download_file"
SERVICE_DOWNLOAD_FILE_SCHEMA = vol.Schema(
{
vol.Required(ATTR_URL): cv.url,
vol.Optional(ATTR_SUBDIR): cv.string,
vol.Optional(ATTR_FILENAME): cv.string,
vol.Optional(ATTR_OVERWRITE, default=False): cv.boolean,
}
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_DOWNLOAD_DIR): cv.string})},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Listen for download events to download files."""
download_path = config[DOMAIN][CONF_DOWNLOAD_DIR]
# If path is relative, we assume relative to Home Assistant config dir
if not os.path.isabs(download_path):
download_path = hass.config.path(download_path)
if not os.path.isdir(download_path):
_LOGGER.error(
"Download path %s does not exist. File Downloader not active", download_path
)
return False
def download_file(service):
"""Start thread to download file specified in the URL."""
def do_download():
"""Download the file."""
try:
url = service.data[ATTR_URL]
subdir = service.data.get(ATTR_SUBDIR)
filename = service.data.get(ATTR_FILENAME)
overwrite = service.data.get(ATTR_OVERWRITE)
if subdir:
subdir = sanitize_filename(subdir)
final_path = None
req = requests.get(url, stream=True, timeout=10)
if req.status_code != HTTP_OK:
_LOGGER.warning(
"downloading '%s' failed, status_code=%d", url, req.status_code
)
hass.bus.fire(
f"{DOMAIN}_{DOWNLOAD_FAILED_EVENT}",
{"url": url, "filename": filename},
)
else:
if filename is None and "content-disposition" in req.headers:
match = re.findall(
r"filename=(\S+)", req.headers["content-disposition"]
)
if match:
filename = match[0].strip("'\" ")
if not filename:
filename = os.path.basename(url).strip()
if not filename:
filename = "ha_download"
                    # Remove characters that could be used to manipulate the path
filename = sanitize_filename(filename)
# Do we want to download to subdir, create if needed
if subdir:
subdir_path = os.path.join(download_path, subdir)
                        # Ensure the subdir exists
if not os.path.isdir(subdir_path):
os.makedirs(subdir_path)
final_path = os.path.join(subdir_path, filename)
else:
final_path = os.path.join(download_path, filename)
path, ext = os.path.splitext(final_path)
                    # If the file exists, append a number.
                    # We test filename, filename_2..
if not overwrite:
tries = 1
final_path = path + ext
while os.path.isfile(final_path):
tries += 1
final_path = f"{path}_{tries}.{ext}"
_LOGGER.debug("%s -> %s", url, final_path)
with open(final_path, "wb") as fil:
for chunk in req.iter_content(1024):
fil.write(chunk)
_LOGGER.debug("Downloading of %s done", url)
hass.bus.fire(
f"{DOMAIN}_{DOWNLOAD_COMPLETED_EVENT}",
{"url": url, "filename": filename},
)
except requests.exceptions.ConnectionError:
_LOGGER.exception("ConnectionError occurred for %s", url)
hass.bus.fire(
f"{DOMAIN}_{DOWNLOAD_FAILED_EVENT}",
{"url": url, "filename": filename},
)
# Remove file if we started downloading but failed
if final_path and os.path.isfile(final_path):
os.remove(final_path)
threading.Thread(target=do_download).start()
hass.services.register(
DOMAIN,
SERVICE_DOWNLOAD_FILE,
download_file,
schema=SERVICE_DOWNLOAD_FILE_SCHEMA,
)
return True
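# Hedged configuration/service sketch (all values are illustrative):
#
#   # configuration.yaml
#   downloader:
#     download_dir: downloads
#
#   # example data for a downloader.download_file service call
#   url: "http://example.com/file.txt"
#   subdir: "examples"
#   filename: "renamed.txt"
#   overwrite: false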
|
from homeassistant.components import google_assistant as ga
from homeassistant.core import Context
from homeassistant.setup import async_setup_component
from .test_http import DUMMY_CONFIG
async def test_request_sync_service(aioclient_mock, hass):
"""Test that it posts to the request_sync url."""
aioclient_mock.post(
ga.const.HOMEGRAPH_TOKEN_URL,
status=200,
json={"access_token": "1234", "expires_in": 3600},
)
aioclient_mock.post(ga.const.REQUEST_SYNC_BASE_URL, status=200)
await async_setup_component(
hass,
"google_assistant",
{"google_assistant": DUMMY_CONFIG},
)
assert aioclient_mock.call_count == 0
await hass.services.async_call(
ga.const.DOMAIN,
ga.const.SERVICE_REQUEST_SYNC,
blocking=True,
context=Context(user_id="123"),
)
assert aioclient_mock.call_count == 2 # token + request
|
# Preload the idna codec before gevent monkey-patches the stdlib (a known
# boto/gevent workaround); the original string literal was lost here, so a
# placeholder is used.
"x".encode('idna')
import gevent.monkey
gevent.monkey.patch_all()
import docker_registry.core.boto as coreboto
from docker_registry.core import compat
from docker_registry.core import exceptions
from docker_registry.core import lru
import logging
import os
import re
import time
import boto.exception
import boto.s3
import boto.s3.connection
import boto.s3.key
logger = logging.getLogger(__name__)
class Cloudfront(object):
def __init__(self, awsaccess, awssecret, base, keyid, privatekey):
boto.connect_cloudfront(
awsaccess,
awssecret
)
host = re.compile('^https?://([^/]+)').findall(base)
self.dist = boto.cloudfront.distribution.Distribution(domain_name=host)
self.base = base
self.keyid = keyid
self.privatekey = privatekey
try:
self.privatekey = open(privatekey).read()
except Exception:
logger.debug('Passed private key is not readable. Assume string.')
def sign(self, url, expire_time=0):
path = os.path.join(self.base, url)
if expire_time:
expire_time = time.time() + expire_time
return self.dist.create_signed_url(
path,
self.keyid,
private_key_string=self.privatekey,
expire_time=int(expire_time)
)
def pub(self, path):
return os.path.join(self.base, path)
class Storage(coreboto.Base):
def __init__(self, path, config):
super(Storage, self).__init__(path, config)
def _build_connection_params(self):
kwargs = super(Storage, self)._build_connection_params()
if self._config.s3_secure is not None:
kwargs['is_secure'] = (self._config.s3_secure is True)
return kwargs
def makeConnection(self):
kwargs = self._build_connection_params()
# Connect cloudfront if we are required to
if self._config.cloudfront:
self.signer = Cloudfront(
self._config.s3_access_key,
self._config.s3_secret_key,
self._config.cloudfront['base'],
self._config.cloudfront['keyid'],
self._config.cloudfront['keysecret']
).sign
else:
self.signer = None
if self._config.s3_use_sigv4 is True:
if self._config.boto_host is None:
logger.warn("No S3 Host specified, Boto won't use SIGV4!")
boto.config.add_section('s3')
boto.config.set('s3', 'use-sigv4', 'True')
if self._config.s3_region is not None:
return boto.s3.connect_to_region(
region_name=self._config.s3_region,
aws_access_key_id=self._config.s3_access_key,
aws_secret_access_key=self._config.s3_secret_key,
**kwargs)
logger.warn("No S3 region specified, using boto default region, " +
"this may affect performance and stability.")
return boto.s3.connection.S3Connection(
self._config.s3_access_key,
self._config.s3_secret_key,
**kwargs)
def makeKey(self, path):
return boto.s3.key.Key(self._boto_bucket, path)
@lru.set
def put_content(self, path, content):
path = self._init_path(path)
key = self.makeKey(path)
key.set_contents_from_string(
content, encrypt_key=(self._config.s3_encrypt is True))
return path
def stream_write(self, path, fp):
# Minimum size of upload part size on S3 is 5MB
buffer_size = 5 * 1024 * 1024
if self.buffer_size > buffer_size:
buffer_size = self.buffer_size
path = self._init_path(path)
mp = self._boto_bucket.initiate_multipart_upload(
path, encrypt_key=(self._config.s3_encrypt is True))
num_part = 1
try:
while True:
buf = fp.read(buffer_size)
if not buf:
break
io = compat.StringIO(buf)
mp.upload_part_from_file(io, num_part)
num_part += 1
io.close()
except IOError as e:
raise e
mp.complete_upload()
def content_redirect_url(self, path):
path = self._init_path(path)
key = self.makeKey(path)
if not key.exists():
raise IOError('No such key: \'{0}\''.format(path))
# No cloudfront? Sign to the bucket
if not self.signer:
return key.generate_url(
expires_in=1200,
method='GET',
query_auth=True)
# Have cloudfront? Sign it
return self.signer(path, expire_time=60)
def get_content(self, path, tries=0):
try:
return super(Storage, self).get_content(path)
except exceptions.FileNotFoundError as e:
if tries <= 3:
time.sleep(.1)
return self.get_content(path, tries + 1)
else:
raise e
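# Hedged configuration sketch (keys mirror the attributes read by
# makeConnection()/Cloudfront above; every value is a placeholder):
#
#   s3_access_key: <access key>
#   s3_secret_key: <secret key>
#   s3_region: us-east-1
#   s3_secure: true
#   s3_encrypt: false
#   cloudfront:
#     base: https://<distribution>.cloudfront.net/registry
#     keyid: <cloudfront key pair id>
#     keysecret: /path/to/cloudfront-private-key.pem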
|
import asyncio
from datetime import timedelta
import voluptuous as vol
from homeassistant.components.media_player import DEVICE_CLASS_TV, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_ID,
CONF_IP_ADDRESS,
CONF_METHOD,
CONF_NAME,
CONF_PORT,
CONF_TOKEN,
STATE_OFF,
STATE_ON,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
from homeassistant.util import dt as dt_util
from .bridge import SamsungTVBridge
from .const import (
CONF_MANUFACTURER,
CONF_MODEL,
CONF_ON_ACTION,
DEFAULT_NAME,
DOMAIN,
LOGGER,
)
KEY_PRESS_TIMEOUT = 1.2
SOURCES = {"TV": "KEY_TV", "HDMI": "KEY_HDMI"}
SUPPORT_SAMSUNGTV = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_SELECT_SOURCE
| SUPPORT_NEXT_TRACK
| SUPPORT_TURN_OFF
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Samsung TV from a config entry."""
ip_address = config_entry.data[CONF_IP_ADDRESS]
on_script = None
if (
DOMAIN in hass.data
and ip_address in hass.data[DOMAIN]
and CONF_ON_ACTION in hass.data[DOMAIN][ip_address]
and hass.data[DOMAIN][ip_address][CONF_ON_ACTION]
):
turn_on_action = hass.data[DOMAIN][ip_address][CONF_ON_ACTION]
on_script = Script(
hass, turn_on_action, config_entry.data.get(CONF_NAME, DEFAULT_NAME), DOMAIN
)
# Initialize bridge
data = config_entry.data.copy()
bridge = SamsungTVBridge.get_bridge(
data[CONF_METHOD],
data[CONF_HOST],
data[CONF_PORT],
data.get(CONF_TOKEN),
)
if bridge.port is None and bridge.default_port is not None:
# For backward compat, set default port for websocket tv
data[CONF_PORT] = bridge.default_port
hass.config_entries.async_update_entry(config_entry, data=data)
bridge = SamsungTVBridge.get_bridge(
data[CONF_METHOD],
data[CONF_HOST],
data[CONF_PORT],
data.get(CONF_TOKEN),
)
async_add_entities([SamsungTVDevice(bridge, config_entry, on_script)])
class SamsungTVDevice(MediaPlayerEntity):
"""Representation of a Samsung TV."""
def __init__(self, bridge, config_entry, on_script):
"""Initialize the Samsung device."""
self._config_entry = config_entry
self._manufacturer = config_entry.data.get(CONF_MANUFACTURER)
self._model = config_entry.data.get(CONF_MODEL)
self._name = config_entry.data.get(CONF_NAME)
self._on_script = on_script
self._uuid = config_entry.data.get(CONF_ID)
# Assume that the TV is not muted
self._muted = False
# Assume that the TV is in Play mode
self._playing = True
self._state = None
# Mark the end of a shutdown command (need to wait 15 seconds before
# sending the next command to avoid turning the TV back ON).
self._end_of_power_off = None
self._bridge = bridge
self._bridge.register_reauth_callback(self.access_denied)
def access_denied(self):
"""Access denied callback."""
LOGGER.debug("Access denied in getting remote object")
self.hass.add_job(
self.hass.config_entries.flow.async_init(
DOMAIN,
context={"source": "reauth"},
data=self._config_entry.data,
)
)
def update(self):
"""Update state of device."""
if self._power_off_in_progress():
self._state = STATE_OFF
else:
self._state = STATE_ON if self._bridge.is_on() else STATE_OFF
def send_key(self, key):
"""Send a key to the tv and handles exceptions."""
if self._power_off_in_progress() and key != "KEY_POWEROFF":
LOGGER.info("TV is powering off, not sending command: %s", key)
return
self._bridge.send_key(key)
def _power_off_in_progress(self):
return (
self._end_of_power_off is not None
and self._end_of_power_off > dt_util.utcnow()
)
@property
def unique_id(self) -> str:
"""Return the unique ID of the device."""
return self._uuid
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def device_info(self):
"""Return device specific attributes."""
return {
"name": self.name,
"identifiers": {(DOMAIN, self.unique_id)},
"manufacturer": self._manufacturer,
"model": self._model,
}
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def source_list(self):
"""List of available input sources."""
return list(SOURCES)
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._on_script:
return SUPPORT_SAMSUNGTV | SUPPORT_TURN_ON
return SUPPORT_SAMSUNGTV
@property
def device_class(self):
"""Set the device class to TV."""
return DEVICE_CLASS_TV
def turn_off(self):
"""Turn off media player."""
self._end_of_power_off = dt_util.utcnow() + timedelta(seconds=15)
self.send_key("KEY_POWEROFF")
# Force closing of remote session to provide instant UI feedback
self._bridge.close_remote()
def volume_up(self):
"""Volume up the media player."""
self.send_key("KEY_VOLUP")
def volume_down(self):
"""Volume down media player."""
self.send_key("KEY_VOLDOWN")
def mute_volume(self, mute):
"""Send mute command."""
self.send_key("KEY_MUTE")
def media_play_pause(self):
"""Simulate play pause media player."""
if self._playing:
self.media_pause()
else:
self.media_play()
def media_play(self):
"""Send play command."""
self._playing = True
self.send_key("KEY_PLAY")
def media_pause(self):
"""Send media pause command to media player."""
self._playing = False
self.send_key("KEY_PAUSE")
def media_next_track(self):
"""Send next track command."""
self.send_key("KEY_CHUP")
def media_previous_track(self):
"""Send the previous track command."""
self.send_key("KEY_CHDOWN")
async def async_play_media(self, media_type, media_id, **kwargs):
"""Support changing a channel."""
if media_type != MEDIA_TYPE_CHANNEL:
LOGGER.error("Unsupported media type")
return
# media_id should only be a channel number
try:
cv.positive_int(media_id)
except vol.Invalid:
LOGGER.error("Media ID must be positive integer")
return
for digit in media_id:
await self.hass.async_add_executor_job(self.send_key, f"KEY_{digit}")
            # asyncio.sleep() takes only the delay; its optional second
            # positional argument is a result value, not an event loop.
            await asyncio.sleep(KEY_PRESS_TIMEOUT)
await self.hass.async_add_executor_job(self.send_key, "KEY_ENTER")
async def async_turn_on(self):
"""Turn the media player on."""
if self._on_script:
await self._on_script.async_run(context=self._context)
def select_source(self, source):
"""Select input source."""
if source not in SOURCES:
LOGGER.error("Unsupported source")
return
self.send_key(SOURCES[source])
|
import errno
import glob
import os
import xml.etree.ElementTree as ElementTree
from meld.conf import _
from . import _vc
#: Simple enum constants for differentiating conflict cases.
CONFLICT_TYPE_MERGE, CONFLICT_TYPE_UPDATE = 1, 2
class Vc(_vc.Vc):
CMD = "svn"
NAME = "Subversion"
VC_DIR = ".svn"
state_map = {
"unversioned": _vc.STATE_NONE,
"added": _vc.STATE_NEW,
"normal": _vc.STATE_NORMAL,
"missing": _vc.STATE_MISSING,
"ignored": _vc.STATE_IGNORED,
"modified": _vc.STATE_MODIFIED,
"deleted": _vc.STATE_REMOVED,
"conflicted": _vc.STATE_CONFLICT,
}
def commit(self, runner, files, message):
command = [self.CMD, 'commit', '-m', message]
runner(command, files, refresh=True, working_dir=self.root)
def update(self, runner):
command = [self.CMD, 'update']
runner(command, [], refresh=True, working_dir=self.root)
def remove(self, runner, files):
command = [self.CMD, 'rm', '--force']
runner(command, files, refresh=True, working_dir=self.root)
def revert(self, runner, files):
command = [self.CMD, 'revert']
runner(command, files, refresh=True, working_dir=self.root)
def resolve(self, runner, files):
command = [self.CMD, 'resolve', '--accept=working']
runner(command, files, refresh=True, working_dir=self.root)
def get_path_for_repo_file(self, path, commit=None):
if commit is None:
commit = "BASE"
else:
raise NotImplementedError()
if not path.startswith(self.root + os.path.sep):
raise _vc.InvalidVCPath(self, path, "Path not in repository")
path = path[len(self.root) + 1:]
args = [self.CMD, "cat", "-r", commit, path]
return _vc.call_temp_output(args, cwd=self.root)
def get_path_for_conflict(self, path, conflict=None):
"""
SVN has two types of conflicts:
Merge conflicts, which give 3 files:
.left.r* (THIS)
.working (BASE... although this is a bit debatable)
.right.r* (OTHER)
Update conflicts which give 3 files:
.mine (THIS)
.r* (lower - BASE)
.r* (higher - OTHER)
"""
if not path.startswith(self.root + os.path.sep):
raise _vc.InvalidVCPath(self, path, "Path not in repository")
# If this is merged, we just return the merged output
if conflict == _vc.CONFLICT_MERGED:
return path, False
        # First find what type of conflict this is by looking at the base;
        # we can possibly return straight away!
conflict_type = None
base = glob.glob('%s.working' % path)
if len(base) == 1:
# We have a merge conflict
conflict_type = CONFLICT_TYPE_MERGE
else:
base = glob.glob('%s.mine' % path)
if len(base) == 1:
# We have an update conflict
conflict_type = CONFLICT_TYPE_UPDATE
if conflict_type is None:
raise _vc.InvalidVCPath(self, path, "No known conflict type found")
if conflict == _vc.CONFLICT_BASE:
return base[0], False
elif conflict == _vc.CONFLICT_THIS:
if conflict_type == CONFLICT_TYPE_MERGE:
return glob.glob('%s.merge-left.r*' % path)[0], False
else:
return glob.glob('%s.r*' % path)[0], False
elif conflict == _vc.CONFLICT_OTHER:
if conflict_type == CONFLICT_TYPE_MERGE:
return glob.glob('%s.merge-right.r*' % path)[0], False
else:
return glob.glob('%s.r*' % path)[-1], False
raise KeyError("Conflict file does not exist")
def add(self, runner, files):
# SVN < 1.7 needs to add folders from their immediate parent
dirs = [s for s in files if os.path.isdir(s)]
files = [s for s in files if os.path.isfile(s)]
command = [self.CMD, 'add']
for path in dirs:
runner(command, [path], refresh=True,
working_dir=os.path.dirname(path))
if files:
runner(command, files, refresh=True, working_dir=self.location)
@classmethod
def _repo_version_support(cls, version):
return version >= 12
@classmethod
def valid_repo(cls, path):
if _vc.call([cls.CMD, "info"], cwd=path):
return False
root, location = cls.is_in_repo(path)
vc_dir = os.path.join(root, cls.VC_DIR)
# Check for repository version, trusting format file then entries file
repo_version = None
for filename in ("format", "entries"):
path = os.path.join(vc_dir, filename)
if os.path.exists(path):
with open(path) as f:
repo_version = int(f.readline().strip())
break
if not repo_version and os.path.exists(os.path.join(vc_dir, "wc.db")):
repo_version = 12
return cls._repo_version_support(repo_version)
def _update_tree_state_cache(self, path):
while 1:
try:
# "svn --xml" outputs utf8, even with Windows non-utf8 locale
proc = _vc.popen(
[self.CMD, "status", "-v", "--xml", path],
cwd=self.location, use_locale_encoding=False)
tree = ElementTree.parse(proc)
break
except OSError as e:
if e.errno != errno.EAGAIN:
raise
for target in tree.findall("target") + tree.findall("changelist"):
            # Element.getchildren() was removed in Python 3.9; iterate directly
            for entry in (t for t in target if t.tag == "entry"):
path = entry.attrib["path"]
if not path:
continue
if not os.path.isabs(path):
path = os.path.abspath(os.path.join(self.location, path))
                for status in (e for e in entry
                               if e.tag == "wc-status"):
item = status.attrib["item"]
if item == "":
continue
state = self.state_map.get(item, _vc.STATE_NONE)
self._tree_cache[path] = state
rev = status.attrib.get("revision")
rev_label = _("Rev %s") % rev if rev is not None else ''
self._tree_meta_cache[path] = rev_label
self._add_missing_cache_entry(path, state)
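# Hedged illustration of the `svn status -v --xml` structure consumed by
# _update_tree_state_cache() above (paths and revisions are examples only):
#
#   <status>
#     <target path=".">
#       <entry path="foo.py">
#         <wc-status item="modified" revision="42"> ... </wc-status>
#       </entry>
#     </target>
#   </status>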
|
import os
import tempfile
from uuid import uuid4
from django.core.cache import cache
from django.core.files.base import ContentFile
from django.core.files.storage import default_storage
class BaseStorage:
def __init__(self, name=None):
self.name = name
def save(self, data, mode='w'):
raise NotImplementedError
def read(self, read_mode='r'):
raise NotImplementedError
def remove(self):
raise NotImplementedError
class TempFolderStorage(BaseStorage):
def open(self, mode='r'):
if self.name:
return open(self.get_full_path(), mode)
else:
tmp_file = tempfile.NamedTemporaryFile(delete=False)
self.name = tmp_file.name
return tmp_file
def save(self, data, mode='w'):
with self.open(mode=mode) as file:
file.write(data)
def read(self, mode='r'):
with self.open(mode=mode) as file:
return file.read()
def remove(self):
os.remove(self.get_full_path())
def get_full_path(self):
return os.path.join(
tempfile.gettempdir(),
self.name
)
class CacheStorage(BaseStorage):
"""
By default memcache maximum size per key is 1MB, be careful with large files.
"""
CACHE_LIFETIME = 86400
CACHE_PREFIX = 'django-import-export-'
def save(self, data, mode=None):
if not self.name:
self.name = uuid4().hex
cache.set(self.CACHE_PREFIX + self.name, data, self.CACHE_LIFETIME)
def read(self, read_mode='r'):
return cache.get(self.CACHE_PREFIX + self.name)
def remove(self):
        # Delete the same prefixed key that save()/read() use
        cache.delete(self.CACHE_PREFIX + self.name)
class MediaStorage(BaseStorage):
MEDIA_FOLDER = 'django-import-export'
def save(self, data, mode=None):
if not self.name:
self.name = uuid4().hex
default_storage.save(self.get_full_path(), ContentFile(data))
def read(self, read_mode='rb'):
with default_storage.open(self.get_full_path(), mode=read_mode) as f:
return f.read()
def remove(self):
default_storage.delete(self.get_full_path())
def get_full_path(self):
return os.path.join(
self.MEDIA_FOLDER,
self.name
)
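# Hedged usage sketch (illustration only, not part of the public API): round-
# trip some bytes through TempFolderStorage. The first save() writes to a
# fresh NamedTemporaryFile, which is opened in binary mode, hence the bytes
# payload and the explicit 'wb'/'rb' modes.
def _demo_temp_folder_storage():
    storage = TempFolderStorage()
    storage.save(b"id,name\n1,example\n", mode='wb')
    data = storage.read(mode='rb')  # re-opens the temp file by name
    storage.remove()                # deletes the temp file again
    return data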
|
import argparse
import sys
from paasta_tools import tron_tools
def parse_args():
parser = argparse.ArgumentParser(description="Cleans up stale Tron namespaces.")
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=tron_tools.DEFAULT_SOA_DIR,
help="Use a different soa config directory",
)
parser.add_argument(
"--dry-run",
dest="dry_run",
action="store_true",
help="Print namespaces to be deleted, instead of deleting them",
)
args = parser.parse_args()
return args
def main():
args = parse_args()
cluster = tron_tools.load_tron_config().get_cluster_name()
client = tron_tools.get_tron_client()
namespaces = client.list_namespaces()
expected_namespaces = tron_tools.get_tron_namespaces(
cluster=cluster, soa_dir=args.soa_dir
)
to_delete = set(namespaces) - set(expected_namespaces) - {"MASTER"}
if not to_delete:
print("No Tron namespaces to remove")
sys.exit(0)
if args.dry_run:
print("Dry run, would have removed namespaces:\n " + "\n ".join(to_delete))
sys.exit(0)
successes = []
errors = []
for namespace in to_delete:
try:
client.update_namespace(namespace, "")
successes.append(namespace)
except Exception as e:
errors.append((namespace, e))
if successes:
print("Successfully removed namespaces:\n", "\n ".join(successes))
if errors:
print(
"Failed to remove namespaces:\n "
+ "\n ".join(
[
"{namespace}: {error}".format(namespace=namespace, error=str(error))
for namespace, error in errors
]
)
)
sys.exit(1)
if __name__ == "__main__":
main()
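# Hedged CLI sketch (the module filename below is hypothetical; the flags
# match parse_args() above):
#
#   python cleanup_tron_namespaces.py --dry-run
#   python cleanup_tron_namespaces.py --soa-dir /path/to/soa-configs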
|
from homeassistant.core import State
from homeassistant.setup import async_setup_component
VALID_TEXT1 = "Test text"
VALID_TEXT2 = "LoremIpsum"
INVALID_TEXT1 = "This text is too long!"
INVALID_TEXT2 = "Short"
async def test_reproducing_states(hass, caplog):
"""Test reproducing Input text states."""
# Setup entity for testing
assert await async_setup_component(
hass,
"input_text",
{
"input_text": {
"test_text": {"min": "6", "max": "10", "initial": VALID_TEXT1}
}
},
)
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[
State("input_text.test_text", VALID_TEXT1),
# Should not raise
State("input_text.non_existing", VALID_TEXT1),
],
)
# Test that entity is in desired state
assert hass.states.get("input_text.test_text").state == VALID_TEXT1
# Try reproducing with different state
await hass.helpers.state.async_reproduce_state(
[
State("input_text.test_text", VALID_TEXT2),
# Should not raise
State("input_text.non_existing", VALID_TEXT2),
],
)
# Test that the state was changed
assert hass.states.get("input_text.test_text").state == VALID_TEXT2
# Test setting state to invalid state (length too long)
await hass.helpers.state.async_reproduce_state(
[State("input_text.test_text", INVALID_TEXT1)]
)
# The entity state should be unchanged
assert hass.states.get("input_text.test_text").state == VALID_TEXT2
# Test setting state to invalid state (length too short)
await hass.helpers.state.async_reproduce_state(
[State("input_text.test_text", INVALID_TEXT2)]
)
# The entity state should be unchanged
assert hass.states.get("input_text.test_text").state == VALID_TEXT2
|
from django.contrib.messages import add_message, constants
def get_request(request):
"""Return Django request object even for DRF requests."""
return getattr(request, "_request", request)
def debug(request, message, extra_tags=""):
"""Add a message with the ``DEBUG`` level."""
if request is not None:
add_message(get_request(request), constants.DEBUG, message, extra_tags)
def info(request, message, extra_tags=""):
"""Add a message with the ``INFO`` level."""
if request is not None:
add_message(get_request(request), constants.INFO, message, extra_tags)
def success(request, message, extra_tags=""):
"""Add a message with the ``SUCCESS`` level."""
if request is not None:
add_message(get_request(request), constants.SUCCESS, message, extra_tags)
def warning(request, message, extra_tags=""):
"""Add a message with the ``WARNING`` level."""
if request is not None:
add_message(get_request(request), constants.WARNING, message, extra_tags)
def error(request, message, extra_tags=""):
"""Add a message with the ``ERROR`` level."""
if request is not None:
add_message(get_request(request), constants.ERROR, message, extra_tags)
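# Hedged usage sketch (illustration only): the helpers above accept either a
# plain Django request or a DRF request, so a view can call them directly.
# The view and the message texts are made up.
def _demo_view(request):
    error(request, "Something went wrong", extra_tags="dismissible")
    info(request, "But this part still worked")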
|
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import callback
from .const import (
COORDINATOR,
DOMAIN,
FLAME_ICON,
FLOW_OFF_ICON,
FLOW_ON_ICON,
IDLE_ICON,
)
from .sensor import SmileSensor
BINARY_SENSOR_MAP = {
"dhw_state": ["Domestic Hot Water State", None],
"slave_boiler_state": ["Secondary Heater Device State", None],
}
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Smile binary_sensors from a config entry."""
api = hass.data[DOMAIN][config_entry.entry_id]["api"]
coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
entities = []
all_devices = api.get_all_devices()
for dev_id, device_properties in all_devices.items():
if device_properties["class"] != "heater_central":
continue
data = api.get_device_data(dev_id)
for binary_sensor, dummy in BINARY_SENSOR_MAP.items():
if binary_sensor not in data:
continue
entities.append(
PwBinarySensor(
api,
coordinator,
device_properties["name"],
binary_sensor,
dev_id,
device_properties["class"],
)
)
async_add_entities(entities, True)
class PwBinarySensor(SmileSensor, BinarySensorEntity):
"""Representation of a Plugwise binary_sensor."""
def __init__(self, api, coordinator, name, binary_sensor, dev_id, model):
"""Set up the Plugwise API."""
super().__init__(api, coordinator, name, dev_id, binary_sensor)
self._binary_sensor = binary_sensor
self._is_on = False
self._icon = None
self._unique_id = f"{dev_id}-{binary_sensor}"
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self._is_on
@property
def icon(self):
"""Return the icon to use in the frontend."""
return self._icon
@callback
def _async_process_data(self):
"""Update the entity."""
data = self._api.get_device_data(self._dev_id)
if not data:
_LOGGER.error("Received no data for device %s", self._binary_sensor)
self.async_write_ha_state()
return
if self._binary_sensor not in data:
self.async_write_ha_state()
return
self._is_on = data[self._binary_sensor]
self._state = STATE_OFF
if self._binary_sensor == "dhw_state":
self._icon = FLOW_OFF_ICON
if self._binary_sensor == "slave_boiler_state":
self._icon = IDLE_ICON
if self._is_on:
self._state = STATE_ON
if self._binary_sensor == "dhw_state":
self._icon = FLOW_ON_ICON
if self._binary_sensor == "slave_boiler_state":
self._icon = FLAME_ICON
self.async_write_ha_state()
|
import os.path
import subprocess
import perfkitbenchmarker
import pkg_resources
_STATIC_VERSION_FILE = 'version.txt'
def _GetVersion():
"""Gets the version from git or the static version file."""
# Try to pull the version from git.
root_dir = os.path.dirname(os.path.dirname(__file__))
git_dir = os.path.join(root_dir, '.git')
try:
version = subprocess.check_output(['git', '--git-dir', git_dir,
'describe', '--always'],
stderr=subprocess.STDOUT,
universal_newlines=True)
except (OSError, subprocess.CalledProcessError):
# Could not get the version from git. Resort to contents of the static
# version file.
try:
version = pkg_resources.resource_string(perfkitbenchmarker.__name__,
_STATIC_VERSION_FILE)
except IOError:
# Could not determine version.
return 'unknown'
return version.rstrip('\n')
VERSION = _GetVersion()
|
from typing import Any, Callable, Dict, List, Optional
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from . import WLEDDataUpdateCoordinator, WLEDDeviceEntity, wled_exception_handler
from .const import (
ATTR_DURATION,
ATTR_FADE,
ATTR_TARGET_BRIGHTNESS,
ATTR_UDP_PORT,
DOMAIN,
)
PARALLEL_UPDATES = 1
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up WLED switch based on a config entry."""
coordinator: WLEDDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id]
switches = [
WLEDNightlightSwitch(entry.entry_id, coordinator),
WLEDSyncSendSwitch(entry.entry_id, coordinator),
WLEDSyncReceiveSwitch(entry.entry_id, coordinator),
]
async_add_entities(switches, True)
class WLEDSwitch(WLEDDeviceEntity, SwitchEntity):
"""Defines a WLED switch."""
def __init__(
self,
*,
entry_id: str,
coordinator: WLEDDataUpdateCoordinator,
name: str,
icon: str,
key: str,
) -> None:
"""Initialize WLED switch."""
self._key = key
super().__init__(
entry_id=entry_id, coordinator=coordinator, name=name, icon=icon
)
@property
def unique_id(self) -> str:
"""Return the unique ID for this sensor."""
return f"{self.coordinator.data.info.mac_address}_{self._key}"
class WLEDNightlightSwitch(WLEDSwitch):
"""Defines a WLED nightlight switch."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize WLED nightlight switch."""
super().__init__(
coordinator=coordinator,
entry_id=entry_id,
icon="mdi:weather-night",
key="nightlight",
name=f"{coordinator.data.info.name} Nightlight",
)
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the entity."""
return {
ATTR_DURATION: self.coordinator.data.state.nightlight.duration,
ATTR_FADE: self.coordinator.data.state.nightlight.fade,
ATTR_TARGET_BRIGHTNESS: self.coordinator.data.state.nightlight.target_brightness,
}
@property
def is_on(self) -> bool:
"""Return the state of the switch."""
return bool(self.coordinator.data.state.nightlight.on)
@wled_exception_handler
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the WLED nightlight switch."""
await self.coordinator.wled.nightlight(on=False)
@wled_exception_handler
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on the WLED nightlight switch."""
await self.coordinator.wled.nightlight(on=True)
class WLEDSyncSendSwitch(WLEDSwitch):
"""Defines a WLED sync send switch."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator) -> None:
"""Initialize WLED sync send switch."""
super().__init__(
coordinator=coordinator,
entry_id=entry_id,
icon="mdi:upload-network-outline",
key="sync_send",
name=f"{coordinator.data.info.name} Sync Send",
)
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the entity."""
return {ATTR_UDP_PORT: self.coordinator.data.info.udp_port}
@property
def is_on(self) -> bool:
"""Return the state of the switch."""
return bool(self.coordinator.data.state.sync.send)
@wled_exception_handler
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the WLED sync send switch."""
await self.coordinator.wled.sync(send=False)
@wled_exception_handler
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on the WLED sync send switch."""
await self.coordinator.wled.sync(send=True)
class WLEDSyncReceiveSwitch(WLEDSwitch):
"""Defines a WLED sync receive switch."""
def __init__(self, entry_id: str, coordinator: WLEDDataUpdateCoordinator):
"""Initialize WLED sync receive switch."""
super().__init__(
coordinator=coordinator,
entry_id=entry_id,
icon="mdi:download-network-outline",
key="sync_receive",
name=f"{coordinator.data.info.name} Sync Receive",
)
@property
def device_state_attributes(self) -> Optional[Dict[str, Any]]:
"""Return the state attributes of the entity."""
return {ATTR_UDP_PORT: self.coordinator.data.info.udp_port}
@property
def is_on(self) -> bool:
"""Return the state of the switch."""
return bool(self.coordinator.data.state.sync.receive)
@wled_exception_handler
async def async_turn_off(self, **kwargs: Any) -> None:
"""Turn off the WLED sync receive switch."""
await self.coordinator.wled.sync(receive=False)
@wled_exception_handler
async def async_turn_on(self, **kwargs: Any) -> None:
"""Turn on the WLED sync receive switch."""
await self.coordinator.wled.sync(receive=True)
|
import logging
import voluptuous as vol
from homeassistant.components.mqtt import ATTR_DISCOVERY_HASH
from homeassistant.components.mqtt.discovery import (
MQTT_DISCOVERY_NEW,
clear_discovery_hash,
)
from homeassistant.components.vacuum import DOMAIN
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.reload import async_setup_reload_service
from .. import DOMAIN as MQTT_DOMAIN, PLATFORMS
from .schema import CONF_SCHEMA, LEGACY, MQTT_VACUUM_SCHEMA, STATE
from .schema_legacy import PLATFORM_SCHEMA_LEGACY, async_setup_entity_legacy
from .schema_state import PLATFORM_SCHEMA_STATE, async_setup_entity_state
_LOGGER = logging.getLogger(__name__)
def validate_mqtt_vacuum(value):
"""Validate MQTT vacuum schema."""
schemas = {LEGACY: PLATFORM_SCHEMA_LEGACY, STATE: PLATFORM_SCHEMA_STATE}
return schemas[value[CONF_SCHEMA]](value)
PLATFORM_SCHEMA = vol.All(
MQTT_VACUUM_SCHEMA.extend({}, extra=vol.ALLOW_EXTRA), validate_mqtt_vacuum
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up MQTT vacuum through configuration.yaml."""
await async_setup_reload_service(hass, MQTT_DOMAIN, PLATFORMS)
await _async_setup_entity(config, async_add_entities, discovery_info)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT vacuum dynamically through MQTT discovery."""
async def async_discover(discovery_payload):
"""Discover and add a MQTT vacuum."""
discovery_data = discovery_payload.discovery_data
try:
config = PLATFORM_SCHEMA(discovery_payload)
await _async_setup_entity(
config, async_add_entities, config_entry, discovery_data
)
except Exception:
clear_discovery_hash(hass, discovery_data[ATTR_DISCOVERY_HASH])
raise
async_dispatcher_connect(
hass, MQTT_DISCOVERY_NEW.format(DOMAIN, "mqtt"), async_discover
)
async def _async_setup_entity(
config, async_add_entities, config_entry, discovery_data=None
):
"""Set up the MQTT vacuum."""
setup_entity = {LEGACY: async_setup_entity_legacy, STATE: async_setup_entity_state}
await setup_entity[config[CONF_SCHEMA]](
config, async_add_entities, config_entry, discovery_data
)
|
import os.path as op
import gc
import pytest
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_equal,
assert_array_equal, assert_allclose)
from mne.datasets import testing
from mne import (read_forward_solution, apply_forward, apply_forward_raw,
average_forward_solutions, write_forward_solution,
convert_forward_solution, SourceEstimate, pick_types_forward,
read_evokeds, VectorSourceEstimate)
from mne.io import read_info
from mne.label import read_label
from mne.utils import (requires_mne, run_subprocess,
run_tests_if_main)
from mne.forward import (restrict_forward_to_stc, restrict_forward_to_label,
Forward, is_fixed_orient, compute_orient_prior,
compute_depth_prior)
from mne.channels import equalize_channels
data_path = testing.data_path(download=False)
fname_meeg = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_meeg_grad = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-2-grad-fwd.fif')
fname_evoked = op.join(op.dirname(__file__), '..', '..', 'io', 'tests',
'data', 'test-ave.fif')
def assert_forward_allclose(f1, f2, rtol=1e-7):
"""Compare two potentially converted forward solutions."""
assert_allclose(f1['sol']['data'], f2['sol']['data'], rtol=rtol)
assert f1['sol']['ncol'] == f2['sol']['ncol']
assert f1['sol']['ncol'] == f1['sol']['data'].shape[1]
assert_allclose(f1['source_nn'], f2['source_nn'], rtol=rtol)
if f1['sol_grad'] is not None:
assert (f2['sol_grad'] is not None)
assert_allclose(f1['sol_grad']['data'], f2['sol_grad']['data'])
assert f1['sol_grad']['ncol'] == f2['sol_grad']['ncol']
assert f1['sol_grad']['ncol'] == f1['sol_grad']['data'].shape[1]
else:
assert (f2['sol_grad'] is None)
assert f1['source_ori'] == f2['source_ori']
assert f1['surf_ori'] == f2['surf_ori']
    assert f1['src'][0]['coord_frame'] == f2['src'][0]['coord_frame']
@testing.requires_testing_data
def test_convert_forward():
"""Test converting forward solution between different representations."""
fwd = read_forward_solution(fname_meeg_grad)
fwd_repr = repr(fwd)
assert ('306' in fwd_repr)
assert ('60' in fwd_repr)
assert (fwd_repr)
assert (isinstance(fwd, Forward))
# look at surface orientation
fwd_surf = convert_forward_solution(fwd, surf_ori=True)
# go back
fwd_new = convert_forward_solution(fwd_surf, surf_ori=False)
assert (repr(fwd_new))
assert (isinstance(fwd_new, Forward))
assert_forward_allclose(fwd, fwd_new)
del fwd_new
gc.collect()
# now go to fixed
fwd_fixed = convert_forward_solution(fwd_surf, surf_ori=True,
force_fixed=True, use_cps=False)
del fwd_surf
gc.collect()
assert (repr(fwd_fixed))
assert (isinstance(fwd_fixed, Forward))
assert (is_fixed_orient(fwd_fixed))
# now go back to cartesian (original condition)
fwd_new = convert_forward_solution(fwd_fixed, surf_ori=False,
force_fixed=False)
assert (repr(fwd_new))
assert (isinstance(fwd_new, Forward))
assert_forward_allclose(fwd, fwd_new)
del fwd, fwd_new, fwd_fixed
gc.collect()
@pytest.mark.slowtest
@testing.requires_testing_data
def test_io_forward(tmpdir):
"""Test IO for forward solutions."""
# do extensive tests with MEEG + grad
n_channels, n_src = 366, 108
fwd = read_forward_solution(fname_meeg_grad)
assert (isinstance(fwd, Forward))
fwd = read_forward_solution(fname_meeg_grad)
fwd = convert_forward_solution(fwd, surf_ori=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd['sol']['row_names']), n_channels)
fname_temp = tmpdir.join('test-fwd.fif')
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd = read_forward_solution(fname_meeg_grad)
fwd = convert_forward_solution(fwd, surf_ori=True)
fwd_read = read_forward_solution(fname_temp)
fwd_read = convert_forward_solution(fwd_read, surf_ori=True)
leadfield = fwd_read['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src))
assert_equal(len(fwd_read['sol']['row_names']), n_channels)
assert_equal(len(fwd_read['info']['chs']), n_channels)
assert ('dev_head_t' in fwd_read['info'])
assert ('mri_head_t' in fwd_read)
assert_array_almost_equal(fwd['sol']['data'], fwd_read['sol']['data'])
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=False)
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
fwd_read = convert_forward_solution(fwd_read, surf_ori=True,
force_fixed=True, use_cps=False)
assert (repr(fwd_read))
assert (isinstance(fwd_read, Forward))
assert (is_fixed_orient(fwd_read))
assert_forward_allclose(fwd, fwd_read)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
leadfield = fwd['sol']['data']
    assert_equal(leadfield.shape, (n_channels, 1494 // 3))
assert_equal(len(fwd['sol']['row_names']), n_channels)
assert_equal(len(fwd['info']['chs']), n_channels)
assert ('dev_head_t' in fwd['info'])
assert ('mri_head_t' in fwd)
assert (fwd['surf_ori'])
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
fwd_read = convert_forward_solution(fwd_read, surf_ori=True,
force_fixed=True, use_cps=True)
assert (repr(fwd_read))
assert (isinstance(fwd_read, Forward))
assert (is_fixed_orient(fwd_read))
assert_forward_allclose(fwd, fwd_read)
fwd = read_forward_solution(fname_meeg_grad)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
leadfield = fwd['sol']['data']
assert_equal(leadfield.shape, (n_channels, n_src / 3))
assert_equal(len(fwd['sol']['row_names']), n_channels)
assert_equal(len(fwd['info']['chs']), n_channels)
assert ('dev_head_t' in fwd['info'])
assert ('mri_head_t' in fwd)
assert (fwd['surf_ori'])
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
fwd_read = convert_forward_solution(fwd_read, surf_ori=True,
force_fixed=True, use_cps=True)
assert (repr(fwd_read))
assert (isinstance(fwd_read, Forward))
assert (is_fixed_orient(fwd_read))
assert_forward_allclose(fwd, fwd_read)
# test warnings on bad filenames
fwd = read_forward_solution(fname_meeg_grad)
fwd_badname = tmpdir.join('test-bad-name.fif.gz')
with pytest.warns(RuntimeWarning, match='end with'):
write_forward_solution(fwd_badname, fwd)
with pytest.warns(RuntimeWarning, match='end with'):
read_forward_solution(fwd_badname)
fwd = read_forward_solution(fname_meeg)
write_forward_solution(fname_temp, fwd, overwrite=True)
fwd_read = read_forward_solution(fname_temp)
assert_forward_allclose(fwd, fwd_read)
@testing.requires_testing_data
def test_apply_forward():
"""Test projection of source space data to sensor space."""
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
fwd = pick_types_forward(fwd, meg=True)
assert isinstance(fwd, Forward)
vertno = [fwd['src'][0]['vertno'], fwd['src'][1]['vertno']]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
gain_sum = np.sum(fwd['sol']['data'], axis=1)
# Evoked
evoked = read_evokeds(fname_evoked, condition=0)
evoked.pick_types(meg=True)
with pytest.warns(RuntimeWarning, match='only .* positive values'):
evoked = apply_forward(fwd, stc, evoked.info, start=start, stop=stop)
data = evoked.data
times = evoked.times
# do some tests
assert_array_almost_equal(evoked.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
assert_array_almost_equal(times[0], t_start)
assert_array_almost_equal(times[-1], t_start + (n_times - 1) / sfreq)
# vector
stc_vec = VectorSourceEstimate(
fwd['source_nn'][:, :, np.newaxis] * stc.data[:, np.newaxis],
stc.vertices, stc.tmin, stc.tstep)
with pytest.warns(RuntimeWarning, match='very large'):
evoked_2 = apply_forward(fwd, stc_vec, evoked.info)
assert np.abs(evoked_2.data).mean() > 1e-5
assert_allclose(evoked.data, evoked_2.data, atol=1e-10)
# Raw
with pytest.warns(RuntimeWarning, match='only .* positive values'):
raw_proj = apply_forward_raw(fwd, stc, evoked.info, start=start,
stop=stop)
data, times = raw_proj[:, :]
# do some tests
assert_array_almost_equal(raw_proj.info['sfreq'], sfreq)
assert_array_almost_equal(np.sum(data, axis=1), n_times * gain_sum)
atol = 1. / sfreq
assert_allclose(raw_proj.first_samp / sfreq, t_start, atol=atol)
assert_allclose(raw_proj.last_samp / sfreq,
t_start + (n_times - 1) / sfreq, atol=atol)
@testing.requires_testing_data
def test_restrict_forward_to_stc(tmpdir):
"""Test restriction of source space to source SourceEstimate."""
start = 0
stop = 5
n_times = stop - start - 1
sfreq = 10.0
t_start = 0.123
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert (isinstance(fwd_out, Forward))
assert_equal(fwd_out['sol']['ncol'], 20)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=False)
fwd = pick_types_forward(fwd, meg=True)
vertno = [fwd['src'][0]['vertno'][0:15], fwd['src'][1]['vertno'][0:5]]
stc_data = np.ones((len(vertno[0]) + len(vertno[1]), n_times))
stc = SourceEstimate(stc_data, vertno, tmin=t_start, tstep=1.0 / sfreq)
fwd_out = restrict_forward_to_stc(fwd, stc)
assert_equal(fwd_out['sol']['ncol'], 60)
assert_equal(fwd_out['src'][0]['nuse'], 15)
assert_equal(fwd_out['src'][1]['nuse'], 5)
assert_equal(fwd_out['src'][0]['vertno'], fwd['src'][0]['vertno'][0:15])
assert_equal(fwd_out['src'][1]['vertno'], fwd['src'][1]['vertno'][0:5])
# Test saving the restricted forward object. This only works if all fields
# are properly accounted for.
fname_copy = tmpdir.join('copy-fwd.fif')
with pytest.warns(RuntimeWarning, match='stored on disk'):
write_forward_solution(fname_copy, fwd_out, overwrite=True)
fwd_out_read = read_forward_solution(fname_copy)
fwd_out_read = convert_forward_solution(fwd_out_read, surf_ori=True,
force_fixed=False)
assert_forward_allclose(fwd_out, fwd_out_read)
@testing.requires_testing_data
def test_restrict_forward_to_label(tmpdir):
"""Test restriction of source space to label."""
fwd = read_forward_solution(fname_meeg)
fwd = convert_forward_solution(fwd, surf_ori=True, force_fixed=True,
use_cps=True)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]
nuse_lh = fwd['src'][0]['nuse']
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
src_sel_rh += nuse_lh
assert_equal(fwd_out['sol']['ncol'], len(src_sel_lh) + len(src_sel_rh))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)
fwd = read_forward_solution(fname_meeg)
fwd = pick_types_forward(fwd, meg=True)
label_path = op.join(data_path, 'MEG', 'sample', 'labels')
labels = ['Aud-lh', 'Vis-rh']
label_lh = read_label(op.join(label_path, labels[0] + '.label'))
label_rh = read_label(op.join(label_path, labels[1] + '.label'))
fwd_out = restrict_forward_to_label(fwd, [label_lh, label_rh])
src_sel_lh = np.intersect1d(fwd['src'][0]['vertno'], label_lh.vertices)
src_sel_lh = np.searchsorted(fwd['src'][0]['vertno'], src_sel_lh)
vertno_lh = fwd['src'][0]['vertno'][src_sel_lh]
nuse_lh = fwd['src'][0]['nuse']
src_sel_rh = np.intersect1d(fwd['src'][1]['vertno'], label_rh.vertices)
src_sel_rh = np.searchsorted(fwd['src'][1]['vertno'], src_sel_rh)
vertno_rh = fwd['src'][1]['vertno'][src_sel_rh]
src_sel_rh += nuse_lh
assert_equal(fwd_out['sol']['ncol'],
3 * (len(src_sel_lh) + len(src_sel_rh)))
assert_equal(fwd_out['src'][0]['nuse'], len(src_sel_lh))
assert_equal(fwd_out['src'][1]['nuse'], len(src_sel_rh))
assert_equal(fwd_out['src'][0]['vertno'], vertno_lh)
assert_equal(fwd_out['src'][1]['vertno'], vertno_rh)
# Test saving the restricted forward object. This only works if all fields
# are properly accounted for.
fname_copy = tmpdir.join('copy-fwd.fif')
write_forward_solution(fname_copy, fwd_out, overwrite=True)
fwd_out_read = read_forward_solution(fname_copy)
assert_forward_allclose(fwd_out, fwd_out_read)
@testing.requires_testing_data
@requires_mne
def test_average_forward_solution(tmpdir):
"""Test averaging forward solutions."""
fwd = read_forward_solution(fname_meeg)
# input not a list
pytest.raises(TypeError, average_forward_solutions, 1)
# list is too short
pytest.raises(ValueError, average_forward_solutions, [])
# negative weights
pytest.raises(ValueError, average_forward_solutions, [fwd, fwd], [-1, 0])
# all zero weights
pytest.raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0])
# weights not same length
pytest.raises(ValueError, average_forward_solutions, [fwd, fwd], [0, 0, 0])
# list does not only have all dict()
pytest.raises(TypeError, average_forward_solutions, [1, fwd])
# try an easy case
fwd_copy = average_forward_solutions([fwd])
assert (isinstance(fwd_copy, Forward))
assert_array_equal(fwd['sol']['data'], fwd_copy['sol']['data'])
# modify a fwd solution, save it, use MNE to average with old one
fwd_copy['sol']['data'] *= 0.5
fname_copy = str(tmpdir.join('copy-fwd.fif'))
write_forward_solution(fname_copy, fwd_copy, overwrite=True)
cmd = ('mne_average_forward_solutions', '--fwd', fname_meeg, '--fwd',
fname_copy, '--out', fname_copy)
run_subprocess(cmd)
# now let's actually do it, with one filename and one fwd
fwd_ave = average_forward_solutions([fwd, fwd_copy])
assert_array_equal(0.75 * fwd['sol']['data'], fwd_ave['sol']['data'])
# fwd_ave_mne = read_forward_solution(fname_copy)
# assert_array_equal(fwd_ave_mne['sol']['data'], fwd_ave['sol']['data'])
# with gradient
fwd = read_forward_solution(fname_meeg_grad)
fwd_ave = average_forward_solutions([fwd, fwd])
assert_forward_allclose(fwd, fwd_ave)
@testing.requires_testing_data
def test_priors():
"""Test prior computations."""
# Depth prior
fwd = read_forward_solution(fname_meeg)
assert not is_fixed_orient(fwd)
n_sources = fwd['nsource']
info = read_info(fname_evoked)
depth_prior = compute_depth_prior(fwd, info, exp=0.8)
assert depth_prior.shape == (3 * n_sources,)
depth_prior = compute_depth_prior(fwd, info, exp=0.)
assert_array_equal(depth_prior, 1.)
with pytest.raises(ValueError, match='must be "whiten"'):
compute_depth_prior(fwd, info, limit_depth_chs='foo')
with pytest.raises(ValueError, match='noise_cov must be a Covariance'):
compute_depth_prior(fwd, info, limit_depth_chs='whiten')
fwd_fixed = convert_forward_solution(fwd, force_fixed=True)
depth_prior = compute_depth_prior(fwd_fixed, info=info)
assert depth_prior.shape == (n_sources,)
# Orientation prior
orient_prior = compute_orient_prior(fwd, 1.)
assert_array_equal(orient_prior, 1.)
orient_prior = compute_orient_prior(fwd_fixed, 0.)
assert_array_equal(orient_prior, 1.)
with pytest.raises(ValueError, match='oriented in surface coordinates'):
compute_orient_prior(fwd, 0.5)
fwd_surf_ori = convert_forward_solution(fwd, surf_ori=True)
orient_prior = compute_orient_prior(fwd_surf_ori, 0.5)
assert all(np.in1d(orient_prior, (0.5, 1.)))
with pytest.raises(ValueError, match='between 0 and 1'):
compute_orient_prior(fwd_surf_ori, -0.5)
with pytest.raises(ValueError, match='with fixed orientation'):
compute_orient_prior(fwd_fixed, 0.5)
@testing.requires_testing_data
def test_equalize_channels():
"""Test equalization of channels for instances of Forward."""
fwd1 = read_forward_solution(fname_meeg)
fwd1.pick_channels(['EEG 001', 'EEG 002', 'EEG 003'])
fwd2 = fwd1.copy().pick_channels(['EEG 002', 'EEG 001'], ordered=True)
fwd1, fwd2 = equalize_channels([fwd1, fwd2])
assert fwd1.ch_names == ['EEG 001', 'EEG 002']
assert fwd2.ch_names == ['EEG 001', 'EEG 002']
run_tests_if_main()
|
import logging
from typing import Dict, List
from homeassistant.core import CALLBACK_TYPE, callback
from homeassistant.helpers.entity import Entity
from .const import DOMAIN, MANUFACTURER, MODELS, SIGNAL_NAME
from .data_handler import NetatmoDataHandler
_LOGGER = logging.getLogger(__name__)
class NetatmoBase(Entity):
"""Netatmo entity base class."""
def __init__(self, data_handler: NetatmoDataHandler) -> None:
"""Set up Netatmo entity base."""
self.data_handler = data_handler
self._data_classes: List[Dict] = []
self._listeners: List[CALLBACK_TYPE] = []
self._device_name = None
self._id = None
self._model = None
self._name = None
self._unique_id = None
async def async_added_to_hass(self) -> None:
"""Entity created."""
_LOGGER.debug("New client %s", self.entity_id)
for data_class in self._data_classes:
signal_name = data_class[SIGNAL_NAME]
if "home_id" in data_class:
await self.data_handler.register_data_class(
data_class["name"],
signal_name,
self.async_update_callback,
home_id=data_class["home_id"],
)
elif data_class["name"] == "PublicData":
await self.data_handler.register_data_class(
data_class["name"],
signal_name,
self.async_update_callback,
LAT_NE=data_class["LAT_NE"],
LON_NE=data_class["LON_NE"],
LAT_SW=data_class["LAT_SW"],
LON_SW=data_class["LON_SW"],
)
else:
await self.data_handler.register_data_class(
data_class["name"], signal_name, self.async_update_callback
)
await self.data_handler.unregister_data_class(signal_name, None)
self.async_update_callback()
async def async_will_remove_from_hass(self):
"""Run when entity will be removed from hass."""
await super().async_will_remove_from_hass()
for listener in self._listeners:
listener()
for data_class in self._data_classes:
await self.data_handler.unregister_data_class(
data_class[SIGNAL_NAME], self.async_update_callback
)
async def async_remove(self):
"""Clean up when removing entity."""
entity_registry = await self.hass.helpers.entity_registry.async_get_registry()
entity_entry = entity_registry.async_get(self.entity_id)
if not entity_entry:
await super().async_remove()
return
entity_registry.async_remove(self.entity_id)
@callback
def async_update_callback(self):
"""Update the entity's state."""
raise NotImplementedError
@property
def _data(self):
"""Return data for this entity."""
return self.data_handler.data[self._data_classes[0]["name"]]
@property
def unique_id(self):
"""Return the unique ID of this entity."""
return self._unique_id
@property
def name(self):
"""Return the name of this entity."""
return self._name
@property
def device_info(self):
"""Return the device info for the sensor."""
return {
"identifiers": {(DOMAIN, self._id)},
"name": self._device_name,
"manufacturer": MANUFACTURER,
"model": MODELS[self._model],
}
|
import pytest
from homeassistant import core
from homeassistant.components import switch
from homeassistant.const import CONF_PLATFORM
from homeassistant.setup import async_setup_component
from tests.components.switch import common
@pytest.fixture(autouse=True)
def entities(hass):
"""Initialize the test switch."""
platform = getattr(hass.components, "test.switch")
platform.init()
yield platform.ENTITIES
async def test_methods(hass, entities):
"""Test is_on, turn_on, turn_off methods."""
switch_1, switch_2, switch_3 = entities
assert await async_setup_component(
hass, switch.DOMAIN, {switch.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.async_block_till_done()
assert switch.is_on(hass, switch_1.entity_id)
assert not switch.is_on(hass, switch_2.entity_id)
assert not switch.is_on(hass, switch_3.entity_id)
await common.async_turn_off(hass, switch_1.entity_id)
await common.async_turn_on(hass, switch_2.entity_id)
assert not switch.is_on(hass, switch_1.entity_id)
assert switch.is_on(hass, switch_2.entity_id)
# Turn all off
await common.async_turn_off(hass)
assert not switch.is_on(hass, switch_1.entity_id)
assert not switch.is_on(hass, switch_2.entity_id)
assert not switch.is_on(hass, switch_3.entity_id)
# Turn all on
await common.async_turn_on(hass)
assert switch.is_on(hass, switch_1.entity_id)
assert switch.is_on(hass, switch_2.entity_id)
assert switch.is_on(hass, switch_3.entity_id)
async def test_switch_context(hass, entities, hass_admin_user):
"""Test that switch context works."""
assert await async_setup_component(hass, "switch", {"switch": {"platform": "test"}})
await hass.async_block_till_done()
state = hass.states.get("switch.ac")
assert state is not None
await hass.services.async_call(
"switch",
"toggle",
{"entity_id": state.entity_id},
True,
core.Context(user_id=hass_admin_user.id),
)
state2 = hass.states.get("switch.ac")
assert state2 is not None
assert state.state != state2.state
assert state2.context.user_id == hass_admin_user.id
def test_deprecated_base_class(caplog):
"""Test deprecated base class."""
class CustomSwitch(switch.SwitchDevice):
pass
CustomSwitch()
assert "SwitchDevice is deprecated, modify CustomSwitch" in caplog.text
|
__version__ = '0.7.4'
import imp as pyimp # rename to avoid name conflict with objc_util
import logging
import logging.handlers
import os
import platform
import sys
from io import IOBase
import six
from six import BytesIO, StringIO
from six.moves.configparser import ConfigParser
# noinspection PyPep8Naming
from .system.shcommon import (_EXTERNAL_DIRS, _STASH_CONFIG_FILES, _STASH_ROOT, _SYS_STDOUT, IN_PYTHONISTA, ON_IPAD)
from .system.shcommon import Control as ctrl
from .system.shcommon import Escape as esc
from .system.shcommon import Graphics as graphics
from .system.shio import ShIO
from .system.shiowrapper import disable as disable_io_wrapper
from .system.shiowrapper import enable as enable_io_wrapper
from .system.shparsers import ShCompleter, ShExpander, ShParser
from .system.shruntime import ShRuntime
from .system.shscreens import ShSequentialScreen
from .system.shstreams import ShMiniBuffer, ShStream
from .system.shui import get_ui_implementation
from .system.shuseractionproxy import ShUserActionProxy
# Setup logging
LOGGER = logging.getLogger('StaSh')
# Debugging constants
_DEBUG_STREAM = 200
_DEBUG_RENDERER = 201
_DEBUG_MAIN_SCREEN = 202
_DEBUG_MINI_BUFFER = 203
_DEBUG_IO = 204
_DEBUG_UI = 300
_DEBUG_TERMINAL = 301
_DEBUG_TV_DELEGATE = 302
_DEBUG_RUNTIME = 400
_DEBUG_PARSER = 401
_DEBUG_EXPANDER = 402
_DEBUG_COMPLETER = 403
# Default configuration (can be overridden by external configuration file)
_DEFAULT_CONFIG = """[system]
rcfile=.stashrc
py_traceback=0
py_pdb=0
input_encoding_utf8=1
thread_type=ctypes
[display]
TEXT_FONT_SIZE={font_size}
BUTTON_FONT_SIZE=14
BACKGROUND_COLOR=(0.0, 0.0, 0.0)
TEXT_COLOR=(1.0, 1.0, 1.0)
TINT_COLOR=(0.0, 0.0, 1.0)
INDICATOR_STYLE=white
BUFFER_MAX=150
AUTO_COMPLETION_MAX=50
VK_SYMBOLS=~/.-*|>$'=!&_"\\?`
[style]
enable_styles=1
colored_errors=1
[history]
ipython_style_history_search=1
allow_double_lines=0
hide_whitespace_lines=1
maxsize=50
""".format(
font_size=(14 if ON_IPAD else 12),
)
# create directories outside STASH_ROOT
# we should do this each time StaSh starts because some commands may require
# these directories
for p in _EXTERNAL_DIRS:
if not os.path.exists(p):
try:
os.mkdir(p)
except:
pass
class StaSh(object):
"""
    Main application class. It initializes and wires the components and provides
    utility interfaces to running scripts.
"""
PY3 = six.PY3
def __init__(self, debug=(), log_setting=None, no_cfgfile=False, no_rcfile=False, no_historyfile=False, command=None):
self.__version__ = __version__
# Intercept IO
enable_io_wrapper()
self.config = self._load_config(no_cfgfile=no_cfgfile)
self.logger = self._config_logging(log_setting)
self.enable_styles = self.config.getboolean("style", "enable_styles")
self.user_action_proxy = ShUserActionProxy(self)
# Tab handler for running scripts
self.external_tab_handler = None
# Wire the components
self.main_screen = ShSequentialScreen(
self,
nlines_max=self.config.getint('display',
'BUFFER_MAX'),
debug=_DEBUG_MAIN_SCREEN in debug
)
self.mini_buffer = ShMiniBuffer(self, self.main_screen, debug=_DEBUG_MINI_BUFFER in debug)
self.stream = ShStream(self, self.main_screen, debug=_DEBUG_STREAM in debug)
self.io = ShIO(self, debug=_DEBUG_IO in debug)
ShUI, ShSequentialRenderer = get_ui_implementation()
self.terminal = None # will be set during UI initialisation
self.ui = ShUI(self, debug=(_DEBUG_UI in debug), debug_terminal=(_DEBUG_TERMINAL in debug))
self.renderer = ShSequentialRenderer(self, self.main_screen, self.terminal, debug=_DEBUG_RENDERER in debug)
parser = ShParser(debug=_DEBUG_PARSER in debug)
expander = ShExpander(self, debug=_DEBUG_EXPANDER in debug)
self.runtime = ShRuntime(self, parser, expander, no_historyfile=no_historyfile, debug=_DEBUG_RUNTIME in debug)
self.completer = ShCompleter(self, debug=_DEBUG_COMPLETER in debug)
# Navigate to the startup folder
if IN_PYTHONISTA:
os.chdir(self.runtime.state.environ_get('HOME2'))
self.runtime.load_rcfile(no_rcfile=no_rcfile)
self.io.write(
self.text_style(
'StaSh v%s on python %s\n' % (
self.__version__,
platform.python_version(),
),
{
'color': 'blue',
'traits': ['bold']
},
always=True,
),
)
# warn on py3
if self.PY3:
self.io.write(
self.text_style(
'Warning: you are running StaSh in python3. Some commands may not work correctly in python3.\n',
{'color': 'red'},
always=True,
),
)
self.io.write(
self.text_style(
                'Please help us improve StaSh by reporting bugs on GitHub.\n',
{
'color': 'yellow',
'traits': ['italic']
},
always=True,
),
)
# Load shared libraries
self._load_lib()
# run command (this calls script_will_end)
if command is None:
# show tip of the day
command = '$STASH_ROOT/bin/totd.py'
if command:
# do not run command if command is False (but not None)
if self.runtime.debug:
self.logger.debug("Running command: {!r}".format(command))
self(command, add_to_history=False, persistent_level=0)
def __call__(self, input_, persistent_level=2, *args, **kwargs):
""" This function is to be called by external script for
executing shell commands """
worker = self.runtime.run(input_, persistent_level=persistent_level, *args, **kwargs)
worker.join()
return worker
@staticmethod
def _load_config(no_cfgfile=False):
config = ConfigParser()
config.optionxform = str # make it preserve case
# defaults
if not six.PY3:
config.readfp(BytesIO(_DEFAULT_CONFIG))
else:
config.read_file(StringIO(_DEFAULT_CONFIG))
# update from config file
if not no_cfgfile:
config.read(os.path.join(_STASH_ROOT, f) for f in _STASH_CONFIG_FILES)
return config
@staticmethod
def _config_logging(log_setting):
logger = logging.getLogger('StaSh')
_log_setting = {
'level': 'DEBUG',
'stdout': True,
}
_log_setting.update(log_setting or {})
level = {
'CRITICAL': logging.CRITICAL,
'ERROR': logging.ERROR,
'WARNING': logging.WARNING,
'INFO': logging.INFO,
'DEBUG': logging.DEBUG,
            'NOTSET': logging.NOTSET,
}.get(_log_setting['level'],
logging.DEBUG)
logger.setLevel(level)
if not logger.handlers:
if _log_setting['stdout']:
_log_handler = logging.StreamHandler(_SYS_STDOUT)
else:
_log_handler = logging.handlers.RotatingFileHandler('stash.log', mode='w')
_log_handler.setLevel(level)
_log_handler.setFormatter(
logging.Formatter(
'[%(asctime)s] [%(levelname)s] [%(threadName)s] [%(name)s] [%(funcName)s] [%(lineno)d] - %(message)s'
)
)
logger.addHandler(_log_handler)
return logger
def _load_lib(self):
"""
Load library files as modules and save each of them as attributes
"""
lib_path = os.path.join(_STASH_ROOT, 'lib')
os.environ['STASH_ROOT'] = _STASH_ROOT # libcompleter needs this value
try:
for f in os.listdir(lib_path):
fp = os.path.join(lib_path, f)
if f.startswith('lib') and f.endswith('.py') and os.path.isfile(fp):
name, _ = os.path.splitext(f)
if self.runtime.debug:
self.logger.debug("Attempting to load library '{}'...".format(name))
try:
self.__dict__[name] = pyimp.load_source(name, fp)
except Exception as e:
self.write_message('%s: failed to load library file (%s)' % (f, repr(e)), error=True)
finally: # do not modify environ permanently
os.environ.pop('STASH_ROOT')
def write_message(self, s, error=False, prefix="stash: "):
"""
Write a message to the output.
        :param s: message to write
        :type s: str
:param error: whether this is an error message
:type error: bool
"""
s = '%s%s\n' % (prefix, s)
if error:
if self.runtime.debug:
self.logger.error(s)
if self.runtime.colored_errors:
s = self.text_color(s, "red")
else:
if self.runtime.debug:
self.logger.info(s)
self.io.write(s)
def launch(self, command=None):
"""
Launch StaSh, presenting the UI.
"""
self.ui.show()
# self.terminal.set_focus()
def close(self):
"""
Quit StaSh.
        StaSh is based around the UI, so we delegate this task to the UI,
which in turn will call self.on_exit().
"""
self.ui.close()
def on_exit(self):
"""
        This method will be called when StaSh is about to be closed.
"""
self.runtime.save_history()
self.cleanup()
# Clear the stack or the stdout becomes unusable for interactive prompt
self.runtime.worker_registry.purge()
def cleanup(self):
"""
Perform cleanup here.
"""
disable_io_wrapper()
def get_workers(self):
"""
        Return a list of all workers.
        :return: a list of all workers
        :rtype: list of [stash.system.shthreads.BaseThread]
"""
return [worker for worker in self.runtime.worker_registry]
# noinspection PyProtectedMember
# @staticmethod
def text_style(self, s, style, always=False):
"""
        Style the given string with ANSI escape sequences.
:param str s: String to decorate
:param dict style: A dictionary of styles
:param bool always: If true, style will be applied even for pipes.
:return:
"""
# No color for pipes, files and Pythonista console
if not self.enable_styles or (not always and (isinstance(sys.stdout,
(StringIO,
IOBase)) # or sys.stdout.write.im_self is _SYS_STDOUT
or sys.stdout is _SYS_STDOUT)):
return s
fmt_string = u'%s%%d%s%%s%s%%d%s' % (ctrl.CSI, esc.SGR, ctrl.CSI, esc.SGR)
for style_name, style_value in style.items():
if style_name == 'color':
color_id = graphics._SGR.get(style_value.lower())
if color_id is not None:
s = fmt_string % (color_id, s, graphics._SGR['default'])
elif style_name == 'bgcolor':
color_id = graphics._SGR.get('bg-' + style_value.lower())
if color_id is not None:
s = fmt_string % (color_id, s, graphics._SGR['default'])
elif style_name == 'traits':
for val in style_value:
val = val.lower()
if val == 'bold':
s = fmt_string % (graphics._SGR['+bold'], s, graphics._SGR['-bold'])
elif val == 'italic':
s = fmt_string % (graphics._SGR['+italics'], s, graphics._SGR['-italics'])
elif val == 'underline':
s = fmt_string % (graphics._SGR['+underscore'], s, graphics._SGR['-underscore'])
elif val == 'strikethrough':
s = fmt_string % (graphics._SGR['+strikethrough'], s, graphics._SGR['-strikethrough'])
return s
def text_color(self, s, color_name='default', **kwargs):
return self.text_style(s, {'color': color_name}, **kwargs)
def text_bgcolor(self, s, color_name='default', **kwargs):
return self.text_style(s, {'bgcolor': color_name}, **kwargs)
def text_bold(self, s, **kwargs):
return self.text_style(s, {'traits': ['bold']}, **kwargs)
def text_italic(self, s, **kwargs):
return self.text_style(s, {'traits': ['italic']}, **kwargs)
def text_bold_italic(self, s, **kwargs):
return self.text_style(s, {'traits': ['bold', 'italic']}, **kwargs)
def text_underline(self, s, **kwargs):
return self.text_style(s, {'traits': ['underline']}, **kwargs)
def text_strikethrough(self, s, **kwargs):
return self.text_style(s, {'traits': ['strikethrough']}, **kwargs)
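# A hedged usage sketch (not part of StaSh itself; the `_stash` name below is the
# conventional global handed to scripts and is illustrative here): text_style and
# its convenience wrappers return the string wrapped in ANSI SGR escapes whenever
# styles are enabled, e.g.
#
#     _stash.text_style('hello', {'color': 'red', 'traits': ['bold']})
#     _stash.text_color('oops', 'red')
#     _stash.text_bold_italic('notice')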
|
from django.core.management.base import CommandError
from weblate.auth.models import User
from weblate.machinery import MACHINE_TRANSLATION_SERVICES
from weblate.trans.autotranslate import AutoTranslate
from weblate.trans.management.commands import WeblateTranslationCommand
from weblate.trans.models import Component
class Command(WeblateTranslationCommand):
"""Command for mass automatic translation."""
help = "performs automatic translation based on other components"
def add_arguments(self, parser):
super().add_arguments(parser)
parser.add_argument(
"--user", default="anonymous", help=("User performing the change")
)
parser.add_argument(
"--source", default="", help=("Source component <project/component>")
)
parser.add_argument(
"--add",
default=False,
action="store_true",
help=("Add translations if they do not exist"),
)
parser.add_argument(
"--overwrite",
default=False,
action="store_true",
help=("Overwrite existing translations in target component"),
)
parser.add_argument(
"--inconsistent",
default=False,
action="store_true",
help=("Process only inconsistent translations"),
)
parser.add_argument(
"--mt", action="append", default=[], help=("Add machine translation source")
)
parser.add_argument(
"--threshold",
default=80,
type=int,
help=("Set machine translation threshold"),
)
def handle(self, *args, **options):
# Get translation object
translation = self.get_translation(**options)
# Get user
try:
user = User.objects.get(username=options["user"])
except User.DoesNotExist:
raise CommandError("User does not exist!")
if options["source"]:
parts = options["source"].split("/")
if len(parts) != 2:
raise CommandError("Invalid source component specified!")
try:
component = Component.objects.get(project__slug=parts[0], slug=parts[1])
except Component.DoesNotExist:
raise CommandError("No matching source component found!")
source = component.id
else:
source = ""
if options["mt"]:
for translator in options["mt"]:
if translator not in MACHINE_TRANSLATION_SERVICES.keys():
raise CommandError(
f"Machine translation {translator} is not available"
)
if options["inconsistent"]:
filter_type = "check:inconsistent"
elif options["overwrite"]:
filter_type = "all"
else:
filter_type = "todo"
auto = AutoTranslate(user, translation, filter_type, "translate")
if options["mt"]:
auto.process_mt(options["mt"], options["threshold"])
else:
auto.process_others(source)
self.stdout.write(f"Updated {auto.updated} units")
|
import pytest
from qstrader.utils.console import GREEN, BLUE, CYAN, string_colour
@pytest.mark.parametrize(
"text,colour,expected",
[
('green colour', GREEN, "\x1b[1;32mgreen colour\x1b[0m"),
('blue colour', BLUE, "\x1b[1;34mblue colour\x1b[0m"),
('cyan colour', CYAN, "\x1b[1;36mcyan colour\x1b[0m"),
]
)
def test_string_colour(text, colour, expected):
"""
Tests that the string colourisation for the terminal console
produces the correct values.
"""
assert string_colour(text, colour=colour) == expected
|
import asyncio
import logging
import aiohttp
import async_timeout
import voluptuous as vol
from homeassistant.components.tts import CONF_LANG, PLATFORM_SCHEMA, Provider
from homeassistant.const import CONF_API_KEY, HTTP_OK
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
YANDEX_API_URL = "https://tts.voicetech.yandex.net/generate?"
SUPPORT_LANGUAGES = ["ru-RU", "en-US", "tr-TR", "uk-UK"]
SUPPORT_CODECS = ["mp3", "wav", "opus"]
SUPPORT_VOICES = [
"jane",
"oksana",
"alyss",
"omazh",
"zahar",
"ermil",
"levitan",
"ermilov",
"silaerkan",
"kolya",
"kostya",
"nastya",
"sasha",
"nick",
"erkanyavas",
"zhenya",
"tanya",
"anton_samokhvalov",
"tatyana_abramova",
"voicesearch",
"ermil_with_tuning",
"robot",
"dude",
"zombie",
"smoky",
]
SUPPORTED_EMOTION = ["good", "evil", "neutral"]
MIN_SPEED = 0.1
MAX_SPEED = 3
CONF_CODEC = "codec"
CONF_VOICE = "voice"
CONF_EMOTION = "emotion"
CONF_SPEED = "speed"
DEFAULT_LANG = "en-US"
DEFAULT_CODEC = "mp3"
DEFAULT_VOICE = "zahar"
DEFAULT_EMOTION = "neutral"
DEFAULT_SPEED = 1
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Optional(CONF_LANG, default=DEFAULT_LANG): vol.In(SUPPORT_LANGUAGES),
vol.Optional(CONF_CODEC, default=DEFAULT_CODEC): vol.In(SUPPORT_CODECS),
vol.Optional(CONF_VOICE, default=DEFAULT_VOICE): vol.In(SUPPORT_VOICES),
vol.Optional(CONF_EMOTION, default=DEFAULT_EMOTION): vol.In(SUPPORTED_EMOTION),
vol.Optional(CONF_SPEED, default=DEFAULT_SPEED): vol.Range(
min=MIN_SPEED, max=MAX_SPEED
),
}
)
SUPPORTED_OPTIONS = [CONF_CODEC, CONF_VOICE, CONF_EMOTION, CONF_SPEED]
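# Example configuration.yaml entry matching the schema above (a sketch; the API
# key is a placeholder and only api_key is required):
#
#     tts:
#       - platform: yandextts
#         api_key: YOUR_YANDEX_API_KEY
#         language: ru-RU
#         codec: mp3
#         voice: oksana
#         emotion: good
#         speed: 1.2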
async def async_get_engine(hass, config, discovery_info=None):
"""Set up VoiceRSS speech component."""
return YandexSpeechKitProvider(hass, config)
class YandexSpeechKitProvider(Provider):
"""VoiceRSS speech api provider."""
def __init__(self, hass, conf):
"""Init VoiceRSS TTS service."""
self.hass = hass
self._codec = conf.get(CONF_CODEC)
self._key = conf.get(CONF_API_KEY)
self._speaker = conf.get(CONF_VOICE)
self._language = conf.get(CONF_LANG)
self._emotion = conf.get(CONF_EMOTION)
self._speed = str(conf.get(CONF_SPEED))
self.name = "YandexTTS"
@property
def default_language(self):
"""Return the default language."""
return self._language
@property
def supported_languages(self):
"""Return list of supported languages."""
return SUPPORT_LANGUAGES
@property
def supported_options(self):
"""Return list of supported options."""
return SUPPORTED_OPTIONS
async def async_get_tts_audio(self, message, language, options=None):
"""Load TTS from yandex."""
websession = async_get_clientsession(self.hass)
actual_language = language
options = options or {}
try:
with async_timeout.timeout(10):
url_param = {
"text": message,
"lang": actual_language,
"key": self._key,
"speaker": options.get(CONF_VOICE, self._speaker),
"format": options.get(CONF_CODEC, self._codec),
"emotion": options.get(CONF_EMOTION, self._emotion),
"speed": options.get(CONF_SPEED, self._speed),
}
request = await websession.get(YANDEX_API_URL, params=url_param)
if request.status != HTTP_OK:
_LOGGER.error(
"Error %d on load URL %s", request.status, request.url
)
return (None, None)
data = await request.read()
except (asyncio.TimeoutError, aiohttp.ClientError):
_LOGGER.error("Timeout for yandex speech kit API")
return (None, None)
return (self._codec, data)
|
from homeassistant.components import mqtt
from homeassistant.core import callback
from homeassistant.helpers.entity import Entity
from homeassistant.util import slugify
from .definitions import DEFINITIONS
DOMAIN = "dsmr_reader"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up DSMR Reader sensors."""
sensors = []
for topic in DEFINITIONS:
sensors.append(DSMRSensor(topic))
async_add_entities(sensors)
class DSMRSensor(Entity):
"""Representation of a DSMR sensor that is updated via MQTT."""
def __init__(self, topic):
"""Initialize the sensor."""
self._definition = DEFINITIONS[topic]
self._entity_id = slugify(topic.replace("/", "_"))
self._topic = topic
self._name = self._definition.get("name", topic.split("/")[-1])
self._unit_of_measurement = self._definition.get("unit")
self._icon = self._definition.get("icon")
self._transform = self._definition.get("transform")
self._state = None
async def async_added_to_hass(self):
"""Subscribe to MQTT events."""
@callback
def message_received(message):
"""Handle new MQTT messages."""
if self._transform is not None:
self._state = self._transform(message.payload)
else:
self._state = message.payload
self.async_write_ha_state()
await mqtt.async_subscribe(self.hass, self._topic, message_received, 1)
@property
def name(self):
"""Return the name of the sensor supplied in constructor."""
return self._name
@property
def entity_id(self):
"""Return the entity ID for this sensor."""
return f"sensor.{self._entity_id}"
@property
def state(self):
"""Return the current state of the entity."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of this sensor."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon of this sensor."""
return self._icon
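# Illustrative shape of a DEFINITIONS entry as consumed above (a sketch, not the
# actual contents of .definitions; topic, name, unit and transform are made up):
#
#     DEFINITIONS = {
#         "dsmr/reading/electricity_currently_delivered": {
#             "name": "Current power usage",
#             "unit": "kW",
#             "icon": "mdi:flash",
#             "transform": float,
#         },
#     }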
|
from pgmpy.estimators import StructureScore
class ScoreCache(StructureScore):
def __init__(self, base_scorer, data, max_size=10000, **kwargs):
"""
A wrapper class for StructureScore instances, which implement a decomposable score,
that caches local scores.
Based on the global decomposition property of Bayesian networks for decomposable scores.
Parameters
----------
base_scorer: StructureScore instance
Has to be a decomposable score.
data: pandas DataFrame instance
DataFrame instance where each column represents one variable.
(If some values in the data are missing the data cells should be set to `numpy.NaN`.
Note that pandas converts each column containing `numpy.NaN`s to dtype `float`.)
max_size: int (optional, default 10_000)
The maximum number of elements allowed in the cache. When the limit is reached, the least recently used
entries will be discarded.
**kwargs
Additional arguments that will be handed to the super constructor.
Reference
---------
Koller & Friedman, Probabilistic Graphical Models - Principles and Techniques, 2009
Section 18.3
"""
assert isinstance(
base_scorer, StructureScore
), "Base scorer has to be of type StructureScore."
self.base_scorer = base_scorer
self.cache = LRUCache(
original_function=self._wrapped_original, max_size=int(max_size)
)
super(ScoreCache, self).__init__(data, **kwargs)
def local_score(self, variable, parents):
hashable = tuple(parents)
return self.cache(variable, hashable)
def _wrapped_original(self, variable, parents):
expected = list(parents)
return self.base_scorer.local_score(variable, expected)
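# A hedged usage sketch (BicScore and the DataFrame `data` are assumptions, not
# part of this module): wrapping a decomposable scorer makes repeated
# local_score() calls with the same (variable, parents) pair hit the cache.
#
#     from pgmpy.estimators import BicScore
#     cached = ScoreCache(BicScore(data), data)
#     cached.local_score("B", ["A"])   # computed by the base scorer
#     cached.local_score("B", ["A"])   # served from the LRU cache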
# link fields
_PREV, _NEXT, _KEY, _VALUE = 0, 1, 2, 3
class LRUCache:
def __init__(self, original_function, max_size=10000):
"""
Least-Recently-Used cache.
        Acts as a wrapper around an arbitrary function and caches the return values.
Based on the implementation of Raymond Hettinger
(https://stackoverflow.com/questions/2437617/limiting-the-size-of-a-python-dictionary)
Parameters
----------
original_function: callable
The original function that will be wrapped. Return values will be cached.
The function parameters have to be hashable.
max_size: int (optional, default 10_000)
The maximum number of elements allowed within the cache. If the size would be exceeded,
the least recently used element will be removed from the cache.
"""
self.original_function = original_function
self.max_size = max_size
self.mapping = {}
# oldest
self.head = [None, None, None, None]
# newest
self.tail = [self.head, None, None, None]
self.head[_NEXT] = self.tail
def __call__(self, *key):
mapping, head, tail = self.mapping, self.head, self.tail
link = mapping.get(key, head)
if link is head:
# Not yet in map
value = self.original_function(*key)
if len(mapping) >= self.max_size:
# Unlink the least recently used element
old_prev, old_next, old_key, old_value = head[_NEXT]
head[_NEXT] = old_next
old_next[_PREV] = head
del mapping[old_key]
# Add new value as most recently used element
last = tail[_PREV]
link = [last, tail, key, value]
mapping[key] = last[_NEXT] = tail[_PREV] = link
else:
# Unlink element from current position
link_prev, link_next, key, value = link
link_prev[_NEXT] = link_next
link_next[_PREV] = link_prev
# Add as most recently used element
last = tail[_PREV]
last[_NEXT] = tail[_PREV] = link
link[_PREV] = last
link[_NEXT] = tail
return value
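# A minimal usage sketch (the function `expensive` is illustrative): LRUCache
# wraps any callable whose positional arguments are hashable and evicts the
# least recently used entry once max_size is reached.
#
#     def expensive(x, y):
#         return x ** y
#
#     cache = LRUCache(original_function=expensive, max_size=2)
#     cache(2, 10)   # computed and stored
#     cache(2, 10)   # served from the cache
#     cache(3, 3)    # computed and stored
#     cache(4, 2)    # computed; the least recently used key (2, 10) is evicted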
|
import os
import unittest
class RoslibPackagesTest(unittest.TestCase):
def test_find_node(self):
import roslib.packages
d = roslib.packages.get_pkg_dir('roslib')
p = os.path.join(d, 'test', 'fake_node.py')
self.assertEquals([p], roslib.packages.find_node('roslib', 'fake_node.py'))
self.assertEquals([], roslib.packages.find_node('roslib', 'not_a_node'))
def test_get_pkg_dir(self):
import roslib.packages
import roslib.rospack
path = os.path.normpath(roslib.rospack.rospackexec(['find', 'roslib']))
self.assertEquals(path, roslib.packages.get_pkg_dir('roslib'))
try:
self.assertEquals(path, roslib.packages.get_pkg_dir('fake_roslib'))
self.fail('should have raised')
except roslib.packages.InvalidROSPkgException:
pass
def test_get_dir_pkg(self):
import roslib.packages
path = get_roslib_path()
res = roslib.packages.get_dir_pkg(path)
res = (os.path.realpath(res[0]), res[1])
self.assertEquals((path, 'roslib'), res)
res = roslib.packages.get_dir_pkg(os.path.join(path, 'test'))
res = (os.path.realpath(res[0]), res[1])
self.assertEquals((path, 'roslib'), res)
# must fail on parent of roslib
self.assertEquals((None, None), roslib.packages.get_dir_pkg(os.path.dirname(path)))
def get_roslib_path():
return os.path.realpath(os.path.abspath(os.path.join(get_test_path(), '..')))
def get_test_path():
return os.path.abspath(os.path.dirname(__file__))
|
import sys
import textwrap
import pytest
from tests.helpers import utils
try:
from scripts.dev import run_vulture
except ImportError:
if hasattr(sys, 'frozen'):
# Tests aren't going to run anyways because of the mark
pass
else:
raise
pytestmark = [pytest.mark.not_frozen]
class VultureDir:
"""Fixture similar to pytest's testdir fixture for vulture.
Attributes:
_tmp_path: The pytest tmp_path fixture.
"""
def __init__(self, tmp_path):
self._tmp_path = tmp_path
def run(self):
"""Run vulture over all generated files and return the output."""
names = [p.name for p in self._tmp_path.glob('*')]
assert names
with utils.change_cwd(self._tmp_path):
return run_vulture.run(names)
def makepyfile(self, **kwargs):
"""Create a python file, similar to TestDir.makepyfile."""
for filename, data in kwargs.items():
text = textwrap.dedent(data)
(self._tmp_path / (filename + '.py')).write_text(text, 'utf-8')
@pytest.fixture
def vultdir(tmp_path):
return VultureDir(tmp_path)
def test_used(vultdir):
vultdir.makepyfile(foo="""
def foo():
pass
foo()
""")
assert not vultdir.run()
def test_unused_func(vultdir):
vultdir.makepyfile(foo="""
def foo():
pass
""")
msg = "foo.py:2: unused function 'foo' (60% confidence)"
assert vultdir.run() == [msg]
def test_unused_method_camelcase(vultdir):
"""Should be ignored because those are Qt methods."""
vultdir.makepyfile(foo="""
class Foo():
def fooBar(self):
pass
Foo()
""")
assert not vultdir.run()
|
from typing import cast
from PyQt5.QtCore import pyqtSlot, QObject, QEvent
from PyQt5.QtGui import QKeyEvent, QWindow
from PyQt5.QtWidgets import QApplication
from qutebrowser.keyinput import modeman
from qutebrowser.misc import quitter
from qutebrowser.utils import objreg
class EventFilter(QObject):
"""Global Qt event filter.
Attributes:
_activated: Whether the EventFilter is currently active.
        _handlers: A {QEvent.Type: callable} dict with the handlers for an
event.
"""
def __init__(self, parent: QObject = None) -> None:
super().__init__(parent)
self._activated = True
self._handlers = {
QEvent.KeyPress: self._handle_key_event,
QEvent.KeyRelease: self._handle_key_event,
QEvent.ShortcutOverride: self._handle_key_event,
}
def install(self) -> None:
QApplication.instance().installEventFilter(self)
@pyqtSlot()
def shutdown(self) -> None:
QApplication.instance().removeEventFilter(self)
def _handle_key_event(self, event: QKeyEvent) -> bool:
"""Handle a key press/release event.
Args:
event: The QEvent which is about to be delivered.
Return:
True if the event should be filtered, False if it's passed through.
"""
active_window = QApplication.instance().activeWindow()
if active_window not in objreg.window_registry.values():
# Some other window (print dialog, etc.) is focused so we pass the
# event through.
return False
try:
man = modeman.instance('current')
return man.handle_event(event)
except objreg.RegistryUnavailableError:
# No window available yet, or not a MainWindow
return False
def eventFilter(self, obj: QObject, event: QEvent) -> bool:
"""Handle an event.
Args:
obj: The object which will get the event.
event: The QEvent which is about to be delivered.
Return:
True if the event should be filtered, False if it's passed through.
"""
if not isinstance(obj, QWindow):
# We already handled this same event at some point earlier, so
# we're not interested in it anymore.
return False
typ = event.type()
if typ not in self._handlers:
return False
if not self._activated:
return False
handler = self._handlers[typ]
try:
return handler(cast(QKeyEvent, event))
except:
# If there is an exception in here and we leave the eventfilter
# activated, we'll get an infinite loop and a stack overflow.
self._activated = False
raise
def init() -> None:
"""Initialize the global EventFilter instance."""
event_filter = EventFilter(parent=QApplication.instance())
event_filter.install()
quitter.instance.shutting_down.connect(event_filter.shutdown)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import flags
from absl.testing import parameterized
from compare_gan import datasets
from compare_gan import test_utils
from compare_gan.gans import consts as c
from compare_gan.gans.modular_gan import ModularGAN
import tensorflow as tf
FLAGS = flags.FLAGS
class ModularGanTpuTest(parameterized.TestCase, test_utils.CompareGanTestCase):
def setUp(self):
super(ModularGanTpuTest, self).setUp()
self.model_dir = self._get_empty_model_dir()
self.run_config = tf.contrib.tpu.RunConfig(
model_dir=self.model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
@parameterized.parameters([1, 2, 5])
def testBatchSize(self, disc_iters, use_tpu=True):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"disc_iters": disc_iters,
}
batch_size = 16
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
use_tpu=True)
estimator.train(gan.input_fn, steps=1)
gen_args = gan.generator.call_arg_list
disc_args = gan.discriminator.call_arg_list
self.assertLen(gen_args, disc_iters + 1) # D steps, G step.
self.assertLen(disc_args, disc_iters + 1) # D steps, G step.
for args in gen_args:
self.assertAllEqual(args["z"].shape.as_list(), [8, 128])
for args in disc_args:
self.assertAllEqual(args["x"].shape.as_list(), [16, 32, 32, 3])
@parameterized.parameters([1, 2, 5])
def testBatchSizeSplitDiscCalls(self, disc_iters):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"disc_iters": disc_iters,
}
batch_size = 16
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
deprecated_split_disc_calls=True,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
use_tpu=True)
estimator.train(gan.input_fn, steps=1)
gen_args = gan.generator.call_arg_list
disc_args = gan.discriminator.call_arg_list
self.assertLen(gen_args, disc_iters + 1) # D steps, G step.
# Each D and G step calls discriminator twice: for real and fake images.
self.assertLen(disc_args, 2 * (disc_iters + 1))
for args in gen_args:
self.assertAllEqual(args["z"].shape.as_list(), [8, 128])
for args in disc_args:
self.assertAllEqual(args["x"].shape.as_list(), [8, 32, 32, 3])
@parameterized.parameters([1, 2, 5])
def testBatchSizeExperimentalJointGenForDisc(self, disc_iters):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"disc_iters": disc_iters,
}
batch_size = 16
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
experimental_joint_gen_for_disc=True,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=batch_size,
use_tpu=True)
estimator.train(gan.input_fn, steps=1)
gen_args = gan.generator.call_arg_list
disc_args = gan.discriminator.call_arg_list
self.assertLen(gen_args, 2)
self.assertLen(disc_args, disc_iters + 1)
self.assertAllEqual(gen_args[0]["z"].shape.as_list(), [8 * disc_iters, 128])
self.assertAllEqual(gen_args[1]["z"].shape.as_list(), [8, 128])
for args in disc_args:
self.assertAllEqual(args["x"].shape.as_list(), [16, 32, 32, 3])
if __name__ == "__main__":
tf.test.main()
|
import datetime
from decimal import Decimal
# Third-party imports
from sqlalchemy.inspection import inspect
from flask_sqlalchemy import SQLAlchemy # pylint: disable=import-error,no-name-in-module
from sqlalchemy.ext.automap import automap_base
from sqlalchemy.ext.declarative import declarative_base
db = SQLAlchemy()
class Model(object):
"""The sandman2 Model class is the base class for all RESTful resources.
There is a one-to-one mapping between a table in the database and a
:class:`sandman2.model.Model`.
"""
#: The relative URL this resource should live at.
__url__ = None
#: The API version of this resource (not yet used).
__version__ = '1'
#: The HTTP methods this resource supports (default=all).
__methods__ = {
'GET',
'POST',
'PUT',
'PATCH',
'DELETE',
'HEAD',
'OPTIONS'
}
@classmethod
def required(cls):
"""Return a list of all columns required by the database to create the
resource.
:param cls: The Model class to gather attributes from
:rtype: list
"""
columns = []
for column in cls.__table__.columns: # pylint: disable=no-member
is_autoincrement = 'int' in str(column.type).lower() and column.autoincrement
if (not column.nullable and not column.primary_key) or (column.primary_key and not is_autoincrement):
columns.append(column.name)
return columns
@classmethod
def optional(cls):
"""Return a list of all nullable columns for the resource's table.
:rtype: list
"""
columns = []
for column in cls.__table__.columns: # pylint: disable=no-member
if column.nullable:
columns.append(column.name)
return columns
@classmethod
def primary_key(cls):
"""Return the key of the model's primary key field.
:rtype: string
"""
return list(
cls.__table__.primary_key.columns)[ # pylint: disable=no-member
0].key
def to_dict(self):
"""Return the resource as a dictionary.
:rtype: dict
"""
result_dict = {}
for column in self.__table__.columns.keys(): # pylint: disable=no-member
value = result_dict[column] = getattr(self, column, None)
if isinstance(value, Decimal):
result_dict[column] = float(result_dict[column])
elif isinstance(value, datetime.datetime):
result_dict[column] = value.isoformat()
elif isinstance(value, datetime.time):
result_dict[column] = value.strftime("%H:%M:%S")
return result_dict
def links(self):
"""Return a dictionary of links to related resources that should be
included in the *Link* header of an HTTP response.
:rtype: dict
"""
link_dict = {'self': self.resource_uri()}
for relationship in inspect( # pylint: disable=maybe-no-member
self.__class__).relationships:
if 'collection' not in relationship.key:
instance = getattr(self, relationship.key)
if instance:
link_dict[str(relationship.key)] = instance.resource_uri()
return link_dict
def resource_uri(self):
"""Return the URI to this specific resource.
:rtype: str
"""
return self.__url__ + '/' + str(getattr(self, self.primary_key()))
def update(self, attributes):
"""Update the current instance based on attribute->value items in
*attributes*.
:param dict attributes: Dictionary of attributes to be updated
:rtype: :class:`sandman2.model.Model`
"""
for attribute in attributes:
setattr(self, attribute, attributes[attribute])
return self
@classmethod
def description(cls):
"""Return a field->data type dictionary describing this model
as reported by the database.
:rtype: dict
"""
description = {}
for column in cls.__table__.columns: # pylint: disable=no-member
column_description = str(column.type)
if not column.nullable:
column_description += ' (required)'
description[column.name] = column_description
return description
DeclarativeModel = declarative_base(cls=(db.Model, Model))
AutomapModel = automap_base(DeclarativeModel)
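# A usage sketch (the Person table and its columns are hypothetical): concrete
# resources subclass DeclarativeModel, point __url__ at their route, and inherit
# to_dict(), links() and resource_uri() from Model above.
#
#     from sqlalchemy import Column, Integer, String
#
#     class Person(DeclarativeModel):
#         __tablename__ = 'person'
#         __url__ = '/people'
#         id = Column(Integer, primary_key=True)
#         name = Column(String, nullable=False)
#
#     # Person(id=1, name='Ada').resource_uri() -> '/people/1'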
|
from kombu.asynchronous import get_event_loop
from .base import Request, Headers, Response
__all__ = ('Client', 'Headers', 'Response', 'Request')
def Client(hub=None, **kwargs):
"""Create new HTTP client."""
from .curl import CurlClient
return CurlClient(hub, **kwargs)
def get_client(hub=None, **kwargs):
"""Get or create HTTP client bound to the current event loop."""
hub = hub or get_event_loop()
try:
return hub._current_http_client
except AttributeError:
client = hub._current_http_client = Client(hub, **kwargs)
return client
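# A hedged usage sketch (assumes a kombu event loop/Hub has been set up and that
# pycurl is installed, since Client() returns a CurlClient):
#
#     from kombu.asynchronous import Hub, set_event_loop
#     set_event_loop(Hub())
#     client = get_client()           # created once and cached on the hub
#     assert client is get_client()   # subsequent calls reuse the same instance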
|
from __future__ import unicode_literals
import sys
import os
import inspect
import codecs
import argparse
import tempfile
import warnings
from collections import Counter
from multiprocessing import cpu_count
#hack to get imports working if running this as a script, or within a package
if __name__ == '__main__':
import learn_bpe
import apply_bpe
else:
from . import learn_bpe
from . import apply_bpe
# hack for python2/3 compatibility
from io import open
argparse.open = open
def create_parser(subparsers=None):
if subparsers:
parser = subparsers.add_parser('learn-joint-bpe-and-vocab',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="learn BPE-based word segmentation")
else:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="learn BPE-based word segmentation")
parser.add_argument(
'--input', '-i', type=argparse.FileType('r'), required=True, nargs = '+',
metavar='PATH',
help="Input texts (multiple allowed).")
parser.add_argument(
'--output', '-o', type=argparse.FileType('w'), required=True,
metavar='PATH',
help="Output file for BPE codes.")
parser.add_argument(
'--symbols', '-s', type=int, default=10000,
help="Create this many new symbols (each representing a character n-gram) (default: %(default)s)")
parser.add_argument(
'--separator', type=str, default='@@', metavar='STR',
help="Separator between non-final subword units (default: '%(default)s')")
parser.add_argument(
'--write-vocabulary', type=argparse.FileType('w'), required=True, nargs = '+', default=None,
metavar='PATH', dest='vocab',
help='Write to these vocabulary files after applying BPE. One per input text. Used for filtering in apply_bpe.py')
parser.add_argument(
'--min-frequency', type=int, default=2, metavar='FREQ',
help='Stop if no symbol pair has frequency >= FREQ (default: %(default)s)')
parser.add_argument(
'--total-symbols', '-t', action="store_true",
help="subtract number of characters from the symbols to be generated (so that '--symbols' becomes an estimate for the total number of symbols needed to encode text).")
parser.add_argument(
'--num-workers', type=int, default=1,
help="Number of processors to process texts, only supported in Python3. If -1, set `multiprocessing.cpu_count()`. (default: %(default)s)")
parser.add_argument(
'--verbose', '-v', action="store_true",
help="verbose mode.")
return parser
def learn_joint_bpe_and_vocab(args):
if args.vocab and len(args.input) != len(args.vocab):
sys.stderr.write('Error: number of input files and vocabulary files must match\n')
sys.exit(1)
# read/write files as UTF-8
args.input = [codecs.open(f.name, encoding='UTF-8') for f in args.input]
args.vocab = [codecs.open(f.name, 'w', encoding='UTF-8') for f in args.vocab]
# get combined vocabulary of all input texts
full_vocab = Counter()
for f in args.input:
full_vocab += learn_bpe.get_vocabulary(f, num_workers=args.num_workers)
f.seek(0)
vocab_list = ['{0} {1}'.format(key, freq) for (key, freq) in full_vocab.items()]
# learn BPE on combined vocabulary
with codecs.open(args.output.name, 'w', encoding='UTF-8') as output:
learn_bpe.learn_bpe(vocab_list, output, args.symbols, args.min_frequency, args.verbose, is_dict=True, total_symbols=args.total_symbols)
with codecs.open(args.output.name, encoding='UTF-8') as codes:
bpe = apply_bpe.BPE(codes, separator=args.separator)
# apply BPE to each training corpus and get vocabulary
for train_file, vocab_file in zip(args.input, args.vocab):
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.close()
tmpout = codecs.open(tmp.name, 'w', encoding='UTF-8')
train_file.seek(0)
bpe.process_lines(train_file.name, tmpout, num_workers=args.num_workers)
tmpout.close()
tmpin = codecs.open(tmp.name, encoding='UTF-8')
vocab = learn_bpe.get_vocabulary(tmpin, num_workers=args.num_workers)
tmpin.close()
os.remove(tmp.name)
for key, freq in sorted(vocab.items(), key=lambda x: x[1], reverse=True):
vocab_file.write("{0} {1}\n".format(key, freq))
vocab_file.close()
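# Typical command-line invocation matching the parser above (a sketch; file
# names are placeholders):
#
#     subword-nmt learn-joint-bpe-and-vocab --input train.de train.en -s 32000 \
#         -o codes.bpe --write-vocabulary vocab.de vocab.en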
if __name__ == '__main__':
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
newdir = os.path.join(currentdir, 'subword_nmt')
if os.path.isdir(newdir):
warnings.simplefilter('default')
warnings.warn(
"this script's location has moved to {0}. This symbolic link will be removed in a future version. Please point to the new location, or install the package and use the command 'subword-nmt'".format(newdir),
DeprecationWarning
)
# python 2/3 compatibility
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
else:
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr.buffer)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout.buffer)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin.buffer)
parser = create_parser()
args = parser.parse_args()
if args.num_workers <= 0:
args.num_workers = cpu_count()
if sys.version_info < (3, 0):
args.separator = args.separator.decode('UTF-8')
if args.num_workers > 1:
args.num_workers = 1
warnings.warn("Parallel mode is only supported in Python3. Using 1 processor instead.")
assert(len(args.input) == len(args.vocab))
learn_joint_bpe_and_vocab(args)
|
from flask import Flask
from flask_restful import Api, Resource, abort, reqparse
from flasgger import Swagger, swag_from
app = Flask(__name__)
api = Api(app)
app.config['SWAGGER'] = {
'title': 'Flasgger RESTful',
'uiversion': 2
}
swag = Swagger(app)
TODOS = {
'todo1': {'task': 'build an API'},
'todo2': {'task': '?????'},
'todo3': {'task': 'profit!'},
'42': {'task': 'Use Flasgger'}
}
def abort_if_todo_doesnt_exist(todo_id):
if todo_id not in TODOS:
abort(404, message="Todo {} doesn't exist".format(todo_id))
parser = reqparse.RequestParser()
parser.add_argument('task')
# Todo
# shows a single todo item and lets you delete a todo item
class Todo(Resource):
def get(self, todo_id):
"""
This is an example
---
tags:
- restful
parameters:
- in: path
name: todo_id
required: true
description: The ID of the task, try 42!
type: string
responses:
200:
description: The task data
schema:
id: Task
properties:
task:
type: string
default: My Task
"""
abort_if_todo_doesnt_exist(todo_id)
return TODOS[todo_id]
def delete(self, todo_id):
"""
This is an example
---
tags:
- restful
parameters:
- in: path
name: todo_id
required: true
description: The ID of the task, try 42!
type: string
responses:
204:
description: Task deleted
"""
abort_if_todo_doesnt_exist(todo_id)
del TODOS[todo_id]
return '', 204
def put(self, todo_id):
"""
This is an example
---
tags:
- restful
parameters:
- in: body
name: body
schema:
$ref: '#/definitions/Task'
- in: path
name: todo_id
required: true
description: The ID of the task, try 42!
type: string
responses:
201:
description: The task has been updated
schema:
$ref: '#/definitions/Task'
"""
args = parser.parse_args()
task = {'task': args['task']}
TODOS[todo_id] = task
return task, 201
# TodoList
# shows a list of all todos, and lets you POST to add new tasks
class TodoList(Resource):
def get(self):
"""
This is an example
---
tags:
- restful
responses:
200:
description: The task data
schema:
id: Tasks
properties:
task_id:
type: object
schema:
$ref: '#/definitions/Task'
"""
return TODOS
def post(self):
"""
This is an example
---
tags:
- restful
parameters:
- in: body
name: body
schema:
$ref: '#/definitions/Task'
responses:
201:
description: The task has been created
schema:
$ref: '#/definitions/Task'
"""
args = parser.parse_args()
todo_id = int(max(TODOS.keys()).lstrip('todo')) + 1
todo_id = 'todo%i' % todo_id
TODOS[todo_id] = {'task': args['task']}
return TODOS[todo_id], 201
class Username(Resource):
@swag_from('username_specs.yml', methods=['GET'])
def get(self, username):
return {'username': username}, 200
api.add_resource(TodoList, '/todos')
api.add_resource(Todo, '/todos/<todo_id>')
api.add_resource(Username, '/username/<username>')
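# Note: with flasgger's default settings, the interactive Swagger UI documenting the
# resources registered above is served at /apidocs/ while the app is running
# (assumption: no custom specs route is configured beyond the 'uiversion' set above).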
if __name__ == '__main__':
app.run(debug=True)
|
from aiohttp import web
# Relic from the past. Kept here so we can run negative tests.
HTTP_HEADER_HA_AUTH = "X-HA-access"
def mock_real_ip(app):
"""Inject middleware to mock real IP.
Returns a function to set the real IP.
"""
ip_to_mock = None
def set_ip_to_mock(value):
nonlocal ip_to_mock
ip_to_mock = value
@web.middleware
async def mock_real_ip(request, handler):
"""Mock Real IP middleware."""
nonlocal ip_to_mock
request = request.clone(remote=ip_to_mock)
return await handler(request)
async def real_ip_startup(app):
"""Startup of real ip."""
app.middlewares.insert(0, mock_real_ip)
app.on_startup.append(real_ip_startup)
return set_ip_to_mock
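# Minimal usage sketch (hypothetical test code, not part of this module): the helper
# is applied to an aiohttp application before startup, and the returned setter picks
# the remote address that request handlers will see.
#
#   set_real_ip = mock_real_ip(app)   # app: aiohttp.web.Application under test
#   set_real_ip("203.0.113.10")       # subsequent requests appear to come from this IP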
|
import voluptuous as vol
CONF_SCHEMA = "schema"
LEGACY = "legacy"
STATE = "state"
MQTT_VACUUM_SCHEMA = vol.Schema(
{
vol.Optional(CONF_SCHEMA, default=LEGACY): vol.All(
vol.Lower, vol.Any(LEGACY, STATE)
)
}
)
def services_to_strings(services, service_to_string):
"""Convert SUPPORT_* service bitmask to list of service strings."""
strings = []
for service in service_to_string:
if service & services:
strings.append(service_to_string[service])
return strings
def strings_to_services(strings, string_to_service):
"""Convert service strings to SUPPORT_* service bitmask."""
services = 0
for string in strings:
services |= string_to_service[string]
return services
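# Usage sketch with made-up SUPPORT_* bit values (the real constants live in the
# vacuum integration); it illustrates that the two helpers are inverses:
#
#   mapping = {1: "turn_on", 2: "return_home"}          # bit -> service string
#   services_to_strings(1 | 2, mapping)                 # -> ["turn_on", "return_home"]
#   strings_to_services(["turn_on"], {v: k for k, v in mapping.items()})  # -> 1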
|
import logging
from pydelijn.api import Passages
from pydelijn.common import HttpException
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, DEVICE_CLASS_TIMESTAMP
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by data.delijn.be"
CONF_NEXT_DEPARTURE = "next_departure"
CONF_STOP_ID = "stop_id"
CONF_API_KEY = "api_key"
CONF_NUMBER_OF_DEPARTURES = "number_of_departures"
DEFAULT_NAME = "De Lijn"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_NEXT_DEPARTURE): [
{
vol.Required(CONF_STOP_ID): cv.string,
vol.Optional(CONF_NUMBER_OF_DEPARTURES, default=5): cv.positive_int,
}
],
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Create the sensor."""
api_key = config[CONF_API_KEY]
session = async_get_clientsession(hass)
sensors = []
for nextpassage in config[CONF_NEXT_DEPARTURE]:
sensors.append(
DeLijnPublicTransportSensor(
Passages(
hass.loop,
nextpassage[CONF_STOP_ID],
nextpassage[CONF_NUMBER_OF_DEPARTURES],
api_key,
session,
True,
)
)
)
async_add_entities(sensors, True)
class DeLijnPublicTransportSensor(Entity):
"""Representation of a Ruter sensor."""
def __init__(self, line):
"""Initialize the sensor."""
self.line = line
self._attributes = {ATTR_ATTRIBUTION: ATTRIBUTION}
self._name = None
self._state = None
self._available = True
async def async_update(self):
"""Get the latest data from the De Lijn API."""
try:
await self.line.get_passages()
self._name = await self.line.get_stopname()
except HttpException:
self._available = False
_LOGGER.error("De Lijn http error")
return
self._attributes["stopname"] = self._name
try:
first = self.line.passages[0]
if first["due_at_realtime"] is not None:
first_passage = first["due_at_realtime"]
else:
first_passage = first["due_at_schedule"]
self._state = first_passage
self._attributes["line_number_public"] = first["line_number_public"]
self._attributes["line_transport_type"] = first["line_transport_type"]
self._attributes["final_destination"] = first["final_destination"]
self._attributes["due_at_schedule"] = first["due_at_schedule"]
self._attributes["due_at_realtime"] = first["due_at_realtime"]
self._attributes["is_realtime"] = first["is_realtime"]
self._attributes["next_passages"] = self.line.passages
self._available = True
except (KeyError, IndexError):
_LOGGER.error("Invalid data received from De Lijn")
self._available = False
@property
def available(self):
"""Return True if entity is available."""
return self._available
@property
def device_class(self):
"""Return the device class."""
return DEVICE_CLASS_TIMESTAMP
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon of the sensor."""
return "mdi:bus"
@property
def device_state_attributes(self):
"""Return attributes for the sensor."""
return self._attributes
|
import logging
from urllib.parse import urlparse
import voluptuous as vol
from homeassistant import config_entries, exceptions
from homeassistant.components import ssdp
from homeassistant.components.remote import (
ATTR_ACTIVITY,
ATTR_DELAY_SECS,
DEFAULT_DELAY_SECS,
)
from homeassistant.const import CONF_HOST, CONF_NAME
from homeassistant.core import callback
from .const import DOMAIN, PREVIOUS_ACTIVE_ACTIVITY, UNIQUE_ID
from .util import (
find_best_name_for_remote,
find_unique_id_for_remote,
get_harmony_client_if_available,
)
_LOGGER = logging.getLogger(__name__)
DATA_SCHEMA = vol.Schema(
{vol.Required(CONF_HOST): str, vol.Required(CONF_NAME): str}, extra=vol.ALLOW_EXTRA
)
async def validate_input(data):
"""Validate the user input allows us to connect.
Data has the keys from DATA_SCHEMA with values provided by the user.
"""
harmony = await get_harmony_client_if_available(data[CONF_HOST])
if not harmony:
raise CannotConnect
return {
CONF_NAME: find_best_name_for_remote(data, harmony),
CONF_HOST: data[CONF_HOST],
UNIQUE_ID: find_unique_id_for_remote(harmony),
}
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Logitech Harmony Hub."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def __init__(self):
"""Initialize the Harmony config flow."""
self.harmony_config = {}
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
validated = await validate_input(user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
if "base" not in errors:
await self.async_set_unique_id(validated[UNIQUE_ID])
self._abort_if_unique_id_configured()
return await self._async_create_entry_from_valid_input(
validated, user_input
)
# Return form
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered Harmony device."""
_LOGGER.debug("SSDP discovery_info: %s", discovery_info)
parsed_url = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION])
friendly_name = discovery_info[ssdp.ATTR_UPNP_FRIENDLY_NAME]
if self._host_already_configured(parsed_url.hostname):
return self.async_abort(reason="already_configured")
# pylint: disable=no-member
self.context["title_placeholders"] = {"name": friendly_name}
self.harmony_config = {
CONF_HOST: parsed_url.hostname,
CONF_NAME: friendly_name,
}
harmony = await get_harmony_client_if_available(parsed_url.hostname)
if harmony:
unique_id = find_unique_id_for_remote(harmony)
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured(
updates={CONF_HOST: self.harmony_config[CONF_HOST]}
)
self.harmony_config[UNIQUE_ID] = unique_id
return await self.async_step_link()
async def async_step_link(self, user_input=None):
"""Attempt to link with the Harmony."""
errors = {}
if user_input is not None:
# Everything was validated in async_step_ssdp
# all we do now is create.
return await self._async_create_entry_from_valid_input(
self.harmony_config, {}
)
return self.async_show_form(
step_id="link",
errors=errors,
description_placeholders={
CONF_HOST: self.harmony_config[CONF_NAME],
CONF_NAME: self.harmony_config[CONF_HOST],
},
)
async def async_step_import(self, validated_input):
"""Handle import."""
await self.async_set_unique_id(
validated_input[UNIQUE_ID], raise_on_progress=False
)
self._abort_if_unique_id_configured()
# Everything was validated in remote async_setup_platform
# all we do now is create.
return await self._async_create_entry_from_valid_input(
validated_input, validated_input
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
async def _async_create_entry_from_valid_input(self, validated, user_input):
"""Single path to create the config entry from validated input."""
data = {
CONF_NAME: validated[CONF_NAME],
CONF_HOST: validated[CONF_HOST],
}
# Options from yaml are preserved, we will pull them out when
# we setup the config entry
data.update(_options_from_user_input(user_input))
return self.async_create_entry(title=validated[CONF_NAME], data=data)
def _host_already_configured(self, host):
"""See if we already have a harmony entry matching the host."""
for entry in self._async_current_entries():
if CONF_HOST not in entry.data:
continue
if entry.data[CONF_HOST] == host:
return True
return False
def _options_from_user_input(user_input):
options = {}
if ATTR_ACTIVITY in user_input:
options[ATTR_ACTIVITY] = user_input[ATTR_ACTIVITY]
if ATTR_DELAY_SECS in user_input:
options[ATTR_DELAY_SECS] = user_input[ATTR_DELAY_SECS]
return options
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Harmony."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
remote = self.hass.data[DOMAIN][self.config_entry.entry_id]
data_schema = vol.Schema(
{
vol.Optional(
ATTR_DELAY_SECS,
default=self.config_entry.options.get(
ATTR_DELAY_SECS, DEFAULT_DELAY_SECS
),
): vol.Coerce(float),
vol.Optional(
ATTR_ACTIVITY,
default=self.config_entry.options.get(
ATTR_ACTIVITY, PREVIOUS_ACTIVE_ACTIVITY
),
): vol.In([PREVIOUS_ACTIVE_ACTIVITY, *remote.activity_names]),
}
)
return self.async_show_form(step_id="init", data_schema=data_schema)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
import time
import mock
import pytest
from pysensu_yelp import Status
from paasta_tools.check_oom_events import compose_sensu_status
from paasta_tools.check_oom_events import latest_oom_events
from paasta_tools.check_oom_events import main
from paasta_tools.check_oom_events import read_oom_events_from_scribe
@pytest.fixture(autouse=True)
def mock_scribereader():
with mock.patch("paasta_tools.check_oom_events.scribereader", autospec=True,) as m:
yield m
@pytest.fixture
def scribereader_output():
time_now = int(time.time())
return (
(
'{"timestamp": %d, "hostname": "hostname1", "container_id": "baaab5a3a9fa",'
' "cluster": "fake_cluster", "service": "fake_service1", '
'"instance": "fake_instance1", "process_name": "uwsgi"}' % (time_now - 20)
),
# Same container, different process
(
'{"timestamp": %d, "hostname": "hostname1", "container_id": "baaab5a3a9fa",'
' "cluster": "fake_cluster", "service": "fake_service1", '
'"instance": "fake_instance1", "process_name": "python"}' % (time_now - 20)
),
(
'{"timestamp": %d, "hostname": "hostname2", "container_id": "8dc8b9aeebbe",'
' "cluster": "fake_cluster", "service": "fake_service2", '
'"instance": "fake_instance2", "process_name": "uwsgi"}' % (time_now - 15)
),
(
'{"timestamp": %d, "hostname": "hostname2", "container_id": "7dc8b9ffffff",'
' "cluster": "fake_cluster", "service": "fake_service2", '
'"instance": "fake_instance2", "process_name": "uwsgi"}' % (time_now - 14)
),
("Non-JSON lines must be ignored."),
)
@pytest.fixture
def instance_config():
config = mock.Mock()
config.instance = "fake_instance"
config.service = "fake_service"
return config
@pytest.fixture(autouse=True)
def mock_load_system_paasta_config():
with mock.patch(
"paasta_tools.check_oom_events.load_system_paasta_config", autospec=True,
) as mock_load:
mock_load.return_value.get_cluster.return_value = "fake_cluster"
mock_load.return_value.get_log_reader.return_value = {
"options": {"cluster_map": {"fake_cluster": "fake_scribe_env"}},
}
yield mock_load
@pytest.fixture(autouse=True)
def mock_scribe_env_to_locations():
with mock.patch(
"paasta_tools.check_oom_events.scribe_env_to_locations", autospec=True,
) as m:
m.return_value = {
"ecosystem": "an_ecosystem",
"region": "a_region",
"superregion": "a_superregion",
}
yield m
def test_compose_sensu_status_ok(instance_config):
assert compose_sensu_status(
instance=instance_config,
oom_events=[],
is_check_enabled=True,
alert_threshold=1,
check_interval=1,
) == (
Status.OK,
"No oom events for fake_service.fake_instance in the last 1 minute(s).",
)
def test_compose_sensu_status_unknown(instance_config):
assert compose_sensu_status(
instance=instance_config,
oom_events=[],
is_check_enabled=False,
alert_threshold=1,
check_interval=1,
) == (Status.OK, "This check is disabled for fake_service.fake_instance.")
def test_compose_sensu_status_not_ok(instance_config):
assert compose_sensu_status(
instance=instance_config,
oom_events={"container_id1", "container_id2", "container_id3"},
is_check_enabled=True,
alert_threshold=2,
check_interval=1,
) == (
Status.CRITICAL,
"The Out Of Memory killer killed processes for fake_service.fake_instance"
" in the last 1 minute(s).",
)
def test_compose_sensu_status_below_threshold(instance_config):
assert (
compose_sensu_status(
instance=instance_config,
oom_events={"container_id1", "container_id2", "container_id3"},
is_check_enabled=True,
alert_threshold=5,
check_interval=1,
)
is None
)
def test_read_oom_events_from_scribe(
mock_scribereader, scribereader_output, mock_scribe_env_to_locations,
):
mock_scribereader.get_tail_host_and_port.return_value = "localhost", 12345
mock_scribereader.get_stream_tailer.return_value = scribereader_output
assert (
len(
[x for x in read_oom_events_from_scribe("fake_cluster", "fake_superregion")]
)
== 4
)
assert mock_scribe_env_to_locations.call_args_list == [
mock.call("fake_scribe_env"),
]
def test_latest_oom_events(mock_scribereader, scribereader_output):
mock_scribereader.get_tail_host_and_port.return_value = "localhost", 12345
mock_scribereader.get_stream_tailer.return_value = scribereader_output
events = latest_oom_events("fake_cluster", "fake_superregion")
# Events from the same container count as one
assert len(events.get(("fake_service1", "fake_instance1"), [])) == 1
assert len(events.get(("fake_service2", "fake_instance2"), [])) == 2
assert len(events.get(("fake_service3", "fake_instance3"), [])) == 0
def test_latest_oom_events_interval(mock_scribereader, scribereader_output):
mock_scribereader.get_tail_host_and_port.return_value = "localhost", 12345
mock_scribereader.get_stream_tailer.return_value = scribereader_output
events = latest_oom_events("fake_cluster", "fake_superregion", interval=10)
    # The mocked events are all more than 10 seconds old, so none should be returned
assert len(events) == 0
@mock.patch("paasta_tools.check_oom_events.latest_oom_events", autospec=True)
@mock.patch("paasta_tools.check_oom_events.get_services_for_cluster", autospec=True)
@mock.patch("paasta_tools.check_oom_events.send_sensu_event", autospec=True)
@mock.patch("paasta_tools.check_oom_events.get_instance_config", autospec=True)
def test_main(
mock_get_instance_config,
mock_send_sensu_event,
mock_get_services_for_cluster,
mock_latest_oom_events,
scribereader_output,
):
mock_get_services_for_cluster.return_value = [
("fake_service1", "fake_instance1"),
("fake_service2", "fake_instance2"),
("fake_service3", "fake_instance3"),
]
main(["", "-s", "some_superregion", "-d", "soa_dir", "--check-interval", "3"])
assert mock_send_sensu_event.call_count == 3
mock_latest_oom_events.assert_called_once_with(
cluster="fake_cluster", superregion="some_superregion", interval=180,
)
|
from __future__ import print_function
# by Siddharth Duahantha
# 28 July 2017
import sys
import argparse
COW = r""" \ ^__^
\ (oo)\_______
(__)\ )\/\\
||----w |
|| ||
"""
def get_cow(text):
"""create a string of a cow saying things."""
lines = text.split("\n")
nlines = len(lines)
longest_line = max([len(l) for l in lines])
    length_of_lines = longest_line + 2
    ret = (' ' + '_' * length_of_lines + "\n")
    if nlines == 1:
        formatted = text.center(longest_line + 2)
        ret += formatted.join('<>') + "\n"
else:
t = ""
for i in range(nlines):
line = lines[i].center(longest_line + 2)
if i == 0:
t += ("/" + line + "\\\n")
elif i == (nlines - 1):
t += ("\\" + line + "/\n")
else:
t += ("|" + line + "|\n")
ret += t
    ret += (' ' + '-' * length_of_lines + "\n")
ret += COW
return ret
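# Example bubble produced by get_cow("moo"), followed by the COW drawing above:
#  _____
# < moo >
#  -----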
def main():
"""main function"""
    # todo: look up real description
parser = argparse.ArgumentParser(description="Let a cow speak for you")
parser.add_argument("text", nargs="*", default=None, help="text to say")
ns = parser.parse_args()
if (ns.text is None) or (len(ns.text) == 0):
text = ""
while True:
inp = sys.stdin.read(4096)
if inp.endswith("\n"):
inp = inp[:-1]
if not inp:
break
text += inp
else:
text = " ".join(ns.text)
cow = get_cow(text)
print(cow)
if __name__ == "__main__":
main()
|
from datetime import timedelta
from functools import partial, wraps
from inspect import getmodule
import logging
from pyhap.accessory import Accessory, Bridge
from pyhap.accessory_driver import AccessoryDriver
from pyhap.const import CATEGORY_OTHER
from homeassistant.components import cover, vacuum
from homeassistant.components.cover import (
DEVICE_CLASS_GARAGE,
DEVICE_CLASS_GATE,
DEVICE_CLASS_WINDOW,
)
from homeassistant.components.media_player import DEVICE_CLASS_TV
from homeassistant.const import (
ATTR_BATTERY_CHARGING,
ATTR_BATTERY_LEVEL,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_SERVICE,
ATTR_SUPPORTED_FEATURES,
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_TYPE,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
LIGHT_LUX,
PERCENTAGE,
STATE_ON,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
__version__,
)
from homeassistant.core import Context, callback as ha_callback, split_entity_id
from homeassistant.helpers.event import (
async_track_state_change_event,
track_point_in_utc_time,
)
from homeassistant.util import dt as dt_util
from homeassistant.util.decorator import Registry
from .const import (
ATTR_DISPLAY_NAME,
ATTR_INTERGRATION,
ATTR_MANUFACTURER,
ATTR_MODEL,
ATTR_SOFTWARE_VERSION,
ATTR_VALUE,
BRIDGE_MODEL,
BRIDGE_SERIAL_NUMBER,
CHAR_BATTERY_LEVEL,
CHAR_CHARGING_STATE,
CHAR_STATUS_LOW_BATTERY,
CONF_FEATURE_LIST,
CONF_LINKED_BATTERY_CHARGING_SENSOR,
CONF_LINKED_BATTERY_SENSOR,
CONF_LOW_BATTERY_THRESHOLD,
DEBOUNCE_TIMEOUT,
DEFAULT_LOW_BATTERY_THRESHOLD,
DEVICE_CLASS_CO,
DEVICE_CLASS_CO2,
DEVICE_CLASS_PM25,
EVENT_HOMEKIT_CHANGED,
HK_CHARGING,
HK_NOT_CHARGABLE,
HK_NOT_CHARGING,
MANUFACTURER,
SERV_BATTERY_SERVICE,
TYPE_FAUCET,
TYPE_OUTLET,
TYPE_SHOWER,
TYPE_SPRINKLER,
TYPE_SWITCH,
TYPE_VALVE,
)
from .util import (
convert_to_float,
dismiss_setup_message,
format_sw_version,
show_setup_message,
validate_media_player_features,
)
_LOGGER = logging.getLogger(__name__)
SWITCH_TYPES = {
TYPE_FAUCET: "Valve",
TYPE_OUTLET: "Outlet",
TYPE_SHOWER: "Valve",
TYPE_SPRINKLER: "Valve",
TYPE_SWITCH: "Switch",
TYPE_VALVE: "Valve",
}
TYPES = Registry()
def debounce(func):
"""Decorate function to debounce callbacks from HomeKit."""
@ha_callback
def call_later_listener(self, *args):
"""Handle call_later callback."""
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
self.hass.async_add_executor_job(func, self, *debounce_params[1:])
@wraps(func)
def wrapper(self, *args):
"""Start async timer."""
debounce_params = self.debounce.pop(func.__name__, None)
if debounce_params:
debounce_params[0]() # remove listener
remove_listener = track_point_in_utc_time(
self.hass,
partial(call_later_listener, self),
dt_util.utcnow() + timedelta(seconds=DEBOUNCE_TIMEOUT),
)
self.debounce[func.__name__] = (remove_listener, *args)
logger.debug(
"%s: Start %s timeout", self.entity_id, func.__name__.replace("set_", "")
)
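    # "logger" is looked up from this enclosing scope when wrapper runs; it is
    # created below, once per decorated function, from that function's module name.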
name = getmodule(func).__name__
logger = logging.getLogger(name)
return wrapper
def get_accessory(hass, driver, state, aid, config):
"""Take state and return an accessory object if supported."""
if not aid:
_LOGGER.warning(
'The entity "%s" is not supported, since it '
"generates an invalid aid, please change it",
state.entity_id,
)
return None
a_type = None
name = config.get(CONF_NAME, state.name)
if state.domain == "alarm_control_panel":
a_type = "SecuritySystem"
elif state.domain in ("binary_sensor", "device_tracker", "person"):
a_type = "BinarySensor"
elif state.domain == "climate":
a_type = "Thermostat"
elif state.domain == "cover":
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if device_class in (DEVICE_CLASS_GARAGE, DEVICE_CLASS_GATE) and features & (
cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE
):
a_type = "GarageDoorOpener"
elif (
device_class == DEVICE_CLASS_WINDOW
and features & cover.SUPPORT_SET_POSITION
):
a_type = "Window"
elif features & cover.SUPPORT_SET_POSITION:
a_type = "WindowCovering"
elif features & (cover.SUPPORT_OPEN | cover.SUPPORT_CLOSE):
a_type = "WindowCoveringBasic"
elif state.domain == "fan":
a_type = "Fan"
elif state.domain == "humidifier":
a_type = "HumidifierDehumidifier"
elif state.domain == "light":
a_type = "Light"
elif state.domain == "lock":
a_type = "Lock"
elif state.domain == "media_player":
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
feature_list = config.get(CONF_FEATURE_LIST, [])
if device_class == DEVICE_CLASS_TV:
a_type = "TelevisionMediaPlayer"
elif validate_media_player_features(state, feature_list):
a_type = "MediaPlayer"
elif state.domain == "sensor":
device_class = state.attributes.get(ATTR_DEVICE_CLASS)
unit = state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
if device_class == DEVICE_CLASS_TEMPERATURE or unit in (
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
):
a_type = "TemperatureSensor"
elif device_class == DEVICE_CLASS_HUMIDITY and unit == PERCENTAGE:
a_type = "HumiditySensor"
elif device_class == DEVICE_CLASS_PM25 or DEVICE_CLASS_PM25 in state.entity_id:
a_type = "AirQualitySensor"
elif device_class == DEVICE_CLASS_CO:
a_type = "CarbonMonoxideSensor"
elif device_class == DEVICE_CLASS_CO2 or DEVICE_CLASS_CO2 in state.entity_id:
a_type = "CarbonDioxideSensor"
elif device_class == DEVICE_CLASS_ILLUMINANCE or unit in ("lm", LIGHT_LUX):
a_type = "LightSensor"
elif state.domain == "switch":
switch_type = config.get(CONF_TYPE, TYPE_SWITCH)
a_type = SWITCH_TYPES[switch_type]
elif state.domain == "vacuum":
features = state.attributes.get(ATTR_SUPPORTED_FEATURES, 0)
if features & (vacuum.SUPPORT_START | vacuum.SUPPORT_RETURN_HOME):
a_type = "DockVacuum"
else:
a_type = "Switch"
elif state.domain in ("automation", "input_boolean", "remote", "scene", "script"):
a_type = "Switch"
elif state.domain == "water_heater":
a_type = "WaterHeater"
elif state.domain == "camera":
a_type = "Camera"
if a_type is None:
return None
_LOGGER.debug('Add "%s" as "%s"', state.entity_id, a_type)
return TYPES[a_type](hass, driver, name, state.entity_id, aid, config)
class HomeAccessory(Accessory):
"""Adapter class for Accessory."""
def __init__(
self,
hass,
driver,
name,
entity_id,
aid,
config,
*args,
category=CATEGORY_OTHER,
**kwargs,
):
"""Initialize a Accessory object."""
super().__init__(driver=driver, display_name=name, aid=aid, *args, **kwargs)
self.config = config or {}
domain = split_entity_id(entity_id)[0].replace("_", " ")
if ATTR_MANUFACTURER in self.config:
manufacturer = self.config[ATTR_MANUFACTURER]
elif ATTR_INTERGRATION in self.config:
manufacturer = self.config[ATTR_INTERGRATION].replace("_", " ").title()
else:
manufacturer = f"{MANUFACTURER} {domain}".title()
if ATTR_MODEL in self.config:
model = self.config[ATTR_MODEL]
else:
model = domain.title()
if ATTR_SOFTWARE_VERSION in self.config:
sw_version = format_sw_version(self.config[ATTR_SOFTWARE_VERSION])
else:
sw_version = __version__
self.set_info_service(
manufacturer=manufacturer,
model=model,
serial_number=entity_id,
firmware_revision=sw_version,
)
self.category = category
self.entity_id = entity_id
self.hass = hass
self.debounce = {}
self._subscriptions = []
self._char_battery = None
self._char_charging = None
self._char_low_battery = None
self.linked_battery_sensor = self.config.get(CONF_LINKED_BATTERY_SENSOR)
self.linked_battery_charging_sensor = self.config.get(
CONF_LINKED_BATTERY_CHARGING_SENSOR
)
self.low_battery_threshold = self.config.get(
CONF_LOW_BATTERY_THRESHOLD, DEFAULT_LOW_BATTERY_THRESHOLD
)
"""Add battery service if available"""
entity_attributes = self.hass.states.get(self.entity_id).attributes
battery_found = entity_attributes.get(ATTR_BATTERY_LEVEL)
if self.linked_battery_sensor:
state = self.hass.states.get(self.linked_battery_sensor)
if state is not None:
battery_found = state.state
else:
self.linked_battery_sensor = None
_LOGGER.warning(
"%s: Battery sensor state missing: %s",
self.entity_id,
self.linked_battery_sensor,
)
if not battery_found:
return
_LOGGER.debug("%s: Found battery level", self.entity_id)
if self.linked_battery_charging_sensor:
state = self.hass.states.get(self.linked_battery_charging_sensor)
if state is None:
self.linked_battery_charging_sensor = None
_LOGGER.warning(
"%s: Battery charging binary_sensor state missing: %s",
self.entity_id,
self.linked_battery_charging_sensor,
)
else:
_LOGGER.debug("%s: Found battery charging", self.entity_id)
serv_battery = self.add_preload_service(SERV_BATTERY_SERVICE)
self._char_battery = serv_battery.configure_char(CHAR_BATTERY_LEVEL, value=0)
self._char_charging = serv_battery.configure_char(
CHAR_CHARGING_STATE, value=HK_NOT_CHARGABLE
)
self._char_low_battery = serv_battery.configure_char(
CHAR_STATUS_LOW_BATTERY, value=0
)
@property
def available(self):
"""Return if accessory is available."""
state = self.hass.states.get(self.entity_id)
return state is not None and state.state != STATE_UNAVAILABLE
async def run(self):
"""Handle accessory driver started event.
Run inside the HAP-python event loop.
"""
self.hass.add_job(self.run_handler)
async def run_handler(self):
"""Handle accessory driver started event.
Run inside the Home Assistant event loop.
"""
state = self.hass.states.get(self.entity_id)
self.async_update_state_callback(state)
self._subscriptions.append(
async_track_state_change_event(
self.hass, [self.entity_id], self.async_update_event_state_callback
)
)
battery_charging_state = None
battery_state = None
if self.linked_battery_sensor:
linked_battery_sensor_state = self.hass.states.get(
self.linked_battery_sensor
)
battery_state = linked_battery_sensor_state.state
battery_charging_state = linked_battery_sensor_state.attributes.get(
ATTR_BATTERY_CHARGING
)
self._subscriptions.append(
async_track_state_change_event(
self.hass,
[self.linked_battery_sensor],
self.async_update_linked_battery_callback,
)
)
elif state is not None:
battery_state = state.attributes.get(ATTR_BATTERY_LEVEL)
if self.linked_battery_charging_sensor:
state = self.hass.states.get(self.linked_battery_charging_sensor)
battery_charging_state = state and state.state == STATE_ON
self._subscriptions.append(
async_track_state_change_event(
self.hass,
[self.linked_battery_charging_sensor],
self.async_update_linked_battery_charging_callback,
)
)
elif battery_charging_state is None and state is not None:
battery_charging_state = state.attributes.get(ATTR_BATTERY_CHARGING)
if battery_state is not None or battery_charging_state is not None:
self.async_update_battery(battery_state, battery_charging_state)
@ha_callback
def async_update_event_state_callback(self, event):
"""Handle state change event listener callback."""
self.async_update_state_callback(event.data.get("new_state"))
@ha_callback
def async_update_state_callback(self, new_state):
"""Handle state change listener callback."""
_LOGGER.debug("New_state: %s", new_state)
if new_state is None:
return
battery_state = None
battery_charging_state = None
if (
not self.linked_battery_sensor
and ATTR_BATTERY_LEVEL in new_state.attributes
):
battery_state = new_state.attributes.get(ATTR_BATTERY_LEVEL)
if (
not self.linked_battery_charging_sensor
and ATTR_BATTERY_CHARGING in new_state.attributes
):
battery_charging_state = new_state.attributes.get(ATTR_BATTERY_CHARGING)
if battery_state is not None or battery_charging_state is not None:
self.async_update_battery(battery_state, battery_charging_state)
self.async_update_state(new_state)
@ha_callback
def async_update_linked_battery_callback(self, event):
"""Handle linked battery sensor state change listener callback."""
new_state = event.data.get("new_state")
if new_state is None:
return
if self.linked_battery_charging_sensor:
battery_charging_state = None
else:
battery_charging_state = new_state.attributes.get(ATTR_BATTERY_CHARGING)
self.async_update_battery(new_state.state, battery_charging_state)
@ha_callback
def async_update_linked_battery_charging_callback(self, event):
"""Handle linked battery charging sensor state change listener callback."""
new_state = event.data.get("new_state")
if new_state is None:
return
self.async_update_battery(None, new_state.state == STATE_ON)
@ha_callback
def async_update_battery(self, battery_level, battery_charging):
"""Update battery service if available.
        Does nothing if the accessory was created without battery characteristics.
"""
if not self._char_battery:
# Battery appeared after homekit was started
return
battery_level = convert_to_float(battery_level)
if battery_level is not None:
if self._char_battery.value != battery_level:
self._char_battery.set_value(battery_level)
is_low_battery = 1 if battery_level < self.low_battery_threshold else 0
if self._char_low_battery.value != is_low_battery:
self._char_low_battery.set_value(is_low_battery)
_LOGGER.debug(
"%s: Updated battery level to %d", self.entity_id, battery_level
)
# Charging state can appear after homekit was started
if battery_charging is None or not self._char_charging:
return
hk_charging = HK_CHARGING if battery_charging else HK_NOT_CHARGING
if self._char_charging.value != hk_charging:
self._char_charging.set_value(hk_charging)
_LOGGER.debug(
"%s: Updated battery charging to %d", self.entity_id, hk_charging
)
@ha_callback
def async_update_state(self, new_state):
"""Handle state change to update HomeKit value.
Overridden by accessory types.
"""
raise NotImplementedError()
def call_service(self, domain, service, service_data, value=None):
"""Fire event and call service for changes from HomeKit."""
self.hass.add_job(self.async_call_service, domain, service, service_data, value)
async def async_call_service(self, domain, service, service_data, value=None):
"""Fire event and call service for changes from HomeKit.
This method must be run in the event loop.
"""
event_data = {
ATTR_ENTITY_ID: self.entity_id,
ATTR_DISPLAY_NAME: self.display_name,
ATTR_SERVICE: service,
ATTR_VALUE: value,
}
context = Context()
self.hass.bus.async_fire(EVENT_HOMEKIT_CHANGED, event_data, context=context)
await self.hass.services.async_call(
domain, service, service_data, context=context
)
@ha_callback
def async_stop(self):
"""Cancel any subscriptions when the bridge is stopped."""
while self._subscriptions:
self._subscriptions.pop(0)()
class HomeBridge(Bridge):
"""Adapter class for Bridge."""
def __init__(self, hass, driver, name):
"""Initialize a Bridge object."""
super().__init__(driver, name)
self.set_info_service(
firmware_revision=__version__,
manufacturer=MANUFACTURER,
model=BRIDGE_MODEL,
serial_number=BRIDGE_SERIAL_NUMBER,
)
self.hass = hass
def setup_message(self):
"""Prevent print of pyhap setup message to terminal."""
def get_snapshot(self, info):
"""Get snapshot from accessory if supported."""
acc = self.accessories.get(info["aid"])
if acc is None:
raise ValueError("Requested snapshot for missing accessory")
if not hasattr(acc, "get_snapshot"):
raise ValueError(
"Got a request for snapshot, but the Accessory "
'does not define a "get_snapshot" method'
)
return acc.get_snapshot(info)
class HomeDriver(AccessoryDriver):
"""Adapter class for AccessoryDriver."""
def __init__(self, hass, entry_id, bridge_name, **kwargs):
"""Initialize a AccessoryDriver object."""
super().__init__(**kwargs)
self.hass = hass
self._entry_id = entry_id
self._bridge_name = bridge_name
def pair(self, client_uuid, client_public):
"""Override super function to dismiss setup message if paired."""
success = super().pair(client_uuid, client_public)
if success:
dismiss_setup_message(self.hass, self._entry_id)
return success
def unpair(self, client_uuid):
"""Override super function to show setup message if unpaired."""
super().unpair(client_uuid)
show_setup_message(
self.hass,
self._entry_id,
self._bridge_name,
self.state.pincode,
self.accessory.xhm_uri(),
)
|
import asyncio
import voluptuous as vol
from homeassistant.auth.const import GROUP_ID_ADMIN
from homeassistant.components.auth import indieauth
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.const import HTTP_BAD_REQUEST, HTTP_FORBIDDEN
from homeassistant.core import callback
from .const import (
DEFAULT_AREAS,
DOMAIN,
STEP_CORE_CONFIG,
STEP_INTEGRATION,
STEP_USER,
STEPS,
)
async def async_setup(hass, data, store):
"""Set up the onboarding view."""
hass.http.register_view(OnboardingView(data, store))
hass.http.register_view(UserOnboardingView(data, store))
hass.http.register_view(CoreConfigOnboardingView(data, store))
hass.http.register_view(IntegrationOnboardingView(data, store))
class OnboardingView(HomeAssistantView):
"""Return the onboarding status."""
requires_auth = False
url = "/api/onboarding"
name = "api:onboarding"
def __init__(self, data, store):
"""Initialize the onboarding view."""
self._store = store
self._data = data
async def get(self, request):
"""Return the onboarding status."""
return self.json(
[{"step": key, "done": key in self._data["done"]} for key in STEPS]
)
class _BaseOnboardingView(HomeAssistantView):
"""Base class for onboarding."""
step = None
def __init__(self, data, store):
"""Initialize the onboarding view."""
self._store = store
self._data = data
self._lock = asyncio.Lock()
@callback
def _async_is_done(self):
"""Return if this step is done."""
return self.step in self._data["done"]
async def _async_mark_done(self, hass):
"""Mark step as done."""
self._data["done"].append(self.step)
await self._store.async_save(self._data)
if set(self._data["done"]) == set(STEPS):
hass.data[DOMAIN] = True
class UserOnboardingView(_BaseOnboardingView):
"""View to handle create user onboarding step."""
url = "/api/onboarding/users"
name = "api:onboarding:users"
requires_auth = False
step = STEP_USER
@RequestDataValidator(
vol.Schema(
{
vol.Required("name"): str,
vol.Required("username"): str,
vol.Required("password"): str,
vol.Required("client_id"): str,
vol.Required("language"): str,
}
)
)
async def post(self, request, data):
"""Handle user creation, area creation."""
hass = request.app["hass"]
async with self._lock:
if self._async_is_done():
return self.json_message("User step already done", HTTP_FORBIDDEN)
provider = _async_get_hass_provider(hass)
await provider.async_initialize()
user = await hass.auth.async_create_user(data["name"], [GROUP_ID_ADMIN])
await hass.async_add_executor_job(
provider.data.add_auth, data["username"], data["password"]
)
credentials = await provider.async_get_or_create_credentials(
{"username": data["username"]}
)
await provider.data.async_save()
await hass.auth.async_link_user(user, credentials)
if "person" in hass.config.components:
await hass.components.person.async_create_person(
data["name"], user_id=user.id
)
            # Create default areas using the user's supplied language.
translations = await hass.helpers.translation.async_get_translations(
data["language"], "area", DOMAIN
)
area_registry = await hass.helpers.area_registry.async_get_registry()
for area in DEFAULT_AREAS:
area_registry.async_create(
translations[f"component.onboarding.area.{area}"]
)
await self._async_mark_done(hass)
# Return authorization code for fetching tokens and connect
# during onboarding.
auth_code = hass.components.auth.create_auth_code(data["client_id"], user)
return self.json({"auth_code": auth_code})
class CoreConfigOnboardingView(_BaseOnboardingView):
"""View to finish core config onboarding step."""
url = "/api/onboarding/core_config"
name = "api:onboarding:core_config"
step = STEP_CORE_CONFIG
async def post(self, request):
"""Handle finishing core config step."""
hass = request.app["hass"]
async with self._lock:
if self._async_is_done():
return self.json_message(
"Core config step already done", HTTP_FORBIDDEN
)
await self._async_mark_done(hass)
await hass.config_entries.flow.async_init(
"met", context={"source": "onboarding"}
)
if (
hass.components.hassio.is_hassio()
and "raspberrypi" in hass.components.hassio.get_core_info()["machine"]
):
await hass.config_entries.flow.async_init(
"rpi_power", context={"source": "onboarding"}
)
return self.json({})
class IntegrationOnboardingView(_BaseOnboardingView):
"""View to finish integration onboarding step."""
url = "/api/onboarding/integration"
name = "api:onboarding:integration"
step = STEP_INTEGRATION
@RequestDataValidator(
vol.Schema({vol.Required("client_id"): str, vol.Required("redirect_uri"): str})
)
async def post(self, request, data):
"""Handle token creation."""
hass = request.app["hass"]
user = request["hass_user"]
async with self._lock:
if self._async_is_done():
return self.json_message(
"Integration step already done", HTTP_FORBIDDEN
)
await self._async_mark_done(hass)
# Validate client ID and redirect uri
if not await indieauth.verify_redirect_uri(
request.app["hass"], data["client_id"], data["redirect_uri"]
):
return self.json_message(
"invalid client id or redirect uri", HTTP_BAD_REQUEST
)
# Return authorization code so we can redirect user and log them in
auth_code = hass.components.auth.create_auth_code(data["client_id"], user)
return self.json({"auth_code": auth_code})
@callback
def _async_get_hass_provider(hass):
"""Get the Home Assistant auth provider."""
for prv in hass.auth.auth_providers:
if prv.type == "homeassistant":
return prv
raise RuntimeError("No Home Assistant provider found")
|
import unittest
import pandas as pd
from pgmpy.models import BayesianModel
from pgmpy.estimators import K2Score
class TestK2Score(unittest.TestCase):
def setUp(self):
self.d1 = pd.DataFrame(
data={"A": [0, 0, 1], "B": [0, 1, 0], "C": [1, 1, 0], "D": ["X", "Y", "Z"]}
)
self.m1 = BayesianModel([("A", "C"), ("B", "C"), ("D", "B")])
self.m2 = BayesianModel([("C", "A"), ("C", "B"), ("A", "D")])
# data_link - "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv"
)
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
def test_score(self):
self.assertAlmostEqual(K2Score(self.d1).score(self.m1), -10.73813429536977)
self.assertEqual(K2Score(self.d1).score(BayesianModel()), 0)
def test_score_titanic(self):
scorer = K2Score(self.titanic_data2)
titanic = BayesianModel([("Sex", "Survived"), ("Pclass", "Survived")])
self.assertAlmostEqual(scorer.score(titanic), -1891.0630673606006)
titanic2 = BayesianModel([("Pclass", "Sex")])
titanic2.add_nodes_from(["Sex", "Survived", "Pclass"])
self.assertLess(scorer.score(titanic2), scorer.score(titanic))
def tearDown(self):
del self.d1
del self.m1
del self.m2
del self.titanic_data
del self.titanic_data2
|