from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import zip
SUCCEEDED = 'SUCCEEDED'
FAILED = 'FAILED'
SKIPPED = 'SKIPPED'
ALL = SUCCEEDED, FAILED, SKIPPED
_COL_SEPARATOR = ' '
class FailedSubstatus(object):
"""Known failure modes for benchmarks."""
# Failure due to insufficient quota, user preventable
QUOTA = 'QUOTA_EXCEEDED'
# Failure due to insufficient capacity in the cloud provider, user
# non-preventable.
INSUFFICIENT_CAPACITY = 'INSUFFICIENT_CAPACITY'
# Failure during the execution of the benchmark. These are non-retryable,
# known failure modes of the benchmark. It is recommended that the benchmark
# be completely re-run.
KNOWN_INTERMITTENT = 'KNOWN_INTERMITTENT'
# Failure due to an interruptible vm being interrupted before the benchmark
# completes. User non-preventable.
INTERRUPTED = 'INTERRUPTED'
def _CreateSummaryTable(benchmark_specs):
"""Converts statuses of benchmark runs into a formatted string table.
Args:
benchmark_specs: List of BenchmarkSpecs.
Returns:
string. Multi-line string summarizing benchmark success statuses. Example:
--------------------------------------------------------
Name UID Status Failed Substatus
--------------------------------------------------------
iperf iperf0 SUCCEEDED
iperf iperf1 FAILED
iperf iperf2 FAILED QUOTA_EXCEEDED
cluster_boot cluster_boot0 SKIPPED
--------------------------------------------------------
"""
run_status_tuples = [(spec.name, spec.uid, spec.status,
spec.failed_substatus if spec.failed_substatus else '')
for spec in benchmark_specs]
assert run_status_tuples, ('run_status_tuples must contain at least one '
'element.')
col_headers = 'Name', 'UID', 'Status', 'Failed Substatus'
col_lengths = []
for col_header, col_entries in zip(col_headers,
list(zip(*run_status_tuples))):
max_col_content_length = max(len(entry) for entry in col_entries)
col_lengths.append(max(len(col_header), max_col_content_length))
line_length = (len(col_headers) - 1) * len(_COL_SEPARATOR) + sum(col_lengths)
dash_line = '-' * line_length
line_format = _COL_SEPARATOR.join(
'{{{0}:<{1}s}}'.format(col_index, col_length)
for col_index, col_length in enumerate(col_lengths))
msg = [dash_line, line_format.format(*col_headers), dash_line]
msg.extend(line_format.format(*row_entries)
for row_entries in run_status_tuples)
msg.append(dash_line)
return os.linesep.join(msg)
def CreateSummary(benchmark_specs):
"""Logs a summary of benchmark run statuses.
Args:
benchmark_specs: List of BenchmarkSpecs.
Returns:
string. Multi-line string summarizing benchmark success statuses. Example:
Benchmark run statuses:
--------------------------------------------------------
Name UID Status Failed Substatus
--------------------------------------------------------
iperf iperf0 SUCCEEDED
iperf iperf1 FAILED
iperf iperf2 FAILED QUOTA_EXCEEDED
cluster_boot cluster_boot0 SKIPPED
--------------------------------------------------------
Success rate: 25.00% (1/4)
"""
run_status_tuples = [(spec.name, spec.uid, spec.status)
for spec in benchmark_specs]
assert run_status_tuples, ('run_status_tuples must contain at least one '
'element.')
benchmark_count = len(run_status_tuples)
successful_benchmark_count = sum(1 for _, _, status in run_status_tuples
if status == SUCCEEDED)
return os.linesep.join((
'Benchmark run statuses:',
_CreateSummaryTable(benchmark_specs),
'Success rate: {0:.2f}% ({1}/{2})'.format(
100. * successful_benchmark_count / benchmark_count,
successful_benchmark_count, benchmark_count)))
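# --- Hedged usage sketch (not part of the original module) ---
# Minimal example of driving CreateSummary with stand-in spec objects; real
# callers pass BenchmarkSpec instances. FakeSpec below is hypothetical and
# only mirrors the attributes read above (name, uid, status, failed_substatus).
if __name__ == '__main__':
  import collections
  FakeSpec = collections.namedtuple(
      'FakeSpec', ['name', 'uid', 'status', 'failed_substatus'])
  example_specs = [
      FakeSpec('iperf', 'iperf0', SUCCEEDED, None),
      FakeSpec('iperf', 'iperf1', FAILED, FailedSubstatus.QUOTA),
      FakeSpec('cluster_boot', 'cluster_boot0', SKIPPED, None),
  ]
  print(CreateSummary(example_specs))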
|
import functools
from datetime import datetime as original_datetime
from io import BytesIO
from unittest import SkipTest
from unittest import skipIf
from urllib.parse import parse_qs
from urllib.parse import urlparse
from xmlrpc.client import Transport
from django.conf import settings
from django.template import Origin
from django.template.loaders.base import Loader
from django.test.client import Client
from django.utils import timezone
class TestTransport(Transport):
"""
Handles connections to XML-RPC server through Django test client.
"""
def __init__(self, *args, **kwargs):
Transport.__init__(self, *args, **kwargs)
self.client = Client()
def request(self, host, handler, request_body, verbose=0):
self.verbose = verbose
response = self.client.post(handler,
request_body,
content_type="text/xml")
res = BytesIO(response.content)
setattr(res, 'getheader', lambda *args: '') # For Python >= 2.7
res.seek(0)
return self.parse_response(res)
def omniscient_datetime(*args):
"""
Generate an aware or naive datetime depending on USE_TZ.
"""
d = original_datetime(*args)
if settings.USE_TZ:
d = timezone.make_aware(d, timezone.utc)
return d
datetime = omniscient_datetime
def is_lib_available(library):
"""
Check if a Python library is available.
"""
try:
__import__(library)
return True
except ImportError:
return False
def skip_if_lib_not_available(lib):
"""
Skip a test if a library is not available.
"""
def decorator(test_func):
@functools.wraps(test_func)
def f(*args, **kwargs):
if not is_lib_available(lib):
raise SkipTest('%s is not available' % lib.title())
return test_func(*args, **kwargs)
return f
return decorator
def skip_if_custom_user(test_func):
"""
Skip a test if a custom user model is in use.
"""
return skipIf(settings.AUTH_USER_MODEL != 'auth.User',
'Custom user model in use')(test_func)
def url_equal(url_1, url_2):
"""
Compare two URLs with query string where
ordering does not matter.
"""
parse_result_1 = urlparse(url_1)
parse_result_2 = urlparse(url_2)
return (parse_result_1[:4] == parse_result_2[:4] and
parse_qs(parse_result_1[4]) == parse_qs(parse_result_2[4]))
class VoidLoader(Loader):
"""
Template loader which always returns
an empty template.
"""
is_usable = True
_accepts_engine_in_init = True
def get_template_sources(self, template_name):
yield Origin(
name='voidloader',
template_name=template_name,
loader=self)
def get_contents(self, origin):
return ''
class EntryDetailLoader(Loader):
"""
Template loader which only returns the content
of an entry detail template.
"""
is_usable = True
_accepts_engine_in_init = True
def get_template_sources(self, template_name):
yield Origin(
name='entrydetailloader',
template_name=template_name,
loader=self)
def get_contents(self, origin):
return ('<html><head><title>{{ object.title }}</title></head>'
'<body>{{ object.html_content|safe }}</body></html>')
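# --- Hedged usage sketch (not part of the original module) ---
# url_equal treats query-string ordering as irrelevant; the URLs below are
# arbitrary examples, not values used elsewhere in the test suite.
if __name__ == '__main__':
    assert url_equal('/blog/?page=2&tag=django', '/blog/?tag=django&page=2')
    assert not url_equal('/blog/?page=2', '/other/?page=2')
    print('url_equal examples passed')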
|
from datetime import timedelta
from pyecobee.const import ECOBEE_STATE_UNKNOWN
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
WeatherEntity,
)
from homeassistant.const import TEMP_FAHRENHEIT
from homeassistant.util import dt as dt_util
from .const import (
_LOGGER,
DOMAIN,
ECOBEE_MODEL_TO_NAME,
ECOBEE_WEATHER_SYMBOL_TO_HASS,
MANUFACTURER,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ecobee weather platform."""
data = hass.data[DOMAIN]
dev = []
for index in range(len(data.ecobee.thermostats)):
thermostat = data.ecobee.get_thermostat(index)
if "weather" in thermostat:
dev.append(EcobeeWeather(data, thermostat["name"], index))
async_add_entities(dev, True)
class EcobeeWeather(WeatherEntity):
"""Representation of Ecobee weather data."""
def __init__(self, data, name, index):
"""Initialize the Ecobee weather platform."""
self.data = data
self._name = name
self._index = index
self.weather = None
def get_forecast(self, index, param):
"""Retrieve forecast parameter."""
try:
forecast = self.weather["forecasts"][index]
return forecast[param]
except (IndexError, KeyError) as err:
raise ValueError from err
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def unique_id(self):
"""Return a unique identifier for the weather platform."""
return self.data.ecobee.get_thermostat(self._index)["identifier"]
@property
def device_info(self):
"""Return device information for the ecobee weather platform."""
thermostat = self.data.ecobee.get_thermostat(self._index)
try:
model = f"{ECOBEE_MODEL_TO_NAME[thermostat['modelNumber']]} Thermostat"
except KeyError:
_LOGGER.error(
"Model number for ecobee thermostat %s not recognized. "
"Please visit this link and provide the following information: "
"https://github.com/home-assistant/core/issues/27172 "
"Unrecognized model number: %s",
thermostat["name"],
thermostat["modelNumber"],
)
return None
return {
"identifiers": {(DOMAIN, thermostat["identifier"])},
"name": self.name,
"manufacturer": MANUFACTURER,
"model": model,
}
@property
def condition(self):
"""Return the current condition."""
try:
return ECOBEE_WEATHER_SYMBOL_TO_HASS[self.get_forecast(0, "weatherSymbol")]
except ValueError:
return None
@property
def temperature(self):
"""Return the temperature."""
try:
return float(self.get_forecast(0, "temperature")) / 10
except ValueError:
return None
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def pressure(self):
"""Return the pressure."""
try:
return int(self.get_forecast(0, "pressure"))
except ValueError:
return None
@property
def humidity(self):
"""Return the humidity."""
try:
return int(self.get_forecast(0, "relativeHumidity"))
except ValueError:
return None
@property
def visibility(self):
"""Return the visibility."""
try:
return int(self.get_forecast(0, "visibility")) / 1000
except ValueError:
return None
@property
def wind_speed(self):
"""Return the wind speed."""
try:
return int(self.get_forecast(0, "windSpeed"))
except ValueError:
return None
@property
def wind_bearing(self):
"""Return the wind direction."""
try:
return int(self.get_forecast(0, "windBearing"))
except ValueError:
return None
@property
def attribution(self):
"""Return the attribution."""
if not self.weather:
return None
station = self.weather.get("weatherStation", "UNKNOWN")
time = self.weather.get("timestamp", "UNKNOWN")
return f"Ecobee weather provided by {station} at {time} UTC"
@property
def forecast(self):
"""Return the forecast array."""
if "forecasts" not in self.weather:
return None
forecasts = []
date = dt_util.utcnow()
for day in range(0, 5):
forecast = _process_forecast(self.weather["forecasts"][day])
if forecast is None:
continue
forecast[ATTR_FORECAST_TIME] = date.isoformat()
date += timedelta(days=1)
forecasts.append(forecast)
if forecasts:
return forecasts
return None
async def async_update(self):
"""Get the latest weather data."""
await self.data.update()
thermostat = self.data.ecobee.get_thermostat(self._index)
self.weather = thermostat.get("weather")
def _process_forecast(json):
"""Process a single ecobee API forecast to return expected values."""
forecast = {}
try:
forecast[ATTR_FORECAST_CONDITION] = ECOBEE_WEATHER_SYMBOL_TO_HASS[
json["weatherSymbol"]
]
if json["tempHigh"] != ECOBEE_STATE_UNKNOWN:
forecast[ATTR_FORECAST_TEMP] = float(json["tempHigh"]) / 10
if json["tempLow"] != ECOBEE_STATE_UNKNOWN:
forecast[ATTR_FORECAST_TEMP_LOW] = float(json["tempLow"]) / 10
if json["windBearing"] != ECOBEE_STATE_UNKNOWN:
forecast[ATTR_FORECAST_WIND_BEARING] = int(json["windBearing"])
if json["windSpeed"] != ECOBEE_STATE_UNKNOWN:
forecast[ATTR_FORECAST_WIND_SPEED] = int(json["windSpeed"])
except (ValueError, IndexError, KeyError):
return None
if forecast:
return forecast
return None
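# --- Hedged data sketch (not part of the original platform code) ---
# Shape of a single forecast entry as consumed by _process_forecast above.
# The values are invented; ecobee reports temperatures in tenths of a degree
# Fahrenheit, so 722 becomes 72.2 after the division above. Whether a given
# weatherSymbol maps to a condition depends on ECOBEE_WEATHER_SYMBOL_TO_HASS
# in .const.
_EXAMPLE_FORECAST = {
    "weatherSymbol": 0,
    "tempHigh": 722,
    "tempLow": 555,
    "windBearing": 180,
    "windSpeed": 7,
}
# _process_forecast(_EXAMPLE_FORECAST) returns a dict keyed by the
# ATTR_FORECAST_* constants imported above, or None if any lookup fails.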
|
from django.views.generic.base import RedirectView
from zinnia.models.entry import Entry
class EntryRandom(RedirectView):
"""
View handling a random entry,
simply doing a redirection after the random selection.
"""
permanent = False
def get_redirect_url(self, **kwargs):
"""
Select a random published entry and
return its absolute URL.
"""
entry = Entry.published.all().order_by('?')[0]
return entry.get_absolute_url()
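# --- Hedged usage sketch (not part of the original module) ---
# One way such a view is typically wired into a URLconf; the pattern and URL
# name below are illustrative, not Zinnia's actual configuration.
#
#     from django.urls import path
#
#     urlpatterns = [
#         path('random/', EntryRandom.as_view(), name='entry_random'),
#     ]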
|
from typing import Any, Dict
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from homeassistant.helpers.typing import HomeAssistantType
from .account import IcloudAccount, IcloudDevice
from .const import DOMAIN
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up device tracker for iCloud component."""
account = hass.data[DOMAIN][entry.unique_id]
tracked = set()
@callback
def update_account():
"""Update the values of the account."""
add_entities(account, async_add_entities, tracked)
account.listeners.append(
async_dispatcher_connect(hass, account.signal_device_new, update_account)
)
update_account()
@callback
def add_entities(account, async_add_entities, tracked):
"""Add new tracker entities from the account."""
new_tracked = []
for dev_id, device in account.devices.items():
if dev_id in tracked or device.battery_level is None:
continue
new_tracked.append(IcloudDeviceBatterySensor(account, device))
tracked.add(dev_id)
if new_tracked:
async_add_entities(new_tracked, True)
class IcloudDeviceBatterySensor(Entity):
"""Representation of a iCloud device battery sensor."""
def __init__(self, account: IcloudAccount, device: IcloudDevice):
"""Initialize the battery sensor."""
self._account = account
self._device = device
self._unsub_dispatcher = None
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return f"{self._device.unique_id}_battery"
@property
def name(self) -> str:
"""Sensor name."""
return f"{self._device.name} battery state"
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def state(self) -> int:
"""Battery state percentage."""
return self._device.battery_level
@property
def unit_of_measurement(self) -> str:
"""Battery state measured in percentage."""
return PERCENTAGE
@property
def icon(self) -> str:
"""Battery state icon handling."""
return icon_for_battery_level(
battery_level=self._device.battery_level,
charging=self._device.battery_status == "Charging",
)
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return default attributes for the iCloud device entity."""
return self._device.state_attributes
@property
def device_info(self) -> Dict[str, Any]:
"""Return the device information."""
return {
"identifiers": {(DOMAIN, self._device.unique_id)},
"name": self._device.name,
"manufacturer": "Apple",
"model": self._device.device_model,
}
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
async def async_added_to_hass(self):
"""Register state update callback."""
self._unsub_dispatcher = async_dispatcher_connect(
self.hass, self._account.signal_device_update, self.async_write_ha_state
)
async def async_will_remove_from_hass(self):
"""Clean up after entity before removal."""
self._unsub_dispatcher()
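# --- Hedged usage note (not part of the original platform code) ---
# The icon property above delegates entirely to Home Assistant's
# icon_for_battery_level helper, e.g. (illustrative values):
#
#     icon_for_battery_level(battery_level=80, charging=False)
#         -> a discharging "mdi:battery-..." icon for an 80% battery
#     icon_for_battery_level(battery_level=80, charging=True)
#         -> the charging variant of that icon
#
# The exact icon strings come from the helper, not from this sensor class.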
|
import logging
from pathlib import Path
from typing import List, MutableMapping, Optional, Union
import discord
import lavalink
from redbot.core import Config, commands
from redbot.core.bot import Red
from redbot.core.i18n import Translator
from redbot.core.utils import AsyncIter
from ..errors import NotAllowed
from ..utils import PlaylistScope
from .api_utils import PlaylistFetchResult, prepare_config_scope, standardize_scope
from .playlist_wrapper import PlaylistWrapper
log = logging.getLogger("red.cogs.Audio.api.PlaylistsInterface")
_ = Translator("Audio", Path(__file__))
class Playlist:
"""A single playlist."""
def __init__(
self,
bot: Red,
playlist_api: PlaylistWrapper,
scope: str,
author: int,
playlist_id: int,
name: str,
playlist_url: Optional[str] = None,
tracks: Optional[List[MutableMapping]] = None,
guild: Union[discord.Guild, int, None] = None,
):
self.bot = bot
self.guild = guild
self.scope = standardize_scope(scope)
self.config_scope = prepare_config_scope(self.bot, self.scope, author, guild)
self.scope_id = self.config_scope[-1]
self.author = author
self.author_id = getattr(self.author, "id", self.author)
self.guild_id = (
getattr(guild, "id", guild) if self.scope == PlaylistScope.GLOBAL.value else None
)
self.id = playlist_id
self.name = name
self.url = playlist_url
self.tracks = tracks or []
self.tracks_obj = [lavalink.Track(data=track) for track in self.tracks]
self.playlist_api = playlist_api
def __repr__(self):
return (
f"Playlist(name={self.name}, id={self.id}, scope={self.scope}, "
f"scope_id={self.scope_id}, author={self.author_id}, "
f"tracks={len(self.tracks)}, url={self.url})"
)
async def edit(self, data: MutableMapping):
"""
Edits a Playlist.
Parameters
----------
data: dict
The attributes to change.
"""
# Disallow ID editing
if "id" in data:
raise NotAllowed("Playlist ID cannot be edited.")
for item in list(data.keys()):
setattr(self, item, data[item])
await self.save()
return self
async def save(self):
"""Saves a Playlist."""
scope, scope_id = self.config_scope
await self.playlist_api.upsert(
scope,
playlist_id=int(self.id),
playlist_name=self.name,
scope_id=scope_id,
author_id=self.author_id,
playlist_url=self.url,
tracks=self.tracks,
)
def to_json(self) -> MutableMapping:
"""Transform the object to a dict.
Returns
-------
dict
The playlist in the form of a dict.
"""
data = dict(
id=self.id,
author=self.author_id,
guild=self.guild_id,
name=self.name,
playlist_url=self.url,
tracks=self.tracks,
)
return data
@classmethod
async def from_json(
cls,
bot: Red,
playlist_api: PlaylistWrapper,
scope: str,
playlist_number: int,
data: PlaylistFetchResult,
**kwargs,
) -> "Playlist":
"""Get a Playlist object from the provided information.
Parameters
----------
bot: Red
The bot's instance. Needed to get the target user.
playlist_api: PlaylistWrapper
The Playlist API interface.
scope: str
The custom config scope. One of 'GLOBALPLAYLIST', 'GUILDPLAYLIST' or 'USERPLAYLIST'.
playlist_number: int
The playlist's number.
data: PlaylistFetchResult
The PlaylistFetchResult representation of the playlist to be retrieved.
**kwargs
Extra attributes for the Playlist instance which override values
in the data dict. These should be complete objects and not
IDs, where possible.
Returns
-------
Playlist
The playlist object for the requested playlist.
Raises
------
`InvalidPlaylistScope`
Passing a scope that is not supported.
`MissingGuild`
Trying to access the Guild scope without a guild.
`MissingAuthor`
Trying to access the User scope without a user id.
"""
guild = data.scope_id if scope == PlaylistScope.GUILD.value else kwargs.get("guild")
author = data.author_id
playlist_id = data.playlist_id or playlist_number
name = data.playlist_name
playlist_url = data.playlist_url
tracks = data.tracks
return cls(
bot=bot,
playlist_api=playlist_api,
guild=guild,
scope=scope,
author=author,
playlist_id=playlist_id,
name=name,
playlist_url=playlist_url,
tracks=tracks,
)
class PlaylistCompat23:
"""A single playlist, migrating from Schema 2 to Schema 3"""
def __init__(
self,
bot: Red,
playlist_api: PlaylistWrapper,
scope: str,
author: int,
playlist_id: int,
name: str,
playlist_url: Optional[str] = None,
tracks: Optional[List[MutableMapping]] = None,
guild: Union[discord.Guild, int, None] = None,
):
self.bot = bot
self.guild = guild
self.scope = standardize_scope(scope)
self.author = author
self.id = playlist_id
self.name = name
self.url = playlist_url
self.tracks = tracks or []
self.playlist_api = playlist_api
@classmethod
async def from_json(
cls,
bot: Red,
playlist_api: PlaylistWrapper,
scope: str,
playlist_number: int,
data: MutableMapping,
**kwargs,
) -> "PlaylistCompat23":
"""Get a Playlist object from the provided information.
Parameters
----------
bot: Red
The Bot instance.
playlist_api: PlaylistWrapper
The Playlist API interface.
scope: str
The custom config scope. One of 'GLOBALPLAYLIST', 'GUILDPLAYLIST' or 'USERPLAYLIST'.
playlist_number: int
The playlist's number.
data: MutableMapping
The JSON representation of the playlist to be retrieved.
**kwargs
Extra attributes for the Playlist instance which override values
in the data dict. These should be complete objects and not
IDs, where possible.
Returns
-------
Playlist
The playlist object for the requested playlist.
Raises
------
`InvalidPlaylistScope`
Passing a scope that is not supported.
`MissingGuild`
Trying to access the Guild scope without a guild.
`MissingAuthor`
Trying to access the User scope without a user id.
"""
guild = data.get("guild") or kwargs.get("guild")
author: int = data.get("author") or 0
playlist_id = data.get("id") or playlist_number
name = data.get("name", "Unnamed")
playlist_url = data.get("playlist_url", None)
tracks = data.get("tracks", [])
return cls(
bot=bot,
playlist_api=playlist_api,
guild=guild,
scope=scope,
author=author,
playlist_id=playlist_id,
name=name,
playlist_url=playlist_url,
tracks=tracks,
)
async def save(self):
"""Saves a Playlist to SQL."""
scope, scope_id = prepare_config_scope(self.bot, self.scope, self.author, self.guild)
await self.playlist_api.upsert(
scope,
playlist_id=int(self.id),
playlist_name=self.name,
scope_id=scope_id,
author_id=self.author,
playlist_url=self.url,
tracks=self.tracks,
)
async def get_all_playlist_for_migration23(
bot: Red,
playlist_api: PlaylistWrapper,
config: Config,
scope: str,
guild: Union[discord.Guild, int] = None,
) -> List[PlaylistCompat23]:
"""
Gets all playlists for the specified scope.
Parameters
----------
bot: Red
The Bot instance.
playlist_api: PlaylistWrapper
The Playlist API interface.
config: Config
The Audio cog Config instance.
scope: str
The custom config scope. One of 'GLOBALPLAYLIST', 'GUILDPLAYLIST' or 'USERPLAYLIST'.
guild: discord.Guild
The guild to get the playlist from if scope is GUILDPLAYLIST.
Returns
-------
list
A list of all playlists for the specified scope
Raises
------
`InvalidPlaylistScope`
Passing a scope that is not supported.
`MissingGuild`
Trying to access the Guild scope without a guild.
`MissingAuthor`
Trying to access the User scope without a user id.
"""
playlists = await config.custom(scope).all()
if scope == PlaylistScope.GLOBAL.value:
return [
await PlaylistCompat23.from_json(
bot,
playlist_api,
scope,
playlist_number,
playlist_data,
guild=guild,
author=int(playlist_data.get("author", 0)),
)
async for playlist_number, playlist_data in AsyncIter(playlists.items())
]
elif scope == PlaylistScope.USER.value:
return [
await PlaylistCompat23.from_json(
bot,
playlist_api,
scope,
playlist_number,
playlist_data,
guild=guild,
author=int(user_id),
)
async for user_id, scopedata in AsyncIter(playlists.items())
async for playlist_number, playlist_data in AsyncIter(scopedata.items())
]
else:
return [
await PlaylistCompat23.from_json(
bot,
playlist_api,
scope,
playlist_number,
playlist_data,
guild=int(guild_id),
author=int(playlist_data.get("author", 0)),
)
async for guild_id, scopedata in AsyncIter(playlists.items())
async for playlist_number, playlist_data in AsyncIter(scopedata.items())
]
async def get_playlist(
playlist_number: int,
scope: str,
bot: Red,
playlist_api: PlaylistWrapper,
guild: Union[discord.Guild, int] = None,
author: Union[discord.abc.User, int] = None,
) -> Playlist:
"""
Gets the playlist with the associated playlist number.
Parameters
----------
playlist_number: int
The playlist number for the playlist to get.
playlist_api: PlaylistWrapper
The Playlist API interface.
scope: str
The custom config scope. One of 'GLOBALPLAYLIST', 'GUILDPLAYLIST' or 'USERPLAYLIST'.
guild: discord.Guild
The guild to get the playlist from if scope is GUILDPLAYLIST.
author: int
The ID of the user to get the playlist from if scope is USERPLAYLIST.
bot: Red
The bot's instance.
Returns
-------
Playlist
The playlist associated with the playlist number.
Raises
------
`RuntimeError`
If there is no playlist for the specified number.
`InvalidPlaylistScope`
Passing a scope that is not supported.
`MissingGuild`
Trying to access the Guild scope without a guild.
`MissingAuthor`
Trying to access the User scope without a user id.
"""
scope_standard, scope_id = prepare_config_scope(bot, scope, author, guild)
playlist_data = await playlist_api.fetch(scope_standard, playlist_number, scope_id)
if not (playlist_data and playlist_data.playlist_id):
raise RuntimeError(f"That playlist does not exist for the following scope: {scope}")
return await Playlist.from_json(
bot,
playlist_api,
scope_standard,
playlist_number,
playlist_data,
guild=guild,
author=author,
)
async def get_all_playlist(
scope: str,
bot: Red,
playlist_api: PlaylistWrapper,
guild: Union[discord.Guild, int] = None,
author: Union[discord.abc.User, int] = None,
specified_user: bool = False,
) -> List[Playlist]:
"""
Gets all playlists for the specified scope.
Parameters
----------
scope: str
The custom config scope. One of 'GLOBALPLAYLIST', 'GUILDPLAYLIST' or 'USERPLAYLIST'.
guild: discord.Guild
The guild to get the playlist from if scope is GUILDPLAYLIST.
author: int
The ID of the user to get the playlist from if scope is USERPLAYLIST.
bot: Red
The bot's instance
playlist_api: PlaylistWrapper
The Playlist API interface.
specified_user: bool
Whether or not a user ID was explicitly passed as an argument.
Returns
-------
list
A list of all playlists for the specified scope
Raises
------
`InvalidPlaylistScope`
Passing a scope that is not supported.
`MissingGuild`
Trying to access the Guild scope without a guild.
`MissingAuthor`
Trying to access the User scope without a user id.
"""
scope_standard, scope_id = prepare_config_scope(bot, scope, author, guild)
if specified_user:
user_id = getattr(author, "id", author)
playlists = await playlist_api.fetch_all(scope_standard, scope_id, author_id=user_id)
else:
playlists = await playlist_api.fetch_all(scope_standard, scope_id)
playlist_list = []
async for playlist in AsyncIter(playlists):
playlist_list.append(
await Playlist.from_json(
bot,
playlist_api,
scope,
playlist.playlist_id,
playlist,
guild=guild,
author=author,
)
)
return playlist_list
async def get_all_playlist_converter(
scope: str,
bot: Red,
playlist_api: PlaylistWrapper,
arg: str,
guild: Union[discord.Guild, int] = None,
author: Union[discord.abc.User, int] = None,
) -> List[Playlist]:
"""
Gets all playlists for the specified scope.
Parameters
----------
scope: str
The custom config scope. One of 'GLOBALPLAYLIST', 'GUILDPLAYLIST' or 'USERPLAYLIST'.
guild: discord.Guild
The guild to get the playlist from if scope is GUILDPLAYLIST.
author: int
The ID of the user to get the playlist from if scope is USERPLAYLIST.
bot: Red
The bot's instance
arg: str
The value to lookup.
playlist_api: PlaylistWrapper
The Playlist API interface.
Returns
-------
list
A list of all playlists for the specified scope
Raises
------
`InvalidPlaylistScope`
Passing a scope that is not supported.
`MissingGuild`
Trying to access the Guild scope without a guild.
`MissingAuthor`
Trying to access the User scope without a user id.
"""
scope_standard, scope_id = prepare_config_scope(bot, scope, author, guild)
playlists = await playlist_api.fetch_all_converter(
scope_standard, playlist_name=arg, playlist_id=arg
)
playlist_list = []
async for playlist in AsyncIter(playlists):
playlist_list.append(
await Playlist.from_json(
bot,
playlist_api,
scope,
playlist.playlist_id,
playlist,
guild=guild,
author=author,
)
)
return playlist_list
async def create_playlist(
ctx: commands.Context,
playlist_api: PlaylistWrapper,
scope: str,
playlist_name: str,
playlist_url: Optional[str] = None,
tracks: Optional[List[MutableMapping]] = None,
author: Optional[discord.User] = None,
guild: Optional[discord.Guild] = None,
) -> Optional[Playlist]:
"""Creates a new Playlist.
Parameters
----------
ctx: commands.Context
The context in which the playlist is being created.
scope: str
The custom config scope. One of 'GLOBALPLAYLIST', 'GUILDPLAYLIST' or 'USERPLAYLIST'.
playlist_name: str
The name of the new playlist.
playlist_url: str
The URL of the new playlist.
tracks: List[MutableMapping]
A list of tracks to add to the playlist.
author: discord.User
The Author of the playlist.
If provided it will create a playlist under this user.
This is only required when creating a playlist in User scope.
guild: discord.Guild
The guild to create this playlist under.
This is only used when creating a playlist in the Guild scope
playlist_api: PlaylistWrapper
The Playlist API interface.
Raises
------
`InvalidPlaylistScope`
Passing a scope that is not supported.
`MissingGuild`
Trying to access the Guild scope without a guild.
`MissingAuthor`
Trying to access the User scope without a user id.
"""
playlist = Playlist(
ctx.bot,
playlist_api,
scope,
author.id if author else None,
ctx.message.id,
playlist_name,
playlist_url,
tracks,
guild or ctx.guild,
)
await playlist.save()
return playlist
async def reset_playlist(
bot: Red,
playlist_api: PlaylistWrapper,
scope: str,
guild: Union[discord.Guild, int] = None,
author: Union[discord.abc.User, int] = None,
) -> None:
"""Wipes all playlists for the specified scope.
Parameters
----------
bot: Red
The bot's instance
scope: str
The custom config scope. One of 'GLOBALPLAYLIST', 'GUILDPLAYLIST' or 'USERPLAYLIST'.
guild: discord.Guild
The guild to get the playlist from if scope is GUILDPLAYLIST.
author: int
The ID of the user to get the playlist from if scope is USERPLAYLIST.
playlist_api: PlaylistWrapper
The Playlist API interface.
Raises
------
`InvalidPlaylistScope`
Passing a scope that is not supported.
`MissingGuild`
Trying to access the Guild scope without a guild.
`MissingAuthor`
Trying to access the User scope without a user id.
"""
scope, scope_id = prepare_config_scope(bot, scope, author, guild)
await playlist_api.drop(scope)
await playlist_api.create_table()
async def delete_playlist(
bot: Red,
playlist_api: PlaylistWrapper,
scope: str,
playlist_id: Union[str, int],
guild: discord.Guild,
author: Union[discord.abc.User, int] = None,
) -> None:
"""Deletes the specified playlist.
Parameters
----------
bot: Red
The bot's instance
scope: str
The custom config scope. One of 'GLOBALPLAYLIST', 'GUILDPLAYLIST' or 'USERPLAYLIST'.
playlist_id: Union[str, int]
The ID of the playlist.
guild: discord.Guild
The guild to get the playlist from if scope is GUILDPLAYLIST.
author: int
The ID of the user to get the playlist from if scope is USERPLAYLIST.
playlist_api: PlaylistWrapper
The Playlist API interface.
Raises
------
`InvalidPlaylistScope`
Passing a scope that is not supported.
`MissingGuild`
Trying to access the Guild scope without a guild.
`MissingAuthor`
Trying to access the User scope without a user id.
"""
scope, scope_id = prepare_config_scope(bot, scope, author, guild)
await playlist_api.delete(scope, int(playlist_id), scope_id)
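# --- Hedged data sketch (not part of the original API module) ---
# Rough shape of the dict produced by Playlist.to_json above; every value
# below is invented for illustration.
#
#     {
#         "id": 1234567890,
#         "author": 111111111111111111,
#         "guild": 222222222222222222,
#         "name": "My playlist",
#         "playlist_url": None,
#         "tracks": [...],  # raw lavalink track dicts, as stored
#     }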
|
from django.conf import settings
from weblate.utils.errors import report_error
from weblate.utils.request import get_ip_address, get_user_agent_raw
from weblate.utils.site import get_site_url
def is_spam(text, request):
"""Generic spam checker interface."""
if settings.AKISMET_API_KEY:
from akismet import Akismet
akismet = Akismet(settings.AKISMET_API_KEY, get_site_url())
try:
return akismet.comment_check(
get_ip_address(request),
get_user_agent_raw(request),
comment_content=text,
comment_type="comment",
)
except OSError:
report_error()
return True
return False
def report_spam(text, user_ip, user_agent):
if not settings.AKISMET_API_KEY:
return
from akismet import Akismet, ProtocolError
akismet = Akismet(settings.AKISMET_API_KEY, get_site_url())
try:
akismet.submit_spam(
user_ip, user_agent, comment_content=text, comment_type="comment"
)
except (ProtocolError, OSError):
report_error()
|
import io
import json
import os
import re
import shutil
import shlex
import subprocess
import tempfile
from functools import wraps
import lxml
import requests
from .utils import req_missing, LOGGER, slugify
try:
import typogrify.filters as typo
except ImportError:
typo = None
class _ConfigurableFilter(object):
"""Allow Nikola to configure filter with site's config."""
def __init__(self, **configuration_variables):
"""Define which arguments to configure from which configuration variables."""
self.configuration_variables = configuration_variables
def __call__(self, f):
"""Store configuration_variables as attribute of function."""
f.configuration_variables = self.configuration_variables
return f
def apply_to_binary_file(f):
"""Apply a filter to a binary file.
Take a function f that transforms a data argument, and returns
a function that takes a filename and applies f to the contents,
in place. Reads files in binary mode.
"""
@wraps(f)
def f_in_file(fname, *args, **kwargs):
with open(fname, 'rb') as inf:
data = inf.read()
data = f(data, *args, **kwargs)
with open(fname, 'wb+') as outf:
outf.write(data)
return f_in_file
def apply_to_text_file(f):
"""Apply a filter to a text file.
Take a function f that transforms a data argument, and returns
a function that takes a filename and applies f to the contents,
in place. Reads files in UTF-8.
"""
@wraps(f)
def f_in_file(fname, *args, **kwargs):
with io.open(fname, 'r', encoding='utf-8-sig') as inf:
data = inf.read()
data = f(data, *args, **kwargs)
with io.open(fname, 'w+', encoding='utf-8') as outf:
outf.write(data)
return f_in_file
def list_replace(the_list, find, replacement):
"""Replace all occurrences of ``find`` with ``replacement`` in ``the_list``."""
for i, v in enumerate(the_list):
if v == find:
the_list[i] = replacement
def runinplace(command, infile):
"""Run a command in-place on a file.
command is a string of the form: "commandname %1 %2" and
it will be execed with infile as %1 and a temporary file
as %2. Then, that temporary file will be moved over %1.
Example usage:
runinplace("yui-compressor %1 -o %2", "myfile.css")
That will replace myfile.css with a minified version.
You can also supply command as a list.
"""
if not isinstance(command, list):
command = shlex.split(command)
tmpdir = None
if "%2" in command:
tmpdir = tempfile.mkdtemp(prefix="nikola")
tmpfname = os.path.join(tmpdir, os.path.basename(infile))
try:
list_replace(command, "%1", infile)
if tmpdir:
list_replace(command, "%2", tmpfname)
subprocess.check_call(command)
if tmpdir:
shutil.move(tmpfname, infile)
finally:
if tmpdir:
shutil.rmtree(tmpdir)
@_ConfigurableFilter(executable='YUI_COMPRESSOR_EXECUTABLE')
def yui_compressor(infile, executable=None):
"""Run YUI Compressor on a file."""
yuicompressor = executable
if not yuicompressor:
try:
subprocess.call('yui-compressor', stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
yuicompressor = 'yui-compressor'
except Exception:
pass
if not yuicompressor:
try:
subprocess.call('yuicompressor', stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
yuicompressor = 'yuicompressor'
except Exception:
raise Exception("yui-compressor is not installed.")
return runinplace('{} --nomunge %1 -o %2'.format(yuicompressor), infile)
@_ConfigurableFilter(executable='CLOSURE_COMPILER_EXECUTABLE')
def closure_compiler(infile, executable='closure-compiler'):
"""Run closure-compiler on a file."""
return runinplace('{} --warning_level QUIET --js %1 --js_output_file %2'.format(executable), infile)
@_ConfigurableFilter(executable='OPTIPNG_EXECUTABLE')
def optipng(infile, executable='optipng'):
"""Run optipng on a file."""
return runinplace("{} -preserve -o2 -quiet %1".format(executable), infile)
@_ConfigurableFilter(executable='JPEGOPTIM_EXECUTABLE')
def jpegoptim(infile, executable='jpegoptim'):
"""Run jpegoptim on a file."""
return runinplace("{} -p --strip-all -q %1".format(executable), infile)
@_ConfigurableFilter(executable='JPEGOPTIM_EXECUTABLE')
def jpegoptim_progressive(infile, executable='jpegoptim'):
"""Run jpegoptim on a file and convert to progressive."""
return runinplace("{} -p --strip-all --all-progressive -q %1".format(executable), infile)
@_ConfigurableFilter(executable='HTML_TIDY_EXECUTABLE')
def html_tidy_withconfig(infile, executable='tidy5'):
"""Run HTML Tidy with tidy5.conf as config file."""
return _html_tidy_runner(infile, "-quiet --show-info no --show-warnings no -utf8 -indent -config tidy5.conf -modify %1", executable=executable)
@_ConfigurableFilter(executable='HTML_TIDY_EXECUTABLE')
def html_tidy_nowrap(infile, executable='tidy5'):
"""Run HTML Tidy without line wrapping."""
return _html_tidy_runner(infile, "-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1", executable=executable)
@_ConfigurableFilter(executable='HTML_TIDY_EXECUTABLE')
def html_tidy_wrap(infile, executable='tidy5'):
"""Run HTML Tidy with line wrapping."""
return _html_tidy_runner(infile, "-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes no --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1", executable=executable)
@_ConfigurableFilter(executable='HTML_TIDY_EXECUTABLE')
def html_tidy_wrap_attr(infile, executable='tidy5'):
"""Run HTML tidy with line wrapping and attribute indentation."""
return _html_tidy_runner(infile, "-quiet --show-info no --show-warnings no -utf8 -indent --indent-attributes yes --sort-attributes alpha --wrap 80 --wrap-sections no --drop-empty-elements no --tidy-mark no -modify %1", executable=executable)
@_ConfigurableFilter(executable='HTML_TIDY_EXECUTABLE')
def html_tidy_mini(infile, executable='tidy5'):
"""Run HTML tidy with minimal settings."""
return _html_tidy_runner(infile, "-quiet --show-info no --show-warnings no -utf8 --indent-attributes no --sort-attributes alpha --wrap 0 --wrap-sections no --tidy-mark no --drop-empty-elements no -modify %1", executable=executable)
def _html_tidy_runner(infile, options, executable='tidy5'):
"""Run HTML Tidy."""
# Warnings (returncode 1) are not critical, and *everything* is a warning.
try:
status = runinplace(executable + " " + options, infile)
except subprocess.CalledProcessError as err:
status = 0 if err.returncode == 1 else err.returncode
return status
@apply_to_text_file
def html5lib_minify(data):
"""Minify with html5lib."""
import html5lib
import html5lib.serializer
data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),
tree='lxml',
quote_attr_values='spec',
omit_optional_tags=True,
minimize_boolean_attributes=True,
strip_whitespace=True,
alphabetical_attributes=True,
escape_lt_in_attrs=True)
return data
@apply_to_text_file
def html5lib_xmllike(data):
"""Transform document to an XML-like form with html5lib."""
import html5lib
import html5lib.serializer
data = html5lib.serializer.serialize(html5lib.parse(data, treebuilder='lxml'),
tree='lxml',
quote_attr_values='always',
omit_optional_tags=False,
strip_whitespace=False,
alphabetical_attributes=True,
escape_lt_in_attrs=True)
return data
@apply_to_text_file
def minify_lines(data):
"""Do nothing -- deprecated filter."""
return data
def _run_typogrify(data, typogrify_filters, ignore_tags=None):
"""Run typogrify with ignore support."""
if ignore_tags is None:
ignore_tags = ["title"]
data = _normalize_html(data)
section_list = typo.process_ignores(data, ignore_tags)
rendered_text = ""
for text_item, should_process in section_list:
if should_process:
for f in typogrify_filters:
text_item = f(text_item)
rendered_text += text_item
return rendered_text
@apply_to_text_file
def typogrify(data):
"""Prettify text with typogrify."""
if typo is None:
req_missing(['typogrify'], 'use the typogrify filter', optional=True)
return data
return _run_typogrify(data, [typo.amp, typo.widont, typo.smartypants, typo.caps, typo.initial_quotes])
def _smarty_oldschool(text):
try:
import smartypants
except ImportError:
raise typo.TypogrifyError("Error in {% smartypants %} filter: The Python smartypants library isn't installed.")
else:
output = smartypants.convert_dashes_oldschool(text)
return output
@apply_to_text_file
def typogrify_oldschool(data):
"""Prettify text with typogrify."""
if typo is None:
req_missing(['typogrify'], 'use the typogrify_oldschool filter', optional=True)
return data
return _run_typogrify(data, [typo.amp, typo.widont, _smarty_oldschool, typo.smartypants, typo.caps, typo.initial_quotes])
@apply_to_text_file
def typogrify_sans_widont(data):
"""Prettify text with typogrify, skipping the widont filter."""
# typogrify with widont disabled because it caused broken headline
# wrapping, see issue #1465
if typo is None:
req_missing(['typogrify'], 'use the typogrify_sans_widont filter')
return data
return _run_typogrify(data, [typo.amp, typo.smartypants, typo.caps, typo.initial_quotes])
@apply_to_text_file
def typogrify_custom(data, typogrify_filters, ignore_tags=None):
"""Run typogrify with a custom list of fliter functions."""
if typo is None:
req_missing(['typogrify'], 'use the typogrify filter', optional=True)
return data
return _run_typogrify(data, typogrify_filters, ignore_tags)
@apply_to_text_file
def php_template_injection(data):
"""Insert PHP code into Nikola templates."""
template = re.search(r'<\!-- __NIKOLA_PHP_TEMPLATE_INJECTION source\:(.*) checksum\:(.*)__ -->', data)
if template:
source = template.group(1)
with io.open(source, "r", encoding="utf-8-sig") as in_file:
phpdata = in_file.read()
_META_SEPARATOR = '(' + os.linesep * 2 + '|' + ('\n' * 2) + '|' + ("\r\n" * 2) + ')'
phpdata = re.split(_META_SEPARATOR, phpdata, maxsplit=1)[-1]
phpdata = data.replace(template.group(0), phpdata)
return phpdata
else:
return data
@apply_to_text_file
def cssminify(data):
"""Minify CSS using https://cssminifier.com/."""
try:
url = 'https://cssminifier.com/raw'
_data = {'input': data}
response = requests.post(url, data=_data)
if response.status_code != 200:
LOGGER.error("can't use cssminifier.com: HTTP status {}", response.status_code)
return data
return response.text
except Exception as exc:
LOGGER.error("can't use cssminifier.com: {}", exc)
return data
@apply_to_text_file
def jsminify(data):
"""Minify JS using https://javascript-minifier.com/."""
try:
url = 'https://javascript-minifier.com/raw'
_data = {'input': data}
response = requests.post(url, data=_data)
if response.status_code != 200:
LOGGER.error("can't use javascript-minifier.com: HTTP status {}", response.status_code)
return data
return response.text
except Exception as exc:
LOGGER.error("can't use javascript-minifier.com: {}", exc)
return data
@apply_to_text_file
def jsonminify(data):
"""Minify JSON files (strip whitespace and use minimal separators)."""
data = json.dumps(json.loads(data), indent=None, separators=(',', ':'))
return data
@apply_to_binary_file
def xmlminify(data):
"""Minify XML files (strip whitespace and use minimal separators)."""
parser = lxml.etree.XMLParser(remove_blank_text=True)
newdata = lxml.etree.XML(data, parser=parser)
return lxml.etree.tostring(newdata, encoding='utf-8', method='xml', xml_declaration=True)
def _normalize_html(data):
"""Pass HTML through LXML to clean it up, if possible."""
try:
data = lxml.html.tostring(lxml.html.fromstring(data), encoding='unicode')
except Exception:
pass
return '<!DOCTYPE html>\n' + data
# The function is used in other filters, so the decorator cannot be used directly.
normalize_html = apply_to_text_file(_normalize_html)
@_ConfigurableFilter(xpath_list='HEADER_PERMALINKS_XPATH_LIST', file_blacklist='HEADER_PERMALINKS_FILE_BLACKLIST')
def add_header_permalinks(fname, xpath_list=None, file_blacklist=None):
"""Post-process HTML via lxml to add header permalinks Sphinx-style."""
# Blacklist requires custom file handling
file_blacklist = file_blacklist or []
if fname in file_blacklist:
return
with io.open(fname, 'r', encoding='utf-8-sig') as inf:
data = inf.read()
doc = lxml.html.document_fromstring(data)
# Get language for slugify
try:
lang = doc.attrib['lang'] # <html lang="…">
except KeyError:
# Circular import workaround (utils imports filters)
from nikola.utils import LocaleBorg
lang = LocaleBorg().current_lang
xpath_set = set()
if not xpath_list:
xpath_list = ['*//div[@class="e-content entry-content"]//{hx}']
for xpath_expr in xpath_list:
for hx in ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']:
xpath_set.add(xpath_expr.format(hx=hx))
for x in xpath_set:
nodes = doc.findall(x)
for node in nodes:
parent = node.getparent()
if 'id' in node.attrib:
hid = node.attrib['id']
elif 'id' in parent.attrib:
# docutils: <div> has an ID and contains the header
hid = parent.attrib['id']
else:
# Using force-mode, because not every character can appear in an
# HTML id
node.attrib['id'] = slugify(node.text_content(), lang, True)
hid = node.attrib['id']
new_node = lxml.html.fragment_fromstring('<a href="#{0}" class="headerlink" title="Permalink to this heading">¶</a>'.format(hid))
node.append(new_node)
with io.open(fname, 'w', encoding='utf-8') as outf:
outf.write('<!DOCTYPE html>\n' + lxml.html.tostring(doc, encoding="unicode"))
@_ConfigurableFilter(top_classes='DEDUPLICATE_IDS_TOP_CLASSES')
@apply_to_text_file
def deduplicate_ids(data, top_classes=None):
"""Post-process HTML via lxml to deduplicate IDs."""
if not top_classes:
top_classes = ('postpage', 'storypage')
doc = lxml.html.document_fromstring(data)
elements = doc.xpath('//*')
all_ids = [element.attrib.get('id') for element in elements]
seen_ids = set()
duplicated_ids = set()
for i in all_ids:
if i is not None and i in seen_ids:
duplicated_ids.add(i)
else:
seen_ids.add(i)
if duplicated_ids:
# Well, that sucks.
for i in duplicated_ids:
# Results are ordered the same way they are ordered in document
offending_elements = doc.xpath('//*[@id="{}"]'.format(i))
counter = 2
# If this is a story or a post, do it from top to bottom, because
# updates to those are more likely to appear at the bottom of pages.
# For anything else, including indexes, do it from bottom to top,
# because new posts appear at the top of pages.
# We also leave the first result out, so there is one element with
# "plain" ID
if any(doc.find_class(c) for c in top_classes):
off = offending_elements[1:]
else:
off = offending_elements[-2::-1]
for e in off:
new_id = i
while new_id in seen_ids:
new_id = '{0}-{1}'.format(i, counter)
counter += 1
e.attrib['id'] = new_id
seen_ids.add(new_id)
# Find headerlinks that we can fix.
headerlinks = e.find_class('headerlink')
for hl in headerlinks:
# We might get headerlinks of child elements
if hl.attrib['href'] == '#' + i:
hl.attrib['href'] = '#' + new_id
break
return '<!DOCTYPE html>\n' + lxml.html.tostring(doc, encoding='unicode')
else:
return data
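# --- Hedged example filter (not part of the original module) ---
# A custom filter follows the same pattern as the built-ins above: a
# string-to-string function wrapped by apply_to_text_file, optionally made
# configurable via _ConfigurableFilter. The names strip_trailing_whitespace,
# banner and 'FOOTER_BANNER' are examples only, not existing Nikola options.
@_ConfigurableFilter(banner='FOOTER_BANNER')
@apply_to_text_file
def strip_trailing_whitespace(data, banner=None):
    """Strip trailing whitespace from every line and append an optional banner."""
    lines = [line.rstrip() for line in data.splitlines()]
    out = '\n'.join(lines) + '\n'
    if banner:
        out += banner + '\n'
    return out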
|
from django.db import migrations
from django.db.models import F
from django.db.models.functions import Substr
def migrate_componentlist(apps, schema_editor):
Unit = apps.get_model("trans", "Unit")
db_alias = schema_editor.connection.alias
units = Unit.objects.using(db_alias).filter(
translation__component__file_format__in=(
"json",
"arb",
"go-i18n-json",
"i18next",
"webextension",
"json-nested",
),
context__startswith=".",
)
units.update(context=Substr(F("context"), 2))
class Migration(migrations.Migration):
dependencies = [
("trans", "0090_alert_updated"),
]
operations = [
migrations.RunPython(
migrate_componentlist, migrations.RunPython.noop, elidable=True
),
]
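# --- Hedged note (not part of the original migration) ---
# Django's Substr is 1-indexed, so Substr(F("context"), 2) keeps everything
# from the second character onward, i.e. it strips the leading "." matched by
# context__startswith=".". Per-row Python equivalent, roughly:
#
#     unit.context = unit.context[1:]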
|
from os import path as op
import numpy as np
from numpy.polynomial import legendre
from numpy.testing import (assert_allclose, assert_array_equal, assert_equal,
assert_array_almost_equal)
from scipy.interpolate import interp1d
import pytest
import mne
from mne.forward import _make_surface_mapping, make_field_map
from mne.forward._lead_dots import (_comp_sum_eeg, _comp_sums_meg,
_get_legen_table, _do_cross_dots)
from mne.forward._make_forward import _create_meg_coils
from mne.forward._field_interpolation import _setup_dots
from mne.surface import get_meg_helmet_surf, get_head_surf
from mne.datasets import testing
from mne import read_evokeds, pick_types, make_fixed_length_events, Epochs
from mne.io import read_raw_fif
from mne.utils import run_tests_if_main
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_ctf_fname = op.join(base_dir, 'test_ctf_raw.fif')
data_path = testing.data_path(download=False)
trans_fname = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
subjects_dir = op.join(data_path, 'subjects')
@testing.requires_testing_data
def test_field_map_ctf():
"""Test that field mapping can be done with CTF data."""
raw = read_raw_fif(raw_ctf_fname).crop(0, 1)
raw.apply_gradient_compensation(3)
events = make_fixed_length_events(raw, duration=0.5)
evoked = Epochs(raw, events).average()
evoked.pick_channels(evoked.ch_names[:50]) # crappy mapping but faster
# smoke test
make_field_map(evoked, trans=trans_fname, subject='sample',
subjects_dir=subjects_dir)
def test_legendre_val():
"""Test Legendre polynomial (derivative) equivalence."""
rng = np.random.RandomState(0)
# check table equiv
xs = np.linspace(-1., 1., 1000)
n_terms = 100
# True, numpy
vals_np = legendre.legvander(xs, n_terms - 1)
# Table approximation
for nc, interp in zip([100, 50], ['nearest', 'linear']):
lut, n_fact = _get_legen_table('eeg', n_coeff=nc, force_calc=True)
lut_fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, interp,
axis=0)
vals_i = lut_fun(xs)
# Need a "1:" here because we omit the first coefficient in our table!
assert_allclose(vals_np[:, 1:vals_i.shape[1] + 1], vals_i,
rtol=1e-2, atol=5e-3)
# Now let's look at our sums
ctheta = rng.rand(20, 30) * 2.0 - 1.0
beta = rng.rand(20, 30) * 0.8
c1 = _comp_sum_eeg(beta.flatten(), ctheta.flatten(), lut_fun, n_fact)
c1.shape = beta.shape
# compare to numpy
n = np.arange(1, n_terms, dtype=float)[:, np.newaxis, np.newaxis]
coeffs = np.zeros((n_terms,) + beta.shape)
coeffs[1:] = (np.cumprod([beta] * (n_terms - 1), axis=0) *
(2.0 * n + 1.0) * (2.0 * n + 1.0) / n)
# can't use tensor=False here b/c it isn't in old numpy
c2 = np.empty((20, 30))
for ci1 in range(20):
for ci2 in range(30):
c2[ci1, ci2] = legendre.legval(ctheta[ci1, ci2],
coeffs[:, ci1, ci2])
assert_allclose(c1, c2, 1e-2, 1e-3) # close enough...
# compare fast and slow for MEG
ctheta = rng.rand(20 * 30) * 2.0 - 1.0
beta = rng.rand(20 * 30) * 0.8
lut, n_fact = _get_legen_table('meg', n_coeff=10, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'nearest', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
lut, n_fact = _get_legen_table('meg', n_coeff=20, force_calc=True)
fun = interp1d(np.linspace(-1, 1, lut.shape[0]), lut, 'linear', axis=0)
coeffs = _comp_sums_meg(beta, ctheta, fun, n_fact, False)
def test_legendre_table():
"""Test Legendre table calculation."""
# double-check our table generation
n = 10
for ch_type in ['eeg', 'meg']:
lut1, n_fact1 = _get_legen_table(ch_type, n_coeff=25, force_calc=True)
lut1 = lut1[:, :n - 1].copy()
n_fact1 = n_fact1[:n - 1].copy()
lut2, n_fact2 = _get_legen_table(ch_type, n_coeff=n, force_calc=True)
assert_allclose(lut1, lut2)
assert_allclose(n_fact1, n_fact2)
@testing.requires_testing_data
def test_make_field_map_eeg():
"""Test interpolation of EEG field onto head."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
evoked.info['bads'] = ['MEG 2443', 'EEG 053'] # add some bads
surf = get_head_surf('sample', subjects_dir=subjects_dir)
# we must have trans if surface is in MRI coords
pytest.raises(ValueError, _make_surface_mapping, evoked.info, surf, 'eeg')
evoked.pick_types(meg=False, eeg=True)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
# trans is necessary for EEG only
pytest.raises(RuntimeError, make_field_map, evoked, None,
subject='sample', subjects_dir=subjects_dir)
fmd = make_field_map(evoked, trans_fname,
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 59)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 59
@testing.requires_testing_data
@pytest.mark.slowtest
def test_make_field_map_meg():
"""Test interpolation of MEG field onto helmet | head."""
evoked = read_evokeds(evoked_fname, condition='Left Auditory')
info = evoked.info
surf = get_meg_helmet_surf(info)
# let's reduce the number of channels by a bunch to speed it up
info['bads'] = info['ch_names'][:200]
# bad ch_type
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'foo')
# bad mode
pytest.raises(ValueError, _make_surface_mapping, info, surf, 'meg',
mode='foo')
# no picks
evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
pytest.raises(RuntimeError, _make_surface_mapping, evoked_eeg.info,
surf, 'meg')
# bad surface def
nn = surf['nn']
del surf['nn']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['nn'] = nn
cf = surf['coord_frame']
del surf['coord_frame']
pytest.raises(KeyError, _make_surface_mapping, info, surf, 'meg')
surf['coord_frame'] = cf
# now do it with make_field_map
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj() # avoid projection warnings
fmd = make_field_map(evoked, None,
subject='sample', subjects_dir=subjects_dir)
assert (len(fmd) == 1)
assert_array_equal(fmd[0]['data'].shape, (304, 106)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, ch_type='foobar')
# now test the make_field_map on head surf for MEG
evoked.pick_types(meg=True, eeg=False)
evoked.info.normalize_proj()
fmd = make_field_map(evoked, trans_fname, meg_surf='head',
subject='sample', subjects_dir=subjects_dir)
assert len(fmd) == 1
assert_array_equal(fmd[0]['data'].shape, (642, 106)) # maps data onto surf
assert len(fmd[0]['ch_names']) == 106
pytest.raises(ValueError, make_field_map, evoked, meg_surf='foobar',
subjects_dir=subjects_dir, trans=trans_fname)
@testing.requires_testing_data
def test_make_field_map_meeg():
"""Test making a M/EEG field map onto helmet & head."""
evoked = read_evokeds(evoked_fname, baseline=(-0.2, 0.0))[0]
picks = pick_types(evoked.info, meg=True, eeg=True)
picks = picks[::10]
evoked.pick_channels([evoked.ch_names[p] for p in picks])
evoked.info.normalize_proj()
maps = make_field_map(evoked, trans_fname, subject='sample',
subjects_dir=subjects_dir, n_jobs=1, verbose='debug')
assert_equal(maps[0]['data'].shape, (642, 6)) # EEG->Head
assert_equal(maps[1]['data'].shape, (304, 31)) # MEG->Helmet
# reasonable ranges
maxs = (1.2, 2.0) # before #4418, was (1.1, 2.0)
mins = (-0.8, -1.3) # before #4418, was (-0.6, -1.2)
assert_equal(len(maxs), len(maps))
for map_, max_, min_ in zip(maps, maxs, mins):
assert_allclose(map_['data'].max(), max_, rtol=5e-2)
assert_allclose(map_['data'].min(), min_, rtol=5e-2)
# calculated from correct looking mapping on 2015/12/26
assert_allclose(np.sqrt(np.sum(maps[0]['data'] ** 2)), 19.0903, # 16.6088,
atol=1e-3, rtol=1e-3)
assert_allclose(np.sqrt(np.sum(maps[1]['data'] ** 2)), 19.4748, # 20.1245,
atol=1e-3, rtol=1e-3)
def _setup_args(info):
"""Configure args for test_as_meg_type_evoked."""
coils = _create_meg_coils(info['chs'], 'normal', info['dev_head_t'])
int_rad, noise, lut_fun, n_fact = _setup_dots('fast', coils, 'meg')
my_origin = np.array([0., 0., 0.04])
args_dict = dict(intrad=int_rad, volume=False, coils1=coils, r0=my_origin,
ch_type='meg', lut=lut_fun, n_fact=n_fact)
return args_dict
@testing.requires_testing_data
def test_as_meg_type_evoked():
"""Test interpolation of data on to virtual channels."""
# validation tests
raw = read_raw_fif(raw_fname)
events = mne.find_events(raw)
picks = pick_types(raw.info, meg=True, eeg=True, stim=True,
ecg=True, eog=True, include=['STI 014'],
exclude='bads')
epochs = mne.Epochs(raw, events, picks=picks)
evoked = epochs.average()
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.as_type('meg')
with pytest.raises(ValueError, match="Invalid value for the 'ch_type'"):
evoked.copy().pick_types(meg='grad').as_type('meg')
# channel names
ch_names = evoked.info['ch_names']
virt_evoked = evoked.copy().pick_channels(ch_names=ch_names[:10:1])
virt_evoked.info.normalize_proj()
virt_evoked = virt_evoked.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_evoked.info['ch_names']))
# pick from and to channels
evoked_from = evoked.copy().pick_channels(ch_names=ch_names[2:10:3])
evoked_to = evoked.copy().pick_channels(ch_names=ch_names[0:10:3])
info_from, info_to = evoked_from.info, evoked_to.info
# set up things
args1, args2 = _setup_args(info_from), _setup_args(info_to)
args1.update(coils2=args2['coils1'])
args2.update(coils2=args1['coils1'])
# test cross dots
cross_dots1 = _do_cross_dots(**args1)
cross_dots2 = _do_cross_dots(**args2)
assert_array_almost_equal(cross_dots1, cross_dots2.T)
# correlation test
evoked = evoked.pick_channels(ch_names=ch_names[:10:]).copy()
data1 = evoked.pick_types(meg='grad').data.ravel()
data2 = evoked.as_type('grad').data.ravel()
assert (np.corrcoef(data1, data2)[0, 1] > 0.95)
# Do it with epochs
virt_epochs = \
epochs.copy().load_data().pick_channels(ch_names=ch_names[:10:1])
virt_epochs.info.normalize_proj()
virt_epochs = virt_epochs.as_type('mag')
assert (all(ch.endswith('_v') for ch in virt_epochs.info['ch_names']))
assert_allclose(virt_epochs.get_data().mean(0), virt_evoked.data)
run_tests_if_main()
|
import os
from nikola.plugin_categories import Task
from nikola.image_processing import ImageProcessor
from nikola import utils
class ScaleImage(Task, ImageProcessor):
"""Resize images and create thumbnails for them."""
name = "scale_images"
def process_tree(self, src, dst):
"""Process all images in a src tree and put the (possibly) rescaled images in the dst folder."""
thumb_fmt = self.kw['image_thumbnail_format']
base_len = len(src.split(os.sep))
for root, dirs, files in os.walk(src, followlinks=True):
root_parts = root.split(os.sep)
dst_dir = os.path.join(dst, *root_parts[base_len:])
utils.makedirs(dst_dir)
for src_name in files:
if (not src_name.lower().endswith(tuple(self.image_ext_list)) and not src_name.upper().endswith(tuple(self.image_ext_list))):
continue
dst_file = os.path.join(dst_dir, src_name)
src_file = os.path.join(root, src_name)
thumb_name, thumb_ext = os.path.splitext(src_name)
thumb_file = os.path.join(dst_dir, thumb_fmt.format(
name=thumb_name,
ext=thumb_ext,
))
yield {
'name': dst_file,
'file_dep': [src_file],
'targets': [dst_file, thumb_file],
'actions': [(self.process_image, (src_file, dst_file, thumb_file))],
'clean': True,
}
def process_image(self, src, dst, thumb):
"""Resize an image."""
self.resize_image(
src,
dst_paths=[dst, thumb],
max_sizes=[self.kw['max_image_size'], self.kw['image_thumbnail_size']],
bigger_panoramas=True,
preserve_exif_data=self.kw['preserve_exif_data'],
exif_whitelist=self.kw['exif_whitelist'],
preserve_icc_profiles=self.kw['preserve_icc_profiles']
)
def gen_tasks(self):
"""Copy static files into the output folder."""
self.kw = {
'image_thumbnail_size': self.site.config['IMAGE_THUMBNAIL_SIZE'],
'image_thumbnail_format': self.site.config['IMAGE_THUMBNAIL_FORMAT'],
'max_image_size': self.site.config['MAX_IMAGE_SIZE'],
'image_folders': self.site.config['IMAGE_FOLDERS'],
'output_folder': self.site.config['OUTPUT_FOLDER'],
'filters': self.site.config['FILTERS'],
'preserve_exif_data': self.site.config['PRESERVE_EXIF_DATA'],
'exif_whitelist': self.site.config['EXIF_WHITELIST'],
'preserve_icc_profiles': self.site.config['PRESERVE_ICC_PROFILES'],
}
self.image_ext_list = self.image_ext_list_builtin
self.image_ext_list.extend(self.site.config.get('EXTRA_IMAGE_EXTENSIONS', []))
yield self.group_task()
for src in self.kw['image_folders']:
dst = self.kw['output_folder']
filters = self.kw['filters']
real_dst = os.path.join(dst, self.kw['image_folders'][src])
for task in self.process_tree(src, real_dst):
task['basename'] = self.name
task['uptodate'] = [utils.config_changed(self.kw)]
yield utils.apply_filters(task, filters)
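# Hedged, minimal sketch (not part of Nikola): process_tree above derives the
# thumbnail path from the configured thumbnail format via os.path.splitext.
# The default format string and the file names below are illustrative assumptions.
def _example_thumb_path(dst_dir, src_name, thumb_fmt="{name}.thumbnail{ext}"):
    """Mirror the splitext/format logic used in ScaleImage.process_tree."""
    name, ext = os.path.splitext(src_name)
    return os.path.join(dst_dir, thumb_fmt.format(name=name, ext=ext))

# For example, _example_thumb_path("output/images", "photo.jpg") yields
# "output/images/photo.thumbnail.jpg" on POSIX systems.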
|
from setuptools import setup, find_packages
import unittest
import codecs
def test_suite():
test_loader = unittest.TestLoader()
test_suite = test_loader.discover('subword_nmt/tests', pattern='test_*.py')
return test_suite
setup(
name='subword_nmt',
version='0.3.7',
description='Unsupervised Word Segmentation for Neural Machine Translation and Text Generation',
long_description=(codecs.open("README.md", encoding='utf-8').read() +
"\n\n" + codecs.open("CHANGELOG.md", encoding='utf-8').read()),
long_description_content_type="text/markdown",
url='https://github.com/rsennrich/subword-nmt',
author='Rico Sennrich',
license='MIT',
test_suite='setup.test_suite',
classifiers=[
'Intended Audience :: Developers',
'Topic :: Text Processing',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
packages=find_packages(),
entry_points={
'console_scripts': ['subword-nmt=subword_nmt.subword_nmt:main'],
},
include_package_data=True
)
|
import socket
import warnings
# pysnmp packages on debian 6.0 use sha and md5 which are deprecated
# packages. there is nothing to be done about it until pysnmp
# updates to use new hashlib module -- ignoring warning for now
old_showwarning = warnings.showwarning
warnings.filterwarnings("ignore", category=DeprecationWarning)
cmdgen = None
try:
import pysnmp.entity.rfc3413.oneliner.cmdgen as cmdgen
import pysnmp.debug
except ImportError:
pysnmp = None
cmdgen = None
warnings.showwarning = old_showwarning
import diamond.collector
class SNMPCollector(diamond.collector.Collector):
def __init__(self, *args, **kwargs):
super(SNMPCollector, self).__init__(*args, **kwargs)
if cmdgen is not None:
self.snmpCmdGen = cmdgen.CommandGenerator()
def get_default_config_help(self):
config_help = super(SNMPCollector, self).get_default_config_help()
config_help.update({
'timeout': 'Seconds before timing out the snmp connection',
'retries': 'Number of times to retry before bailing',
})
return config_help
def get_default_config(self):
# Initialize default config
default_config = super(SNMPCollector, self).get_default_config()
default_config['path_suffix'] = ''
default_config['path_prefix'] = 'systems'
default_config['timeout'] = 5
default_config['retries'] = 3
# Return default config
return default_config
def _convert_to_oid(self, s):
d = s.split(".")
return tuple([int(x) for x in d])
def _convert_from_oid(self, oid):
return ".".join([str(x) for x in oid])
def collect(self):
for device in self.config['devices']:
host = self.config['devices'][device]['host']
port = self.config['devices'][device]['port']
community = self.config['devices'][device]['community']
self.collect_snmp(device, host, port, community)
def get(self, oid, host, port, community):
"""
Perform SNMP get for a given OID
"""
# Initialize return value
ret = {}
# Convert OID to tuple if necessary
if not isinstance(oid, tuple):
oid = self._convert_to_oid(oid)
# Convert Host to IP if necessary
host = socket.gethostbyname(host)
# Assemble SNMP Auth Data
snmpAuthData = cmdgen.CommunityData(
'agent-{}'.format(community),
community)
# Assemble SNMP Transport Data
snmpTransportData = cmdgen.UdpTransportTarget(
(host, port),
int(self.config['timeout']),
int(self.config['retries']))
        # Assemble SNMP Get Command
result = self.snmpCmdGen.getCmd(snmpAuthData, snmpTransportData, oid)
varBind = result[3]
# TODO: Error check
for o, v in varBind:
ret[str(o)] = v.prettyPrint()
return ret
def walk(self, oid, host, port, community):
"""
Perform an SNMP walk on a given OID
"""
# Initialize return value
ret = {}
# Convert OID to tuple if necessary
if not isinstance(oid, tuple):
oid = self._convert_to_oid(oid)
# Convert Host to IP if necessary
host = socket.gethostbyname(host)
# Assemble SNMP Auth Data
snmpAuthData = cmdgen.CommunityData(
'agent-{}'.format(community),
community)
# Assemble SNMP Transport Data
snmpTransportData = cmdgen.UdpTransportTarget(
(host, port),
int(self.config['timeout']),
int(self.config['retries']))
# Assemble SNMP Next Command
resultTable = self.snmpCmdGen.nextCmd(snmpAuthData,
snmpTransportData,
oid)
varBindTable = resultTable[3]
# TODO: Error Check
for varBindTableRow in varBindTable:
for o, v in varBindTableRow:
ret[str(o)] = v.prettyPrint()
return ret
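# Hedged sketch (not part of this module): collect() above delegates to a
# collect_snmp() method that concrete collectors are expected to provide.
# A minimal illustrative subclass might query a single OID per device and
# publish it; the OID, metric name and use of prettyPrint-ed values here are
# assumptions for demonstration only, and pysnmp must be installed.
class ExampleUptimeCollector(SNMPCollector):

    SYS_UPTIME_OID = '1.3.6.1.2.1.1.3.0'

    def collect_snmp(self, device, host, port, community):
        # self.get() returns a {oid_string: pretty_printed_value} dict.
        for oid, value in self.get(self.SYS_UPTIME_OID, host, port,
                                   community).items():
            self.publish('%s.sysUpTime' % device, value)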
|
import numpy as np
import pandas as pd
import statsmodels.api as sm
try:
import torch
except ImportError:
torch = None
from pgmpy.models import SEMGraph, SEMAlg, SEM
from pgmpy.global_vars import device, dtype
from pgmpy.utils import optimize, pinverse
class SEMEstimator(object):
"""
Base class of SEM estimators. All the estimators inherit this class.
"""
def __init__(self, model):
if isinstance(model, (SEMGraph, SEM)):
self.model = model.to_lisrel()
elif isinstance(model, SEMAlg):
self.model = model
else:
raise ValueError(
f"Model should be an instance of either SEMGraph or SEMAlg class. Got type: {type(model)}"
)
# Initialize trainable and fixed mask tensors
self.B_mask = torch.tensor(
self.model.B_mask, device=device, dtype=dtype, requires_grad=False
)
self.zeta_mask = torch.tensor(
self.model.zeta_mask, device=device, dtype=dtype, requires_grad=False
)
self.B_fixed_mask = torch.tensor(
self.model.B_fixed_mask, device=device, dtype=dtype, requires_grad=False
)
self.zeta_fixed_mask = torch.tensor(
self.model.zeta_fixed_mask, device=device, dtype=dtype, requires_grad=False
)
self.wedge_y = torch.tensor(
self.model.wedge_y, device=device, dtype=dtype, requires_grad=False
)
self.B_eye = torch.eye(
self.B_mask.shape[0], device=device, dtype=dtype, requires_grad=False
)
def _get_implied_cov(self, B, zeta):
"""
Computes the implied covariance matrix from the given parameters.
"""
B_masked = torch.mul(B, self.B_mask) + self.B_fixed_mask
B_inv = pinverse(self.B_eye - B_masked)
zeta_masked = torch.mul(zeta, self.zeta_mask) + self.zeta_fixed_mask
return self.wedge_y @ B_inv @ zeta_masked @ B_inv.t() @ self.wedge_y.t()
def ml_loss(self, params, loss_args):
r"""
Method to compute the Maximum Likelihood loss function. The optimizer calls this
method after each iteration with updated params to compute the new loss.
The fitting function for ML is:
        .. math:: F_{ML} = \log |\Sigma(\theta)| + tr(S \Sigma^{-1}(\theta)) - \log |S| - (p+q)
Parameters
----------
params: dict
params contain all the variables which are updated in each iteration of the
optimization.
loss_args: dict
            loss_args contains all the variables that are not updated in each iteration but
are required to compute the loss.
Returns
-------
torch.tensor: The loss value for the given params and loss_args
"""
S = loss_args["S"]
sigma = self._get_implied_cov(params["B"], params["zeta"])
return (
sigma.det().clamp(min=1e-4).log()
+ (S @ pinverse(sigma)).trace()
- S.logdet()
- len(self.model.y)
)
def uls_loss(self, params, loss_args):
r"""
Method to compute the Unweighted Least Squares fitting function. The optimizer calls
this method after each iteration with updated params to compute the new loss.
        The fitting function for ULS is:
.. math:: F_{ULS} = tr[(S - \Sigma(\theta))^2]
Parameters
----------
params: dict
params contain all the variables which are updated in each iteration of the
optimization.
loss_args: dict
            loss_args contains all the variables that are not updated in each iteration but
are required to compute the loss.
Returns
-------
torch.tensor: The loss value for the given params and loss_args
"""
S = loss_args["S"]
sigma = self._get_implied_cov(params["B"], params["zeta"])
return (S - sigma).pow(2).trace()
def gls_loss(self, params, loss_args):
r"""
        Method to compute the Generalized Least Squares fitting function. The optimizer calls
        this method after each iteration with updated params to compute the new loss.
        The fitting function for GLS is:
        .. math:: F_{GLS} = tr \{ [(S - \Sigma(\theta)) W^{-1}]^2 \}
Parameters
----------
params: dict
params contain all the variables which are updated in each iteration of the
optimization.
loss_args: dict
            loss_args contains all the variables that are not updated in each iteration but
are required to compute the loss.
Returns
-------
torch.tensor: The loss value for the given params and loss_args
"""
S = loss_args["S"]
W_inv = pinverse(loss_args["W"])
sigma = self._get_implied_cov(params["B"], params["zeta"])
return ((S - sigma) @ W_inv).pow(2).trace()
def get_init_values(self, data, method):
"""
Computes the starting values for the optimizer.
Reference
---------
.. [1] Table 4C.1: Bollen, K. (2014). Structural Equations with Latent Variables.
New York, NY: John Wiley & Sons.
"""
# Initialize all the values even if the edge doesn't exist, masks would take care of that.
a = 0.4
scaling_vars = self.model.to_SEMGraph().get_scaling_indicators()
eta, m = self.model.eta, len(self.model.eta)
if method == "random":
B = np.random.rand(m, m)
zeta = np.random.rand(m, m)
elif method == "std":
            # Add observed vars to `scaling_vars`, each pointing to itself. Trick to keep the code short.
for observed_var in self.model.y:
scaling_vars[observed_var] = observed_var
B = np.random.rand(m, m)
for i in range(m):
for j in range(m):
if scaling_vars[eta[i]] == eta[j]:
B[i, j] = 1.0
elif i != j:
B[i, j] = a * (
data.loc[:, scaling_vars[eta[i]]].std()
/ data.loc[:, scaling_vars[eta[j]]].std()
)
zeta = np.random.rand(m, m)
for i in range(m):
zeta[i, i] = a * ((data.loc[:, scaling_vars[eta[i]]].std()) ** 2)
for i in range(m):
for j in range(m):
zeta[i, j] = zeta[j, i] = a * np.sqrt(zeta[i, i] * zeta[j, j])
elif method.lower() == "iv":
raise NotImplementedError("IV initialization not supported yet.")
return B, zeta
def fit(
self,
data,
method,
opt="adam",
init_values="random",
exit_delta=1e-4,
max_iter=1000,
**kwargs,
):
"""
Estimate the parameters of the model from the data.
Parameters
----------
data: pandas DataFrame or pgmpy.data.Data instance
The data from which to estimate the parameters of the model.
method: str ("ml"|"uls"|"gls"|"2sls")
The fitting function to use.
ML : Maximum Likelihood
ULS: Unweighted Least Squares
GLS: Generalized Least Squares
2sls: 2-SLS estimator
init_values: str or dict
Options for str: random | std | iv
dict: dictionary with keys `B` and `zeta`.
**kwargs: dict
Extra parameters required in case of some estimators.
GLS:
                W: np.array (n x n) where n is the number of observed variables.
2sls:
x:
y:
Returns
-------
pgmpy.model.SEM instance: Instance of the model with estimated parameters
References
----------
.. [1] Bollen, K. A. (2010). Structural equations with latent variables. New York: Wiley.
"""
        # Check if the given arguments are valid
if not isinstance(data, pd.DataFrame):
raise ValueError(f"data must be a pandas DataFrame. Got type: {type(data)}")
        if not sorted(data.columns) == sorted(self.model.y):
            raise ValueError(
                f"The column names of data do not match the variables in the model. Expected: {sorted(self.model.y)}. Got: {sorted(data.columns)}"
            )
# Initialize the values of parameters as tensors.
if isinstance(init_values, dict):
B_init, zeta_init = init_values["B"], init_values["zeta"]
else:
B_init, zeta_init = self.get_init_values(data, method=init_values.lower())
B = torch.tensor(B_init, device=device, dtype=dtype, requires_grad=True)
zeta = torch.tensor(zeta_init, device=device, dtype=dtype, requires_grad=True)
# Compute the covariance of the data
variable_order = self.model.y
S = data.cov().reindex(variable_order, axis=1).reindex(variable_order, axis=0)
S = torch.tensor(S.values, device=device, dtype=dtype, requires_grad=False)
# Optimize the parameters
if method.lower() == "ml":
params = optimize(
self.ml_loss,
params={"B": B, "zeta": zeta},
loss_args={"S": S},
opt=opt,
exit_delta=exit_delta,
max_iter=max_iter,
)
elif method.lower() == "uls":
params = optimize(
self.uls_loss,
params={"B": B, "zeta": zeta},
loss_args={"S": S},
opt=opt,
exit_delta=exit_delta,
max_iter=max_iter,
)
elif method.lower() == "gls":
W = torch.tensor(
kwargs["W"], device=device, dtype=dtype, requires_grad=False
)
params = optimize(
self.gls_loss,
params={"B": B, "zeta": zeta},
loss_args={"S": S, "W": W},
opt=opt,
exit_delta=exit_delta,
max_iter=max_iter,
)
elif method.lower() == "2sls" or method.lower() == "2-sls":
raise NotImplementedError("2-SLS is not implemented yet")
B = params["B"] * self.B_mask + self.B_fixed_mask
zeta = params["zeta"] * self.zeta_mask + self.zeta_fixed_mask
# Compute goodness of fit statistics.
N = data.shape[0]
sample_cov = S.detach().numpy()
sigma_hat = self._get_implied_cov(B, zeta).detach().numpy()
residual = sample_cov - sigma_hat
norm_residual = np.zeros(residual.shape)
for i in range(norm_residual.shape[0]):
for j in range(norm_residual.shape[1]):
norm_residual[i, j] = (sample_cov[i, j] - sigma_hat[i, j]) / np.sqrt(
((sigma_hat[i, i] * sigma_hat[j, j]) + (sigma_hat[i, j] ** 2)) / N
)
# Compute chi-square value.
        likelihood_ratio = -(N - 1) * (
            np.log(np.linalg.det(sigma_hat))
            + (np.linalg.inv(sigma_hat) @ sample_cov).trace()
            - np.log(np.linalg.det(sample_cov))
            - sample_cov.shape[0]
        )
if method.lower() == "ml":
error = self.ml_loss(params, loss_args={"S": S})
elif method.lower() == "uls":
error = self.uls_loss(params, loss_args={"S": S})
elif method.lower() == "gls":
error = self.gls_loss(params, loss_args={"S": S, "W": W})
chi_square = likelihood_ratio / error.detach().numpy()
free_params = self.B_mask.sum()
dof = ((S.shape[0] * (S.shape[0] + 1)) / 2) - free_params
summary = {
"Sample Size": N,
"Sample Covariance": sample_cov,
"Model Implied Covariance": sigma_hat,
"Residual": residual,
"Normalized Residual": norm_residual,
"chi_square": chi_square,
"dof": dof,
}
# Update the model with the learned params
        self.model.set_params(
            B=params["B"].detach().numpy(), zeta=params["zeta"].detach().numpy()
        )
return summary
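# Hedged numeric check (plain numpy, not the pgmpy API): the ULS fitting
# function documented above is tr[(S - Sigma(theta))^2]; for a symmetric
# residual matrix this equals the sum of squared element-wise residuals.
# The two covariance matrices below are invented for illustration only.
def _example_uls_value():
    S_example = np.array([[2.0, 0.5], [0.5, 1.0]])  # assumed sample covariance
    sigma_example = np.array([[1.8, 0.4], [0.4, 1.1]])  # assumed implied covariance
    residual = S_example - sigma_example
    f_uls = np.trace(residual @ residual)  # tr[(S - Sigma)^2]
    assert np.isclose(f_uls, (residual ** 2).sum())  # equal for symmetric residuals
    return f_uls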
class IVEstimator:
"""
Implements Instrumental Variable (IV) based estimator.
"""
def __init__(self, model):
"""
Initialize IVEstimator object.
Parameters
----------
model: pgmpy.models.SEM
The model for which estimation need to be done.
Examples
--------
"""
self.model = model
def fit(self, X, Y, data, ivs=None, civs=None):
"""
Estimates the parameter X -> Y.
Parameters
----------
X: str
The covariate variable of the parameter being estimated.
        Y: str
            The outcome (predicted) variable of the parameter being estimated.
data: pd.DataFrame
The data from which to learn the parameter.
ivs: List (default: None)
List of variable names which should be used as Instrumental Variables (IV).
If not specified, tries to find the IVs from the model structure, fails if
can't find either IV or Conditional IV.
        civs: List of tuples (tuple form: (var, conditional_var))
List of conditional IVs to use for estimation.
If not specified, tries to find the IVs from the model structure, fails if
can't find either IV or Conditional IVs.
Examples
--------
>>> from pgmpy.estimators import IVEstimator # TODO: Finish example.
"""
if (ivs is None) and (civs is None):
ivs = self.model.get_ivs(X, Y)
civs = self.model.get_conditional_ivs(X, Y)
civs = [civ for civ in civs if civ[0] not in ivs]
reg_covars = []
for var in self.model.graph.predecessors(X):
if var in self.model.observed:
reg_covars.append(var)
# Get CIV conditionals
civ_conditionals = []
for civ in civs:
civ_conditionals.extend(civ[1])
# First stage regression.
params = (
sm.OLS(data.loc[:, X], data.loc[:, reg_covars + civ_conditionals])
.fit()
.params
)
data["X_pred"] = np.zeros(data.shape[0])
for var in reg_covars:
data.X_pred += params[var] * data.loc[:, var]
summary = sm.OLS(
data.loc[:, Y], data.loc[:, ["X_pred"] + civ_conditionals]
).fit()
return summary.params["X_pred"], summary
|
from homeassistant import data_entry_flow
from homeassistant.components.hangouts import config_flow
from homeassistant.const import CONF_EMAIL, CONF_PASSWORD
from tests.async_mock import patch
EMAIL = "[email protected]"
PASSWORD = "1232456"
async def test_flow_works(hass, aioclient_mock):
"""Test config flow without 2fa."""
flow = config_flow.HangoutsFlowHandler()
flow.hass = hass
with patch("homeassistant.components.hangouts.config_flow.get_auth"):
result = await flow.async_step_user(
{CONF_EMAIL: EMAIL, CONF_PASSWORD: PASSWORD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == EMAIL
async def test_flow_works_with_authcode(hass, aioclient_mock):
"""Test config flow without 2fa."""
flow = config_flow.HangoutsFlowHandler()
flow.hass = hass
with patch("homeassistant.components.hangouts.config_flow.get_auth"):
result = await flow.async_step_user(
{
CONF_EMAIL: EMAIL,
CONF_PASSWORD: PASSWORD,
"authorization_code": "c29tZXJhbmRvbXN0cmluZw==",
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == EMAIL
async def test_flow_works_with_2fa(hass, aioclient_mock):
"""Test config flow with 2fa."""
from homeassistant.components.hangouts.hangups_utils import Google2FAError
flow = config_flow.HangoutsFlowHandler()
flow.hass = hass
with patch(
"homeassistant.components.hangouts.config_flow.get_auth",
side_effect=Google2FAError,
):
result = await flow.async_step_user(
{CONF_EMAIL: EMAIL, CONF_PASSWORD: PASSWORD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "2fa"
with patch("homeassistant.components.hangouts.config_flow.get_auth"):
result = await flow.async_step_2fa({"2fa": 123456})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == EMAIL
async def test_flow_with_unknown_2fa(hass, aioclient_mock):
"""Test config flow with invalid 2fa method."""
from homeassistant.components.hangouts.hangups_utils import GoogleAuthError
flow = config_flow.HangoutsFlowHandler()
flow.hass = hass
with patch(
"homeassistant.components.hangouts.config_flow.get_auth",
side_effect=GoogleAuthError("Unknown verification code input"),
):
result = await flow.async_step_user(
{CONF_EMAIL: EMAIL, CONF_PASSWORD: PASSWORD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"]["base"] == "invalid_2fa_method"
async def test_flow_invalid_login(hass, aioclient_mock):
"""Test config flow with invalid 2fa method."""
from homeassistant.components.hangouts.hangups_utils import GoogleAuthError
flow = config_flow.HangoutsFlowHandler()
flow.hass = hass
with patch(
"homeassistant.components.hangouts.config_flow.get_auth",
side_effect=GoogleAuthError,
):
result = await flow.async_step_user(
{CONF_EMAIL: EMAIL, CONF_PASSWORD: PASSWORD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"]["base"] == "invalid_login"
async def test_flow_invalid_2fa(hass, aioclient_mock):
"""Test config flow with 2fa."""
from homeassistant.components.hangouts.hangups_utils import Google2FAError
flow = config_flow.HangoutsFlowHandler()
flow.hass = hass
with patch(
"homeassistant.components.hangouts.config_flow.get_auth",
side_effect=Google2FAError,
):
result = await flow.async_step_user(
{CONF_EMAIL: EMAIL, CONF_PASSWORD: PASSWORD}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "2fa"
with patch(
"homeassistant.components.hangouts.config_flow.get_auth",
side_effect=Google2FAError,
):
result = await flow.async_step_2fa({"2fa": 123456})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"]["base"] == "invalid_2fa"
|
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_OCCUPANCY,
BinarySensorEntity,
)
from . import SleepIQSensor
from .const import DOMAIN, IS_IN_BED, SENSOR_TYPES, SIDES
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the SleepIQ sensors."""
if discovery_info is None:
return
data = hass.data[DOMAIN]
data.update()
dev = []
for bed_id, bed in data.beds.items():
for side in SIDES:
if getattr(bed, side) is not None:
dev.append(IsInBedBinarySensor(data, bed_id, side))
add_entities(dev)
class IsInBedBinarySensor(SleepIQSensor, BinarySensorEntity):
"""Implementation of a SleepIQ presence sensor."""
def __init__(self, sleepiq_data, bed_id, side):
"""Initialize the sensor."""
super().__init__(sleepiq_data, bed_id, side)
self._state = None
self._name = SENSOR_TYPES[IS_IN_BED]
self.update()
@property
def is_on(self):
"""Return the status of the sensor."""
return self._state is True
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_OCCUPANCY
def update(self):
"""Get the latest data from SleepIQ and updates the states."""
super().update()
self._state = self.side.is_in_bed
|
LOREM_IPSUM_WORDS = """\
a ac accumsan ad adipiscing aenean aliquam aliquet amet ante aptent arcu at
auctor augue bibendum blandit class commodo condimentum congue consectetuer
consequat conubia convallis cras cubilia cum curabitur curae cursus dapibus
diam dictum dictumst dignissim dis dolor donec dui duis egestas eget eleifend
elementum elit enim erat eros est et etiam eu euismod facilisi facilisis fames
faucibus felis fermentum feugiat fringilla fusce gravida habitant habitasse hac
hendrerit hymenaeos iaculis id imperdiet in inceptos integer interdum ipsum
justo lacinia lacus laoreet lectus leo libero ligula litora lobortis lorem
luctus maecenas magna magnis malesuada massa mattis mauris metus mi molestie
mollis montes morbi mus nam nascetur natoque nec neque netus nibh nisi nisl non
nonummy nostra nulla nullam nunc odio orci ornare parturient pede pellentesque
penatibus per pharetra phasellus placerat platea porta porttitor posuere
potenti praesent pretium primis proin pulvinar purus quam quis quisque rhoncus
ridiculus risus rutrum sagittis sapien scelerisque sed sem semper senectus sit
sociis sociosqu sodales sollicitudin suscipit suspendisse taciti tellus tempor
tempus tincidunt torquent tortor tristique turpis ullamcorper ultrices
ultricies urna ut varius vehicula vel velit venenatis vestibulum vitae vivamus
viverra volutpat vulputate"""
|
import os
from k8s_itests.utils import cmd
from k8s_itests.utils import init_all
terminate_on_exit = []
def setup_module(module):
init_all()
class TestSetupKubernetesJobs:
def test_autoscaling(self):
cmd("kubectl get hpa -n paasta", False)
def test_paasta_status(self):
instance = "autoscaling"
service = "compute-infra-test-service"
cmd(
f"python -m paasta_tools.cli.cli status -c {os.environ['KIND_CLUSTER']} -s {service} -i {instance} -v",
False,
)
|
import asyncio
import logging
from aiopylgtv import PyLGTVCmdException, PyLGTVPairException
from websockets.exceptions import ConnectionClosed
from homeassistant.components.notify import ATTR_DATA, BaseNotificationService
from homeassistant.const import CONF_HOST, CONF_ICON
from . import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_get_service(hass, config, discovery_info=None):
"""Return the notify service."""
if discovery_info is None:
return None
host = discovery_info.get(CONF_HOST)
icon_path = discovery_info.get(CONF_ICON)
client = hass.data[DOMAIN][host]["client"]
svc = LgWebOSNotificationService(client, icon_path)
return svc
class LgWebOSNotificationService(BaseNotificationService):
"""Implement the notification service for LG WebOS TV."""
def __init__(self, client, icon_path):
"""Initialize the service."""
self._client = client
self._icon_path = icon_path
async def async_send_message(self, message="", **kwargs):
"""Send a message to the tv."""
try:
if not self._client.is_connected():
await self._client.connect()
data = kwargs.get(ATTR_DATA)
icon_path = (
data.get(CONF_ICON, self._icon_path) if data else self._icon_path
)
await self._client.send_message(message, icon_path=icon_path)
except PyLGTVPairException:
_LOGGER.error("Pairing with TV failed")
except FileNotFoundError:
_LOGGER.error("Icon %s not found", icon_path)
except (
OSError,
ConnectionClosed,
ConnectionRefusedError,
asyncio.TimeoutError,
asyncio.CancelledError,
PyLGTVCmdException,
):
_LOGGER.error("TV unreachable")
|
from flask import Blueprint
from lemur.database import db
from lemur.extensions import sentry
mod = Blueprint("healthCheck", __name__)
@mod.route("/healthcheck")
def health():
try:
if healthcheck(db):
return "ok"
except Exception:
sentry.captureException()
return "db check failed"
def healthcheck(db):
with db.engine.connect() as connection:
connection.execute("SELECT 1;")
return True
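# Hedged sketch (illustrative, not part of Lemur): the blueprint above would
# typically be registered on the Flask application so that GET /healthcheck
# exercises the database probe; the factory function name is an assumption.
def create_example_app():
    from flask import Flask
    app = Flask(__name__)
    app.register_blueprint(mod)
    return app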
|
from typing import Callable, Optional
from homeassistant.helpers.entity import Entity
from .account import StarlineAccount, StarlineDevice
class StarlineEntity(Entity):
"""StarLine base entity class."""
def __init__(
self, account: StarlineAccount, device: StarlineDevice, key: str, name: str
):
"""Initialize StarLine entity."""
self._account = account
self._device = device
self._key = key
self._name = name
self._unsubscribe_api: Optional[Callable] = None
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def available(self):
"""Return True if entity is available."""
return self._account.api.available
@property
def unique_id(self):
"""Return the unique ID of the entity."""
return f"starline-{self._key}-{self._device.device_id}"
@property
def name(self):
"""Return the name of the entity."""
return f"{self._device.name} {self._name}"
@property
def device_info(self):
"""Return the device info."""
return self._account.device_info(self._device)
def update(self):
"""Read new state data."""
self.schedule_update_ha_state()
async def async_added_to_hass(self):
"""Call when entity about to be added to Home Assistant."""
await super().async_added_to_hass()
self._unsubscribe_api = self._account.api.add_update_listener(self.update)
async def async_will_remove_from_hass(self):
"""Call when entity is being removed from Home Assistant."""
await super().async_will_remove_from_hass()
if self._unsubscribe_api is not None:
self._unsubscribe_api()
self._unsubscribe_api = None
|
import os
import pytest
import nikola.plugins.command.init
from nikola import __main__
from .helper import append_config, cd
from .test_empty_build import ( # NOQA
test_archive_exists,
test_avoid_double_slash_in_rss,
test_check_files,
test_check_links,
test_index_in_sitemap,
)
from .test_page_index_normal_urls import create_pages
from .test_page_index_normal_urls import ( # NOQA
test_page_index,
test_page_index_in_subdir,
test_page_index_content_in_pages,
test_page_index_content_in_subdir1,
test_page_index_content_in_subdir2,
test_page_index_content_in_subdir3,
)
@pytest.fixture(scope="module")
def output_path_func():
def output_path(dir, name):
"""Make a file path to the output."""
return os.path.join(dir, name + "/index.html")
return output_path
@pytest.fixture(scope="module")
def build(target_dir):
"""Build the site."""
init_command = nikola.plugins.command.init.CommandInit()
init_command.create_empty_site(target_dir)
init_command.create_configuration(target_dir)
create_pages(target_dir)
append_config(
target_dir,
"""
PAGE_INDEX = True
PRETTY_URLS = True
PAGES = PAGES + (('pages/*.php', 'pages', 'page.tmpl'),)
""",
)
with cd(target_dir):
__main__.main(["build"])
|
import json
import sys
import matplotlib.collections as mplc
import matplotlib.patches as mpl_patches
import matplotlib.pyplot as plt
import numpy as np
class DraggableXRange:
def __init__(self, figure, updater):
self.figure = figure
self.span = None
self.start = None
self.end = None
self.background = None
self.updater = updater
def connect(self):
'connect to all the events we need'
self.cidpress = self.figure.canvas.mpl_connect(
'button_press_event', self.on_press)
self.cidrelease = self.figure.canvas.mpl_connect(
'button_release_event', self.on_release)
self.cidmotion = self.figure.canvas.mpl_connect(
'motion_notify_event', self.on_motion)
def on_press(self, event):
'on button press we will see if the mouse is over us and store some data'
if event.button != 3:
# Only continue for right mouse button
return
if self.span is not None:
return
self.start = event.xdata
self.end = event.xdata
self.span = plt.axvspan(self.start, self.end, color='blue', alpha=0.5)
# draw everything but the selected rectangle and store the pixel buffer
canvas = self.figure.canvas
axes = self.span.axes
canvas.draw()
self.background = canvas.copy_from_bbox(self.span.axes.bbox)
# now redraw just the rectangle
axes.draw_artist(self.span)
# and blit just the redrawn area
canvas.blit(axes.bbox)
self.updater.update(self.start, self.end)
def on_motion(self, event):
'on motion we will move the rect if the mouse is over us'
if self.span is None:
return
self.span.remove()
self.end = event.xdata
self.span = plt.axvspan(self.start, self.end, color='blue', alpha=0.5)
canvas = self.figure.canvas
axes = self.span.axes
# restore the background region
canvas.restore_region(self.background)
# Save the new background
self.background = canvas.copy_from_bbox(self.span.axes.bbox)
# redraw just the current rectangle
axes.draw_artist(self.span)
# blit just the redrawn area
canvas.blit(axes.bbox)
self.updater.update(self.start, self.end)
def on_release(self, event):
'on release we reset the press data'
if event.button != 3:
# Only continue for right mouse button
return
if self.span is None:
return
self.span.remove()
self.start = None
self.end = None
self.span = None
self.background = None
# redraw the full figure
self.figure.canvas.draw()
self.updater.update(self.start, self.end)
def disconnect(self):
'disconnect all the stored connection ids'
self.figure.canvas.mpl_disconnect(self.cidpress)
self.figure.canvas.mpl_disconnect(self.cidrelease)
self.figure.canvas.mpl_disconnect(self.cidmotion)
class SelectionUpdate:
def __init__(self, figure, ax, start_times, latencies):
self.text = None
self.figure = figure
self.ax = ax
self.start_times = start_times
self.latencies = latencies
def update(self, start, end):
if self.text is not None:
axes = self.text.axes
self.text.remove()
self.text = None
self.figure.canvas.blit(axes.bbox)
if start is None:
assert end is None
return
start, end = min(start, end), max(start, end)
if start == end:
return
active_start_indexes = []
for start_time in self.start_times:
      for i in range(len(start_time)):
if start_time[i] >= start:
active_start_indexes.append(i)
break
active_stop_indexes = []
for start_time, latency in zip(self.start_times, self.latencies):
      for i in range(len(start_time) - 1, -1, -1):
if start_time[i] + latency[i] <= end:
active_stop_indexes.append(i + 1)
break
active_latencies = [
self.latencies[i][active_start_indexes[i]:active_stop_indexes[i]]
for i in range(len(self.latencies))]
all_active_latencies = np.concatenate(active_latencies)
qps = len(all_active_latencies) / (end - start)
latency_min = min(all_active_latencies)
latency_max = max(all_active_latencies)
latency_avg = sum(all_active_latencies) / len(all_active_latencies)
latency_stddev = np.std(all_active_latencies)
text_str = ('Duration: %s\nQPS: %s\nlatency min: %s\nlatency max: %s\n'
'latency avg: %s\nlatency stddev: %s'
% (end - start, qps, latency_min, latency_max,
latency_avg, latency_stddev))
# place a text box in upper left in axes coords
props = dict(boxstyle='round', facecolor='wheat', alpha=0.5)
self.text = self.ax.text(0.05, 0.95, text_str,
transform=self.ax.transAxes, fontsize=14,
verticalalignment='top', bbox=props)
# redraw just the text
self.text.axes.draw_artist(self.text)
# blit just the redrawn area
self.figure.canvas.blit(self.text.axes.bbox)
def GenerateObjectTimeline(file_name, start_times, latencies):
print("Generating object timeline")
assert len(start_times) == len(latencies)
rects = []
for i, worker_times in enumerate(zip(start_times, latencies)):
for j, (start_time, latency) in enumerate(np.vstack(worker_times).T):
rect = mpl_patches.Rectangle((start_time, i + 0.5), latency, 1.0,
color=(0.5 * (j % 2) + 0.5, 0, 0),
linewidth=0)
rects.append(rect)
pc = mplc.PatchCollection(rects, match_original=True)
fig, ax = plt.subplots(figsize=(30, 5))
ax.add_collection(pc)
ax.autoscale()
ax.margins(0.1)
print("Saving figure as %s" % file_name)
plt.savefig(file_name, bbox_inches='tight', dpi=1200)
print("Figured saved. Rendering figure...")
selection = DraggableXRange(fig, SelectionUpdate(fig, ax, start_times,
latencies))
selection.connect()
plt.show()
selection.disconnect()
def LoadWorkerOutput(output):
"""Load output from worker processes to our internal format.
Args:
output: list of strings. The stdouts of all worker processes.
Returns:
A tuple of start_time, latency, size. Each of these is a list of
numpy arrays, one array per worker process. start_time[i],
latency[i], and size[i] together form a table giving the start
time, latency, and size (bytes transmitted or received) of all
send/receive operations for worker i.
start_time holds POSIX timestamps, stored as np.float64. latency
holds times in seconds, stored as np.float64. size holds sizes in
bytes, stored as np.int64.
Example:
start_time[i] latency[i] size[i]
------------- ---------- -------
0.0 0.5 100
1.0 0.7 200
2.3 0.3 100
Raises:
AssertionError, if an individual worker doesn't have the same number of
start_times, latencies, or sizes.
"""
start_times = []
latencies = []
sizes = []
for worker_out in output:
json_out = json.loads(worker_out)
for stream in json_out:
assert len(stream['start_times']) == len(stream['latencies'])
assert len(stream['latencies']) == len(stream['sizes'])
start_times.append(np.asarray(stream['start_times'], dtype=np.float64))
latencies.append(np.asarray(stream['latencies'], dtype=np.float64))
sizes.append(np.asarray(stream['sizes'], dtype=np.int64))
return start_times, latencies, sizes
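# Hedged illustration (not part of the original script): build one synthetic
# worker-output record in the format LoadWorkerOutput expects and parse it.
# The numbers mirror the docstring example above and are invented.
def _ExampleLoadWorkerOutput():
  worker_json = json.dumps([{
      'start_times': [0.0, 1.0, 2.3],
      'latencies': [0.5, 0.7, 0.3],
      'sizes': [100, 200, 100],
  }])
  start_times, latencies, sizes = LoadWorkerOutput([worker_json])
  assert start_times[0].dtype == np.float64
  assert sizes[0].tolist() == [100, 200, 100]
  return start_times, latencies, sizes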
def main():
worker_output = None
print("Reading worker output")
with open(sys.argv[1], 'r') as worker_out_file:
worker_output = json.loads(worker_out_file.read())
print("Parsing worker output")
start_times, latencies, _ = LoadWorkerOutput(worker_output)
GenerateObjectTimeline(sys.argv[2], start_times, latencies)
########################################
if __name__ == '__main__':
main()
|
from .encoding import default_encode
import sys
def emergency_dump_state(state, open_file=open, dump=None, stderr=None):
"""Dump message state to stdout or file."""
from pprint import pformat
from tempfile import mktemp
stderr = sys.stderr if stderr is None else stderr
if dump is None:
import pickle
dump = pickle.dump
persist = mktemp()
print(f'EMERGENCY DUMP STATE TO FILE -> {persist} <-', # noqa
file=stderr)
fh = open_file(persist, 'w')
try:
try:
dump(state, fh, protocol=0)
except Exception as exc:
print( # noqa
f'Cannot pickle state: {exc!r}. Fallback to pformat.',
file=stderr,
)
fh.write(default_encode(pformat(state)))
finally:
fh.flush()
fh.close()
return persist
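# Hedged usage sketch (illustrative only): the payload below is invented; the
# return value is the path of the temporary file the state was written to.
def _example_emergency_dump():
    return emergency_dump_state({'body': 'example payload', 'retries': 3})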
|
import base64
import pytest
from urllib.request import urlopen, Request
from urllib.parse import urlencode
from urllib.error import HTTPError
import vcr
import json
from assertions import assert_cassette_has_one_response, assert_is_json
def _request_with_auth(url, username, password):
request = Request(url)
base64string = base64.b64encode(username.encode("ascii") + b":" + password.encode("ascii"))
request.add_header(b"Authorization", b"Basic " + base64string)
return urlopen(request)
def _find_header(cassette, header):
return any(header in request.headers for request in cassette.requests)
def test_filter_basic_auth(tmpdir, httpbin):
url = httpbin.url + "/basic-auth/user/passwd"
cass_file = str(tmpdir.join("basic_auth_filter.yaml"))
my_vcr = vcr.VCR(match_on=["uri", "method", "headers"])
# 2 requests, one with auth failure and one with auth success
with my_vcr.use_cassette(cass_file, filter_headers=["authorization"]):
with pytest.raises(HTTPError):
resp = _request_with_auth(url, "user", "wrongpasswd")
assert resp.getcode() == 401
resp = _request_with_auth(url, "user", "passwd")
assert resp.getcode() == 200
# make same 2 requests, this time both served from cassette.
with my_vcr.use_cassette(cass_file, filter_headers=["authorization"]) as cass:
with pytest.raises(HTTPError):
resp = _request_with_auth(url, "user", "wrongpasswd")
assert resp.getcode() == 401
resp = _request_with_auth(url, "user", "passwd")
assert resp.getcode() == 200
# authorization header should not have been recorded
assert not _find_header(cass, "authorization")
assert len(cass) == 2
def test_filter_querystring(tmpdir, httpbin):
url = httpbin.url + "/?foo=bar"
cass_file = str(tmpdir.join("filter_qs.yaml"))
with vcr.use_cassette(cass_file, filter_query_parameters=["foo"]):
urlopen(url)
with vcr.use_cassette(cass_file, filter_query_parameters=["foo"]) as cass:
urlopen(url)
assert "foo" not in cass.requests[0].url
def test_filter_post_data(tmpdir, httpbin):
url = httpbin.url + "/post"
data = urlencode({"id": "secret", "foo": "bar"}).encode("utf-8")
cass_file = str(tmpdir.join("filter_pd.yaml"))
with vcr.use_cassette(cass_file, filter_post_data_parameters=["id"]):
urlopen(url, data)
with vcr.use_cassette(cass_file, filter_post_data_parameters=["id"]) as cass:
assert b"id=secret" not in cass.requests[0].body
def test_filter_json_post_data(tmpdir, httpbin):
data = json.dumps({"id": "secret", "foo": "bar"}).encode("utf-8")
request = Request(httpbin.url + "/post", data=data)
request.add_header("Content-Type", "application/json")
cass_file = str(tmpdir.join("filter_jpd.yaml"))
with vcr.use_cassette(cass_file, filter_post_data_parameters=["id"]):
urlopen(request)
with vcr.use_cassette(cass_file, filter_post_data_parameters=["id"]) as cass:
assert b'"id": "secret"' not in cass.requests[0].body
def test_filter_callback(tmpdir, httpbin):
url = httpbin.url + "/get"
cass_file = str(tmpdir.join("basic_auth_filter.yaml"))
def before_record_cb(request):
if request.path != "/get":
return request
# Test the legacy keyword.
my_vcr = vcr.VCR(before_record=before_record_cb)
with my_vcr.use_cassette(cass_file, filter_headers=["authorization"]) as cass:
urlopen(url)
assert len(cass) == 0
my_vcr = vcr.VCR(before_record_request=before_record_cb)
with my_vcr.use_cassette(cass_file, filter_headers=["authorization"]) as cass:
urlopen(url)
assert len(cass) == 0
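# Hedged sketch (not one of the original tests): a before_record callback can
# also rewrite a request instead of dropping it; here an (assumed) authorization
# header is replaced in place before the interaction is written to the cassette.
def _scrub_authorization(request):
    if "authorization" in request.headers:
        request.headers["authorization"] = "REDACTED"
    return request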
def test_decompress_gzip(tmpdir, httpbin):
url = httpbin.url + "/gzip"
request = Request(url, headers={"Accept-Encoding": ["gzip, deflate"]})
cass_file = str(tmpdir.join("gzip_response.yaml"))
with vcr.use_cassette(cass_file, decode_compressed_response=True):
urlopen(request)
with vcr.use_cassette(cass_file) as cass:
decoded_response = urlopen(url).read()
assert_cassette_has_one_response(cass)
assert_is_json(decoded_response)
def test_decompress_deflate(tmpdir, httpbin):
url = httpbin.url + "/deflate"
request = Request(url, headers={"Accept-Encoding": ["gzip, deflate"]})
cass_file = str(tmpdir.join("deflate_response.yaml"))
with vcr.use_cassette(cass_file, decode_compressed_response=True):
urlopen(request)
with vcr.use_cassette(cass_file) as cass:
decoded_response = urlopen(url).read()
assert_cassette_has_one_response(cass)
assert_is_json(decoded_response)
def test_decompress_regular(tmpdir, httpbin):
"""Test that it doesn't try to decompress content that isn't compressed"""
url = httpbin.url + "/get"
cass_file = str(tmpdir.join("noncompressed_response.yaml"))
with vcr.use_cassette(cass_file, decode_compressed_response=True):
urlopen(url)
with vcr.use_cassette(cass_file) as cass:
resp = urlopen(url).read()
assert_cassette_has_one_response(cass)
assert_is_json(resp)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from absl import logging
from compare_gan.metrics import eval_task
import tensorflow as tf
import tensorflow_gan as tfgan
class InceptionScoreTask(eval_task.EvalTask):
"""Task that computes inception score for the generated images."""
_LABEL = "inception_score"
def run_after_session(self, fake_dset, real_dest):
del real_dest
logging.info("Computing inception score.")
with tf.Graph().as_default():
fake_logits = tf.convert_to_tensor(fake_dset.logits)
inception_score = tfgan.eval.classifier_score_from_logits(fake_logits)
with self._create_session() as sess:
inception_score = sess.run(inception_score)
logging.info("Inception score: %.3f", inception_score)
return {self._LABEL: inception_score}
|
import argparse
import logging
import sys
from typing import Sequence
from paasta_tools.kubernetes_tools import CustomResourceDefinition
from paasta_tools.kubernetes_tools import delete_custom_resource
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.kubernetes_tools import list_custom_resources
from paasta_tools.kubernetes_tools import load_custom_resource_definitions
from paasta_tools.kubernetes_tools import paasta_prefixed
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import load_all_configs
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Cleanup custom_resources.")
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
"-v", "--verbose", action="store_true", dest="verbose", default=False
)
parser.add_argument(
"-c", "--cluster", default=None, help="Cluster to cleanup CRs for"
)
args = parser.parse_args()
return args
def main() -> None:
args = parse_args()
soa_dir = args.soa_dir
if args.verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.INFO)
kube_client = KubeClient()
system_paasta_config = load_system_paasta_config()
cluster = args.cluster or system_paasta_config.get_cluster()
custom_resource_definitions = load_custom_resource_definitions(system_paasta_config)
cleanup_kube_succeeded = cleanup_all_custom_resources(
kube_client=kube_client,
soa_dir=soa_dir,
cluster=cluster,
custom_resource_definitions=custom_resource_definitions,
)
sys.exit(0 if cleanup_kube_succeeded else 1)
def cleanup_all_custom_resources(
kube_client: KubeClient,
soa_dir: str,
cluster: str,
custom_resource_definitions: Sequence[CustomResourceDefinition],
) -> bool:
cluster_crds = {
crd.spec.names.kind
for crd in kube_client.apiextensions.list_custom_resource_definition(
label_selector=paasta_prefixed("service")
).items
}
log.debug(f"CRDs found: {cluster_crds}")
results = []
for crd in custom_resource_definitions:
if crd.kube_kind.singular not in cluster_crds:
# TODO: kube_kind.singular seems to correspond to `crd.names.kind`
# and not `crd.names.singular`
log.warning(f"CRD {crd.kube_kind.singular} " f"not found in {cluster}")
continue
config_dicts = load_all_configs(
cluster=cluster, file_prefix=crd.file_prefix, soa_dir=soa_dir
)
if not config_dicts:
continue
crs = list_custom_resources(
kube_client=kube_client,
kind=crd.kube_kind,
version=crd.version,
group=crd.group,
)
for cr in crs:
service = config_dicts.get(cr.service)
if service is not None:
instance = service.get(cr.instance)
if instance is not None:
continue
result = False
try:
delete_custom_resource(
kube_client=kube_client,
name=cr.name,
namespace=cr.namespace,
plural=crd.kube_kind.plural,
version=crd.version,
group=crd.group,
)
result = True
except Exception:
log.exception("Error while deleting CR {cr.name}")
results.append(result)
return all(results) if results else True
if __name__ == "__main__":
main()
|
import mock
import pytest
from paasta_tools import setup_kubernetes_cr
from paasta_tools.kubernetes_tools import KubeCustomResource
from paasta_tools.utils import SystemPaastaConfig
def test_main():
with mock.patch(
"paasta_tools.setup_kubernetes_cr.KubeClient", autospec=True
), mock.patch(
"paasta_tools.setup_kubernetes_cr.setup_all_custom_resources", autospec=True
) as mock_setup:
mock_setup.return_value = True
with pytest.raises(SystemExit) as e:
setup_kubernetes_cr.main()
assert e.value.code == 0
mock_setup.return_value = False
with pytest.raises(SystemExit) as e:
setup_kubernetes_cr.main()
assert e.value.code == 1
def test_setup_all_custom_resources():
with mock.patch(
"paasta_tools.setup_kubernetes_cr.ensure_namespace", autospec=True
), mock.patch(
"paasta_tools.setup_kubernetes_cr.load_all_configs", autospec=True
), mock.patch(
"paasta_tools.setup_kubernetes_cr.setup_custom_resources", autospec=True
) as mock_setup, mock.patch(
"paasta_tools.setup_kubernetes_cr.load_custom_resource_definitions",
autospec=True,
) as mock_load_custom_resources:
mock_system_config = mock.Mock(
get_cluster=mock.Mock(return_value="westeros-prod")
)
# if some CRs setup okay should return True
mock_setup.side_effect = [True, False]
mock_client = mock.Mock()
flink_crd = mock.Mock()
flink_crd.spec.names = mock.Mock(plural="flinkclusters", kind="FlinkCluster")
cassandra_crd = mock.Mock()
cassandra_crd.spec.names = mock.Mock(
plural="cassandraclusters", kind="CassandraCluster"
)
mock_client.apiextensions.list_custom_resource_definition.return_value = mock.Mock(
items=[flink_crd, cassandra_crd]
)
custom_resource_definitions = [
mock.Mock(
kube_kind=mock.Mock(plural="flinkclusters", singular="FlinkCluster")
),
mock.Mock(
kube_kind=mock.Mock(
plural="cassandraclusters", singular="CassandraCluster"
)
),
]
assert setup_kubernetes_cr.setup_all_custom_resources(
mock_client,
"/nail/soa",
mock_system_config,
custom_resource_definitions=custom_resource_definitions,
)
mock_load_custom_resources.return_value = [
mock.Mock(plural="flinks"),
mock.Mock(plural="cassandraclusters"),
]
mock_setup.side_effect = [True, True]
mock_system_config = mock.Mock(
get_cluster=mock.Mock(return_value="westeros-prod")
)
# if all CRs setup okay should return True
assert setup_kubernetes_cr.setup_all_custom_resources(
mock_client,
"/nail/soa",
mock_system_config,
custom_resource_definitions=custom_resource_definitions,
)
mock_load_custom_resources.return_value = []
mock_system_config = mock.Mock(
get_cluster=mock.Mock(return_value="westeros-prod")
)
assert setup_kubernetes_cr.setup_all_custom_resources(
mock_client, "/nail/soa", mock_system_config, custom_resource_definitions=[]
)
mock_setup.side_effect = []
# if no CRs setup should return True
assert setup_kubernetes_cr.setup_all_custom_resources(
mock_client, "/nail/soa", mock_system_config, custom_resource_definitions=[]
)
mock_setup.side_effect = [False, False]
        # with no custom resource definitions passed, setup is never attempted, so this should still return True
assert setup_kubernetes_cr.setup_all_custom_resources(
mock_client, "/nail/soa", mock_system_config, custom_resource_definitions=[]
)
def test_load_all_configs():
with mock.patch(
"paasta_tools.kubernetes_tools.service_configuration_lib.read_extra_service_information",
autospec=True,
) as mock_read_info, mock.patch("os.listdir", autospec=True) as mock_oslist:
mock_oslist.return_value = ["kurupt", "mc"]
ret = setup_kubernetes_cr.load_all_configs(
cluster="westeros-prod", file_prefix="thing", soa_dir="/nail/soa"
)
mock_read_info.assert_has_calls(
[
mock.call("mc", "thing-westeros-prod", soa_dir="/nail/soa"),
mock.call("kurupt", "thing-westeros-prod", soa_dir="/nail/soa"),
],
any_order=True,
)
assert "kurupt" in ret.keys()
assert "mc" in ret.keys()
def test_setup_custom_resources():
with mock.patch(
"paasta_tools.setup_kubernetes_cr.list_custom_resources", autospec=True
) as mock_list_cr, mock.patch(
"paasta_tools.setup_kubernetes_cr.reconcile_kubernetes_resource", autospec=True
) as mock_reconcile_kubernetes_resource:
mock_client = mock.Mock()
mock_kind = mock.Mock()
mock_crd = mock.Mock()
assert setup_kubernetes_cr.setup_custom_resources(
kube_client=mock_client,
kind=mock_kind,
version="v1",
config_dicts={},
group="yelp.com",
cluster="mycluster",
crd=mock_crd,
)
mock_reconcile_kubernetes_resource.side_effect = [True, False]
assert not setup_kubernetes_cr.setup_custom_resources(
kube_client=mock_client,
kind=mock_kind,
version="v1",
config_dicts={"kurupt": "something", "mc": "another"},
group="yelp.com",
cluster="mycluster",
crd=mock_crd,
)
mock_reconcile_kubernetes_resource.side_effect = [True, True]
assert setup_kubernetes_cr.setup_custom_resources(
kube_client=mock_client,
kind=mock_kind,
version="v1",
config_dicts={"kurupt": "something", "mc": "another"},
group="yelp.com",
cluster="mycluster",
crd=mock_crd,
)
mock_reconcile_kubernetes_resource.assert_has_calls(
[
mock.call(
kube_client=mock_client,
service="kurupt",
instance_configs="something",
cluster="mycluster",
instance=None,
kind=mock_kind,
custom_resources=mock_list_cr.return_value,
version="v1",
group="yelp.com",
crd=mock_crd,
),
mock.call(
kube_client=mock_client,
service="mc",
instance_configs="another",
cluster="mycluster",
instance=None,
kind=mock_kind,
custom_resources=mock_list_cr.return_value,
version="v1",
group="yelp.com",
crd=mock_crd,
),
]
)
def test_format_custom_resource():
with mock.patch(
"paasta_tools.setup_kubernetes_cr.get_config_hash", autospec=True
) as mock_get_config_hash, mock.patch(
"paasta_tools.setup_kubernetes_cr.load_system_paasta_config", autospec=True
) as mock_load_system_paasta_config:
mock_load_system_paasta_config.return_value = SystemPaastaConfig(
{"dashboard_links": {}}, ""
)
expected = {
"apiVersion": "yelp.com/v1",
"kind": "flink",
"metadata": {
"name": "kurupt--fm-radio--station",
"namespace": "paasta-flinks",
"labels": {
"yelp.com/paasta_service": "kurupt_fm",
"yelp.com/paasta_instance": "radio_station",
"yelp.com/paasta_cluster": "mycluster",
"yelp.com/paasta_config_sha": mock_get_config_hash.return_value,
"paasta.yelp.com/service": "kurupt_fm",
"paasta.yelp.com/instance": "radio_station",
"paasta.yelp.com/cluster": "mycluster",
"paasta.yelp.com/config_sha": mock_get_config_hash.return_value,
"paasta.yelp.com/git_sha": "gitsha",
},
"annotations": {
"yelp.com/desired_state": "running",
"paasta.yelp.com/desired_state": "running",
"paasta.yelp.com/dashboard_base_url": "http://flink.k8s.paasta-mycluster.yelp:31080/",
},
},
"spec": {"dummy": "conf"},
}
assert (
setup_kubernetes_cr.format_custom_resource(
instance_config={"dummy": "conf"},
service="kurupt_fm",
instance="radio_station",
cluster="mycluster",
kind="flink",
version="v1",
group="yelp.com",
namespace="paasta-flinks",
git_sha="gitsha",
)
== expected
)
def test_paasta_config_flink_dashboard_base_url():
with mock.patch(
"paasta_tools.setup_kubernetes_cr.load_system_paasta_config", autospec=True
) as mock_load_system_paasta_config:
mock_load_system_paasta_config.return_value = SystemPaastaConfig(
{
"dashboard_links": {
"mycluster": {"Flink": "http://flink.paasta-mycluster.yelp"}
}
},
"",
)
expected = "http://flink.paasta-mycluster.yelp/"
assert (
setup_kubernetes_cr.get_dashboard_base_url(
kind="flink", cluster="mycluster",
)
== expected
)
@mock.patch(
"paasta_tools.setup_kubernetes_cr.LONG_RUNNING_INSTANCE_TYPE_HANDLERS",
autospec=True,
)
def test_reconcile_kubernetes_resource(mock_LONG_RUNNING_INSTANCE_TYPE_HANDLERS):
with mock.patch(
"paasta_tools.setup_kubernetes_cr.format_custom_resource", autospec=True
) as mock_format_custom_resource, mock.patch(
"paasta_tools.setup_kubernetes_cr.create_custom_resource", autospec=True
) as mock_create_custom_resource, mock.patch(
"paasta_tools.setup_kubernetes_cr.update_custom_resource", autospec=True
) as mock_update_custom_resource:
mock_kind = mock.Mock(singular="flink", plural="flinks")
mock_custom_resources = [
KubeCustomResource(
service="kurupt",
instance="fm",
config_sha="conf123",
git_sha="git123",
kind="flink",
name="foo",
namespace="paasta-flinks",
)
]
mock_client = mock.Mock()
# no instances, do nothing
assert setup_kubernetes_cr.reconcile_kubernetes_resource(
kube_client=mock_client,
service="mc",
instance_configs={},
cluster="mycluster",
custom_resources=mock_custom_resources,
kind=mock_kind,
version="v1",
group="yelp.com",
crd=mock.Mock(),
)
assert not mock_create_custom_resource.called
assert not mock_update_custom_resource.called
# instance up to date, do nothing
mock_format_custom_resource.return_value = {
"metadata": {
"labels": {
"yelp.com/paasta_config_sha": "conf123",
"paasta.yelp.com/config_sha": "conf123",
"paasta.yelp.com/git_sha": "git123",
},
"name": "foo",
"namespace": "paasta-flinks",
}
}
assert setup_kubernetes_cr.reconcile_kubernetes_resource(
kube_client=mock_client,
service="kurupt",
instance_configs={"fm": {"some": "config"}},
cluster="cluster",
custom_resources=mock_custom_resources,
kind=mock_kind,
version="v1",
group="yelp.com",
crd=mock.Mock(),
)
assert not mock_create_custom_resource.called
assert not mock_update_custom_resource.called
# instance diff config, update
mock_format_custom_resource.return_value = {
"metadata": {
"labels": {"paasta.yelp.com/config_sha": "conf456"},
"name": "foo",
"namespace": "paasta-flinks",
}
}
assert setup_kubernetes_cr.reconcile_kubernetes_resource(
kube_client=mock_client,
service="kurupt",
instance_configs={"fm": {"some": "config"}},
cluster="mycluster",
custom_resources=mock_custom_resources,
kind=mock_kind,
version="v1",
group="yelp.com",
crd=mock.Mock(),
)
assert not mock_create_custom_resource.called
mock_update_custom_resource.assert_called_with(
kube_client=mock_client,
name="kurupt-fm",
version="v1",
kind=mock_kind,
formatted_resource=mock_format_custom_resource.return_value,
group="yelp.com",
)
# instance not exist, create
assert setup_kubernetes_cr.reconcile_kubernetes_resource(
kube_client=mock_client,
service="mc",
instance_configs={"grindah": {"some": "conf"}},
cluster="mycluster",
custom_resources=mock_custom_resources,
kind=mock_kind,
version="v1",
group="yelp.com",
crd=mock.Mock(),
)
mock_create_custom_resource.assert_called_with(
kube_client=mock_client,
version="v1",
kind=mock_kind,
formatted_resource=mock_format_custom_resource.return_value,
group="yelp.com",
)
# instance not exist, create but error with k8s
mock_create_custom_resource.side_effect = Exception
assert not setup_kubernetes_cr.reconcile_kubernetes_resource(
kube_client=mock_client,
service="mc",
instance_configs={"grindah": {"some": "conf"}},
cluster="mycluster",
custom_resources=mock_custom_resources,
kind=mock_kind,
version="v1",
group="yelp.com",
crd=mock.Mock(),
)
mock_create_custom_resource.assert_called_with(
kube_client=mock_client,
version="v1",
kind=mock_kind,
formatted_resource=mock_format_custom_resource.return_value,
group="yelp.com",
)
|
import collections
import html
from typing import TYPE_CHECKING, Dict, MutableMapping, Optional, Sequence
import attr
from PyQt5.QtCore import (pyqtSlot, pyqtSignal, QCoreApplication, QUrl,
QByteArray)
from PyQt5.QtNetwork import (QNetworkAccessManager, QNetworkReply, QSslSocket,
QSslError)
from qutebrowser.config import config
from qutebrowser.utils import (message, log, usertypes, utils, objreg,
urlutils, debug)
from qutebrowser.browser import shared
from qutebrowser.browser.network import proxy as proxymod
from qutebrowser.extensions import interceptors
from qutebrowser.browser.webkit import certificateerror, cookies, cache
from qutebrowser.browser.webkit.network import (webkitqutescheme, networkreply,
filescheme)
from qutebrowser.misc import objects
if TYPE_CHECKING:
from qutebrowser.mainwindow import prompt
HOSTBLOCK_ERROR_STRING = '%HOSTBLOCK%'
_proxy_auth_cache: Dict['ProxyId', 'prompt.AuthInfo'] = {}
@attr.s(frozen=True)
class ProxyId:
"""Information identifying a proxy server."""
type = attr.ib()
hostname = attr.ib()
port = attr.ib()
def _is_secure_cipher(cipher):
"""Check if a given SSL cipher (hopefully) isn't broken yet."""
tokens = [e.upper() for e in cipher.name().split('-')]
if cipher.usedBits() < 128:
# https://codereview.qt-project.org/#/c/75943/
return False
# OpenSSL should already protect against this in a better way
elif cipher.keyExchangeMethod() == 'DH' and utils.is_windows:
# https://weakdh.org/
return False
elif cipher.encryptionMethod().upper().startswith('RC4'):
# http://en.wikipedia.org/wiki/RC4#Security
# https://codereview.qt-project.org/#/c/148906/
return False
elif cipher.encryptionMethod().upper().startswith('DES'):
# http://en.wikipedia.org/wiki/Data_Encryption_Standard#Security_and_cryptanalysis
return False
elif 'MD5' in tokens:
# http://www.win.tue.nl/hashclash/rogue-ca/
return False
# OpenSSL should already protect against this in a better way
# elif (('CBC3' in tokens or 'CBC' in tokens) and (cipher.protocol() not in
# [QSsl.TlsV1_0, QSsl.TlsV1_1, QSsl.TlsV1_2])):
# # http://en.wikipedia.org/wiki/POODLE
# return False
### These things should never happen as those are already filtered out by
### either the SSL libraries or Qt - but let's be sure.
elif cipher.authenticationMethod() in ['aNULL', 'NULL']:
# Ciphers without authentication.
return False
elif cipher.encryptionMethod() in ['eNULL', 'NULL']:
# Ciphers without encryption.
return False
elif 'EXP' in tokens or 'EXPORT' in tokens:
# Weak export-grade ciphers
return False
elif 'ADH' in tokens:
# No MITM protection
return False
### This *should* happen ;)
else:
return True
def init():
"""Disable insecure SSL ciphers on old Qt versions."""
default_ciphers = QSslSocket.defaultCiphers()
log.init.vdebug( # type: ignore[attr-defined]
"Default Qt ciphers: {}".format(
', '.join(c.name() for c in default_ciphers)))
good_ciphers = []
bad_ciphers = []
for cipher in default_ciphers:
if _is_secure_cipher(cipher):
good_ciphers.append(cipher)
else:
bad_ciphers.append(cipher)
if bad_ciphers:
log.init.debug("Disabling bad ciphers: {}".format(
', '.join(c.name() for c in bad_ciphers)))
QSslSocket.setDefaultCiphers(good_ciphers)
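# Illustrative note, not part of the original module: init() relies on
# _is_secure_cipher() to drop suites built on weak primitives. For example,
# cipher names like "RC4-SHA" or "EXP-RC4-MD5" would be removed from the
# default list, while a modern suite such as "ECDHE-RSA-AES128-GCM-SHA256"
# (128+ bits, no RC4/DES/MD5/export tokens) would be kept.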
_SavedErrorsType = MutableMapping[urlutils.HostTupleType, Sequence[QSslError]]
class NetworkManager(QNetworkAccessManager):
"""Our own QNetworkAccessManager.
Attributes:
adopted_downloads: If downloads are running with this QNAM but the
associated tab gets closed already, the NAM gets
reparented to the DownloadManager. This counts the
still running downloads, so the QNAM can clean
itself up when this reaches zero again.
_scheme_handlers: A dictionary (scheme -> handler) of supported custom
schemes.
_win_id: The window ID this NetworkManager is associated with.
(or None for generic network managers)
_tab_id: The tab ID this NetworkManager is associated with.
(or None for generic network managers)
_rejected_ssl_errors: A {QUrl: [SslError]} dict of rejected errors.
_accepted_ssl_errors: A {QUrl: [SslError]} dict of accepted errors.
_private: Whether we're in private browsing mode.
netrc_used: Whether netrc authentication was performed.
Signals:
shutting_down: Emitted when the QNAM is shutting down.
"""
shutting_down = pyqtSignal()
def __init__(self, *, win_id, tab_id, private, parent=None):
log.init.debug("Initializing NetworkManager")
super().__init__(parent)
log.init.debug("NetworkManager init done")
self.adopted_downloads = 0
self._win_id = win_id
self._tab_id = tab_id
self._private = private
self._scheme_handlers = {
'qute': webkitqutescheme.handler,
'file': filescheme.handler,
}
self._set_cookiejar()
self._set_cache()
self.sslErrors.connect( # type: ignore[attr-defined]
self.on_ssl_errors)
self._rejected_ssl_errors: _SavedErrorsType = collections.defaultdict(list)
self._accepted_ssl_errors: _SavedErrorsType = collections.defaultdict(list)
self.authenticationRequired.connect( # type: ignore[attr-defined]
self.on_authentication_required)
self.proxyAuthenticationRequired.connect( # type: ignore[attr-defined]
self.on_proxy_authentication_required)
self.netrc_used = False
def _set_cookiejar(self):
"""Set the cookie jar of the NetworkManager correctly."""
if self._private:
cookie_jar = cookies.ram_cookie_jar
else:
cookie_jar = cookies.cookie_jar
assert cookie_jar is not None
# We have a shared cookie jar - we restore its parent so we don't
# take ownership of it.
self.setCookieJar(cookie_jar)
app = QCoreApplication.instance()
cookie_jar.setParent(app)
def _set_cache(self):
"""Set the cache of the NetworkManager correctly."""
if self._private:
return
# We have a shared cache - we restore its parent so we don't take
# ownership of it.
app = QCoreApplication.instance()
self.setCache(cache.diskcache)
cache.diskcache.setParent(app)
def _get_abort_signals(self, owner=None):
"""Get a list of signals which should abort a question."""
abort_on = [self.shutting_down]
if owner is not None:
abort_on.append(owner.destroyed)
# This might be a generic network manager, e.g. one belonging to a
# DownloadManager. In this case, just skip the webview thing.
if self._tab_id is not None:
assert self._win_id is not None
tab = objreg.get('tab', scope='tab', window=self._win_id,
tab=self._tab_id)
abort_on.append(tab.load_started)
return abort_on
def shutdown(self):
"""Abort all running requests."""
self.setNetworkAccessible(QNetworkAccessManager.NotAccessible)
self.shutting_down.emit()
# No @pyqtSlot here, see
# https://github.com/qutebrowser/qutebrowser/issues/2213
def on_ssl_errors(self, reply, errors): # noqa: C901 pragma: no mccabe
"""Decide if SSL errors should be ignored or not.
This slot is called on SSL/TLS errors by the self.sslErrors signal.
Args:
reply: The QNetworkReply that is encountering the errors.
errors: A list of errors.
"""
errors = [certificateerror.CertificateErrorWrapper(e) for e in errors]
log.network.debug("Certificate errors: {!r}".format(
' / '.join(str(err) for err in errors)))
try:
host_tpl: Optional[urlutils.HostTupleType] = urlutils.host_tuple(
reply.url())
except ValueError:
host_tpl = None
is_accepted = False
is_rejected = False
else:
assert host_tpl is not None
is_accepted = set(errors).issubset(
self._accepted_ssl_errors[host_tpl])
is_rejected = set(errors).issubset(
self._rejected_ssl_errors[host_tpl])
log.network.debug("Already accepted: {} / "
"rejected {}".format(is_accepted, is_rejected))
if is_rejected:
return
elif is_accepted:
reply.ignoreSslErrors()
return
abort_on = self._get_abort_signals(reply)
ignore = shared.ignore_certificate_errors(reply.url(), errors,
abort_on=abort_on)
if ignore:
reply.ignoreSslErrors()
err_dict = self._accepted_ssl_errors
else:
err_dict = self._rejected_ssl_errors
if host_tpl is not None:
err_dict[host_tpl] += errors
def clear_all_ssl_errors(self):
"""Clear all remembered SSL errors."""
self._accepted_ssl_errors.clear()
self._rejected_ssl_errors.clear()
@pyqtSlot(QUrl)
def clear_rejected_ssl_errors(self, url):
"""Clear the rejected SSL errors on a reload.
Args:
url: The URL to remove.
"""
        try:
            # The dict is keyed by host tuples rather than QUrls, so convert first.
            del self._rejected_ssl_errors[urlutils.host_tuple(url)]
        except (KeyError, ValueError):
            pass
@pyqtSlot('QNetworkReply*', 'QAuthenticator*')
def on_authentication_required(self, reply, authenticator):
"""Called when a website needs authentication."""
url = reply.url()
log.network.debug("Authentication requested for {}, netrc_used {}"
.format(url.toDisplayString(), self.netrc_used))
netrc_success = False
if not self.netrc_used:
self.netrc_used = True
netrc_success = shared.netrc_authentication(url, authenticator)
if not netrc_success:
log.network.debug("Asking for credentials")
abort_on = self._get_abort_signals(reply)
shared.authentication_required(url, authenticator,
abort_on=abort_on)
@pyqtSlot('QNetworkProxy', 'QAuthenticator*')
def on_proxy_authentication_required(self, proxy, authenticator):
"""Called when a proxy needs authentication."""
proxy_id = ProxyId(proxy.type(), proxy.hostName(), proxy.port())
if proxy_id in _proxy_auth_cache:
authinfo = _proxy_auth_cache[proxy_id]
authenticator.setUser(authinfo.user)
authenticator.setPassword(authinfo.password)
else:
msg = '<b>{}</b> says:<br/>{}'.format(
html.escape(proxy.hostName()),
html.escape(authenticator.realm()))
abort_on = self._get_abort_signals()
answer = message.ask(
title="Proxy authentication required", text=msg,
mode=usertypes.PromptMode.user_pwd, abort_on=abort_on)
if answer is not None:
authenticator.setUser(answer.user)
authenticator.setPassword(answer.password)
_proxy_auth_cache[proxy_id] = answer
@pyqtSlot()
def on_adopted_download_destroyed(self):
"""Check if we can clean up if an adopted download was destroyed.
See the description for adopted_downloads for details.
"""
self.adopted_downloads -= 1
log.downloads.debug("Adopted download destroyed, {} left.".format(
self.adopted_downloads))
assert self.adopted_downloads >= 0
if self.adopted_downloads == 0:
self.deleteLater()
@pyqtSlot(object) # DownloadItem
def adopt_download(self, download):
"""Adopt a new DownloadItem."""
self.adopted_downloads += 1
log.downloads.debug("Adopted download, {} adopted.".format(
self.adopted_downloads))
download.destroyed.connect(self.on_adopted_download_destroyed)
download.adopt_download.connect(self.adopt_download)
def set_referer(self, req, current_url):
"""Set the referer header."""
referer_header_conf = config.val.content.headers.referer
try:
if referer_header_conf == 'never':
# Note: using ''.encode('ascii') sends a header with no value,
# instead of no header at all
req.setRawHeader('Referer'.encode('ascii'), QByteArray())
elif (referer_header_conf == 'same-domain' and
not urlutils.same_domain(req.url(), current_url)):
req.setRawHeader('Referer'.encode('ascii'), QByteArray())
            # If referer_header_conf is set to 'always', we leave the header
            # alone, as QtWebKit already set it.
except urlutils.InvalidUrlError:
# req.url() or current_url can be invalid - this happens on
# https://www.playstation.com/ for example.
pass
def createRequest(self, op, req, outgoing_data):
"""Return a new QNetworkReply object.
Args:
op: Operation op
req: const QNetworkRequest & req
outgoing_data: QIODevice * outgoingData
Return:
A QNetworkReply.
"""
if proxymod.application_factory is not None:
proxy_error = proxymod.application_factory.get_error()
if proxy_error is not None:
return networkreply.ErrorNetworkReply(
req, proxy_error, QNetworkReply.UnknownProxyError,
self)
if not req.url().isValid():
log.network.debug("Ignoring invalid requested URL: {}".format(
req.url().errorString()))
return networkreply.ErrorNetworkReply(
req, "Invalid request URL", QNetworkReply.HostNotFoundError,
self)
for header, value in shared.custom_headers(url=req.url()):
req.setRawHeader(header, value)
# There are some scenarios where we can't figure out current_url:
# - There's a generic NetworkManager, e.g. for downloads
# - The download was in a tab which is now closed.
current_url = QUrl()
if self._tab_id is not None:
assert self._win_id is not None
try:
tab = objreg.get('tab', scope='tab', window=self._win_id,
tab=self._tab_id)
current_url = tab.url()
except (KeyError, RuntimeError):
# https://github.com/qutebrowser/qutebrowser/issues/889
# Catching RuntimeError because we could be in the middle of
# the webpage shutdown here.
current_url = QUrl()
request = interceptors.Request(first_party_url=current_url,
request_url=req.url())
interceptors.run(request)
if request.is_blocked:
return networkreply.ErrorNetworkReply(
req, HOSTBLOCK_ERROR_STRING, QNetworkReply.ContentAccessDenied,
self)
if 'log-requests' in objects.debug_flags:
operation = debug.qenum_key(QNetworkAccessManager, op)
operation = operation.replace('Operation', '').upper()
log.network.debug("{} {}, first-party {}".format(
operation,
req.url().toDisplayString(),
current_url.toDisplayString()))
scheme = req.url().scheme()
if scheme in self._scheme_handlers:
result = self._scheme_handlers[scheme](req, op, current_url)
if result is not None:
result.setParent(self)
return result
self.set_referer(req, current_url)
return super().createRequest(op, req, outgoing_data)
|
import voluptuous as vol
from homeassistant.components.device_automation import TRIGGER_BASE_SCHEMA
from homeassistant.components.device_automation.exceptions import (
InvalidDeviceAutomationConfig,
)
from homeassistant.components.homeassistant.triggers import event as event_trigger
from homeassistant.const import (
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_EVENT,
CONF_PLATFORM,
CONF_TYPE,
CONF_UNIQUE_ID,
)
from . import DOMAIN
from .const import LOGGER
from .deconz_event import CONF_DECONZ_EVENT, CONF_GESTURE
CONF_SUBTYPE = "subtype"
CONF_SHORT_PRESS = "remote_button_short_press"
CONF_SHORT_RELEASE = "remote_button_short_release"
CONF_LONG_PRESS = "remote_button_long_press"
CONF_LONG_RELEASE = "remote_button_long_release"
CONF_DOUBLE_PRESS = "remote_button_double_press"
CONF_TRIPLE_PRESS = "remote_button_triple_press"
CONF_QUADRUPLE_PRESS = "remote_button_quadruple_press"
CONF_QUINTUPLE_PRESS = "remote_button_quintuple_press"
CONF_ROTATED = "remote_button_rotated"
CONF_ROTATED_FAST = "remote_button_rotated_fast"
CONF_ROTATION_STOPPED = "remote_button_rotation_stopped"
CONF_AWAKE = "remote_awakened"
CONF_MOVE = "remote_moved"
CONF_DOUBLE_TAP = "remote_double_tap"
CONF_SHAKE = "remote_gyro_activated"
CONF_FREE_FALL = "remote_falling"
CONF_FLIP_90 = "remote_flip_90_degrees"
CONF_FLIP_180 = "remote_flip_180_degrees"
CONF_MOVE_ANY = "remote_moved_any_side"
CONF_DOUBLE_TAP_ANY = "remote_double_tap_any_side"
CONF_TURN_CW = "remote_turned_clockwise"
CONF_TURN_CCW = "remote_turned_counter_clockwise"
CONF_ROTATE_FROM_SIDE_1 = "remote_rotate_from_side_1"
CONF_ROTATE_FROM_SIDE_2 = "remote_rotate_from_side_2"
CONF_ROTATE_FROM_SIDE_3 = "remote_rotate_from_side_3"
CONF_ROTATE_FROM_SIDE_4 = "remote_rotate_from_side_4"
CONF_ROTATE_FROM_SIDE_5 = "remote_rotate_from_side_5"
CONF_ROTATE_FROM_SIDE_6 = "remote_rotate_from_side_6"
CONF_TURN_ON = "turn_on"
CONF_TURN_OFF = "turn_off"
CONF_DIM_UP = "dim_up"
CONF_DIM_DOWN = "dim_down"
CONF_LEFT = "left"
CONF_RIGHT = "right"
CONF_OPEN = "open"
CONF_CLOSE = "close"
CONF_BOTH_BUTTONS = "both_buttons"
CONF_TOP_BUTTONS = "top_buttons"
CONF_BOTTOM_BUTTONS = "bottom_buttons"
CONF_BUTTON_1 = "button_1"
CONF_BUTTON_2 = "button_2"
CONF_BUTTON_3 = "button_3"
CONF_BUTTON_4 = "button_4"
CONF_SIDE_1 = "side_1"
CONF_SIDE_2 = "side_2"
CONF_SIDE_3 = "side_3"
CONF_SIDE_4 = "side_4"
CONF_SIDE_5 = "side_5"
CONF_SIDE_6 = "side_6"
HUE_DIMMER_REMOTE_MODEL_GEN1 = "RWL020"
HUE_DIMMER_REMOTE_MODEL_GEN2 = "RWL021"
HUE_DIMMER_REMOTE = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1000},
(CONF_SHORT_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_LONG_PRESS, CONF_TURN_ON): {CONF_EVENT: 1001},
(CONF_LONG_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1003},
(CONF_SHORT_PRESS, CONF_DIM_UP): {CONF_EVENT: 2000},
(CONF_SHORT_RELEASE, CONF_DIM_UP): {CONF_EVENT: 2002},
(CONF_LONG_PRESS, CONF_DIM_UP): {CONF_EVENT: 2001},
(CONF_LONG_RELEASE, CONF_DIM_UP): {CONF_EVENT: 2003},
(CONF_SHORT_PRESS, CONF_DIM_DOWN): {CONF_EVENT: 3000},
(CONF_SHORT_RELEASE, CONF_DIM_DOWN): {CONF_EVENT: 3002},
(CONF_LONG_PRESS, CONF_DIM_DOWN): {CONF_EVENT: 3001},
(CONF_LONG_RELEASE, CONF_DIM_DOWN): {CONF_EVENT: 3003},
(CONF_SHORT_PRESS, CONF_TURN_OFF): {CONF_EVENT: 4000},
(CONF_SHORT_RELEASE, CONF_TURN_OFF): {CONF_EVENT: 4002},
(CONF_LONG_PRESS, CONF_TURN_OFF): {CONF_EVENT: 4001},
(CONF_LONG_RELEASE, CONF_TURN_OFF): {CONF_EVENT: 4003},
}
HUE_BUTTON_REMOTE_MODEL = "ROM001" # Hue smart button
HUE_BUTTON_REMOTE = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1000},
(CONF_SHORT_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_LONG_PRESS, CONF_TURN_ON): {CONF_EVENT: 1001},
(CONF_LONG_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1003},
}
HUE_TAP_REMOTE_MODEL = "ZGPSWITCH"
HUE_TAP_REMOTE = {
(CONF_SHORT_PRESS, CONF_BUTTON_1): {CONF_EVENT: 34},
(CONF_SHORT_PRESS, CONF_BUTTON_2): {CONF_EVENT: 16},
(CONF_SHORT_PRESS, CONF_BUTTON_3): {CONF_EVENT: 17},
(CONF_SHORT_PRESS, CONF_BUTTON_4): {CONF_EVENT: 18},
}
FRIENDS_OF_HUE_SWITCH_MODEL = "FOHSWITCH"
FRIENDS_OF_HUE_SWITCH = {
(CONF_SHORT_PRESS, CONF_BUTTON_1): {CONF_EVENT: 1000},
(CONF_SHORT_RELEASE, CONF_BUTTON_1): {CONF_EVENT: 1002},
(CONF_LONG_PRESS, CONF_BUTTON_1): {CONF_EVENT: 1001},
(CONF_LONG_RELEASE, CONF_BUTTON_1): {CONF_EVENT: 1003},
(CONF_SHORT_PRESS, CONF_BUTTON_2): {CONF_EVENT: 2000},
(CONF_SHORT_RELEASE, CONF_BUTTON_2): {CONF_EVENT: 2002},
(CONF_LONG_PRESS, CONF_BUTTON_2): {CONF_EVENT: 2001},
(CONF_LONG_RELEASE, CONF_BUTTON_2): {CONF_EVENT: 2003},
(CONF_SHORT_PRESS, CONF_BUTTON_3): {CONF_EVENT: 3000},
(CONF_SHORT_RELEASE, CONF_BUTTON_3): {CONF_EVENT: 3002},
(CONF_LONG_PRESS, CONF_BUTTON_3): {CONF_EVENT: 3001},
(CONF_LONG_RELEASE, CONF_BUTTON_3): {CONF_EVENT: 3003},
(CONF_SHORT_PRESS, CONF_BUTTON_4): {CONF_EVENT: 4000},
(CONF_SHORT_RELEASE, CONF_BUTTON_4): {CONF_EVENT: 4002},
(CONF_LONG_PRESS, CONF_BUTTON_4): {CONF_EVENT: 4001},
(CONF_LONG_RELEASE, CONF_BUTTON_4): {CONF_EVENT: 4003},
(CONF_SHORT_PRESS, CONF_TOP_BUTTONS): {CONF_EVENT: 5000},
(CONF_SHORT_RELEASE, CONF_TOP_BUTTONS): {CONF_EVENT: 5002},
(CONF_LONG_PRESS, CONF_TOP_BUTTONS): {CONF_EVENT: 5001},
(CONF_LONG_RELEASE, CONF_TOP_BUTTONS): {CONF_EVENT: 5003},
(CONF_SHORT_PRESS, CONF_BOTTOM_BUTTONS): {CONF_EVENT: 6000},
(CONF_SHORT_RELEASE, CONF_BOTTOM_BUTTONS): {CONF_EVENT: 6002},
(CONF_LONG_PRESS, CONF_BOTTOM_BUTTONS): {CONF_EVENT: 6001},
(CONF_LONG_RELEASE, CONF_BOTTOM_BUTTONS): {CONF_EVENT: 6003},
}
SYMFONISK_SOUND_CONTROLLER_MODEL = "SYMFONISK Sound Controller"
SYMFONISK_SOUND_CONTROLLER = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_DOUBLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1004},
(CONF_TRIPLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1005},
(CONF_ROTATED, CONF_LEFT): {CONF_EVENT: 2001},
(CONF_ROTATION_STOPPED, CONF_LEFT): {CONF_EVENT: 2003},
(CONF_ROTATED, CONF_RIGHT): {CONF_EVENT: 3001},
(CONF_ROTATION_STOPPED, CONF_RIGHT): {CONF_EVENT: 3003},
}
TRADFRI_ON_OFF_SWITCH_MODEL = "TRADFRI on/off switch"
TRADFRI_ON_OFF_SWITCH = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_LONG_PRESS, CONF_TURN_ON): {CONF_EVENT: 1001},
(CONF_LONG_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1003},
(CONF_SHORT_PRESS, CONF_TURN_OFF): {CONF_EVENT: 2002},
(CONF_LONG_PRESS, CONF_TURN_OFF): {CONF_EVENT: 2001},
(CONF_LONG_RELEASE, CONF_TURN_OFF): {CONF_EVENT: 2003},
}
TRADFRI_OPEN_CLOSE_REMOTE_MODEL = "TRADFRI open/close remote"
TRADFRI_OPEN_CLOSE_REMOTE = {
(CONF_SHORT_PRESS, CONF_OPEN): {CONF_EVENT: 1002},
(CONF_LONG_PRESS, CONF_OPEN): {CONF_EVENT: 1003},
(CONF_SHORT_PRESS, CONF_CLOSE): {CONF_EVENT: 2002},
(CONF_LONG_PRESS, CONF_CLOSE): {CONF_EVENT: 2003},
}
TRADFRI_REMOTE_MODEL = "TRADFRI remote control"
TRADFRI_REMOTE = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_LONG_PRESS, CONF_TURN_ON): {CONF_EVENT: 1001},
(CONF_SHORT_PRESS, CONF_DIM_UP): {CONF_EVENT: 2002},
(CONF_LONG_PRESS, CONF_DIM_UP): {CONF_EVENT: 2001},
(CONF_LONG_RELEASE, CONF_DIM_UP): {CONF_EVENT: 2003},
(CONF_SHORT_PRESS, CONF_DIM_DOWN): {CONF_EVENT: 3002},
(CONF_LONG_PRESS, CONF_DIM_DOWN): {CONF_EVENT: 3001},
(CONF_LONG_RELEASE, CONF_DIM_DOWN): {CONF_EVENT: 3003},
(CONF_SHORT_PRESS, CONF_LEFT): {CONF_EVENT: 4002},
(CONF_LONG_PRESS, CONF_LEFT): {CONF_EVENT: 4001},
(CONF_LONG_RELEASE, CONF_LEFT): {CONF_EVENT: 4003},
(CONF_SHORT_PRESS, CONF_RIGHT): {CONF_EVENT: 5002},
(CONF_LONG_PRESS, CONF_RIGHT): {CONF_EVENT: 5001},
(CONF_LONG_RELEASE, CONF_RIGHT): {CONF_EVENT: 5003},
}
TRADFRI_WIRELESS_DIMMER_MODEL = "TRADFRI wireless dimmer"
TRADFRI_WIRELESS_DIMMER = {
(CONF_ROTATED_FAST, CONF_LEFT): {CONF_EVENT: 4002},
(CONF_ROTATED, CONF_LEFT): {CONF_EVENT: 3002},
(CONF_ROTATED, CONF_RIGHT): {CONF_EVENT: 2002},
(CONF_ROTATED_FAST, CONF_RIGHT): {CONF_EVENT: 1002},
}
AQARA_CUBE_MODEL = "lumi.sensor_cube"
AQARA_CUBE_MODEL_ALT1 = "lumi.sensor_cube.aqgl01"
AQARA_CUBE = {
(CONF_ROTATE_FROM_SIDE_1, CONF_SIDE_2): {CONF_EVENT: 2001},
(CONF_ROTATE_FROM_SIDE_1, CONF_SIDE_3): {CONF_EVENT: 3001},
(CONF_ROTATE_FROM_SIDE_1, CONF_SIDE_4): {CONF_EVENT: 4001},
(CONF_ROTATE_FROM_SIDE_1, CONF_SIDE_5): {CONF_EVENT: 5001},
(CONF_ROTATE_FROM_SIDE_1, CONF_SIDE_6): {CONF_EVENT: 6001},
(CONF_ROTATE_FROM_SIDE_2, CONF_SIDE_1): {CONF_EVENT: 1002},
(CONF_ROTATE_FROM_SIDE_2, CONF_SIDE_3): {CONF_EVENT: 3002},
(CONF_ROTATE_FROM_SIDE_2, CONF_SIDE_4): {CONF_EVENT: 4002},
(CONF_ROTATE_FROM_SIDE_2, CONF_SIDE_5): {CONF_EVENT: 5002},
(CONF_ROTATE_FROM_SIDE_2, CONF_SIDE_6): {CONF_EVENT: 6002},
(CONF_ROTATE_FROM_SIDE_3, CONF_SIDE_1): {CONF_EVENT: 1003},
(CONF_ROTATE_FROM_SIDE_3, CONF_SIDE_2): {CONF_EVENT: 2003},
(CONF_ROTATE_FROM_SIDE_3, CONF_SIDE_4): {CONF_EVENT: 4003},
(CONF_ROTATE_FROM_SIDE_3, CONF_SIDE_5): {CONF_EVENT: 5003},
(CONF_ROTATE_FROM_SIDE_3, CONF_SIDE_6): {CONF_EVENT: 6003},
(CONF_ROTATE_FROM_SIDE_4, CONF_SIDE_1): {CONF_EVENT: 1004},
(CONF_ROTATE_FROM_SIDE_4, CONF_SIDE_2): {CONF_EVENT: 2004},
(CONF_ROTATE_FROM_SIDE_4, CONF_SIDE_3): {CONF_EVENT: 3004},
(CONF_ROTATE_FROM_SIDE_4, CONF_SIDE_5): {CONF_EVENT: 5004},
(CONF_ROTATE_FROM_SIDE_4, CONF_SIDE_6): {CONF_EVENT: 6004},
(CONF_ROTATE_FROM_SIDE_5, CONF_SIDE_1): {CONF_EVENT: 1005},
(CONF_ROTATE_FROM_SIDE_5, CONF_SIDE_2): {CONF_EVENT: 2005},
(CONF_ROTATE_FROM_SIDE_5, CONF_SIDE_3): {CONF_EVENT: 3005},
(CONF_ROTATE_FROM_SIDE_5, CONF_SIDE_4): {CONF_EVENT: 4005},
(CONF_ROTATE_FROM_SIDE_5, CONF_SIDE_6): {CONF_EVENT: 6005},
(CONF_ROTATE_FROM_SIDE_6, CONF_SIDE_1): {CONF_EVENT: 1006},
(CONF_ROTATE_FROM_SIDE_6, CONF_SIDE_2): {CONF_EVENT: 2006},
(CONF_ROTATE_FROM_SIDE_6, CONF_SIDE_3): {CONF_EVENT: 3006},
(CONF_ROTATE_FROM_SIDE_6, CONF_SIDE_4): {CONF_EVENT: 4006},
(CONF_ROTATE_FROM_SIDE_6, CONF_SIDE_5): {CONF_EVENT: 5006},
(CONF_MOVE, CONF_SIDE_1): {CONF_EVENT: 1000},
(CONF_MOVE, CONF_SIDE_2): {CONF_EVENT: 2000},
(CONF_MOVE, CONF_SIDE_3): {CONF_EVENT: 3000},
(CONF_MOVE, CONF_SIDE_4): {CONF_EVENT: 4000},
(CONF_MOVE, CONF_SIDE_5): {CONF_EVENT: 5000},
(CONF_MOVE, CONF_SIDE_6): {CONF_EVENT: 6000},
(CONF_DOUBLE_TAP, CONF_SIDE_1): {CONF_EVENT: 1001},
(CONF_DOUBLE_TAP, CONF_SIDE_2): {CONF_EVENT: 2002},
(CONF_DOUBLE_TAP, CONF_SIDE_3): {CONF_EVENT: 3003},
(CONF_DOUBLE_TAP, CONF_SIDE_4): {CONF_EVENT: 4004},
(CONF_DOUBLE_TAP, CONF_SIDE_5): {CONF_EVENT: 5005},
(CONF_DOUBLE_TAP, CONF_SIDE_6): {CONF_EVENT: 6006},
(CONF_AWAKE, ""): {CONF_GESTURE: 0},
(CONF_SHAKE, ""): {CONF_GESTURE: 1},
(CONF_FREE_FALL, ""): {CONF_GESTURE: 2},
(CONF_FLIP_90, ""): {CONF_GESTURE: 3},
(CONF_FLIP_180, ""): {CONF_GESTURE: 4},
(CONF_MOVE_ANY, ""): {CONF_GESTURE: 5},
(CONF_DOUBLE_TAP_ANY, ""): {CONF_GESTURE: 6},
(CONF_TURN_CW, ""): {CONF_GESTURE: 7},
(CONF_TURN_CCW, ""): {CONF_GESTURE: 8},
}
AQARA_DOUBLE_WALL_SWITCH_MODEL = "lumi.remote.b286acn01"
AQARA_DOUBLE_WALL_SWITCH_MODEL_2020 = "lumi.remote.b286acn02"
AQARA_DOUBLE_WALL_SWITCH = {
(CONF_SHORT_PRESS, CONF_LEFT): {CONF_EVENT: 1002},
(CONF_LONG_PRESS, CONF_LEFT): {CONF_EVENT: 1001},
(CONF_DOUBLE_PRESS, CONF_LEFT): {CONF_EVENT: 1004},
(CONF_SHORT_PRESS, CONF_RIGHT): {CONF_EVENT: 2002},
(CONF_LONG_PRESS, CONF_RIGHT): {CONF_EVENT: 2001},
(CONF_DOUBLE_PRESS, CONF_RIGHT): {CONF_EVENT: 2004},
(CONF_SHORT_PRESS, CONF_BOTH_BUTTONS): {CONF_EVENT: 3002},
(CONF_LONG_PRESS, CONF_BOTH_BUTTONS): {CONF_EVENT: 3001},
(CONF_DOUBLE_PRESS, CONF_BOTH_BUTTONS): {CONF_EVENT: 3004},
}
AQARA_DOUBLE_WALL_SWITCH_WXKG02LM_MODEL = "lumi.sensor_86sw2"
AQARA_DOUBLE_WALL_SWITCH_WXKG02LM = {
(CONF_SHORT_PRESS, CONF_LEFT): {CONF_EVENT: 1002},
(CONF_SHORT_PRESS, CONF_RIGHT): {CONF_EVENT: 2002},
(CONF_SHORT_PRESS, CONF_BOTH_BUTTONS): {CONF_EVENT: 3002},
}
AQARA_SINGLE_WALL_SWITCH_WXKG03LM_MODEL = "lumi.remote.b186acn01"
AQARA_SINGLE_WALL_SWITCH_WXKG06LM_MODEL = "lumi.remote.b186acn02"
AQARA_SINGLE_WALL_SWITCH = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_LONG_PRESS, CONF_TURN_ON): {CONF_EVENT: 1001},
(CONF_DOUBLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1004},
}
AQARA_MINI_SWITCH_MODEL = "lumi.remote.b1acn01"
AQARA_MINI_SWITCH = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_DOUBLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1004},
(CONF_LONG_PRESS, CONF_TURN_ON): {CONF_EVENT: 1001},
(CONF_LONG_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1003},
}
AQARA_ROUND_SWITCH_MODEL = "lumi.sensor_switch"
AQARA_ROUND_SWITCH = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1000},
(CONF_SHORT_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_DOUBLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1004},
(CONF_TRIPLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1005},
(CONF_QUADRUPLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1006},
(CONF_QUINTUPLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1010},
(CONF_LONG_PRESS, CONF_TURN_ON): {CONF_EVENT: 1001},
(CONF_LONG_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1003},
}
AQARA_SQUARE_SWITCH_MODEL = "lumi.sensor_switch.aq3"
AQARA_SQUARE_SWITCH = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_DOUBLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1004},
(CONF_LONG_PRESS, CONF_TURN_ON): {CONF_EVENT: 1001},
(CONF_LONG_RELEASE, CONF_TURN_ON): {CONF_EVENT: 1003},
(CONF_SHAKE, ""): {CONF_EVENT: 1007},
}
AQARA_SQUARE_SWITCH_WXKG11LM_2016_MODEL = "lumi.sensor_switch.aq2"
AQARA_SQUARE_SWITCH_WXKG11LM_2016 = {
(CONF_SHORT_PRESS, CONF_TURN_ON): {CONF_EVENT: 1002},
(CONF_DOUBLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1004},
(CONF_TRIPLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1005},
(CONF_QUADRUPLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 1006},
}
AQARA_OPPLE_2_BUTTONS_MODEL = "lumi.remote.b286opcn01"
AQARA_OPPLE_2_BUTTONS = {
(CONF_LONG_PRESS, CONF_TURN_OFF): {CONF_EVENT: 1001},
(CONF_SHORT_RELEASE, CONF_TURN_OFF): {CONF_EVENT: 1002},
(CONF_LONG_RELEASE, CONF_TURN_OFF): {CONF_EVENT: 1003},
(CONF_DOUBLE_PRESS, CONF_TURN_OFF): {CONF_EVENT: 1004},
(CONF_TRIPLE_PRESS, CONF_TURN_OFF): {CONF_EVENT: 1005},
(CONF_LONG_PRESS, CONF_TURN_ON): {CONF_EVENT: 2001},
(CONF_SHORT_RELEASE, CONF_TURN_ON): {CONF_EVENT: 2002},
(CONF_LONG_RELEASE, CONF_TURN_ON): {CONF_EVENT: 2003},
(CONF_DOUBLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 2004},
(CONF_TRIPLE_PRESS, CONF_TURN_ON): {CONF_EVENT: 2005},
}
AQARA_OPPLE_4_BUTTONS_MODEL = "lumi.remote.b486opcn01"
AQARA_OPPLE_4_BUTTONS = {
**AQARA_OPPLE_2_BUTTONS,
(CONF_LONG_PRESS, CONF_DIM_DOWN): {CONF_EVENT: 3001},
(CONF_SHORT_RELEASE, CONF_DIM_DOWN): {CONF_EVENT: 3002},
(CONF_LONG_RELEASE, CONF_DIM_DOWN): {CONF_EVENT: 3003},
(CONF_DOUBLE_PRESS, CONF_DIM_DOWN): {CONF_EVENT: 3004},
(CONF_TRIPLE_PRESS, CONF_DIM_DOWN): {CONF_EVENT: 3005},
(CONF_LONG_PRESS, CONF_DIM_UP): {CONF_EVENT: 4001},
(CONF_SHORT_RELEASE, CONF_DIM_UP): {CONF_EVENT: 4002},
(CONF_LONG_RELEASE, CONF_DIM_UP): {CONF_EVENT: 4003},
(CONF_DOUBLE_PRESS, CONF_DIM_UP): {CONF_EVENT: 4004},
(CONF_TRIPLE_PRESS, CONF_DIM_UP): {CONF_EVENT: 4005},
}
AQARA_OPPLE_6_BUTTONS_MODEL = "lumi.remote.b686opcn01"
AQARA_OPPLE_6_BUTTONS = {
**AQARA_OPPLE_4_BUTTONS,
    (CONF_LONG_PRESS, CONF_LEFT): {CONF_EVENT: 5001},
(CONF_SHORT_RELEASE, CONF_LEFT): {CONF_EVENT: 5002},
(CONF_LONG_RELEASE, CONF_LEFT): {CONF_EVENT: 5003},
(CONF_DOUBLE_PRESS, CONF_LEFT): {CONF_EVENT: 5004},
(CONF_TRIPLE_PRESS, CONF_LEFT): {CONF_EVENT: 5005},
(CONF_LONG_PRESS, CONF_RIGHT): {CONF_EVENT: 6001},
(CONF_SHORT_RELEASE, CONF_RIGHT): {CONF_EVENT: 6002},
(CONF_LONG_RELEASE, CONF_RIGHT): {CONF_EVENT: 6003},
(CONF_DOUBLE_PRESS, CONF_RIGHT): {CONF_EVENT: 6004},
(CONF_TRIPLE_PRESS, CONF_RIGHT): {CONF_EVENT: 6005},
}
REMOTES = {
HUE_DIMMER_REMOTE_MODEL_GEN1: HUE_DIMMER_REMOTE,
HUE_DIMMER_REMOTE_MODEL_GEN2: HUE_DIMMER_REMOTE,
HUE_BUTTON_REMOTE_MODEL: HUE_BUTTON_REMOTE,
HUE_TAP_REMOTE_MODEL: HUE_TAP_REMOTE,
FRIENDS_OF_HUE_SWITCH_MODEL: FRIENDS_OF_HUE_SWITCH,
SYMFONISK_SOUND_CONTROLLER_MODEL: SYMFONISK_SOUND_CONTROLLER,
TRADFRI_ON_OFF_SWITCH_MODEL: TRADFRI_ON_OFF_SWITCH,
TRADFRI_OPEN_CLOSE_REMOTE_MODEL: TRADFRI_OPEN_CLOSE_REMOTE,
TRADFRI_REMOTE_MODEL: TRADFRI_REMOTE,
TRADFRI_WIRELESS_DIMMER_MODEL: TRADFRI_WIRELESS_DIMMER,
AQARA_CUBE_MODEL: AQARA_CUBE,
AQARA_CUBE_MODEL_ALT1: AQARA_CUBE,
AQARA_DOUBLE_WALL_SWITCH_MODEL: AQARA_DOUBLE_WALL_SWITCH,
AQARA_DOUBLE_WALL_SWITCH_MODEL_2020: AQARA_DOUBLE_WALL_SWITCH,
AQARA_DOUBLE_WALL_SWITCH_WXKG02LM_MODEL: AQARA_DOUBLE_WALL_SWITCH_WXKG02LM,
AQARA_SINGLE_WALL_SWITCH_WXKG03LM_MODEL: AQARA_SINGLE_WALL_SWITCH,
AQARA_SINGLE_WALL_SWITCH_WXKG06LM_MODEL: AQARA_SINGLE_WALL_SWITCH,
AQARA_MINI_SWITCH_MODEL: AQARA_MINI_SWITCH,
AQARA_ROUND_SWITCH_MODEL: AQARA_ROUND_SWITCH,
AQARA_SQUARE_SWITCH_MODEL: AQARA_SQUARE_SWITCH,
AQARA_SQUARE_SWITCH_WXKG11LM_2016_MODEL: AQARA_SQUARE_SWITCH_WXKG11LM_2016,
AQARA_OPPLE_2_BUTTONS_MODEL: AQARA_OPPLE_2_BUTTONS,
AQARA_OPPLE_4_BUTTONS_MODEL: AQARA_OPPLE_4_BUTTONS,
AQARA_OPPLE_6_BUTTONS_MODEL: AQARA_OPPLE_6_BUTTONS,
}
TRIGGER_SCHEMA = TRIGGER_BASE_SCHEMA.extend(
{vol.Required(CONF_TYPE): str, vol.Required(CONF_SUBTYPE): str}
)
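# Illustrative sketch, not part of the original module: a trigger config this
# schema is expected to accept. The base keys come from TRIGGER_BASE_SCHEMA
# (platform/domain/device_id); the device id below is a made-up placeholder.
#
#     {
#         CONF_PLATFORM: "device",
#         CONF_DOMAIN: DOMAIN,
#         CONF_DEVICE_ID: "0123456789abcdef",
#         CONF_TYPE: CONF_SHORT_PRESS,
#         CONF_SUBTYPE: CONF_TURN_ON,
#     }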
def _get_deconz_event_from_device_id(hass, device_id):
"""Resolve deconz event from device id."""
for gateway in hass.data.get(DOMAIN, {}).values():
for deconz_event in gateway.events:
if device_id == deconz_event.device_id:
return deconz_event
return None
async def async_validate_trigger_config(hass, config):
"""Validate config."""
config = TRIGGER_SCHEMA(config)
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(config[CONF_DEVICE_ID])
trigger = (config[CONF_TYPE], config[CONF_SUBTYPE])
if (
not device
or device.model not in REMOTES
or trigger not in REMOTES[device.model]
):
raise InvalidDeviceAutomationConfig
return config
async def async_attach_trigger(hass, config, action, automation_info):
"""Listen for state changes based on configuration."""
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(config[CONF_DEVICE_ID])
trigger = (config[CONF_TYPE], config[CONF_SUBTYPE])
trigger = REMOTES[device.model][trigger]
deconz_event = _get_deconz_event_from_device_id(hass, device.id)
if deconz_event is None:
LOGGER.error("No deconz_event tied to device %s found", device.name)
raise InvalidDeviceAutomationConfig
event_id = deconz_event.serial
event_config = {
event_trigger.CONF_PLATFORM: "event",
event_trigger.CONF_EVENT_TYPE: CONF_DECONZ_EVENT,
event_trigger.CONF_EVENT_DATA: {CONF_UNIQUE_ID: event_id, **trigger},
}
event_config = event_trigger.TRIGGER_SCHEMA(event_config)
return await event_trigger.async_attach_trigger(
hass, event_config, action, automation_info, platform_type="device"
)
async def async_get_triggers(hass, device_id):
"""List device triggers.
Make sure device is a supported remote model.
Retrieve the deconz event object matching device entry.
Generate device trigger list.
"""
device_registry = await hass.helpers.device_registry.async_get_registry()
device = device_registry.async_get(device_id)
if device.model not in REMOTES:
return
triggers = []
for trigger, subtype in REMOTES[device.model].keys():
triggers.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_PLATFORM: "device",
CONF_TYPE: trigger,
CONF_SUBTYPE: subtype,
}
)
return triggers
|
import os
import select
import time
import sys
from mlpatches import base, l2c
from six import integer_types, string_types
_stash = base._stash
list2cmdline = l2c.list2cmdline
# constants
try:
    # Set up MAXFD. We don't require this, but other scripts may expect this value to be present.
MAXFD = os.sysconf("SC_OPEN_MAX")
except (AttributeError, ValueError, OSError):
MAXFD = 256
PIPE = -1
STDOUT = -2
class CalledProcessError(Exception):
"""Exception raised when a process run by check_call() or check_output() returns a non-zero exit status."""
def __init__(self, returncode, cmd, output=None):
self.returncode = returncode
self.cmd = cmd
self.output = output
def __str__(self):
return "Command '{c}' returned non-zero exit status {s}".format(c=self.cmd, s=self.returncode)
def call(*args, **kwargs):
"""Run the command described by args. Wait for command to complete, then return the returncode attribute."""
return Popen(*args, **kwargs).wait()
def check_call(*args, **kwargs):
"""Run command with arguments. Wait for command to complete. If the return code was zero then return, otherwise raise CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute."""
rc = call(*args, **kwargs)
if rc != 0:
cmd = kwargs.get("args")
if cmd is None:
cmd = args[0]
raise CalledProcessError(rc, cmd)
return 0
def check_output(*args, **kwargs):
"""Run command with arguments and return its output as a byte string.
If the return code was non-zero it raises a CalledProcessError. The CalledProcessError object will have the return code in the returncode attribute and any output in the output attribute."""
if "stdout" in kwargs:
raise ValueError("stdout argument not allowed, it will be overriden.")
p = Popen(stdout=PIPE, *args, **kwargs)
out, _ = p.communicate()
rc = p.poll()
if rc != 0:
cmd = kwargs.get("args")
if cmd is None:
cmd = args[0]
raise CalledProcessError(rc, cmd, output=out)
return out
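# Illustrative sketch, not part of the original module: typical use of the three
# helpers above inside StaSh. The shell commands are arbitrary examples.
def _example_subprocess_usage():  # pragma: no cover - illustrative only
    rc = call("echo hello")           # wait for the command, return its returncode
    check_call("echo hello")          # like call(), but raises CalledProcessError on failure
    out = check_output("echo hello")  # capture stdout through a PIPE
    return rc, out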
class Popen(object):
"""Execute a child program in a new process. On Unix, the class uses os.execvp()-like behavior to execute the child program. On Windows, the class uses the Windows CreateProcess() function. The arguments to Popen are as follows."""
def __init__(
self,
args,
bufsize=0,
executable=None,
stdin=None,
stdout=None,
stderr=None,
preexec_fn=None,
close_fds=False,
shell=False,
cwd=None,
env=None,
universal_newlines=False,
startupinfo=None,
creationflags=None
):
# vars
self._fds = []
self.returncode = None
self._worker = None
self._cwd = cwd
self._environ = (env if env is not None else {})
if isinstance(args, string_types):
self.cmd = args
else:
if args[0] == sys.executable:
# common use case
args = ["python"] + list(args)[1:]
self.cmd = l2c.list2cmdline(args)
# === setup std* ===
rfm = "rU" if universal_newlines else "rb"
# setup stdout
if stdout is None:
# use own stdout
self.stdout = None
self._sp_stdout = None
elif stdout == PIPE:
# create new pipe
rfd, wfd = os.pipe()
self._fds += [rfd, wfd]
self.stdout = os.fdopen(rfd, rfm, bufsize)
self._sp_stdout = os.fdopen(wfd, "wb")
elif isinstance(stdout, integer_types):
# use fd
self.stdout = None
self._fds.append(stdout)
self._sp_stdout = os.fdopen(stdout, "wb")
else:
self.stdout = None
self._sp_stdout = stdout
# setup stderr
if stderr is None:
            # use own stderr
self.stderr = None
self._sp_stderr = None
elif stderr == PIPE:
# create new pipe
rfd, wfd = os.pipe()
self._fds += [rfd, wfd]
self.stderr = os.fdopen(rfd, rfm, bufsize)
self._sp_stderr = os.fdopen(wfd, "wb")
elif stderr == STDOUT:
self.stderr = self.stdout
self._sp_stderr = self._sp_stdout
elif isinstance(stderr, integer_types):
# use fd
self.stderr = None
self._fds.append(stderr)
self._sp_stderr = os.fdopen(stderr, "wb")
else:
self.stderr = None
self._sp_stderr = stderr
# setup stdin
if stdin is None:
# use own stdin
self.stdin = None
self._sp_stdin = None
elif stdin == PIPE:
# create new pipe
rfd, wfd = os.pipe()
self._fds += [rfd, wfd]
self.stdin = os.fdopen(wfd, "wb")
self._sp_stdin = os.fdopen(rfd, "rb")
elif isinstance(stdin, integer_types):
# use fd
self.stdin = None
self._fds.append(stdin)
self._sp_stdin = os.fdopen(stdin)
else:
self.stdin = None
self._sp_stdin = stdin
# run
self._run()
def __del__(self):
"""called on deletion"""
try:
self._close()
        except Exception:
pass
def _run(self):
"""creates and starts the worker."""
self._worker = _stash.runtime.run(
input_=self.cmd,
final_ins=self._sp_stdin,
final_outs=self._sp_stdout,
final_errs=self._sp_stderr,
add_to_history=None,
persistent_level=2,
is_background=False,
cwd=self._cwd,
environ=self._environ
)
self.pid = self._worker.job_id
def poll(self):
"""Check if child process has terminated. Set and return returncode attribute."""
if self._worker is None:
self.returncode = None
return self.returncode
elif self._worker.is_alive():
self.returncode = None
return self.returncode
else:
self.returncode = self._worker.state.return_value
return self.returncode
def wait(self):
"""Wait for child process to terminate. Set and return returncode attribute."""
while self._worker is None:
# wait() before self._run()
time.sleep(0.1)
self._worker.join()
return self.poll()
def terminate(self):
"""Stop the child. On Posix OSs the method sends SIGTERM to the child. On Windows the Win32 API function TerminateProcess() is called to stop the child."""
self._worker.kill()
kill = terminate
def send_signal(self, signal):
"""Sends the signal signal to the child."""
self.kill()
def communicate(self, input=None):
"""Interact with process: Send data to stdin. Read data from stdout and stderr, until end-of-file is reached. Wait for process to terminate. The optional input argument should be a string to be sent to the child process, or None, if no data should be sent to the child."""
rfs = []
wfs = []
ex = []
if self.stdout is not None:
stdoutdata = ""
rfs.append(self.stdout)
else:
stdoutdata = None
if self.stderr is self.stdout:
seo = True
else:
seo = False
if self.stderr is not None:
stderrdata = ""
rfs.append(self.stderr)
else:
stderrdata = None
if (self.stdin is not None) and (input is not None):
wfs.append(self.stdin)
while len(rfs + wfs) > 0:
tr, tw, he = select.select(rfs, wfs, ex)
            if self.stdin in tw:
                if len(input) < 4096:
                    self.stdin.write(input)
                    input = ""
                    wfs.remove(self.stdin)
                    # Close stdin once all input is written so the child sees EOF.
                    self.stdin.close()
                else:
                    self.stdin.write(input[:4096])
                    input = input[4096:]
if self.stderr in tr:
data = self.stderr.read(4096)
if not data:
rfs.remove(self.stderr)
else:
stderrdata += data
if self.stdout in tr:
data = self.stdout.read(4096)
if not data:
rfs.remove(self.stdout)
else:
stdoutdata += data
if seo:
return (stdoutdata, stdoutdata)
else:
return (stdoutdata, stderrdata)
def _close(self):
"""close all fds and do other cleanup actions"""
for fd in self._fds:
try:
os.close(fd)
            except OSError:
pass
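# Illustrative sketch, not part of the original module: driving Popen directly
# with a stdout pipe, mirroring the stdlib subprocess API. The command is an
# arbitrary example.
def _example_popen_usage():  # pragma: no cover - illustrative only
    p = Popen("echo hello", stdout=PIPE)
    out, err = p.communicate()  # read stdout until EOF; err is None (no stderr pipe)
    rc = p.wait()               # returncode of the finished StaSh job
    return out, err, rc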
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from preprocessing import cifarnet_preprocessing
from preprocessing import inception_preprocessing
from preprocessing import lenet_preprocessing
from preprocessing import vgg_preprocessing
from preprocessing import mobilenet_preprocessing
from preprocessing import mobilenetdet_preprocessing
slim = tf.contrib.slim
def get_preprocessing(name, is_training=False):
"""Returns preprocessing_fn(image, height, width, **kwargs).
Args:
name: The name of the preprocessing function.
is_training: `True` if the model is being used for training and `False`
otherwise.
Returns:
    preprocessing_fn: A function that preprocesses a single image (pre-batch).
It has the following signature:
image = preprocessing_fn(image, output_height, output_width, ...).
Raises:
ValueError: If Preprocessing `name` is not recognized.
"""
preprocessing_fn_map = {
'cifarnet': cifarnet_preprocessing,
'inception': inception_preprocessing,
'inception_v1': inception_preprocessing,
'inception_v2': inception_preprocessing,
'inception_v3': inception_preprocessing,
'inception_v4': inception_preprocessing,
'inception_resnet_v2': inception_preprocessing,
'lenet': lenet_preprocessing,
'resnet_v1_50': vgg_preprocessing,
'resnet_v1_101': vgg_preprocessing,
'resnet_v1_152': vgg_preprocessing,
'resnet_v2_50': vgg_preprocessing,
'resnet_v2_101': vgg_preprocessing,
'resnet_v2_152': vgg_preprocessing,
'vgg': vgg_preprocessing,
'vgg_a': vgg_preprocessing,
'vgg_16': vgg_preprocessing,
'vgg_19': vgg_preprocessing,
'mobilenet': mobilenet_preprocessing,
'mobilenetdet': mobilenetdet_preprocessing
}
if name not in preprocessing_fn_map:
raise ValueError('Preprocessing name [%s] was not recognized' % name)
def preprocessing_fn(image, output_height, output_width, **kwargs):
return preprocessing_fn_map[name].preprocess_image(
image, output_height, output_width, is_training=is_training, **kwargs)
return preprocessing_fn
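# Illustrative sketch, not part of the original module: obtaining and applying a
# preprocessing function. The image tensor is a placeholder; 299x299 is the usual
# inception_v3 input size.
def _example_get_preprocessing_usage(image):  # pragma: no cover - illustrative only
  preprocessing_fn = get_preprocessing('inception_v3', is_training=True)
  return preprocessing_fn(image, output_height=299, output_width=299)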
|
from __future__ import annotations
import asyncio
import datetime
from abc import ABC, abstractmethod
from collections import Counter
from pathlib import Path
from typing import Set, TYPE_CHECKING, Any, List, Mapping, MutableMapping, Optional, Tuple, Union
import aiohttp
import discord
import lavalink
from redbot.core import Config, commands
from redbot.core.bot import Red
from redbot.core.commands import Context
from redbot.core.utils.dbtools import APSWConnectionWrapper
if TYPE_CHECKING:
from ..apis.interface import AudioAPIInterface
from ..apis.playlist_interface import Playlist
from ..apis.playlist_wrapper import PlaylistWrapper
from ..audio_dataclasses import LocalPath, Query
from ..equalizer import Equalizer
from ..manager import ServerManager
class MixinMeta(ABC):
"""Base class for well behaved type hint detection with composite class.
Basically, to keep developers sane when not all attributes are defined in each mixin.
"""
bot: Red
config: Config
api_interface: Optional["AudioAPIInterface"]
player_manager: Optional["ServerManager"]
playlist_api: Optional["PlaylistWrapper"]
local_folder_current_path: Optional[Path]
db_conn: Optional[APSWConnectionWrapper]
session: aiohttp.ClientSession
skip_votes: MutableMapping[discord.Guild, List[discord.Member]]
play_lock: MutableMapping[int, bool]
_daily_playlist_cache: MutableMapping[int, bool]
_daily_global_playlist_cache: MutableMapping[int, bool]
_persist_queue_cache: MutableMapping[int, bool]
_dj_status_cache: MutableMapping[int, Optional[bool]]
_dj_role_cache: MutableMapping[int, Optional[int]]
_error_timer: MutableMapping[int, float]
_disconnected_players: MutableMapping[int, bool]
global_api_user: MutableMapping[str, Any]
cog_cleaned_up: bool
lavalink_connection_aborted: bool
_error_counter: Counter
lavalink_connect_task: Optional[asyncio.Task]
_restore_task: Optional[asyncio.Task]
player_automated_timer_task: Optional[asyncio.Task]
cog_init_task: Optional[asyncio.Task]
cog_ready_event: asyncio.Event
_default_lavalink_settings: Mapping
permission_cache = discord.Permissions
_last_ll_update: datetime.datetime
_ll_guild_updates: Set[int]
@abstractmethod
async def command_llsetup(self, ctx: commands.Context):
raise NotImplementedError()
@abstractmethod
async def maybe_reset_error_counter(self, player: lavalink.Player) -> None:
raise NotImplementedError()
@abstractmethod
async def update_bot_presence(self, track: lavalink.Track, playing_servers: int) -> None:
raise NotImplementedError()
@abstractmethod
async def get_active_player_count(self) -> Tuple[str, int]:
raise NotImplementedError()
@abstractmethod
async def increase_error_counter(self, player: lavalink.Player) -> bool:
raise NotImplementedError()
@abstractmethod
async def _close_database(self) -> None:
raise NotImplementedError()
@abstractmethod
async def maybe_run_pending_db_tasks(self, ctx: commands.Context) -> None:
raise NotImplementedError()
@abstractmethod
def update_player_lock(self, ctx: commands.Context, true_or_false: bool) -> None:
raise NotImplementedError()
@abstractmethod
async def initialize(self) -> None:
raise NotImplementedError()
@abstractmethod
async def data_schema_migration(self, from_version: int, to_version: int) -> None:
raise NotImplementedError()
@abstractmethod
def lavalink_restart_connect(self) -> None:
raise NotImplementedError()
@abstractmethod
async def lavalink_attempt_connect(self, timeout: int = 50) -> None:
raise NotImplementedError()
@abstractmethod
async def player_automated_timer(self) -> None:
raise NotImplementedError()
@abstractmethod
async def lavalink_event_handler(
self, player: lavalink.Player, event_type: lavalink.LavalinkEvents, extra
) -> None:
raise NotImplementedError()
@abstractmethod
async def lavalink_update_handler(
self, player: lavalink.Player, event_type: lavalink.enums.PlayerState, extra
) -> None:
raise NotImplementedError()
@abstractmethod
async def _clear_react(
self, message: discord.Message, emoji: MutableMapping = None
) -> asyncio.Task:
raise NotImplementedError()
@abstractmethod
async def remove_react(
self,
message: discord.Message,
react_emoji: Union[discord.Emoji, discord.Reaction, discord.PartialEmoji, str],
react_user: discord.abc.User,
) -> None:
raise NotImplementedError()
@abstractmethod
async def command_equalizer(self, ctx: commands.Context):
raise NotImplementedError()
@abstractmethod
async def _eq_msg_clear(self, eq_message: discord.Message) -> None:
raise NotImplementedError()
@abstractmethod
def _player_check(self, ctx: commands.Context) -> bool:
raise NotImplementedError()
@abstractmethod
async def maybe_charge_requester(self, ctx: commands.Context, jukebox_price: int) -> bool:
raise NotImplementedError()
@abstractmethod
async def _can_instaskip(self, ctx: commands.Context, member: discord.Member) -> bool:
raise NotImplementedError()
@abstractmethod
async def command_search(self, ctx: commands.Context, *, query: str):
raise NotImplementedError()
@abstractmethod
async def is_query_allowed(
self,
config: Config,
ctx_or_channel: Optional[Union[Context, discord.TextChannel]],
query: str,
query_obj: Query,
) -> bool:
raise NotImplementedError()
@abstractmethod
def is_track_length_allowed(self, track: Union[lavalink.Track, int], maxlength: int) -> bool:
raise NotImplementedError()
@abstractmethod
async def get_track_description(
self,
track: Union[lavalink.rest_api.Track, "Query"],
local_folder_current_path: Path,
shorten: bool = False,
) -> Optional[str]:
raise NotImplementedError()
@abstractmethod
async def get_track_description_unformatted(
self, track: Union[lavalink.rest_api.Track, "Query"], local_folder_current_path: Path
) -> Optional[str]:
raise NotImplementedError()
@abstractmethod
def humanize_scope(
self, scope: str, ctx: Union[discord.Guild, discord.abc.User, str] = None, the: bool = None
) -> Optional[str]:
raise NotImplementedError()
@abstractmethod
async def draw_time(self, ctx) -> str:
raise NotImplementedError()
@abstractmethod
def rsetattr(self, obj, attr, val) -> None:
raise NotImplementedError()
@abstractmethod
def rgetattr(self, obj, attr, *args) -> Any:
raise NotImplementedError()
@abstractmethod
async def _check_api_tokens(self) -> MutableMapping:
raise NotImplementedError()
@abstractmethod
async def send_embed_msg(
self, ctx: commands.Context, author: Mapping[str, str] = None, **kwargs
) -> discord.Message:
raise NotImplementedError()
@abstractmethod
async def update_external_status(self) -> bool:
raise NotImplementedError()
@abstractmethod
def get_track_json(
self,
player: lavalink.Player,
position: Union[int, str] = None,
other_track: lavalink.Track = None,
) -> MutableMapping:
raise NotImplementedError()
@abstractmethod
def track_to_json(self, track: lavalink.Track) -> MutableMapping:
raise NotImplementedError()
@abstractmethod
def time_convert(self, length: Union[int, str]) -> int:
raise NotImplementedError()
@abstractmethod
async def queue_duration(self, ctx: commands.Context) -> int:
raise NotImplementedError()
@abstractmethod
async def track_remaining_duration(self, ctx: commands.Context) -> int:
raise NotImplementedError()
@abstractmethod
def get_time_string(self, seconds: int) -> str:
raise NotImplementedError()
@abstractmethod
async def set_player_settings(self, ctx: commands.Context) -> None:
raise NotImplementedError()
@abstractmethod
async def get_playlist_match(
self,
context: commands.Context,
matches: MutableMapping,
scope: str,
author: discord.User,
guild: discord.Guild,
specified_user: bool = False,
) -> Tuple[Optional["Playlist"], str, str]:
raise NotImplementedError()
@abstractmethod
async def is_requester_alone(self, ctx: commands.Context) -> bool:
raise NotImplementedError()
@abstractmethod
async def is_requester(self, ctx: commands.Context, member: discord.Member) -> bool:
raise NotImplementedError()
@abstractmethod
async def _skip_action(self, ctx: commands.Context, skip_to_track: int = None) -> None:
raise NotImplementedError()
@abstractmethod
def is_vc_full(self, channel: discord.VoiceChannel) -> bool:
raise NotImplementedError()
@abstractmethod
async def _has_dj_role(self, ctx: commands.Context, member: discord.Member) -> bool:
raise NotImplementedError()
@abstractmethod
def match_url(self, url: str) -> bool:
raise NotImplementedError()
@abstractmethod
async def _playlist_check(self, ctx: commands.Context) -> bool:
raise NotImplementedError()
@abstractmethod
async def can_manage_playlist(
self, scope: str, playlist: "Playlist", ctx: commands.Context, user, guild
) -> bool:
raise NotImplementedError()
@abstractmethod
async def _maybe_update_playlist(
self, ctx: commands.Context, player: lavalink.player_manager.Player, playlist: "Playlist"
) -> Tuple[List[lavalink.Track], List[lavalink.Track], "Playlist"]:
raise NotImplementedError()
@abstractmethod
def is_url_allowed(self, url: str) -> bool:
raise NotImplementedError()
@abstractmethod
async def _eq_check(self, ctx: commands.Context, player: lavalink.Player) -> None:
raise NotImplementedError()
@abstractmethod
async def _enqueue_tracks(
self, ctx: commands.Context, query: Union["Query", list], enqueue: bool = True
) -> Union[discord.Message, List[lavalink.Track], lavalink.Track]:
raise NotImplementedError()
@abstractmethod
async def _eq_interact(
self,
ctx: commands.Context,
player: lavalink.Player,
eq: "Equalizer",
message: discord.Message,
selected: int,
) -> None:
raise NotImplementedError()
@abstractmethod
async def _apply_gains(self, guild_id: int, gains: List[float]) -> None:
        raise NotImplementedError()
@abstractmethod
async def _apply_gain(self, guild_id: int, band: int, gain: float) -> None:
raise NotImplementedError()
@abstractmethod
async def _get_spotify_tracks(
self, ctx: commands.Context, query: "Query", forced: bool = False
) -> Union[discord.Message, List[lavalink.Track], lavalink.Track]:
raise NotImplementedError()
@abstractmethod
async def _genre_search_button_action(
self, ctx: commands.Context, options: List, emoji: str, page: int, playlist: bool = False
) -> str:
raise NotImplementedError()
@abstractmethod
async def _build_genre_search_page(
self,
ctx: commands.Context,
tracks: List,
page_num: int,
title: str,
playlist: bool = False,
) -> discord.Embed:
raise NotImplementedError()
@abstractmethod
async def command_audioset_autoplay_toggle(self, ctx: commands.Context):
raise NotImplementedError()
@abstractmethod
async def _search_button_action(
self, ctx: commands.Context, tracks: List, emoji: str, page: int
):
raise NotImplementedError()
@abstractmethod
async def get_localtrack_folder_tracks(
self, ctx, player: lavalink.player_manager.Player, query: "Query"
) -> List[lavalink.rest_api.Track]:
raise NotImplementedError()
@abstractmethod
async def get_localtrack_folder_list(
self, ctx: commands.Context, query: "Query"
) -> List["Query"]:
raise NotImplementedError()
@abstractmethod
async def _local_play_all(
self, ctx: commands.Context, query: "Query", from_search: bool = False
) -> None:
raise NotImplementedError()
@abstractmethod
async def _build_search_page(
self, ctx: commands.Context, tracks: List, page_num: int
) -> discord.Embed:
raise NotImplementedError()
@abstractmethod
async def command_play(self, ctx: commands.Context, *, query: str):
raise NotImplementedError()
@abstractmethod
async def localtracks_folder_exists(self, ctx: commands.Context) -> bool:
raise NotImplementedError()
@abstractmethod
async def get_localtracks_folders(
self, ctx: commands.Context, search_subfolders: bool = False
) -> List[Union[Path, "LocalPath"]]:
raise NotImplementedError()
@abstractmethod
async def _build_local_search_list(
self, to_search: List["Query"], search_words: str
) -> List[str]:
raise NotImplementedError()
@abstractmethod
async def command_stop(self, ctx: commands.Context):
raise NotImplementedError()
@abstractmethod
async def _build_queue_page(
self,
ctx: commands.Context,
queue: list,
player: lavalink.player_manager.Player,
page_num: int,
) -> discord.Embed:
raise NotImplementedError()
@abstractmethod
async def command_pause(self, ctx: commands.Context):
raise NotImplementedError()
@abstractmethod
async def _build_queue_search_list(
self, queue_list: List[lavalink.Track], search_words: str
) -> List[Tuple[int, str]]:
raise NotImplementedError()
@abstractmethod
async def _build_queue_search_page(
self, ctx: commands.Context, page_num: int, search_list: List[Tuple[int, str]]
) -> discord.Embed:
raise NotImplementedError()
@abstractmethod
async def fetch_playlist_tracks(
self,
ctx: commands.Context,
player: lavalink.player_manager.Player,
query: "Query",
skip_cache: bool = False,
) -> Union[discord.Message, None, List[MutableMapping]]:
raise NotImplementedError()
@abstractmethod
async def _build_playlist_list_page(
self, ctx: commands.Context, page_num: int, abc_names: List, scope: Optional[str]
) -> discord.Embed:
raise NotImplementedError()
@abstractmethod
def match_yt_playlist(self, url: str) -> bool:
raise NotImplementedError()
@abstractmethod
async def _load_v3_playlist(
self,
ctx: commands.Context,
scope: str,
uploaded_playlist_name: str,
uploaded_playlist_url: str,
track_list: List,
author: Union[discord.User, discord.Member],
guild: Union[discord.Guild],
) -> None:
raise NotImplementedError()
@abstractmethod
async def _load_v2_playlist(
self,
ctx: commands.Context,
uploaded_track_list,
player: lavalink.player_manager.Player,
playlist_url: str,
uploaded_playlist_name: str,
scope: str,
author: Union[discord.User, discord.Member],
guild: Union[discord.Guild],
):
raise NotImplementedError()
@abstractmethod
def format_time(self, time: int) -> str:
raise NotImplementedError()
@abstractmethod
async def get_lyrics_status(self, ctx: Context) -> bool:
raise NotImplementedError()
@abstractmethod
async def restore_players(self) -> bool:
raise NotImplementedError()
@abstractmethod
async def command_skip(self, ctx: commands.Context, skip_to_track: int = None):
raise NotImplementedError()
@abstractmethod
async def command_prev(self, ctx: commands.Context):
raise NotImplementedError()
@abstractmethod
async def icyparser(self, url: str) -> Optional[str]:
raise NotImplementedError()
    @abstractmethod
    async def self_deafen(self, player: lavalink.Player) -> None:
raise NotImplementedError()
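# Illustrative sketch, not part of the original module: MixinMeta is meant to be
# combined with concrete mixins in a composite cog class so type checkers see the
# full attribute surface. All class names besides MixinMeta are hypothetical:
#
#     class Audio(Commands, Events, Tasks, Utilities, MixinMeta, metaclass=CompositeMetaClass):
#         """Composite cog; MixinMeta supplies the shared attribute and method hints."""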
|
import itertools
from datetime import timedelta
import numpy as np
import pandas as pd
import pytest
from pandas import to_datetime as dt
from pandas.util.testing import assert_frame_equal
from arctic.multi_index import groupby_asof, fancy_group_by, insert_at
from tests.util import multi_index_df_from_arrs
def get_bitemporal_test_data():
    # Create an index of 4 sample dates with 2 rows per date (8 rows in total)
sample_dates = pd.date_range('1/1/2014', periods=4, freq='D')
sample_dates = pd.DatetimeIndex(data=sorted(itertools.chain(sample_dates, sample_dates)))
# Create a list of insert dates. These are a year later than sample date, to show
# that they don't necessarily have to be related
insert_dates = pd.date_range('1/1/2015', periods=8, freq='D')
# Build the bitemporal index
index = pd.MultiIndex.from_arrays([sample_dates, insert_dates], names=['sample_dt', 'observed_dt'])
    # Create the dataframe with a couple of columns, each value incrementing by 0.1 on successive updates so
    # we can tell them apart
prices = [[1.0, 10.0],
[1.1, 10.1],
[2.0, 20.0],
[2.1, 20.1],
[3.0, 30.0],
[3.1, 30.1],
[4.0, 40.0],
[4.1, 40.1]]
df = pd.DataFrame(prices, index=index, columns=['OPEN', 'CLOSE'])
# OPEN CLOSE
# sample_dt observed_dt
# 2014-01-01 2015-01-01 1.0 10.0
# 2015-01-02 1.1 10.1
# 2014-01-02 2015-01-03 2.0 20.0
# 2015-01-04 2.1 20.1
# 2014-01-03 2015-01-05 3.0 30.0
# 2015-01-06 3.1 30.1
# 2014-01-04 2015-01-07 4.0 40.0
# 2015-01-08 4.1 40.1
return df
def test__can_create_df_with_multiple_index():
""" I can make a Pandas DF with an multi-index on sampled_dt and observed_dt
"""
df = get_bitemporal_test_data()
# Our index has 2 levels
assert df.index.names == ['sample_dt', 'observed_dt']
assert all(df.columns == ['OPEN', 'CLOSE'])
# We should have 8 rows
assert len(df) == 8
    # .. or 4, when we only count sample dates
assert len(df.groupby(level='sample_dt').sum()) == 4
def test__get_ts__asof_latest():
""" I can get the latest known value for each sample date
"""
df = groupby_asof(get_bitemporal_test_data())
assert len(df) == 4
assert all(df['OPEN'] == [1.1, 2.1, 3.1, 4.1])
assert all(df['CLOSE'] == [10.1, 20.1, 30.1, 40.1])
def test__get_ts__asof_datetime():
""" I can get a timeseries as-of a particular point in time
"""
df = groupby_asof(get_bitemporal_test_data(), as_of=dt('2015-01-05'))
assert len(df) == 3
assert all(df['OPEN'] == [1.1, 2.1, 3.0])
assert all(df['CLOSE'] == [10.1, 20.1, 30.0])
def test__get_ts__unsorted_index():
""" I can get a timeseries as-of a date when the index isn't sorted properly
"""
df = get_bitemporal_test_data()
# Swap the third and fourth rows around. This would break the group-by if we didn't check
# for sortedness
df = df.reindex(df.index[[0, 1, 3, 2, 4, 5, 6, 7]])
df = groupby_asof(df)
assert len(df) == 4
assert all(df['OPEN'] == [1.1, 2.1, 3.1, 4.1])
assert all(df['CLOSE'] == [10.1, 20.1, 30.1, 40.1])
def test_fancy_group_by_multi_index():
ts = multi_index_df_from_arrs(
index_headers=('index 1', 'index 2', 'observed_dt'),
index_arrs=[
[
'2012-09-08 17:06:11.040',
'2012-09-08 17:06:11.040',
'2012-10-08 17:06:11.040',
'2012-10-08 17:06:11.040',
'2012-10-08 17:06:11.040',
'2012-10-09 17:06:11.040',
'2012-10-09 17:06:11.040',
'2012-11-08 17:06:11.040',
],
['SPAM Index', 'EGG Index', 'SPAM Index', 'SPAM Index'] + ['EGG Index', 'SPAM Index'] * 2,
['2015-01-01'] * 3 + ['2015-01-05'] + ['2015-01-01'] * 4
],
data_dict={'near': [1.0, 1.6, 2.0, 4.2, 2.1, 2.5, 2.6, 3.0]}
)
    expected_ts = multi_index_df_from_arrs(
index_headers=('index 1', 'index 2'),
index_arrs=[
[
'2012-09-08 17:06:11.040',
'2012-09-08 17:06:11.040',
'2012-10-08 17:06:11.040',
'2012-10-08 17:06:11.040',
'2012-10-09 17:06:11.040',
'2012-10-09 17:06:11.040',
'2012-11-08 17:06:11.040',
],
['EGG Index', 'SPAM Index'] * 3 + ['SPAM Index']
],
data_dict={'near': [1.6, 1.0, 2.1, 4.2, 2.6, 2.5, 3.0]}
)
assert_frame_equal(expected_ts, groupby_asof(ts, dt_col=['index 1', 'index 2'], asof_col='observed_dt'))
# --------- Min/Max using numeric index ----------- #
def get_numeric_index_test_data():
group_idx = sorted(4 * list(range(4)))
agg_idx = list(range(16))
prices = np.arange(32).reshape(16, 2) * 10
df = pd.DataFrame(prices, index=[group_idx, agg_idx], columns=['OPEN', 'CLOSE'])
# OPEN CLOSE
# 0 0 0 10
# 1 20 30
# 2 40 50
# 3 60 70
# 1 4 80 90
# 5 100 110
# 6 120 130
# 7 140 150
# 2 8 160 170
# 9 180 190
# 10 200 210
# 11 220 230
# 3 12 240 250
# 13 260 270
# 14 280 290
# 15 300 310
return df
def test__minmax_last():
df = get_numeric_index_test_data()
df = fancy_group_by(df, min_=3, max_=10, method='last')
assert all(df['OPEN'] == [60, 140, 200])
assert all(df['CLOSE'] == [70, 150, 210])
def test__minmax_first():
df = get_numeric_index_test_data()
df = fancy_group_by(df, min_=3, max_=10, method='first')
assert all(df['OPEN'] == [60, 80, 160])
assert all(df['CLOSE'] == [70, 90, 170])
def test__within_numeric_first():
df = get_numeric_index_test_data()
df = fancy_group_by(df, within=5, method='first')
assert all(df['OPEN'] == [0, 80])
assert all(df['CLOSE'] == [10, 90])
def test__within_numeric_last():
df = get_numeric_index_test_data()
df = fancy_group_by(df, within=5, method='last')
assert all(df['OPEN'] == [60, 120])
assert all(df['CLOSE'] == [70, 130])
# --------- Datetime index ----------- #
def get_datetime_index_test_data():
sample_dates = pd.DatetimeIndex(4 * [dt('1/1/2014 21:30')] +
4 * [dt('2/1/2014 21:30')] +
4 * [dt('3/1/2014 21:30')])
observed_dates = [dt('1/1/2014 22:00'), dt('1/1/2014 22:30'), dt('2/1/2014 00:00'), dt('1/1/2015 21:30'),
dt('2/1/2014 23:00'), dt('2/1/2014 23:30'), dt('3/1/2014 00:00'), dt('2/1/2015 21:30'),
dt('3/1/2014 21:30'), dt('3/1/2014 22:30'), dt('4/1/2014 00:00'), dt('3/1/2015 21:30'),
]
index = pd.MultiIndex.from_arrays([sample_dates, observed_dates], names=['sample_dt', 'observed_dt'])
prices = np.arange(24).reshape(12, 2) * 10
df = pd.DataFrame(prices, index=index, columns=['OPEN', 'CLOSE'])
# OPEN CLOSE
# sample_dt observed_dt
# 2014-01-01 21:30:00 2014-01-01 22:00:00 0 10
# 2014-01-01 22:30:00 20 30
# 2014-02-01 00:00:00 40 50
# 2015-01-01 21:30:00 60 70
# 2014-02-01 21:30:00 2014-02-01 23:00:00 80 90
# 2014-02-01 23:30:00 100 110
# 2014-03-01 00:00:00 120 130
# 2015-02-01 21:30:00 140 150
# 2014-03-01 21:30:00 2014-03-01 21:30:00 160 170
# 2014-03-01 22:30:00 180 190
# 2014-04-01 00:00:00 200 210
# 2015-03-01 21:30:00 220 230
return df
def test__first_within_datetime():
''' This shows the groupby function can give you a timeseries of points that were observed
within a rolling window of the sample time.
This is like saying 'give me the timeseries as it was on the day'.
    It usually makes sense for the window to be the same as the sample period.
'''
df = get_datetime_index_test_data()
df = fancy_group_by(df, within=timedelta(hours=1), method='first')
assert all(df['OPEN'] == [0, 160])
assert all(df['CLOSE'] == [10, 170])
def test__last_within_datetime():
''' Last-observed variant of the above.
'''
df = get_datetime_index_test_data()
df = fancy_group_by(df, within=timedelta(hours=1), method='last')
assert all(df['OPEN'] == [20, 180])
assert all(df['CLOSE'] == [30, 190])
# ----------------------- Row Insertion ---------------------------- #
def test__can_insert_row():
""" I can insert a new row into a bitemp ts and it comes back when selecting the latest data
"""
df = get_bitemporal_test_data()
df = insert_at(df, dt('2014-01-03'), [[9, 90]])
assert len(df) == 9
df = groupby_asof(df)
assert len(df) == 4
assert df.loc[dt('2014-01-03')]['OPEN'] == 9
assert df.loc[dt('2014-01-03')]['CLOSE'] == 90
def test__can_append_row():
""" I can append a new row to a bitemp ts and it comes back when selecting the latest data
"""
df = get_bitemporal_test_data()
df = insert_at(df, dt('2014-01-05'), [[9, 90]])
assert len(df) == 9
df = groupby_asof(df)
assert len(df) == 5
assert df.loc[dt('2014-01-05')]['OPEN'] == 9
assert df.loc[dt('2014-01-05')]['CLOSE'] == 90
def test_fancy_group_by_raises():
with pytest.raises(ValueError):
assert(fancy_group_by(None, method=None))
|
import datetime
import time
# Current time, usable for a MySQL DATETIME column
def now_datetime_string():
return datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")
def now_datetime():
return datetime.datetime.now()
def now_date_string():
return datetime.datetime.now().strftime("%Y-%m-%d")
def now_timestamp():
return time.time()
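# Hedged addition for illustration only: convert a Unix timestamp back into a
# MySQL-style datetime string. The function name is an assumption and is not
# part of the original module.
def timestamp_to_datetime_string(ts):
    return datetime.datetime.fromtimestamp(ts).strftime("%Y-%m-%d %H:%M:%S")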
if __name__ == '__main__':
print(now_datetime())
print(now_timestamp())
print(now_date_string())
|
from collections import deque
import logging
from aiohttp.web import Response
from homeassistant.components.http import HomeAssistantView
from homeassistant.components.zwave import DEVICE_CONFIG_SCHEMA_ENTRY, const
from homeassistant.const import HTTP_ACCEPTED, HTTP_BAD_REQUEST, HTTP_NOT_FOUND, HTTP_OK
import homeassistant.core as ha
import homeassistant.helpers.config_validation as cv
from . import EditKeyBasedConfigView
_LOGGER = logging.getLogger(__name__)
CONFIG_PATH = "zwave_device_config.yaml"
OZW_LOG_FILENAME = "OZW_Log.txt"
async def async_setup(hass):
"""Set up the Z-Wave config API."""
hass.http.register_view(
EditKeyBasedConfigView(
"zwave",
"device_config",
CONFIG_PATH,
cv.entity_id,
DEVICE_CONFIG_SCHEMA_ENTRY,
)
)
hass.http.register_view(ZWaveNodeValueView)
hass.http.register_view(ZWaveNodeGroupView)
hass.http.register_view(ZWaveNodeConfigView)
hass.http.register_view(ZWaveUserCodeView)
hass.http.register_view(ZWaveLogView)
hass.http.register_view(ZWaveConfigWriteView)
hass.http.register_view(ZWaveProtectionView)
return True
class ZWaveLogView(HomeAssistantView):
"""View to read the ZWave log file."""
url = "/api/zwave/ozwlog"
name = "api:zwave:ozwlog"
# pylint: disable=no-self-use
async def get(self, request):
"""Retrieve the lines from ZWave log."""
try:
lines = int(request.query.get("lines", 0))
except ValueError:
return Response(text="Invalid datetime", status=HTTP_BAD_REQUEST)
hass = request.app["hass"]
response = await hass.async_add_executor_job(self._get_log, hass, lines)
return Response(text="\n".join(response))
def _get_log(self, hass, lines):
"""Retrieve the logfile content."""
logfilepath = hass.config.path(OZW_LOG_FILENAME)
with open(logfilepath) as logfile:
data = (line.rstrip() for line in logfile)
if lines == 0:
loglines = list(data)
else:
loglines = deque(data, lines)
return loglines
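    # Hedged request sketch (illustrative only; host and port are assumptions):
    #   GET http://<hass-host>:8123/api/zwave/ozwlog?lines=50
    # returns the last 50 lines of OZW_Log.txt, or the whole file when
    # lines=0 (the default).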
class ZWaveConfigWriteView(HomeAssistantView):
"""View to save the ZWave configuration to zwcfg_xxxxx.xml."""
url = "/api/zwave/saveconfig"
name = "api:zwave:saveconfig"
@ha.callback
def post(self, request):
"""Save cache configuration to zwcfg_xxxxx.xml."""
hass = request.app["hass"]
network = hass.data.get(const.DATA_NETWORK)
if network is None:
return self.json_message("No Z-Wave network data found", HTTP_NOT_FOUND)
_LOGGER.info("Z-Wave configuration written to file")
network.write_config()
return self.json_message("Z-Wave configuration saved to file", HTTP_OK)
class ZWaveNodeValueView(HomeAssistantView):
"""View to return the node values."""
url = r"/api/zwave/values/{node_id:\d+}"
name = "api:zwave:values"
@ha.callback
def get(self, request, node_id):
"""Retrieve groups of node."""
nodeid = int(node_id)
hass = request.app["hass"]
values_list = hass.data[const.DATA_ENTITY_VALUES]
values_data = {}
# Return a list of values for this node that are used as a
# primary value for an entity
for entity_values in values_list:
if entity_values.primary.node.node_id != nodeid:
continue
values_data[entity_values.primary.value_id] = {
"label": entity_values.primary.label,
"index": entity_values.primary.index,
"instance": entity_values.primary.instance,
"poll_intensity": entity_values.primary.poll_intensity,
}
return self.json(values_data)
class ZWaveNodeGroupView(HomeAssistantView):
"""View to return the nodes group configuration."""
url = r"/api/zwave/groups/{node_id:\d+}"
name = "api:zwave:groups"
@ha.callback
def get(self, request, node_id):
"""Retrieve groups of node."""
nodeid = int(node_id)
hass = request.app["hass"]
network = hass.data.get(const.DATA_NETWORK)
node = network.nodes.get(nodeid)
if node is None:
return self.json_message("Node not found", HTTP_NOT_FOUND)
groupdata = node.groups
groups = {}
for key, value in groupdata.items():
groups[key] = {
"associations": value.associations,
"association_instances": value.associations_instances,
"label": value.label,
"max_associations": value.max_associations,
}
return self.json(groups)
class ZWaveNodeConfigView(HomeAssistantView):
"""View to return the nodes configuration options."""
url = r"/api/zwave/config/{node_id:\d+}"
name = "api:zwave:config"
@ha.callback
def get(self, request, node_id):
"""Retrieve configurations of node."""
nodeid = int(node_id)
hass = request.app["hass"]
network = hass.data.get(const.DATA_NETWORK)
node = network.nodes.get(nodeid)
if node is None:
return self.json_message("Node not found", HTTP_NOT_FOUND)
config = {}
for value in node.get_values(
class_id=const.COMMAND_CLASS_CONFIGURATION
).values():
config[value.index] = {
"label": value.label,
"type": value.type,
"help": value.help,
"data_items": value.data_items,
"data": value.data,
"max": value.max,
"min": value.min,
}
return self.json(config)
class ZWaveUserCodeView(HomeAssistantView):
"""View to return the nodes usercode configuration."""
url = r"/api/zwave/usercodes/{node_id:\d+}"
name = "api:zwave:usercodes"
@ha.callback
def get(self, request, node_id):
"""Retrieve usercodes of node."""
nodeid = int(node_id)
hass = request.app["hass"]
network = hass.data.get(const.DATA_NETWORK)
node = network.nodes.get(nodeid)
if node is None:
return self.json_message("Node not found", HTTP_NOT_FOUND)
usercodes = {}
if not node.has_command_class(const.COMMAND_CLASS_USER_CODE):
return self.json(usercodes)
for value in node.get_values(class_id=const.COMMAND_CLASS_USER_CODE).values():
if value.genre != const.GENRE_USER:
continue
usercodes[value.index] = {
"code": value.data,
"label": value.label,
"length": len(value.data),
}
return self.json(usercodes)
class ZWaveProtectionView(HomeAssistantView):
"""View for the protection commandclass of a node."""
url = r"/api/zwave/protection/{node_id:\d+}"
name = "api:zwave:protection"
async def get(self, request, node_id):
"""Retrieve the protection commandclass options of node."""
nodeid = int(node_id)
hass = request.app["hass"]
network = hass.data.get(const.DATA_NETWORK)
def _fetch_protection():
"""Get protection data."""
node = network.nodes.get(nodeid)
if node is None:
return self.json_message("Node not found", HTTP_NOT_FOUND)
protection_options = {}
if not node.has_command_class(const.COMMAND_CLASS_PROTECTION):
return self.json(protection_options)
protections = node.get_protections()
protection_options = {
"value_id": "{:d}".format(list(protections)[0]),
"selected": node.get_protection_item(list(protections)[0]),
"options": node.get_protection_items(list(protections)[0]),
}
return self.json(protection_options)
return await hass.async_add_executor_job(_fetch_protection)
async def post(self, request, node_id):
"""Change the selected option in protection commandclass."""
nodeid = int(node_id)
hass = request.app["hass"]
network = hass.data.get(const.DATA_NETWORK)
protection_data = await request.json()
def _set_protection():
"""Set protection data."""
node = network.nodes.get(nodeid)
selection = protection_data["selection"]
value_id = int(protection_data[const.ATTR_VALUE_ID])
if node is None:
return self.json_message("Node not found", HTTP_NOT_FOUND)
if not node.has_command_class(const.COMMAND_CLASS_PROTECTION):
return self.json_message(
"No protection commandclass on this node", HTTP_NOT_FOUND
)
state = node.set_protection(value_id, selection)
if not state:
return self.json_message(
"Protection setting did not complete", HTTP_ACCEPTED
)
return self.json_message("Protection setting succsessfully set", HTTP_OK)
return await hass.async_add_executor_job(_set_protection)
|
import datetime
import logging
import sys
from uuid import uuid4
from builtins import str
from influxdb import InfluxDBClient
from .decoder import Decoder
from ...common.interfaces import AbstractPlugin, \
MonitoringDataListener, AggregateResultListener
logger = logging.getLogger(__name__) # pylint: disable=C0103
def chop(data_list, chunk_size):
if sys.getsizeof(str(data_list)) <= chunk_size:
return [data_list]
elif len(data_list) == 1:
logger.warning("Too large piece of Telegraf data. Might experience upload problems.")
return [data_list]
else:
        mid = len(data_list) // 2
return chop(data_list[:mid], chunk_size) + chop(data_list[mid:], chunk_size)
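# Hedged illustration of the intended behaviour (comment only; sizes are
# assumptions): a list whose stringified size exceeds chunk_size is halved
# recursively until every piece fits or can no longer be split, e.g.
#   chop(list(range(1000)), chunk_size=1024)  ->  several smaller sub-lists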
class Plugin(AbstractPlugin, AggregateResultListener,
MonitoringDataListener):
SECTION = 'influx'
def __init__(self, core, cfg, name):
AbstractPlugin.__init__(self, core, cfg, name)
self.tank_tag = self.get_option("tank_tag")
self.prefix_measurement = self.get_option("prefix_measurement")
self._client = None
self.start_time = None
self.end_time = None
self.decoder = Decoder(
self.tank_tag,
str(uuid4()),
self.get_option("custom_tags"),
self.get_option("labeled"),
self.get_option("histograms"),
)
@property
def client(self):
if not self._client:
self._client = InfluxDBClient(
self.get_option("address"),
self.get_option("port"),
ssl=self.get_option("ssl"),
verify_ssl=self.get_option("verify_ssl"),
path=self.get_option("path"),
username=self.get_option("username"),
password=self.get_option("password"),
database=self.get_option("database"),
)
return self._client
def prepare_test(self):
self.core.job.subscribe_plugin(self)
def start_test(self):
self.start_time = datetime.datetime.now()
def end_test(self, retcode):
self.end_time = datetime.datetime.now() + datetime.timedelta(minutes=1)
return retcode
def on_aggregated_data(self, data, stats):
self.client.write_points(
self.decoder.decode_aggregates(data, stats, self.prefix_measurement),
's'
)
def monitoring_data(self, data_list):
if len(data_list) > 0:
            for chunk in chop(data_list, self.get_option("chunk_size")):
                self._send_monitoring(chunk)
def _send_monitoring(self, data):
self.client.write_points(
self.decoder.decode_monitoring(data),
's'
)
def set_uuid(self, id_):
self.decoder.tags['uuid'] = id_
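# Hedged configuration sketch (yandex-tank YAML; values are assumptions made
# for illustration only, consult the plugin's config schema for real defaults):
#
#   influx:
#     enabled: true
#     address: localhost
#     port: 8086
#     database: mydb
#     tank_tag: mytank
#     prefix_measurement: ""
#     labeled: false
#     histograms: false
#     chunk_size: 500000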
|
from lemur.plugins.bases import DestinationPlugin
class TestDestinationPlugin(DestinationPlugin):
title = "Test"
slug = "test-destination"
description = "Enables testing"
author = "Kevin Glisson"
author_url = "https://github.com/netflix/lemur.git"
def __init__(self, *args, **kwargs):
super(TestDestinationPlugin, self).__init__(*args, **kwargs)
def upload(self, name, body, private_key, cert_chain, options, **kwargs):
return
|
import os
from django.conf import settings
from django.test import SimpleTestCase
from django.test.utils import override_settings
from weblate.utils.backup import backup, get_paper_key, initialize, prune
from weblate.utils.data import data_dir
from weblate.utils.tasks import database_backup, settings_backup
from weblate.utils.unittest import tempdir_setting
class BackupTest(SimpleTestCase):
@tempdir_setting("DATA_DIR")
def test_settings_backup(self):
settings_backup()
filename = data_dir("backups", "settings-expanded.py")
with open(filename) as handle:
self.assertIn(settings.DATA_DIR, handle.read())
@tempdir_setting("DATA_DIR")
@tempdir_setting("BACKUP_DIR")
def test_backup(self):
initialize(settings.BACKUP_DIR, "key")
output = get_paper_key(settings.BACKUP_DIR)
self.assertIn("BORG PAPER KEY", output)
output = backup(settings.BACKUP_DIR, "key")
self.assertIn("Creating archive", output)
output = prune(settings.BACKUP_DIR, "key")
self.assertIn("Keeping archive", output)
@tempdir_setting("DATA_DIR")
def test_database_backup(self):
database_backup()
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql":
self.assertTrue(
os.path.exists(
os.path.join(settings.DATA_DIR, "backups", "database.sql")
)
)
@tempdir_setting("DATA_DIR")
@override_settings(DATABASE_BACKUP="compressed")
def test_database_backup_compress(self):
database_backup()
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql":
self.assertTrue(
os.path.exists(
os.path.join(settings.DATA_DIR, "backups", "database.sql.gz")
)
)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
import mock
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import sample
from perfkitbenchmarker.configs import benchmark_config_spec
import six
from six.moves import range
_BENCHMARK_NAME = 'test_benchmark'
_BENCHMARK_UID = 'uid'
class SamplesTestMixin(object):
"""A mixin for unittest.TestCase that adds a type-specific equality
predicate for samples.
"""
def __init__(self, *args, **kwargs):
    super(SamplesTestMixin, self).__init__(*args, **kwargs)
self.addTypeEqualityFunc(sample.Sample, self.assertSamplesEqual)
def assertSamplesEqualUpToTimestamp(self, a, b, msg=None):
"""Assert that two samples are equal, ignoring timestamp differences."""
self.assertEqual(a.metric, b.metric,
msg or 'Samples %s and %s have different metrics' % (a, b))
self.assertEqual(a.value, b.value,
msg or 'Samples %s and %s have different values' % (a, b))
self.assertEqual(a.unit, b.unit,
msg or 'Samples %s and %s have different units' % (a, b))
self.assertDictEqual(a.metadata, b.metadata, msg or
'Samples %s and %s have different metadata' % (a, b))
# Deliberately don't compare the timestamp fields of the samples.
def assertSampleListsEqualUpToTimestamp(self, a, b, msg=None):
"""Compare two lists of samples.
    Sadly, the builtin assertListEqual will only use Python's
built-in equality predicate for testing the equality of elements
in a list. Since we compare lists of samples a lot, we need a
custom test for that.
"""
self.assertEqual(len(a), len(b),
msg or 'Lists %s and %s are not the same length' % (a, b))
for i in range(len(a)):
self.assertIsInstance(a[i], sample.Sample,
msg or ('%s (item %s in list) is '
'not a sample.Sample object' %
(a[i], i)))
self.assertIsInstance(b[i], sample.Sample,
msg or ('%s (item %s in list) is '
'not a sample.Sample object' %
(b[i], i)))
try:
self.assertSamplesEqualUpToTimestamp(a[i], b[i], msg=msg)
except self.failureException as ex:
ex.message = str(ex) + (' (was item %s in list)' % i)
ex.args = (ex.message,)
raise ex
def assertDiskMounts(benchmark_config, mount_point):
"""Test whether a disk mounts in a given configuration.
Sets up a virtual machine following benchmark_config and then tests
whether the path mount_point contains a working disk by trying to
create a file there. Returns nothing if file creation works;
otherwise raises an exception.
Args:
benchmark_config: a dict in the format of
benchmark_spec.BenchmarkSpec. The config must specify exactly
one virtual machine.
mount_point: a path, represented as a string.
Raises:
RemoteCommandError if it cannot create a file at mount_point and
verify that the file exists.
AssertionError if benchmark_config does not specify exactly one
virtual machine.
"""
assert len(benchmark_config['vm_groups']) == 1
vm_group = next(six.itervalues(benchmark_config['vm_groups']))
assert vm_group.get('num_vms', 1) == 1
m = mock.MagicMock()
m.BENCHMARK_NAME = _BENCHMARK_NAME
config_spec = benchmark_config_spec.BenchmarkConfigSpec(
_BENCHMARK_NAME, flag_values=flags.FLAGS, **benchmark_config)
spec = benchmark_spec.BenchmarkSpec(
m, config_spec, _BENCHMARK_UID)
with spec.RedirectGlobalFlags():
try:
spec.ConstructVirtualMachines()
spec.Provision()
vm = spec.vms[0]
test_file_path = os.path.join(mount_point, 'test_file')
vm.RemoteCommand('touch %s' % test_file_path)
# This will raise RemoteCommandError if the test file does not
# exist.
vm.RemoteCommand('test -e %s' % test_file_path)
finally:
spec.Delete()
|
from queue import Queue, Empty
import time
class ExpVar(object):
"""
This class stores variables
"""
def __init__(self):
self.variables = {}
def publish(self, name, var):
if name in self.variables:
raise RuntimeError(
"'%s' variable have been already published before" % name)
self.variables[name] = var
return var
def get(self, name):
if name not in self.variables:
raise RuntimeError("No such variable: %s", name)
return self.variables[name]
def get_dict(self):
return {k: v.get() for k, v in self.variables.items()}
class Var(object):
"""
    This class stores a generic variable value.
It is also a base class for other variable types
"""
def __init__(self, value=None):
self.value = value
def set(self, value):
self.value = value
def get(self):
return self.value
def __str__(self):
return str(self.value)
class Int(Var):
def __init__(self, value=0):
if not isinstance(value, int):
raise ValueError(
"Value should be an integer, but it is '%s'" % type(value))
super(Int, self).__init__(value)
def inc(self, delta=1):
self.value += delta
class Metric(object):
"""
This class stores generic time-series data in a queue.
Values are stored as (timestamp, value) tuples
"""
def __init__(self):
self.metric = Queue()
def push(self, value, timestamp=None):
if timestamp is None:
timestamp = int(time.time())
elif not isinstance(timestamp, int):
raise ValueError(
"Timestamp should be an integer, but it is '%s'" %
type(timestamp))
self.metric.put((timestamp, value))
def __next__(self):
try:
return self.metric.get_nowait()
except Empty:
raise StopIteration
def get(self):
# TODO: decide what we should return here
return None
def __iter__(self):
return self
EV = ExpVar()
def publish(name, var):
return EV.publish(name, var)
def get(name):
return EV.get(name)
def get_dict():
return EV.get_dict()
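# Hedged usage sketch (not part of the original module): publish an Int
# counter, bump it, and read every published variable back as a plain dict.
if __name__ == '__main__':
    requests_total = publish('requests_total', Int())
    requests_total.inc()
    requests_total.inc(5)
    print(get_dict())  # {'requests_total': 6}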
|
import collections
import socket
from typing import AbstractSet
from typing import Any
from typing import Collection
from typing import DefaultDict
from typing import Dict
from typing import FrozenSet
from typing import Iterable
from typing import List
from typing import Mapping
from typing import MutableMapping
from typing import Optional
from typing import Sequence
from typing import Set
from typing import Tuple
from typing import Union
import requests
from kubernetes.client import V1Pod
from mypy_extensions import TypedDict
from paasta_tools import marathon_tools
from paasta_tools.utils import get_user_agent
class EnvoyBackend(TypedDict, total=False):
address: str
port_value: int
hostname: str
eds_health_status: str
weight: int
has_associated_task: bool
def are_services_up_in_pod(
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
registrations: Collection[str],
pod_ip: str,
pod_port: int,
) -> bool:
"""Returns whether a service in a k8s pod is reachable via envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param registrations: The service_name.instance_name of the services
:param pod_ip: IP of the pod itself
:param pod_port: The port to reach the service in the pod
"""
for registration in registrations:
backends_per_registration = get_backends(
registration,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
healthy_backends = [
backend
for backend in backends_per_registration.get(registration, [])
if backend[0]["address"] == pod_ip
and backend[0]["port_value"] == pod_port
and backend[0]["eds_health_status"] == "HEALTHY"
]
if not healthy_backends:
return False
return True
def retrieve_envoy_clusters(
envoy_host: str, envoy_admin_port: int, envoy_admin_endpoint_format: str
) -> Dict[str, Any]:
envoy_uri = envoy_admin_endpoint_format.format(
host=envoy_host, port=envoy_admin_port, endpoint="clusters?format=json"
)
# timeout after 1 second and retry 3 times
envoy_admin_request = requests.Session()
envoy_admin_request.headers.update({"User-Agent": get_user_agent()})
envoy_admin_request.mount("http://", requests.adapters.HTTPAdapter(max_retries=3))
envoy_admin_request.mount("https://", requests.adapters.HTTPAdapter(max_retries=3))
envoy_admin_response = envoy_admin_request.get(envoy_uri, timeout=1)
return envoy_admin_response.json()
def get_casper_endpoints(
clusters_info: Mapping[str, Any]
) -> FrozenSet[Tuple[str, int]]:
"""Filters out and returns casper endpoints from Envoy clusters."""
casper_endpoints: Set[Tuple[str, int]] = set()
for cluster_status in clusters_info["cluster_statuses"]:
if "host_statuses" in cluster_status:
if cluster_status["name"].startswith("spectre.") and cluster_status[
"name"
].endswith(".egress_cluster"):
for host_status in cluster_status["host_statuses"]:
casper_endpoints.add(
(
host_status["address"]["socket_address"]["address"],
host_status["address"]["socket_address"]["port_value"],
)
)
return frozenset(casper_endpoints)
def get_backends(
service: str,
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
) -> Dict[str, List[Tuple[EnvoyBackend, bool]]]:
"""Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param service: If None, return backends for all services, otherwise only return backends for this particular
service.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
    :returns backends: A dict mapping each service name to a list of
        (EnvoyBackend, casper_endpoint_found) tuples
"""
if service:
services = [service]
else:
services = None
return get_multiple_backends(
services,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
def get_multiple_backends(
services: Optional[Sequence[str]],
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
) -> Dict[str, List[Tuple[EnvoyBackend, bool]]]:
"""Fetches JSON from Envoy admin's /clusters endpoint and returns a list of backends.
:param services: If None, return backends for all services, otherwise only return backends for these particular
services.
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port that Envoy's admin interface is listening on
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
    :returns backends: A dict mapping each service name to a list of
        (EnvoyBackend, casper_endpoint_found) tuples
"""
clusters_info = retrieve_envoy_clusters(
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
casper_endpoints = get_casper_endpoints(clusters_info)
backends: DefaultDict[
str, List[Tuple[EnvoyBackend, bool]]
] = collections.defaultdict(list)
for cluster_status in clusters_info["cluster_statuses"]:
if "host_statuses" in cluster_status:
if cluster_status["name"].endswith(".egress_cluster"):
service_name = cluster_status["name"][: -len(".egress_cluster")]
if services is None or service_name in services:
cluster_backends = []
casper_endpoint_found = False
for host_status in cluster_status["host_statuses"]:
address = host_status["address"]["socket_address"]["address"]
port_value = host_status["address"]["socket_address"][
"port_value"
]
# Check if this endpoint is actually a casper backend
# If so, omit from the service's list of backends
if not service_name.startswith("spectre."):
if (address, port_value) in casper_endpoints:
casper_endpoint_found = True
continue
try:
hostname = socket.gethostbyaddr(address)[0].split(".")[0]
except socket.herror:
# Default to the raw IP address if we can't lookup the hostname
hostname = address
cluster_backends.append(
(
EnvoyBackend(
address=address,
port_value=port_value,
hostname=hostname,
eds_health_status=host_status["health_status"][
"eds_health_status"
],
weight=host_status["weight"],
),
casper_endpoint_found,
)
)
backends[service_name] += cluster_backends
return backends
def match_backends_and_pods(
backends: Iterable[EnvoyBackend], pods: Iterable[V1Pod],
) -> List[Tuple[Optional[EnvoyBackend], Optional[V1Pod]]]:
"""Returns tuples of matching (backend, pod) pairs, as matched by IP. Each backend will be listed exactly
once. If a backend does not match with a pod, (backend, None) will be included.
If a pod's IP does not match with any backends, (None, pod) will be included.
:param backends: An iterable of Envoy backend dictionaries, e.g. the list returned by
envoy_tools.get_multiple_backends.
:param pods: A list of pods
"""
# { ip : [backend1, backend2], ... }
backends_by_ip: DefaultDict[str, List[EnvoyBackend]] = collections.defaultdict(list)
backend_pod_pairs = []
for backend in backends:
ip = backend["address"]
backends_by_ip[ip].append(backend)
for pod in pods:
ip = pod.status.pod_ip
for backend in backends_by_ip.pop(ip, [None]):
backend_pod_pairs.append((backend, pod))
# we've been popping in the above loop, so anything left didn't match a k8s pod.
for backends in backends_by_ip.values():
for backend in backends:
backend_pod_pairs.append((backend, None))
return backend_pod_pairs
def match_backends_and_tasks(
backends: Iterable[EnvoyBackend], tasks: Iterable[marathon_tools.MarathonTask]
) -> List[Tuple[Optional[EnvoyBackend], Optional[marathon_tools.MarathonTask]]]:
"""Returns tuples of matching (backend, task) pairs, as matched by IP and port. Each backend will be listed exactly
once, and each task will be listed once per port. If a backend does not match with a task, (backend, None) will
be included. If a task's port does not match with any backends, (None, task) will be included.
:param backends: An iterable of Envoy backend dictionaries, e.g. the list returned by
envoy_tools.get_multiple_backends.
:param tasks: An iterable of MarathonTask objects.
"""
# { (ip, port) : [backend1, backend2], ... }
backends_by_ip_port: DefaultDict[
Tuple[str, int], List[EnvoyBackend]
] = collections.defaultdict(list)
backend_task_pairs = []
for backend in backends:
ip = backend["address"]
port = backend["port_value"]
backends_by_ip_port[ip, port].append(backend)
for task in tasks:
ip = socket.gethostbyname(task.host)
for port in task.ports:
for backend in backends_by_ip_port.pop((ip, port), [None]):
backend_task_pairs.append((backend, task))
# we've been popping in the above loop, so anything left didn't match a marathon task.
for backends in backends_by_ip_port.values():
for backend in backends:
backend_task_pairs.append((backend, None))
return backend_task_pairs
def build_envoy_location_dict(
location: str,
matched_envoy_backends_and_tasks: Sequence[
Tuple[
Optional[EnvoyBackend], Optional[Union[marathon_tools.MarathonTask, V1Pod]]
]
],
should_return_individual_backends: bool,
casper_proxied_backends: AbstractSet[Tuple[str, int]],
) -> MutableMapping[str, Any]:
running_backends_count = 0
envoy_backends = []
is_proxied_through_casper = False
for backend, task in matched_envoy_backends_and_tasks:
if backend is None:
continue
if backend["eds_health_status"] == "HEALTHY":
running_backends_count += 1
if should_return_individual_backends:
backend["has_associated_task"] = task is not None
envoy_backends.append(backend)
if (backend["address"], backend["port_value"]) in casper_proxied_backends:
is_proxied_through_casper = True
return {
"name": location,
"running_backends_count": running_backends_count,
"backends": envoy_backends,
"is_proxied_through_casper": is_proxied_through_casper,
}
def get_replication_for_all_services(
envoy_host: str, envoy_admin_port: int, envoy_admin_endpoint_format: str,
) -> Dict[str, int]:
"""Returns the replication level for all services known to this Envoy
:param envoy_host: The host that this check should contact for replication information.
:param envoy_admin_port: The port number that this check should contact for replication information.
:param envoy_admin_endpoint_format: The format of Envoy's admin endpoint
:returns available_instance_counts: A dictionary mapping the service names
to an integer number of available replicas.
"""
backends = get_multiple_backends(
services=None,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
return collections.Counter(
[
service_name
for service_name, service_backends in backends.items()
for b in service_backends
if backend_is_up(b[0])
]
)
def backend_is_up(backend: EnvoyBackend) -> bool:
return backend["eds_health_status"] == "HEALTHY"
|
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.device_tracker import PLATFORM_SCHEMA, SOURCE_TYPES
from homeassistant.const import CONF_DEVICES, STATE_HOME, STATE_NOT_HOME
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from . import CONF_QOS
_LOGGER = logging.getLogger(__name__)
CONF_PAYLOAD_HOME = "payload_home"
CONF_PAYLOAD_NOT_HOME = "payload_not_home"
CONF_SOURCE_TYPE = "source_type"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(mqtt.SCHEMA_BASE).extend(
{
vol.Required(CONF_DEVICES): {cv.string: mqtt.valid_subscribe_topic},
vol.Optional(CONF_PAYLOAD_HOME, default=STATE_HOME): cv.string,
vol.Optional(CONF_PAYLOAD_NOT_HOME, default=STATE_NOT_HOME): cv.string,
vol.Optional(CONF_SOURCE_TYPE): vol.In(SOURCE_TYPES),
}
)
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Set up the MQTT tracker."""
devices = config[CONF_DEVICES]
qos = config[CONF_QOS]
payload_home = config[CONF_PAYLOAD_HOME]
payload_not_home = config[CONF_PAYLOAD_NOT_HOME]
source_type = config.get(CONF_SOURCE_TYPE)
for dev_id, topic in devices.items():
@callback
def async_message_received(msg, dev_id=dev_id):
"""Handle received MQTT message."""
if msg.payload == payload_home:
location_name = STATE_HOME
elif msg.payload == payload_not_home:
location_name = STATE_NOT_HOME
else:
location_name = msg.payload
see_args = {"dev_id": dev_id, "location_name": location_name}
if source_type:
see_args["source_type"] = source_type
hass.async_create_task(async_see(**see_args))
await mqtt.async_subscribe(hass, topic, async_message_received, qos)
return True
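# Hedged configuration sketch (Home Assistant YAML; device ids, topics and qos
# are assumptions made for illustration only):
#
#   device_tracker:
#     - platform: mqtt
#       devices:
#         paulus_phone: location/paulus/phone
#       payload_home: "home"
#       payload_not_home: "not_home"
#       qos: 1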
|
import tensorflow as tf
def trotter_prepare_gates(H, step_size, num_sites, euclidean):
"""Prepare gates using 1st-order trotter decomposition.
Currently only implemented for nearest-neighbor Hamiltonians.
Args:
H: List of Hamiltonian terms. Should be length num_sites-1.
step_size: The trotter step size (a scalar).
num_sites: The total number of sites in the system (an integer).
euclidean: Whether the evolution is euclidean, or not (boolean).
Returns:
layers: A list of layers, with each layer a list of gates, one for each
site, or `None` if no gate is applied to that site in the layer.
"""
if not len(H) == num_sites - 1:
raise ValueError("Number of H terms must match number of sites - 1.")
step_size = tf.cast(step_size, tf.float64) # must be real
step_size = tf.cast(step_size, H[0].dtype)
if euclidean:
step_size = -1.0 * step_size
else:
step_size = 1.j * step_size
eH = []
for h in H:
if len(h.shape) != 4:
raise ValueError("H must be nearest-neighbor.")
h_shp = tf.shape(h)
h_r = tf.reshape(h, (h_shp[0] * h_shp[1], h_shp[2] * h_shp[3]))
eh_r = tf.linalg.expm(step_size * h_r)
eH.append(tf.reshape(eh_r, h_shp))
eh_even = [None] * num_sites
eh_odd = [None] * num_sites
for (n, eh) in enumerate(eH):
if n % 2 == 0:
eh_even[n] = eh
else:
eh_odd[n] = eh
return [eh_even, eh_odd]
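# Hedged usage sketch (the helper name and parameter values are assumptions
# made for illustration): build identical two-site terms as rank-4 tensors of
# shape (d, d, d, d) and prepare the even/odd Trotter layers for a short chain.
def _example_trotter_layers(num_sites=4, d=2, step_size=0.01):
  h = tf.reshape(tf.eye(d * d, dtype=tf.complex128), (d, d, d, d))
  H = [h] * (num_sites - 1)
  return trotter_prepare_gates(H, step_size, num_sites, euclidean=False)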
|
import asyncio
from datetime import timedelta
from typing import Awaitable, Callable
from aioguardian import Client
from aioguardian.errors import GuardianError
from homeassistant.core import HomeAssistant
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import LOGGER
DEFAULT_UPDATE_INTERVAL = timedelta(seconds=30)
class GuardianDataUpdateCoordinator(DataUpdateCoordinator[dict]):
"""Define an extended DataUpdateCoordinator with some Guardian goodies."""
def __init__(
self,
hass: HomeAssistant,
*,
client: Client,
api_name: str,
api_coro: Callable[..., Awaitable],
api_lock: asyncio.Lock,
valve_controller_uid: str,
):
"""Initialize."""
super().__init__(
hass,
LOGGER,
name=f"{valve_controller_uid}_{api_name}",
update_interval=DEFAULT_UPDATE_INTERVAL,
)
self._api_coro = api_coro
self._api_lock = api_lock
self._client = client
async def _async_update_data(self) -> dict:
"""Execute a "locked" API request against the valve controller."""
async with self._api_lock, self._client:
try:
resp = await self._api_coro()
except GuardianError as err:
raise UpdateFailed(err) from err
return resp["data"]
|
import logging
from pathlib import Path
from typing import Optional
from homematicip.aio.device import AsyncSwitchMeasuring
from homematicip.aio.group import AsyncHeatingGroup
from homematicip.aio.home import AsyncHome
from homematicip.base.helpers import handle_config
import voluptuous as vol
from homeassistant.const import ATTR_ENTITY_ID
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.config_validation import comp_entity_ids
from homeassistant.helpers.service import (
async_register_admin_service,
verify_domain_control,
)
from homeassistant.helpers.typing import HomeAssistantType, ServiceCallType
from .const import DOMAIN as HMIPC_DOMAIN
_LOGGER = logging.getLogger(__name__)
ATTR_ACCESSPOINT_ID = "accesspoint_id"
ATTR_ANONYMIZE = "anonymize"
ATTR_CLIMATE_PROFILE_INDEX = "climate_profile_index"
ATTR_CONFIG_OUTPUT_FILE_PREFIX = "config_output_file_prefix"
ATTR_CONFIG_OUTPUT_PATH = "config_output_path"
ATTR_DURATION = "duration"
ATTR_ENDTIME = "endtime"
ATTR_TEMPERATURE = "temperature"
DEFAULT_CONFIG_FILE_PREFIX = "hmip-config"
SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION = "activate_eco_mode_with_duration"
SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD = "activate_eco_mode_with_period"
SERVICE_ACTIVATE_VACATION = "activate_vacation"
SERVICE_DEACTIVATE_ECO_MODE = "deactivate_eco_mode"
SERVICE_DEACTIVATE_VACATION = "deactivate_vacation"
SERVICE_DUMP_HAP_CONFIG = "dump_hap_config"
SERVICE_RESET_ENERGY_COUNTER = "reset_energy_counter"
SERVICE_SET_ACTIVE_CLIMATE_PROFILE = "set_active_climate_profile"
HMIPC_SERVICES = [
SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION,
SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD,
SERVICE_ACTIVATE_VACATION,
SERVICE_DEACTIVATE_ECO_MODE,
SERVICE_DEACTIVATE_VACATION,
SERVICE_DUMP_HAP_CONFIG,
SERVICE_RESET_ENERGY_COUNTER,
SERVICE_SET_ACTIVE_CLIMATE_PROFILE,
]
SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION = vol.Schema(
{
vol.Required(ATTR_DURATION): cv.positive_int,
vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24)),
}
)
SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD = vol.Schema(
{
vol.Required(ATTR_ENDTIME): cv.datetime,
vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24)),
}
)
SCHEMA_ACTIVATE_VACATION = vol.Schema(
{
vol.Required(ATTR_ENDTIME): cv.datetime,
vol.Required(ATTR_TEMPERATURE, default=18.0): vol.All(
vol.Coerce(float), vol.Range(min=0, max=55)
),
vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24)),
}
)
SCHEMA_DEACTIVATE_ECO_MODE = vol.Schema(
{vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24))}
)
SCHEMA_DEACTIVATE_VACATION = vol.Schema(
{vol.Optional(ATTR_ACCESSPOINT_ID): vol.All(str, vol.Length(min=24, max=24))}
)
SCHEMA_SET_ACTIVE_CLIMATE_PROFILE = vol.Schema(
{
vol.Required(ATTR_ENTITY_ID): comp_entity_ids,
vol.Required(ATTR_CLIMATE_PROFILE_INDEX): cv.positive_int,
}
)
SCHEMA_DUMP_HAP_CONFIG = vol.Schema(
{
vol.Optional(ATTR_CONFIG_OUTPUT_PATH): cv.string,
vol.Optional(
ATTR_CONFIG_OUTPUT_FILE_PREFIX, default=DEFAULT_CONFIG_FILE_PREFIX
): cv.string,
vol.Optional(ATTR_ANONYMIZE, default=True): cv.boolean,
}
)
SCHEMA_RESET_ENERGY_COUNTER = vol.Schema(
{vol.Required(ATTR_ENTITY_ID): comp_entity_ids}
)
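# Hedged service-call sketch (YAML; the access point id below is a made-up
# 24-character value used purely for illustration):
#
#   service: homematicip_cloud.activate_eco_mode_with_duration
#   data:
#     duration: 60
#     accesspoint_id: 3014F711A000000BAD0C0DED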
async def async_setup_services(hass: HomeAssistantType) -> None:
"""Set up the HomematicIP Cloud services."""
if hass.services.async_services().get(HMIPC_DOMAIN):
return
@verify_domain_control(hass, HMIPC_DOMAIN)
async def async_call_hmipc_service(service: ServiceCallType):
"""Call correct HomematicIP Cloud service."""
service_name = service.service
if service_name == SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION:
await _async_activate_eco_mode_with_duration(hass, service)
elif service_name == SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD:
await _async_activate_eco_mode_with_period(hass, service)
elif service_name == SERVICE_ACTIVATE_VACATION:
await _async_activate_vacation(hass, service)
elif service_name == SERVICE_DEACTIVATE_ECO_MODE:
await _async_deactivate_eco_mode(hass, service)
elif service_name == SERVICE_DEACTIVATE_VACATION:
await _async_deactivate_vacation(hass, service)
elif service_name == SERVICE_DUMP_HAP_CONFIG:
await _async_dump_hap_config(hass, service)
elif service_name == SERVICE_RESET_ENERGY_COUNTER:
await _async_reset_energy_counter(hass, service)
elif service_name == SERVICE_SET_ACTIVE_CLIMATE_PROFILE:
await _set_active_climate_profile(hass, service)
hass.services.async_register(
domain=HMIPC_DOMAIN,
service=SERVICE_ACTIVATE_ECO_MODE_WITH_DURATION,
service_func=async_call_hmipc_service,
schema=SCHEMA_ACTIVATE_ECO_MODE_WITH_DURATION,
)
hass.services.async_register(
domain=HMIPC_DOMAIN,
service=SERVICE_ACTIVATE_ECO_MODE_WITH_PERIOD,
service_func=async_call_hmipc_service,
schema=SCHEMA_ACTIVATE_ECO_MODE_WITH_PERIOD,
)
hass.services.async_register(
domain=HMIPC_DOMAIN,
service=SERVICE_ACTIVATE_VACATION,
service_func=async_call_hmipc_service,
schema=SCHEMA_ACTIVATE_VACATION,
)
hass.services.async_register(
domain=HMIPC_DOMAIN,
service=SERVICE_DEACTIVATE_ECO_MODE,
service_func=async_call_hmipc_service,
schema=SCHEMA_DEACTIVATE_ECO_MODE,
)
hass.services.async_register(
domain=HMIPC_DOMAIN,
service=SERVICE_DEACTIVATE_VACATION,
service_func=async_call_hmipc_service,
schema=SCHEMA_DEACTIVATE_VACATION,
)
hass.services.async_register(
domain=HMIPC_DOMAIN,
service=SERVICE_SET_ACTIVE_CLIMATE_PROFILE,
service_func=async_call_hmipc_service,
schema=SCHEMA_SET_ACTIVE_CLIMATE_PROFILE,
)
async_register_admin_service(
hass=hass,
domain=HMIPC_DOMAIN,
service=SERVICE_DUMP_HAP_CONFIG,
service_func=async_call_hmipc_service,
schema=SCHEMA_DUMP_HAP_CONFIG,
)
async_register_admin_service(
hass=hass,
domain=HMIPC_DOMAIN,
service=SERVICE_RESET_ENERGY_COUNTER,
service_func=async_call_hmipc_service,
schema=SCHEMA_RESET_ENERGY_COUNTER,
)
async def async_unload_services(hass: HomeAssistantType):
"""Unload HomematicIP Cloud services."""
if hass.data[HMIPC_DOMAIN]:
return
for hmipc_service in HMIPC_SERVICES:
hass.services.async_remove(domain=HMIPC_DOMAIN, service=hmipc_service)
async def _async_activate_eco_mode_with_duration(
hass: HomeAssistantType, service: ServiceCallType
) -> None:
"""Service to activate eco mode with duration."""
duration = service.data[ATTR_DURATION]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hass, hapid)
if home:
await home.activate_absence_with_duration(duration)
else:
for hap in hass.data[HMIPC_DOMAIN].values():
await hap.home.activate_absence_with_duration(duration)
async def _async_activate_eco_mode_with_period(
hass: HomeAssistantType, service: ServiceCallType
) -> None:
"""Service to activate eco mode with period."""
endtime = service.data[ATTR_ENDTIME]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hass, hapid)
if home:
await home.activate_absence_with_period(endtime)
else:
for hap in hass.data[HMIPC_DOMAIN].values():
await hap.home.activate_absence_with_period(endtime)
async def _async_activate_vacation(
hass: HomeAssistantType, service: ServiceCallType
) -> None:
"""Service to activate vacation."""
endtime = service.data[ATTR_ENDTIME]
temperature = service.data[ATTR_TEMPERATURE]
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hass, hapid)
if home:
await home.activate_vacation(endtime, temperature)
else:
for hap in hass.data[HMIPC_DOMAIN].values():
await hap.home.activate_vacation(endtime, temperature)
async def _async_deactivate_eco_mode(
hass: HomeAssistantType, service: ServiceCallType
) -> None:
"""Service to deactivate eco mode."""
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hass, hapid)
if home:
await home.deactivate_absence()
else:
for hap in hass.data[HMIPC_DOMAIN].values():
await hap.home.deactivate_absence()
async def _async_deactivate_vacation(
hass: HomeAssistantType, service: ServiceCallType
) -> None:
"""Service to deactivate vacation."""
hapid = service.data.get(ATTR_ACCESSPOINT_ID)
if hapid:
home = _get_home(hass, hapid)
if home:
await home.deactivate_vacation()
else:
for hap in hass.data[HMIPC_DOMAIN].values():
await hap.home.deactivate_vacation()
async def _set_active_climate_profile(
hass: HomeAssistantType, service: ServiceCallType
) -> None:
"""Service to set the active climate profile."""
entity_id_list = service.data[ATTR_ENTITY_ID]
climate_profile_index = service.data[ATTR_CLIMATE_PROFILE_INDEX] - 1
for hap in hass.data[HMIPC_DOMAIN].values():
if entity_id_list != "all":
for entity_id in entity_id_list:
group = hap.hmip_device_by_entity_id.get(entity_id)
if group and isinstance(group, AsyncHeatingGroup):
await group.set_active_profile(climate_profile_index)
else:
for group in hap.home.groups:
if isinstance(group, AsyncHeatingGroup):
await group.set_active_profile(climate_profile_index)
async def _async_dump_hap_config(
hass: HomeAssistantType, service: ServiceCallType
) -> None:
"""Service to dump the configuration of a Homematic IP Access Point."""
config_path = service.data.get(ATTR_CONFIG_OUTPUT_PATH) or hass.config.config_dir
config_file_prefix = service.data[ATTR_CONFIG_OUTPUT_FILE_PREFIX]
anonymize = service.data[ATTR_ANONYMIZE]
for hap in hass.data[HMIPC_DOMAIN].values():
hap_sgtin = hap.config_entry.unique_id
if anonymize:
hap_sgtin = hap_sgtin[-4:]
file_name = f"{config_file_prefix}_{hap_sgtin}.json"
path = Path(config_path)
config_file = path / file_name
json_state = await hap.home.download_configuration()
json_state = handle_config(json_state, anonymize)
config_file.write_text(json_state, encoding="utf8")
async def _async_reset_energy_counter(
hass: HomeAssistantType, service: ServiceCallType
):
"""Service to reset the energy counter."""
entity_id_list = service.data[ATTR_ENTITY_ID]
for hap in hass.data[HMIPC_DOMAIN].values():
if entity_id_list != "all":
for entity_id in entity_id_list:
device = hap.hmip_device_by_entity_id.get(entity_id)
if device and isinstance(device, AsyncSwitchMeasuring):
await device.reset_energy_counter()
else:
for device in hap.home.devices:
if isinstance(device, AsyncSwitchMeasuring):
await device.reset_energy_counter()
def _get_home(hass: HomeAssistantType, hapid: str) -> Optional[AsyncHome]:
"""Return a HmIP home."""
hap = hass.data[HMIPC_DOMAIN].get(hapid)
if hap:
return hap.home
_LOGGER.info("No matching access point found for access point id %s", hapid)
return None
|
import numpy
import sys
from typing import (Iterator,
Tuple,
Mapping,
Union,
Iterable,
List,
Any)
if sys.version_info >= (3, 8):
from typing import TypedDict, Protocol, Literal
else:
from typing_extensions import TypedDict, Protocol, Literal
RecordDict = Mapping[str, Any]
RecordID = Union[int, str]
Record = Tuple[RecordID, RecordDict]
RecordPair = Tuple[Record, Record]
RecordPairs = Iterator[RecordPair]
Blocks = Iterator[List[RecordPair]]
Cluster = Tuple[Tuple[RecordID, ...], Union[numpy.ndarray, Tuple]]
Clusters = Iterable[Cluster]
Data = Mapping[RecordID, RecordDict]
TrainingExample = Tuple[RecordDict, RecordDict]
Links = Iterable[Union[numpy.ndarray,
Tuple[Tuple[RecordID, RecordID], float]]]
LookupResults = Iterable[Tuple[RecordID, Tuple[Tuple[RecordID, float], ...]]]
JoinConstraint = Literal['one-to-one', 'many-to-one', 'many-to-many']
class TrainingData(TypedDict):
match: List[TrainingExample]
distinct: List[TrainingExample]
class Classifier(Protocol):
def fit(self, X: object, y: object) -> None:
...
def predict_proba(self, X: object) -> Any:
...
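# Hedged illustration (added for clarity; the example values are assumptions):
# a record pair and a minimal TrainingData instance built from these aliases.
_example_pair: RecordPair = (
    (1, {"name": "Ann", "city": "Berlin"}),
    (2, {"name": "Anne", "city": "Berlin"}),
)
_example_training: TrainingData = {
    "match": [(_example_pair[0][1], _example_pair[1][1])],
    "distinct": [],
}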
|
import os
from threading import Event
import pytest as pytest
from yandextank.common.util import get_test_path
from yandextank.aggregator import TankAggregator
from yandextank.common.util import FileMultiReader
from yandextank.plugins.Phantom.reader import PhantomReader
class PhantomMock(object):
def __init__(self, phout):
self.phout_filename = phout
self.reader = None
self.finished = Event()
def get_reader(self):
if self.reader is None:
self.reader = PhantomReader(FileMultiReader(self.phout_filename, self.finished).get_file())
return self.reader
def get_stats_reader(self):
return (i for i in [])
def end_test(self, retcode):
return retcode
class ListenerMock(object):
def __init__(self, expected):
self.collected_data = []
self.cnt = 0
self.avg = 0
def on_aggregated_data(self, data, stats):
rps = data['counted_rps']
self.cnt += 1
self.avg = (self.avg * (self.cnt - 1) + rps) / self.cnt
@pytest.mark.parametrize('phout, expected_rps', [
('yandextank/aggregator/tests/phout1', 300)
])
def test_aggregator(phout, expected_rps):
generator = PhantomMock(os.path.join(get_test_path(), phout))
aggregator = TankAggregator(generator)
listener = ListenerMock(expected_rps)
aggregator.add_result_listener(listener)
aggregator.start_test(poll_period=0)
generator.finished.set()
while not aggregator.is_aggr_finished():
aggregator.is_test_finished()
aggregator.end_test(1)
assert abs(listener.avg - expected_rps) < 0.1 * expected_rps
|
import json
from flask import Blueprint, request, abort
from jinja2 import Template
from app import app
from app.agents.models import Bot
from app.commons import build_response
from app.endpoint.utils import SilentUndefined
from app.endpoint.utils import call_api
from app.endpoint.utils import get_synonyms
from app.endpoint.utils import split_sentence
from app.intents.models import Intent
from app.nlu.classifiers.starspace_intent_classifier import \
EmbeddingIntentClassifier
from app.nlu.entity_extractor import EntityExtractor
from app.nlu.tasks import model_updated_signal
endpoint = Blueprint('api', __name__, url_prefix='/api')
sentence_classifier = None
synonyms = None
entity_extraction = None
# Request Handler
@endpoint.route('/v1', methods=['POST'])
def api():
"""
Endpoint to converse with chatbot.
Chat context is maintained by exchanging the payload between client and bot.
sample input/output payload =>
{
"currentNode": "",
"complete": false,
"parameters": [],
"extractedParameters": {},
"missingParameters": [],
"intent": {
},
"context": {},
"input": "hello",
"speechResponse": [
]
}
:param json:
:return json:
"""
request_json = request.get_json(silent=True)
result_json = request_json
if request_json:
context = {"context": request_json["context"]}
if app.config["DEFAULT_WELCOME_INTENT_NAME"] in request_json.get(
"input"):
intent = Intent.objects(
intentId=app.config["DEFAULT_WELCOME_INTENT_NAME"]).first()
result_json["complete"] = True
result_json["intent"]["object_id"] = str(intent.id)
result_json["intent"]["id"] = intent.intentId
result_json["input"] = request_json.get("input")
template = Template(
intent.speechResponse,
undefined=SilentUndefined)
result_json["speechResponse"] = split_sentence(template.render(**context))
app.logger.info(request_json.get("input"), extra=result_json)
return build_response.build_json(result_json)
# check if input method is event or raw text
elif request_json.get("event"):
intent_id = request_json.get("event")
confidence = 1
result_json["event"]=None
else:
intent_id, confidence, suggestions = predict(request_json.get("input"))
app.logger.info("intent_id => %s" % intent_id)
intent = Intent.objects.get(intentId=intent_id)
if intent.parameters:
parameters = intent.parameters
result_json["extractedParameters"] = request_json.get("extractedParameters") or {}
else:
parameters = []
if ((request_json.get("complete") is None) or (request_json.get("complete") is True)):
result_json["intent"] = {
"object_id": str(intent.id),
"confidence": confidence,
"id": intent.intentId
}
if parameters:
# Extract NER entities
result_json["extractedParameters"].update(entity_extraction.predict(
intent_id, request_json.get("input")))
missing_parameters = []
result_json["missingParameters"] = []
result_json["parameters"] = []
for parameter in parameters:
result_json["parameters"].append({
"name": parameter.name,
"type": parameter.type,
"required": parameter.required
})
if parameter.required:
if parameter.name not in result_json["extractedParameters"].keys():
result_json["missingParameters"].append(
parameter.name)
missing_parameters.append(parameter)
if missing_parameters:
result_json["complete"] = False
current_node = missing_parameters[0]
result_json["currentNode"] = current_node["name"]
result_json["speechResponse"] = split_sentence(current_node["prompt"])
else:
result_json["complete"] = True
context["parameters"] = result_json["extractedParameters"]
else:
result_json["complete"] = True
elif request_json.get("complete") is False:
if "cancel" not in intent.name:
intent_id = request_json["intent"]["id"]
app.logger.info(intent_id)
intent = Intent.objects.get(intentId=intent_id)
extracted_parameter = entity_extraction.replace_synonyms({
request_json.get("currentNode"): request_json.get("input")
})
# replace synonyms for entity values
result_json["extractedParameters"].update(extracted_parameter)
result_json["missingParameters"].remove(
request_json.get("currentNode"))
if len(result_json["missingParameters"]) == 0:
result_json["complete"] = True
context = {"parameters": result_json["extractedParameters"],
"context": request_json["context"]}
else:
missing_parameter = result_json["missingParameters"][0]
result_json["complete"] = False
current_node = [
node for node in intent.parameters if missing_parameter in node.name][0]
result_json["currentNode"] = current_node.name
result_json["speechResponse"] = split_sentence(current_node.prompt)
else:
result_json["currentNode"] = None
result_json["missingParameters"] = []
result_json["parameters"] = {}
result_json["intent"] = {}
result_json["complete"] = True
if result_json["complete"]:
if intent.apiTrigger:
isJson = False
parameters = result_json["extractedParameters"]
headers = intent.apiDetails.get_headers()
app.logger.info("headers %s" % headers)
url_template = Template(
intent.apiDetails.url, undefined=SilentUndefined)
rendered_url = url_template.render(**context)
if intent.apiDetails.isJson:
isJson = True
request_template = Template(
intent.apiDetails.jsonData, undefined=SilentUndefined)
parameters = json.loads(request_template.render(**context))
try:
result = call_api(rendered_url,
intent.apiDetails.requestType, headers,
parameters, isJson)
except Exception as e:
app.logger.warn("API call failed", e)
result_json["speechResponse"] = ["Service is not available. Please try again later."]
else:
context["result"] = result
template = Template(
intent.speechResponse, undefined=SilentUndefined)
result_json["speechResponse"] = split_sentence(template.render(**context))
else:
context["result"] = {}
template = Template(intent.speechResponse,
undefined=SilentUndefined)
result_json["speechResponse"] = split_sentence(template.render(**context))
app.logger.info(request_json.get("input"), extra=result_json)
return build_response.build_json(result_json)
else:
return abort(400)
def update_model(app, message, **extra):
"""
Signal hook to be called after training is completed.
Reloads ml models and synonyms.
:param app:
:param message:
:param extra:
:return:
"""
global sentence_classifier
sentence_classifier = EmbeddingIntentClassifier.load(
app.config["MODELS_DIR"], app.config["USE_WORD_VECTORS"])
    global synonyms
    synonyms = get_synonyms()
global entity_extraction
entity_extraction = EntityExtractor(synonyms)
app.logger.info("Intent Model updated")
with app.app_context():
update_model(app, "Models updated")
model_updated_signal.connect(update_model, app)
def predict(sentence):
"""
Predict Intent using Intent classifier
:param sentence:
:return:
"""
bot = Bot.objects.get(name="default")
predicted, intents = sentence_classifier.process(sentence)
app.logger.info("predicted intent %s", predicted)
if predicted["confidence"] < bot.config.get("confidence_threshold", .90):
intents = Intent.objects(intentId=app.config["DEFAULT_FALLBACK_INTENT_NAME"])
intents = intents.first().intentId
return intents, 1.0, []
else:
return predicted["intent"], predicted["confidence"], intents[1:]
|
import json
import mock
import pytest
from paasta_tools.cli.cmds.list_deploy_queue import list_deploy_queue
from paasta_tools.paastaapi import ApiException
from paasta_tools.paastaapi.models import DeployQueue
from paasta_tools.paastaapi.models import DeployQueueServiceInstance
@pytest.fixture(autouse=True)
def mock_load_system_paasta_config():
with mock.patch(
"paasta_tools.cli.cmds.list_deploy_queue.load_system_paasta_config",
autospec=True,
):
yield
@pytest.fixture(autouse=True)
def mock_list_clusters():
with mock.patch(
"paasta_tools.cli.cmds.list_deploy_queue.list_clusters", autospec=True,
) as _mock_list_clusters:
_mock_list_clusters.return_value = ["westeros-prod"]
yield
@pytest.fixture()
def mock_api():
with mock.patch(
"paasta_tools.cli.cmds.list_deploy_queue.get_paasta_oapi_client", autospec=True,
) as m:
yield m.return_value
def test_list_deploy_queue(mock_api, capfd):
args = mock.Mock(cluster="westeros-prod", json=False)
mock_api.default.deploy_queue.return_value = DeployQueue(
available_service_instances=[
DeployQueueServiceInstance(
service="service1",
instance="instance1",
watcher="watcher1",
bounce_by=1578038400.0,
wait_until=1578038400.0,
enqueue_time=1578038400.0,
bounce_start_time=1578038400.0,
failures=0,
processed_count=0,
),
],
unavailable_service_instances=[
DeployQueueServiceInstance(
service="service2",
instance="instance2",
watcher="watcher2",
bounce_by=1577952000.0,
wait_until=1577952000.0,
enqueue_time=1577952000.0,
bounce_start_time=1577952000.0,
failures=5,
processed_count=10,
),
],
)
return_value = list_deploy_queue(args)
assert return_value == 0
stdout, stderr = capfd.readouterr()
lines = stdout.split("\n")
assert args.cluster in lines[0]
assert "service1.instance1" in lines[3]
assert "service2.instance2" in lines[6]
def test_list_deploy_queue_json(mock_api, capfd):
args = mock.Mock(cluster="westeros-prod", json=True)
mock_api.default.deploy_queue.return_value = DeployQueue(
available_service_instances=[], unavailable_service_instances=[],
)
return_value = list_deploy_queue(args)
assert return_value == 0
stdout, stderr = capfd.readouterr()
assert stdout.strip() == json.dumps(
{"available_service_instances": [], "unavailable_service_instances": []}
)
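# When the API request fails, list_deploy_queue is expected to return the HTTP status code.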
def test_http_error(mock_api):
args = mock.Mock(cluster="westeros-prod")
mock_api.api_error = ApiException
mock_api.default.deploy_queue.side_effect = ApiException(
status=500, reason="Internal Server Error"
)
assert list_deploy_queue(args) == 500
|
from homeassistant.bootstrap import async_setup_component
from homeassistant.components.startca.sensor import StartcaData
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
DATA_GIGABYTES,
HTTP_NOT_FOUND,
PERCENTAGE,
)
from homeassistant.helpers.aiohttp_client import async_get_clientsession
async def test_capped_setup(hass, aioclient_mock):
"""Test the default setup."""
config = {
"platform": "startca",
"api_key": "NOTAKEY",
"total_bandwidth": 400,
"monitored_variables": [
"usage",
"usage_gb",
"limit",
"used_download",
"used_upload",
"used_total",
"grace_download",
"grace_upload",
"grace_total",
"total_download",
"total_upload",
"used_remaining",
],
}
result = (
'<?xml version="1.0" encoding="ISO-8859-15"?>'
"<usage>"
"<version>1.1</version>"
"<total> <!-- total actual usage -->"
"<download>304946829777</download>"
"<upload>6480700153</upload>"
"</total>"
"<used> <!-- part of usage that counts against quota -->"
"<download>304946829777</download>"
"<upload>6480700153</upload>"
"</used>"
"<grace> <!-- part of usage that is free -->"
"<download>304946829777</download>"
"<upload>6480700153</upload>"
"</grace>"
"</usage>"
)
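    # 304946829777 bytes and 6480700153 bytes are roughly 304.95 GB and 6.48 GB,
    # which is what the sensors below should report.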
aioclient_mock.get(
"https://www.start.ca/support/usage/api?key=NOTAKEY", text=result
)
await async_setup_component(hass, "sensor", {"sensor": config})
await hass.async_block_till_done()
state = hass.states.get("sensor.start_ca_usage_ratio")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "76.24"
state = hass.states.get("sensor.start_ca_usage")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "304.95"
state = hass.states.get("sensor.start_ca_data_limit")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "400"
state = hass.states.get("sensor.start_ca_used_download")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "304.95"
state = hass.states.get("sensor.start_ca_used_upload")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "6.48"
state = hass.states.get("sensor.start_ca_used_total")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "311.43"
state = hass.states.get("sensor.start_ca_grace_download")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "304.95"
state = hass.states.get("sensor.start_ca_grace_upload")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "6.48"
state = hass.states.get("sensor.start_ca_grace_total")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "311.43"
state = hass.states.get("sensor.start_ca_total_download")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "304.95"
state = hass.states.get("sensor.start_ca_total_upload")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "6.48"
state = hass.states.get("sensor.start_ca_remaining")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "95.05"
async def test_unlimited_setup(hass, aioclient_mock):
"""Test the default setup."""
config = {
"platform": "startca",
"api_key": "NOTAKEY",
"total_bandwidth": 0,
"monitored_variables": [
"usage",
"usage_gb",
"limit",
"used_download",
"used_upload",
"used_total",
"grace_download",
"grace_upload",
"grace_total",
"total_download",
"total_upload",
"used_remaining",
],
}
result = (
'<?xml version="1.0" encoding="ISO-8859-15"?>'
"<usage>"
"<version>1.1</version>"
"<total> <!-- total actual usage -->"
"<download>304946829777</download>"
"<upload>6480700153</upload>"
"</total>"
"<used> <!-- part of usage that counts against quota -->"
"<download>0</download>"
"<upload>0</upload>"
"</used>"
"<grace> <!-- part of usage that is free -->"
"<download>304946829777</download>"
"<upload>6480700153</upload>"
"</grace>"
"</usage>"
)
aioclient_mock.get(
"https://www.start.ca/support/usage/api?key=NOTAKEY", text=result
)
await async_setup_component(hass, "sensor", {"sensor": config})
await hass.async_block_till_done()
state = hass.states.get("sensor.start_ca_usage_ratio")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.state == "0"
state = hass.states.get("sensor.start_ca_usage")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "0.0"
state = hass.states.get("sensor.start_ca_data_limit")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "inf"
state = hass.states.get("sensor.start_ca_used_download")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "0.0"
state = hass.states.get("sensor.start_ca_used_upload")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "0.0"
state = hass.states.get("sensor.start_ca_used_total")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "0.0"
state = hass.states.get("sensor.start_ca_grace_download")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "304.95"
state = hass.states.get("sensor.start_ca_grace_upload")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "6.48"
state = hass.states.get("sensor.start_ca_grace_total")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "311.43"
state = hass.states.get("sensor.start_ca_total_download")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "304.95"
state = hass.states.get("sensor.start_ca_total_upload")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "6.48"
state = hass.states.get("sensor.start_ca_remaining")
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == DATA_GIGABYTES
assert state.state == "inf"
async def test_bad_return_code(hass, aioclient_mock):
"""Test handling a return code that isn't HTTP OK."""
aioclient_mock.get(
"https://www.start.ca/support/usage/api?key=NOTAKEY", status=HTTP_NOT_FOUND
)
scd = StartcaData(hass.loop, async_get_clientsession(hass), "NOTAKEY", 400)
result = await scd.async_update()
assert result is False
async def test_bad_json_decode(hass, aioclient_mock):
"""Test decoding invalid json result."""
aioclient_mock.get(
"https://www.start.ca/support/usage/api?key=NOTAKEY", text="this is not xml"
)
scd = StartcaData(hass.loop, async_get_clientsession(hass), "NOTAKEY", 400)
result = await scd.async_update()
assert result is False
|
from datetime import timedelta
import logging
import btlewrap
from btlewrap import BluetoothBackendException
from miflora import miflora_poller # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONDUCTIVITY,
CONF_FORCE_UPDATE,
CONF_MAC,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_SCAN_INTERVAL,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_ILLUMINANCE,
DEVICE_CLASS_TEMPERATURE,
EVENT_HOMEASSISTANT_START,
LIGHT_LUX,
PERCENTAGE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
from homeassistant.util.temperature import celsius_to_fahrenheit
try:
import bluepy.btle # noqa: F401 pylint: disable=unused-import
BACKEND = btlewrap.BluepyBackend
except ImportError:
BACKEND = btlewrap.GatttoolBackend
_LOGGER = logging.getLogger(__name__)
CONF_ADAPTER = "adapter"
CONF_MEDIAN = "median"
CONF_GO_UNAVAILABLE_TIMEOUT = "go_unavailable_timeout"
DEFAULT_ADAPTER = "hci0"
DEFAULT_FORCE_UPDATE = False
DEFAULT_MEDIAN = 3
DEFAULT_NAME = "Mi Flora"
DEFAULT_GO_UNAVAILABLE_TIMEOUT = timedelta(seconds=7200)
SCAN_INTERVAL = timedelta(seconds=1200)
ATTR_LAST_SUCCESSFUL_UPDATE = "last_successful_update"
# Sensor types are defined like: Name, units, icon, device_class
SENSOR_TYPES = {
"temperature": ["Temperature", TEMP_CELSIUS, None, DEVICE_CLASS_TEMPERATURE],
"light": ["Light intensity", LIGHT_LUX, None, DEVICE_CLASS_ILLUMINANCE],
"moisture": ["Moisture", PERCENTAGE, "mdi:water-percent", None],
"conductivity": ["Conductivity", CONDUCTIVITY, "mdi:flash-circle", None],
"battery": ["Battery", PERCENTAGE, None, DEVICE_CLASS_BATTERY],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MEDIAN, default=DEFAULT_MEDIAN): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_ADAPTER, default=DEFAULT_ADAPTER): cv.string,
vol.Optional(
CONF_GO_UNAVAILABLE_TIMEOUT, default=DEFAULT_GO_UNAVAILABLE_TIMEOUT
): cv.time_period,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the MiFlora sensor."""
backend = BACKEND
_LOGGER.debug("Miflora is using %s backend", backend.__name__)
cache = config.get(CONF_SCAN_INTERVAL, SCAN_INTERVAL).total_seconds()
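    # Use the scan interval as the poller's cache timeout so repeated entity updates
    # within one interval reuse the last Bluetooth reading.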
poller = miflora_poller.MiFloraPoller(
config.get(CONF_MAC),
cache_timeout=cache,
adapter=config.get(CONF_ADAPTER),
backend=backend,
)
force_update = config.get(CONF_FORCE_UPDATE)
median = config.get(CONF_MEDIAN)
go_unavailable_timeout = config.get(CONF_GO_UNAVAILABLE_TIMEOUT)
devs = []
for parameter in config[CONF_MONITORED_CONDITIONS]:
name = SENSOR_TYPES[parameter][0]
unit = (
hass.config.units.temperature_unit
if parameter == "temperature"
else SENSOR_TYPES[parameter][1]
)
icon = SENSOR_TYPES[parameter][2]
device_class = SENSOR_TYPES[parameter][3]
prefix = config.get(CONF_NAME)
if prefix:
name = f"{prefix} {name}"
devs.append(
MiFloraSensor(
poller,
parameter,
name,
unit,
icon,
device_class,
force_update,
median,
go_unavailable_timeout,
)
)
async_add_entities(devs)
class MiFloraSensor(Entity):
"""Implementing the MiFlora sensor."""
def __init__(
self,
poller,
parameter,
name,
unit,
icon,
device_class,
force_update,
median,
go_unavailable_timeout,
):
"""Initialize the sensor."""
self.poller = poller
self.parameter = parameter
self._unit = unit
self._icon = icon
self._name = name
self._state = None
self._device_class = device_class
self.data = []
self._force_update = force_update
self.go_unavailable_timeout = go_unavailable_timeout
self.last_successful_update = dt_util.utc_from_timestamp(0)
# Median is used to filter out outliers. median of 3 will filter
# single outliers, while median of 5 will filter double outliers
# Use median_count = 1 if no filtering is required.
self.median_count = median
async def async_added_to_hass(self):
"""Set initial state."""
@callback
def on_startup(_):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, on_startup)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def available(self):
"""Return True if did update since 2h."""
return self.last_successful_update > (
dt_util.utcnow() - self.go_unavailable_timeout
)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return {ATTR_LAST_SUCCESSFUL_UPDATE: self.last_successful_update}
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit
@property
def icon(self):
"""Return the icon of the sensor."""
return self._icon
@property
def force_update(self):
"""Force update."""
return self._force_update
def update(self):
"""
Update current conditions.
        This uses a rolling median over the last ``median_count`` values (3 by default)
        to filter out outliers.
"""
try:
_LOGGER.debug("Polling data for %s", self.name)
data = self.poller.parameter_value(self.parameter)
except (OSError, BluetoothBackendException) as err:
_LOGGER.info("Polling error %s: %s", type(err).__name__, err)
return
if data is not None:
_LOGGER.debug("%s = %s", self.name, data)
if self._unit == TEMP_FAHRENHEIT:
data = celsius_to_fahrenheit(data)
self.data.append(data)
self.last_successful_update = dt_util.utcnow()
else:
_LOGGER.info("Did not receive any data from Mi Flora sensor %s", self.name)
# Remove old data from median list or set sensor value to None
# if no data is available anymore
if self.data:
self.data = self.data[1:]
else:
self._state = None
return
_LOGGER.debug("Data collected: %s", self.data)
if len(self.data) > self.median_count:
self.data = self.data[1:]
if len(self.data) == self.median_count:
median = sorted(self.data)[int((self.median_count - 1) / 2)]
_LOGGER.debug("Median is: %s", median)
self._state = median
elif self._state is None:
_LOGGER.debug("Set initial state")
self._state = self.data[0]
else:
_LOGGER.debug("Not yet enough data for median calculation")
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import functools
import logging
import getpass
import os
import sys
import traceback
import warnings
import coloredlogs
import docopt
import fs
import six
import verboselogs
from .. import __version__
from ..looters import InstaLooter, HashtagLooter, ProfileLooter, PostLooter
from ..pbar import TqdmProgressBar
from ..batch import BatchRunner, logger as batch_logger
from . import logutils
from .constants import HELP, USAGE, WARNING_ACTIONS
from .time import get_times_from_cli
from .login import login, logger as login_logger
__all__ = ["main", "logger"]
#: A `~logging.Logger` instance used within the `.cli` module.
logger = verboselogs.VerboseLogger(__name__)
@logutils.wrap_warnings(logger)
def main(argv=None, stream=None):
"""Run from the command line interface.
Arguments:
argv (list): The positional arguments to read. Defaults to
`sys.argv` to use CLI arguments.
stream (~io.IOBase): A file where to write error messages.
Leave to `None` to use the `~coloredlogs.StandardErrorHandler`
for logs, and `sys.stderr` for error messages.
Returns:
int: An error code, or 0 if the program executed successfully.
"""
_print = functools.partial(print, file=stream or sys.stderr)
# Parse command line arguments
try:
args = docopt.docopt(
HELP, argv, version='instalooter {}'.format(__version__))
except docopt.DocoptExit as de:
_print(de)
return 1
    # Print usage and exit if required (docopt does not do this!)
if args['--usage']:
_print(USAGE)
return 0
# Set the loggers up with the requested logging level
level = "ERROR" if args['--quiet'] else args.get("--loglevel", "INFO")
for logger_ in (logger, login_logger, batch_logger):
coloredlogs.install(
level=int(level) if level.isdigit() else level,
stream=stream,
logger=logger_)
    # Check the requested warning action
if args['-W'] not in WARNING_ACTIONS:
_print("Unknown warning action:", args['-W'])
_print(" available actions:", ', '.join(WARNING_ACTIONS))
return 1
with warnings.catch_warnings():
warnings.simplefilter(args['-W'])
try:
# Run in batch mode
if args['batch']:
# Load the batch configuration from the given file
with open(args['<batch_file>']) as batch_file:
batch_runner = BatchRunner(batch_file, args)
# Run the batch
batch_runner.run_all()
return 0
# Login if requested
if args['login']:
try:
if not args['--username']:
args['--username'] = six.moves.input('Username: ')
login(args)
return 0
except ValueError as ve:
logger.error("%s", ve)
if args["--traceback"]:
traceback.print_exc()
return 1
# Logout if requested
if args['logout']:
if InstaLooter._cachefs().exists(InstaLooter._COOKIE_FILE):
InstaLooter._logout()
logger.success('Logged out.')
else:
warnings.warn('Cookie file not found.')
return 0
# Normal download mode:
if args['user']:
looter_cls = ProfileLooter
target = args['<profile>']
elif args['hashtag']:
looter_cls = HashtagLooter
target = args['<hashtag>']
elif args['post']:
looter_cls = PostLooter
target = args['<post_token>']
else:
raise NotImplementedError("TODO")
# Instantiate the looter
looter = looter_cls(
target,
add_metadata=args['--add-metadata'],
get_videos=args['--get-videos'],
videos_only=args['--videos-only'],
jobs=int(args['--jobs']) if args['--jobs'] is not None else 16,
template=args['--template'],
dump_json=args['--dump-json'],
dump_only=args['--dump-only'],
extended_dump=args['--extended-dump']
)
# Attempt to login and extract the timeframe
if args['--username']:
login(args)
if args['--num-to-dl']:
args['--num-to-dl'] = int(args['--num-to-dl'])
try:
if args['--time'] is not None:
args['--time'] = get_times_from_cli(args['--time'])
except ValueError as ve:
_print("invalid format for --time parameter:", args["--time"])
_print(" (format is [D]:[D] where D is an ISO 8601 date)")
return 1
logger.debug("Opening destination filesystem")
dest_url = args.get('<directory>') or os.getcwd()
dest_fs = fs.open_fs(dest_url, create=True)
logger.notice("Starting download of `%s`", target)
n = looter.download(
destination=dest_fs,
media_count=args['--num-to-dl'],
timeframe=args['--time'],
new_only=args['--new'],
pgpbar_cls=None if args['--quiet'] else TqdmProgressBar,
dlpbar_cls=None if args['--quiet'] else TqdmProgressBar)
if n > 1:
logger.success("Downloaded %i posts.", n)
elif n == 1:
logger.success("Downloaded %i post.", n)
except (Exception, KeyboardInterrupt) as e:
from .threadutils import threads_force_join, threads_count
# Show error traceback if any
if not isinstance(e, KeyboardInterrupt):
logger.critical("%s", e)
if args["--traceback"]:
traceback.print_exc()
else:
logger.critical("Interrupted")
# Close remaining threads spawned by InstaLooter.download
count = threads_count()
if count:
logger.notice("Terminating %i remaining workers...", count)
threads_force_join()
# Return the error number if any
errno = e.errno if hasattr(e, "errno") else None
return errno if errno is not None else 1
else:
return 0
finally:
logger.debug("Closing destination filesystem")
try:
dest_fs.close()
except Exception:
pass
|
import base64
import io
from pygal._compat import _ellipsis, is_list_like, u
from pygal.graph.base import BaseGraph
class PublicApi(BaseGraph):
"""Chart public functions"""
def add(self, title, values, **kwargs):
"""Add a serie to this graph, compat api"""
if not is_list_like(values) and not isinstance(values, dict):
values = [values]
kwargs['title'] = title
self.raw_series.append((values, kwargs))
return self
def __call__(self, *args, **kwargs):
"""Call api: chart(1, 2, 3, title='T')"""
self.raw_series.append((args, kwargs))
return self
def add_xml_filter(self, callback):
"""Add an xml filter for in tree post processing"""
self.xml_filters.append(callback)
return self
def render(self, is_unicode=False, **kwargs):
"""Render the graph, and return the svg string"""
self.setup(**kwargs)
svg = self.svg.render(
is_unicode=is_unicode, pretty_print=self.pretty_print
)
self.teardown()
return svg
def render_tree(self, **kwargs):
"""Render the graph, and return (l)xml etree"""
self.setup(**kwargs)
svg = self.svg.root
for f in self.xml_filters:
svg = f(svg)
self.teardown()
return svg
def render_table(self, **kwargs):
"""Render the data as a html table"""
# Import here to avoid lxml import
try:
from pygal.table import Table
except ImportError:
raise ImportError('You must install lxml to use render table')
return Table(self).render(**kwargs)
def render_pyquery(self, **kwargs):
"""Render the graph, and return a pyquery wrapped tree"""
from pyquery import PyQuery as pq
return pq(self.render(**kwargs), parser='html')
def render_in_browser(self, **kwargs):
"""Render the graph, open it in your browser with black magic"""
try:
from lxml.html import open_in_browser
except ImportError:
raise ImportError('You must install lxml to use render in browser')
kwargs.setdefault('force_uri_protocol', 'https')
open_in_browser(self.render_tree(**kwargs), encoding='utf-8')
def render_response(self, **kwargs):
"""Render the graph, and return a Flask response"""
from flask import Response
return Response(self.render(**kwargs), mimetype='image/svg+xml')
def render_django_response(self, **kwargs):
"""Render the graph, and return a Django response"""
from django.http import HttpResponse
return HttpResponse(
self.render(**kwargs), content_type='image/svg+xml'
)
def render_data_uri(self, **kwargs):
"""Output a base 64 encoded data uri"""
        # Force the URI protocol, since data URIs have none
kwargs.setdefault('force_uri_protocol', 'https')
return "data:image/svg+xml;charset=utf-8;base64,%s" % (
base64.b64encode(self.render(**kwargs)
).decode('utf-8').replace('\n', '')
)
def render_to_file(self, filename, **kwargs):
"""Render the graph, and write it to filename"""
with io.open(filename, 'w', encoding='utf-8') as f:
f.write(self.render(is_unicode=True, **kwargs))
def render_to_png(self, filename=None, dpi=72, **kwargs):
"""Render the graph, convert it to png and write it to filename"""
import cairosvg
return cairosvg.svg2png(
bytestring=self.render(**kwargs), write_to=filename, dpi=dpi
)
def render_sparktext(self, relative_to=None):
"""Make a mini text sparkline from chart"""
bars = u('▁▂▃▄▅▆▇█')
if len(self.raw_series) == 0:
return u('')
values = list(self.raw_series[0][0])
if len(values) == 0:
return u('')
chart = u('')
values = list(map(lambda x: max(x, 0), values))
vmax = max(values)
if relative_to is None:
relative_to = min(values)
if (vmax - relative_to) == 0:
chart = bars[0] * len(values)
return chart
divisions = len(bars) - 1
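        # Map each value onto one of the eight block characters, scaled linearly
        # between relative_to and vmax.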
for value in values:
chart += bars[int(
divisions * (value - relative_to) / (vmax - relative_to)
)]
return chart
def render_sparkline(self, **kwargs):
"""Render a sparkline"""
spark_options = dict(
width=200,
height=50,
show_dots=False,
show_legend=False,
show_x_labels=False,
show_y_labels=False,
spacing=0,
margin=5,
min_scale=1,
max_scale=2,
explicit_size=True,
no_data_text='',
js=(),
classes=(_ellipsis, 'pygal-sparkline')
)
spark_options.update(kwargs)
return self.render(**spark_options)
|
from django.db.models import Sum
from django.utils import timezone
from django.utils.functional import cached_property
from django.utils.translation import gettext_lazy as _
from django_fsm import transition
from shop.models.delivery import DeliveryModel, DeliveryItemModel
class SimpleShippingWorkflowMixin:
"""
Workflow for simply marking the state of an Order while picking, packing and shipping items.
It does not create a Delivery object.
    Add this class to ``settings.SHOP_ORDER_WORKFLOWS`` to mix it into the merchant's Order model.
    It is mutually exclusive with :class:`shop.shipping.workflows.CommissionGoodsWorkflowMixin` or
:class:`shop.shipping.workflows.PartialDeliveryWorkflowMixin`.
It adds all the methods required for state transitions, while picking and packing
the ordered goods for shipping.
"""
TRANSITION_TARGETS = {
'pick_goods': _("Picking goods"),
'pack_goods': _("Packing goods"),
'ship_goods': _("Prepare for shipping"),
'ready_for_delivery': _("Ready for delivery"),
}
@property
def associate_with_delivery(self):
"""
:returns: ``True`` if this Order requires a delivery object.
"""
return False
@property
def allow_partial_delivery(self):
"""
:returns: ``True`` if partial item delivery is allowed.
"""
return False
@transition(field='status', source='payment_confirmed', target='pick_goods',
custom=dict(admin=True, button_name=_("Pick the goods")))
def pick_goods(self, by=None):
"""Change status to 'pick_goods'."""
@transition(field='status', source='pick_goods', target='pack_goods',
custom=dict(admin=True, button_name=_("Pack the goods")))
def pack_goods(self, by=None):
"""Change status to 'pack_goods'."""
@transition(field='status', source='pack_goods', target='ship_goods',
custom=dict(admin=True, button_name=_("Prepare for shipping")))
def ship_goods(self, by=None):
"""
Ship the goods. This method implicitly invokes
    :meth:`shop.shipping.modifiers.ShippingModifier.ship_the_goods(delivery)`
"""
@transition(field='status', source='ship_goods', target='ready_for_delivery',
custom=dict(auto=True))
def prepare_for_delivery(self, by=None):
"""
Put the parcel into the outgoing delivery.
This method is invoked automatically by `ship_goods`.
"""
def update_or_create_delivery(self, orderitem_data):
"""
Hook to create a delivery object with items.
"""
class CommissionGoodsWorkflowMixin(SimpleShippingWorkflowMixin):
"""
Workflow to commission all ordered items in one common Delivery.
    Add this class to ``settings.SHOP_ORDER_WORKFLOWS`` to mix it into the merchant's Order model.
    It is mutually exclusive with :class:`shop.shipping.workflows.SimpleShippingWorkflowMixin` or
:class:`shop.shipping.workflows.PartialDeliveryWorkflowMixin`.
It adds all the methods required for state transitions, while picking and packing
the ordered goods for shipping.
"""
@property
def associate_with_delivery(self):
return True
@transition(field='status', source='ship_goods', target='ready_for_delivery',
custom=dict(auto=True))
def prepare_for_delivery(self, by=None):
"""Put the parcel into the outgoing delivery."""
def update_or_create_delivery(self, orderitem_data):
"""
Update or create a Delivery object for all items of this Order object.
"""
delivery, _ = DeliveryModel.objects.get_or_create(
order=self,
shipping_id__isnull=True,
shipped_at__isnull=True,
shipping_method=self.extra.get('shipping_modifier'),
defaults={'fulfilled_at': timezone.now()}
)
for item in self.items.all():
DeliveryItemModel.objects.create(
delivery=delivery,
item=item,
quantity=item.quantity,
)
class PartialDeliveryWorkflowMixin(CommissionGoodsWorkflowMixin):
"""
Workflow to optionally commission ordered items partially.
    Add this class to ``settings.SHOP_ORDER_WORKFLOWS`` to mix it into the merchant's Order model.
    It is mutually exclusive with :class:`shop.shipping.workflows.SimpleShippingWorkflowMixin` or
    :class:`shop.shipping.workflows.CommissionGoodsWorkflowMixin`.
    This mixin supports partial delivery, so it requires materialized representations of the
    models :class:`shop.models.delivery.DeliveryModel` and :class:`shop.models.delivery.DeliveryItemModel`
    to exist and be instantiated. Importing the classes :class:`shop.models.defaults.delivery.DeliveryModel`
    and :class:`shop.models.defaults.delivery_item.DeliveryItemModel` into the merchant's
    ``models.py`` is usually enough. This adds all the methods required for state transitions,
    while picking, packing and shipping the ordered goods for delivery.
"""
@property
def allow_partial_delivery(self):
return True
@cached_property
def unfulfilled_items(self):
unfulfilled_items = 0
for order_item in self.items.all():
if not order_item.canceled:
aggr = order_item.deliver_item.aggregate(delivered=Sum('quantity'))
unfulfilled_items += order_item.quantity - (aggr['delivered'] or 0)
return unfulfilled_items
def ready_for_picking(self):
return self.is_fully_paid() and self.unfulfilled_items > 0
def ready_for_shipping(self):
return self.delivery_set.filter(shipped_at__isnull=True).exists()
@transition(field='status', source='*', target='pick_goods', conditions=[ready_for_picking],
custom=dict(admin=True, button_name=_("Pick the goods")))
def pick_goods(self, by=None):
"""Change status to 'pick_goods'."""
@transition(field='status', source=['pick_goods'], target='pack_goods',
custom=dict(admin=True, button_name=_("Pack the goods")))
def pack_goods(self, by=None):
"""Prepare shipping object and change status to 'pack_goods'."""
@transition(field='status', source='*', target='ship_goods', conditions=[ready_for_shipping],
custom=dict(admin=True, button_name=_("Ship the goods")))
def ship_goods(self, by=None):
"""Ship the goods."""
@transition(field='status', source='ship_goods', target='ready_for_delivery',
custom=dict(auto=True))
def prepare_for_delivery(self, by=None):
"""Put the parcel into the outgoing delivery."""
def update_or_create_delivery(self, orderitem_data):
"""
Update or create a Delivery object and associate with selected ordered items.
"""
delivery, _ = DeliveryModel.objects.get_or_create(
order=self,
shipping_id__isnull=True,
shipped_at__isnull=True,
shipping_method=self.extra.get('shipping_modifier'),
defaults={'fulfilled_at': timezone.now()}
)
# create a DeliveryItem object for each ordered item to be shipped with this delivery
for data in orderitem_data:
if data['deliver_quantity'] > 0 and not data['canceled']:
DeliveryItemModel.objects.create(
delivery=delivery,
item=data['id'],
quantity=data['deliver_quantity'],
)
if not delivery.items.exists():
# since no OrderItem was added to this delivery, discard it
delivery.delete()
|
from __future__ import absolute_import, unicode_literals
import sys
try:
# noinspection PyCompatibility
from StringIO import StringIO
except ImportError:
from io import StringIO
from binarytree import build
class CaptureOutput(list):
"""Context manager to catch stdout."""
def __enter__(self):
self._original_stdout = sys.stdout
self._temp_stdout = StringIO()
sys.stdout = self._temp_stdout
return self
def __exit__(self, *args):
lines = self._temp_stdout.getvalue().splitlines()
self.extend(line.rstrip() for line in lines)
sys.stdout = self._original_stdout
def pprint_default(values):
"""Helper function for testing Node.pprint with default arguments."""
root = build(values)
with CaptureOutput() as output:
root.pprint(index=False, delimiter='-')
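    # pprint surrounds its output with a leading and a trailing blank line, which
    # these helpers assert on and then strip.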
assert output[0] == '' and output[-1] == ''
return [line for line in output if line != '']
def pprint_with_index(values):
"""Helper function for testing Node.pprint with indexes."""
root = build(values)
with CaptureOutput() as output:
root.pprint(index=True, delimiter=':')
assert output[0] == '' and output[-1] == ''
return [line for line in output if line != '']
def builtin_print(values):
"""Helper function for testing builtin print on Node."""
root = build(values)
with CaptureOutput() as output:
print(root)
assert output[0] == '' and output[-1] == ''
return [line for line in output if line != '']
|
import pytest
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
@pytest.fixture(autouse=True)
def mock_all(aioclient_mock):
"""Mock all setup requests."""
aioclient_mock.post("http://127.0.0.1/homeassistant/options", json={"result": "ok"})
aioclient_mock.get("http://127.0.0.1/supervisor/ping", json={"result": "ok"})
aioclient_mock.post("http://127.0.0.1/supervisor/options", json={"result": "ok"})
aioclient_mock.get(
"http://127.0.0.1/homeassistant/info",
json={"result": "ok", "data": {"last_version": "10.0"}},
)
async def test_hassio_addon_panel_startup(hass, aioclient_mock, hassio_env):
"""Test startup and panel setup after event."""
aioclient_mock.get(
"http://127.0.0.1/ingress/panels",
json={
"result": "ok",
"data": {
"panels": {
"test1": {
"enable": True,
"title": "Test",
"icon": "mdi:test",
"admin": False,
},
"test2": {
"enable": False,
"title": "Test 2",
"icon": "mdi:test2",
"admin": True,
},
}
},
},
)
assert aioclient_mock.call_count == 0
with patch(
"homeassistant.components.hassio.addon_panel._register_panel",
) as mock_panel:
await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
assert aioclient_mock.call_count == 3
assert mock_panel.called
mock_panel.assert_called_with(
hass,
"test1",
{"enable": True, "title": "Test", "icon": "mdi:test", "admin": False},
)
async def test_hassio_addon_panel_api(hass, aioclient_mock, hassio_env, hass_client):
"""Test panel api after event."""
aioclient_mock.get(
"http://127.0.0.1/ingress/panels",
json={
"result": "ok",
"data": {
"panels": {
"test1": {
"enable": True,
"title": "Test",
"icon": "mdi:test",
"admin": False,
},
"test2": {
"enable": False,
"title": "Test 2",
"icon": "mdi:test2",
"admin": True,
},
}
},
},
)
assert aioclient_mock.call_count == 0
with patch(
"homeassistant.components.hassio.addon_panel._register_panel",
) as mock_panel:
await async_setup_component(hass, "hassio", {})
await hass.async_block_till_done()
assert aioclient_mock.call_count == 3
assert mock_panel.called
mock_panel.assert_called_with(
hass,
"test1",
{"enable": True, "title": "Test", "icon": "mdi:test", "admin": False},
)
hass_client = await hass_client()
resp = await hass_client.post("/api/hassio_push/panel/test2")
assert resp.status == 400
resp = await hass_client.post("/api/hassio_push/panel/test1")
assert resp.status == 200
assert mock_panel.call_count == 2
mock_panel.assert_called_with(
hass,
"test1",
{"enable": True, "title": "Test", "icon": "mdi:test", "admin": False},
)
|
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MOVIE,
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
REPEAT_MODE_OFF,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_REPEAT_SET,
SUPPORT_SEEK,
SUPPORT_SELECT_SOUND_MODE,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import STATE_OFF, STATE_PAUSED, STATE_PLAYING
import homeassistant.util.dt as dt_util
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the media player demo platform."""
async_add_entities(
[
DemoYoutubePlayer(
"Living Room",
"eyU3bRy2x44",
"♥♥ The Best Fireplace Video (3 hours)",
300,
),
DemoYoutubePlayer(
"Bedroom", "kxopViU98Xo", "Epic sax guy 10 hours", 360000
),
DemoMusicPlayer(),
DemoTVShowPlayer(),
]
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Demo config entry."""
await async_setup_platform(hass, {}, async_add_entities)
SOUND_MODE_LIST = ["Dummy Music", "Dummy Movie"]
DEFAULT_SOUND_MODE = "Dummy Music"
YOUTUBE_PLAYER_SUPPORT = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
| SUPPORT_SHUFFLE_SET
| SUPPORT_SELECT_SOUND_MODE
| SUPPORT_SEEK
)
MUSIC_PLAYER_SUPPORT = (
SUPPORT_PAUSE
| SUPPORT_VOLUME_SET
| SUPPORT_VOLUME_MUTE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_PLAY
| SUPPORT_SHUFFLE_SET
| SUPPORT_REPEAT_SET
| SUPPORT_VOLUME_STEP
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_SELECT_SOUND_MODE
)
NETFLIX_PLAYER_SUPPORT = (
SUPPORT_PAUSE
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_SELECT_SOURCE
| SUPPORT_PLAY
| SUPPORT_SHUFFLE_SET
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_SELECT_SOUND_MODE
)
class AbstractDemoPlayer(MediaPlayerEntity):
"""A demo media players."""
# We only implement the methods that we support
def __init__(self, name, device_class=None):
"""Initialize the demo device."""
self._name = name
self._player_state = STATE_PLAYING
self._volume_level = 1.0
self._volume_muted = False
self._shuffle = False
self._sound_mode_list = SOUND_MODE_LIST
self._sound_mode = DEFAULT_SOUND_MODE
self._device_class = device_class
@property
def should_poll(self):
"""Push an update after each command."""
return False
@property
def name(self):
"""Return the name of the media player."""
return self._name
@property
def state(self):
"""Return the state of the player."""
return self._player_state
@property
def volume_level(self):
"""Return the volume level of the media player (0..1)."""
return self._volume_level
@property
def is_volume_muted(self):
"""Return boolean if volume is currently muted."""
return self._volume_muted
@property
def shuffle(self):
"""Boolean if shuffling is enabled."""
return self._shuffle
@property
def sound_mode(self):
"""Return the current sound mode."""
return self._sound_mode
@property
def sound_mode_list(self):
"""Return a list of available sound modes."""
return self._sound_mode_list
@property
def device_class(self):
"""Return the device class of the media player."""
return self._device_class
def turn_on(self):
"""Turn the media player on."""
self._player_state = STATE_PLAYING
self.schedule_update_ha_state()
def turn_off(self):
"""Turn the media player off."""
self._player_state = STATE_OFF
self.schedule_update_ha_state()
def mute_volume(self, mute):
"""Mute the volume."""
self._volume_muted = mute
self.schedule_update_ha_state()
def volume_up(self):
"""Increase volume."""
self._volume_level = min(1.0, self._volume_level + 0.1)
self.schedule_update_ha_state()
def volume_down(self):
"""Decrease volume."""
self._volume_level = max(0.0, self._volume_level - 0.1)
self.schedule_update_ha_state()
def set_volume_level(self, volume):
"""Set the volume level, range 0..1."""
self._volume_level = volume
self.schedule_update_ha_state()
def media_play(self):
"""Send play command."""
self._player_state = STATE_PLAYING
self.schedule_update_ha_state()
def media_pause(self):
"""Send pause command."""
self._player_state = STATE_PAUSED
self.schedule_update_ha_state()
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
self._shuffle = shuffle
self.schedule_update_ha_state()
def select_sound_mode(self, sound_mode):
"""Select sound mode."""
self._sound_mode = sound_mode
self.schedule_update_ha_state()
class DemoYoutubePlayer(AbstractDemoPlayer):
"""A Demo media player that only supports YouTube."""
# We only implement the methods that we support
def __init__(self, name, youtube_id=None, media_title=None, duration=360):
"""Initialize the demo device."""
super().__init__(name)
self.youtube_id = youtube_id
self._media_title = media_title
self._duration = duration
self._progress = int(duration * 0.15)
self._progress_updated_at = dt_util.utcnow()
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self.youtube_id
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MOVIE
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return self._duration
@property
def media_image_url(self):
"""Return the image url of current playing media."""
return f"https://img.youtube.com/vi/{self.youtube_id}/hqdefault.jpg"
@property
def media_title(self):
"""Return the title of current playing media."""
return self._media_title
@property
def app_name(self):
"""Return the current running application."""
return "YouTube"
@property
def supported_features(self):
"""Flag media player features that are supported."""
return YOUTUBE_PLAYER_SUPPORT
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._progress is None:
return None
position = self._progress
if self._player_state == STATE_PLAYING:
position += (dt_util.utcnow() - self._progress_updated_at).total_seconds()
return position
@property
def media_position_updated_at(self):
"""When was the position of the current playing media valid.
Returns value from homeassistant.util.dt.utcnow().
"""
if self._player_state == STATE_PLAYING:
return self._progress_updated_at
def play_media(self, media_type, media_id, **kwargs):
"""Play a piece of media."""
self.youtube_id = media_id
self.schedule_update_ha_state()
def media_pause(self):
"""Send pause command."""
self._progress = self.media_position
self._progress_updated_at = dt_util.utcnow()
super().media_pause()
class DemoMusicPlayer(AbstractDemoPlayer):
"""A Demo media player that only supports YouTube."""
# We only implement the methods that we support
tracks = [
("Technohead", "I Wanna Be A Hippy (Flamman & Abraxas Radio Mix)"),
("Paul Elstak", "Luv U More"),
("Dune", "Hardcore Vibes"),
("Nakatomi", "Children Of The Night"),
("Party Animals", "Have You Ever Been Mellow? (Flamman & Abraxas Radio Mix)"),
("Rob G.*", "Ecstasy, You Got What I Need"),
("Lipstick", "I'm A Raver"),
("4 Tune Fairytales", "My Little Fantasy (Radio Edit)"),
("Prophet", "The Big Boys Don't Cry"),
("Lovechild", "All Out Of Love (DJ Weirdo & Sim Remix)"),
("Stingray & Sonic Driver", "Cold As Ice (El Bruto Remix)"),
("Highlander", "Hold Me Now (Bass-D & King Matthew Remix)"),
("Juggernaut", 'Ruffneck Rules Da Artcore Scene (12" Edit)'),
("Diss Reaction", "Jiiieehaaaa "),
("Flamman And Abraxas", "Good To Go (Radio Mix)"),
("Critical Mass", "Dancing Together"),
(
"Charly Lownoise & Mental Theo",
"Ultimate Sex Track (Bass-D & King Matthew Remix)",
),
]
def __init__(self):
"""Initialize the demo device."""
super().__init__("Walkman")
self._cur_track = 0
self._repeat = REPEAT_MODE_OFF
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return "bounzz-1"
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return 213
@property
def media_image_url(self):
"""Return the image url of current playing media."""
return "https://graph.facebook.com/v2.5/107771475912710/picture?type=large"
@property
def media_title(self):
"""Return the title of current playing media."""
return self.tracks[self._cur_track][1] if self.tracks else ""
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
return self.tracks[self._cur_track][0] if self.tracks else ""
@property
def media_album_name(self):
"""Return the album of current playing media (Music track only)."""
return "Bounzz"
@property
def media_track(self):
"""Return the track number of current media (Music track only)."""
return self._cur_track + 1
@property
def repeat(self):
"""Return current repeat mode."""
return self._repeat
@property
def supported_features(self):
"""Flag media player features that are supported."""
return MUSIC_PLAYER_SUPPORT
def media_previous_track(self):
"""Send previous track command."""
if self._cur_track > 0:
self._cur_track -= 1
self.schedule_update_ha_state()
def media_next_track(self):
"""Send next track command."""
if self._cur_track < len(self.tracks) - 1:
self._cur_track += 1
self.schedule_update_ha_state()
def clear_playlist(self):
"""Clear players playlist."""
self.tracks = []
self._cur_track = 0
self._player_state = STATE_OFF
self.schedule_update_ha_state()
def set_repeat(self, repeat):
"""Enable/disable repeat mode."""
self._repeat = repeat
self.schedule_update_ha_state()
class DemoTVShowPlayer(AbstractDemoPlayer):
"""A Demo media player that only supports YouTube."""
# We only implement the methods that we support
def __init__(self):
"""Initialize the demo device."""
super().__init__("Lounge room")
self._cur_episode = 1
self._episode_count = 13
self._source = "dvd"
self._source_list = ["dvd", "youtube"]
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return "house-of-cards-1"
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_TVSHOW
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
return 3600
@property
def media_image_url(self):
"""Return the image url of current playing media."""
return "https://graph.facebook.com/v2.5/HouseofCards/picture?width=400"
@property
def media_title(self):
"""Return the title of current playing media."""
return f"Chapter {self._cur_episode}"
@property
def media_series_title(self):
"""Return the series title of current playing media (TV Show only)."""
return "House of Cards"
@property
def media_season(self):
"""Return the season of current playing media (TV Show only)."""
return 1
@property
def media_episode(self):
"""Return the episode of current playing media (TV Show only)."""
return self._cur_episode
@property
def app_name(self):
"""Return the current running application."""
return "Netflix"
@property
def source(self):
"""Return the current input source."""
return self._source
@property
def source_list(self):
"""List of available sources."""
return self._source_list
@property
def supported_features(self):
"""Flag media player features that are supported."""
return NETFLIX_PLAYER_SUPPORT
def media_previous_track(self):
"""Send previous track command."""
if self._cur_episode > 1:
self._cur_episode -= 1
self.schedule_update_ha_state()
def media_next_track(self):
"""Send next track command."""
if self._cur_episode < self._episode_count:
self._cur_episode += 1
self.schedule_update_ha_state()
def select_source(self, source):
"""Set the input source."""
self._source = source
self.schedule_update_ha_state()
|
import logging
from telegram import Update
from telegram.error import NetworkError, RetryAfter, TelegramError, TimedOut
from telegram.ext import Handler, Updater
from homeassistant.const import EVENT_HOMEASSISTANT_START, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from . import CONF_ALLOWED_CHAT_IDS, BaseTelegramBotEntity, initialize_bot
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config):
"""Set up the Telegram polling platform."""
bot = initialize_bot(config)
pol = TelegramPoll(bot, hass, config[CONF_ALLOWED_CHAT_IDS])
@callback
def _start_bot(_event):
"""Start the bot."""
pol.start_polling()
@callback
def _stop_bot(_event):
"""Stop the bot."""
pol.stop_polling()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, _start_bot)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _stop_bot)
return True
def process_error(bot, update, error):
"""Telegram bot error handler."""
try:
raise error
except (TimedOut, NetworkError, RetryAfter):
# Long polling timeout or connection problem. Nothing serious.
pass
except TelegramError:
_LOGGER.error('Update "%s" caused error "%s"', update, error)
def message_handler(handler):
"""Create messages handler."""
class MessageHandler(Handler):
"""Telegram bot message handler."""
def __init__(self):
"""Initialize the messages handler instance."""
super().__init__(handler)
def check_update(self, update):
"""Check is update valid."""
return isinstance(update, Update)
def handle_update(self, update, dispatcher):
"""Handle update."""
optional_args = self.collect_optional_args(dispatcher, update)
return self.callback(dispatcher.bot, update, **optional_args)
return MessageHandler()
class TelegramPoll(BaseTelegramBotEntity):
"""Asyncio telegram incoming message handler."""
def __init__(self, bot, hass, allowed_chat_ids):
"""Initialize the polling instance."""
BaseTelegramBotEntity.__init__(self, hass, allowed_chat_ids)
self.updater = Updater(bot=bot, workers=4)
self.dispatcher = self.updater.dispatcher
self.dispatcher.add_handler(message_handler(self.process_update))
self.dispatcher.add_error_handler(process_error)
def start_polling(self):
"""Start the polling task."""
self.updater.start_polling()
def stop_polling(self):
"""Stop the polling task."""
self.updater.stop()
def process_update(self, bot, update):
"""Process incoming message."""
self.process_message(update.to_dict())
|
from __future__ import (absolute_import, division, print_function,
unicode_literals)
from math import sqrt
import matplotlib.pyplot as plt
import numpy as np
from numpy import dot
import numpy.random as random
from scipy.linalg import inv
from filterpy.gh import GHFilter
from filterpy.leastsq import LeastSquaresFilter
def near_equal(x, y, e=1.e-14):
return abs(x-y) < e
class LSQ(object):
def __init__(self, dim_x):
self.dim_x = dim_x
self.I = np.eye(dim_x)
self.H = 0
self.x = np.zeros((dim_x, 1))
self.k = 0
    def update(self, Z):
self.x += 1
self.k += 1
print('k=', self.k, 1/self.k, 1/(self.k+1))
S = dot(self.H, self.P).dot(self.H.T) + self.R
K1 = dot(self.P, self.H.T).dot(inv(S))
print('K1=', K1[0, 0])
I_KH = self.I - dot(K1, self.H)
y = Z - dot(self.H, self.x)
print('y=', y)
self.x = self.x + dot(K1, y)
self.P = dot(I_KH, self.P)
print(self.P)
class LeastSquaresFilterOriginal(object):
"""Implements a Least Squares recursive filter. Formulation is per
Zarchan [1].
Filter may be of order 0 to 2. Order 0 assumes the value being tracked is
a constant, order 1 assumes that it moves in a line, and order 2 assumes
that it is tracking a second order polynomial.
It is implemented to be directly callable like a function. See examples.
Examples
--------
lsq = LeastSquaresFilter(dt=0.1, order=1, noise_variance=2.3)
while True:
z = sensor_reading() # get a measurement
x = lsq(z) # get the filtered estimate.
print('error: {}, velocity error: {}'.format(lsq.error, lsq.derror))
Attributes
----------
n : int
step in the recursion. 0 prior to first call, 1 after the first call,
etc.
K1,K2,K3 : float
Gains for the filter. K1 for all orders, K2 for orders 0 and 1, and
K3 for order 2
x, dx, ddx: type(z)
estimate(s) of the output. 'd' denotes derivative, so 'dx' is the first
derivative of x, 'ddx' is the second derivative.
References
----------
[1] Zarchan and Musoff. "Fundamentals of Kalman Filtering: A Practical
Approach." Third Edition. AIAA, 2009.
"""
def __init__(self, dt, order, noise_variance=0.):
""" Least Squares filter of order 0 to 2.
Parameters
----------
dt : float
time step per update
order : int
order of filter 0..2
noise_variance : float
                variance in x. This allows us to calculate the error of the
                filter; it does not influence the filter output.
"""
assert order >= 0
assert order <= 2
self.reset()
self.dt = dt
self.dt2 = dt**2
self.sigma = noise_variance
self._order = order
def reset(self):
""" reset filter back to state at time of construction"""
self.n = 0 #nth step in the recursion
self.x = 0.
self.error = 0.
self.derror = 0.
self.dderror = 0.
self.dx = 0.
self.ddx = 0.
self.K1 = 0
self.K2 = 0
self.K3 = 0
def __call__(self, z):
self.n += 1
n = self.n
dt = self.dt
dt2 = self.dt2
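        # The gains K1..K3 below follow Zarchan's recursive least-squares formulation;
        # they depend only on the filter order and the number of measurements n seen so far.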
if self._order == 0:
self.K1 = 1. / n
residual = z - self.x
self.x = self.x + residual * self.K1
self.error = self.sigma/sqrt(n)
elif self._order == 1:
self.K1 = 2*(2*n-1) / (n*(n+1))
self.K2 = 6 / (n*(n+1)*dt)
residual = z - self.x - self.dx*dt
self.x = self.x + self.dx*dt + self.K1*residual
self.dx = self.dx + self.K2*residual
if n > 1:
self.error = self.sigma*sqrt(2.*(2*n-1)/(n*(n+1)))
self.derror = self.sigma*sqrt(12./(n*(n*n-1)*dt*dt))
else:
den = n*(n+1)*(n+2)
self.K1 = 3*(3*n**2 - 3*n + 2) / den
self.K2 = 18*(2*n-1) / (den*dt)
            self.K3 = 60. / (den*dt2)
            residual = z - self.x - self.dx*dt - .5*self.ddx*dt2
            self.x += self.dx*dt + .5*self.ddx*dt2 + self.K1*residual
self.dx += self.ddx*dt + self.K2*residual
self.ddx += self.K3*residual
if n >= 3:
self.error = self.sigma*sqrt(3*(3*n*n-3*n+2)/(n*(n+1)*(n+2)))
self.derror = self.sigma*sqrt(12*(16*n*n-30*n+11) /
(n*(n*n-1)*(n*n-4)*dt2))
self.dderror = self.sigma*sqrt(720/(n*(n*n-1)*(n*n-4)*dt2*dt2))
return self.x
def standard_deviation(self):
if self.n == 0:
return 0.
if self._order == 0:
            return 1./sqrt(self.n)
elif self._order == 1:
pass
def __repr__(self):
return 'LeastSquareFilter x={}, dx={}, ddx={}'.format(
self.x, self.dx, self.ddx)
def test_lsq():
""" implements alternative version of first order Least Squares filter
using g-h filter formulation and uses it to check the output of the
LeastSquaresFilter class."""
global lsq, lsq2, xs, lsq_xs
gh = GHFilter(x=0, dx=0, dt=1, g=.5, h=0.02)
lsq = LeastSquaresFilterOriginal(dt=1, order=1)
lsq2 = LeastSquaresFilter(dt=1, order=1)
zs = [x+random.randn()*10 for x in range(0, 10000)]
# test __repr__ at least doesn't crash
try:
str(lsq2)
    except Exception:
assert False, "LeastSquaresFilter.__repr__ exception"
xs = []
lsq_xs = []
for i, z in enumerate(zs):
g = 2*(2*i + 1) / ((i+2)*(i+1))
h = 6 / ((i+2)*(i+1))
x, dx = gh.update(z, g, h)
lx = lsq(z)
lsq_xs.append(lx)
x2 = lsq2.update(z)
assert near_equal(x2[0], lx, 1.e-10), '{}, {}, {}'.format(
i, x2[0], lx)
xs.append(x)
plt.plot(xs)
plt.plot(lsq_xs)
for x, y in zip(xs, lsq_xs):
r = x-y
assert r < 1.e-8
def test_first_order():
''' data and example from Zarchan, page 105-6'''
lsf = LeastSquaresFilter(dt=1, order=1)
xs = [1.2, .2, 2.9, 2.1]
ys = []
for x in xs:
ys.append(lsf.update(x)[0])
plt.plot(xs, c='b')
plt.plot(ys, c='g')
plt.plot([0, len(xs)-1], [ys[0], ys[-1]])
def test_second_order():
''' data and example from Zarchan, page 114'''
lsf = LeastSquaresFilter(1, order=2)
lsf0 = LeastSquaresFilterOriginal(1, order=2)
xs = [1.2, .2, 2.9, 2.1]
ys = []
for x in xs:
y = lsf.update(x)[0]
y0 = lsf0(x)
assert near_equal(y, y0)
ys.append(y)
plt.scatter(range(len(xs)), xs, c='r', marker='+')
plt.plot(ys, c='g')
plt.plot([0, len(xs)-1], [ys[0], ys[-1]], c='b')
def test_fig_3_8():
""" figure 3.8 in Zarchan, p. 108"""
lsf = LeastSquaresFilter(0.1, order=1)
lsf0 = LeastSquaresFilterOriginal(0.1, order=1)
xs = [x + 3 + random.randn() for x in np.arange(0, 10, 0.1)]
ys = []
for x in xs:
y0 = lsf0(x)
y = lsf.update(x)[0]
assert near_equal(y, y0)
ys.append(y)
plt.plot(xs)
plt.plot(ys)
def test_listing_3_4():
""" listing 3.4 in Zarchan, p. 117"""
lsf = LeastSquaresFilter(0.1, order=2)
xs = [5*x*x - x + 2 + 30*random.randn() for x in np.arange(0, 10, 0.1)]
ys = []
for x in xs:
ys.append(lsf.update(x)[0])
plt.plot(xs)
plt.plot(ys)
def lsq2_plot():
fl = LSQ(2)
fl.H = np.array([[1., 1.], [0., 1.]])
fl.R = np.eye(2)
fl.P = np.array([[2., .5], [.5, 2.]])
for x in range(10):
fl.update(np.array([[x], [x]], dtype=float))
plt.scatter(x, fl.x[0, 0])
def test_big_data():
N = 1000000
xs = np.array([i+random.randn() for i in range(N)])
for order in [1, 2]:
lsq = LeastSquaresFilter(dt=1, order=order)
ys = np.array([lsq.update(x)[0] for x in xs])
delta = xs - ys
assert delta.max() < 6, delta.max()
assert delta.min() > -6, delta.min()
    # Zero order is a special case: it can't adapt quickly to changing data
xs = np.array([random.randn() for i in range(N)])
lsq = LeastSquaresFilter(dt=1, order=0)
ys = np.array([lsq.update(x)[0] for x in xs])
delta = xs - ys
assert delta.max() < 6, delta.max()
assert delta.min() > -6, delta.min()
if __name__ == "__main__":
test_big_data()
|
from typing import List
import voluptuous as vol
from homeassistant.components.device_automation import toggle_entity
from homeassistant.const import CONF_DOMAIN
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers.typing import ConfigType, TemplateVarsType
from . import DOMAIN
ACTION_SCHEMA = toggle_entity.ACTION_SCHEMA.extend({vol.Required(CONF_DOMAIN): DOMAIN})
async def async_call_action_from_config(
hass: HomeAssistant,
config: ConfigType,
variables: TemplateVarsType,
context: Context,
) -> None:
"""Change state based on configuration."""
await toggle_entity.async_call_action_from_config(
hass, config, variables, context, DOMAIN
)
async def async_get_actions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device actions."""
return await toggle_entity.async_get_actions(hass, device_id, DOMAIN)
|
import time
import uuid
from enum import Enum
from arctic.exceptions import RequestDurationException
class AsyncRequestType(Enum):
MODIFIER = 'modifier'
ACCESSOR = 'accessor'
class AsyncRequest(object):
def __init__(self, kind, library, fun, callback, *args, **kwargs):
self.id = uuid.uuid4()
# Request library call spec
self.fun = fun
self.args = args
self.kwargs = kwargs
# Request meta
self.kind = kind
self.library = library
self.symbol = kwargs.get('symbol')
# Request's state
self.future = None
self.callback = callback
self.data = None
self.exception = None
self.is_running = False
self.is_completed = False
# Timekeeping
self.start_time = None
self.end_time = None
self.create_time = time.time()
self.mongo_retry = bool(kwargs.get('mongo_retry'))
@property
def execution_duration(self):
if None in (self.start_time, self.end_time):
raise RequestDurationException("{} can't provide an execution_duration {}.".format(
self, (self.start_time, self.end_time)))
return self.end_time - self.start_time
@property
def schedule_delay(self):
if None in (self.start_time, self.create_time):
raise RequestDurationException("{} can't provide a schedule_delay {}.".format(
self, (self.start_time, self.create_time)))
return self.start_time - self.create_time
@property
def total_time(self):
if None in (self.end_time, self.create_time):
raise RequestDurationException("{} can't provide a total_time {}.".format(
self, (self.end_time, self.create_time)))
return self.end_time - self.create_time
def __str__(self):
return "Request id:{} library:{}, symbol:{} fun:{}, kind:{}".format(
self.id, self.library, self.symbol, getattr(self.fun, '__name__', None), self.kind)
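# Minimal usage sketch (illustrative; assumes a hypothetical `library` object
# and an executor that fills in the timestamps before the duration properties
# are read):
#   request = AsyncRequest(AsyncRequestType.ACCESSOR, library, library.read,
#                          callback=None, symbol='SYM')
#   request.start_time = time.time()
#   ...  # run request.fun(*request.args, **request.kwargs)
#   request.end_time = time.time()
#   duration = request.execution_duration  # end_time - start_time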
|
from datetime import timedelta
import json
from homeassistant.components.accuweather.const import (
ATTRIBUTION,
CONCENTRATION_PARTS_PER_CUBIC_METER,
DOMAIN,
)
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_DEVICE_CLASS,
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
DEVICE_CLASS_TEMPERATURE,
LENGTH_METERS,
LENGTH_MILLIMETERS,
PERCENTAGE,
SPEED_KILOMETERS_PER_HOUR,
STATE_UNAVAILABLE,
TEMP_CELSIUS,
TIME_HOURS,
UV_INDEX,
)
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import patch
from tests.common import async_fire_time_changed, load_fixture
from tests.components.accuweather import init_integration
async def test_sensor_without_forecast(hass):
"""Test states of the sensor without forecast."""
await init_integration(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("sensor.home_cloud_ceiling")
assert state
assert state.state == "3200"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-fog"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_METERS
entry = registry.async_get("sensor.home_cloud_ceiling")
assert entry
assert entry.unique_id == "0123456-ceiling"
state = hass.states.get("sensor.home_precipitation")
assert state
assert state.state == "0.0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == LENGTH_MILLIMETERS
assert state.attributes.get(ATTR_ICON) == "mdi:weather-rainy"
assert state.attributes.get("type") is None
entry = registry.async_get("sensor.home_precipitation")
assert entry
assert entry.unique_id == "0123456-precipitation"
state = hass.states.get("sensor.home_pressure_tendency")
assert state
assert state.state == "falling"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:gauge"
assert state.attributes.get(ATTR_DEVICE_CLASS) == "accuweather__pressure_tendency"
entry = registry.async_get("sensor.home_pressure_tendency")
assert entry
assert entry.unique_id == "0123456-pressuretendency"
state = hass.states.get("sensor.home_realfeel_temperature")
assert state
assert state.state == "25.1"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature")
assert entry
assert entry.unique_id == "0123456-realfeeltemperature"
state = hass.states.get("sensor.home_uv_index")
assert state
assert state.state == "6"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UV_INDEX
assert state.attributes.get("level") == "High"
entry = registry.async_get("sensor.home_uv_index")
assert entry
assert entry.unique_id == "0123456-uvindex"
async def test_sensor_with_forecast(hass):
"""Test states of the sensor with forecast."""
await init_integration(hass, forecast=True)
registry = await hass.helpers.entity_registry.async_get_registry()
state = hass.states.get("sensor.home_hours_of_sun_0d")
assert state
assert state.state == "7.2"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-partly-cloudy"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TIME_HOURS
entry = registry.async_get("sensor.home_hours_of_sun_0d")
assert entry
assert entry.unique_id == "0123456-hoursofsun-0"
state = hass.states.get("sensor.home_realfeel_temperature_max_0d")
assert state
assert state.state == "29.8"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_max_0d")
assert entry
state = hass.states.get("sensor.home_realfeel_temperature_min_0d")
assert state
assert state.state == "15.1"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_min_0d")
assert entry
assert entry.unique_id == "0123456-realfeeltemperaturemin-0"
state = hass.states.get("sensor.home_thunderstorm_probability_day_0d")
assert state
assert state.state == "40"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-lightning"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
entry = registry.async_get("sensor.home_thunderstorm_probability_day_0d")
assert entry
assert entry.unique_id == "0123456-thunderstormprobabilityday-0"
state = hass.states.get("sensor.home_thunderstorm_probability_night_0d")
assert state
assert state.state == "40"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-lightning"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
entry = registry.async_get("sensor.home_thunderstorm_probability_night_0d")
assert entry
assert entry.unique_id == "0123456-thunderstormprobabilitynight-0"
state = hass.states.get("sensor.home_uv_index_0d")
assert state
assert state.state == "5"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_ICON) == "mdi:weather-sunny"
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == UV_INDEX
assert state.attributes.get("level") == "Moderate"
entry = registry.async_get("sensor.home_uv_index_0d")
assert entry
assert entry.unique_id == "0123456-uvindex-0"
async def test_sensor_disabled(hass):
"""Test sensor disabled by default."""
await init_integration(hass)
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get("sensor.home_apparent_temperature")
assert entry
assert entry.unique_id == "0123456-apparenttemperature"
assert entry.disabled
assert entry.disabled_by == "integration"
# Test enabling entity
updated_entry = registry.async_update_entity(
entry.entity_id, **{"disabled_by": None}
)
assert updated_entry != entry
assert updated_entry.disabled is False
async def test_sensor_enabled_without_forecast(hass):
"""Test enabling an advanced sensor."""
registry = await hass.helpers.entity_registry.async_get_registry()
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-apparenttemperature",
suggested_object_id="home_apparent_temperature",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-cloudcover",
suggested_object_id="home_cloud_cover",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-dewpoint",
suggested_object_id="home_dew_point",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-realfeeltemperatureshade",
suggested_object_id="home_realfeel_temperature_shade",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-wetbulbtemperature",
suggested_object_id="home_wet_bulb_temperature",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-windchilltemperature",
suggested_object_id="home_wind_chill_temperature",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-windgust",
suggested_object_id="home_wind_gust",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-cloudcoverday-0",
suggested_object_id="home_cloud_cover_day_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-cloudcovernight-0",
suggested_object_id="home_cloud_cover_night_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-grass-0",
suggested_object_id="home_grass_pollen_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-mold-0",
suggested_object_id="home_mold_pollen_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-ozone-0",
suggested_object_id="home_ozone_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-ragweed-0",
suggested_object_id="home_ragweed_pollen_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-realfeeltemperatureshademax-0",
suggested_object_id="home_realfeel_temperature_shade_max_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-realfeeltemperatureshademin-0",
suggested_object_id="home_realfeel_temperature_shade_min_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-tree-0",
suggested_object_id="home_tree_pollen_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-windgustday-0",
suggested_object_id="home_wind_gust_day_0d",
disabled_by=None,
)
registry.async_get_or_create(
SENSOR_DOMAIN,
DOMAIN,
"0123456-windgustnight-0",
suggested_object_id="home_wind_gust_night_0d",
disabled_by=None,
)
await init_integration(hass, forecast=True)
state = hass.states.get("sensor.home_apparent_temperature")
assert state
assert state.state == "22.8"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_apparent_temperature")
assert entry
assert entry.unique_id == "0123456-apparenttemperature"
state = hass.states.get("sensor.home_cloud_cover")
assert state
assert state.state == "10"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.attributes.get(ATTR_ICON) == "mdi:weather-cloudy"
entry = registry.async_get("sensor.home_cloud_cover")
assert entry
assert entry.unique_id == "0123456-cloudcover"
state = hass.states.get("sensor.home_dew_point")
assert state
assert state.state == "16.2"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_dew_point")
assert entry
assert entry.unique_id == "0123456-dewpoint"
state = hass.states.get("sensor.home_realfeel_temperature_shade")
assert state
assert state.state == "21.1"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_shade")
assert entry
assert entry.unique_id == "0123456-realfeeltemperatureshade"
state = hass.states.get("sensor.home_wet_bulb_temperature")
assert state
assert state.state == "18.6"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_wet_bulb_temperature")
assert entry
assert entry.unique_id == "0123456-wetbulbtemperature"
state = hass.states.get("sensor.home_wind_chill_temperature")
assert state
assert state.state == "22.8"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_wind_chill_temperature")
assert entry
assert entry.unique_id == "0123456-windchilltemperature"
state = hass.states.get("sensor.home_wind_gust")
assert state
assert state.state == "20.3"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == SPEED_KILOMETERS_PER_HOUR
assert state.attributes.get(ATTR_ICON) == "mdi:weather-windy"
entry = registry.async_get("sensor.home_wind_gust")
assert entry
assert entry.unique_id == "0123456-windgust"
state = hass.states.get("sensor.home_cloud_cover_day_0d")
assert state
assert state.state == "58"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.attributes.get(ATTR_ICON) == "mdi:weather-cloudy"
entry = registry.async_get("sensor.home_cloud_cover_day_0d")
assert entry
assert entry.unique_id == "0123456-cloudcoverday-0"
state = hass.states.get("sensor.home_cloud_cover_night_0d")
assert state
assert state.state == "65"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == PERCENTAGE
assert state.attributes.get(ATTR_ICON) == "mdi:weather-cloudy"
entry = registry.async_get("sensor.home_cloud_cover_night_0d")
assert entry
state = hass.states.get("sensor.home_grass_pollen_0d")
assert state
assert state.state == "0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_PARTS_PER_CUBIC_METER
)
assert state.attributes.get("level") == "Low"
assert state.attributes.get(ATTR_ICON) == "mdi:grass"
entry = registry.async_get("sensor.home_grass_pollen_0d")
assert entry
assert entry.unique_id == "0123456-grass-0"
state = hass.states.get("sensor.home_mold_pollen_0d")
assert state
assert state.state == "0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_PARTS_PER_CUBIC_METER
)
assert state.attributes.get("level") == "Low"
assert state.attributes.get(ATTR_ICON) == "mdi:blur"
entry = registry.async_get("sensor.home_mold_pollen_0d")
assert entry
assert entry.unique_id == "0123456-mold-0"
state = hass.states.get("sensor.home_ozone_0d")
assert state
assert state.state == "32"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get("level") == "Good"
assert state.attributes.get(ATTR_ICON) == "mdi:vector-triangle"
entry = registry.async_get("sensor.home_ozone_0d")
assert entry
assert entry.unique_id == "0123456-ozone-0"
state = hass.states.get("sensor.home_ragweed_pollen_0d")
assert state
assert state.state == "0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_PARTS_PER_CUBIC_METER
)
assert state.attributes.get("level") == "Low"
assert state.attributes.get(ATTR_ICON) == "mdi:sprout"
entry = registry.async_get("sensor.home_ragweed_pollen_0d")
assert entry
assert entry.unique_id == "0123456-ragweed-0"
state = hass.states.get("sensor.home_realfeel_temperature_shade_max_0d")
assert state
assert state.state == "28.0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_shade_max_0d")
assert entry
assert entry.unique_id == "0123456-realfeeltemperatureshademax-0"
state = hass.states.get("sensor.home_realfeel_temperature_shade_min_0d")
assert state
assert state.state == "15.1"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == TEMP_CELSIUS
assert state.attributes.get(ATTR_DEVICE_CLASS) == DEVICE_CLASS_TEMPERATURE
entry = registry.async_get("sensor.home_realfeel_temperature_shade_min_0d")
assert entry
assert entry.unique_id == "0123456-realfeeltemperatureshademin-0"
state = hass.states.get("sensor.home_tree_pollen_0d")
assert state
assert state.state == "0"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert (
state.attributes.get(ATTR_UNIT_OF_MEASUREMENT)
== CONCENTRATION_PARTS_PER_CUBIC_METER
)
assert state.attributes.get("level") == "Low"
assert state.attributes.get(ATTR_ICON) == "mdi:tree-outline"
entry = registry.async_get("sensor.home_tree_pollen_0d")
assert entry
assert entry.unique_id == "0123456-tree-0"
state = hass.states.get("sensor.home_wind_gust_day_0d")
assert state
assert state.state == "29.6"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == SPEED_KILOMETERS_PER_HOUR
assert state.attributes.get("direction") == "S"
assert state.attributes.get(ATTR_ICON) == "mdi:weather-windy"
entry = registry.async_get("sensor.home_wind_gust_day_0d")
assert entry
assert entry.unique_id == "0123456-windgustday-0"
state = hass.states.get("sensor.home_wind_gust_night_0d")
assert state
assert state.state == "18.5"
assert state.attributes.get(ATTR_ATTRIBUTION) == ATTRIBUTION
assert state.attributes.get(ATTR_UNIT_OF_MEASUREMENT) == SPEED_KILOMETERS_PER_HOUR
assert state.attributes.get("direction") == "WSW"
assert state.attributes.get(ATTR_ICON) == "mdi:weather-windy"
entry = registry.async_get("sensor.home_wind_gust_night_0d")
assert entry
assert entry.unique_id == "0123456-windgustnight-0"
async def test_availability(hass):
"""Ensure that we mark the entities unavailable correctly when service is offline."""
await init_integration(hass)
state = hass.states.get("sensor.home_cloud_ceiling")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "3200"
future = utcnow() + timedelta(minutes=60)
with patch(
"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions",
side_effect=ConnectionError(),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.home_cloud_ceiling")
assert state
assert state.state == STATE_UNAVAILABLE
future = utcnow() + timedelta(minutes=120)
with patch(
"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions",
return_value=json.loads(
load_fixture("accuweather/current_conditions_data.json")
),
):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
state = hass.states.get("sensor.home_cloud_ceiling")
assert state
assert state.state != STATE_UNAVAILABLE
assert state.state == "3200"
async def test_manual_update_entity(hass):
"""Test manual update entity via service homeasasistant/update_entity."""
await init_integration(hass, forecast=True)
await async_setup_component(hass, "homeassistant", {})
current = json.loads(load_fixture("accuweather/current_conditions_data.json"))
forecast = json.loads(load_fixture("accuweather/forecast_data.json"))
with patch(
"homeassistant.components.accuweather.AccuWeather.async_get_current_conditions",
return_value=current,
) as mock_current, patch(
"homeassistant.components.accuweather.AccuWeather.async_get_forecast",
return_value=forecast,
) as mock_forecast:
await hass.services.async_call(
"homeassistant",
"update_entity",
{ATTR_ENTITY_ID: ["sensor.home_cloud_ceiling"]},
blocking=True,
)
assert mock_current.call_count == 1
assert mock_forecast.call_count == 1
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import inspect
from absl import logging
import six
# In Python 2 the inspect module does not have FullArgSpec. Define a named tuple
# instead.
if hasattr(inspect, "FullArgSpec"):
_FullArgSpec = inspect.FullArgSpec # pylint: disable=invalid-name
else:
_FullArgSpec = collections.namedtuple("FullArgSpec", [
"args", "varargs", "varkw", "defaults", "kwonlyargs", "kwonlydefaults",
"annotations"
])
def _getfullargspec(fn):
"""Python 2/3 compatible version of the inspect.getfullargspec method.
Args:
fn: The function object.
Returns:
A FullArgSpec. For Python 2 this is emulated by a named tuple.
"""
arg_spec_fn = inspect.getfullargspec if six.PY3 else inspect.getargspec
try:
arg_spec = arg_spec_fn(fn)
except TypeError:
# `fn` might be a callable object.
arg_spec = arg_spec_fn(fn.__call__)
if six.PY3:
assert isinstance(arg_spec, _FullArgSpec)
return arg_spec
return _FullArgSpec(
args=arg_spec.args,
varargs=arg_spec.varargs,
varkw=arg_spec.keywords,
defaults=arg_spec.defaults,
kwonlyargs=[],
kwonlydefaults=None,
annotations={})
def _has_arg(fn, arg_name):
"""Returns True if `arg_name` might be a valid parameter for `fn`.
Specifically, this means that `fn` either has a parameter named
`arg_name`, or has a `**kwargs` parameter.
Args:
fn: The function to check.
    arg_name: The name of the parameter.
Returns:
Whether `arg_name` might be a valid argument of `fn`.
"""
while isinstance(fn, functools.partial):
fn = fn.func
while hasattr(fn, "__wrapped__"):
fn = fn.__wrapped__
arg_spec = _getfullargspec(fn)
if arg_spec.varkw:
return True
return arg_name in arg_spec.args or arg_name in arg_spec.kwonlyargs
def call_with_accepted_args(fn, **kwargs):
"""Calls `fn` only with the keyword arguments that `fn` accepts."""
kwargs = {k: v for k, v in six.iteritems(kwargs) if _has_arg(fn, k)}
logging.debug("Calling %s with args %s.", fn, kwargs)
return fn(**kwargs)
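# Example (illustrative): keyword arguments that `fn` does not accept are
# silently dropped before the call.
#   def add(a, b=1):
#     return a + b
#   call_with_accepted_args(add, a=2, b=3, c=4)  # `c` is discarded -> 5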
def get_parameter_overview(variables, limit=40):
"""Returns a string with variables names, their shapes, count, and types.
To get all trainable parameters pass in `tf.trainable_variables()`.
Args:
variables: List of `tf.Variable`(s).
limit: If not `None`, the maximum number of variables to include.
Returns:
A string with a table like in the example.
+----------------+---------------+------------+---------+
| Name | Shape | Size | Type |
+----------------+---------------+------------+---------+
| FC_1/weights:0 | (63612, 1024) | 65,138,688 | float32 |
| FC_1/biases:0 | (1024,) | 1,024 | float32 |
| FC_2/weights:0 | (1024, 32) | 32,768 | float32 |
| FC_2/biases:0 | (32,) | 32 | float32 |
+----------------+---------------+------------+---------+
Total: 65,172,512
"""
max_name_len = max([len(v.name) for v in variables] + [len("Name")])
max_shape_len = max([len(str(v.get_shape())) for v in variables] + [len(
"Shape")])
max_size_len = max([len("{:,}".format(v.get_shape().num_elements()))
for v in variables] + [len("Size")])
max_type_len = max([len(v.dtype.base_dtype.name) for v in variables] + [len(
"Type")])
var_line_format = "| {: <{}s} | {: >{}s} | {: >{}s} | {: <{}s} |"
sep_line_format = var_line_format.replace(" ", "-").replace("|", "+")
header = var_line_format.replace(">", "<").format("Name", max_name_len,
"Shape", max_shape_len,
"Size", max_size_len,
"Type", max_type_len)
separator = sep_line_format.format("", max_name_len, "", max_shape_len, "",
max_size_len, "", max_type_len)
lines = [separator, header, separator]
total_weights = sum(v.get_shape().num_elements() for v in variables)
  # Stop after `limit` lines and indicate truncation with "[...]".
for v in variables:
if limit is not None and len(lines) >= limit:
lines.append("[...]")
break
lines.append(var_line_format.format(
v.name, max_name_len,
str(v.get_shape()), max_shape_len,
"{:,}".format(v.get_shape().num_elements()), max_size_len,
v.dtype.base_dtype.name, max_type_len))
lines.append(separator)
lines.append("Total: {:,}".format(total_weights))
return "\n".join(lines)
def log_parameter_overview(variables, msg):
"""Writes a table with variables name and shapes to INFO log.
See get_parameter_overview for details.
Args:
variables: List of `tf.Variable`(s).
msg: Message to be logged before the table.
"""
table = get_parameter_overview(variables, limit=None)
  # The table can be too large to fit into one log entry.
lines = [msg] + table.split("\n")
for i in range(0, len(lines), 80):
logging.info("\n%s", "\n".join(lines[i:i + 80]))
|
import argparse
import ast
import os
import sys
def check(source):
checkers = (
check_reshape,
check_transpose,
check_empty_list,
check_empty_dict,
)
for node in ast.walk(ast.parse(source)):
for checker in checkers:
for err in checker(node):
yield err
def check_reshape(node):
if not isinstance(node, ast.Call):
return
if not isinstance(node.func, ast.Attribute):
return
if isinstance(node.func.value, ast.Name) and \
node.func.value.id in {'np', 'cupy', 'F'}:
return
if not node.func.attr == 'reshape':
return
if len(node.args) > 1:
yield (node.lineno, 'reshape(A, B, ...)')
if len(node.args) == 1 and \
isinstance(node.args[0], ast.Tuple) and \
len(node.args[0].elts) == 1:
yield (node.lineno, 'reshape((A,))')
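# Examples of what check_reshape flags (illustrative only):
#   x.reshape(2, 3)   -> 'reshape(A, B, ...)'
#   x.reshape((2,))   -> 'reshape((A,))'
# Calls on np/cupy/F (e.g. np.reshape(x, (2, 3))) are skipped.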
def check_transpose(node):
if not isinstance(node, ast.Call):
return
if not isinstance(node.func, ast.Attribute):
return
if isinstance(node.func.value, ast.Name) and \
node.func.value.id in {'np', 'cupy', 'F'}:
return
if not node.func.attr == 'transpose':
return
if len(node.args) > 1:
yield (node.lineno, 'transpose(A, B, ...)')
if len(node.args) == 1 and \
isinstance(node.args[0], ast.Tuple) and \
len(node.args[0].elts) == 1:
yield (node.lineno, 'transpose((A,))')
def check_empty_list(node):
if not isinstance(node, ast.Call):
return
if not isinstance(node.func, ast.Name):
return
if node.func.id == 'list' and len(node.args) == 0:
yield (node.lineno, 'init by list()')
def check_empty_dict(node):
if not isinstance(node, ast.Call):
return
if not isinstance(node.func, ast.Name):
return
if node.func.id == 'dict' and len(node.args) == 0:
yield (node.lineno, 'init by dict()')
def check_empty_tuple(node):
if not isinstance(node, ast.Call):
return
if not isinstance(node.func, ast.Name):
return
if node.func.id == 'tuple' and len(node.args) == 0:
yield (node.lineno, 'init by tuple()')
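# Examples of what the empty-container checkers flag (illustrative only):
#   x = list()   -> 'init by list()'   (prefer x = [])
#   y = dict()   -> 'init by dict()'   (prefer y = {})
#   z = tuple()  -> 'init by tuple()'  (note: check_empty_tuple is defined but
#                                       not registered in `check` above)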
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--exclude', nargs='+')
parser.add_argument('dir')
args = parser.parse_args()
n_err = 0
for dir, _, files in os.walk(args.dir):
for file in files:
_, ext = os.path.splitext(file)
if not ext == '.py':
continue
if args.exclude is not None and file in args.exclude:
continue
path = os.path.join(dir, file)
            with open(path) as f:
                lines = f.readlines()
for lineno, msg in check(''.join(lines)):
print('{:s}:{:d} : {:s}'.format(path, lineno, msg))
print(lines[lineno - 1])
n_err += 1
if n_err > 0:
        sys.exit('{:d} style errors were found.'.format(n_err))
if __name__ == '__main__':
main()
|
import logging
from homeassistant.components.vacuum import SUPPORT_FAN_SPEED
from .irobot_base import SUPPORT_IROBOT, IRobotVacuum
_LOGGER = logging.getLogger(__name__)
ATTR_BIN_FULL = "bin_full"
ATTR_BIN_PRESENT = "bin_present"
FAN_SPEED_AUTOMATIC = "Automatic"
FAN_SPEED_ECO = "Eco"
FAN_SPEED_PERFORMANCE = "Performance"
FAN_SPEEDS = [FAN_SPEED_AUTOMATIC, FAN_SPEED_ECO, FAN_SPEED_PERFORMANCE]
# Only Roombas with Carpet Boost can set their fan speed
SUPPORT_ROOMBA_CARPET_BOOST = SUPPORT_IROBOT | SUPPORT_FAN_SPEED
class RoombaVacuum(IRobotVacuum):
"""Basic Roomba robot (without carpet boost)."""
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
state_attrs = super().device_state_attributes
# Get bin state
bin_raw_state = self.vacuum_state.get("bin", {})
bin_state = {}
if bin_raw_state.get("present") is not None:
bin_state[ATTR_BIN_PRESENT] = bin_raw_state.get("present")
if bin_raw_state.get("full") is not None:
bin_state[ATTR_BIN_FULL] = bin_raw_state.get("full")
state_attrs.update(bin_state)
return state_attrs
class RoombaVacuumCarpetBoost(RoombaVacuum):
"""Roomba robot with carpet boost."""
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_ROOMBA_CARPET_BOOST
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
fan_speed = None
carpet_boost = self.vacuum_state.get("carpetBoost")
high_perf = self.vacuum_state.get("vacHigh")
if carpet_boost is not None and high_perf is not None:
if carpet_boost:
fan_speed = FAN_SPEED_AUTOMATIC
elif high_perf:
fan_speed = FAN_SPEED_PERFORMANCE
else: # carpet_boost and high_perf are False
fan_speed = FAN_SPEED_ECO
return fan_speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return FAN_SPEEDS
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if fan_speed.capitalize() in FAN_SPEEDS:
fan_speed = fan_speed.capitalize()
_LOGGER.debug("Set fan speed to: %s", fan_speed)
high_perf = None
carpet_boost = None
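        # Fan speed -> (carpetBoost, vacHigh) mapping applied below:
        #   Automatic   -> (True,  False)
        #   Eco         -> (False, False)
        #   Performance -> (False, True)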
if fan_speed == FAN_SPEED_AUTOMATIC:
high_perf = False
carpet_boost = True
elif fan_speed == FAN_SPEED_ECO:
high_perf = False
carpet_boost = False
elif fan_speed == FAN_SPEED_PERFORMANCE:
high_perf = True
carpet_boost = False
else:
_LOGGER.error("No such fan speed available: %s", fan_speed)
return
        # The set_preference method only accepts string values
await self.hass.async_add_executor_job(
self.vacuum.set_preference, "carpetBoost", str(carpet_boost)
)
await self.hass.async_add_executor_job(
self.vacuum.set_preference, "vacHigh", str(high_perf)
)
|
from __future__ import division
import numpy as np
import chainer
from chainercv import transforms
def mask_to_segm(mask, bbox, segm_size, index=None):
"""Crop and resize mask.
This function requires cv2.
Args:
mask (~numpy.ndarray): See below.
bbox (~numpy.ndarray): See below.
segm_size (int): The size of segm :math:`S`.
index (~numpy.ndarray): See below. :math:`R = N` when
:obj:`index` is :obj:`None`.
Returns:
~numpy.ndarray: See below.
.. csv-table::
:header: name, shape, dtype, format
:obj:`mask`, ":math:`(N, H, W)`", :obj:`bool`, --
:obj:`bbox`, ":math:`(R, 4)`", :obj:`float32`, \
":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
:obj:`index` (optional), ":math:`(R,)`", :obj:`int32`, --
:obj:`segms` (output), ":math:`(R, S, S)`", :obj:`float32`, \
":math:`[0, 1]`"
"""
pad = 1
_, H, W = mask.shape
bbox = chainer.backends.cuda.to_cpu(bbox)
# To work around an issue with cv2.resize (it seems to automatically
# pad with repeated border values), we manually zero-pad the masks by 1
# pixel prior to resizing back to the original image resolution.
# This prevents "top hat" artifacts. We therefore need to expand
# the reference boxes by an appropriate factor.
padded_segm_size = segm_size + pad * 2
expand_scale = padded_segm_size / segm_size
bbox = _expand_bbox(bbox, expand_scale)
resize_size = padded_segm_size
bbox = _integerize_bbox(bbox)
segm = []
if index is None:
index = np.arange(len(bbox))
else:
index = chainer.backends.cuda.to_cpu(index)
for i, bb in zip(index, bbox):
y_min = max(bb[0], 0)
x_min = max(bb[1], 0)
y_max = max(min(bb[2], H), 0)
x_max = max(min(bb[3], W), 0)
if y_max <= y_min or x_max <= x_min:
segm.append(np.zeros((segm_size, segm_size), dtype=np.float32))
continue
bb_height = bb[2] - bb[0]
bb_width = bb[3] - bb[1]
cropped_m = np.zeros((bb_height, bb_width), dtype=np.bool)
y_offset = y_min - bb[0]
x_offset = x_min - bb[1]
cropped_m[y_offset:y_offset + y_max - y_min,
x_offset:x_offset + x_max - x_min] =\
chainer.backends.cuda.to_cpu(mask[i, y_min:y_max, x_min:x_max])
with chainer.using_config('cv_resize_backend', 'cv2'):
sgm = transforms.resize(
cropped_m[None].astype(np.float32),
(resize_size, resize_size))[0].astype(np.int32)
segm.append(sgm[pad:-pad, pad:-pad])
return np.array(segm, dtype=np.float32)
def segm_to_mask(segm, bbox, size):
"""Recover mask from cropped and resized mask.
This function requires cv2.
Args:
segm (~numpy.ndarray): See below.
bbox (~numpy.ndarray): See below.
size (tuple): This is a tuple of length 2. Its elements are
ordered as (height, width).
Returns:
~numpy.ndarray: See below.
.. csv-table::
:header: name, shape, dtype, format
:obj:`segm`, ":math:`(R, S, S)`", :obj:`float32`, --
:obj:`bbox`, ":math:`(R, 4)`", :obj:`float32`, \
":math:`(y_{min}, x_{min}, y_{max}, x_{max})`"
:obj:`mask` (output), ":math:`(R, H, W)`", :obj:`bool`, --
"""
pad = 1
H, W = size
_, segm_size, _ = segm.shape
mask = np.zeros((len(bbox), H, W), dtype=np.bool)
    # As noted in mask_to_segm, cv2.resize needs the same padding adjustment.
padded_segm_size = segm_size + pad * 2
expand_scale = padded_segm_size / segm_size
bbox = _expand_bbox(bbox, expand_scale)
canvas_mask = np.zeros(
(padded_segm_size, padded_segm_size), dtype=np.float32)
bbox = _integerize_bbox(bbox)
for i, (bb, sgm) in enumerate(zip(bbox, segm)):
bb_height = bb[2] - bb[0]
bb_width = bb[3] - bb[1]
if bb_height == 0 or bb_width == 0:
continue
canvas_mask[pad:-pad, pad:-pad] = sgm
with chainer.using_config('cv_resize_backend', 'cv2'):
crop_mask = transforms.resize(
canvas_mask[None], (bb_height, bb_width))[0]
crop_mask = crop_mask > 0.5
y_min = max(bb[0], 0)
x_min = max(bb[1], 0)
y_max = max(min(bb[2], H), 0)
x_max = max(min(bb[3], W), 0)
y_offset = y_min - bb[0]
x_offset = x_min - bb[1]
mask[i, y_min:y_max, x_min:x_max] = crop_mask[
y_offset:y_offset + y_max - y_min,
x_offset:x_offset + x_max - x_min]
return mask
def _integerize_bbox(bbox):
return np.round(bbox).astype(np.int32)
def _expand_bbox(bbox, scale):
"""Expand an array of boxes by a given scale."""
xp = chainer.backends.cuda.get_array_module(bbox)
h_half = (bbox[:, 2] - bbox[:, 0]) * .5
w_half = (bbox[:, 3] - bbox[:, 1]) * .5
y_c = (bbox[:, 2] + bbox[:, 0]) * .5
x_c = (bbox[:, 3] + bbox[:, 1]) * .5
h_half *= scale
w_half *= scale
expanded_bbox = xp.zeros(bbox.shape)
expanded_bbox[:, 0] = y_c - h_half
expanded_bbox[:, 1] = x_c - w_half
expanded_bbox[:, 2] = y_c + h_half
expanded_bbox[:, 3] = x_c + w_half
return expanded_bbox
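# Example (illustrative): scaling a single box by 1.5 about its centre.
#   _expand_bbox(np.array([[0., 0., 4., 4.]]), 1.5)
#   -> array([[-1., -1., 5., 5.]])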
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
from absl.flags import _helpers
_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
class Error(Exception):
"""The base class for all flags errors."""
class CantOpenFlagFileError(Error):
"""Raised when flagfile fails to open.
E.g. the file doesn't exist, or has wrong permissions.
"""
class DuplicateFlagError(Error):
"""Raised if there is a flag naming conflict."""
@classmethod
def from_flag(cls, flagname, flag_values, other_flag_values=None):
"""Creates a DuplicateFlagError by providing flag name and values.
Args:
flagname: str, the name of the flag being redefined.
flag_values: FlagValues, the FlagValues instance containing the first
definition of flagname.
other_flag_values: FlagValues, if it is not None, it should be the
FlagValues object where the second definition of flagname occurs.
If it is None, we assume that we're being called when attempting
to create the flag a second time, and we use the module calling
this one as the source of the second definition.
Returns:
An instance of DuplicateFlagError.
"""
first_module = flag_values.find_module_defining_flag(
flagname, default='<unknown>')
if other_flag_values is None:
second_module = _helpers.get_calling_module()
else:
second_module = other_flag_values.find_module_defining_flag(
flagname, default='<unknown>')
flag_summary = flag_values[flagname].help
msg = ("The flag '%s' is defined twice. First from %s, Second from %s. "
"Description from first occurrence: %s") % (
flagname, first_module, second_module, flag_summary)
return cls(msg)
class IllegalFlagValueError(Error):
"""Raised when the flag command line argument is illegal."""
class UnrecognizedFlagError(Error):
"""Raised when a flag is unrecognized.
Attributes:
flagname: str, the name of the unrecognized flag.
flagvalue: The value of the flag, empty if the flag is not defined.
"""
def __init__(self, flagname, flagvalue='', suggestions=None):
self.flagname = flagname
self.flagvalue = flagvalue
if suggestions:
# Space before the question mark is intentional to not include it in the
# selection when copy-pasting the suggestion from (some) terminals.
tip = '. Did you mean: %s ?' % ', '.join(suggestions)
else:
tip = ''
super(UnrecognizedFlagError, self).__init__(
'Unknown command line flag \'%s\'%s' % (flagname, tip))
class UnparsedFlagAccessError(Error):
"""Raised when accessing the flag value from unparsed FlagValues."""
class ValidationError(Error):
"""Raised when flag validator constraint is not satisfied."""
class FlagNameConflictsWithMethodError(Error):
"""Raised when a flag name conflicts with FlagValues methods."""
|
import asyncio
import contextlib
import logging
import re
from collections import OrderedDict
from pathlib import Path
from typing import Final, Pattern
import discord
import lavalink
from aiohttp import ClientConnectorError
from discord.ext.commands import CheckFailure
from redbot.core import commands
from redbot.core.i18n import Translator
from redbot.core.utils.chat_formatting import box, humanize_list
from ...audio_logging import debug_exc_log
from ...errors import TrackEnqueueError
from ..abc import MixinMeta
from ..cog_utils import HUMANIZED_PERM, CompositeMetaClass
log = logging.getLogger("red.cogs.Audio.cog.Events.dpy")
_ = Translator("Audio", Path(__file__))
RE_CONVERSION: Final[Pattern] = re.compile('Converting to "(.*)" failed for parameter "(.*)".')
class DpyEvents(MixinMeta, metaclass=CompositeMetaClass):
async def cog_before_invoke(self, ctx: commands.Context) -> None:
await self.cog_ready_event.wait()
        # Check for an unsupported architecture.
        # This check needs refactoring at a later date so that we have a
        # better way to handle the tasks.
if self.command_llsetup in [ctx.command, ctx.command.root_parent]:
pass
elif self.lavalink_connect_task and self.lavalink_connect_task.cancelled():
await ctx.send(
_(
"You have attempted to run Audio's Lavalink server on an unsupported"
" architecture. Only settings related commands will be available."
)
)
raise RuntimeError(
"Not running audio command due to invalid machine architecture for Lavalink."
)
current_perms = ctx.channel.permissions_for(ctx.me)
surpass_ignore = (
isinstance(ctx.channel, discord.abc.PrivateChannel)
or await ctx.bot.is_owner(ctx.author)
or await ctx.bot.is_admin(ctx.author)
)
guild = ctx.guild
if guild and not current_perms.is_superset(self.permission_cache):
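            # Permissions iterate as (name, enabled) pairs; keep only the ones
            # the cog expects to be enabled but the bot currently lacks.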
current_perms_set = set(iter(current_perms))
expected_perms_set = set(iter(self.permission_cache))
diff = expected_perms_set - current_perms_set
missing_perms = dict((i for i in diff if i[-1] is not False))
missing_perms = OrderedDict(sorted(missing_perms.items()))
missing_permissions = missing_perms.keys()
log.debug(
"Missing the following perms in %d, Owner ID: %d: %s",
ctx.guild.id,
ctx.guild.owner.id,
humanize_list(list(missing_permissions)),
)
if not surpass_ignore:
text = _(
"I'm missing permissions in this server, "
"Please address this as soon as possible.\n\n"
"Expected Permissions:\n"
)
for perm, value in missing_perms.items():
text += "{perm}: [{status}]\n".format(
status=_("Enabled") if value else _("Disabled"),
perm=HUMANIZED_PERM.get(perm),
)
text = text.strip()
if current_perms.send_messages and current_perms.read_messages:
await ctx.send(box(text=text, lang="ini"))
else:
log.info(
"Missing write permission in %d, Owner ID: %d",
ctx.guild.id,
ctx.guild.owner.id,
)
raise CheckFailure(message=text)
with contextlib.suppress(Exception):
player = lavalink.get_player(ctx.guild.id)
notify_channel = player.fetch("channel")
if not notify_channel:
player.store("channel", ctx.channel.id)
self._daily_global_playlist_cache.setdefault(
self.bot.user.id, await self.config.daily_playlists()
)
if self.local_folder_current_path is None:
self.local_folder_current_path = Path(await self.config.localpath())
if not ctx.guild:
return
dj_enabled = self._dj_status_cache.setdefault(
ctx.guild.id, await self.config.guild(ctx.guild).dj_enabled()
)
self._daily_playlist_cache.setdefault(
ctx.guild.id, await self.config.guild(ctx.guild).daily_playlists()
)
self._persist_queue_cache.setdefault(
ctx.guild.id, await self.config.guild(ctx.guild).persist_queue()
)
if dj_enabled:
dj_role = self._dj_role_cache.setdefault(
ctx.guild.id, await self.config.guild(ctx.guild).dj_role()
)
dj_role_obj = ctx.guild.get_role(dj_role)
if not dj_role_obj:
await self.config.guild(ctx.guild).dj_enabled.set(None)
self._dj_status_cache[ctx.guild.id] = None
await self.config.guild(ctx.guild).dj_role.set(None)
self._dj_role_cache[ctx.guild.id] = None
await self.send_embed_msg(ctx, title=_("No DJ role found. Disabling DJ mode."))
async def cog_after_invoke(self, ctx: commands.Context) -> None:
await self.maybe_run_pending_db_tasks(ctx)
async def cog_command_error(self, ctx: commands.Context, error: Exception) -> None:
error = getattr(error, "original", error)
handled = False
if isinstance(error, commands.ArgParserFailure):
handled = True
msg = _("`{user_input}` is not a valid value for `{command}`").format(
user_input=error.user_input,
command=error.cmd,
)
if error.custom_help_msg:
msg += f"\n{error.custom_help_msg}"
await self.send_embed_msg(
ctx,
title=_("Unable To Parse Argument"),
description=msg,
error=True,
)
if error.send_cmd_help:
await ctx.send_help()
elif isinstance(error, commands.ConversionFailure):
handled = True
if error.args:
if match := RE_CONVERSION.search(error.args[0]):
await self.send_embed_msg(
ctx,
title=_("Invalid Argument"),
description=_(
"The argument you gave for `{}` is not valid: I was expecting a `{}`."
).format(match.group(2), match.group(1)),
error=True,
)
else:
await self.send_embed_msg(
ctx,
title=_("Invalid Argument"),
description=error.args[0],
error=True,
)
else:
await ctx.send_help()
elif isinstance(error, (IndexError, ClientConnectorError)) and any(
e in str(error).lower() for e in ["no nodes found.", "cannot connect to host"]
):
handled = True
await self.send_embed_msg(
ctx,
title=_("Invalid Environment"),
description=_("Connection to Lavalink has been lost."),
error=True,
)
debug_exc_log(log, error, "This is a handled error")
elif isinstance(error, KeyError) and "such player for that guild" in str(error):
handled = True
await self.send_embed_msg(
ctx,
title=_("No Player Available"),
description=_("The bot is not connected to a voice channel."),
error=True,
)
debug_exc_log(log, error, "This is a handled error")
elif isinstance(error, (TrackEnqueueError, asyncio.exceptions.TimeoutError)):
handled = True
await self.send_embed_msg(
ctx,
title=_("Unable to Get Track"),
description=_(
"I'm unable to get a track from Lavalink at the moment, "
"try again in a few minutes."
),
error=True,
)
debug_exc_log(log, error, "This is a handled error")
elif isinstance(error, discord.errors.HTTPException):
handled = True
await self.send_embed_msg(
ctx,
title=_("There was an issue communicating with Discord."),
description=_("This error has been reported to the bot owner."),
error=True,
)
log.exception(
"This is not handled in the core Audio cog, please report it.", exc_info=error
)
if not isinstance(
error,
(
commands.CheckFailure,
commands.UserInputError,
commands.DisabledCommand,
commands.CommandOnCooldown,
commands.MaxConcurrencyReached,
),
):
self.update_player_lock(ctx, False)
if self.api_interface is not None:
await self.api_interface.run_tasks(ctx)
if not handled:
await self.bot.on_command_error(ctx, error, unhandled_by_cog=True)
def cog_unload(self) -> None:
if not self.cog_cleaned_up:
self.bot.dispatch("red_audio_unload", self)
self.session.detach()
self.bot.loop.create_task(self._close_database())
if self.player_automated_timer_task:
self.player_automated_timer_task.cancel()
if self.lavalink_connect_task:
self.lavalink_connect_task.cancel()
if self.cog_init_task:
self.cog_init_task.cancel()
if self._restore_task:
self._restore_task.cancel()
lavalink.unregister_event_listener(self.lavalink_event_handler)
lavalink.unregister_update_listener(self.lavalink_update_handler)
self.bot.loop.create_task(lavalink.close())
if self.player_manager is not None:
self.bot.loop.create_task(self.player_manager.shutdown())
self.cog_cleaned_up = True
@commands.Cog.listener()
async def on_voice_state_update(
self, member: discord.Member, before: discord.VoiceState, after: discord.VoiceState
) -> None:
if await self.bot.cog_disabled_in_guild(self, member.guild):
return
await self.cog_ready_event.wait()
if after.channel != before.channel:
try:
self.skip_votes[before.channel.guild].remove(member.id)
except (ValueError, KeyError, AttributeError):
pass
channel = self.rgetattr(member, "voice.channel", None)
bot_voice_state = self.rgetattr(member, "guild.me.voice.self_deaf", None)
if channel and bot_voice_state is False:
try:
player = lavalink.get_player(channel.guild.id)
except (KeyError, AttributeError):
pass
else:
if player.channel.id == channel.id:
await self.self_deafen(player)
|
import io
import os.path
import shutil
import functools
from typing import Dict, IO, Optional
import attr
from PyQt5.QtCore import pyqtSlot, pyqtSignal, QTimer, QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtNetwork import QNetworkRequest, QNetworkReply
from qutebrowser.config import config, websettings
from qutebrowser.utils import message, usertypes, log, urlutils, utils, debug, objreg
from qutebrowser.misc import quitter
from qutebrowser.browser import downloads
from qutebrowser.browser.webkit import http
from qutebrowser.browser.webkit.network import networkmanager
@attr.s
class _RetryInfo:
request = attr.ib()
manager = attr.ib()
class DownloadItem(downloads.AbstractDownloadItem):
"""A single download currently running.
There are multiple ways the data can flow from the QNetworkReply to the
disk.
If the filename/file object is known immediately when starting the
download, QNetworkReply's readyRead writes to the target file directly.
If not, readyRead is ignored and with self._read_timer we periodically read
into the self._buffer BytesIO slowly, so some broken servers don't close
our connection.
As soon as we know the file object, we copy self._buffer over and the next
readyRead will write to the real file object.
Class attributes:
_MAX_REDIRECTS: The maximum redirection count.
Attributes:
_retry_info: A _RetryInfo instance.
        _redirects: How many times we were redirected already.
_buffer: A BytesIO object to buffer incoming data until we know the
target file.
_read_timer: A Timer which reads the QNetworkReply into self._buffer
periodically.
_reply: The QNetworkReply associated with this download.
_autoclose: Whether to close the associated file when the download is
done.
Signals:
adopt_download: Emitted when a download is retried and should be
adopted by the QNAM if needed.
arg 0: The new DownloadItem
"""
_MAX_REDIRECTS = 10
adopt_download = pyqtSignal(object) # DownloadItem
def __init__(self, reply, manager):
"""Constructor.
Args:
reply: The QNetworkReply to download.
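            manager: The DownloadManager this download belongs to.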
"""
super().__init__(manager=manager, parent=manager)
self.fileobj: Optional[IO[bytes]] = None
self.raw_headers: Dict[bytes, bytes] = {}
self._autoclose = True
self._retry_info = None
self._reply = None
self._buffer = io.BytesIO()
self._read_timer = usertypes.Timer(self, name='download-read-timer')
self._read_timer.setInterval(500)
self._read_timer.timeout.connect(self._on_read_timer_timeout)
self._redirects = 0
self._url = reply.url()
self._init_reply(reply)
def _create_fileobj(self):
"""Create a file object using the internal filename."""
assert self._filename is not None
try:
fileobj = open(self._filename, 'wb')
except OSError as e:
self._die(e.strerror)
else:
self._set_fileobj(fileobj)
def _do_die(self):
"""Abort the download and emit an error."""
self._read_timer.stop()
if self._reply is None:
log.downloads.debug("Reply gone while dying")
return
self._reply.downloadProgress.disconnect()
self._reply.finished.disconnect()
self._reply.error.disconnect()
self._reply.readyRead.disconnect()
with log.hide_qt_warning('QNetworkReplyImplPrivate::error: Internal '
'problem, this method must only be called '
'once.'):
# See https://codereview.qt-project.org/#/c/107863/
self._reply.abort()
self._reply.deleteLater()
self._reply = None
if self.fileobj is not None:
try:
self.fileobj.close()
except OSError:
log.downloads.exception("Error while closing file object")
def _init_reply(self, reply):
"""Set a new reply and connect its signals.
Args:
reply: The QNetworkReply to handle.
"""
self.done = False
self.successful = False
self._reply = reply
reply.setReadBufferSize(16 * 1024 * 1024) # 16 MB
reply.downloadProgress.connect(self.stats.on_download_progress)
reply.finished.connect(self._on_reply_finished)
reply.error.connect(self._on_reply_error)
reply.readyRead.connect(self._on_ready_read)
reply.metaDataChanged.connect(self._on_meta_data_changed)
self._retry_info = _RetryInfo(request=reply.request(),
manager=reply.manager())
if not self.fileobj:
self._read_timer.start()
# We could have got signals before we connected slots to them.
# Here no signals are connected to the DownloadItem yet, so we use a
# singleShot QTimer to emit them after they are connected.
if reply.error() != QNetworkReply.NoError:
QTimer.singleShot(0, lambda: self._die(reply.errorString()))
def _do_cancel(self):
self._read_timer.stop()
if self._reply is not None:
self._reply.finished.disconnect(self._on_reply_finished)
self._reply.abort()
self._reply.deleteLater()
self._reply = None
if self.fileobj is not None:
self.fileobj.close()
self.cancelled.emit()
@pyqtSlot()
def retry(self):
"""Retry a failed download."""
assert self.done
assert not self.successful
assert self._retry_info is not None
# Not calling self.cancel() here because the download is done (albeit
# unsuccessfully)
self.remove()
self.delete()
new_reply = self._retry_info.manager.get(self._retry_info.request)
new_download = self._manager.fetch(new_reply,
suggested_filename=self.basename)
self.adopt_download.emit(new_download)
def _get_open_filename(self):
filename = self._filename
if filename is None:
filename = getattr(self.fileobj, 'name', None)
return filename
def url(self) -> QUrl:
# Note: self._reply is deleted when the download finishes
return self._url
def _ensure_can_set_filename(self, filename):
if self.fileobj is not None: # pragma: no cover
raise ValueError("fileobj was already set! filename: {}, "
"existing: {}, fileobj {}".format(
filename, self._filename, self.fileobj))
def _after_set_filename(self):
self._create_fileobj()
def _ask_confirm_question(self, title, msg, *, custom_yes_action=None):
yes_action = custom_yes_action or self._after_set_filename
no_action = functools.partial(self.cancel, remove_data=False)
url = 'file://{}'.format(self._filename)
message.confirm_async(title=title, text=msg, yes_action=yes_action,
no_action=no_action, cancel_action=no_action,
abort_on=[self.cancelled, self.error], url=url)
def _ask_create_parent_question(self, title, msg,
force_overwrite, remember_directory):
assert self._filename is not None
no_action = functools.partial(self.cancel, remove_data=False)
url = 'file://{}'.format(os.path.dirname(self._filename))
message.confirm_async(title=title, text=msg,
yes_action=(lambda:
self._after_create_parent_question(
force_overwrite,
remember_directory)),
no_action=no_action, cancel_action=no_action,
abort_on=[self.cancelled, self.error], url=url)
def _set_fileobj(self, fileobj, *, autoclose=True):
"""Set the file object to write the download to.
Args:
fileobj: A file-like object.
"""
assert self._reply is not None
if self.fileobj is not None: # pragma: no cover
raise ValueError("fileobj was already set! Old: {}, new: "
"{}".format(self.fileobj, fileobj))
self.fileobj = fileobj
self._autoclose = autoclose
try:
self._read_timer.stop()
log.downloads.debug("buffer: {} bytes".format(self._buffer.tell()))
self._buffer.seek(0)
shutil.copyfileobj(self._buffer, fileobj)
self._buffer.close()
if self._reply.isFinished():
# Downloading to the buffer in RAM has already finished so we
# write out the data and clean up now.
self._on_reply_finished()
else:
# Since the buffer already might be full, on_ready_read might
# not be called at all anymore, so we force it here to flush
# the buffer and continue receiving new data.
self._on_ready_read()
except OSError as e:
self._die(e.strerror)
def _set_tempfile(self, fileobj):
self._set_fileobj(fileobj)
def _finish_download(self):
"""Write buffered data to disk and finish the QNetworkReply."""
assert self._reply is not None
assert self.fileobj is not None
log.downloads.debug("Finishing download...")
if self._reply.isOpen():
self.fileobj.write(self._reply.readAll())
if self._autoclose:
self.fileobj.close()
self.successful = self._reply.error() == QNetworkReply.NoError
self._reply.close()
self._reply.deleteLater()
self._reply = None
self.finished.emit()
self.done = True
log.downloads.debug("Download {} finished".format(self.basename))
self.data_changed.emit()
@pyqtSlot()
def _on_reply_finished(self):
"""Clean up when the download was finished.
Note when this gets called, only the QNetworkReply has finished. This
doesn't mean the download (i.e. writing data to the disk) is finished
as well. Therefore, we can't close() the QNetworkReply in here yet.
"""
if self._reply is None:
return
self._read_timer.stop()
self.stats.finish()
is_redirected = self._handle_redirect()
if is_redirected:
return
log.downloads.debug("Reply finished, fileobj {}".format(self.fileobj))
if self.fileobj is not None:
# We can do a "delayed" write immediately to empty the buffer and
# clean up.
self._finish_download()
@pyqtSlot()
def _on_ready_read(self):
"""Read available data and save file when ready to read."""
if self.fileobj is None or self._reply is None:
# No filename has been set yet (so we don't empty the buffer) or we
# got a readyRead after the reply was finished (which happens on
# qute://log for example).
return
if not self._reply.isOpen():
raise OSError("Reply is closed!")
try:
self.fileobj.write(self._reply.readAll())
except OSError as e:
self._die(e.strerror)
@pyqtSlot('QNetworkReply::NetworkError')
def _on_reply_error(self, code):
"""Handle QNetworkReply errors."""
if code == QNetworkReply.OperationCanceledError:
return
if self._reply is None:
error = "Unknown error: {}".format(
debug.qenum_key(QNetworkReply, code))
else:
error = self._reply.errorString()
self._die(error)
@pyqtSlot()
def _on_read_timer_timeout(self):
"""Read some bytes from the QNetworkReply periodically."""
assert self._reply is not None
if not self._reply.isOpen():
raise OSError("Reply is closed!")
data = self._reply.read(1024)
if data is not None:
self._buffer.write(data)
@pyqtSlot()
def _on_meta_data_changed(self):
"""Update the download's metadata."""
if self._reply is None:
return
self.raw_headers = {}
for key, value in self._reply.rawHeaderPairs():
self.raw_headers[bytes(key)] = bytes(value)
def _handle_redirect(self):
"""Handle an HTTP redirect.
Return:
True if the download was redirected, False otherwise.
"""
assert self._reply is not None
redirect = self._reply.attribute(
QNetworkRequest.RedirectionTargetAttribute)
if redirect is None or redirect.isEmpty():
return False
new_url = self._reply.url().resolved(redirect)
new_request = self._reply.request()
if new_url == new_request.url():
return False
if self._redirects > self._MAX_REDIRECTS:
self._die("Maximum redirection count reached!")
self.delete()
return True # so on_reply_finished aborts
log.downloads.debug("{}: Handling redirect".format(self))
self._redirects += 1
new_request.setUrl(new_url)
old_reply = self._reply
assert old_reply is not None
old_reply.finished.disconnect(self._on_reply_finished)
self._read_timer.stop()
self._reply = None
if self.fileobj is not None:
self.fileobj.seek(0)
log.downloads.debug("redirected: {} -> {}".format(
old_reply.url(), new_request.url()))
new_reply = old_reply.manager().get(new_request)
self._init_reply(new_reply)
old_reply.deleteLater()
return True
def _uses_nam(self, nam):
"""Check if this download uses the given QNetworkAccessManager."""
assert self._retry_info is not None
running_nam = self._reply is not None and self._reply.manager() is nam
# user could request retry after tab is closed.
retry_nam = (self.done and (not self.successful) and
self._retry_info.manager is nam)
return running_nam or retry_nam
class DownloadManager(downloads.AbstractDownloadManager):
"""Manager for currently running downloads.
Attributes:
_networkmanager: A NetworkManager for generic downloads.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._networkmanager = networkmanager.NetworkManager(
win_id=None, tab_id=None,
private=config.val.content.private_browsing, parent=self)
@pyqtSlot('QUrl')
def get(self, url, **kwargs):
"""Start a download with a link URL.
Args:
url: The URL to get, as QUrl
**kwargs: passed to get_request().
Return:
The created DownloadItem.
"""
if not url.isValid():
urlutils.invalid_url_error(url, "start download")
return None
req = QNetworkRequest(url)
user_agent = websettings.user_agent(url)
req.setHeader(QNetworkRequest.UserAgentHeader, user_agent)
return self.get_request(req, **kwargs)
def get_mhtml(self, tab, target):
"""Download the given tab as mhtml to the given DownloadTarget."""
assert tab.backend == usertypes.Backend.QtWebKit
from qutebrowser.browser.webkit import mhtml
if target is not None:
mhtml.start_download_checked(target, tab=tab)
return
suggested_fn = utils.sanitize_filename(tab.title() + ".mhtml")
filename = downloads.immediate_download_path()
if filename is not None:
target = downloads.FileDownloadTarget(filename)
mhtml.start_download_checked(target, tab=tab)
else:
question = downloads.get_filename_question(
suggested_filename=suggested_fn, url=tab.url(), parent=tab)
question.answered.connect(functools.partial(
mhtml.start_download_checked, tab=tab))
message.global_bridge.ask(question, blocking=False)
def get_request(self, request, *, target=None,
suggested_fn=None, **kwargs):
"""Start a download with a QNetworkRequest.
Args:
request: The QNetworkRequest to download.
target: Where to save the download as downloads.DownloadTarget.
            suggested_fn: The suggested filename for the download, if known.
            **kwargs: Passed to _fetch_request.
Return:
The created DownloadItem.
"""
# WORKAROUND for Qt corrupting data loaded from cache:
# https://bugreports.qt.io/browse/QTBUG-42757
request.setAttribute(QNetworkRequest.CacheLoadControlAttribute,
QNetworkRequest.AlwaysNetwork)
if suggested_fn is not None:
pass
elif request.url().scheme().lower() != 'data':
suggested_fn = urlutils.filename_from_url(request.url())
else:
# We might be downloading a binary blob embedded on a page or even
# generated dynamically via javascript. We try to figure out a more
# sensible name than the base64 content of the data.
origin = request.originatingObject()
try:
origin_url = origin.url()
except AttributeError:
# Raised either if origin is None or some object that doesn't
# have its own url. We're probably fine with a default fallback
# then.
suggested_fn = 'binary blob'
else:
# Use the originating URL as a base for the filename (works
# e.g. for pdf.js).
suggested_fn = urlutils.filename_from_url(origin_url)
if suggested_fn is None:
suggested_fn = 'qutebrowser-download'
return self._fetch_request(request,
target=target,
suggested_filename=suggested_fn,
**kwargs)
def _fetch_request(self, request, *, qnam=None, **kwargs):
"""Download a QNetworkRequest to disk.
Args:
request: The QNetworkRequest to download.
qnam: The QNetworkAccessManager to use.
**kwargs: passed to fetch().
Return:
The created DownloadItem.
"""
if qnam is None:
qnam = self._networkmanager
reply = qnam.get(request)
return self.fetch(reply, **kwargs)
@pyqtSlot('QNetworkReply')
def fetch(self, reply, *, target=None, auto_remove=False,
suggested_filename=None, prompt_download_directory=None):
"""Download a QNetworkReply to disk.
Args:
reply: The QNetworkReply to download.
target: Where to save the download as downloads.DownloadTarget.
auto_remove: Whether to remove the download even if
                         downloads.remove_finished is set to -1.
            suggested_filename: The filename to use for the download.
            prompt_download_directory: Whether to prompt for the download
                                       directory.
Return:
The created DownloadItem.
"""
if not suggested_filename:
try:
suggested_filename = target.suggested_filename()
except downloads.NoFilenameError:
_, suggested_filename = http.parse_content_disposition(reply)
log.downloads.debug("fetch: {} -> {}".format(reply.url(),
suggested_filename))
download = DownloadItem(reply, manager=self)
self._init_item(download, auto_remove, suggested_filename)
if target is not None:
download.set_target(target)
return download
# Neither filename nor fileobj were given
filename = downloads.immediate_download_path(prompt_download_directory)
if filename is not None:
# User doesn't want to be asked, so just use the download_dir
target = downloads.FileDownloadTarget(filename)
download.set_target(target)
return download
# Ask the user for a filename
question = downloads.get_filename_question(
suggested_filename=suggested_filename, url=reply.url(),
parent=self)
self._init_filename_question(question, download)
message.global_bridge.ask(question, blocking=False)
return download
def has_downloads_with_nam(self, nam):
"""Check if the DownloadManager has any downloads with the given QNAM.
Args:
nam: The QNetworkAccessManager to check.
Return:
A boolean.
"""
assert nam.adopted_downloads == 0
for download in self.downloads:
if download._uses_nam(nam): # pylint: disable=protected-access
nam.adopt_download(download)
return nam.adopted_downloads
def init():
"""Initialize the global QtNetwork download manager."""
download_manager = DownloadManager(parent=QApplication.instance())
objreg.register('qtnetwork-download-manager', download_manager)
quitter.instance.shutting_down.connect(download_manager.shutdown)
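# Illustrative (hypothetical) usage once init() has run elsewhere in
# qutebrowser: fetch the registered manager from objreg and start a download
# for a placeholder URL.
#
#   download_manager = objreg.get('qtnetwork-download-manager')
#   download_manager.get(QUrl('https://example.com/file.tar.gz'))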
|
from collections import OrderedDict
from rest_framework import serializers
class OrderedDictField(serializers.Field):
"""
    Serializer field which transparently converts its value to and from an OrderedDict without further processing.
"""
def to_representation(self, obj):
return OrderedDict(obj)
def to_internal_value(self, data):
return OrderedDict(data)
class JSONSerializerField(serializers.Field):
"""
    Serializer field which passes its value through unchanged instead of serializing/deserializing it.
"""
def __init__(self, encoder=None, **kwargs):
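        # ``encoder`` is accepted (presumably to stay call-compatible with
        # DRF's JSONField signature) but is not used: the value is passed
        # through untouched, so no encoding ever happens here.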
super().__init__(**kwargs)
def to_representation(self, obj):
return obj
def to_internal_value(self, data):
return data
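# Minimal usage sketch (hypothetical serializer): both fields hand their value
# through, so an arbitrary JSON payload and an ordered mapping survive
# validation unchanged (the mapping is merely wrapped in an OrderedDict).
class ExampleConfigSerializer(serializers.Serializer):
    options = OrderedDictField()
    payload = JSONSerializerField()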
|
import asyncio
from pprint import pformat
from urllib.parse import urlparse
import async_timeout
from pydeconz.errors import RequestError, ResponseError
from pydeconz.utils import (
async_discovery,
async_get_api_key,
async_get_bridge_id,
normalize_bridge_id,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import CONF_API_KEY, CONF_HOST, CONF_PORT
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from .const import (
CONF_ALLOW_CLIP_SENSOR,
CONF_ALLOW_DECONZ_GROUPS,
CONF_ALLOW_NEW_DEVICES,
CONF_BRIDGE_ID,
DEFAULT_PORT,
DOMAIN,
LOGGER,
)
from .gateway import get_gateway_from_config_entry
DECONZ_MANUFACTURERURL = "http://www.dresden-elektronik.de"
CONF_SERIAL = "serial"
CONF_MANUAL_INPUT = "Manually define gateway"
@callback
def get_master_gateway(hass):
"""Return the gateway which is marked as master."""
for gateway in hass.data[DOMAIN].values():
if gateway.master:
return gateway
class DeconzFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a deCONZ config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
_hassio_discovery = None
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return DeconzOptionsFlowHandler(config_entry)
def __init__(self):
"""Initialize the deCONZ config flow."""
self.bridge_id = None
self.bridges = []
self.deconz_config = {}
async def async_step_user(self, user_input=None):
"""Handle a deCONZ config flow start.
Let user choose between discovered bridges and manual configuration.
If no bridge is found allow user to manually input configuration.
"""
if user_input is not None:
if CONF_MANUAL_INPUT == user_input[CONF_HOST]:
return await self.async_step_manual_input()
for bridge in self.bridges:
if bridge[CONF_HOST] == user_input[CONF_HOST]:
self.bridge_id = bridge[CONF_BRIDGE_ID]
self.deconz_config = {
CONF_HOST: bridge[CONF_HOST],
CONF_PORT: bridge[CONF_PORT],
}
return await self.async_step_link()
session = aiohttp_client.async_get_clientsession(self.hass)
try:
with async_timeout.timeout(10):
self.bridges = await async_discovery(session)
except (asyncio.TimeoutError, ResponseError):
self.bridges = []
LOGGER.debug("Discovered deCONZ gateways %s", pformat(self.bridges))
if self.bridges:
hosts = []
for bridge in self.bridges:
hosts.append(bridge[CONF_HOST])
hosts.append(CONF_MANUAL_INPUT)
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({vol.Optional(CONF_HOST): vol.In(hosts)}),
)
return await self.async_step_manual_input()
async def async_step_manual_input(self, user_input=None):
"""Manual configuration."""
if user_input:
self.deconz_config = user_input
return await self.async_step_link()
return self.async_show_form(
step_id="manual_input",
data_schema=vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
}
),
)
async def async_step_link(self, user_input=None):
"""Attempt to link with the deCONZ bridge."""
errors = {}
LOGGER.debug(
"Preparing linking with deCONZ gateway %s", pformat(self.deconz_config)
)
if user_input is not None:
session = aiohttp_client.async_get_clientsession(self.hass)
try:
with async_timeout.timeout(10):
api_key = await async_get_api_key(session, **self.deconz_config)
except (ResponseError, RequestError, asyncio.TimeoutError):
errors["base"] = "no_key"
else:
self.deconz_config[CONF_API_KEY] = api_key
return await self._create_entry()
return self.async_show_form(step_id="link", errors=errors)
async def _create_entry(self):
"""Create entry for gateway."""
if not self.bridge_id:
session = aiohttp_client.async_get_clientsession(self.hass)
try:
with async_timeout.timeout(10):
self.bridge_id = await async_get_bridge_id(
session, **self.deconz_config
)
await self.async_set_unique_id(self.bridge_id)
self._abort_if_unique_id_configured(
updates={
CONF_HOST: self.deconz_config[CONF_HOST],
CONF_PORT: self.deconz_config[CONF_PORT],
CONF_API_KEY: self.deconz_config[CONF_API_KEY],
}
)
except asyncio.TimeoutError:
return self.async_abort(reason="no_bridges")
if self.bridge_id == "0000000000000000":
return self.async_abort(reason="no_hardware_available")
return self.async_create_entry(title=self.bridge_id, data=self.deconz_config)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered deCONZ bridge."""
if (
discovery_info.get(ssdp.ATTR_UPNP_MANUFACTURER_URL)
!= DECONZ_MANUFACTURERURL
):
return self.async_abort(reason="not_deconz_bridge")
LOGGER.debug("deCONZ SSDP discovery %s", pformat(discovery_info))
self.bridge_id = normalize_bridge_id(discovery_info[ssdp.ATTR_UPNP_SERIAL])
parsed_url = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION])
entry = await self.async_set_unique_id(self.bridge_id)
if entry and entry.source == "hassio":
return self.async_abort(reason="already_configured")
self._abort_if_unique_id_configured(
updates={CONF_HOST: parsed_url.hostname, CONF_PORT: parsed_url.port}
)
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context["title_placeholders"] = {"host": parsed_url.hostname}
self.deconz_config = {
CONF_HOST: parsed_url.hostname,
CONF_PORT: parsed_url.port,
}
return await self.async_step_link()
async def async_step_hassio(self, discovery_info):
"""Prepare configuration for a Hass.io deCONZ bridge.
This flow is triggered by the discovery component.
"""
LOGGER.debug("deCONZ HASSIO discovery %s", pformat(discovery_info))
self.bridge_id = normalize_bridge_id(discovery_info[CONF_SERIAL])
await self.async_set_unique_id(self.bridge_id)
self._abort_if_unique_id_configured(
updates={
CONF_HOST: discovery_info[CONF_HOST],
CONF_PORT: discovery_info[CONF_PORT],
CONF_API_KEY: discovery_info[CONF_API_KEY],
}
)
self._hassio_discovery = discovery_info
return await self.async_step_hassio_confirm()
async def async_step_hassio_confirm(self, user_input=None):
"""Confirm a Hass.io discovery."""
if user_input is not None:
self.deconz_config = {
CONF_HOST: self._hassio_discovery[CONF_HOST],
CONF_PORT: self._hassio_discovery[CONF_PORT],
CONF_API_KEY: self._hassio_discovery[CONF_API_KEY],
}
return await self._create_entry()
return self.async_show_form(
step_id="hassio_confirm",
description_placeholders={"addon": self._hassio_discovery["addon"]},
)
class DeconzOptionsFlowHandler(config_entries.OptionsFlow):
"""Handle deCONZ options."""
def __init__(self, config_entry):
"""Initialize deCONZ options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
self.gateway = None
async def async_step_init(self, user_input=None):
"""Manage the deCONZ options."""
self.gateway = get_gateway_from_config_entry(self.hass, self.config_entry)
return await self.async_step_deconz_devices()
async def async_step_deconz_devices(self, user_input=None):
"""Manage the deconz devices options."""
if user_input is not None:
self.options.update(user_input)
return self.async_create_entry(title="", data=self.options)
return self.async_show_form(
step_id="deconz_devices",
data_schema=vol.Schema(
{
vol.Optional(
CONF_ALLOW_CLIP_SENSOR,
default=self.gateway.option_allow_clip_sensor,
): bool,
vol.Optional(
CONF_ALLOW_DECONZ_GROUPS,
default=self.gateway.option_allow_deconz_groups,
): bool,
vol.Optional(
CONF_ALLOW_NEW_DEVICES,
default=self.gateway.option_allow_new_devices,
): bool,
}
),
)
|
import unittest
import numpy as np
import pandas as pd
from qgrid import QgridWidget
class TestQgrid(unittest.TestCase):
def test_nans(self):
df = pd.DataFrame([(pd.Timestamp('2017-02-02'), np.nan),
(4, 2),
('foo', 'bar')])
view = QgridWidget(df=df)
self.assertIsNotNone(view.get_changed_df())
|
from homeassistant.components.air_quality import AirQualityEntity
from homeassistant.const import CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
from homeassistant.core import callback
from . import AirVisualEntity
from .const import (
CONF_INTEGRATION_TYPE,
DATA_COORDINATOR,
DOMAIN,
INTEGRATION_TYPE_NODE_PRO,
)
ATTR_HUMIDITY = "humidity"
ATTR_SENSOR_LIFE = "{0}_sensor_life"
ATTR_VOC = "voc"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up AirVisual air quality entities based on a config entry."""
# Geography-based AirVisual integrations don't utilize this platform:
if config_entry.data[CONF_INTEGRATION_TYPE] != INTEGRATION_TYPE_NODE_PRO:
return
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][config_entry.entry_id]
async_add_entities([AirVisualNodeProSensor(coordinator)], True)
class AirVisualNodeProSensor(AirVisualEntity, AirQualityEntity):
"""Define a sensor for a AirVisual Node/Pro."""
def __init__(self, airvisual):
"""Initialize."""
super().__init__(airvisual)
self._icon = "mdi:chemical-weapon"
self._unit = CONCENTRATION_MICROGRAMS_PER_CUBIC_METER
@property
def air_quality_index(self):
"""Return the Air Quality Index (AQI)."""
if self.coordinator.data["settings"]["is_aqi_usa"]:
return self.coordinator.data["measurements"]["aqi_us"]
return self.coordinator.data["measurements"]["aqi_cn"]
@property
def available(self):
"""Return True if entity is available."""
return bool(self.coordinator.data)
@property
def carbon_dioxide(self):
"""Return the CO2 (carbon dioxide) level."""
return self.coordinator.data["measurements"].get("co2")
@property
def device_info(self):
"""Return device registry information for this entity."""
return {
"identifiers": {(DOMAIN, self.coordinator.data["serial_number"])},
"name": self.coordinator.data["settings"]["node_name"],
"manufacturer": "AirVisual",
"model": f'{self.coordinator.data["status"]["model"]}',
"sw_version": (
f'Version {self.coordinator.data["status"]["system_version"]}'
f'{self.coordinator.data["status"]["app_version"]}'
),
}
@property
def name(self):
"""Return the name."""
node_name = self.coordinator.data["settings"]["node_name"]
return f"{node_name} Node/Pro: Air Quality"
@property
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self.coordinator.data["measurements"].get("pm2_5")
@property
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self.coordinator.data["measurements"].get("pm1_0")
@property
def particulate_matter_0_1(self):
"""Return the particulate matter 0.1 level."""
return self.coordinator.data["measurements"].get("pm0_1")
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return self.coordinator.data["serial_number"]
@callback
def update_from_latest_data(self):
"""Update the entity from the latest data."""
self._attrs.update(
{
ATTR_VOC: self.coordinator.data["measurements"].get("voc"),
**{
ATTR_SENSOR_LIFE.format(pollutant): lifespan
for pollutant, lifespan in self.coordinator.data["status"][
"sensor_life"
].items()
},
}
)
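# Worked example of the attribute mapping above (illustrative values): with
# coordinator.data["status"]["sensor_life"] == {"pm2_5": 3456},
# ATTR_SENSOR_LIFE.format("pm2_5") yields "pm2_5_sensor_life", so the entity
# ends up exposing {"voc": ..., "pm2_5_sensor_life": 3456} as state attributes.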
|
import pytest
from homeassistant.components.fan import FanEntity
class BaseFan(FanEntity):
"""Implementation of the abstract FanEntity."""
def __init__(self):
"""Initialize the fan."""
def test_fanentity():
"""Test fan entity methods."""
fan = BaseFan()
assert fan.state == "off"
assert len(fan.speed_list) == 0
assert fan.supported_features == 0
assert fan.capability_attributes == {}
# Test set_speed not required
fan.oscillate(True)
with pytest.raises(NotImplementedError):
fan.set_speed("slow")
with pytest.raises(NotImplementedError):
fan.turn_on()
with pytest.raises(NotImplementedError):
fan.turn_off()
|
from __future__ import unicode_literals
import itertools
from lib.data.data import pyoptions
from lib.fun.fun import finishprinter, countchecker, range_compatible, finalsavepath, fun_name
def get_char_dic(objflag):
storepath = finalsavepath(fun_name())
countchecker(len(objflag), pyoptions.minlen, pyoptions.maxlen)
with open(storepath, "a") as f:
for i in range_compatible(pyoptions.minlen, pyoptions.maxlen+1):
for item in itertools.product(objflag, repeat=i):
if item:
f.write(pyoptions.operator.get(pyoptions.encode)(pyoptions.head + "".join(item) + pyoptions.tail) +
pyoptions.CRLF)
finishprinter(storepath)
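# Illustrative output (assuming objflag="ab", minlen=1, maxlen=2 and the
# default head/tail/encode settings): the product loop above writes the
# candidates a, b, aa, ab, ba, bb to the generated wordlist, one per line.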
|
import os
import pytest
from molecule import util
from molecule.command import side_effect
@pytest.fixture
def _command_provisioner_section_with_side_effect_data():
return {
'provisioner': {
'name': 'ansible',
'playbooks': {
'side_effect': 'side_effect.yml',
},
}
}
@pytest.fixture
def _patched_ansible_side_effect(mocker):
return mocker.patch('molecule.provisioner.ansible.Ansible.side_effect')
# NOTE(retr0h): The use of the `patched_config_validate` fixture, disables
# config.Config._validate from executing. Thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.mark.parametrize(
'config_instance', ['_command_provisioner_section_with_side_effect_data'],
indirect=True)
def test_execute(mocker, _patched_ansible_side_effect, patched_logger_info,
patched_config_validate, config_instance):
pb = os.path.join(config_instance.scenario.directory, 'side_effect.yml')
util.write_file(pb, '')
se = side_effect.SideEffect(config_instance)
se.execute()
x = [
mocker.call("Scenario: 'default'"),
mocker.call("Action: 'side_effect'"),
]
assert x == patched_logger_info.mock_calls
_patched_ansible_side_effect.assert_called_once_with()
def test_execute_skips_when_playbook_not_configured(
patched_logger_warn, _patched_ansible_side_effect, config_instance):
se = side_effect.SideEffect(config_instance)
se.execute()
msg = 'Skipping, side effect playbook not configured.'
patched_logger_warn.assert_called_once_with(msg)
assert not _patched_ansible_side_effect.called
|
from gitless import core
from . import helpers, pprint
def parser(subparsers, repo):
"""Adds the checkout parser to the given subparsers object."""
desc = 'checkout committed versions of files'
checkout_parser = subparsers.add_parser(
'checkout', help=desc, description=desc.capitalize(), aliases=['co'])
checkout_parser.add_argument(
'-cp', '--commit-point', help=(
'the commit point to checkout the files at. Defaults to HEAD.'),
dest='cp', default='HEAD')
checkout_parser.add_argument(
'files', nargs='+', help='the file(s) to checkout',
action=helpers.PathProcessor, repo=repo, recursive=False)
checkout_parser.set_defaults(func=main)
def main(args, repo):
errors_found = False
curr_b = repo.current_branch
cp = args.cp
for fp in args.files:
conf_msg = (
        'You have uncommitted changes in "{0}" that could be overwritten by '
'checkout'.format(fp))
try:
f = curr_b.status_file(fp)
if f.type == core.GL_STATUS_TRACKED and f.modified and (
not pprint.conf_dialog(conf_msg)):
pprint.err('Checkout aborted')
continue
except KeyError:
pass
try:
curr_b.checkout_file(fp, repo.revparse_single(cp))
pprint.ok(
'File {0} checked out successfully to its state at {1}'.format(
fp, cp))
except core.PathIsDirectoryError:
commit = repo.revparse_single(cp)
for fp in curr_b.get_paths(fp, commit):
curr_b.checkout_file(fp, commit)
pprint.ok(
'File {0} checked out successfully to its state at {1}'.format(
fp, cp))
except KeyError:
pprint.err('Checkout aborted')
pprint.err('There\'s no file {0} at {1}'.format(fp, cp))
errors_found = True
return not errors_found
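# Illustrative invocation (made-up paths and revision): restore two files to
# their state two commits before HEAD.
#   gl checkout -cp HEAD~2 README.md src/module.py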
|
import datetime
import logging
import re
import socket
import threading
import time
from fritzconnection.lib.fritzphonebook import FritzPhonebook
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
CONF_PHONEBOOK = "phonebook"
CONF_PREFIXES = "prefixes"
DEFAULT_HOST = "169.254.1.1" # IP valid for all Fritz!Box routers
DEFAULT_NAME = "Phone"
DEFAULT_PORT = 1012
INTERVAL_RECONNECT = 60
VALUE_CALL = "dialing"
VALUE_CONNECT = "talking"
VALUE_DEFAULT = "idle"
VALUE_DISCONNECT = "idle"
VALUE_RING = "ringing"
# Return cached results if phonebook was downloaded less than this time ago.
MIN_TIME_PHONEBOOK_UPDATE = datetime.timedelta(hours=6)
SCAN_INTERVAL = datetime.timedelta(hours=3)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_PASSWORD, default="admin"): cv.string,
vol.Optional(CONF_USERNAME, default=""): cv.string,
vol.Optional(CONF_PHONEBOOK, default=0): cv.positive_int,
vol.Optional(CONF_PREFIXES, default=[]): vol.All(cv.ensure_list, [cv.string]),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up Fritz!Box call monitor sensor platform."""
name = config.get(CONF_NAME)
host = config.get(CONF_HOST)
# Try to resolve a hostname; if it is already an IP, it will be returned as-is
try:
host = socket.gethostbyname(host)
except OSError:
_LOGGER.error("Could not resolve hostname %s", host)
return
port = config.get(CONF_PORT)
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
phonebook_id = config.get(CONF_PHONEBOOK)
prefixes = config.get(CONF_PREFIXES)
try:
phonebook = FritzBoxPhonebook(
host=host,
port=port,
username=username,
password=password,
phonebook_id=phonebook_id,
prefixes=prefixes,
)
except: # noqa: E722 pylint: disable=bare-except
phonebook = None
_LOGGER.warning("Phonebook with ID %s not found on Fritz!Box", phonebook_id)
sensor = FritzBoxCallSensor(name=name, phonebook=phonebook)
add_entities([sensor])
monitor = FritzBoxCallMonitor(host=host, port=port, sensor=sensor)
monitor.connect()
def _stop_listener(_event):
monitor.stopped.set()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, _stop_listener)
return monitor.sock is not None
class FritzBoxCallSensor(Entity):
"""Implementation of a Fritz!Box call monitor."""
def __init__(self, name, phonebook):
"""Initialize the sensor."""
self._state = VALUE_DEFAULT
self._attributes = {}
self._name = name
self.phonebook = phonebook
def set_state(self, state):
"""Set the state."""
self._state = state
def set_attributes(self, attributes):
"""Set the state attributes."""
self._attributes = attributes
@property
def should_poll(self):
"""Only poll to update phonebook, if defined."""
return self.phonebook is not None
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._attributes
def number_to_name(self, number):
"""Return a name for a given phone number."""
if self.phonebook is None:
return "unknown"
return self.phonebook.get_name(number)
def update(self):
"""Update the phonebook if it is defined."""
if self.phonebook is not None:
self.phonebook.update_phonebook()
class FritzBoxCallMonitor:
"""Event listener to monitor calls on the Fritz!Box."""
def __init__(self, host, port, sensor):
"""Initialize Fritz!Box monitor instance."""
self.host = host
self.port = port
self.sock = None
self._sensor = sensor
self.stopped = threading.Event()
def connect(self):
"""Connect to the Fritz!Box."""
_LOGGER.debug("Setting up socket...")
self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.sock.settimeout(10)
self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
try:
self.sock.connect((self.host, self.port))
threading.Thread(target=self._listen).start()
except OSError as err:
self.sock = None
_LOGGER.error(
"Cannot connect to %s on port %s: %s", self.host, self.port, err
)
def _listen(self):
"""Listen to incoming or outgoing calls."""
_LOGGER.debug("Connection established, waiting for response...")
while not self.stopped.isSet():
try:
response = self.sock.recv(2048)
except socket.timeout:
# if no response after 10 seconds, just recv again
continue
response = str(response, "utf-8")
_LOGGER.debug("Received %s", response)
if not response:
# if the response is empty, the connection has been lost.
# try to reconnect
_LOGGER.warning("Connection lost, reconnecting...")
self.sock = None
while self.sock is None:
self.connect()
time.sleep(INTERVAL_RECONNECT)
else:
line = response.split("\n", 1)[0]
self._parse(line)
time.sleep(1)
def _parse(self, line):
"""Parse the call information and set the sensor states."""
line = line.split(";")
df_in = "%d.%m.%y %H:%M:%S"
df_out = "%Y-%m-%dT%H:%M:%S"
isotime = datetime.datetime.strptime(line[0], df_in).strftime(df_out)
if line[1] == "RING":
self._sensor.set_state(VALUE_RING)
att = {
"type": "incoming",
"from": line[3],
"to": line[4],
"device": line[5],
"initiated": isotime,
}
att["from_name"] = self._sensor.number_to_name(att["from"])
self._sensor.set_attributes(att)
elif line[1] == "CALL":
self._sensor.set_state(VALUE_CALL)
att = {
"type": "outgoing",
"from": line[4],
"to": line[5],
"device": line[6],
"initiated": isotime,
}
att["to_name"] = self._sensor.number_to_name(att["to"])
self._sensor.set_attributes(att)
elif line[1] == "CONNECT":
self._sensor.set_state(VALUE_CONNECT)
att = {"with": line[4], "device": line[3], "accepted": isotime}
att["with_name"] = self._sensor.number_to_name(att["with"])
self._sensor.set_attributes(att)
elif line[1] == "DISCONNECT":
self._sensor.set_state(VALUE_DISCONNECT)
att = {"duration": line[3], "closed": isotime}
self._sensor.set_attributes(att)
self._sensor.schedule_update_ha_state()
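# Illustrative call monitor line, consistent with the field indices parsed in
# _parse above (all values are made up):
#   "17.01.20 10:23:45;RING;0;0301234567;987654;SIP0;"
# -> sensor state "ringing" with type/from/to/device/initiated attributes plus
#    "from_name" resolved through the phone book.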
class FritzBoxPhonebook:
"""This connects to a FritzBox router and downloads its phone book."""
def __init__(self, host, port, username, password, phonebook_id=0, prefixes=None):
"""Initialize the class."""
self.host = host
self.username = username
self.password = password
self.port = port
self.phonebook_id = phonebook_id
self.phonebook_dict = None
self.number_dict = None
self.prefixes = prefixes or []
# Establish a connection to the FRITZ!Box.
self.fph = FritzPhonebook(
address=self.host, user=self.username, password=self.password
)
if self.phonebook_id not in self.fph.list_phonebooks:
raise ValueError("Phonebook with this ID not found.")
self.update_phonebook()
@Throttle(MIN_TIME_PHONEBOOK_UPDATE)
def update_phonebook(self):
"""Update the phone book dictionary."""
self.phonebook_dict = self.fph.get_all_names(self.phonebook_id)
self.number_dict = {
re.sub(r"[^\d\+]", "", nr): name
for name, nrs in self.phonebook_dict.items()
for nr in nrs
}
_LOGGER.info("Fritz!Box phone book successfully updated")
def get_name(self, number):
"""Return a name for a given phone number."""
number = re.sub(r"[^\d\+]", "", str(number))
if self.number_dict is None:
return "unknown"
try:
return self.number_dict[number]
except KeyError:
pass
if self.prefixes:
for prefix in self.prefixes:
try:
return self.number_dict[prefix + number]
except KeyError:
pass
try:
return self.number_dict[prefix + number.lstrip("0")]
except KeyError:
pass
return "unknown"
|
from Handler import Handler
import socket
import time
class GraphiteHandler(Handler):
"""
Implements the abstract Handler class, sending data to graphite
"""
def __init__(self, config=None):
"""
Create a new instance of the GraphiteHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Data
self.socket = None
# Initialize Options
self.proto = self.config['proto'].lower().strip()
self.host = self.config['host']
self.port = int(self.config['port'])
self.timeout = float(self.config['timeout'])
self.keepalive = bool(self.config['keepalive'])
self.keepaliveinterval = int(self.config['keepaliveinterval'])
self.batch_size = int(self.config['batch'])
self.max_backlog_multiplier = int(
self.config['max_backlog_multiplier'])
self.trim_backlog_multiplier = int(
self.config['trim_backlog_multiplier'])
self.flow_info = self.config['flow_info']
self.scope_id = self.config['scope_id']
self.metrics = []
self.reconnect_interval = int(self.config['reconnect_interval'])
self.last_connect_timestamp = -1
# Connect
self._connect()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(GraphiteHandler, self).get_default_config_help()
config.update({
'host': 'Hostname',
'port': 'Port',
'proto': 'udp, udp4, udp6, tcp, tcp4, or tcp6',
'timeout': '',
'batch': 'How many to store before sending to the graphite server',
'max_backlog_multiplier': 'how many batches to store before trimming', # NOQA
            'trim_backlog_multiplier': 'How many batches to keep when trimming the backlog',
'keepalive': 'Enable keepalives for tcp streams',
'keepaliveinterval': 'How frequently to send keepalives',
'flow_info': 'IPv6 Flow Info',
'scope_id': 'IPv6 Scope ID',
'reconnect_interval': 'How often (seconds) to reconnect to '
'graphite. Default (0) is never',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(GraphiteHandler, self).get_default_config()
config.update({
'host': 'localhost',
'port': 2003,
'proto': 'tcp',
'timeout': 15,
'batch': 1,
'max_backlog_multiplier': 5,
'trim_backlog_multiplier': 4,
'keepalive': 0,
'keepaliveinterval': 10,
'flow_info': 0,
'scope_id': 0,
'reconnect_interval': 0,
})
return config
def __del__(self):
"""
Destroy instance of the GraphiteHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric by sending it to graphite
"""
# Append the data to the array as a string
self.metrics.append(str(metric))
if len(self.metrics) >= self.batch_size:
self._send()
def flush(self):
"""Flush metrics in queue"""
self._send()
def _send_data(self, data):
"""
Try to send all data in buffer.
"""
try:
self.socket.sendall(data)
self._reset_errors()
except:
self._close()
self._throttle_error("GraphiteHandler: Socket error, "
"trying reconnect.")
self._connect()
try:
self.socket.sendall(data)
except:
return
self._reset_errors()
def _time_to_reconnect(self):
if self.reconnect_interval > 0:
if time.time() > (
self.last_connect_timestamp + self.reconnect_interval):
return True
return False
def _send(self):
"""
Send data to graphite. Data that can not be sent will be queued.
"""
# Check to see if we have a valid socket. If not, try to connect.
try:
try:
if self.socket is None:
self.log.debug("GraphiteHandler: Socket is not connected. "
"Reconnecting.")
self._connect()
if self.socket is None:
self.log.debug("GraphiteHandler: Reconnect failed.")
else:
# Send data to socket
self._send_data(''.join(self.metrics))
self.metrics = []
if self._time_to_reconnect():
self._close()
except Exception:
self._close()
self._throttle_error("GraphiteHandler: Error sending metrics.")
raise
finally:
if len(self.metrics) >= (
self.batch_size * self.max_backlog_multiplier):
trim_offset = (self.batch_size *
self.trim_backlog_multiplier * -1)
self.log.warn('GraphiteHandler: Trimming backlog. Removing' +
' oldest %d and keeping newest %d metrics',
len(self.metrics) - abs(trim_offset),
abs(trim_offset))
self.metrics = self.metrics[trim_offset:]
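    # Worked example of the backlog trimming above, using the defaults
    # (batch=1, max_backlog_multiplier=5, trim_backlog_multiplier=4): once the
    # queue holds 1 * 5 = 5 or more metrics because graphite is unreachable,
    # only the newest 1 * 4 = 4 metrics are kept and the older ones dropped.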
def _connect(self):
"""
Connect to the graphite server
"""
if (self.proto == 'udp'):
stream = socket.SOCK_DGRAM
else:
stream = socket.SOCK_STREAM
if (self.proto[-1] == '4'):
family = socket.AF_INET
connection_struct = (self.host, self.port)
elif (self.proto[-1] == '6'):
family = socket.AF_INET6
connection_struct = (self.host, self.port,
self.flow_info, self.scope_id)
else:
connection_struct = (self.host, self.port)
try:
addrinfo = socket.getaddrinfo(self.host, self.port, 0, stream)
except socket.gaierror as ex:
self.log.error("GraphiteHandler: Error looking up graphite host"
" '%s' - %s",
self.host, ex)
return
if (len(addrinfo) > 0):
family = addrinfo[0][0]
if (family == socket.AF_INET6):
connection_struct = (self.host, self.port,
self.flow_info, self.scope_id)
else:
family = socket.AF_INET
# Create socket
self.socket = socket.socket(family, stream)
if self.socket is None:
# Log Error
self.log.error("GraphiteHandler: Unable to create socket.")
# Close Socket
self._close()
return
# Enable keepalives?
if self.proto != 'udp' and self.keepalive:
self.log.error("GraphiteHandler: Setting socket keepalives...")
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
self.keepaliveinterval)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPINTVL,
self.keepaliveinterval)
self.socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPCNT, 3)
# Set socket timeout
self.socket.settimeout(self.timeout)
# Connect to graphite server
try:
self.socket.connect(connection_struct)
# Log
self.log.debug("GraphiteHandler: Established connection to "
"graphite server %s:%d.",
self.host, self.port)
self.last_connect_timestamp = time.time()
except Exception as ex:
# Log Error
self._throttle_error("GraphiteHandler: Failed to connect to "
"%s:%i. %s.", self.host, self.port, ex)
# Close Socket
self._close()
return
def _close(self):
"""
Close the socket
"""
if self.socket is not None:
self.socket.close()
self.socket = None
|
import numpy as np
import unittest
import chainer
from chainer.datasets import TupleDataset
from chainer.iterators import SerialIterator
from chainer import testing
from chainercv.extensions import DetectionVOCEvaluator
from chainercv.utils import generate_random_bbox
from chainercv.utils.testing import attr
from chainermn import create_communicator
class _DetectionStubLink(chainer.Link):
def __init__(self, bboxes, labels, initial_count=0):
super(_DetectionStubLink, self).__init__()
self.count = initial_count
self.bboxes = bboxes
self.labels = labels
def predict(self, imgs):
n_img = len(imgs)
bboxes = self.bboxes[self.count:self.count + n_img]
labels = self.labels[self.count:self.count + n_img]
scores = [np.ones_like(l) for l in labels]
self.count += n_img
return bboxes, labels, scores
class TestDetectionVOCEvaluator(unittest.TestCase):
def setUp(self):
bboxes = [generate_random_bbox(5, (256, 324), 24, 120)
for _ in range(10)]
labels = np.ones((10, 5))
self.dataset = TupleDataset(
np.random.uniform(size=(10, 3, 32, 48)),
bboxes,
labels)
self.link = _DetectionStubLink(bboxes, labels)
self.iterator = SerialIterator(
self.dataset, 5, repeat=False, shuffle=False)
self.evaluator = DetectionVOCEvaluator(
self.iterator, self.link, label_names=('cls0', 'cls1', 'cls2'))
self.expected_ap = 1
def test_evaluate(self):
reporter = chainer.Reporter()
reporter.add_observer('target', self.link)
with reporter:
mean = self.evaluator.evaluate()
# No observation is reported to the current reporter. Instead the
# evaluator collect results in order to calculate their mean.
self.assertEqual(len(reporter.observation), 0)
np.testing.assert_equal(mean['target/map'], self.expected_ap)
np.testing.assert_equal(mean['target/ap/cls0'], np.nan)
np.testing.assert_equal(mean['target/ap/cls1'], self.expected_ap)
np.testing.assert_equal(mean['target/ap/cls2'], np.nan)
def test_call(self):
mean = self.evaluator()
# main is used as default
np.testing.assert_equal(mean['main/map'], self.expected_ap)
np.testing.assert_equal(mean['main/ap/cls0'], np.nan)
np.testing.assert_equal(mean['main/ap/cls1'], self.expected_ap)
np.testing.assert_equal(mean['main/ap/cls2'], np.nan)
def test_evaluator_name(self):
self.evaluator.name = 'eval'
mean = self.evaluator()
# name is used as a prefix
np.testing.assert_equal(mean['eval/main/map'], self.expected_ap)
np.testing.assert_equal(mean['eval/main/ap/cls0'], np.nan)
np.testing.assert_equal(mean['eval/main/ap/cls1'], self.expected_ap)
np.testing.assert_equal(mean['eval/main/ap/cls2'], np.nan)
def test_current_report(self):
reporter = chainer.Reporter()
with reporter:
mean = self.evaluator()
# The result is reported to the current reporter.
self.assertEqual(reporter.observation, mean)
@attr.mpi
class TestDetectionVOCEvaluatorMPI(unittest.TestCase):
def setUp(self):
self.comm = create_communicator('naive')
batchsize_per_process = 5
batchsize = batchsize_per_process * self.comm.size
if self.comm.rank == 0:
bboxes = [generate_random_bbox(5, (256, 324), 24, 120)
for _ in range(10)]
labels = [np.random.choice(np.arange(3, dtype=np.int32), size=(5,))
for _ in range(10)]
else:
bboxes = None
labels = None
initial_count = self.comm.rank * batchsize_per_process
bboxes = self.comm.bcast_obj(bboxes)
labels = self.comm.bcast_obj(labels)
self.bboxes = bboxes
self.labels = labels
self.dataset = TupleDataset(
np.random.uniform(size=(10, 3, 32, 48)),
bboxes, labels)
self.initial_count = initial_count
self.batchsize = batchsize
def test_consistency(self):
reporter = chainer.Reporter()
if self.comm.rank == 0:
multi_iterator = SerialIterator(
self.dataset, self.batchsize, repeat=False, shuffle=False)
else:
multi_iterator = None
multi_link = _DetectionStubLink(
self.bboxes, self.labels, self.initial_count)
multi_evaluator = DetectionVOCEvaluator(
multi_iterator, multi_link,
label_names=('cls0', 'cls1', 'cls2'),
comm=self.comm)
reporter.add_observer('target', multi_link)
with reporter:
multi_mean = multi_evaluator.evaluate()
if self.comm.rank != 0:
self.assertEqual(multi_mean, {})
return
single_iterator = SerialIterator(
self.dataset, self.batchsize, repeat=False, shuffle=False)
single_link = _DetectionStubLink(
self.bboxes, self.labels)
single_evaluator = DetectionVOCEvaluator(
single_iterator, single_link,
label_names=('cls0', 'cls1', 'cls2'))
reporter.add_observer('target', single_link)
with reporter:
single_mean = single_evaluator.evaluate()
self.assertEqual(set(multi_mean.keys()), set(single_mean.keys()))
for key in multi_mean.keys():
np.testing.assert_equal(single_mean[key], multi_mean[key])
testing.run_module(__name__, __file__)
|
import os.path as op
import numpy as np
from numpy.testing import assert_allclose
import pytest
from mne import Epochs, read_evokeds, pick_types
from mne.io.compensator import make_compensator, get_current_comp
from mne.io import read_raw_fif
from mne.utils import requires_mne, run_subprocess, run_tests_if_main
base_dir = op.join(op.dirname(__file__), 'data')
ctf_comp_fname = op.join(base_dir, 'test_ctf_comp_raw.fif')
def test_compensation_identity():
"""Test compensation identity."""
raw = read_raw_fif(ctf_comp_fname)
assert get_current_comp(raw.info) == 3
comp1 = make_compensator(raw.info, 3, 1, exclude_comp_chs=False)
assert comp1.shape == (340, 340)
comp2 = make_compensator(raw.info, 3, 1, exclude_comp_chs=True)
assert comp2.shape == (311, 340)
# round-trip
desired = np.eye(340)
for from_ in range(3):
for to in range(3):
if from_ == to:
continue
comp1 = make_compensator(raw.info, from_, to)
comp2 = make_compensator(raw.info, to, from_)
# To get 1e-12 here (instead of 1e-6) we must use the linalg.inv
# method mentioned in compensator.py
assert_allclose(np.dot(comp1, comp2), desired, atol=1e-12)
assert_allclose(np.dot(comp2, comp1), desired, atol=1e-12)
@pytest.mark.parametrize('preload', (True, False))
@pytest.mark.parametrize('pick', (False, True))
def test_compensation_apply(tmpdir, preload, pick):
"""Test applying compensation."""
# make sure that changing the comp doesn't modify the original data
raw = read_raw_fif(ctf_comp_fname, preload=preload)
assert raw._comp is None
raw2 = raw.copy()
raw2.apply_gradient_compensation(2)
if pick:
raw2.pick([0] + list(range(2, len(raw.ch_names))))
raw.pick([0] + list(range(2, len(raw.ch_names))))
assert get_current_comp(raw2.info) == 2
if preload:
assert raw2._comp is None
else:
assert raw2._comp.shape == (len(raw2.ch_names),) * 2
fname = op.join(tmpdir, 'ctf-raw.fif')
raw2.save(fname)
raw2 = read_raw_fif(fname)
assert raw2.compensation_grade == 2
raw2.apply_gradient_compensation(3)
assert raw2.compensation_grade == 3
data, _ = raw[:, :]
data2, _ = raw2[:, :]
# channels have norm ~1e-12
assert_allclose(data, data2, rtol=1e-9, atol=1e-18)
for ch1, ch2 in zip(raw.info['chs'], raw2.info['chs']):
assert ch1['coil_type'] == ch2['coil_type']
@requires_mne
def test_compensation_mne(tmpdir):
"""Test comensation by comparing with MNE."""
def make_evoked(fname, comp):
"""Make evoked data."""
raw = read_raw_fif(fname)
if comp is not None:
raw.apply_gradient_compensation(comp)
picks = pick_types(raw.info, meg=True, ref_meg=True)
events = np.array([[0, 0, 1]], dtype=np.int64)
evoked = Epochs(raw, events, 1, 0, 20e-3, picks=picks,
baseline=None).average()
return evoked
def compensate_mne(fname, comp):
"""Compensate using MNE-C."""
tmp_fname = '%s-%d-ave.fif' % (fname[:-4], comp)
cmd = ['mne_compensate_data', '--in', fname,
'--out', tmp_fname, '--grad', str(comp)]
run_subprocess(cmd)
return read_evokeds(tmp_fname)[0]
# save evoked response with default compensation
fname_default = op.join(tmpdir, 'ctf_default-ave.fif')
make_evoked(ctf_comp_fname, None).save(fname_default)
for comp in [0, 1, 2, 3]:
evoked_py = make_evoked(ctf_comp_fname, comp)
evoked_c = compensate_mne(fname_default, comp)
picks_py = pick_types(evoked_py.info, meg=True, ref_meg=True)
picks_c = pick_types(evoked_c.info, meg=True, ref_meg=True)
assert_allclose(evoked_py.data[picks_py], evoked_c.data[picks_c],
rtol=1e-3, atol=1e-17)
chs_py = [evoked_py.info['chs'][ii] for ii in picks_py]
chs_c = [evoked_c.info['chs'][ii] for ii in picks_c]
for ch_py, ch_c in zip(chs_py, chs_c):
assert ch_py['coil_type'] == ch_c['coil_type']
run_tests_if_main()
|
from django.contrib.auth import logout, get_user_model
from django.contrib.auth.models import AnonymousUser
from django.contrib.auth.tokens import default_token_generator
from django.core.exceptions import NON_FIELD_ERRORS
from django.utils.encoding import force_str
from django.utils.translation import gettext_lazy as _
from rest_framework import status
from rest_framework.views import APIView
from rest_framework.exceptions import ErrorDetail, ValidationError
from rest_framework.generics import GenericAPIView
from rest_framework.permissions import AllowAny
from rest_framework.renderers import JSONRenderer, BrowsableAPIRenderer
from rest_framework.response import Response
from rest_framework.settings import api_settings
from rest_auth.views import LoginView as OriginalLoginView, PasswordChangeView as OriginalPasswordChangeView
from shop.models.cart import CartModel
from shop.models.customer import CustomerModel
from shop.rest.renderers import CMSPageRenderer
from shop.serializers.auth import PasswordResetRequestSerializer, PasswordResetConfirmSerializer
from shop.signals import email_queued
class AuthFormsView(GenericAPIView):
"""
Generic view to handle authentication related forms such as user registration
"""
serializer_class = None
form_class = None
def post(self, request, *args, **kwargs):
if request.customer.is_visitor:
customer = CustomerModel.objects.get_or_create_from_request(request)
else:
customer = request.customer
form_data = request.data.get(self.form_class.scope_prefix, {})
form = self.form_class(data=form_data, instance=customer)
if form.is_valid():
form.save(request=request)
response_data = {form.form_name: {
'success_message': _("Successfully registered yourself."),
}}
return Response(response_data, status=status.HTTP_200_OK)
errors = dict(form.errors)
if 'email' in errors:
errors.update({NON_FIELD_ERRORS: errors.pop('email')})
return Response({form.form_name: errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
class LoginView(OriginalLoginView):
form_name = 'login_form'
def login(self):
"""
Logs in as the given user, and moves the items from the current to the new cart.
"""
try:
anonymous_cart = CartModel.objects.get_from_request(self.request)
except CartModel.DoesNotExist:
anonymous_cart = None
if self.request.customer.user.is_anonymous or self.request.customer.is_registered:
previous_user = None
else:
previous_user = self.request.customer.user
super().login() # this rotates the session_key
if not self.serializer.data.get('stay_logged_in'):
self.request.session.set_expiry(0) # log out when the browser is closed
authenticated_cart = CartModel.objects.get_from_request(self.request)
if anonymous_cart:
# an anonymous customer logged in, now merge his current cart with a cart,
# which previously might have been created under his account.
authenticated_cart.merge_with(anonymous_cart)
if previous_user and previous_user.is_active is False and previous_user != self.request.user:
# keep the database clean and remove this anonymous entity
if previous_user.customer.orders.count() == 0:
previous_user.delete()
def post(self, request, *args, **kwargs):
self.request = request
if request.user.is_anonymous:
form_data = request.data.get('form_data', {})
self.serializer = self.get_serializer(data=form_data)
if self.serializer.is_valid():
self.login()
return self.get_response()
exc = ValidationError({self.form_name: self.serializer.errors})
else:
message = ErrorDetail("Please log out before signing in again.")
exc = ValidationError({self.form_name: {api_settings.NON_FIELD_ERRORS_KEY: [message]}})
response = self.handle_exception(exc)
self.response = self.finalize_response(request, response, *args, **kwargs)
return self.response
class LogoutView(APIView):
"""
Calls Django logout method and delete the auth Token assigned to the current User object.
"""
permission_classes = (AllowAny,)
form_name = 'logout_form'
def post(self, request):
try:
request.user.auth_token.delete()
except:
pass
logout(request)
request.user = AnonymousUser()
response_data = {self.form_name: {'success_message': _("Successfully logged out.")}}
return Response(response_data)
class PasswordChangeView(OriginalPasswordChangeView):
form_name = 'password_change_form'
def post(self, request, *args, **kwargs):
form_data = request.data.get('form_data', {})
serializer = self.get_serializer(data=form_data)
if serializer.is_valid():
serializer.save()
response_data = {self.form_name: {
'success_message': _("Password has been changed successfully."),
}}
return Response(response_data)
return Response({self.form_name: serializer.errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
class PasswordResetRequestView(GenericAPIView):
"""
Calls Django Auth PasswordResetRequestForm save method.
Accepts the following POST parameters: email
Returns the success/fail message.
"""
serializer_class = PasswordResetRequestSerializer
permission_classes = (AllowAny,)
form_name = 'password_reset_request_form'
def post(self, request, *args, **kwargs):
form_data = request.data.get('form_data', {})
serializer = self.get_serializer(data=form_data)
if not serializer.is_valid():
return Response({self.form_name: serializer.errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
# send email containing a reset link
serializer.save()
# trigger async email queue
email_queued()
# Return the success message with OK HTTP status
msg = _("Instructions on how to reset the password have been sent to '{email}'.")
response_data = {self.form_name: {
'success_message': msg.format(**serializer.data),
}}
return Response(response_data)
class PasswordResetConfirmView(GenericAPIView):
"""
Password reset e-mail link points onto a CMS page with the Page ID = 'password-reset-confirm'.
This page then shall render the CMS plugin as provided by the **ShopAuthenticationPlugin** using
the form "Confirm Password Reset".
"""
renderer_classes = (CMSPageRenderer, JSONRenderer, BrowsableAPIRenderer)
serializer_class = PasswordResetConfirmSerializer
permission_classes = (AllowAny,)
token_generator = default_token_generator
form_name = 'password_reset_confirm_form'
def get(self, request, uidb64=None, token=None):
data = {'uid': uidb64, 'token': token}
serializer_class = self.get_serializer_class()
password = get_user_model().objects.make_random_password()
data.update(new_password1=password, new_password2=password)
serializer = serializer_class(data=data, context=self.get_serializer_context())
if not serializer.is_valid():
return Response({'validlink': False})
return Response({
'validlink': True,
'user_name': force_str(serializer.user),
'form_name': 'password_reset_form',
})
def post(self, request, uidb64=None, token=None):
try:
data = dict(request.data['form_data'], uid=uidb64, token=token)
except (KeyError, TypeError, ValueError):
errors = {'non_field_errors': [_("Invalid POST data.")]}
else:
serializer = self.get_serializer(data=data)
if serializer.is_valid():
serializer.save()
response_data = {self.form_name: {
'success_message': _("Password has been reset with the new password."),
}}
return Response(response_data)
else:
errors = serializer.errors
return Response({self.form_name: errors}, status=status.HTTP_422_UNPROCESSABLE_ENTITY)
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# EC2 provides unique random hostnames.
def test_hostname(host):
pass
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
filename = '/etc/molecule/{}'.format(host.check_output('hostname -s'))
f = host.file(filename)
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
import math
from numpy import array, asarray
from numpy.random import randn
import matplotlib.pyplot as plt
from filterpy.kalman import UnscentedKalmanFilter as UKF
from filterpy.kalman import ScaledUnscentedKalmanFilter as SUKF
from filterpy.common import Q_discrete_white_noise
""" This is an example of the bearing only problem. You have a platform,
usually a ship, that can only get the bearing to a moving target. Assuming
platform is stationary, this is a very difficult problem because there are
an infinite number of solutions. The literature is filled with this example,
along with proposed solutions (usually, the platform makes maneuvers).
This is very old code; it no longer runs due to changes in the UKF
"""
dt = 0.1
y = 20
platform_pos=(0,20)
sf = SUKF(2, 1, dt, alpha=1.e-4, beta=2., kappa=1.)
sf.Q = Q_discrete_white_noise(2, dt, .1)
f = UKF(2, 1, dt, kappa=0.)
f.Q = Q_discrete_white_noise(2, dt, .1)
def fx(x,dt):
""" state transition function"""
# pos = pos + vel
# vel = vel
return array([x[0]+x[1], x[1]])
def hx(x):
""" measurement function - convert position to bearing"""
return math.atan2(platform_pos[1],x[0]-platform_pos[0])
xs_scaled = []
xs = []
for i in range(300):
angle = hx([i+randn()*.1, 0]) + randn()
sf.update(angle, hx, fx)
xs_scaled.append(sf.x)
f.predict(fx)
f.update(angle, hx)
xs.append(f.x)
xs_scaled = asarray(xs_scaled)
xs = asarray(xs)
plt.subplot(211)
plt.plot(xs_scaled[:,0],label='scaled')
plt.plot(xs[:,0], label='Julier')
plt.legend(loc=4)
plt.subplot(212)
plt.plot(xs_scaled[:,1],label='scaled')
plt.plot(xs[:,1], label='Julier')
plt.legend(loc=4)
plt.show()
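# The note above says this script predates the current UKF interface. A
# minimal sketch of the same filter using today's filterpy API (sigma points
# passed explicitly, fx/hx given to the constructor) might look like:
#
#   from filterpy.kalman import MerweScaledSigmaPoints
#   points = MerweScaledSigmaPoints(n=2, alpha=1.e-4, beta=2., kappa=1.)
#   ukf = UKF(dim_x=2, dim_z=1, dt=dt, hx=hx, fx=fx, points=points)
#   ukf.Q = Q_discrete_white_noise(2, dt, .1)
#   for z in bearings:      # bearings: the measured angles, as generated above
#       ukf.predict()
#       ukf.update(z)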
|
from __future__ import absolute_import
from pyVmomi.VmomiSupport import nsMap, versionMap, versionIdMap, serviceNsMap, parentMap
## Add an API version
def AddVersion(version, ns, versionId='', isLegacy=0, serviceNs=''):
if not ns:
ns = serviceNs
if version not in parentMap:
nsMap[version] = ns
if len(versionId) > 0:
versionMap[ns + '/' + versionId] = version
if isLegacy or ns is "":
versionMap[ns] = version
versionIdMap[version] = versionId
if not serviceNs:
serviceNs = ns
serviceNsMap[version] = serviceNs
parentMap[version] = set()
## Check if a version is a child of another
def IsChildVersion(child, parent):
return child == parent or parent in parentMap[child]
|
import logging
import secrets
import threading
from types import MappingProxyType
import voluptuous as vol
from homeassistant.const import CONF_FILENAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
import homeassistant.helpers.config_validation as cv
from homeassistant.loader import bind_hass
from .const import (
ATTR_ENDPOINTS,
ATTR_STREAMS,
CONF_DURATION,
CONF_LOOKBACK,
CONF_STREAM_SOURCE,
DOMAIN,
MAX_SEGMENTS,
SERVICE_RECORD,
)
from .core import PROVIDERS
from .hls import async_setup_hls
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema({DOMAIN: vol.Schema({})}, extra=vol.ALLOW_EXTRA)
STREAM_SERVICE_SCHEMA = vol.Schema({vol.Required(CONF_STREAM_SOURCE): cv.string})
SERVICE_RECORD_SCHEMA = STREAM_SERVICE_SCHEMA.extend(
{
vol.Required(CONF_FILENAME): cv.string,
vol.Optional(CONF_DURATION, default=30): int,
vol.Optional(CONF_LOOKBACK, default=0): int,
}
)
@bind_hass
def request_stream(hass, stream_source, *, fmt="hls", keepalive=False, options=None):
"""Set up stream with token."""
if DOMAIN not in hass.config.components:
raise HomeAssistantError("Stream integration is not set up.")
if options is None:
options = {}
# For RTSP streams, prefer TCP
if isinstance(stream_source, str) and stream_source[:7] == "rtsp://":
options = {
"rtsp_flags": "prefer_tcp",
"stimeout": "5000000",
**options,
}
try:
streams = hass.data[DOMAIN][ATTR_STREAMS]
stream = streams.get(stream_source)
if not stream:
stream = Stream(hass, stream_source, options=options, keepalive=keepalive)
streams[stream_source] = stream
else:
# Update keepalive option on existing stream
stream.keepalive = keepalive
# Add provider
stream.add_provider(fmt)
if not stream.access_token:
stream.access_token = secrets.token_hex()
stream.start()
return hass.data[DOMAIN][ATTR_ENDPOINTS][fmt].format(stream.access_token)
except Exception as err:
raise HomeAssistantError("Unable to get stream") from err
async def async_setup(hass, config):
"""Set up stream."""
# Set log level to error for libav
logging.getLogger("libav").setLevel(logging.ERROR)
logging.getLogger("libav.mp4").setLevel(logging.ERROR)
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .recorder import async_setup_recorder
hass.data[DOMAIN] = {}
hass.data[DOMAIN][ATTR_ENDPOINTS] = {}
hass.data[DOMAIN][ATTR_STREAMS] = {}
# Setup HLS
hls_endpoint = async_setup_hls(hass)
hass.data[DOMAIN][ATTR_ENDPOINTS]["hls"] = hls_endpoint
# Setup Recorder
async_setup_recorder(hass)
@callback
def shutdown(event):
"""Stop all stream workers."""
for stream in hass.data[DOMAIN][ATTR_STREAMS].values():
stream.keepalive = False
stream.stop()
_LOGGER.info("Stopped stream workers")
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, shutdown)
async def async_record(call):
"""Call record stream service handler."""
await async_handle_record_service(hass, call)
hass.services.async_register(
DOMAIN, SERVICE_RECORD, async_record, schema=SERVICE_RECORD_SCHEMA
)
return True
class Stream:
"""Represents a single stream."""
def __init__(self, hass, source, options=None, keepalive=False):
"""Initialize a stream."""
self.hass = hass
self.source = source
self.options = options
self.keepalive = keepalive
self.access_token = None
self._thread = None
self._thread_quit = None
self._outputs = {}
if self.options is None:
self.options = {}
@property
def outputs(self):
"""Return a copy of the stream outputs."""
# A copy is returned so the caller can iterate through the outputs
# without concern about self._outputs being modified from another thread.
return MappingProxyType(self._outputs.copy())
def add_provider(self, fmt):
"""Add provider output stream."""
if not self._outputs.get(fmt):
provider = PROVIDERS[fmt](self)
self._outputs[fmt] = provider
return self._outputs[fmt]
def remove_provider(self, provider):
"""Remove provider output stream."""
if provider.name in self._outputs:
del self._outputs[provider.name]
self.check_idle()
if not self._outputs:
self.stop()
def check_idle(self):
"""Reset access token if all providers are idle."""
        if all(p.idle for p in self._outputs.values()):
self.access_token = None
def start(self):
"""Start a stream."""
# Keep import here so that we can import stream integration without installing reqs
# pylint: disable=import-outside-toplevel
from .worker import stream_worker
if self._thread is None or not self._thread.is_alive():
if self._thread is not None:
# The thread must have crashed/exited. Join to clean up the
# previous thread.
self._thread.join(timeout=0)
self._thread_quit = threading.Event()
self._thread = threading.Thread(
name="stream_worker",
target=stream_worker,
args=(self.hass, self, self._thread_quit),
)
self._thread.start()
_LOGGER.info("Started stream: %s", self.source)
def stop(self):
"""Remove outputs and access token."""
self._outputs = {}
self.access_token = None
if not self.keepalive:
self._stop()
def _stop(self):
"""Stop worker thread."""
if self._thread is not None:
self._thread_quit.set()
self._thread.join()
self._thread = None
_LOGGER.info("Stopped stream: %s", self.source)
async def async_handle_record_service(hass, call):
"""Handle save video service calls."""
stream_source = call.data[CONF_STREAM_SOURCE]
video_path = call.data[CONF_FILENAME]
duration = call.data[CONF_DURATION]
lookback = call.data[CONF_LOOKBACK]
# Check for file access
if not hass.config.is_allowed_path(video_path):
raise HomeAssistantError(f"Can't write {video_path}, no access to path!")
# Check for active stream
streams = hass.data[DOMAIN][ATTR_STREAMS]
stream = streams.get(stream_source)
if not stream:
stream = Stream(hass, stream_source)
streams[stream_source] = stream
# Add recorder
recorder = stream.outputs.get("recorder")
if recorder:
raise HomeAssistantError(f"Stream already recording to {recorder.video_path}!")
recorder = stream.add_provider("recorder")
recorder.video_path = video_path
recorder.timeout = duration
stream.start()
# Take advantage of lookback
hls = stream.outputs.get("hls")
if lookback > 0 and hls:
num_segments = min(int(lookback // hls.target_duration), MAX_SEGMENTS)
# Wait for latest segment, then add the lookback
await hls.recv()
recorder.prepend(list(hls.get_segment())[-num_segments:])
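# Illustrative service-call sketch (assumptions: the stream integration is set up and
# the target path is allowed by the Home Assistant path allowlist; the source URL and
# filename are hypothetical placeholders):
#
#     await hass.services.async_call(
#         DOMAIN,
#         SERVICE_RECORD,
#         {
#             CONF_STREAM_SOURCE: "rtsp://192.168.1.10/live",
#             CONF_FILENAME: "/tmp/clip.mp4",
#             CONF_DURATION: 10,
#         },
#         blocking=True,
#     )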
|
import binascii
import math
import os
import sys
from datetime import timedelta
from hashlib import sha256
import vobject
from radicale import pathutils
from radicale.item import filter as radicale_filter
from radicale.log import logger
def predict_tag_of_parent_collection(vobject_items):
if len(vobject_items) != 1:
return ""
if vobject_items[0].name == "VCALENDAR":
return "VCALENDAR"
if vobject_items[0].name in ("VCARD", "VLIST"):
return "VADDRESSBOOK"
return ""
def predict_tag_of_whole_collection(vobject_items, fallback_tag=None):
if vobject_items and vobject_items[0].name == "VCALENDAR":
return "VCALENDAR"
if vobject_items and vobject_items[0].name in ("VCARD", "VLIST"):
return "VADDRESSBOOK"
if not fallback_tag and not vobject_items:
# Maybe an empty address book
return "VADDRESSBOOK"
return fallback_tag
def check_and_sanitize_items(vobject_items, is_collection=False, tag=None):
"""Check vobject items for common errors and add missing UIDs.
    ``is_collection`` indicates that ``vobject_items`` may contain unrelated
    components.
    ``tag`` is the tag of the parent collection.
"""
if tag and tag not in ("VCALENDAR", "VADDRESSBOOK"):
raise ValueError("Unsupported collection tag: %r" % tag)
if not is_collection and len(vobject_items) != 1:
raise ValueError("Item contains %d components" % len(vobject_items))
if tag == "VCALENDAR":
if len(vobject_items) > 1:
raise RuntimeError("VCALENDAR collection contains %d "
"components" % len(vobject_items))
vobject_item = vobject_items[0]
if vobject_item.name != "VCALENDAR":
raise ValueError("Item type %r not supported in %r "
"collection" % (vobject_item.name, tag))
component_uids = set()
for component in vobject_item.components():
if component.name in ("VTODO", "VEVENT", "VJOURNAL"):
component_uid = get_uid(component)
if component_uid:
component_uids.add(component_uid)
component_name = None
object_uid = None
object_uid_set = False
for component in vobject_item.components():
# https://tools.ietf.org/html/rfc4791#section-4.1
if component.name == "VTIMEZONE":
continue
if component_name is None or is_collection:
component_name = component.name
elif component_name != component.name:
raise ValueError("Multiple component types in object: %r, %r" %
(component_name, component.name))
if component_name not in ("VTODO", "VEVENT", "VJOURNAL"):
continue
component_uid = get_uid(component)
if not object_uid_set or is_collection:
object_uid_set = True
object_uid = component_uid
if not component_uid:
if not is_collection:
raise ValueError("%s component without UID in object" %
component_name)
component_uid = find_available_uid(
component_uids.__contains__)
component_uids.add(component_uid)
if hasattr(component, "uid"):
component.uid.value = component_uid
else:
component.add("UID").value = component_uid
elif not object_uid or not component_uid:
raise ValueError("Multiple %s components without UID in "
"object" % component_name)
elif object_uid != component_uid:
raise ValueError(
"Multiple %s components with different UIDs in object: "
"%r, %r" % (component_name, object_uid, component_uid))
# Workaround for bug in Lightning (Thunderbird)
# Rescheduling a single occurrence from a repeating event creates
# an event with DTEND and DURATION:PT0S
if (hasattr(component, "dtend") and
hasattr(component, "duration") and
component.duration.value == timedelta(0)):
logger.debug("Quirks: Removing zero duration from %s in "
"object %r", component_name, component_uid)
del component.duration
# vobject interprets recurrence rules on demand
try:
component.rruleset
except Exception as e:
raise ValueError("Invalid recurrence rules in %s in object %r"
% (component.name, component_uid)) from e
elif tag == "VADDRESSBOOK":
# https://tools.ietf.org/html/rfc6352#section-5.1
object_uids = set()
for vobject_item in vobject_items:
if vobject_item.name == "VCARD":
object_uid = get_uid(vobject_item)
if object_uid:
object_uids.add(object_uid)
for vobject_item in vobject_items:
if vobject_item.name == "VLIST":
# Custom format used by SOGo Connector to store lists of
# contacts
continue
if vobject_item.name != "VCARD":
raise ValueError("Item type %r not supported in %r "
"collection" % (vobject_item.name, tag))
object_uid = get_uid(vobject_item)
if not object_uid:
if not is_collection:
raise ValueError("%s object without UID" %
vobject_item.name)
object_uid = find_available_uid(object_uids.__contains__)
object_uids.add(object_uid)
if hasattr(vobject_item, "uid"):
vobject_item.uid.value = object_uid
else:
vobject_item.add("UID").value = object_uid
else:
for i in vobject_items:
raise ValueError("Item type %r not supported in %s collection" %
(i.name, repr(tag) if tag else "generic"))
def check_and_sanitize_props(props):
"""Check collection properties for common errors."""
for k, v in props.copy().items(): # Make copy to be able to delete items
if not isinstance(k, str):
raise ValueError("Key must be %r not %r: %r" % (
str.__name__, type(k).__name__, k))
if not isinstance(v, str):
if v is None:
del props[k]
continue
raise ValueError("Value of %r must be %r not %r: %r" % (
k, str.__name__, type(v).__name__, v))
if k == "tag":
if not v:
del props[k]
continue
if v not in ("VCALENDAR", "VADDRESSBOOK"):
raise ValueError("Unsupported collection tag: %r" % v)
def find_available_uid(exists_fn, suffix=""):
"""Generate a pseudo-random UID"""
# Prevent infinite loop
for _ in range(1000):
r = binascii.hexlify(os.urandom(16)).decode("ascii")
name = "%s-%s-%s-%s-%s%s" % (
r[:8], r[8:12], r[12:16], r[16:20], r[20:], suffix)
if not exists_fn(name):
return name
# something is wrong with the PRNG
raise RuntimeError("No unique random sequence found")
def get_etag(text):
"""Etag from collection or item.
Encoded as quoted-string (see RFC 2616).
"""
etag = sha256()
etag.update(text.encode())
return '"%s"' % etag.hexdigest()
def get_uid(vobject_component):
"""UID value of an item if defined."""
return (vobject_component.uid.value
if hasattr(vobject_component, "uid") else None)
def get_uid_from_object(vobject_item):
"""UID value of an calendar/addressbook object."""
if vobject_item.name == "VCALENDAR":
if hasattr(vobject_item, "vevent"):
return get_uid(vobject_item.vevent)
if hasattr(vobject_item, "vjournal"):
return get_uid(vobject_item.vjournal)
if hasattr(vobject_item, "vtodo"):
return get_uid(vobject_item.vtodo)
elif vobject_item.name == "VCARD":
return get_uid(vobject_item)
return None
def find_tag(vobject_item):
"""Find component name from ``vobject_item``."""
if vobject_item.name == "VCALENDAR":
for component in vobject_item.components():
if component.name != "VTIMEZONE":
return component.name or ""
return ""
def find_tag_and_time_range(vobject_item):
"""Find component name and enclosing time range from ``vobject item``.
Returns a tuple (``tag``, ``start``, ``end``) where ``tag`` is a string
and ``start`` and ``end`` are POSIX timestamps (as int).
This is intened to be used for matching against simplified prefilters.
"""
tag = find_tag(vobject_item)
if not tag:
return (
tag, radicale_filter.TIMESTAMP_MIN, radicale_filter.TIMESTAMP_MAX)
start = end = None
def range_fn(range_start, range_end, is_recurrence):
nonlocal start, end
if start is None or range_start < start:
start = range_start
if end is None or end < range_end:
end = range_end
return False
def infinity_fn(range_start):
nonlocal start, end
if start is None or range_start < start:
start = range_start
end = radicale_filter.DATETIME_MAX
return True
radicale_filter.visit_time_ranges(vobject_item, tag, range_fn, infinity_fn)
if start is None:
start = radicale_filter.DATETIME_MIN
if end is None:
end = radicale_filter.DATETIME_MAX
try:
return tag, math.floor(start.timestamp()), math.ceil(end.timestamp())
except ValueError as e:
if str(e) == ("offset must be a timedelta representing a whole "
"number of minutes") and sys.version_info < (3, 6):
raise RuntimeError("Unsupported in Python < 3.6: %s" % e) from e
raise
class Item:
"""Class for address book and calendar entries."""
def __init__(self, collection_path=None, collection=None,
vobject_item=None, href=None, last_modified=None, text=None,
etag=None, uid=None, name=None, component_name=None,
time_range=None):
"""Initialize an item.
``collection_path`` the path of the parent collection (optional if
``collection`` is set).
``collection`` the parent collection (optional).
``href`` the href of the item.
``last_modified`` the HTTP-datetime of when the item was modified.
``text`` the text representation of the item (optional if
``vobject_item`` is set).
``vobject_item`` the vobject item (optional if ``text`` is set).
``etag`` the etag of the item (optional). See ``get_etag``.
``uid`` the UID of the object (optional). See ``get_uid_from_object``.
``name`` the name of the item (optional). See ``vobject_item.name``.
``component_name`` the name of the primary component (optional).
See ``find_tag``.
``time_range`` the enclosing time range.
See ``find_tag_and_time_range``.
"""
if text is None and vobject_item is None:
raise ValueError(
"At least one of 'text' or 'vobject_item' must be set")
if collection_path is None:
if collection is None:
raise ValueError("At least one of 'collection_path' or "
"'collection' must be set")
collection_path = collection.path
assert collection_path == pathutils.strip_path(
pathutils.sanitize_path(collection_path))
self._collection_path = collection_path
self.collection = collection
self.href = href
self.last_modified = last_modified
self._text = text
self._vobject_item = vobject_item
self._etag = etag
self._uid = uid
self._name = name
self._component_name = component_name
self._time_range = time_range
def serialize(self):
if self._text is None:
try:
self._text = self.vobject_item.serialize()
except Exception as e:
raise RuntimeError("Failed to serialize item %r from %r: %s" %
(self.href, self._collection_path,
e)) from e
return self._text
@property
def vobject_item(self):
if self._vobject_item is None:
try:
self._vobject_item = vobject.readOne(self._text)
except Exception as e:
raise RuntimeError("Failed to parse item %r from %r: %s" %
(self.href, self._collection_path,
e)) from e
return self._vobject_item
@property
def etag(self):
"""Encoded as quoted-string (see RFC 2616)."""
if self._etag is None:
self._etag = get_etag(self.serialize())
return self._etag
@property
def uid(self):
if self._uid is None:
self._uid = get_uid_from_object(self.vobject_item)
return self._uid
@property
def name(self):
if self._name is None:
self._name = self.vobject_item.name or ""
return self._name
@property
def component_name(self):
if self._component_name is not None:
return self._component_name
return find_tag(self.vobject_item)
@property
def time_range(self):
if self._time_range is None:
self._component_name, *self._time_range = (
find_tag_and_time_range(self.vobject_item))
return self._time_range
def prepare(self):
"""Fill cache with values."""
orig_vobject_item = self._vobject_item
self.serialize()
self.etag
self.uid
self.name
self.time_range
self.component_name
self._vobject_item = orig_vobject_item
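# Illustrative sketch of building an item from raw iCalendar text (the UID, href and
# collection path are hypothetical placeholders):
#
#     text = ("BEGIN:VCALENDAR\r\nBEGIN:VEVENT\r\nUID:event-1\r\n"
#             "DTSTART:20200101T000000Z\r\nEND:VEVENT\r\nEND:VCALENDAR\r\n")
#     item = Item(collection_path="user/calendar", href="event-1.ics", text=text)
#     item.prepare()  # pre-computes etag, uid, name, component_name and time_range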
|
import argparse
import sys
import a_sync
from paasta_tools import mesos_tools
from paasta_tools.utils import get_docker_client
from paasta_tools.utils import get_running_mesos_docker_containers
def parse_args():
parser = argparse.ArgumentParser(
description=(
"Cross references running containers with task ids from the mesos slave"
" and optionally kills them."
)
)
parser.add_argument(
"-f",
"--force",
action="store_true",
help="Actually kill the containers. (defaults to dry-run)",
)
args = parser.parse_args()
return args
@a_sync.to_blocking
async def main():
args = parse_args()
docker_client = get_docker_client()
running_mesos_task_ids = [
task["id"]
for task in mesos_tools.filter_running_tasks(
await mesos_tools.get_running_tasks_from_frameworks("")
)
]
running_mesos_docker_containers = get_running_mesos_docker_containers()
orphaned_containers = []
for container in running_mesos_docker_containers:
mesos_task_id = mesos_tools.get_mesos_id_from_container(
container=container, client=docker_client
)
if mesos_task_id not in running_mesos_task_ids:
orphaned_containers.append(
(container["Names"][0].strip("/"), mesos_task_id)
)
if orphaned_containers:
print(
"CRIT: Docker containers are orphaned: {}{}".format(
", ".join(
f"{container_name} ({mesos_task_id})"
for container_name, mesos_task_id in orphaned_containers
),
" and will be killed" if args.force else "",
)
)
if args.force:
for container_name, mesos_task_id in orphaned_containers:
docker_client.kill(container_name)
sys.exit(1)
else:
print("OK: All mesos task IDs accounted for")
sys.exit(0)
if __name__ == "__main__":
main()
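# Illustrative invocation sketch (the module path is a hypothetical placeholder for
# wherever this script is installed):
#
#     python -m paasta_tools.check_orphans           # dry run: only report orphans
#     python -m paasta_tools.check_orphans --force   # also kill the orphaned containers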
|
from collections import Counter
from functools import partial
from sys import version_info
from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc
class FeatsFromSpacyDocAndEmpath(FeatsFromSpacyDoc):
def __init__(self,
use_lemmas=False,
entity_types_to_censor=set(),
tag_types_to_censor=set(),
strip_final_period=False,
empath_analyze_function=None,
**kwargs):
'''
Parameters
----------
empath_analyze_function: function (default=empath.Empath().analyze)
        Function that produces a dictionary mapping Empath categories to scores.
Other parameters from FeatsFromSpacyDoc.__init__
'''
if empath_analyze_function is None:
try:
import empath
except ImportError:
raise Exception("Please install the empath library to use FeatsFromSpacyDocAndEmpath.")
self._empath_analyze_function = empath.Empath().analyze
else:
self._empath_analyze_function = partial(empath_analyze_function,
kwargs={'tokenizer': 'bigram'})
super(FeatsFromSpacyDocAndEmpath, self).__init__(use_lemmas,
entity_types_to_censor,
tag_types_to_censor,
strip_final_period)
def get_doc_metadata(self, doc, prefix=''):
empath_counter = Counter()
if version_info[0] >= 3:
doc = str(doc)
for empath_category, score in self._empath_analyze_function(doc).items():
if score > 0:
empath_counter[prefix + empath_category] = int(score)
return empath_counter
def has_metadata_term_list(self):
return True
def get_top_model_term_lists(self):
try:
import empath
except ImportError:
raise Exception("Please install the empath library to use FeatsFromSpacyDocAndEmpath.")
return dict(empath.Empath().cats)
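# Illustrative usage sketch with a stubbed analyze function, so it runs without the
# empath package installed (the category names and scores are made up):
#
#     def fake_analyze(doc, **kwargs):
#         return {"food": 2.0, "work": 0.0}
#
#     feats = FeatsFromSpacyDocAndEmpath(empath_analyze_function=fake_analyze)
#     feats.get_doc_metadata("we cooked dinner")  # Counter({'food': 2})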
|
import asyncio
import logging
from typing import Any, Dict, Iterable, Optional
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import Context, State
from homeassistant.helpers.typing import HomeAssistantType
from . import ATTR_VALUE, DOMAIN, SERVICE_SET_VALUE
_LOGGER = logging.getLogger(__name__)
async def _async_reproduce_state(
hass: HomeAssistantType,
state: State,
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce a single state."""
cur_state = hass.states.get(state.entity_id)
# Return if we can't find the entity
if cur_state is None:
_LOGGER.warning("Unable to find entity %s", state.entity_id)
return
# Return if we are already at the right state.
if cur_state.state == state.state:
return
# Call service
service = SERVICE_SET_VALUE
service_data = {ATTR_ENTITY_ID: state.entity_id, ATTR_VALUE: state.state}
await hass.services.async_call(
DOMAIN, service, service_data, context=context, blocking=True
)
async def async_reproduce_states(
hass: HomeAssistantType,
states: Iterable[State],
*,
context: Optional[Context] = None,
reproduce_options: Optional[Dict[str, Any]] = None,
) -> None:
"""Reproduce Input text states."""
# Reproduce states in parallel.
await asyncio.gather(
*(
_async_reproduce_state(
hass, state, context=context, reproduce_options=reproduce_options
)
for state in states
)
)
|
import urwid
class ActionButton(urwid.Button):
def __init__(self, caption, callback):
super(ActionButton, self).__init__("")
urwid.connect_signal(self, 'click', callback)
self._w = urwid.AttrMap(urwid.SelectableIcon(caption, 1),
None, focus_map='reversed')
class Place(urwid.WidgetWrap):
def __init__(self, name, choices):
super(Place, self).__init__(
ActionButton([u" > go to ", name], self.enter_place))
self.heading = urwid.Text([u"\nLocation: ", name, "\n"])
self.choices = choices
# create links back to ourself
for child in choices:
getattr(child, 'choices', []).insert(0, self)
def enter_place(self, button):
game.update_place(self)
class Thing(urwid.WidgetWrap):
def __init__(self, name):
super(Thing, self).__init__(
ActionButton([u" * take ", name], self.take_thing))
self.name = name
def take_thing(self, button):
self._w = urwid.Text(u" - %s (taken)" % self.name)
game.take_thing(self)
def exit_program(button):
raise urwid.ExitMainLoop()
map_top = Place(u'porch', [
Place(u'kitchen', [
Place(u'refrigerator', []),
Place(u'cupboard', [
Thing(u'jug'),
]),
]),
Place(u'garden', [
Place(u'tree', [
Thing(u'lemon'),
Thing(u'bird'),
]),
]),
Place(u'street', [
Place(u'store', [
Thing(u'sugar'),
]),
Place(u'lake', [
Place(u'beach', []),
]),
]),
])
class AdventureGame(object):
def __init__(self):
self.log = urwid.SimpleFocusListWalker([])
self.top = urwid.ListBox(self.log)
self.inventory = set()
self.update_place(map_top)
def update_place(self, place):
if self.log: # disable interaction with previous place
self.log[-1] = urwid.WidgetDisable(self.log[-1])
self.log.append(urwid.Pile([place.heading] + place.choices))
self.top.focus_position = len(self.log) - 1
self.place = place
def take_thing(self, thing):
self.inventory.add(thing.name)
if self.inventory >= set([u'sugar', u'lemon', u'jug']):
response = urwid.Text(u'You can make lemonade!\n')
done = ActionButton(u' - Joy', exit_program)
self.log[:] = [response, done]
else:
self.update_place(self.place)
game = AdventureGame()
urwid.MainLoop(game.top, palette=[('reversed', 'standout', '')]).run()
|
import logging
import pypck
import voluptuous as vol
from homeassistant.components.climate import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP
from homeassistant.const import (
CONF_ADDRESS,
CONF_BINARY_SENSORS,
CONF_COVERS,
CONF_HOST,
CONF_LIGHTS,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_SENSORS,
CONF_SWITCHES,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.entity import Entity
from .const import (
BINSENSOR_PORTS,
CONF_CLIMATES,
CONF_CONNECTIONS,
CONF_DIM_MODE,
CONF_DIMMABLE,
CONF_LOCKABLE,
CONF_MAX_TEMP,
CONF_MIN_TEMP,
CONF_MOTOR,
CONF_OUTPUT,
CONF_OUTPUTS,
CONF_REGISTER,
CONF_REVERSE_TIME,
CONF_SCENE,
CONF_SCENES,
CONF_SETPOINT,
CONF_SK_NUM_TRIES,
CONF_SOURCE,
CONF_TRANSITION,
DATA_LCN,
DIM_MODES,
DOMAIN,
KEYS,
LED_PORTS,
LOGICOP_PORTS,
MOTOR_PORTS,
MOTOR_REVERSE_TIME,
OUTPUT_PORTS,
RELAY_PORTS,
S0_INPUTS,
SETPOINTS,
THRESHOLDS,
VAR_UNITS,
VARIABLES,
)
from .helpers import has_unique_connection_names, is_address
from .services import (
DynText,
Led,
LockKeys,
LockRegulator,
OutputAbs,
OutputRel,
OutputToggle,
Pck,
Relays,
SendKeys,
VarAbs,
VarRel,
VarReset,
)
_LOGGER = logging.getLogger(__name__)
BINARY_SENSORS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_SOURCE): vol.All(
vol.Upper, vol.In(SETPOINTS + KEYS + BINSENSOR_PORTS)
),
}
)
CLIMATES_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_SOURCE): vol.All(vol.Upper, vol.In(VARIABLES)),
vol.Required(CONF_SETPOINT): vol.All(vol.Upper, vol.In(VARIABLES + SETPOINTS)),
vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): vol.Coerce(float),
vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): vol.Coerce(float),
vol.Optional(CONF_LOCKABLE, default=False): vol.Coerce(bool),
        vol.Optional(CONF_UNIT_OF_MEASUREMENT, default=TEMP_CELSIUS): vol.In(
            [TEMP_CELSIUS, TEMP_FAHRENHEIT]
        ),
}
)
COVERS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_MOTOR): vol.All(vol.Upper, vol.In(MOTOR_PORTS)),
vol.Optional(CONF_REVERSE_TIME): vol.All(vol.Upper, vol.In(MOTOR_REVERSE_TIME)),
}
)
LIGHTS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_OUTPUT): vol.All(
vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS)
),
vol.Optional(CONF_DIMMABLE, default=False): vol.Coerce(bool),
vol.Optional(CONF_TRANSITION, default=0): vol.All(
vol.Coerce(float), vol.Range(min=0.0, max=486.0), lambda value: value * 1000
),
}
)
SCENES_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_REGISTER): vol.All(vol.Coerce(int), vol.Range(0, 9)),
vol.Required(CONF_SCENE): vol.All(vol.Coerce(int), vol.Range(0, 9)),
vol.Optional(CONF_OUTPUTS): vol.All(
cv.ensure_list, [vol.All(vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS))]
),
vol.Optional(CONF_TRANSITION, default=None): vol.Any(
vol.All(
vol.Coerce(int),
vol.Range(min=0.0, max=486.0),
lambda value: value * 1000,
),
None,
),
}
)
SENSORS_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_SOURCE): vol.All(
vol.Upper,
vol.In(
VARIABLES
+ SETPOINTS
+ THRESHOLDS
+ S0_INPUTS
+ LED_PORTS
+ LOGICOP_PORTS
),
),
vol.Optional(CONF_UNIT_OF_MEASUREMENT, default="native"): vol.All(
vol.Upper, vol.In(VAR_UNITS)
),
}
)
SWITCHES_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_ADDRESS): is_address,
vol.Required(CONF_OUTPUT): vol.All(
vol.Upper, vol.In(OUTPUT_PORTS + RELAY_PORTS)
),
}
)
CONNECTION_SCHEMA = vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PORT): cv.port,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_SK_NUM_TRIES, default=0): cv.positive_int,
vol.Optional(CONF_DIM_MODE, default="steps50"): vol.All(
vol.Upper, vol.In(DIM_MODES)
),
vol.Optional(CONF_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CONNECTIONS): vol.All(
cv.ensure_list, has_unique_connection_names, [CONNECTION_SCHEMA]
),
vol.Optional(CONF_BINARY_SENSORS): vol.All(
cv.ensure_list, [BINARY_SENSORS_SCHEMA]
),
vol.Optional(CONF_CLIMATES): vol.All(cv.ensure_list, [CLIMATES_SCHEMA]),
vol.Optional(CONF_COVERS): vol.All(cv.ensure_list, [COVERS_SCHEMA]),
vol.Optional(CONF_LIGHTS): vol.All(cv.ensure_list, [LIGHTS_SCHEMA]),
vol.Optional(CONF_SCENES): vol.All(cv.ensure_list, [SCENES_SCHEMA]),
vol.Optional(CONF_SENSORS): vol.All(cv.ensure_list, [SENSORS_SCHEMA]),
vol.Optional(CONF_SWITCHES): vol.All(cv.ensure_list, [SWITCHES_SCHEMA]),
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the LCN component."""
hass.data[DATA_LCN] = {}
conf_connections = config[DOMAIN][CONF_CONNECTIONS]
connections = []
for conf_connection in conf_connections:
connection_name = conf_connection.get(CONF_NAME)
settings = {
"SK_NUM_TRIES": conf_connection[CONF_SK_NUM_TRIES],
"DIM_MODE": pypck.lcn_defs.OutputPortDimMode[
conf_connection[CONF_DIM_MODE]
],
}
connection = pypck.connection.PchkConnectionManager(
hass.loop,
conf_connection[CONF_HOST],
conf_connection[CONF_PORT],
conf_connection[CONF_USERNAME],
conf_connection[CONF_PASSWORD],
settings=settings,
connection_id=connection_name,
)
try:
# establish connection to PCHK server
await hass.async_create_task(connection.async_connect(timeout=15))
connections.append(connection)
_LOGGER.info('LCN connected to "%s"', connection_name)
except TimeoutError:
_LOGGER.error('Connection to PCHK server "%s" failed', connection_name)
return False
hass.data[DATA_LCN][CONF_CONNECTIONS] = connections
# load platforms
for component, conf_key in (
("binary_sensor", CONF_BINARY_SENSORS),
("climate", CONF_CLIMATES),
("cover", CONF_COVERS),
("light", CONF_LIGHTS),
("scene", CONF_SCENES),
("sensor", CONF_SENSORS),
("switch", CONF_SWITCHES),
):
if conf_key in config[DOMAIN]:
hass.async_create_task(
async_load_platform(
hass, component, DOMAIN, config[DOMAIN][conf_key], config
)
)
# register service calls
for service_name, service in (
("output_abs", OutputAbs),
("output_rel", OutputRel),
("output_toggle", OutputToggle),
("relays", Relays),
("var_abs", VarAbs),
("var_reset", VarReset),
("var_rel", VarRel),
("lock_regulator", LockRegulator),
("led", Led),
("send_keys", SendKeys),
("lock_keys", LockKeys),
("dyn_text", DynText),
("pck", Pck),
):
hass.services.async_register(
DOMAIN, service_name, service(hass), service.schema
)
return True
class LcnDevice(Entity):
"""Parent class for all devices associated with the LCN component."""
def __init__(self, config, address_connection):
"""Initialize the LCN device."""
self.config = config
self.address_connection = address_connection
self._name = config[CONF_NAME]
@property
def should_poll(self):
"""Lcn device entity pushes its state to HA."""
return False
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
self.address_connection.register_for_inputs(self.input_received)
@property
def name(self):
"""Return the name of the device."""
return self._name
def input_received(self, input_obj):
"""Set state/value when LCN input object (command) is received."""
raise NotImplementedError("Pure virtual function.")
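# Illustrative configuration.yaml sketch matching CONFIG_SCHEMA above. The host,
# credentials, module address and output name are hypothetical placeholders, and the
# exact address syntax is whatever `is_address` accepts.
#
#     lcn:
#       connections:
#         - name: myhome
#           host: 192.168.1.20
#           port: 4114
#           username: lcn
#           password: lcn
#       lights:
#         - name: Living room light
#           address: myhome.s0.m7
#           output: OUTPUT1
#           dimmable: true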
|
import logging
from meteofrance.helpers import (
get_warning_text_status_from_indice_color,
readeable_phenomenoms_dict,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from homeassistant.util import dt as dt_util
from .const import (
ATTR_NEXT_RAIN_1_HOUR_FORECAST,
ATTR_NEXT_RAIN_DT_REF,
ATTRIBUTION,
COORDINATOR_ALERT,
COORDINATOR_FORECAST,
COORDINATOR_RAIN,
DOMAIN,
ENTITY_API_DATA_PATH,
ENTITY_DEVICE_CLASS,
ENTITY_ENABLE,
ENTITY_ICON,
ENTITY_NAME,
ENTITY_UNIT,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the Meteo-France sensor platform."""
coordinator_forecast = hass.data[DOMAIN][entry.entry_id][COORDINATOR_FORECAST]
coordinator_rain = hass.data[DOMAIN][entry.entry_id][COORDINATOR_RAIN]
coordinator_alert = hass.data[DOMAIN][entry.entry_id][COORDINATOR_ALERT]
entities = []
for sensor_type in SENSOR_TYPES:
if sensor_type == "next_rain":
if coordinator_rain:
entities.append(MeteoFranceRainSensor(sensor_type, coordinator_rain))
elif sensor_type == "weather_alert":
if coordinator_alert:
entities.append(MeteoFranceAlertSensor(sensor_type, coordinator_alert))
elif sensor_type in ["rain_chance", "freeze_chance", "snow_chance"]:
if coordinator_forecast.data.probability_forecast:
entities.append(MeteoFranceSensor(sensor_type, coordinator_forecast))
else:
_LOGGER.warning(
"Sensor %s skipped for %s as data is missing in the API",
sensor_type,
coordinator_forecast.data.position["name"],
)
else:
entities.append(MeteoFranceSensor(sensor_type, coordinator_forecast))
async_add_entities(
entities,
False,
)
class MeteoFranceSensor(CoordinatorEntity):
"""Representation of a Meteo-France sensor."""
def __init__(self, sensor_type: str, coordinator: DataUpdateCoordinator):
"""Initialize the Meteo-France sensor."""
super().__init__(coordinator)
self._type = sensor_type
if hasattr(self.coordinator.data, "position"):
city_name = self.coordinator.data.position["name"]
self._name = f"{city_name} {SENSOR_TYPES[self._type][ENTITY_NAME]}"
self._unique_id = f"{self.coordinator.data.position['lat']},{self.coordinator.data.position['lon']}_{self._type}"
@property
def unique_id(self):
"""Return the unique id."""
return self._unique_id
@property
def name(self):
"""Return the name."""
return self._name
@property
def state(self):
"""Return the state."""
path = SENSOR_TYPES[self._type][ENTITY_API_DATA_PATH].split(":")
data = getattr(self.coordinator.data, path[0])
# Specific case for probability forecast
if path[0] == "probability_forecast":
if len(path) == 3:
                # Fix compared to the other entities: the first index is always null in the API result for an unknown reason
value = _find_first_probability_forecast_not_null(data, path)
else:
value = data[0][path[1]]
# General case
else:
if len(path) == 3:
value = data[path[1]][path[2]]
else:
value = data[path[1]]
if self._type == "wind_speed":
# convert API wind speed from m/s to km/h
value = round(value * 3.6)
return value
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return SENSOR_TYPES[self._type][ENTITY_UNIT]
@property
def icon(self):
"""Return the icon."""
return SENSOR_TYPES[self._type][ENTITY_ICON]
@property
def device_class(self):
"""Return the device class."""
return SENSOR_TYPES[self._type][ENTITY_DEVICE_CLASS]
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return SENSOR_TYPES[self._type][ENTITY_ENABLE]
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
class MeteoFranceRainSensor(MeteoFranceSensor):
"""Representation of a Meteo-France rain sensor."""
@property
def state(self):
"""Return the state."""
# search first cadran with rain
next_rain = next(
(cadran for cadran in self.coordinator.data.forecast if cadran["rain"] > 1),
None,
)
return (
dt_util.utc_from_timestamp(next_rain["dt"]).isoformat()
if next_rain
else None
)
@property
def device_state_attributes(self):
"""Return the state attributes."""
reference_dt = self.coordinator.data.forecast[0]["dt"]
return {
ATTR_NEXT_RAIN_DT_REF: dt_util.utc_from_timestamp(reference_dt).isoformat(),
ATTR_NEXT_RAIN_1_HOUR_FORECAST: {
f"{int((item['dt'] - reference_dt) / 60)} min": item["desc"]
for item in self.coordinator.data.forecast
},
ATTR_ATTRIBUTION: ATTRIBUTION,
}
class MeteoFranceAlertSensor(MeteoFranceSensor):
"""Representation of a Meteo-France alert sensor."""
def __init__(self, sensor_type: str, coordinator: DataUpdateCoordinator):
"""Initialize the Meteo-France sensor."""
super().__init__(sensor_type, coordinator)
dept_code = self.coordinator.data.domain_id
self._name = f"{dept_code} {SENSOR_TYPES[self._type][ENTITY_NAME]}"
self._unique_id = self._name
@property
def state(self):
"""Return the state."""
return get_warning_text_status_from_indice_color(
self.coordinator.data.get_domain_max_color()
)
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {
**readeable_phenomenoms_dict(self.coordinator.data.phenomenons_max_colors),
ATTR_ATTRIBUTION: ATTRIBUTION,
}
def _find_first_probability_forecast_not_null(
probability_forecast: list, path: list
) -> int:
"""Search the first not None value in the first forecast elements."""
for forecast in probability_forecast[0:3]:
if forecast[path[1]][path[2]] is not None:
return forecast[path[1]][path[2]]
    # Default return value if no value was found
return None
|
import re
import pytest
from cerberus import errors, Validator, SchemaError
from cerberus.base import DocumentError
from cerberus.tests.conftest import sample_schema
def assert_exception(exception, document={}, schema=None, validator=None, msg=None):
"""
Tests whether a specific exception is raised. Optionally also tests whether the
exception message is as expected.
"""
if validator is None:
validator = Validator()
if msg is None:
with pytest.raises(exception):
validator(document, schema)
else:
with pytest.raises(exception, match=re.escape(msg)):
validator(document, schema)
def assert_schema_error(*args):
""" Tests whether a validation raises an exception due to a malformed schema. """
assert_exception(SchemaError, *args)
def assert_document_error(*args):
""" Tests whether a validation raises an exception due to a malformed document. """
assert_exception(DocumentError, *args)
def assert_fail(
document,
schema=None,
validator=None,
update=False,
error=None,
errors=None,
child_errors=None,
):
""" Tests whether a validation fails. """
if validator is None:
validator = Validator(sample_schema)
result = validator(document, schema, update)
assert isinstance(result, bool)
assert not result
actual_errors = validator._errors
assert not (error is not None and errors is not None)
assert not (errors is not None and child_errors is not None), (
'child_errors can only be tested in ' 'conjunction with the error parameter'
)
assert not (child_errors is not None and error is None)
if error is not None:
assert len(actual_errors) == 1
assert_has_error(actual_errors, *error)
if child_errors is not None:
assert len(actual_errors[0].child_errors) == len(child_errors)
assert_has_errors(actual_errors[0].child_errors, child_errors)
elif errors is not None:
assert len(actual_errors) == len(errors)
assert_has_errors(actual_errors, errors)
return actual_errors
def assert_success(document, schema=None, validator=None, update=False):
""" Tests whether a validation succeeds. """
if validator is None:
validator = Validator(sample_schema)
result = validator(document, schema, update)
assert isinstance(result, bool)
if not result:
raise AssertionError(validator.errors)
def assert_has_error(_errors, d_path, s_path, error_def, constraint, info=()):
if not isinstance(d_path, tuple):
d_path = (d_path,)
if not isinstance(info, tuple):
info = (info,)
assert isinstance(_errors, errors.ErrorList)
for i, error in enumerate(_errors):
assert isinstance(error, errors.ValidationError)
try:
assert error.document_path == d_path
assert error.schema_path == s_path
assert error.code == error_def.code
assert error.rule == error_def.rule
assert error.constraint == constraint
if not error.is_group_error:
assert error.info == info
except AssertionError:
continue
else:
break
else:
raise AssertionError(
"""
Error with properties:
document_path={doc_path}
schema_path={schema_path}
code={code}
constraint={constraint}
info={info}
not found in errors:
{errors}
""".format(
doc_path=d_path,
schema_path=s_path,
code=hex(error.code),
info=info,
constraint=constraint,
errors=_errors,
)
)
return i
def assert_has_errors(_errors, _exp_errors):
assert isinstance(_exp_errors, list)
for error in _exp_errors:
assert isinstance(error, tuple)
assert_has_error(_errors, *error)
def assert_not_has_error(_errors, *args, **kwargs):
try:
assert_has_error(_errors, *args, **kwargs)
except AssertionError:
pass
else:
raise AssertionError('An unexpected error occurred.')
def assert_normalized(document, expected, schema=None, validator=None):
if validator is None:
validator = Validator(sample_schema)
assert_success(document, schema, validator)
assert validator.document == expected
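# Illustrative usage sketch inside a test module (the schema and documents are made
# up for the example):
#
#     def test_minlength():
#         schema = {'name': {'type': 'string', 'minlength': 3}}
#         assert_success({'name': 'abc'}, schema)
#         assert_fail({'name': 'ab'}, schema)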
|
import os.path as op
import os
import numpy as np
import pytest
from numpy.testing import assert_equal
from mne.datasets import testing
from mne.utils import (_TempDir, _url_to_local_path, run_tests_if_main,
buggy_mkl_svd)
def test_buggy_mkl():
"""Test decorator for buggy MKL issues."""
from unittest import SkipTest
@buggy_mkl_svd
def foo(a, b):
raise np.linalg.LinAlgError('SVD did not converge')
with pytest.warns(RuntimeWarning, match='convergence error'):
pytest.raises(SkipTest, foo, 1, 2)
@buggy_mkl_svd
def bar(c, d, e):
raise RuntimeError('SVD did not converge')
pytest.raises(RuntimeError, bar, 1, 2, 3)
def test_tempdir():
"""Test TempDir."""
tempdir2 = _TempDir()
assert (op.isdir(tempdir2))
x = str(tempdir2)
del tempdir2
assert (not op.isdir(x))
def test_datasets():
"""Test dataset config."""
# gh-4192
data_path = testing.data_path(download=False)
os.environ['MNE_DATASETS_TESTING_PATH'] = op.dirname(data_path)
assert testing.data_path(download=False) == data_path
def test_url_to_local_path():
"""Test URL to local path."""
assert_equal(_url_to_local_path('http://google.com/home/why.html', '.'),
op.join('.', 'home', 'why.html'))
run_tests_if_main()
|
import sys
try:
from collections.abc import Iterable
except ImportError:
from collections import Iterable
from datetime import datetime, timedelta, tzinfo
base = (str, bytes)
coerce = str
_ellipsis = eval('...')
def is_list_like(value):
"""Return whether value is an iterable but not a mapping / string"""
return isinstance(value, Iterable) and not isinstance(value, (base, dict))
def is_str(string):
"""Return whether value is a string or a byte list"""
return isinstance(string, base)
def to_str(obj):
"""Cast obj to unicode string"""
if not is_str(obj):
return coerce(obj)
return obj
def to_unicode(string):
"""Force string to be a string in python 3 or a unicode in python 2"""
if not isinstance(string, coerce):
return string.decode('utf-8')
return string
def u(s):
"""Emulate u'str' in python 2, do nothing in python 3"""
if sys.version_info[0] == 2:
return s.decode('utf-8')
return s
try:
from datetime import timezone
utc = timezone.utc
except ImportError:
class UTC(tzinfo):
def tzname(self, dt):
return 'UTC'
def utcoffset(self, dt):
return timedelta(0)
def dst(self, dt):
return None
utc = UTC()
def timestamp(x):
"""Get a timestamp from a date in python 3 and python 2"""
if x.tzinfo is None:
# Naive dates to utc
x = x.replace(tzinfo=utc)
if hasattr(x, 'timestamp'):
return x.timestamp()
else:
return (x - datetime(1970, 1, 1, tzinfo=utc)).total_seconds()
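# Example (a small sanity check): naive datetimes are treated as UTC, so the Unix
# epoch maps to 0 and 2000-01-01 to 946684800 seconds.
#
#     assert timestamp(datetime(1970, 1, 1)) == 0
#     assert timestamp(datetime(2000, 1, 1)) == 946684800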
|
from datetime import datetime as dt
import numpy as np
import pytest
from arctic.date import DateRange, CLOSED_OPEN, mktz
from arctic.exceptions import NoDataFoundException
def test_delete(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 1, 30, tzinfo=mktz('Europe/London'))
},
]
tickstore_lib._chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
deleted = tickstore_lib.delete('SYM')
assert deleted.deleted_count == 2
with pytest.raises(NoDataFoundException):
tickstore_lib.read('SYM', date_range=DateRange(20130102), columns=None)
# Delete with a date-range
tickstore_lib.write('SYM', DUMMY_DATA)
deleted = tickstore_lib.delete(
'SYM',
DateRange(
dt(2013, 1, 1, tzinfo=mktz('Europe/London')),
dt(2013, 1, 2, tzinfo=mktz('Europe/London'))
)
)
assert deleted.deleted_count == 1
df = tickstore_lib.read('SYM', columns=None)
assert np.allclose(df['b'].values, np.array([4.]))
def test_delete_daterange(tickstore_lib):
DUMMY_DATA = [
{'a': 1.,
'b': 2.,
'index': dt(2013, 1, 1, tzinfo=mktz('Europe/London'))
},
{'a': 3.,
'b': 4.,
'index': dt(2013, 2, 1, tzinfo=mktz('Europe/London'))
},
]
tickstore_lib._chunk_size = 1
tickstore_lib.write('SYM', DUMMY_DATA)
# Delete with a date-range
deleted = tickstore_lib.delete(
'SYM',
DateRange(
dt(2013, 1, 1, tzinfo=mktz('Europe/London')),
dt(2013, 2, 1, tzinfo=mktz('Europe/London')),
CLOSED_OPEN
)
)
assert deleted.deleted_count == 1
df = tickstore_lib.read('SYM', columns=None)
assert np.allclose(df['b'].values, np.array([4.]))
|
import asyncio
import pytest
from homeassistant.components.hassio.http import _need_auth
from tests.async_mock import patch
async def test_forward_request(hassio_client, aioclient_mock):
"""Test fetching normal path."""
aioclient_mock.post("http://127.0.0.1/beer", text="response")
resp = await hassio_client.post("/api/hassio/beer")
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "response"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
@pytest.mark.parametrize(
"build_type", ["supervisor/info", "homeassistant/update", "host/info"]
)
async def test_auth_required_forward_request(hassio_noauth_client, build_type):
"""Test auth required for normal request."""
resp = await hassio_noauth_client.post(f"/api/hassio/{build_type}")
# Check we got right response
assert resp.status == 401
@pytest.mark.parametrize(
"build_type",
[
"app/index.html",
"app/hassio-app.html",
"app/index.html",
"app/hassio-app.html",
"app/some-chunk.js",
"app/app.js",
],
)
async def test_forward_request_no_auth_for_panel(
hassio_client, build_type, aioclient_mock
):
"""Test no auth needed for ."""
aioclient_mock.get(f"http://127.0.0.1/{build_type}", text="response")
resp = await hassio_client.get(f"/api/hassio/{build_type}")
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "response"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
async def test_forward_request_no_auth_for_logo(hassio_client, aioclient_mock):
"""Test no auth needed for logo."""
aioclient_mock.get("http://127.0.0.1/addons/bl_b392/logo", text="response")
resp = await hassio_client.get("/api/hassio/addons/bl_b392/logo")
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "response"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
async def test_forward_request_no_auth_for_icon(hassio_client, aioclient_mock):
"""Test no auth needed for icon."""
aioclient_mock.get("http://127.0.0.1/addons/bl_b392/icon", text="response")
resp = await hassio_client.get("/api/hassio/addons/bl_b392/icon")
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "response"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
async def test_forward_log_request(hassio_client, aioclient_mock):
"""Test fetching normal log path doesn't remove ANSI color escape codes."""
aioclient_mock.get("http://127.0.0.1/beer/logs", text="\033[32mresponse\033[0m")
resp = await hassio_client.get("/api/hassio/beer/logs")
# Check we got right response
assert resp.status == 200
body = await resp.text()
assert body == "\033[32mresponse\033[0m"
# Check we forwarded command
assert len(aioclient_mock.mock_calls) == 1
async def test_bad_gateway_when_cannot_find_supervisor(hassio_client):
"""Test we get a bad gateway error if we can't find supervisor."""
with patch(
"homeassistant.components.hassio.http.async_timeout.timeout",
side_effect=asyncio.TimeoutError,
):
resp = await hassio_client.get("/api/hassio/addons/test/info")
assert resp.status == 502
async def test_forwarding_user_info(hassio_client, hass_admin_user, aioclient_mock):
"""Test that we forward user info correctly."""
aioclient_mock.get("http://127.0.0.1/hello")
resp = await hassio_client.get("/api/hassio/hello")
# Check we got right response
assert resp.status == 200
assert len(aioclient_mock.mock_calls) == 1
req_headers = aioclient_mock.mock_calls[0][-1]
req_headers["X-Hass-User-ID"] == hass_admin_user.id
req_headers["X-Hass-Is-Admin"] == "1"
async def test_snapshot_upload_headers(hassio_client, aioclient_mock):
"""Test that we forward the full header for snapshot upload."""
content_type = "multipart/form-data; boundary='--webkit'"
aioclient_mock.get("http://127.0.0.1/snapshots/new/upload")
resp = await hassio_client.get(
"/api/hassio/snapshots/new/upload", headers={"Content-Type": content_type}
)
# Check we got right response
assert resp.status == 200
assert len(aioclient_mock.mock_calls) == 1
req_headers = aioclient_mock.mock_calls[0][-1]
req_headers["Content-Type"] == content_type
async def test_snapshot_download_headers(hassio_client, aioclient_mock):
"""Test that we forward the full header for snapshot download."""
content_disposition = "attachment; filename=test.tar"
aioclient_mock.get(
"http://127.0.0.1/snapshots/slug/download",
headers={
"Content-Length": "50000000",
"Content-Disposition": content_disposition,
},
)
resp = await hassio_client.get("/api/hassio/snapshots/slug/download")
# Check we got right response
assert resp.status == 200
assert len(aioclient_mock.mock_calls) == 1
resp.headers["Content-Disposition"] == content_disposition
def test_need_auth(hass):
"""Test if the requested path needs authentication."""
assert not _need_auth(hass, "addons/test/logo")
assert _need_auth(hass, "snapshots/new/upload")
assert _need_auth(hass, "supervisor/logs")
hass.data["onboarding"] = False
assert not _need_auth(hass, "snapshots/new/upload")
assert not _need_auth(hass, "supervisor/logs")
|
import importlib
import json
import pathlib
from typing import Any, Dict, List, Optional
import attr
@attr.s
class Error:
"""Error validating an integration."""
plugin: str = attr.ib()
error: str = attr.ib()
fixable: bool = attr.ib(default=False)
def __str__(self) -> str:
"""Represent error as string."""
return f"[{self.plugin.upper()}] {self.error}"
@attr.s
class Config:
"""Config for the run."""
specific_integrations: Optional[pathlib.Path] = attr.ib()
root: pathlib.Path = attr.ib()
action: str = attr.ib()
requirements: bool = attr.ib()
errors: List[Error] = attr.ib(factory=list)
cache: Dict[str, Any] = attr.ib(factory=dict)
def add_error(self, *args, **kwargs):
"""Add an error."""
self.errors.append(Error(*args, **kwargs))
@attr.s
class Integration:
"""Represent an integration in our validator."""
@classmethod
def load_dir(cls, path: pathlib.Path):
"""Load all integrations in a directory."""
assert path.is_dir()
integrations = {}
for fil in path.iterdir():
if fil.is_file() or fil.name == "__pycache__":
continue
init = fil / "__init__.py"
if not init.exists():
print(
f"Warning: {init} missing, skipping directory. "
"If this is your development environment, "
"you can safely delete this folder."
)
continue
integration = cls(fil)
integration.load_manifest()
integrations[integration.domain] = integration
return integrations
path: pathlib.Path = attr.ib()
manifest: Optional[dict] = attr.ib(default=None)
errors: List[Error] = attr.ib(factory=list)
warnings: List[Error] = attr.ib(factory=list)
@property
def domain(self) -> str:
"""Integration domain."""
return self.path.name
@property
def disabled(self) -> Optional[str]:
"""List of disabled."""
return self.manifest.get("disabled")
@property
def requirements(self) -> List[str]:
"""List of requirements."""
return self.manifest.get("requirements", [])
@property
def dependencies(self) -> List[str]:
"""List of dependencies."""
return self.manifest.get("dependencies", [])
def add_error(self, *args, **kwargs):
"""Add an error."""
self.errors.append(Error(*args, **kwargs))
def add_warning(self, *args, **kwargs):
"""Add an warning."""
self.warnings.append(Error(*args, **kwargs))
def load_manifest(self) -> None:
"""Load manifest."""
manifest_path = self.path / "manifest.json"
if not manifest_path.is_file():
self.add_error("model", f"Manifest file {manifest_path} not found")
return
try:
manifest = json.loads(manifest_path.read_text())
except ValueError as err:
self.add_error("model", f"Manifest contains invalid JSON: {err}")
return
self.manifest = manifest
def import_pkg(self, platform=None):
"""Import the Python file."""
pkg = f"homeassistant.components.{self.domain}"
if platform is not None:
pkg += f".{platform}"
return importlib.import_module(pkg)
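# Illustrative usage sketch (the components path is a hypothetical placeholder):
#
#     integrations = Integration.load_dir(pathlib.Path("homeassistant/components"))
#     for domain, integration in integrations.items():
#         if integration.manifest is None:
#             print(f"{domain}: manifest missing or invalid")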
|