from homeassistant.components.light import (
ATTR_BRIGHTNESS,
SUPPORT_BRIGHTNESS,
LightEntity,
)
from homeassistant.const import CONF_NAME
from . import LIGHTWAVE_LINK
MAX_BRIGHTNESS = 255
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Find and return LightWave lights."""
if not discovery_info:
return
lights = []
lwlink = hass.data[LIGHTWAVE_LINK]
for device_id, device_config in discovery_info.items():
name = device_config[CONF_NAME]
lights.append(LWRFLight(name, device_id, lwlink))
async_add_entities(lights)
class LWRFLight(LightEntity):
"""Representation of a LightWaveRF light."""
def __init__(self, name, device_id, lwlink):
"""Initialize LWRFLight entity."""
self._name = name
self._device_id = device_id
self._state = None
self._brightness = MAX_BRIGHTNESS
self._lwlink = lwlink
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
@property
def should_poll(self):
"""No polling needed for a LightWave light."""
return False
@property
def name(self):
"""Lightwave light name."""
return self._name
@property
def brightness(self):
"""Brightness of this light between 0..MAX_BRIGHTNESS."""
return self._brightness
@property
def is_on(self):
"""Lightwave light is on state."""
return self._state
async def async_turn_on(self, **kwargs):
"""Turn the LightWave light on."""
self._state = True
if ATTR_BRIGHTNESS in kwargs:
self._brightness = kwargs[ATTR_BRIGHTNESS]
if self._brightness != MAX_BRIGHTNESS:
self._lwlink.turn_on_with_brightness(
self._device_id, self._name, self._brightness
)
else:
self._lwlink.turn_on_light(self._device_id, self._name)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn the LightWave light off."""
self._state = False
self._lwlink.turn_off(self._device_id, self._name)
self.async_write_ha_state()
|
__author__ = '[email protected]'
from absl import flags
flags.DEFINE_string('database', None,
'The database within which the command or query executes.')
flags.DEFINE_string('query_timeout', '600', 'Query timeout in seconds.')
flags.DEFINE_string(
'athena_query_output_bucket', None,
'Specifies where to save the results of the query execution.')
flags.DEFINE_string('athena_region', 'us-east-1', 'Region to use Athena in.')
flags.mark_flags_as_required(['database', 'athena_query_output_bucket'])
FLAGS = flags.FLAGS
def generate_provider_specific_cmd_list(script, driver, output, error):
"""Method to compile the BigQuery specific script execution command.
Arguments:
script: SQL script which contains the query.
driver: Driver that contains the Athena specific script executor.
output: Output log file.
error: Error log file.
Returns:
Command list to execute the supplied script.
"""
del error
cmd_list = [
driver, script, FLAGS.athena_region, FLAGS.database, FLAGS.query_timeout,
FLAGS.athena_query_output_bucket, output, 'athena.err'
]
return cmd_list
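# Illustrative example (hypothetical flag values): with --database=tpch_sf1,
# --athena_query_output_bucket=s3://my-results and the remaining flags left at their
# defaults, generate_provider_specific_cmd_list('q1.sql', 'runner.py', 'q1.out', 'q1.err')
# would return ['runner.py', 'q1.sql', 'us-east-1', 'tpch_sf1', '600', 's3://my-results',
# 'q1.out', 'athena.err']; the error log argument is intentionally discarded.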
|
import logging
import time
import psutil
from ...common.util import expand_to_seconds
from ...common.interfaces import AbstractPlugin
from netort.process import execute
class Plugin(AbstractPlugin):
'''Plugin to check system resources'''
SECTION = "rcheck"
@staticmethod
def get_key():
return __file__
def __init__(self, core, cfg, name):
''' Constructor '''
AbstractPlugin.__init__(self, core, cfg, name)
self.interval = "10s"
self.disk_limit = 2048 # 2 GB
self.mem_limit = 512 # 0.5 GB
self.last_check = 0
def get_available_options(self):
return ["interval", "disk_limit", "mem_limit"]
def configure(self):
self.interval = expand_to_seconds(
self.get_option("interval", self.interval))
self.disk_limit = int(self.get_option("disk_limit", self.disk_limit))
self.mem_limit = int(self.get_option("mem_limit", self.mem_limit))
def prepare_test(self):
self.log.info("Checking tank resources...")
self.__check_disk()
self.__check_mem()
def is_test_finished(self):
self.log.debug("Checking tank resources...")
if time.time() - self.last_check < self.interval:
return -1
self.__check_disk()
self.__check_mem()
self.last_check = time.time()
return -1
def __check_disk(self):
''' raise exception on disk space exceeded '''
cmd = "sh -c \"df --no-sync -m -P -x fuse -x tmpfs -x devtmpfs -x davfs -x nfs "
cmd += self.core.artifacts_base_dir
cmd += " | tail -n 1 | awk '{print \$4}' \""
res = execute(cmd, True, 0.1, True)
logging.debug("Result: %s", res)
if not len(res[1]):
self.log.debug("No disk usage info: %s", res[2])
return
disk_free = res[1]
self.log.debug(
"Disk free space: %s/%s", disk_free.strip(), self.disk_limit)
if int(disk_free.strip()) < self.disk_limit:
raise RuntimeError(
"Not enough local resources: disk space less than %sMB in %s: %sMB"
% (
self.disk_limit, self.core.artifacts_base_dir,
int(disk_free.strip())))
def __check_mem(self):
''' raise exception on RAM exceeded '''
mem_free = psutil.virtual_memory().available / 2 ** 20
self.log.debug("Memory free: %s/%s", mem_free, self.mem_limit)
if mem_free < self.mem_limit:
raise RuntimeError(
"Not enough resources: free memory less "
"than %sMB: %sMB" % (self.mem_limit, mem_free))
|
from datetime import timedelta
import logging
from horimote import Client, keys
from horimote.exceptions import AuthenticationError
import voluptuous as vol
from homeassistant import util
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_CHANNEL,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Horizon"
DEFAULT_PORT = 5900
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(seconds=1)
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
SUPPORT_HORIZON = (
SUPPORT_NEXT_TRACK
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Horizon platform."""
host = config[CONF_HOST]
name = config[CONF_NAME]
port = config[CONF_PORT]
try:
client = Client(host, port=port)
except AuthenticationError as msg:
_LOGGER.error("Authentication to %s at %s failed: %s", name, host, msg)
return
except OSError as msg:
# occurs if horizon box is offline
_LOGGER.error("Connection to %s at %s failed: %s", name, host, msg)
raise PlatformNotReady from msg
_LOGGER.info("Connection to %s at %s established", name, host)
add_entities([HorizonDevice(client, name, keys)], True)
class HorizonDevice(MediaPlayerEntity):
"""Representation of a Horizon HD Recorder."""
def __init__(self, client, name, remote_keys):
"""Initialize the remote."""
self._client = client
self._name = name
self._state = None
self._keys = remote_keys
@property
def name(self):
"""Return the name of the remote."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_HORIZON
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update(self):
"""Update State using the media server running on the Horizon."""
try:
if self._client.is_powered_on():
self._state = STATE_PLAYING
else:
self._state = STATE_OFF
except OSError:
self._state = STATE_OFF
def turn_on(self):
"""Turn the device on."""
if self._state == STATE_OFF:
self._send_key(self._keys.POWER)
def turn_off(self):
"""Turn the device off."""
if self._state != STATE_OFF:
self._send_key(self._keys.POWER)
def media_previous_track(self):
"""Channel down."""
self._send_key(self._keys.CHAN_DOWN)
self._state = STATE_PLAYING
def media_next_track(self):
"""Channel up."""
self._send_key(self._keys.CHAN_UP)
self._state = STATE_PLAYING
def media_play(self):
"""Send play command."""
self._send_key(self._keys.PAUSE)
self._state = STATE_PLAYING
def media_pause(self):
"""Send pause command."""
self._send_key(self._keys.PAUSE)
self._state = STATE_PAUSED
def media_play_pause(self):
"""Send play/pause command."""
self._send_key(self._keys.PAUSE)
if self._state == STATE_PAUSED:
self._state = STATE_PLAYING
else:
self._state = STATE_PAUSED
def play_media(self, media_type, media_id, **kwargs):
"""Play media / switch to channel."""
if MEDIA_TYPE_CHANNEL == media_type:
try:
self._select_channel(int(media_id))
self._state = STATE_PLAYING
except ValueError:
_LOGGER.error("Invalid channel: %s", media_id)
else:
_LOGGER.error(
"Invalid media type %s. Supported type: %s",
media_type,
MEDIA_TYPE_CHANNEL,
)
def _select_channel(self, channel):
"""Select a channel (taken from einder library, thx)."""
self._send(channel=channel)
def _send_key(self, key):
"""Send a key to the Horizon device."""
self._send(key=key)
def _send(self, key=None, channel=None):
"""Send a key to the Horizon device."""
try:
if key:
self._client.send_key(key)
elif channel:
self._client.select_channel(channel)
except OSError as msg:
_LOGGER.error(
"%s disconnected: %s. Trying to reconnect...", self._name, msg
)
# for reconnect, first gracefully disconnect
self._client.disconnect()
try:
self._client.connect()
self._client.authorize()
except AuthenticationError as msg:
_LOGGER.error("Authentication to %s failed: %s", self._name, msg)
return
except OSError as msg:
# occurs if horizon box is offline
_LOGGER.error("Reconnect to %s failed: %s", self._name, msg)
return
self._send(key=key, channel=channel)
|
import diamond.collector
import os
class ConnTrackCollector(diamond.collector.Collector):
"""
Collector of number of conntrack connections
"""
def get_default_config_help(self):
"""
Return help text for collector configuration
"""
config_help = super(ConnTrackCollector, self).get_default_config_help()
config_help.update({
"dir": "Directories with files of interest, comma seperated",
"files": "List of files to collect statistics from",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(ConnTrackCollector, self).get_default_config()
config.update({
"path": "conntrack",
"dir": "/proc/sys/net/ipv4/netfilter,/proc/sys/net/netfilter",
"files": "ip_conntrack_count,ip_conntrack_max,"
"nf_conntrack_count,nf_conntrack_max",
})
return config
def collect(self):
"""
Collect metrics
"""
collected = {}
dirs = []
files = []
if isinstance(self.config['dir'], basestring):
dirs = [d.strip() for d in self.config['dir'].split(',')]
elif isinstance(self.config['dir'], list):
dirs = self.config['dir']
if isinstance(self.config['files'], basestring):
files = [f.strip() for f in self.config['files'].split(',')]
elif isinstance(self.config['files'], list):
files = self.config['files']
for sdir in dirs:
for sfile in files:
if sfile.endswith('conntrack_count'):
metric_name = 'ip_conntrack_count'
elif sfile.endswith('conntrack_max'):
metric_name = 'ip_conntrack_max'
else:
self.log.error('Unknown file for collection: %s', sfile)
continue
fpath = os.path.join(sdir, sfile)
if not os.path.exists(fpath):
continue
try:
with open(fpath, "r") as fhandle:
metric = float(fhandle.readline().rstrip("\n"))
collected[metric_name] = metric
except Exception as exception:
self.log.error("Failed to collect from '%s': %s",
fpath,
exception)
if not collected:
self.log.error('No metric was collected, looks like '
'nf_conntrack/ip_conntrack kernel module was '
'not loaded')
else:
for key in collected.keys():
self.publish(key, collected[key])
|
import voluptuous as vol
from homeassistant.core import HomeAssistant
from homeassistant.helpers.discovery import async_load_platform
from .const import DOMAIN
CONFIG_SCHEMA = vol.Schema(
{vol.Optional(DOMAIN): {}},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the zodiac component."""
hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, config))
return True
|
import json
from django.db import migrations
def fix_enforced_checks(apps, schema_editor):
Component = apps.get_model("trans", "Component")
db_alias = schema_editor.connection.alias
for component in Component.objects.using(db_alias).filter(
enforced_checks__contains="'"
):
component.enforced_checks = json.loads(
component.enforced_checks.replace("'", '"')
)
component.save(update_fields=["enforced_checks"])
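# Illustrative example: a row whose enforced_checks column holds the Python-repr string
# "['same', 'python_format']" matches the filter on "'" and is rewritten to the valid
# JSON list ["same", "python_format"] before saving.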
class Migration(migrations.Migration):
dependencies = [
("trans", "0095_fix_json_units"),
]
operations = [
migrations.RunPython(
fix_enforced_checks, migrations.RunPython.noop, elidable=True
),
]
|
import pandas as pd
import pytest
import pytz
from qstrader.asset.universe.dynamic import DynamicUniverse
@pytest.mark.parametrize(
'asset_dates,dt,expected',
[
(
{
'EQ:SPY': pd.Timestamp('1993-01-01 14:30:00', tz=pytz.utc),
'EQ:AGG': pd.Timestamp('2003-01-01 14:30:00', tz=pytz.utc),
'EQ:TLT': pd.Timestamp('2012-01-01 14:30:00', tz=pytz.utc)
},
pd.Timestamp('1990-01-01 14:30:00', tz=pytz.utc),
[]
),
(
{
'EQ:SPY': pd.Timestamp('1993-01-01 14:30:00', tz=pytz.utc),
'EQ:AGG': pd.Timestamp('2003-01-01 14:30:00', tz=pytz.utc),
'EQ:TLT': pd.Timestamp('2012-01-01 14:30:00', tz=pytz.utc)
},
pd.Timestamp('1995-01-01 14:30:00', tz=pytz.utc),
['EQ:SPY']
),
(
{
'EQ:SPY': pd.Timestamp('1993-01-01 14:30:00', tz=pytz.utc),
'EQ:AGG': pd.Timestamp('2003-01-01 14:30:00', tz=pytz.utc),
'EQ:TLT': pd.Timestamp('2012-01-01 14:30:00', tz=pytz.utc)
},
pd.Timestamp('2005-01-01 14:30:00', tz=pytz.utc),
['EQ:SPY', 'EQ:AGG']
),
(
{
'EQ:SPY': pd.Timestamp('1993-01-01 14:30:00', tz=pytz.utc),
'EQ:AGG': pd.Timestamp('2003-01-01 14:30:00', tz=pytz.utc),
'EQ:TLT': pd.Timestamp('2012-01-01 14:30:00', tz=pytz.utc)
},
pd.Timestamp('2015-01-01 14:30:00', tz=pytz.utc),
['EQ:SPY', 'EQ:AGG', 'EQ:TLT']
)
]
)
def test_dynamic_universe(asset_dates, dt, expected):
"""
Checks that the DynamicUniverse correctly returns the
list of assets for a particular datetime.
"""
universe = DynamicUniverse(asset_dates)
assert set(universe.get_assets(dt)) == set(expected)
|
from homeassistant.components import mysensors
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
DOMAIN,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.core import callback
import homeassistant.util.color as color_util
from homeassistant.util.color import rgb_hex_to_rgb_list
SUPPORT_MYSENSORS_RGBW = SUPPORT_COLOR | SUPPORT_WHITE_VALUE
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the mysensors platform for lights."""
device_class_map = {
"S_DIMMER": MySensorsLightDimmer,
"S_RGB_LIGHT": MySensorsLightRGB,
"S_RGBW_LIGHT": MySensorsLightRGBW,
}
mysensors.setup_mysensors_platform(
hass,
DOMAIN,
discovery_info,
device_class_map,
async_add_entities=async_add_entities,
)
class MySensorsLight(mysensors.device.MySensorsEntity, LightEntity):
"""Representation of a MySensors Light child node."""
def __init__(self, *args):
"""Initialize a MySensors Light."""
super().__init__(*args)
self._state = None
self._brightness = None
self._hs = None
self._white = None
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
return self._brightness
@property
def hs_color(self):
"""Return the hs color value [int, int]."""
return self._hs
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
return self._white
@property
def assumed_state(self):
"""Return true if unable to access real state of entity."""
return self.gateway.optimistic
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def _turn_on_light(self):
"""Turn on light child device."""
set_req = self.gateway.const.SetReq
if self._state:
return
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_LIGHT, 1, ack=1
)
if self.gateway.optimistic:
# optimistically assume that light has changed state
self._state = True
self._values[set_req.V_LIGHT] = STATE_ON
def _turn_on_dimmer(self, **kwargs):
"""Turn on dimmer child device."""
set_req = self.gateway.const.SetReq
brightness = self._brightness
if (
ATTR_BRIGHTNESS not in kwargs
or kwargs[ATTR_BRIGHTNESS] == self._brightness
or set_req.V_DIMMER not in self._values
):
return
brightness = kwargs[ATTR_BRIGHTNESS]
percent = round(100 * brightness / 255)
self.gateway.set_child_value(
self.node_id, self.child_id, set_req.V_DIMMER, percent, ack=1
)
if self.gateway.optimistic:
# optimistically assume that light has changed state
self._brightness = brightness
self._values[set_req.V_DIMMER] = percent
def _turn_on_rgb_and_w(self, hex_template, **kwargs):
"""Turn on RGB or RGBW child device."""
rgb = list(color_util.color_hs_to_RGB(*self._hs))
white = self._white
hex_color = self._values.get(self.value_type)
hs_color = kwargs.get(ATTR_HS_COLOR)
if hs_color is not None:
new_rgb = color_util.color_hs_to_RGB(*hs_color)
else:
new_rgb = None
new_white = kwargs.get(ATTR_WHITE_VALUE)
if new_rgb is None and new_white is None:
return
if new_rgb is not None:
rgb = list(new_rgb)
if hex_template == "%02x%02x%02x%02x":
if new_white is not None:
rgb.append(new_white)
else:
rgb.append(white)
hex_color = hex_template % tuple(rgb)
if len(rgb) > 3:
white = rgb.pop()
self.gateway.set_child_value(
self.node_id, self.child_id, self.value_type, hex_color, ack=1
)
if self.gateway.optimistic:
# optimistically assume that light has changed state
self._hs = color_util.color_RGB_to_hs(*rgb)
self._white = white
self._values[self.value_type] = hex_color
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
value_type = self.gateway.const.SetReq.V_LIGHT
self.gateway.set_child_value(self.node_id, self.child_id, value_type, 0, ack=1)
if self.gateway.optimistic:
# optimistically assume that light has changed state
self._state = False
self._values[value_type] = STATE_OFF
self.async_write_ha_state()
@callback
def _async_update_light(self):
"""Update the controller with values from light child."""
value_type = self.gateway.const.SetReq.V_LIGHT
self._state = self._values[value_type] == STATE_ON
@callback
def _async_update_dimmer(self):
"""Update the controller with values from dimmer child."""
value_type = self.gateway.const.SetReq.V_DIMMER
if value_type in self._values:
self._brightness = round(255 * int(self._values[value_type]) / 100)
if self._brightness == 0:
self._state = False
@callback
def _async_update_rgb_or_w(self):
"""Update the controller with values from RGB or RGBW child."""
value = self._values[self.value_type]
color_list = rgb_hex_to_rgb_list(value)
if len(color_list) > 3:
self._white = color_list.pop()
self._hs = color_util.color_RGB_to_hs(*color_list)
class MySensorsLightDimmer(MySensorsLight):
"""Dimmer child class to MySensorsLight."""
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
self._turn_on_light()
self._turn_on_dimmer(**kwargs)
if self.gateway.optimistic:
self.async_write_ha_state()
async def async_update(self):
"""Update the controller with the latest value from a sensor."""
await super().async_update()
self._async_update_light()
self._async_update_dimmer()
class MySensorsLightRGB(MySensorsLight):
"""RGB child class to MySensorsLight."""
@property
def supported_features(self):
"""Flag supported features."""
set_req = self.gateway.const.SetReq
if set_req.V_DIMMER in self._values:
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
return SUPPORT_COLOR
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
self._turn_on_light()
self._turn_on_dimmer(**kwargs)
self._turn_on_rgb_and_w("%02x%02x%02x", **kwargs)
if self.gateway.optimistic:
self.async_write_ha_state()
async def async_update(self):
"""Update the controller with the latest value from a sensor."""
await super().async_update()
self._async_update_light()
self._async_update_dimmer()
self._async_update_rgb_or_w()
class MySensorsLightRGBW(MySensorsLightRGB):
"""RGBW child class to MySensorsLightRGB."""
@property
def supported_features(self):
"""Flag supported features."""
set_req = self.gateway.const.SetReq
if set_req.V_DIMMER in self._values:
return SUPPORT_BRIGHTNESS | SUPPORT_MYSENSORS_RGBW
return SUPPORT_MYSENSORS_RGBW
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
self._turn_on_light()
self._turn_on_dimmer(**kwargs)
self._turn_on_rgb_and_w("%02x%02x%02x%02x", **kwargs)
if self.gateway.optimistic:
self.async_write_ha_state()
|
import json
import base64
import webruntime
from .. import config, set_log_level
from ._app import App, manager
from ._component2 import PyComponent, JsComponent
from ._server import current_server
from ._assetstore import assets
from ._clientcore import serializer
from . import logger
reprs = json.dumps
## Main loop functions
def start():
"""
Start the server and event loop. This function generally does not
return until the application is stopped (although it may in
interactive environments (e.g. Pyzo)).
In more detail, this calls ``run_forever()`` on the asyncio event loop
associated with the current server.
"""
server = current_server()
server.start()
def run():
"""
Start the event loop in desktop app mode; the server will close
down when there are no more connections.
"""
server = current_server()
server._auto_stop = True
return start()
def stop():
"""
Stop the event loop. This function is thread safe (it can be used
even if ``app.start()`` was called from another thread).
The server can be restarted after it has been stopped. Note that
calling ``stop()`` too often will cause a subsequent call to ``start()``
to return almost immediately.
"""
server = current_server()
server.stop()
@manager.reaction('connections_changed')
def _auto_closer(*events):
server = current_server()
if not getattr(server, '_auto_stop', False):
return
for name in manager.get_app_names():
proxies = manager.get_connections(name)
if proxies:
return
else:
server.stop()
## App functions
class NoteBookHelper:
""" Object that captures commands send to the websocket during the
execution of a cell, and then applies these commands using a script
node. This way, Flexx widgets keep working in the exported notebook.
"""
close_code = None
def __init__(self, session):
self._session = session
self._real_ws = None
self._commands = []
self.enable()
def enable(self):
from IPython import get_ipython
ip = get_ipython()
ip.events.register('pre_execute', self.capture)
ip.events.register('post_execute', self.release)
def capture(self):
if self._real_ws is not None:
logger.warning('NoteBookHelper is already in capture mode.')
else:
if self._session._ws is None:
raise RuntimeError(
'Session is missing a websocket connection. If you are '
'running in JupyterLab, this could be due to '
'https://github.com/jupyterlab/jupyterlab/issues/3118')
self._real_ws = self._session._ws
self._session._ws = self
def release(self):
if self._session._ws is self:
self._session._ws = self._real_ws
self._real_ws = None
if self._commands:
from IPython.display import display, Javascript
lines = []
lines.append('var bb64 = flexx.require("bb64");')
lines.append('function cmd(c) {'
'flexx.s1._receive_command('
'flexx.serializer.decode('
'bb64.decode(c)));}')
for command in self._commands: # also DEFINE commands!
command_str = base64.encodebytes(serializer.encode(command)).decode()
lines.append('cmd("' + command_str.replace('\n', '') + '");')
self._commands = []
display(Javascript('\n'.join(lines)))
def write_command(self, cmd):
assert isinstance(cmd, tuple) and len(cmd) >= 1
self._commands.append(cmd)
def init_notebook():
""" Initialize the Jupyter notebook by injecting the necessary CSS
and JS into the browser. Note that any Flexx-based libraries that
you plan to use should probably be imported *before* calling this.
Does not currently work in JupyterLab because of
https://github.com/jupyterlab/jupyterlab/issues/3118.
"""
# Note: not using IPython Comm objects yet, since they seem rather
# undocumented and I could not get them to work when I tried for a bit.
# This means though, that flexx in the notebook only works on localhost.
from IPython.display import display, clear_output, HTML
# from .. import ui # noqa - make ui assets available
# Make default log level warning instead of "info" to avoid spamming
# This preserves the log level set by the user
config.load_from_string('log_level = warning', 'init_notebook')
set_log_level(config.log_level)
# Get session or create new
session = manager.get_default_session()
if session is None:
session = manager.create_default_session()
# Check if already loaded, if so, re-connect
if not getattr(session, 'init_notebook_done', False):
session.init_notebook_done = True
else:
display(HTML("<i>Flexx already loaded (the notebook cannot export now)</i>"))
return # Don't inject Flexx twice
# Open server - the notebook helper takes care of the JS resulting
# from running a cell, but any interaction goes over the websocket.
server = current_server()
host, port = server.serving
# Install helper to make things work in exported notebooks
NoteBookHelper(session)
proto = 'wss' if server.protocol == 'https' else 'ws'
url = '%s://%s:%i/flexx/ws/%s' % (proto, host, port, session.app_name)
# Determine JS snippets to run before and after init. The former is only
# run in live notebooks.
flexx_pre_init = "<script>window.flexx = {is_live_notebook: true};</script>"
flexx_post_init = """<script>
flexx.is_notebook = true;
flexx.is_exported = !flexx.is_live_notebook;
/* If Phosphor is already loaded, disable our Phosphor CSS. */
if (window.jupyter && window.jupyter.lab) {
document.getElementById('phosphor-all.css').disabled = true;
}
flexx.init();
flexx.create_session("%s", "%s", "%s");
</script>""" % (session.app_name, session.id, url)
# Compose HTML to inject
t = assets.get_asset('flexx-core.js').to_html('{}', 0)
t += flexx_post_init
t += "<i>Flexx is ready for use</i>\n"
display(HTML(flexx_pre_init)) # Create initial Flexx info dict
clear_output() # Make sure the info dict is gone in exported notebooks
display(HTML(t))
# Note: the Widget._repr_html_() method is responsible for making
# the widget show up in the notebook output area.
# Note: asyncio will need to be enabled via %gui asyncio
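# Sketch of typical notebook usage (assumed, not part of this module; widgets come from flexx.ui):
#   from flexx import app, ui
#   app.init_notebook()
#   button = ui.Button(text='Hello')  # shows up in the output area via Widget._repr_html_()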
# Keep serve and launch, they are still quite nice shorthands to quickly
# get something done.
def serve(cls, name=None, properties=None):
""" Shorthand for ``app.App(cls).serve(name)``.
"""
if properties is not None:
raise RuntimeError('serve(... properties) is deprecated, '
'use app.App().serve() instead.')
# Note: this talks to the manager; it has nothing to do with the server
assert (isinstance(cls, type) and issubclass(cls, (PyComponent, JsComponent)))
a = App(cls)
a.serve(name)
return cls
def launch(cls, runtime=None, properties=None, **runtime_kwargs):
""" Shorthand for ``app.App(cls).launch(runtime, **runtime_kwargs)``.
"""
if properties is not None:
raise RuntimeError('launch(... properties) is deprecated, '
'use app.App().launch() instead.')
if isinstance(cls, str):
return webruntime.launch(cls, runtime, **runtime_kwargs)
assert (isinstance(cls, type) and issubclass(cls, (PyComponent, JsComponent)))
a = App(cls)
return a.launch(runtime, **runtime_kwargs)
def export(cls, filename, properties=None, **kwargs):
""" Shorthand for ``app.App(cls).export(filename, ...)``.
"""
if properties is not None:
raise RuntimeError('export(... properties) is deprecated, '
'use app.App(...).export() instead.')
assert (isinstance(cls, type) and issubclass(cls, (PyComponent, JsComponent)))
a = App(cls)
return a.export(filename, **kwargs)
|
import pytest
from homeassistant.components.huawei_lte import device_tracker
@pytest.mark.parametrize(
("value", "expected"),
(
("HTTP", "http"),
("ID", "id"),
("IPAddress", "ip_address"),
("HTTPResponse", "http_response"),
("foo_bar", "foo_bar"),
),
)
def test_better_snakecase(value, expected):
"""Test that better snakecase works better."""
assert device_tracker._better_snakecase(value) == expected
|
import contextlib
import datetime
import inspect
import warnings
from distutils.version import LooseVersion
from functools import partial
import numpy as np
import pandas as pd
from . import dask_array_compat, dask_array_ops, dtypes, npcompat, nputils
from .nputils import nanfirst, nanlast
from .pycompat import (
cupy_array_type,
dask_array_type,
is_duck_dask_array,
sparse_array_type,
)
from .utils import is_duck_array
try:
import dask.array as dask_array
from dask.base import tokenize
except ImportError:
dask_array = None # type: ignore
def _dask_or_eager_func(
name,
eager_module=np,
dask_module=dask_array,
list_of_args=False,
array_args=slice(1),
requires_dask=None,
):
"""Create a function that dispatches to dask for dask array inputs."""
if dask_module is not None:
def f(*args, **kwargs):
if list_of_args:
dispatch_args = args[0]
else:
dispatch_args = args[array_args]
if any(is_duck_dask_array(a) for a in dispatch_args):
try:
wrapped = getattr(dask_module, name)
except AttributeError as e:
raise AttributeError(f"{e}: requires dask >={requires_dask}")
else:
wrapped = getattr(eager_module, name)
return wrapped(*args, **kwargs)
else:
def f(*args, **kwargs):
return getattr(eager_module, name)(*args, **kwargs)
return f
def fail_on_dask_array_input(values, msg=None, func_name=None):
if is_duck_dask_array(values):
if msg is None:
msg = "%r is not yet a valid method on dask arrays"
if func_name is None:
func_name = inspect.stack()[1][3]
raise NotImplementedError(msg % func_name)
# switch to use dask.array / __array_function__ version when dask supports it:
# https://github.com/dask/dask/pull/4822
moveaxis = npcompat.moveaxis
around = _dask_or_eager_func("around")
isclose = _dask_or_eager_func("isclose")
isnat = np.isnat
isnan = _dask_or_eager_func("isnan")
zeros_like = _dask_or_eager_func("zeros_like")
pandas_isnull = _dask_or_eager_func("isnull", eager_module=pd)
def isnull(data):
data = asarray(data)
scalar_type = data.dtype.type
if issubclass(scalar_type, (np.datetime64, np.timedelta64)):
# datetime types use NaT for null
# note: must check timedelta64 before integers, because currently
# timedelta64 inherits from np.integer
return isnat(data)
elif issubclass(scalar_type, np.inexact):
# float types use NaN for null
return isnan(data)
elif issubclass(scalar_type, (np.bool_, np.integer, np.character, np.void)):
# these types cannot represent missing values
return zeros_like(data, dtype=bool)
else:
# at this point, array should have dtype=object
if isinstance(data, (np.ndarray, dask_array_type)):
return pandas_isnull(data)
else:
# Not reachable yet, but intended for use with other duck array
# types. For full consistency with pandas, we should accept None as
# a null value as well as NaN, but it isn't clear how to do this
# with duck typing.
return data != data
def notnull(data):
return ~isnull(data)
transpose = _dask_or_eager_func("transpose")
_where = _dask_or_eager_func("where", array_args=slice(3))
isin = _dask_or_eager_func("isin", array_args=slice(2))
take = _dask_or_eager_func("take")
broadcast_to = _dask_or_eager_func("broadcast_to")
pad = _dask_or_eager_func("pad", dask_module=dask_array_compat)
_concatenate = _dask_or_eager_func("concatenate", list_of_args=True)
_stack = _dask_or_eager_func("stack", list_of_args=True)
array_all = _dask_or_eager_func("all")
array_any = _dask_or_eager_func("any")
tensordot = _dask_or_eager_func("tensordot", array_args=slice(2))
einsum = _dask_or_eager_func("einsum", array_args=slice(1, None))
def gradient(x, coord, axis, edge_order):
if is_duck_dask_array(x):
return dask_array.gradient(x, coord, axis=axis, edge_order=edge_order)
return np.gradient(x, coord, axis=axis, edge_order=edge_order)
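# trapz() below implements trapezoidal-rule integration of y with respect to x along `axis`:
# dx is computed from x and padded with trailing new axes so it broadcasts against y.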
def trapz(y, x, axis):
if axis < 0:
axis = y.ndim + axis
x_sl1 = (slice(1, None),) + (None,) * (y.ndim - axis - 1)
x_sl2 = (slice(None, -1),) + (None,) * (y.ndim - axis - 1)
slice1 = (slice(None),) * axis + (slice(1, None),)
slice2 = (slice(None),) * axis + (slice(None, -1),)
dx = x[x_sl1] - x[x_sl2]
integrand = dx * 0.5 * (y[tuple(slice1)] + y[tuple(slice2)])
return sum(integrand, axis=axis, skipna=False)
masked_invalid = _dask_or_eager_func(
"masked_invalid", eager_module=np.ma, dask_module=getattr(dask_array, "ma", None)
)
def astype(data, dtype, **kwargs):
try:
import sparse
except ImportError:
sparse = None
if (
sparse is not None
and isinstance(data, sparse_array_type)
and LooseVersion(sparse.__version__) < LooseVersion("0.11.0")
and "casting" in kwargs
):
warnings.warn(
"The current version of sparse does not support the 'casting' argument. It will be ignored in the call to astype().",
RuntimeWarning,
stacklevel=4,
)
kwargs.pop("casting")
return data.astype(dtype, **kwargs)
def asarray(data, xp=np):
return data if is_duck_array(data) else xp.asarray(data)
def as_shared_dtype(scalars_or_arrays):
"""Cast a arrays to a shared dtype using xarray's type promotion rules."""
if any([isinstance(x, cupy_array_type) for x in scalars_or_arrays]):
import cupy as cp
arrays = [asarray(x, xp=cp) for x in scalars_or_arrays]
else:
arrays = [asarray(x) for x in scalars_or_arrays]
# Pass arrays directly instead of dtypes to result_type so scalars
# get handled properly.
# Note that result_type() safely gets the dtype from dask arrays without
# evaluating them.
out_type = dtypes.result_type(*arrays)
return [x.astype(out_type, copy=False) for x in arrays]
def lazy_array_equiv(arr1, arr2):
"""Like array_equal, but doesn't actually compare values.
Returns True when arr1, arr2 identical or their dask tokens are equal.
Returns False when shapes are not equal.
Returns None when equality cannot be determined: one or both of arr1, arr2 are numpy arrays,
or their dask tokens are not equal.
"""
if arr1 is arr2:
return True
arr1 = asarray(arr1)
arr2 = asarray(arr2)
if arr1.shape != arr2.shape:
return False
if dask_array and is_duck_dask_array(arr1) and is_duck_dask_array(arr2):
# GH3068, GH4221
if tokenize(arr1) == tokenize(arr2):
return True
else:
return None
return None
def allclose_or_equiv(arr1, arr2, rtol=1e-5, atol=1e-8):
"""Like np.allclose, but also allows values to be NaN in both arrays"""
arr1 = asarray(arr1)
arr2 = asarray(arr2)
lazy_equiv = lazy_array_equiv(arr1, arr2)
if lazy_equiv is None:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", r"All-NaN (slice|axis) encountered")
return bool(isclose(arr1, arr2, rtol=rtol, atol=atol, equal_nan=True).all())
else:
return lazy_equiv
def array_equiv(arr1, arr2):
"""Like np.array_equal, but also allows values to be NaN in both arrays"""
arr1 = asarray(arr1)
arr2 = asarray(arr2)
lazy_equiv = lazy_array_equiv(arr1, arr2)
if lazy_equiv is None:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
flag_array = (arr1 == arr2) | (isnull(arr1) & isnull(arr2))
return bool(flag_array.all())
else:
return lazy_equiv
def array_notnull_equiv(arr1, arr2):
"""Like np.array_equal, but also allows values to be NaN in either or both
arrays
"""
arr1 = asarray(arr1)
arr2 = asarray(arr2)
lazy_equiv = lazy_array_equiv(arr1, arr2)
if lazy_equiv is None:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "In the future, 'NAT == x'")
flag_array = (arr1 == arr2) | isnull(arr1) | isnull(arr2)
return bool(flag_array.all())
else:
return lazy_equiv
def count(data, axis=None):
"""Count the number of non-NA in this array along the given axis or axes"""
return np.sum(np.logical_not(isnull(data)), axis=axis)
def where(condition, x, y):
"""Three argument where() with better dtype promotion rules."""
return _where(condition, *as_shared_dtype([x, y]))
def where_method(data, cond, other=dtypes.NA):
if other is dtypes.NA:
other = dtypes.get_fill_value(data.dtype)
return where(cond, data, other)
def fillna(data, other):
# we need to pass data first so pint has a chance of returning the
# correct unit
# TODO: revert after https://github.com/hgrecco/pint/issues/1019 is fixed
return where(notnull(data), data, other)
def concatenate(arrays, axis=0):
"""concatenate() with better dtype promotion rules."""
return _concatenate(as_shared_dtype(arrays), axis=axis)
def stack(arrays, axis=0):
"""stack() with better dtype promotion rules."""
return _stack(as_shared_dtype(arrays), axis=axis)
@contextlib.contextmanager
def _ignore_warnings_if(condition):
if condition:
with warnings.catch_warnings():
warnings.simplefilter("ignore")
yield
else:
yield
def _create_nan_agg_method(name, dask_module=dask_array, coerce_strings=False):
from . import nanops
def f(values, axis=None, skipna=None, **kwargs):
if kwargs.pop("out", None) is not None:
raise TypeError(f"`out` is not valid for {name}")
values = asarray(values)
if coerce_strings and values.dtype.kind in "SU":
values = values.astype(object)
func = None
if skipna or (skipna is None and values.dtype.kind in "cfO"):
nanname = "nan" + name
func = getattr(nanops, nanname)
else:
if name in ["sum", "prod"]:
kwargs.pop("min_count", None)
func = _dask_or_eager_func(name, dask_module=dask_module)
try:
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "All-NaN slice encountered")
return func(values, axis=axis, **kwargs)
except AttributeError:
if not is_duck_dask_array(values):
raise
try: # dask/dask#3133 dask sometimes needs dtype argument
# if func does not accept dtype, then raises TypeError
return func(values, axis=axis, dtype=values.dtype, **kwargs)
except (AttributeError, TypeError):
raise NotImplementedError(
f"{name} is not yet implemented on dask arrays"
)
f.__name__ = name
return f
# Attributes `numeric_only`, `available_min_count` are used for docs.
# See ops.inject_reduce_methods
argmax = _create_nan_agg_method("argmax", coerce_strings=True)
argmin = _create_nan_agg_method("argmin", coerce_strings=True)
max = _create_nan_agg_method("max", coerce_strings=True)
min = _create_nan_agg_method("min", coerce_strings=True)
sum = _create_nan_agg_method("sum")
sum.numeric_only = True
sum.available_min_count = True
std = _create_nan_agg_method("std")
std.numeric_only = True
var = _create_nan_agg_method("var")
var.numeric_only = True
median = _create_nan_agg_method("median", dask_module=dask_array_compat)
median.numeric_only = True
prod = _create_nan_agg_method("prod")
prod.numeric_only = True
prod.available_min_count = True
cumprod_1d = _create_nan_agg_method("cumprod")
cumprod_1d.numeric_only = True
cumsum_1d = _create_nan_agg_method("cumsum")
cumsum_1d.numeric_only = True
unravel_index = _dask_or_eager_func("unravel_index")
_mean = _create_nan_agg_method("mean")
def _datetime_nanmin(array):
"""nanmin() function for datetime64.
Caveats that this function deals with:
- In numpy < 1.18, min() on datetime64 incorrectly ignores NaT
- numpy nanmin() doesn't work on datetime64 (all versions at the moment of writing)
- dask min() does not work on datetime64 (all versions at the moment of writing)
"""
assert array.dtype.kind in "mM"
dtype = array.dtype
# (NaT).astype(float) does not produce NaN...
array = where(pandas_isnull(array), np.nan, array.astype(float))
array = min(array, skipna=True)
if isinstance(array, float):
array = np.array(array)
# ...but (NaN).astype("M8") does produce NaT
return array.astype(dtype)
def datetime_to_numeric(array, offset=None, datetime_unit=None, dtype=float):
"""Convert an array containing datetime-like data to numerical values.
Convert the datetime array to a timedelta relative to an offset.
Parameters
----------
array : array-like
Input data
offset: None, datetime or cftime.datetime
Datetime offset. If None, this is set by default to the array's minimum
value to reduce round off errors.
datetime_unit: {None, Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as}
If not None, convert output to a given datetime unit. Note that some
conversions are not allowed due to non-linear relationships between units.
dtype: dtype
Output dtype.
Returns
-------
array
Numerical representation of datetime object relative to an offset.
Notes
-----
Some datetime unit conversions won't work, for example from days to years, even
though some calendars would allow for them (e.g. no_leap). This is because there
is no `cftime.timedelta` object.
"""
# TODO: make this function dask-compatible?
# Set offset to minimum if not given
if offset is None:
if array.dtype.kind in "Mm":
offset = _datetime_nanmin(array)
else:
offset = min(array)
# Compute timedelta object.
# For np.datetime64, this can silently yield garbage due to overflow.
# One option is to enforce 1970-01-01 as the universal offset.
array = array - offset
# Scalar is converted to 0d-array
if not hasattr(array, "dtype"):
array = np.array(array)
# Convert timedelta objects to float by first converting to microseconds.
if array.dtype.kind in "O":
return py_timedelta_to_float(array, datetime_unit or "ns").astype(dtype)
# Convert np.NaT to np.nan
elif array.dtype.kind in "mM":
# Convert to specified timedelta units.
if datetime_unit:
array = array / np.timedelta64(1, datetime_unit)
return np.where(isnull(array), np.nan, array.astype(dtype))
def timedelta_to_numeric(value, datetime_unit="ns", dtype=float):
"""Convert a timedelta-like object to numerical values.
Parameters
----------
value : datetime.timedelta, numpy.timedelta64, pandas.Timedelta, str
Time delta representation.
datetime_unit : {Y, M, W, D, h, m, s, ms, us, ns, ps, fs, as}
The time units of the output values. Note that some conversions are not allowed due to
non-linear relationships between units.
dtype : type
The output data type.
"""
import datetime as dt
if isinstance(value, dt.timedelta):
out = py_timedelta_to_float(value, datetime_unit)
elif isinstance(value, np.timedelta64):
out = np_timedelta64_to_float(value, datetime_unit)
elif isinstance(value, pd.Timedelta):
out = pd_timedelta_to_float(value, datetime_unit)
elif isinstance(value, str):
try:
a = pd.to_timedelta(value)
except ValueError:
raise ValueError(
f"Could not convert {value!r} to timedelta64 using pandas.to_timedelta"
)
return py_timedelta_to_float(a, datetime_unit)
else:
raise TypeError(
f"Expected value of type str, pandas.Timedelta, datetime.timedelta "
f"or numpy.timedelta64, but received {type(value).__name__}"
)
return out.astype(dtype)
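# Illustrative example: timedelta_to_numeric("1h", datetime_unit="s") parses the string
# with pandas.to_timedelta and evaluates to 3600.0.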
def _to_pytimedelta(array, unit="us"):
return array.astype(f"timedelta64[{unit}]").astype(datetime.timedelta)
def np_timedelta64_to_float(array, datetime_unit):
"""Convert numpy.timedelta64 to float.
Notes
-----
The array is first converted to microseconds, which is less likely to
cause overflow errors.
"""
array = array.astype("timedelta64[ns]").astype(np.float64)
conversion_factor = np.timedelta64(1, "ns") / np.timedelta64(1, datetime_unit)
return conversion_factor * array
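# Illustrative example: np_timedelta64_to_float(np.timedelta64(1, "D"), "h") converts one
# day to nanoseconds and back out in hours, giving 24.0.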
def pd_timedelta_to_float(value, datetime_unit):
"""Convert pandas.Timedelta to float.
Notes
-----
Built on the assumption that pandas timedelta values are in nanoseconds,
which is also the numpy default resolution.
"""
value = value.to_timedelta64()
return np_timedelta64_to_float(value, datetime_unit)
def py_timedelta_to_float(array, datetime_unit):
"""Convert a timedelta object to a float, possibly at a loss of resolution."""
array = np.asarray(array)
array = np.reshape([a.total_seconds() for a in array.ravel()], array.shape) * 1e6
conversion_factor = np.timedelta64(1, "us") / np.timedelta64(1, datetime_unit)
return conversion_factor * array
def mean(array, axis=None, skipna=None, **kwargs):
"""inhouse mean that can handle np.datetime64 or cftime.datetime
dtypes"""
from .common import _contains_cftime_datetimes
array = asarray(array)
if array.dtype.kind in "Mm":
offset = _datetime_nanmin(array)
# xarray always uses np.datetime64[ns] for np.datetime64 data
dtype = "timedelta64[ns]"
return (
_mean(
datetime_to_numeric(array, offset), axis=axis, skipna=skipna, **kwargs
).astype(dtype)
+ offset
)
elif _contains_cftime_datetimes(array):
if is_duck_dask_array(array):
raise NotImplementedError(
"Computing the mean of an array containing "
"cftime.datetime objects is not yet implemented on "
"dask arrays."
)
offset = min(array)
timedeltas = datetime_to_numeric(array, offset, datetime_unit="us")
mean_timedeltas = _mean(timedeltas, axis=axis, skipna=skipna, **kwargs)
return _to_pytimedelta(mean_timedeltas, unit="us") + offset
else:
return _mean(array, axis=axis, skipna=skipna, **kwargs)
mean.numeric_only = True # type: ignore
def _nd_cum_func(cum_func, array, axis, **kwargs):
array = asarray(array)
if axis is None:
axis = tuple(range(array.ndim))
if isinstance(axis, int):
axis = (axis,)
out = array
for ax in axis:
out = cum_func(out, axis=ax, **kwargs)
return out
def cumprod(array, axis=None, **kwargs):
"""N-dimensional version of cumprod."""
return _nd_cum_func(cumprod_1d, array, axis, **kwargs)
def cumsum(array, axis=None, **kwargs):
"""N-dimensional version of cumsum."""
return _nd_cum_func(cumsum_1d, array, axis, **kwargs)
_fail_on_dask_array_input_skipna = partial(
fail_on_dask_array_input,
msg="%r with skipna=True is not yet implemented on dask arrays",
)
def first(values, axis, skipna=None):
"""Return the first non-NA elements in this array along the given axis"""
if (skipna or skipna is None) and values.dtype.kind not in "iSU":
# only bother for dtypes that can hold NaN
_fail_on_dask_array_input_skipna(values)
return nanfirst(values, axis)
return take(values, 0, axis=axis)
def last(values, axis, skipna=None):
"""Return the last non-NA elements in this array along the given axis"""
if (skipna or skipna is None) and values.dtype.kind not in "iSU":
# only bother for dtypes that can hold NaN
_fail_on_dask_array_input_skipna(values)
return nanlast(values, axis)
return take(values, -1, axis=axis)
def rolling_window(array, axis, window, center, fill_value):
"""
Make an ndarray with a rolling window along the axis-th dimension.
The rolling dimension will be placed as the last dimension.
"""
if is_duck_dask_array(array):
return dask_array_ops.rolling_window(array, axis, window, center, fill_value)
else: # np.ndarray
return nputils.rolling_window(array, axis, window, center, fill_value)
def least_squares(lhs, rhs, rcond=None, skipna=False):
"""Return the coefficients and residuals of a least-squares fit."""
if is_duck_dask_array(rhs):
return dask_array_ops.least_squares(lhs, rhs, rcond=rcond, skipna=skipna)
else:
return nputils.least_squares(lhs, rhs, rcond=rcond, skipna=skipna)
|
from __future__ import unicode_literals
from rules.BaseTrick import strnumjoin
def SNrule(strs, nums):
for s in strs:
for n in nums:
for _ in strnumjoin(s, n):
yield _
|
import discord
import re
from datetime import timezone
from typing import Union, Set, Literal
from redbot.core import checks, Config, modlog, commands
from redbot.core.bot import Red
from redbot.core.i18n import Translator, cog_i18n, set_contextual_locales_from_guild
from redbot.core.utils import AsyncIter
from redbot.core.utils.chat_formatting import pagify, humanize_list
_ = Translator("Filter", __file__)
@cog_i18n(_)
class Filter(commands.Cog):
"""Filter unwanted words and phrases from text channels."""
def __init__(self, bot: Red):
super().__init__()
self.bot = bot
self.config = Config.get_conf(self, 4766951341)
default_guild_settings = {
"filter": [],
"filterban_count": 0,
"filterban_time": 0,
"filter_names": False,
"filter_default_name": "John Doe",
}
default_member_settings = {"filter_count": 0, "next_reset_time": 0}
default_channel_settings = {"filter": []}
self.config.register_guild(**default_guild_settings)
self.config.register_member(**default_member_settings)
self.config.register_channel(**default_channel_settings)
self.register_task = self.bot.loop.create_task(self.register_filterban())
self.pattern_cache = {}
async def red_delete_data_for_user(
self,
*,
requester: Literal["discord_deleted_user", "owner", "user", "user_strict"],
user_id: int,
):
if requester != "discord_deleted_user":
return
all_members = await self.config.all_members()
async for guild_id, guild_data in AsyncIter(all_members.items(), steps=100):
if user_id in guild_data:
await self.config.member_from_ids(guild_id, user_id).clear()
def cog_unload(self):
self.register_task.cancel()
@staticmethod
async def register_filterban():
try:
await modlog.register_casetype(
"filterban", False, ":filing_cabinet: :hammer:", "Filter ban"
)
except RuntimeError:
pass
@commands.group()
@commands.guild_only()
@checks.admin_or_permissions(manage_guild=True)
async def filterset(self, ctx: commands.Context):
"""Manage filter settings."""
pass
@filterset.command(name="defaultname")
async def filter_default_name(self, ctx: commands.Context, name: str):
"""Set the nickname for users with a filtered name.
Note that this has no effect if filtering names is disabled
(to toggle, run `[p]filter names`).
The default name used is *John Doe*.
"""
guild = ctx.guild
await self.config.guild(guild).filter_default_name.set(name)
await ctx.send(_("The name to use on filtered names has been set."))
@filterset.command(name="ban")
async def filter_ban(self, ctx: commands.Context, count: int, timeframe: int):
"""Set the filter's autoban conditions.
Users will be banned if they send `<count>` filtered words in
`<timeframe>` seconds.
Set both to zero to disable autoban.
"""
if (count <= 0) != (timeframe <= 0):
await ctx.send(
_(
"Count and timeframe either both need to be 0 "
"or both need to be greater than 0!"
)
)
return
elif count == 0 and timeframe == 0:
async with self.config.guild(ctx.guild).all() as guild_data:
guild_data["filterban_count"] = 0
guild_data["filterban_time"] = 0
await ctx.send(_("Autoban disabled."))
else:
async with self.config.guild(ctx.guild).all() as guild_data:
guild_data["filterban_count"] = count
guild_data["filterban_time"] = timeframe
await ctx.send(_("Count and time have been set."))
@commands.group(name="filter")
@commands.guild_only()
@checks.mod_or_permissions(manage_messages=True)
async def _filter(self, ctx: commands.Context):
"""Add or remove words from server filter.
Use double quotes to add or remove sentences.
"""
pass
@_filter.command(name="list")
async def _global_list(self, ctx: commands.Context):
"""Send a list of this servers filtered words."""
server = ctx.guild
author = ctx.author
word_list = await self.config.guild(server).filter()
if not word_list:
await ctx.send(_("There is no current words setup to be filtered in this server."))
return
words = humanize_list(word_list)
words = _("Filtered in this server:") + "\n\n" + words
try:
for page in pagify(words, delims=[" ", "\n"], shorten_by=8):
await author.send(page)
except discord.Forbidden:
await ctx.send(_("I can't send direct messages to you."))
@_filter.group(name="channel")
async def _filter_channel(self, ctx: commands.Context):
"""Add or remove words from channel filter.
Use double quotes to add or remove sentences.
"""
pass
@_filter_channel.command(name="list")
async def _channel_list(self, ctx: commands.Context):
"""Send the list of the channel's filtered words."""
channel = ctx.channel
author = ctx.author
word_list = await self.config.channel(channel).filter()
if not word_list:
await ctx.send(_("There is no current words setup to be filtered in this channel."))
return
words = humanize_list(word_list)
words = _("Filtered in this channel:") + "\n\n" + words
try:
for page in pagify(words, delims=[" ", "\n"], shorten_by=8):
await author.send(page)
except discord.Forbidden:
await ctx.send(_("I can't send direct messages to you."))
@_filter_channel.command("add")
async def filter_channel_add(self, ctx: commands.Context, *words: str):
"""Add words to the filter.
Use double quotes to add sentences.
Examples:
- `[p]filter channel add word1 word2 word3`
- `[p]filter channel add "This is a sentence"`
"""
channel = ctx.channel
added = await self.add_to_filter(channel, words)
if added:
self.invalidate_cache(ctx.guild, ctx.channel)
await ctx.send(_("Words added to filter."))
else:
await ctx.send(_("Words already in the filter."))
@_filter_channel.command("remove")
async def filter_channel_remove(self, ctx: commands.Context, *words: str):
"""Remove words from the filter.
Use double quotes to remove sentences.
Examples:
- `[p]filter channel remove word1 word2 word3`
- `[p]filter channel remove "This is a sentence"`
"""
channel = ctx.channel
removed = await self.remove_from_filter(channel, words)
if removed:
await ctx.send(_("Words removed from filter."))
self.invalidate_cache(ctx.guild, ctx.channel)
else:
await ctx.send(_("Those words weren't in the filter."))
@_filter.command(name="add")
async def filter_add(self, ctx: commands.Context, *words: str):
"""Add words to the filter.
Use double quotes to add sentences.
Examples:
- `[p]filter add word1 word2 word3`
- `[p]filter add "This is a sentence"`
"""
server = ctx.guild
added = await self.add_to_filter(server, words)
if added:
self.invalidate_cache(ctx.guild)
await ctx.send(_("Words successfully added to filter."))
else:
await ctx.send(_("Those words were already in the filter."))
@_filter.command(name="delete", aliases=["remove", "del"])
async def filter_remove(self, ctx: commands.Context, *words: str):
"""Remove words from the filter.
Use double quotes to remove sentences.
Examples:
- `[p]filter remove word1 word2 word3`
- `[p]filter remove "This is a sentence"`
"""
server = ctx.guild
removed = await self.remove_from_filter(server, words)
if removed:
self.invalidate_cache(ctx.guild)
await ctx.send(_("Words successfully removed from filter."))
else:
await ctx.send(_("Those words weren't in the filter."))
@_filter.command(name="names")
async def filter_names(self, ctx: commands.Context):
"""Toggle name and nickname filtering.
This is disabled by default.
"""
guild = ctx.guild
async with self.config.guild(guild).all() as guild_data:
current_setting = guild_data["filter_names"]
guild_data["filter_names"] = not current_setting
if current_setting:
await ctx.send(_("Names and nicknames will no longer be filtered."))
else:
await ctx.send(_("Names and nicknames will now be filtered."))
def invalidate_cache(self, guild: discord.Guild, channel: discord.TextChannel = None):
""" Invalidate a cached pattern"""
self.pattern_cache.pop((guild, channel), None)
if channel is None:
for keyset in list(self.pattern_cache.keys()): # cast needed, no remove
if guild in keyset:
self.pattern_cache.pop(keyset, None)
async def add_to_filter(
self, server_or_channel: Union[discord.Guild, discord.TextChannel], words: list
) -> bool:
added = False
if isinstance(server_or_channel, discord.Guild):
async with self.config.guild(server_or_channel).filter() as cur_list:
for w in words:
if w.lower() not in cur_list and w:
cur_list.append(w.lower())
added = True
elif isinstance(server_or_channel, discord.TextChannel):
async with self.config.channel(server_or_channel).filter() as cur_list:
for w in words:
if w.lower() not in cur_list and w:
cur_list.append(w.lower())
added = True
return added
async def remove_from_filter(
self, server_or_channel: Union[discord.Guild, discord.TextChannel], words: list
) -> bool:
removed = False
if isinstance(server_or_channel, discord.Guild):
async with self.config.guild(server_or_channel).filter() as cur_list:
for w in words:
if w.lower() in cur_list:
cur_list.remove(w.lower())
removed = True
elif isinstance(server_or_channel, discord.TextChannel):
async with self.config.channel(server_or_channel).filter() as cur_list:
for w in words:
if w.lower() in cur_list:
cur_list.remove(w.lower())
removed = True
return removed
async def filter_hits(
self, text: str, server_or_channel: Union[discord.Guild, discord.TextChannel]
) -> Set[str]:
try:
guild = server_or_channel.guild
channel = server_or_channel
except AttributeError:
guild = server_or_channel
channel = None
hits: Set[str] = set()
try:
pattern = self.pattern_cache[(guild, channel)]
except KeyError:
word_list = set(await self.config.guild(guild).filter())
if channel:
word_list |= set(await self.config.channel(channel).filter())
if word_list:
pattern = re.compile(
"|".join(rf"\b{re.escape(w)}\b" for w in word_list), flags=re.I
)
else:
pattern = None
self.pattern_cache[(guild, channel)] = pattern
if pattern:
hits |= set(pattern.findall(text))
return hits
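# Illustrative example: with a combined word list of {"spam", "free money"}, the compiled
# pattern is equivalent to r"\bspam\b|\bfree money\b" (alternation order may vary) with
# re.IGNORECASE, so `await filter_hits("Get FREE MONEY now", channel)` returns {"FREE MONEY"}.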
async def check_filter(self, message: discord.Message):
guild = message.guild
author = message.author
guild_data = await self.config.guild(guild).all()
member_data = await self.config.member(author).all()
filter_count = guild_data["filterban_count"]
filter_time = guild_data["filterban_time"]
user_count = member_data["filter_count"]
next_reset_time = member_data["next_reset_time"]
created_at = message.created_at.replace(tzinfo=timezone.utc)
if filter_count > 0 and filter_time > 0:
if created_at.timestamp() >= next_reset_time:
next_reset_time = created_at.timestamp() + filter_time
async with self.config.member(author).all() as member_data:
member_data["next_reset_time"] = next_reset_time
if user_count > 0:
user_count = 0
member_data["filter_count"] = user_count
hits = await self.filter_hits(message.content, message.channel)
if hits:
try:
await message.delete()
except discord.HTTPException:
pass
else:
self.bot.dispatch("filter_message_delete", message, hits)
if filter_count > 0 and filter_time > 0:
user_count += 1
await self.config.member(author).filter_count.set(user_count)
if user_count >= filter_count and created_at.timestamp() < next_reset_time:
reason = _("Autoban (too many filtered messages.)")
try:
await guild.ban(author, reason=reason)
except discord.HTTPException:
pass
else:
await modlog.create_case(
self.bot,
guild,
message.created_at.replace(tzinfo=timezone.utc),
"filterban",
author,
guild.me,
reason,
)
@commands.Cog.listener()
async def on_message(self, message: discord.Message):
if isinstance(message.channel, discord.abc.PrivateChannel):
return
if await self.bot.cog_disabled_in_guild(self, message.guild):
return
author = message.author
valid_user = isinstance(author, discord.Member) and not author.bot
if not valid_user:
return
if await self.bot.is_automod_immune(message):
return
await set_contextual_locales_from_guild(self.bot, message.guild)
await self.check_filter(message)
@commands.Cog.listener()
async def on_message_edit(self, _prior, message):
        # Message content has to change for non-bots currently.
        # If this changes, we should compare before passing it.
await self.on_message(message)
@commands.Cog.listener()
async def on_member_update(self, before: discord.Member, after: discord.Member):
if before.display_name != after.display_name:
await self.maybe_filter_name(after)
@commands.Cog.listener()
async def on_member_join(self, member: discord.Member):
await self.maybe_filter_name(member)
async def maybe_filter_name(self, member: discord.Member):
guild = member.guild
if (not guild) or await self.bot.cog_disabled_in_guild(self, guild):
return
if not member.guild.me.guild_permissions.manage_nicknames:
return # No permissions to manage nicknames, so can't do anything
if member.top_role >= member.guild.me.top_role:
return # Discord Hierarchy applies to nicks
if await self.bot.is_automod_immune(member):
return
guild_data = await self.config.guild(member.guild).all()
if not guild_data["filter_names"]:
return
await set_contextual_locales_from_guild(self.bot, guild)
if await self.filter_hits(member.display_name, member.guild):
name_to_use = guild_data["filter_default_name"]
reason = _("Filtered nickname") if member.nick else _("Filtered name")
try:
await member.edit(nick=name_to_use, reason=reason)
except discord.HTTPException:
pass
return
|
import abc
import os
from molecule import status
Status = status.get_status()
class Base(object):
__metaclass__ = abc.ABCMeta
def __init__(self, config):
"""
Base initializer for all :ref:`Driver` classes.
:param config: An instance of a Molecule config.
:returns: None
"""
self._config = config
@property
@abc.abstractmethod
def name(self): # pragma: no cover
"""
Name of the driver and returns a string.
:returns: str
"""
pass
@name.setter
@abc.abstractmethod
def name(self, value): # pragma: no cover
"""
Driver name setter and returns None.
:returns: None
"""
pass
@property
def testinfra_options(self):
"""
Testinfra specific options and returns a dict.
:returns: dict
"""
return {
'connection': 'ansible',
'ansible-inventory': self._config.provisioner.inventory_file
}
@abc.abstractproperty
def login_cmd_template(self): # pragma: no cover
"""
The login command template to be populated by ``login_options`` and
returns a string.
:returns: str
"""
pass
@abc.abstractproperty
def default_ssh_connection_options(self): # pragma: no cover
"""
SSH client options and returns a list.
:returns: list
"""
pass
@abc.abstractproperty
def default_safe_files(self): # pragma: no cover
"""
Generated files to be preserved and returns a list.
:returns: list
"""
pass
@abc.abstractmethod
def login_options(self, instance_name): # pragma: no cover
"""
Options used in the login command and returns a dict.
:param instance_name: A string containing the instance to login to.
:returns: dict
"""
pass
@abc.abstractmethod
def ansible_connection_options(self, instance_name): # pragma: no cover
"""
Ansible specific connection options supplied to inventory and returns a
dict.
:param instance_name: A string containing the instance to login to.
:returns: dict
"""
pass
@abc.abstractmethod
def sanity_checks(self):
"""
Sanity checks to ensure the driver can do work successfully. For
example, when using the Docker driver, we want to know that the Docker
daemon is running and we have the correct Docker Python dependency.
Each driver implementation can decide what is the most stable sanity
check for itself.
:returns: None
"""
pass
@property
def options(self):
return self._config.config['driver']['options']
@property
def instance_config(self):
return os.path.join(self._config.scenario.ephemeral_directory,
'instance_config.yml')
@property
def ssh_connection_options(self):
if self._config.config['driver']['ssh_connection_options']:
return self._config.config['driver']['ssh_connection_options']
return self.default_ssh_connection_options
@property
def safe_files(self):
return (self.default_safe_files +
self._config.config['driver']['safe_files'])
@property
def delegated(self):
"""
Is the driver delegated and returns a bool.
:returns: bool
"""
return self.name == 'delegated'
@property
def managed(self):
"""
        Is the driver managed and returns a bool.
:returns: bool
"""
return self.options['managed']
def status(self):
"""
Collects the instances state and returns a list.
.. important::
Molecule assumes all instances were created successfully by
Ansible, otherwise Ansible would return an error on create. This
may prove to be a bad assumption. However, configuring Molecule's
driver to match the options passed to the playbook may prove
difficult. Especially in cases where the user is provisioning
instances off localhost.
:returns: list
"""
status_list = []
for platform in self._config.platforms.instances:
instance_name = platform['name']
driver_name = self.name
provisioner_name = self._config.provisioner.name
scenario_name = self._config.scenario.name
status_list.append(
Status(
instance_name=instance_name,
driver_name=driver_name,
provisioner_name=provisioner_name,
scenario_name=scenario_name,
created=self._created(),
converged=self._converged(),
))
return status_list
def _get_ssh_connection_options(self):
return [
'-o UserKnownHostsFile=/dev/null',
'-o ControlMaster=auto',
'-o ControlPersist=60s',
'-o IdentitiesOnly=yes',
'-o StrictHostKeyChecking=no',
]
def _created(self):
return str(self._config.state.created).lower()
def _converged(self):
return str(self._config.state.converged).lower()
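# Minimal illustrative subclass (an assumption, not part of molecule itself);
# it shows how a concrete driver would fill in the abstract members declared
# on Base above.
class ExampleDriver(Base):
    @property
    def name(self):
        return 'example'
    @name.setter
    def name(self, value):
        pass
    @property
    def login_cmd_template(self):
        return 'ssh {address} -l {user} -p {port} -i {identity_file}'
    @property
    def default_ssh_connection_options(self):
        return self._get_ssh_connection_options()
    @property
    def default_safe_files(self):
        return [self.instance_config]
    def login_options(self, instance_name):
        return {'instance': instance_name}
    def ansible_connection_options(self, instance_name):
        return {'ansible_ssh_common_args': ' '.join(self.ssh_connection_options)}
    def sanity_checks(self):
        pass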
|
import os
from babelfish import Language
import pytest
from vcr import VCR
from subliminal.providers.podnapisi import PodnapisiProvider, PodnapisiSubtitle
vcr = VCR(path_transformer=lambda path: path + '.yaml',
record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
cassette_library_dir=os.path.realpath(os.path.join('tests', 'cassettes', 'podnapisi')))
def test_get_matches_movie(movies):
subtitle_releases = [
'Man.Of.Steel.2013.720p.BRRip.x264.AAC-ViSiON', 'Man.Of.Steel.2013.720p.BluRay.x264-Felony',
'Man.Of.Steel.2013.1080p.BluRay.x264-SECTOR7', 'Man.Of.Steel.2013.720p.BRRip.x264.AC3-UNDERCOVER',
'Man.Of.Steel.2013.BDRip.XviD.MP3-RARBG', 'Man.Of.Steel.(2013).BDRip.600MB.Ganool',
'Man.of.Steel.2013.BDRip.x264.700MB-Micromkv', 'Man.Of.Steel.2013.BRRip.AAC.x264-SSDD',
'Man.Of.Steel.2013.BDRip.x264-Larceny', 'Man.Of.Steel.2013.BDRiP.XViD-NoGRP',
'Man.Of.Steel.2013.720p.BRRip.x264.AC3-EVO', 'Man.of.Steel.2013.720p.BRRip.h264.AAC-RARBG',
'Man.Of.Steel.[2013].BRRip.XviD-ETRG', 'Man.of.Steel.[2013].BRRip.XViD.[AC3]-ETRG',
'Man.Of.Steel.2013.BRRiP.XVID.AC3-MAJESTIC', 'Man.of.steel.2013.BRRip.XviD.AC3-RARBG',
'Man.Of.Steel.2013.720p.BRRip.x264.AC3-SUPERM4N', 'Man.Of.Steel.2013.720p.BRRip.XviD.AC3-ViSiON',
'Man.Of.Steel.2013.720p.BRRip.x264.AC3-JYK', 'Man.of.Steel.[2013].DVDRIP.DIVX.[Eng]-DUQA',
'Man.of.Steel.2013.1080p.BluRay.x264.YIFY'
]
subtitle = PodnapisiSubtitle(Language('eng'), True, None, 'EMgo', subtitle_releases, 'Man of Steel', None, None,
2013)
matches = subtitle.get_matches(movies['man_of_steel'])
assert matches == {'title', 'year', 'country', 'video_codec', 'resolution', 'source', 'release_group'}
def test_get_matches_episode(episodes):
subtitle_releases = [
'The.Big.Bang.Theory.S07E05.HDTV.x264-LOL', 'The.Big.Bang.Theory.S07E05.720p.HDTV.x264-DIMENSION',
'The.Big.Bang.Theory.S07E05.480p.HDTV.x264-mSD', 'The.Big.Bang.Theory.S07E05.HDTV.XviD-AFG'
]
subtitle = PodnapisiSubtitle(Language('eng'), False, None, 'EdQo', subtitle_releases, 'The Big Bang Theory', 7, 5,
2007)
matches = subtitle.get_matches(episodes['bbt_s07e05'])
assert matches == {'series', 'season', 'episode', 'video_codec', 'resolution', 'source', 'release_group', 'year',
'country'}
def test_get_matches_episode_year(episodes):
subtitle_releases = ['Dallas.2012.S01E03.HDTV.x264-LOL']
subtitle = PodnapisiSubtitle(Language('eng'), True, None, '-5oa', subtitle_releases, 'Dallas', 1, 3, 2012)
matches = subtitle.get_matches(episodes['dallas_2012_s01e03'])
assert matches == {'series', 'year', 'season', 'episode'}
def test_get_matches_no_match(episodes):
subtitle_releases = ['The.Big.Bang.Theory.S07E05.1080p.HDTV.DIMENSION']
subtitle = PodnapisiSubtitle(Language('eng'), False, None, 'EdQo', subtitle_releases, 'The Big Bang Theory', 7, 5,
2007)
matches = subtitle.get_matches(episodes['got_s03e10'])
assert matches == {'year', 'country'}
@pytest.mark.integration
@vcr.use_cassette
def test_query_movie(movies):
video = movies['man_of_steel']
language = Language('eng')
expected_subtitles = {'Nv0l', 'EMgo', '8RIm', 'whQm', 'aoYm', 'WMgp', 'Tsko', 'uYcm', 'XnUm', 'NLUo', 'ZmIm',
'MOko'}
with PodnapisiProvider() as provider:
subtitles = provider.query(language, video.title, year=video.year)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == {language}
@pytest.mark.integration
@vcr.use_cassette
def test_query_episode(episodes):
video = episodes['bbt_s07e05']
language = Language('eng')
expected_subtitles = {'EdQo', '2581', 'w581', 'ftUo', 'WNMo'}
with PodnapisiProvider() as provider:
subtitles = provider.query(language, video.series, season=video.season, episode=video.episode,
year=video.year)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == {language}
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_movie(movies):
video = movies['man_of_steel']
languages = {Language('eng'), Language('fra')}
expected_subtitles = {'Tsko', 'Nv0l', 'XnUm', 'EMgo', 'ZmIm', 'whQm', 'MOko', 'aoYm', 'WMgp', 'd_Im', 'GMso',
'8RIm', 'NLUo', 'uYcm'}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode(episodes):
video = episodes['got_s03e10']
languages = {Language('eng'), Language('fra')}
expected_subtitles = {'8cMl', '6MMl', 'jcYl', 'am0s', 'msYl', '7sMl', 'k8Yl', '8BM5', 'Eaom', 'z8Ml', 'lMYl',
'78Ml', '0MMl', 'a1I8'}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(movies):
video = movies['man_of_steel']
languages = {Language('eng'), Language('fra')}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
subtitle = [s for s in subtitles if s.pid == 'GMso'][0]
provider.download_subtitle(subtitle)
assert subtitle.content is not None
assert subtitle.is_valid() is True
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles_episode_alternative_series(episodes):
video = episodes['marvels_jessica_jones_s01e13']
languages = {Language('eng')}
expected_subtitles = {'JPY-', 'BURB', 'm_c-', 'wFFC', 'tVFC', 'wlFC',
'iZk-', 'w_g-', 'CJw-', 'v5c-', 's1FC', 'u5c-'}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_subtitles_with_title_unicode(movies):
video = movies['café_society']
languages = {Language('fra')}
expected_subtitles = {'iOlD', 'iulD', '2o5B', 'ielD'}
with PodnapisiProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
wanted_subtitle = [s for s in subtitles if s.pid == 'iOlD'][0]
matches = wanted_subtitle.get_matches(movies['café_society'])
provider.download_subtitle(wanted_subtitle)
assert {subtitle.pid for subtitle in subtitles} == expected_subtitles
assert {subtitle.language for subtitle in subtitles} == languages
assert matches == {'title', 'year', 'country'}
assert wanted_subtitle.content is not None
assert wanted_subtitle.is_valid() is True
|
from datetime import timedelta
import logging
from fastdotcom import fast_com
import voluptuous as vol
from homeassistant.const import CONF_SCAN_INTERVAL
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import async_track_time_interval
DOMAIN = "fastdotcom"
DATA_UPDATED = f"{DOMAIN}_data_updated"
_LOGGER = logging.getLogger(__name__)
CONF_MANUAL = "manual"
DEFAULT_INTERVAL = timedelta(hours=1)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_SCAN_INTERVAL, default=DEFAULT_INTERVAL): vol.All(
cv.time_period, cv.positive_timedelta
),
vol.Optional(CONF_MANUAL, default=False): cv.boolean,
}
)
},
extra=vol.ALLOW_EXTRA,
)
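# Example configuration.yaml entry matching the schema above (illustrative values):
#
#   fastdotcom:
#     scan_interval:
#       minutes: 30
#     manual: false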
async def async_setup(hass, config):
"""Set up the Fast.com component."""
conf = config[DOMAIN]
data = hass.data[DOMAIN] = SpeedtestData(hass)
if not conf[CONF_MANUAL]:
async_track_time_interval(hass, data.update, conf[CONF_SCAN_INTERVAL])
def update(call=None):
"""Service call to manually update the data."""
data.update()
hass.services.async_register(DOMAIN, "speedtest", update)
hass.async_create_task(async_load_platform(hass, "sensor", DOMAIN, {}, config))
return True
class SpeedtestData:
"""Get the latest data from fast.com."""
def __init__(self, hass):
"""Initialize the data object."""
self.data = None
self._hass = hass
def update(self, now=None):
"""Get the latest data from fast.com."""
_LOGGER.debug("Executing fast.com speedtest")
self.data = {"download": fast_com()}
dispatcher_send(self._hass, DATA_UPDATED)
|
import logging
import os
import gntp.errors
import gntp.notifier
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_TITLE,
ATTR_TITLE_DEFAULT,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONF_PASSWORD, CONF_PORT
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_APP_NAME = "app_name"
CONF_APP_ICON = "app_icon"
CONF_HOSTNAME = "hostname"
DEFAULT_APP_NAME = "HomeAssistant"
DEFAULT_HOST = "localhost"
DEFAULT_PORT = 23053
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_APP_NAME, default=DEFAULT_APP_NAME): cv.string,
        vol.Optional(CONF_APP_ICON): vol.Url(),
vol.Optional(CONF_HOSTNAME, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
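# Example configuration.yaml entry for this notify platform (illustrative; the
# platform key is an assumption, the options mirror the schema above):
#
#   notify:
#     - platform: gntp
#       app_name: HomeAssistant
#       hostname: 192.168.1.10
#       password: growl-secret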
def get_service(hass, config, discovery_info=None):
"""Get the GNTP notification service."""
logging.getLogger("gntp").setLevel(logging.ERROR)
if config.get(CONF_APP_ICON) is None:
icon_file = os.path.join(
os.path.dirname(__file__),
"..",
"frontend",
"www_static",
"icons",
"favicon-192x192.png",
)
with open(icon_file, "rb") as file:
app_icon = file.read()
else:
app_icon = config.get(CONF_APP_ICON)
return GNTPNotificationService(
config.get(CONF_APP_NAME),
app_icon,
config.get(CONF_HOSTNAME),
config.get(CONF_PASSWORD),
config.get(CONF_PORT),
)
class GNTPNotificationService(BaseNotificationService):
"""Implement the notification service for GNTP."""
def __init__(self, app_name, app_icon, hostname, password, port):
"""Initialize the service."""
self.gntp = gntp.notifier.GrowlNotifier(
applicationName=app_name,
notifications=["Notification"],
applicationIcon=app_icon,
hostname=hostname,
password=password,
port=port,
)
try:
self.gntp.register()
except gntp.errors.NetworkError:
_LOGGER.error("Unable to register with the GNTP host")
return
def send_message(self, message="", **kwargs):
"""Send a message to a user."""
self.gntp.notify(
noteType="Notification",
title=kwargs.get(ATTR_TITLE, ATTR_TITLE_DEFAULT),
description=message,
)
|
from rest_framework import serializers
from rest_framework.fields import empty
from shop.models.cart import CartModel
from shop.rest.money import MoneyField
from shop.serializers.bases import AvailabilitySerializer
class AddToCartSerializer(serializers.Serializer):
"""
By default, this serializer is used by the view class :class:`shop.views.catalog.AddToCartView`,
which handles the communication from the "Add to Cart" dialog box.
If a product has variations, which influence the fields in the "Add to Cart" dialog box, then
this serializer shall be overridden by a customized implementation. Such a customized "*Add to
Cart*" serializer has to be connected to the ``AddToCartView``. This usually is achieved in
    the project's ``urls.py`` by changing the catalog's routing to:
```
urlpatterns = [
...
url(r'^(?P<slug>[\w-]+)/add-to-cart', AddToCartView.as_view(
serializer_class=CustomAddToCartSerializer,
)),
...
]
```
"""
quantity = serializers.IntegerField(default=1, min_value=1)
unit_price = MoneyField(read_only=True)
subtotal = MoneyField(read_only=True)
product = serializers.IntegerField(read_only=True, help_text="The product's primary key")
product_code = serializers.CharField(read_only=True, help_text="Exact product code of the cart item")
extra = serializers.DictField(read_only=True, default={})
is_in_cart = serializers.BooleanField(read_only=True, default=False)
availability = AvailabilitySerializer(read_only=True)
def __init__(self, instance=None, data=empty, **kwargs):
context = kwargs.get('context', {})
if 'product' in context:
instance = self.get_instance(context, data, kwargs)
if data is not empty and 'quantity' in data:
quantity = self.fields['quantity'].to_internal_value(data['quantity'])
else:
quantity = self.fields['quantity'].default
instance.setdefault('quantity', quantity)
super().__init__(instance, data, context=context)
else:
super().__init__(instance, data, **kwargs)
def to_representation(self, instance):
data = super().to_representation(instance)
try:
data['quantity'] = self._validated_data['quantity']
except AttributeError:
data['quantity'] = self.validate_quantity(data['quantity'])
data['subtotal'] = MoneyField().to_representation(data['quantity'] * instance['unit_price'])
return data
def validate_quantity(self, quantity):
"""
        Restrict the quantity that can be put into the cart to the quantity available in stock.
"""
availability = self.instance['availability']
return min(quantity, availability.quantity)
def get_instance(self, context, data, extra_args):
"""
Method to store the ordered products in the cart item instance.
        Remember to override this method if the ``product_code`` is part of the
variation rather than being part of the product itself.
"""
product = context['product']
request = context['request']
try:
cart = CartModel.objects.get_from_request(request)
except CartModel.DoesNotExist:
cart = None
extra = data.get('extra', {}) if data is not empty else {}
return {
'product': product.id,
'product_code': product.product_code,
'unit_price': product.get_price(request),
'is_in_cart': bool(product.is_in_cart(cart)),
'extra': extra,
'availability': product.get_availability(request, **extra),
}
|
import importlib
from homeassistant.const import CONF_PLATFORM
def _get_trigger_platform(config):
return importlib.import_module(f"..triggers.{config[CONF_PLATFORM]}", __name__)
async def async_validate_trigger_config(hass, config):
"""Validate config."""
platform = _get_trigger_platform(config)
if hasattr(platform, "async_validate_trigger_config"):
return await getattr(platform, "async_validate_trigger_config")(hass, config)
return platform.TRIGGER_SCHEMA(config)
async def async_attach_trigger(hass, config, action, automation_info):
"""Attach trigger of specified platform."""
platform = _get_trigger_platform(config)
return await platform.async_attach_trigger(hass, config, action, automation_info)
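# Illustrative note (the config values are assumptions): a trigger config such
# as {"platform": "state", "entity_id": "light.kitchen"} is dispatched to the
# ``triggers.state`` module one package above this one, whose TRIGGER_SCHEMA
# (or async_validate_trigger_config) then validates the remaining keys.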
|
import re
from sphinx import addnodes
from sphinx.domains.std import Cmdoption
# RE for option descriptions without a '--' prefix
simple_option_desc_re = re.compile(r"([-_a-zA-Z0-9]+)(\s*.*?)(?=,\s+(?:/|-|--)|$)")
def setup(app):
app.add_crossref_type(
directivename="setting", rolename="setting", indextemplate="pair: %s; setting"
)
app.add_object_type(
directivename="django-admin",
rolename="djadmin",
indextemplate="pair: %s; weblate admin command",
parse_node=parse_django_admin_node,
)
app.add_directive("django-admin-option", Cmdoption)
def parse_django_admin_node(env, sig, signode):
command = sig.split(" ")[0]
env.ref_context["std:program"] = command
title = f"weblate {sig}"
signode += addnodes.desc_name(title, title)
return command
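# Illustrative reST usage of the objects registered above (the command name is
# a made-up example):
#
#   .. django-admin:: import_demo
#
#      Imports demo data.
#
# This renders the signature as "weblate import_demo" and can be referenced
# with :djadmin:`import_demo`; settings are cross-referenced via :setting:`NAME`.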
|
import re
from .utils import Str, classify, get_regexp_width, Py36, Serialize, suppress
from .exceptions import UnexpectedCharacters, LexError, UnexpectedToken
###{standalone
from copy import copy
class Pattern(Serialize):
raw = None
def __init__(self, value, flags=(), raw=None):
self.value = value
self.flags = frozenset(flags)
self.raw = raw
def __repr__(self):
return repr(self.to_regexp())
# Pattern Hashing assumes all subclasses have a different priority!
def __hash__(self):
return hash((type(self), self.value, self.flags))
def __eq__(self, other):
return type(self) == type(other) and self.value == other.value and self.flags == other.flags
def to_regexp(self):
raise NotImplementedError()
if Py36:
        # Python 3.6 changed the syntax for inline flags in regular expressions
def _get_flags(self, value):
for f in self.flags:
value = ('(?%s:%s)' % (f, value))
return value
else:
def _get_flags(self, value):
for f in self.flags:
value = ('(?%s)' % f) + value
return value
class PatternStr(Pattern):
__serialize_fields__ = 'value', 'flags'
type = "str"
def to_regexp(self):
return self._get_flags(re.escape(self.value))
@property
def min_width(self):
return len(self.value)
max_width = min_width
class PatternRE(Pattern):
__serialize_fields__ = 'value', 'flags', '_width'
type = "re"
def to_regexp(self):
return self._get_flags(self.value)
_width = None
def _get_width(self):
if self._width is None:
self._width = get_regexp_width(self.to_regexp())
return self._width
@property
def min_width(self):
return self._get_width()[0]
@property
def max_width(self):
return self._get_width()[1]
class TerminalDef(Serialize):
__serialize_fields__ = 'name', 'pattern', 'priority'
__serialize_namespace__ = PatternStr, PatternRE
def __init__(self, name, pattern, priority=1):
assert isinstance(pattern, Pattern), pattern
self.name = name
self.pattern = pattern
self.priority = priority
def __repr__(self):
return '%s(%r, %r)' % (type(self).__name__, self.name, self.pattern)
def user_repr(self):
if self.name.startswith('__'): # We represent a generated terminal
return self.pattern.raw or self.name
else:
return self.name
class Token(Str):
"""A string with meta-information, that is produced by the lexer.
When parsing text, the resulting chunks of the input that haven't been discarded,
will end up in the tree as Token instances. The Token class inherits from Python's ``str``,
so normal string comparisons and operations will work as expected.
Attributes:
type: Name of the token (as specified in grammar)
value: Value of the token (redundant, as ``token.value == token`` will always be true)
pos_in_stream: The index of the token in the text
line: The line of the token in the text (starting with 1)
column: The column of the token in the text (starting with 1)
end_line: The line where the token ends
end_column: The next column after the end of the token. For example,
if the token is a single character with a column value of 4,
end_column will be 5.
end_pos: the index where the token ends (basically ``pos_in_stream + len(token)``)
"""
__slots__ = ('type', 'pos_in_stream', 'value', 'line', 'column', 'end_line', 'end_column', 'end_pos')
def __new__(cls, type_, value, pos_in_stream=None, line=None, column=None, end_line=None, end_column=None, end_pos=None):
try:
self = super(Token, cls).__new__(cls, value)
except UnicodeDecodeError:
value = value.decode('latin1')
self = super(Token, cls).__new__(cls, value)
self.type = type_
self.pos_in_stream = pos_in_stream
self.value = value
self.line = line
self.column = column
self.end_line = end_line
self.end_column = end_column
self.end_pos = end_pos
return self
def update(self, type_=None, value=None):
return Token.new_borrow_pos(
type_ if type_ is not None else self.type,
value if value is not None else self.value,
self
)
@classmethod
def new_borrow_pos(cls, type_, value, borrow_t):
return cls(type_, value, borrow_t.pos_in_stream, borrow_t.line, borrow_t.column, borrow_t.end_line, borrow_t.end_column, borrow_t.end_pos)
def __reduce__(self):
return (self.__class__, (self.type, self.value, self.pos_in_stream, self.line, self.column))
def __repr__(self):
return 'Token(%r, %r)' % (self.type, self.value)
def __deepcopy__(self, memo):
return Token(self.type, self.value, self.pos_in_stream, self.line, self.column)
def __eq__(self, other):
if isinstance(other, Token) and self.type != other.type:
return False
return Str.__eq__(self, other)
__hash__ = Str.__hash__
class LineCounter:
__slots__ = 'char_pos', 'line', 'column', 'line_start_pos', 'newline_char'
def __init__(self, newline_char):
self.newline_char = newline_char
self.char_pos = 0
self.line = 1
self.column = 1
self.line_start_pos = 0
def feed(self, token, test_newline=True):
"""Consume a token and calculate the new line & column.
As an optional optimization, set test_newline=False if token doesn't contain a newline.
"""
if test_newline:
newlines = token.count(self.newline_char)
if newlines:
self.line += newlines
self.line_start_pos = self.char_pos + token.rindex(self.newline_char) + 1
self.char_pos += len(token)
self.column = self.char_pos - self.line_start_pos + 1
class UnlessCallback:
def __init__(self, mres):
self.mres = mres
def __call__(self, t):
for mre, type_from_index in self.mres:
m = mre.match(t.value)
if m:
t.type = type_from_index[m.lastindex]
break
return t
class CallChain:
def __init__(self, callback1, callback2, cond):
self.callback1 = callback1
self.callback2 = callback2
self.cond = cond
def __call__(self, t):
t2 = self.callback1(t)
return self.callback2(t) if self.cond(t2) else t2
def _create_unless(terminals, g_regex_flags, re_, use_bytes):
tokens_by_type = classify(terminals, lambda t: type(t.pattern))
assert len(tokens_by_type) <= 2, tokens_by_type.keys()
embedded_strs = set()
callback = {}
for retok in tokens_by_type.get(PatternRE, []):
unless = []
for strtok in tokens_by_type.get(PatternStr, []):
if strtok.priority > retok.priority:
continue
s = strtok.pattern.value
m = re_.match(retok.pattern.to_regexp(), s, g_regex_flags)
if m and m.group(0) == s:
unless.append(strtok)
if strtok.pattern.flags <= retok.pattern.flags:
embedded_strs.add(strtok)
if unless:
callback[retok.name] = UnlessCallback(build_mres(unless, g_regex_flags, re_, match_whole=True, use_bytes=use_bytes))
terminals = [t for t in terminals if t not in embedded_strs]
return terminals, callback
def _build_mres(terminals, max_size, g_regex_flags, match_whole, re_, use_bytes):
# Python sets an unreasonable group limit (currently 100) in its re module
# Worse, the only way to know we reached it is by catching an AssertionError!
# This function recursively tries less and less groups until it's successful.
postfix = '$' if match_whole else ''
mres = []
while terminals:
pattern = u'|'.join(u'(?P<%s>%s)' % (t.name, t.pattern.to_regexp() + postfix) for t in terminals[:max_size])
if use_bytes:
pattern = pattern.encode('latin-1')
try:
mre = re_.compile(pattern, g_regex_flags)
except AssertionError: # Yes, this is what Python provides us.. :/
return _build_mres(terminals, max_size//2, g_regex_flags, match_whole, re_, use_bytes)
mres.append((mre, {i: n for n, i in mre.groupindex.items()}))
terminals = terminals[max_size:]
return mres
def build_mres(terminals, g_regex_flags, re_, use_bytes, match_whole=False):
return _build_mres(terminals, len(terminals), g_regex_flags, match_whole, re_, use_bytes)
def _regexp_has_newline(r):
r"""Expressions that may indicate newlines in a regexp:
- newlines (\n)
- escaped newline (\\n)
- anything but ([^...])
- any-char (.) when the flag (?s) exists
- spaces (\s)
"""
return '\n' in r or '\\n' in r or '\\s' in r or '[^' in r or ('(?s' in r and '.' in r)
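# Illustrative checks (not part of the original module):
#   _regexp_has_newline(r'\n')     -> True   (the regexp matches a newline)
#   _regexp_has_newline(r'[^"]+')  -> True   (negated class may span lines)
#   _regexp_has_newline(r'[a-z]+') -> False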
class Lexer(object):
"""Lexer interface
Method Signatures:
lex(self, text) -> Iterator[Token]
"""
lex = NotImplemented
def make_lexer_state(self, text):
line_ctr = LineCounter(b'\n' if isinstance(text, bytes) else '\n')
return LexerState(text, line_ctr)
class TraditionalLexer(Lexer):
def __init__(self, conf):
terminals = list(conf.terminals)
assert all(isinstance(t, TerminalDef) for t in terminals), terminals
self.re = conf.re_module
if not conf.skip_validation:
# Sanitization
for t in terminals:
try:
self.re.compile(t.pattern.to_regexp(), conf.g_regex_flags)
except self.re.error:
raise LexError("Cannot compile token %s: %s" % (t.name, t.pattern))
if t.pattern.min_width == 0:
raise LexError("Lexer does not allow zero-width terminals. (%s: %s)" % (t.name, t.pattern))
if not (set(conf.ignore) <= {t.name for t in terminals}):
raise LexError("Ignore terminals are not defined: %s" % (set(conf.ignore) - {t.name for t in terminals}))
# Init
self.newline_types = frozenset(t.name for t in terminals if _regexp_has_newline(t.pattern.to_regexp()))
self.ignore_types = frozenset(conf.ignore)
terminals.sort(key=lambda x: (-x.priority, -x.pattern.max_width, -len(x.pattern.value), x.name))
self.terminals = terminals
self.user_callbacks = conf.callbacks
self.g_regex_flags = conf.g_regex_flags
self.use_bytes = conf.use_bytes
self.terminals_by_name = conf.terminals_by_name
self._mres = None
def _build(self):
terminals, self.callback = _create_unless(self.terminals, self.g_regex_flags, self.re, self.use_bytes)
assert all(self.callback.values())
for type_, f in self.user_callbacks.items():
if type_ in self.callback:
# Already a callback there, probably UnlessCallback
self.callback[type_] = CallChain(self.callback[type_], f, lambda t: t.type == type_)
else:
self.callback[type_] = f
self._mres = build_mres(terminals, self.g_regex_flags, self.re, self.use_bytes)
@property
def mres(self):
if self._mres is None:
self._build()
return self._mres
def match(self, text, pos):
for mre, type_from_index in self.mres:
m = mre.match(text, pos)
if m:
return m.group(0), type_from_index[m.lastindex]
def lex(self, state, parser_state):
with suppress(EOFError):
while True:
yield self.next_token(state, parser_state)
def next_token(self, lex_state, parser_state=None):
line_ctr = lex_state.line_ctr
while line_ctr.char_pos < len(lex_state.text):
res = self.match(lex_state.text, line_ctr.char_pos)
if not res:
allowed = {v for m, tfi in self.mres for v in tfi.values()} - self.ignore_types
if not allowed:
allowed = {"<END-OF-FILE>"}
raise UnexpectedCharacters(lex_state.text, line_ctr.char_pos, line_ctr.line, line_ctr.column,
allowed=allowed, token_history=lex_state.last_token and [lex_state.last_token],
state=parser_state, terminals_by_name=self.terminals_by_name)
value, type_ = res
if type_ not in self.ignore_types:
t = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
line_ctr.feed(value, type_ in self.newline_types)
t.end_line = line_ctr.line
t.end_column = line_ctr.column
t.end_pos = line_ctr.char_pos
if t.type in self.callback:
t = self.callback[t.type](t)
if not isinstance(t, Token):
raise LexError("Callbacks must return a token (returned %r)" % t)
lex_state.last_token = t
return t
else:
if type_ in self.callback:
t2 = Token(type_, value, line_ctr.char_pos, line_ctr.line, line_ctr.column)
self.callback[type_](t2)
line_ctr.feed(value, type_ in self.newline_types)
# EOF
raise EOFError(self)
class LexerState:
__slots__ = 'text', 'line_ctr', 'last_token'
def __init__(self, text, line_ctr, last_token=None):
self.text = text
self.line_ctr = line_ctr
self.last_token = last_token
def __copy__(self):
return type(self)(self.text, copy(self.line_ctr), self.last_token)
class ContextualLexer(Lexer):
def __init__(self, conf, states, always_accept=()):
terminals = list(conf.terminals)
terminals_by_name = conf.terminals_by_name
trad_conf = copy(conf)
trad_conf.terminals = terminals
lexer_by_tokens = {}
self.lexers = {}
for state, accepts in states.items():
key = frozenset(accepts)
try:
lexer = lexer_by_tokens[key]
except KeyError:
accepts = set(accepts) | set(conf.ignore) | set(always_accept)
lexer_conf = copy(trad_conf)
lexer_conf.terminals = [terminals_by_name[n] for n in accepts if n in terminals_by_name]
lexer = TraditionalLexer(lexer_conf)
lexer_by_tokens[key] = lexer
self.lexers[state] = lexer
assert trad_conf.terminals is terminals
self.root_lexer = TraditionalLexer(trad_conf)
def make_lexer_state(self, text):
return self.root_lexer.make_lexer_state(text)
def lex(self, lexer_state, parser_state):
try:
while True:
lexer = self.lexers[parser_state.position]
yield lexer.next_token(lexer_state, parser_state)
except EOFError:
pass
except UnexpectedCharacters as e:
# In the contextual lexer, UnexpectedCharacters can mean that the terminal is defined, but not in the current context.
# This tests the input against the global context, to provide a nicer error.
try:
last_token = lexer_state.last_token # Save last_token. Calling root_lexer.next_token will change this to the wrong token
token = self.root_lexer.next_token(lexer_state, parser_state)
raise UnexpectedToken(token, e.allowed, state=parser_state, token_history=[last_token], terminals_by_name=self.root_lexer.terminals_by_name)
except UnexpectedCharacters:
raise e # Raise the original UnexpectedCharacters. The root lexer raises it with the wrong expected set.
class LexerThread:
"""A thread that ties a lexer instance and a lexer state, to be used by the parser"""
def __init__(self, lexer, text):
self.lexer = lexer
self.state = lexer.make_lexer_state(text)
def lex(self, parser_state):
return self.lexer.lex(self.state, parser_state)
###}
|
class JuiceNetApi:
"""Represent a connection to JuiceNet."""
def __init__(self, api):
"""Create an object from the provided API instance."""
self.api = api
self._devices = []
async def setup(self):
"""JuiceNet device setup.""" # noqa: D403
self._devices = await self.api.get_devices()
@property
def devices(self) -> list:
"""Get a list of devices managed by this account."""
return self._devices
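# Minimal usage sketch (an assumption; ``Api`` stands in for the real
# pyjuicenet client this wrapper expects):
#
#   juicenet = JuiceNetApi(Api(token, session))
#   await juicenet.setup()
#   chargers = juicenet.devices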
|
from typing import (
TYPE_CHECKING, Any, Callable, Dict, Iterator, Optional, Set, Tuple, Union)
from PyQt5.QtCore import QRect, QEventLoop
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEngineSettings
from qutebrowser.utils import log, javascript, urlutils, usertypes, utils
from qutebrowser.browser import webelem
if TYPE_CHECKING:
from qutebrowser.browser.webengine import webenginetab
class WebEngineElement(webelem.AbstractWebElement):
"""A web element for QtWebEngine, using JS under the hood."""
def __init__(self, js_dict: Dict[str, Any],
tab: 'webenginetab.WebEngineTab') -> None:
super().__init__(tab)
# Do some sanity checks on the data we get from JS
js_dict_types: Dict[str, Union[type, Tuple[type, ...]]] = {
'id': int,
'text': str,
'value': (str, int, float),
'tag_name': str,
'outer_xml': str,
'class_name': str,
'rects': list,
'attributes': dict,
'is_content_editable': bool,
'caret_position': (int, type(None)),
}
assert set(js_dict.keys()).issubset(js_dict_types.keys())
for name, typ in js_dict_types.items():
if name in js_dict and not isinstance(js_dict[name], typ):
raise TypeError("Got {} for {} from JS but expected {}: "
"{}".format(type(js_dict[name]), name, typ,
js_dict))
for name, value in js_dict['attributes'].items():
if not isinstance(name, str):
raise TypeError("Got {} ({}) for attribute name from JS: "
"{}".format(name, type(name), js_dict))
if not isinstance(value, str):
raise TypeError("Got {} ({}) for attribute {} from JS: "
"{}".format(value, type(value), name, js_dict))
for rect in js_dict['rects']:
assert set(rect.keys()) == {'top', 'right', 'bottom', 'left',
'height', 'width'}, rect.keys()
for value in rect.values():
if not isinstance(value, (int, float)):
raise TypeError("Got {} ({}) for rect from JS: "
"{}".format(value, type(value), js_dict))
self._id = js_dict['id']
self._js_dict = js_dict
def __str__(self) -> str:
return self._js_dict.get('text', '')
def __eq__(self, other: object) -> bool:
if not isinstance(other, WebEngineElement):
return NotImplemented
return self._id == other._id
def __getitem__(self, key: str) -> str:
attrs = self._js_dict['attributes']
return attrs[key]
def __setitem__(self, key: str, val: str) -> None:
self._js_dict['attributes'][key] = val
self._js_call('set_attribute', key, val)
def __delitem__(self, key: str) -> None:
utils.unused(key)
log.stub()
def __iter__(self) -> Iterator[str]:
return iter(self._js_dict['attributes'])
def __len__(self) -> int:
return len(self._js_dict['attributes'])
def _js_call(self, name: str, *args: webelem.JsValueType,
callback: Callable[[Any], None] = None) -> None:
"""Wrapper to run stuff from webelem.js."""
if self._tab.is_deleted():
raise webelem.OrphanedError("Tab containing element vanished")
js_code = javascript.assemble('webelem', name, self._id, *args)
self._tab.run_js_async(js_code, callback=callback)
def has_frame(self) -> bool:
return True
def geometry(self) -> QRect:
log.stub()
return QRect()
def classes(self) -> Set[str]:
"""Get a list of classes assigned to this element."""
return set(self._js_dict['class_name'].split())
def tag_name(self) -> str:
"""Get the tag name of this element.
The returned name will always be lower-case.
"""
tag = self._js_dict['tag_name']
assert isinstance(tag, str), tag
return tag.lower()
def outer_xml(self) -> str:
"""Get the full HTML representation of this element."""
return self._js_dict['outer_xml']
def is_content_editable_prop(self) -> bool:
return self._js_dict['is_content_editable']
def value(self) -> webelem.JsValueType:
return self._js_dict.get('value', None)
def set_value(self, value: webelem.JsValueType) -> None:
self._js_call('set_value', value)
def dispatch_event(self, event: str,
bubbles: bool = False,
cancelable: bool = False,
composed: bool = False) -> None:
self._js_call('dispatch_event', event, bubbles, cancelable, composed)
def caret_position(self) -> Optional[int]:
"""Get the text caret position for the current element.
If the element is not a text element, None is returned.
"""
return self._js_dict.get('caret_position', None)
def insert_text(self, text: str) -> None:
if not self.is_editable(strict=True):
raise webelem.Error("Element is not editable!")
log.webelem.debug("Inserting text into element {!r}".format(self))
self._js_call('insert_text', text)
def rect_on_view(self, *, elem_geometry: QRect = None,
no_js: bool = False) -> QRect:
"""Get the geometry of the element relative to the webview.
Skipping of small rectangles is due to <a> elements containing other
elements with "display:block" style, see
https://github.com/qutebrowser/qutebrowser/issues/1298
Args:
elem_geometry: The geometry of the element, or None.
Ignored with QtWebEngine.
no_js: Fall back to the Python implementation.
Ignored with QtWebEngine.
"""
utils.unused(elem_geometry)
utils.unused(no_js)
rects = self._js_dict['rects']
for rect in rects:
# FIXME:qtwebengine
# width = rect.get("width", 0)
# height = rect.get("height", 0)
width = rect['width']
height = rect['height']
left = rect['left']
top = rect['top']
if width > 1 and height > 1:
# Fix coordinates according to zoom level
# We're not checking for zoom.text_only here as that doesn't
# exist for QtWebEngine.
zoom = self._tab.zoom.factor()
rect = QRect(int(left * zoom), int(top * zoom),
int(width * zoom), int(height * zoom))
# FIXME:qtwebengine
# frame = self._elem.webFrame()
# while frame is not None:
# # Translate to parent frames' position (scroll position
# # is taken care of inside getClientRects)
# rect.translate(frame.geometry().topLeft())
# frame = frame.parentFrame()
return rect
log.webelem.debug("Couldn't find rectangle for {!r} ({})".format(
self, rects))
return QRect()
def remove_blank_target(self) -> None:
if self._js_dict['attributes'].get('target') == '_blank':
self._js_dict['attributes']['target'] = '_top'
self._js_call('remove_blank_target')
def delete(self) -> None:
self._js_call('delete')
def _move_text_cursor(self) -> None:
if self.is_text_input() and self.is_editable():
self._js_call('move_cursor_to_end')
def _requires_user_interaction(self) -> bool:
baseurl = self._tab.url()
url = self.resolve_url(baseurl)
if url is None:
return True
if baseurl.scheme() == url.scheme(): # e.g. a qute:// link
return False
return url.scheme() not in urlutils.WEBENGINE_SCHEMES
def _click_editable(self, click_target: usertypes.ClickTarget) -> None:
self._tab.setFocus() # Needed as WORKAROUND on Qt 5.12
# This actually "clicks" the element by calling focus() on it in JS.
self._js_call('focus')
self._move_text_cursor()
def _click_js(self, _click_target: usertypes.ClickTarget) -> None:
# FIXME:qtwebengine Have a proper API for this
# pylint: disable=protected-access
view = self._tab._widget
assert view is not None
# pylint: enable=protected-access
attribute = QWebEngineSettings.JavascriptCanOpenWindows
could_open_windows = view.settings().testAttribute(attribute)
view.settings().setAttribute(attribute, True)
        # Get QtWebEngine to apply the settings
# (it does so with a 0ms QTimer...)
# This is also used in Qt's tests:
# https://github.com/qt/qtwebengine/commit/5e572e88efa7ba7c2b9138ec19e606d3e345ac90
QApplication.processEvents( # type: ignore[call-overload]
QEventLoop.ExcludeSocketNotifiers |
QEventLoop.ExcludeUserInputEvents)
def reset_setting(_arg: Any) -> None:
"""Set the JavascriptCanOpenWindows setting to its old value."""
assert view is not None
try:
view.settings().setAttribute(attribute, could_open_windows)
except RuntimeError:
# Happens if this callback gets called during QWebEnginePage
# destruction, i.e. if the tab was closed in the meantime.
pass
self._js_call('click', callback=reset_setting)
|
import pywink
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_DISARMED,
)
from . import DOMAIN, WinkDevice
STATE_ALARM_PRIVACY = "Private"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Wink platform."""
for camera in pywink.get_cameras():
# get_cameras returns multiple device types.
# Only add those that aren't sensors.
try:
camera.capability()
except AttributeError:
_id = camera.object_id() + camera.name()
if _id not in hass.data[DOMAIN]["unique_ids"]:
add_entities([WinkCameraDevice(camera, hass)])
class WinkCameraDevice(WinkDevice, alarm.AlarmControlPanelEntity):
"""Representation a Wink camera alarm."""
async def async_added_to_hass(self):
"""Call when entity is added to hass."""
self.hass.data[DOMAIN]["entities"]["alarm_control_panel"].append(self)
@property
def state(self):
"""Return the state of the device."""
wink_state = self.wink.state()
if wink_state == "away":
state = STATE_ALARM_ARMED_AWAY
elif wink_state == "home":
state = STATE_ALARM_DISARMED
elif wink_state == "night":
state = STATE_ALARM_ARMED_HOME
else:
state = None
return state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY
def alarm_disarm(self, code=None):
"""Send disarm command."""
self.wink.set_mode("home")
def alarm_arm_home(self, code=None):
"""Send arm home command."""
self.wink.set_mode("night")
def alarm_arm_away(self, code=None):
"""Send arm away command."""
self.wink.set_mode("away")
@property
def device_state_attributes(self):
"""Return the state attributes."""
return {"private": self.wink.private()}
|
from django.test import TestCase
from weblate.auth.models import AutoGroup, Group, User
class AutoGroupTest(TestCase):
@staticmethod
def create_user():
return User.objects.create_user("test1", "[email protected]", "pass")
def test_default(self):
user = self.create_user()
self.assertEqual(user.groups.count(), 2)
def test_none(self):
AutoGroup.objects.all().delete()
user = self.create_user()
self.assertEqual(user.groups.count(), 0)
def test_matching(self):
AutoGroup.objects.create(
match="^.*@weblate.org", group=Group.objects.get(name="Guests")
)
user = self.create_user()
self.assertEqual(user.groups.count(), 3)
def test_nonmatching(self):
AutoGroup.objects.create(
match="^.*@example.net", group=Group.objects.get(name="Guests")
)
user = self.create_user()
self.assertEqual(user.groups.count(), 2)
|
from test import CollectorTestCase
from test import get_collector_config
from mock import patch, Mock
from postgres import PostgresqlCollector
class TestPostgresqlCollector(CollectorTestCase):
def setUp(self, allowed_names=None):
if not allowed_names:
allowed_names = []
config = get_collector_config('PostgresqlCollector', {
})
self.collector = PostgresqlCollector(config, None)
def test_import(self):
self.assertTrue(PostgresqlCollector)
@patch('postgres.psycopg2')
def test_connect_with_password(self, psycopg2_mock):
conn_mock = Mock()
psycopg2_mock.connect.return_value = conn_mock
ret = self.collector._connect('test_db')
self.assertTrue(conn_mock.set_isolation_level.called)
self.assertEqual(ret, conn_mock)
psycopg2_mock.connect.assert_called_once_with(
database='test_db', host='localhost', password='postgres',
port=5432, sslmode='disable', user='postgres'
)
@patch('postgres.psycopg2')
def test_connect_with_pgpass(self, psycopg2_mock):
config = get_collector_config('PostgresqlCollector', {
'password_provider': 'pgpass'
})
self.collector = PostgresqlCollector(config, None)
conn_mock = Mock()
psycopg2_mock.connect.return_value = conn_mock
ret = self.collector._connect('test_db')
self.assertTrue(conn_mock.set_isolation_level.called)
self.assertEqual(ret, conn_mock)
psycopg2_mock.connect.assert_called_once_with(
database='test_db', host='localhost',
port=5432, sslmode='disable', user='postgres'
)
@patch('postgres.psycopg2')
def test_connect_error(self, psycopg2_mock):
psycopg2_mock.connect.side_effect = Exception('Some db exc')
with self.assertRaises(Exception):
self.collector._connect('test_db')
|
import os
from flexx import flx
BASE_DIR = os.getcwd()
with open(BASE_DIR + '/static/js/data.json') as f:
geojson = f.read()
ol_cdn = 'https://cdnjs.cloudflare.com/ajax/libs/openlayers/4.6.5/'
flx.assets.associate_asset(__name__, ol_cdn + 'ol.css')
flx.assets.associate_asset(__name__, ol_cdn + 'ol.js')
class Ol(flx.Widget):
initialised = False
@flx.action
def remove_layers(self):
self.map.removeLayer(self.vectorLayer)
self.vectorLayer.getSource().clear()
self.vectorLayer.getSource().refresh()
@flx.action
def add_drawing_interaction(self):
self.map.addLayer(self.drawVectorLayer)
self.map.addInteraction(self.drawPoint)
@flx.action
def remove_drawing_interaction(self):
self.map.removeInteraction(self.drawPoint)
self.map.removeLayer(self.drawVectorLayer)
self.drawVectorLayer.getSource().clear()
@flx.action
def map_init(self):
global ol
if not self.initialised:
self.olview = ol.View({
"zoom": 8,
"center": [-80.901813, 22.968599],
"projection": "EPSG:4326",
"minZoom": 3,
"maxZoom": 100
})
self.baseLayer = ol.layer.Tile({
"source": ol.source.OSM(),
})
self.vectorLayer = ol.layer.Vector({
"source": ol.source.Vector({
"format": ol.format.GeoJSON()
}),
"name": "Vector",
"style": ol.style.Style({
"image": ol.style.Circle({
"radius": 7,
"fill": ol.style.Fill({
"color": 'rgba(255, 0, 0, 0.5)'
})
})
}),
})
self.drawVectorLayer = ol.layer.Vector({
"source": ol.source.Vector({
"format": ol.format.GeoJSON()
}),
"name": "Draw Vector",
"style": ol.style.Style({
"image": ol.style.Circle({
"radius": 7,
"fill": ol.style.Fill({
"color": 'rgba(0, 255, 0, 0.5)'
})
})
}),
})
self.drawPoint = ol.interaction.Draw({
"type": 'Point',
"source": self.drawVectorLayer.getSource()
})
self.map_config = {
"target": self.mapnode,
'view': self.olview,
"controls": [ol.control.Zoom(), ol.control.MousePosition()],
"layers": []
}
self.map = ol.Map(self.map_config)
self.map.on('click', self.pointer_event)
self.initialised = True
@flx.emitter
def pointer_event(self, event):
return {"event": event}
@flx.action
def add_vector_layer(self):
format = self.vectorLayer.getSource().getFormat()
features = format.readFeatures(geojson)
self.vectorLayer.getSource().clear()
self.vectorLayer.getSource().addFeatures(features)
self.map.addLayer(self.vectorLayer)
@flx.action
def add_osm_layers(self):
self.map.addLayer(self.baseLayer)
def _create_dom(self):
global document
node = document.createElement('div')
self.mapnode = document.createElement('div')
node.appendChild(self.mapnode)
self.mapnode.id = 'maproot'
return node
def _render_dom(self):
self.map_init()
return super()._render_dom()
class MainWidget(flx.Widget):
def init(self):
self.set_title("Openlayers example")
with flx.VBox():
with flx.HBox():
self.map = Ol(flex=1)
self.btn = flx.Button(text="", disabled=True)
with flx.VBox():
self.btnosm = flx.Button(text='Load Openstreetmap')
self.btna = flx.Button(text='Load GEOJSON')
self.btnr = flx.Button(text='Remove GEOJSON')
self.btndraw = flx.Button(text='Draw Points')
self.btn_stop_draw = flx.Button(text='Stop Drawing')
flx.Widget(flex=1)
self.coords = flx.Label(flex=1)
@flx.reaction('btn_stop_draw.pointer_click')
def handle_stop_drawing(self, *events):
self.coords.set_text("Stop Drawing")
self.map.remove_drawing_interaction()
@flx.reaction('btndraw.pointer_click')
def handle_drawing(self, *events):
self.coords.set_text("Drawing..")
self.map.add_drawing_interaction()
@flx.reaction('btna.pointer_click')
def handle_vector_layers(self, *events):
self.coords.set_text("Adding GEOJSON")
self.map.add_vector_layer()
@flx.reaction('btnosm.pointer_click')
def handle_osm_layers(self, *events):
self.coords.set_text("Adding Openstreetmap")
self.map.add_osm_layers()
@flx.reaction('btnr.pointer_click')
def handle_remove_layers(self, *events):
self.map.remove_layers()
self.coords.set_text("Removing GEOJSON")
@flx.reaction('map.pointer_event')
def map_click(self, *events):
ev = events[-1]
coord = ev['event']['coordinate']
self.coords.set_text("Clicking on coordinate " + str(coord))
if __name__ == '__main__':
flx.launch(MainWidget, 'firefox-browser')
flx.run()
|
from datetime import timedelta
from typing import Optional
from bond_api import Action, DeviceType, Direction
from homeassistant import core
from homeassistant.components import fan
from homeassistant.components.fan import (
ATTR_DIRECTION,
ATTR_SPEED,
ATTR_SPEED_LIST,
DIRECTION_FORWARD,
DIRECTION_REVERSE,
DOMAIN as FAN_DOMAIN,
SERVICE_SET_DIRECTION,
SERVICE_SET_SPEED,
SPEED_OFF,
)
from homeassistant.const import ATTR_ENTITY_ID, SERVICE_TURN_OFF, SERVICE_TURN_ON
from homeassistant.helpers.entity_registry import EntityRegistry
from homeassistant.util import utcnow
from .common import (
help_test_entity_available,
patch_bond_action,
patch_bond_device_state,
setup_platform,
)
from tests.common import async_fire_time_changed
def ceiling_fan(name: str):
"""Create a ceiling fan with given name."""
return {
"name": name,
"type": DeviceType.CEILING_FAN,
"actions": ["SetSpeed", "SetDirection"],
}
async def turn_fan_on(
hass: core.HomeAssistant, fan_id: str, speed: Optional[str] = None
) -> None:
"""Turn the fan on at the specified speed."""
service_data = {ATTR_ENTITY_ID: fan_id}
if speed:
service_data[fan.ATTR_SPEED] = speed
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_ON,
service_data=service_data,
blocking=True,
)
await hass.async_block_till_done()
async def test_entity_registry(hass: core.HomeAssistant):
"""Tests that the devices are registered in the entity registry."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_version={"bondid": "test-hub-id"},
bond_device_id="test-device-id",
)
registry: EntityRegistry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.entities["fan.name_1"]
assert entity.unique_id == "test-hub-id_test-device-id"
async def test_non_standard_speed_list(hass: core.HomeAssistant):
"""Tests that the device is registered with custom speed list if number of supported speeds differs form 3."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"max_speed": 6},
)
actual_speeds = hass.states.get("fan.name_1").attributes[ATTR_SPEED_LIST]
assert actual_speeds == [
fan.SPEED_OFF,
fan.SPEED_LOW,
fan.SPEED_MEDIUM,
fan.SPEED_HIGH,
]
with patch_bond_device_state():
with patch_bond_action() as mock_set_speed_low:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed_low.assert_called_once_with(
"test-device-id", Action.set_speed(1)
)
with patch_bond_action() as mock_set_speed_medium:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_MEDIUM)
mock_set_speed_medium.assert_called_once_with(
"test-device-id", Action.set_speed(3)
)
with patch_bond_action() as mock_set_speed_high:
await turn_fan_on(hass, "fan.name_1", fan.SPEED_HIGH)
mock_set_speed_high.assert_called_once_with(
"test-device-id", Action.set_speed(6)
)
async def test_fan_speed_with_no_max_speed(hass: core.HomeAssistant):
"""Tests that fans without max speed (increase/decrease controls) map speed to HA standard."""
await setup_platform(
hass,
FAN_DOMAIN,
ceiling_fan("name-1"),
bond_device_id="test-device-id",
props={"no": "max_speed"},
state={"power": 1, "speed": 14},
)
assert hass.states.get("fan.name_1").attributes["speed"] == fan.SPEED_HIGH
async def test_turn_on_fan_with_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to set speed API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_speed, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", fan.SPEED_LOW)
mock_set_speed.assert_called_with("test-device-id", Action.set_speed(1))
async def test_turn_on_fan_without_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to turn on API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_on, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1")
mock_turn_on.assert_called_with("test-device-id", Action.turn_on())
async def test_turn_on_fan_with_off_speed(hass: core.HomeAssistant):
"""Tests that turn on command delegates to turn off API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await turn_fan_on(hass, "fan.name_1", fan.SPEED_OFF)
mock_turn_off.assert_called_with("test-device-id", Action.turn_off())
async def test_set_speed_off(hass: core.HomeAssistant):
"""Tests that set_speed(off) command delegates to turn off API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_SPEED,
service_data={ATTR_ENTITY_ID: "fan.name_1", ATTR_SPEED: SPEED_OFF},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_with("test-device-id", Action.turn_off())
async def test_turn_off_fan(hass: core.HomeAssistant):
"""Tests that turn off command delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_turn_off, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "fan.name_1"},
blocking=True,
)
await hass.async_block_till_done()
mock_turn_off.assert_called_once_with("test-device-id", Action.turn_off())
async def test_update_reports_fan_on(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is on."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 1, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "on"
async def test_update_reports_fan_off(hass: core.HomeAssistant):
"""Tests that update command sets correct state when Bond API reports fan power is off."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"power": 0, "speed": 1}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").state == "off"
async def test_update_reports_direction_forward(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is forward."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.FORWARD}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_FORWARD
async def test_update_reports_direction_reverse(hass: core.HomeAssistant):
"""Tests that update command sets correct direction when Bond API reports fan direction is reverse."""
await setup_platform(hass, FAN_DOMAIN, ceiling_fan("name-1"))
with patch_bond_device_state(return_value={"direction": Direction.REVERSE}):
async_fire_time_changed(hass, utcnow() + timedelta(seconds=30))
await hass.async_block_till_done()
assert hass.states.get("fan.name_1").attributes[ATTR_DIRECTION] == DIRECTION_REVERSE
async def test_set_fan_direction(hass: core.HomeAssistant):
"""Tests that set direction command delegates to API."""
await setup_platform(
hass, FAN_DOMAIN, ceiling_fan("name-1"), bond_device_id="test-device-id"
)
with patch_bond_action() as mock_set_direction, patch_bond_device_state():
await hass.services.async_call(
FAN_DOMAIN,
SERVICE_SET_DIRECTION,
{ATTR_ENTITY_ID: "fan.name_1", ATTR_DIRECTION: DIRECTION_FORWARD},
blocking=True,
)
await hass.async_block_till_done()
mock_set_direction.assert_called_once_with(
"test-device-id", Action.set_direction(Direction.FORWARD)
)
async def test_fan_available(hass: core.HomeAssistant):
"""Tests that available state is updated based on API errors."""
await help_test_entity_available(
hass, FAN_DOMAIN, ceiling_fan("name-1"), "fan.name_1"
)
|
import argparse
import logging
import os
import signal
import socket
import socketserver
import sys
import syslogmp
from paasta_tools.firewall import services_running_here
from paasta_tools.utils import _log
from paasta_tools.utils import configure_log
from paasta_tools.utils import load_system_paasta_config
DEFAULT_NUM_WORKERS = 5
log = logging.getLogger(__name__)
class SyslogUDPHandler(socketserver.BaseRequestHandler):
def setup(self):
configure_log()
self.cluster = load_system_paasta_config().get_cluster()
def handle(self):
data, socket = self.request
syslog_to_paasta_log(data, self.cluster)
def syslog_to_paasta_log(data, cluster):
iptables_log = parse_syslog(data)
if iptables_log is None:
return
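    # only iptables kernel log lines make it past parse_syslog()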
service, instance = lookup_service_instance_by_ip(iptables_log["SRC"])
if service is None or instance is None:
return
# prepend hostname
log_line = iptables_log["hostname"] + ": " + iptables_log["message"]
_log(
service=service,
component="security",
level="debug",
cluster=cluster,
instance=instance,
line=log_line,
)
def parse_syslog(data):
parsed_data = syslogmp.parse(data)
try:
full_message = parsed_data.message.decode()
except UnicodeDecodeError:
return None
if not full_message.startswith("kernel: ["):
# Not a kernel message
return None
close_bracket = full_message.find("]")
if close_bracket == -1:
return None
iptables_message = full_message[close_bracket + 1 :].strip()
parts = iptables_message.split(" ")
# parts[0] is the log-prefix
# parts[1..] is either KEY=VALUE or just KEY
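    # Illustrative (hypothetical) payload after the "kernel: [...]" prefix:
    #   "PREFIX: IN=docker0 OUT= SRC=10.0.1.2 DST=10.0.1.3 PROTO=TCP DPT=443"
    # which yields fields such as {"IN": "docker0", "SRC": "10.0.1.2", ...}.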
if not parts[1].startswith("IN="):
# not an iptables message
return None
fields = {k: v for k, _, v in (field.partition("=") for field in parts[1:])}
fields["hostname"] = parsed_data.hostname
fields["message"] = iptables_message
return fields
def lookup_service_instance_by_ip(ip_lookup):
for service, instance, mac, ip in services_running_here():
if ip == ip_lookup:
return (service, instance)
log.info(f"Unable to find container for ip {ip_lookup}")
return (None, None)
def parse_args(argv=None):
parser = argparse.ArgumentParser(
description="Adapts iptables syslog messages into scribe"
)
parser.add_argument(
"-v", "--verbose", action="store_true", dest="verbose", default=False
)
parser.add_argument(
"-l", "--listen-host", help="Default %(default)s", default="127.0.0.1"
)
parser.add_argument(
"-p", "--listen-port", type=int, help="Default %(default)s", default=1516
)
parser.add_argument(
"-w",
"--num-workers",
type=int,
help="Default %(default)s",
default=DEFAULT_NUM_WORKERS,
)
args = parser.parse_args(argv)
return args
def setup_logging(verbose):
if verbose:
logging.basicConfig(level=logging.DEBUG)
else:
logging.basicConfig(level=logging.WARNING)
class MultiUDPServer(socketserver.UDPServer):
# UDPServer with SO_REUSEPORT enabled so that incoming packets are
# load-balanced across listeners
def server_bind(self):
self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
        # delegate to the base class implementation to perform the actual bind
socketserver.UDPServer.server_bind(self)
def run_server(listen_host, listen_port):
server = MultiUDPServer((listen_host, listen_port), SyslogUDPHandler)
server.serve_forever()
def main(argv=None):
args = parse_args(argv)
setup_logging(args.verbose)
assert args.num_workers > 0
# start n-1 separate processes, then run_server() on this one
num_forks = args.num_workers - 1
for x in range(num_forks):
if os.fork() == 0:
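            # the child process blocks in serve_forever() below and never
            # returns; only the parent continues the loop to fork more workers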
run_server(args.listen_host, args.listen_port)
# propagate SIGTERM to all my children then exit
signal.signal(
signal.SIGTERM, lambda signum, _: os.killpg(os.getpid(), signum) or sys.exit(1)
)
run_server(args.listen_host, args.listen_port)
|
import numpy as np
import chainer
from chainercv.links.model.faster_rcnn import FasterRCNN
from chainercv.utils import generate_random_bbox
def _random_array(xp, shape):
return xp.array(
np.random.uniform(-1, 1, size=shape), dtype=np.float32)
class DummyExtractor(chainer.Link):
def __init__(self, feat_stride):
super(DummyExtractor, self).__init__()
self.feat_stride = feat_stride
def forward(self, x):
_, _, H, W = x.shape
return _random_array(
self.xp,
(1, 8, H // self.feat_stride, W // self.feat_stride))
class DummyHead(chainer.Chain):
def __init__(self, n_class):
super(DummyHead, self).__init__()
self.n_class = n_class
def forward(self, x, rois, roi_indices):
n_roi = len(rois)
cls_locs = chainer.Variable(
_random_array(self.xp, (n_roi, self.n_class * 4)))
# For each bbox, the score for a selected class is
# overwhelmingly higher than the scores for the other classes.
score_idx = np.random.randint(
low=0, high=self.n_class, size=(n_roi,))
scores = self.xp.zeros((n_roi, self.n_class), dtype=np.float32)
scores[np.arange(n_roi), score_idx] = 100
scores = chainer.Variable(scores)
return cls_locs, scores
class DummyRegionProposalNetwork(chainer.Chain):
def __init__(self, n_anchor_base, n_roi):
super(DummyRegionProposalNetwork, self).__init__()
self.n_anchor_base = n_anchor_base
self.n_roi = n_roi
def forward(self, x, img_size, scale):
B, _, H, W = x.shape
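        # Emit random RPN outputs with the expected shapes: locs (B, n_anchor, 4),
        # scores (B, n_anchor, 2), plus n_roi proposals and matching anchors.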
n_anchor = self.n_anchor_base * H * W
rpn_locs = _random_array(self.xp, (B, n_anchor, 4))
rpn_cls_scores = _random_array(self.xp, (B, n_anchor, 2))
rois = self.xp.asarray(generate_random_bbox(
self.n_roi, img_size, 16, min(img_size)))
roi_indices = self.xp.zeros((len(rois),), dtype=np.int32)
anchor = self.xp.asarray(generate_random_bbox(
n_anchor, img_size, 16, min(img_size)))
return (chainer.Variable(rpn_locs),
chainer.Variable(rpn_cls_scores), rois, roi_indices, anchor)
class DummyFasterRCNN(FasterRCNN):
def __init__(self, n_anchor_base, feat_stride, n_fg_class, n_roi,
min_size, max_size
):
super(DummyFasterRCNN, self).__init__(
DummyExtractor(feat_stride),
DummyRegionProposalNetwork(n_anchor_base, n_roi),
DummyHead(n_fg_class + 1),
mean=np.array([[[100]], [[122.5]], [[145]]]),
min_size=min_size,
max_size=max_size
)
|
import logging
from typing import Dict, Iterable, Optional
from sharkiqpy import OperatingModes, PowerModes, Properties, SharkIqVacuum
from homeassistant.components.vacuum import (
STATE_CLEANING,
STATE_DOCKED,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STATUS,
SUPPORT_STOP,
StateVacuumEntity,
)
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DOMAIN, SHARK
from .update_coordinator import SharkIqUpdateCoordinator
_LOGGER = logging.getLogger(__name__)
# Supported features
SUPPORT_SHARKIQ = (
SUPPORT_BATTERY
| SUPPORT_FAN_SPEED
| SUPPORT_PAUSE
| SUPPORT_RETURN_HOME
| SUPPORT_START
| SUPPORT_STATE
| SUPPORT_STATUS
| SUPPORT_STOP
| SUPPORT_LOCATE
)
OPERATING_STATE_MAP = {
OperatingModes.PAUSE: STATE_PAUSED,
OperatingModes.START: STATE_CLEANING,
OperatingModes.STOP: STATE_IDLE,
OperatingModes.RETURN: STATE_RETURNING,
}
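# Friendly fan speed names exposed to Home Assistant, mapped to sharkiqpy power modes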
FAN_SPEEDS_MAP = {
"Eco": PowerModes.ECO,
"Normal": PowerModes.NORMAL,
"Max": PowerModes.MAX,
}
STATE_RECHARGING_TO_RESUME = "recharging_to_resume"
# Attributes to expose
ATTR_ERROR_CODE = "last_error_code"
ATTR_ERROR_MSG = "last_error_message"
ATTR_LOW_LIGHT = "low_light"
ATTR_RECHARGE_RESUME = "recharge_and_resume"
ATTR_RSSI = "rssi"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Shark IQ vacuum cleaner."""
coordinator: SharkIqUpdateCoordinator = hass.data[DOMAIN][config_entry.entry_id]
devices: Iterable["SharkIqVacuum"] = coordinator.shark_vacs.values()
device_names = [d.name for d in devices]
_LOGGER.debug(
"Found %d Shark IQ device(s): %s",
len(device_names),
", ".join([d.name for d in devices]),
)
async_add_entities([SharkVacuumEntity(d, coordinator) for d in devices])
class SharkVacuumEntity(CoordinatorEntity, StateVacuumEntity):
"""Shark IQ vacuum entity."""
def __init__(self, sharkiq: SharkIqVacuum, coordinator: SharkIqUpdateCoordinator):
"""Create a new SharkVacuumEntity."""
super().__init__(coordinator)
self.sharkiq = sharkiq
def clean_spot(self, **kwargs):
"""Clean a spot. Not yet implemented."""
raise NotImplementedError()
def send_command(self, command, params=None, **kwargs):
"""Send a command to the vacuum. Not yet implemented."""
raise NotImplementedError()
@property
def is_online(self) -> bool:
"""Tell us if the device is online."""
return self.coordinator.device_is_online(self.sharkiq.serial_number)
@property
def name(self) -> str:
"""Device name."""
return self.sharkiq.name
@property
def serial_number(self) -> str:
"""Vacuum API serial number (DSN)."""
return self.sharkiq.serial_number
@property
def model(self) -> str:
"""Vacuum model number."""
if self.sharkiq.vac_model_number:
return self.sharkiq.vac_model_number
return self.sharkiq.oem_model_number
@property
def device_info(self) -> Dict:
"""Device info dictionary."""
return {
"identifiers": {(DOMAIN, self.serial_number)},
"name": self.name,
"manufacturer": SHARK,
"model": self.model,
"sw_version": self.sharkiq.get_property_value(
Properties.ROBOT_FIRMWARE_VERSION
),
}
@property
def supported_features(self) -> int:
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_SHARKIQ
@property
def is_docked(self) -> Optional[bool]:
"""Is vacuum docked."""
return self.sharkiq.get_property_value(Properties.DOCKED_STATUS)
@property
def error_code(self) -> Optional[int]:
"""Return the last observed error code (or None)."""
return self.sharkiq.error_code
@property
def error_message(self) -> Optional[str]:
"""Return the last observed error message (or None)."""
if not self.error_code:
return None
return self.sharkiq.error_text
@property
def operating_mode(self) -> Optional[str]:
"""Operating mode.."""
op_mode = self.sharkiq.get_property_value(Properties.OPERATING_MODE)
return OPERATING_STATE_MAP.get(op_mode)
@property
def recharging_to_resume(self) -> Optional[int]:
"""Return True if vacuum set to recharge and resume cleaning."""
return self.sharkiq.get_property_value(Properties.RECHARGING_TO_RESUME)
@property
def state(self):
"""
Get the current vacuum state.
NB: Currently, we do not return an error state because they can be very, very stale.
In the app, these are (usually) handled by showing the robot as stopped and sending the
user a notification.
"""
if self.is_docked:
return STATE_DOCKED
return self.operating_mode
@property
def unique_id(self) -> str:
"""Return the unique id of the vacuum cleaner."""
return self.serial_number
@property
def available(self) -> bool:
"""Determine if the sensor is available based on API results."""
# If the last update was successful...
return self.coordinator.last_update_success and self.is_online
@property
def battery_level(self):
"""Get the current battery level."""
return self.sharkiq.get_property_value(Properties.BATTERY_CAPACITY)
async def async_return_to_base(self, **kwargs):
"""Have the device return to base."""
await self.sharkiq.async_set_operating_mode(OperatingModes.RETURN)
await self.coordinator.async_refresh()
async def async_pause(self):
"""Pause the cleaning task."""
await self.sharkiq.async_set_operating_mode(OperatingModes.PAUSE)
await self.coordinator.async_refresh()
async def async_start(self):
"""Start the device."""
await self.sharkiq.async_set_operating_mode(OperatingModes.START)
await self.coordinator.async_refresh()
async def async_stop(self, **kwargs):
"""Stop the device."""
await self.sharkiq.async_set_operating_mode(OperatingModes.STOP)
await self.coordinator.async_refresh()
async def async_locate(self, **kwargs):
"""Cause the device to generate a loud chirp."""
await self.sharkiq.async_find_device()
@property
    def fan_speed(self) -> Optional[str]:
"""Return the current fan speed."""
fan_speed = None
speed_level = self.sharkiq.get_property_value(Properties.POWER_MODE)
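        # reverse-map the device's power mode to its friendly name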
for k, val in FAN_SPEEDS_MAP.items():
if val == speed_level:
fan_speed = k
return fan_speed
async def async_set_fan_speed(self, fan_speed: str, **kwargs):
"""Set the fan speed."""
await self.sharkiq.async_set_property_value(
Properties.POWER_MODE, FAN_SPEEDS_MAP.get(fan_speed.capitalize())
)
await self.coordinator.async_refresh()
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return list(FAN_SPEEDS_MAP)
# Various attributes we want to expose
@property
def recharge_resume(self) -> Optional[bool]:
"""Recharge and resume mode active."""
return self.sharkiq.get_property_value(Properties.RECHARGE_RESUME)
@property
def rssi(self) -> Optional[int]:
"""Get the WiFi RSSI."""
return self.sharkiq.get_property_value(Properties.RSSI)
@property
def low_light(self):
"""Let us know if the robot is operating in low-light mode."""
return self.sharkiq.get_property_value(Properties.LOW_LIGHT_MISSION)
@property
def device_state_attributes(self) -> Dict:
"""Return a dictionary of device state attributes specific to sharkiq."""
data = {
ATTR_ERROR_CODE: self.error_code,
ATTR_ERROR_MSG: self.sharkiq.error_text,
ATTR_LOW_LIGHT: self.low_light,
ATTR_RECHARGE_RESUME: self.recharge_resume,
}
return data
|
import os
from collections import defaultdict
from copy import copy
from urllib.parse import urljoin
import blinker
import natsort
from nikola import utils, hierarchy_utils
from nikola.nikola import _enclosure
from nikola.plugin_categories import Task
class RenderTaxonomies(Task):
"""Render taxonomy pages and feeds."""
name = "render_taxonomies"
def _generate_classification_overview_kw_context(self, taxonomy, lang):
"""Create context and kw for a classification overview page."""
context, kw = taxonomy.provide_overview_context_and_uptodate(lang)
context = copy(context)
context["kind"] = "{}_index".format(taxonomy.classification_name)
sorted_links = []
for other_lang in sorted(self.site.config['TRANSLATIONS'].keys()):
if other_lang != lang:
sorted_links.append((other_lang, None, None))
# Put the current language in front, so that it appears first in links
# (Issue #3248)
sorted_links_all = [(lang, None, None)] + sorted_links
context['has_other_languages'] = True
context['other_languages'] = sorted_links
context['all_languages'] = sorted_links_all
kw = copy(kw)
kw["messages"] = self.site.MESSAGES
kw["translations"] = self.site.config['TRANSLATIONS']
kw["filters"] = self.site.config['FILTERS']
kw["minimum_post_count"] = taxonomy.minimum_post_count_per_classification_in_overview
kw["output_folder"] = self.site.config['OUTPUT_FOLDER']
kw["pretty_urls"] = self.site.config['PRETTY_URLS']
kw["strip_indexes"] = self.site.config['STRIP_INDEXES']
kw["index_file"] = self.site.config['INDEX_FILE']
# Collect all relevant classifications
if taxonomy.has_hierarchy:
def acceptor(node):
return len(self._filter_list(self.site.posts_per_classification[taxonomy.classification_name][lang][node.classification_name], lang)) >= kw["minimum_post_count"]
clipped_root_list = [hierarchy_utils.clone_treenode(node, parent=None, acceptor=acceptor) for node in self.site.hierarchy_per_classification[taxonomy.classification_name][lang]]
clipped_root_list = [node for node in clipped_root_list if node]
clipped_flat_hierarchy = hierarchy_utils.flatten_tree_structure(clipped_root_list)
classifications = [cat.classification_name for cat in clipped_flat_hierarchy]
else:
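            # Natural sort of flat classifications: numbers compare as floats
            # and case is ignored (natsort.ns.F | natsort.ns.IC).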
classifications = natsort.natsorted([tag for tag, posts in self.site.posts_per_classification[taxonomy.classification_name][lang].items()
if len(self._filter_list(posts, lang)) >= kw["minimum_post_count"]],
alg=natsort.ns.F | natsort.ns.IC)
taxonomy.sort_classifications(classifications, lang)
# Set up classifications in context
context[taxonomy.overview_page_variable_name] = classifications
context["has_hierarchy"] = taxonomy.has_hierarchy
if taxonomy.overview_page_items_variable_name:
items = [(classification,
self.site.link(taxonomy.classification_name, classification, lang))
for classification in classifications]
items_with_postcount = [
(classification,
self.site.link(taxonomy.classification_name, classification, lang),
len(self._filter_list(self.site.posts_per_classification[taxonomy.classification_name][lang][classification], lang)))
for classification in classifications
]
context[taxonomy.overview_page_items_variable_name] = items
context[taxonomy.overview_page_items_variable_name + "_with_postcount"] = items_with_postcount
if taxonomy.has_hierarchy and taxonomy.overview_page_hierarchy_variable_name:
hier_items = [
(node.name, node.classification_name, node.classification_path,
self.site.link(taxonomy.classification_name, node.classification_name, lang),
node.indent_levels, node.indent_change_before,
node.indent_change_after)
for node in clipped_flat_hierarchy
]
hier_items_with_postcount = [
(node.name, node.classification_name, node.classification_path,
self.site.link(taxonomy.classification_name, node.classification_name, lang),
node.indent_levels, node.indent_change_before,
node.indent_change_after,
len(node.children),
len(self._filter_list(self.site.posts_per_classification[taxonomy.classification_name][lang][node.classification_name], lang)))
for node in clipped_flat_hierarchy
]
context[taxonomy.overview_page_hierarchy_variable_name] = hier_items
context[taxonomy.overview_page_hierarchy_variable_name + '_with_postcount'] = hier_items_with_postcount
return context, kw
def _render_classification_overview(self, classification_name, template, lang, context, kw):
# Prepare rendering
context["permalink"] = self.site.link("{}_index".format(classification_name), None, lang)
if "pagekind" not in context:
context["pagekind"] = ["list", "tags_page"]
output_name = os.path.join(self.site.config['OUTPUT_FOLDER'], self.site.path('{}_index'.format(classification_name), None, lang))
blinker.signal('generate_classification_overview').send({
'site': self.site,
'classification_name': classification_name,
'lang': lang,
'context': context,
'kw': kw,
'output_name': output_name,
})
task = self.site.generic_post_list_renderer(
lang,
[],
output_name,
template,
kw['filters'],
context,
)
task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.plugins.task.taxonomies:page')]
task['basename'] = str(self.name)
yield task
def _generate_classification_overview(self, taxonomy, lang):
"""Create a global "all your tags/categories" page for a given language."""
context, kw = self._generate_classification_overview_kw_context(taxonomy, lang)
for task in self._render_classification_overview(taxonomy.classification_name, taxonomy.template_for_classification_overview, lang, context, kw):
yield task
def _generate_tag_and_category_overview(self, tag_taxonomy, category_taxonomy, lang):
"""Create a global "all your tags/categories" page for a given language."""
# Create individual contexts and kw dicts
tag_context, tag_kw = self._generate_classification_overview_kw_context(tag_taxonomy, lang)
cat_context, cat_kw = self._generate_classification_overview_kw_context(category_taxonomy, lang)
# Combine resp. select dicts
if tag_context['items'] and cat_context['cat_items']:
# Combine contexts. We must merge the tag context into the category context
# so that tag_context['items'] makes it into the result.
context = cat_context
context.update(tag_context)
kw = cat_kw
kw.update(tag_kw)
# Update title
title = self.site.MESSAGES[lang]["Tags and Categories"]
context['title'] = title
context['description'] = title
kw['title'] = title
kw['description'] = title
elif cat_context['cat_items']:
# Use category overview page
context = cat_context
kw = cat_kw
else:
# Use tag overview page
context = tag_context
kw = tag_kw
# Render result
for task in self._render_classification_overview('tag', tag_taxonomy.template_for_classification_overview, lang, context, kw):
yield task
def _generate_classification_page_as_rss(self, taxonomy, classification, filtered_posts, title, description, kw, lang):
"""Create a RSS feed for a single classification in a given language."""
kind = taxonomy.classification_name
# Render RSS
output_name = os.path.normpath(os.path.join(self.site.config['OUTPUT_FOLDER'], self.site.path(kind + "_rss", classification, lang)))
feed_url = urljoin(self.site.config['BASE_URL'], self.site.link(kind + "_rss", classification, lang).lstrip('/'))
deps = []
deps_uptodate = []
for post in filtered_posts:
deps += post.deps(lang)
deps_uptodate += post.deps_uptodate(lang)
blog_title = kw["blog_title"](lang)
task = {
'basename': str(self.name),
'name': output_name,
'file_dep': deps,
'targets': [output_name],
'actions': [(utils.generic_rss_renderer,
(lang, "{0} ({1})".format(blog_title, title) if blog_title != title else blog_title,
kw["site_url"], description, filtered_posts,
output_name, kw["feed_teasers"], kw["feed_plain"], kw['feed_length'],
feed_url, _enclosure, kw["feed_links_append_query"]))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.taxonomies:rss')] + deps_uptodate,
'task_dep': ['render_posts'],
}
return utils.apply_filters(task, kw['filters'])
def _generate_classification_page_as_index(self, taxonomy, classification, filtered_posts, context, kw, lang):
"""Render an index page collection using only this classification's posts."""
kind = taxonomy.classification_name
def page_link(i, displayed_i, num_pages, force_addition, extension=None):
return self.site.link(kind, classification, lang, alternative_path=force_addition, page=i)
def page_path(i, displayed_i, num_pages, force_addition, extension=None):
return self.site.path(kind, classification, lang, alternative_path=force_addition, page=i)
context = copy(context)
context["kind"] = kind
if "pagekind" not in context:
context["pagekind"] = ["index", "tag_page"]
template_name = taxonomy.template_for_single_list
yield self.site.generic_index_renderer(lang, filtered_posts, context['title'], template_name, context, kw, str(self.name), page_link, page_path)
def _generate_classification_page_as_atom(self, taxonomy, classification, filtered_posts, context, kw, lang):
"""Generate atom feeds for classification lists."""
kind = taxonomy.classification_name
context = copy(context)
context["kind"] = kind
yield self.site.generic_atom_renderer(lang, filtered_posts, context, kw, str(self.name), classification, kind)
def _generate_classification_page_as_list(self, taxonomy, classification, filtered_posts, context, kw, lang):
"""Render a single flat link list with this classification's posts."""
kind = taxonomy.classification_name
template_name = taxonomy.template_for_single_list
output_name = os.path.join(self.site.config['OUTPUT_FOLDER'], self.site.path(kind, classification, lang))
context["lang"] = lang
# list.tmpl expects a different format than list_post.tmpl (Issue #2701)
if template_name == 'list.tmpl':
context["items"] = [(post.title(lang), post.permalink(lang), None) for post in filtered_posts]
else:
context["posts"] = filtered_posts
if "pagekind" not in context:
context["pagekind"] = ["list", "tag_page"]
task = self.site.generic_post_list_renderer(lang, filtered_posts, output_name, template_name, kw['filters'], context)
task['uptodate'] = task['uptodate'] + [utils.config_changed(kw, 'nikola.plugins.task.taxonomies:list')]
task['basename'] = str(self.name)
yield task
def _filter_list(self, post_list, lang):
"""Return only the posts which should be shown for this language."""
if self.site.config["SHOW_UNTRANSLATED_POSTS"]:
return post_list
else:
return [x for x in post_list if x.is_translation_available(lang)]
def _generate_subclassification_page(self, taxonomy, node, context, kw, lang):
"""Render a list of subclassifications."""
def get_subnode_data(subnode):
return [
taxonomy.get_classification_friendly_name(subnode.classification_name, lang, only_last_component=True),
self.site.link(taxonomy.classification_name, subnode.classification_name, lang),
len(self._filter_list(self.site.posts_per_classification[taxonomy.classification_name][lang][subnode.classification_name], lang))
]
items = [get_subnode_data(subnode) for subnode in node.children]
context = copy(context)
context["lang"] = lang
context["permalink"] = self.site.link(taxonomy.classification_name, node.classification_name, lang)
if "pagekind" not in context:
context["pagekind"] = ["list", "archive_page"]
context["items"] = items
task = self.site.generic_post_list_renderer(
lang,
[],
os.path.join(kw['output_folder'], self.site.path(taxonomy.classification_name, node.classification_name, lang)),
taxonomy.subcategories_list_template,
kw['filters'],
context,
)
task_cfg = {1: kw, 2: items}
task['uptodate'] = task['uptodate'] + [utils.config_changed(task_cfg, 'nikola.plugins.task.taxonomy')]
task['basename'] = self.name
return task
def _generate_classification_page(self, taxonomy, classification, filtered_posts, generate_list, generate_rss, generate_atom, lang, post_lists_per_lang, classification_set_per_lang=None):
"""Render index or post list and associated feeds per classification."""
# Should we create this list?
if not any((generate_list, generate_rss, generate_atom)):
return
# Get data
node = None
if taxonomy.has_hierarchy:
node = self.site.hierarchy_lookup_per_classification[taxonomy.classification_name][lang].get(classification)
context, kw = taxonomy.provide_context_and_uptodate(classification, lang, node)
kw = copy(kw)
kw["messages"] = self.site.MESSAGES
kw["translations"] = self.site.config['TRANSLATIONS']
kw["filters"] = self.site.config['FILTERS']
kw["site_url"] = self.site.config['SITE_URL']
kw["blog_title"] = self.site.config['BLOG_TITLE']
kw["generate_rss"] = self.site.config['GENERATE_RSS']
kw["generate_atom"] = self.site.config['GENERATE_ATOM']
kw["feed_teasers"] = self.site.config["FEED_TEASERS"]
kw["feed_plain"] = self.site.config["FEED_PLAIN"]
kw["feed_links_append_query"] = self.site.config["FEED_LINKS_APPEND_QUERY"]
kw["feed_length"] = self.site.config['FEED_LENGTH']
kw["output_folder"] = self.site.config['OUTPUT_FOLDER']
kw["pretty_urls"] = self.site.config['PRETTY_URLS']
kw["strip_indexes"] = self.site.config['STRIP_INDEXES']
kw["index_file"] = self.site.config['INDEX_FILE']
context = copy(context)
context["permalink"] = self.site.link(taxonomy.classification_name, classification, lang)
context["kind"] = taxonomy.classification_name
# Get links to other language versions of this classification
if classification_set_per_lang is not None:
other_lang_links = taxonomy.get_other_language_variants(classification, lang, classification_set_per_lang)
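            # other_lang_links is a sequence of (lang, classification) pairs
            # for the same classification as it appears in other languages.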
# Collect by language
links_per_lang = defaultdict(list)
for other_lang, link in other_lang_links:
# Make sure we ignore the current language (in case the
# plugin accidentally returns links for it as well)
if other_lang != lang:
links_per_lang[other_lang].append(link)
# Sort first by language, then by classification
sorted_links = []
sorted_links_all = []
for other_lang in sorted(list(links_per_lang.keys()) + [lang]):
if other_lang == lang:
sorted_links_all.append((lang, classification, taxonomy.get_classification_friendly_name(classification, lang)))
else:
links = hierarchy_utils.sort_classifications(taxonomy, links_per_lang[other_lang], other_lang)
links = [(other_lang, other_classification,
taxonomy.get_classification_friendly_name(other_classification, other_lang))
for other_classification in links if post_lists_per_lang[other_lang].get(other_classification, ('', False, False))[1]]
sorted_links.extend(links)
sorted_links_all.extend(links)
# Store result in context and kw
context['has_other_languages'] = True
context['other_languages'] = sorted_links
context['all_languages'] = sorted_links_all
kw['other_languages'] = sorted_links
kw['all_languages'] = sorted_links_all
else:
context['has_other_languages'] = False
# Allow other plugins to modify the result
blinker.signal('generate_classification_page').send({
'site': self.site,
'taxonomy': taxonomy,
'classification': classification,
'lang': lang,
'posts': filtered_posts,
'context': context,
'kw': kw,
})
# Decide what to do
if taxonomy.has_hierarchy and taxonomy.show_list_as_subcategories_list:
# Determine whether there are subcategories
node = self.site.hierarchy_lookup_per_classification[taxonomy.classification_name][lang][classification]
# Are there subclassifications?
if len(node.children) > 0:
# Yes: create list with subclassifications instead of list of posts
if generate_list:
yield self._generate_subclassification_page(taxonomy, node, context, kw, lang)
return
# Generate RSS feed
if generate_rss and kw["generate_rss"] and not taxonomy.always_disable_rss:
yield self._generate_classification_page_as_rss(taxonomy, classification, filtered_posts, context['title'], context.get("description"), kw, lang)
# Generate Atom feed
if generate_atom and kw["generate_atom"] and not taxonomy.always_disable_atom:
yield self._generate_classification_page_as_atom(taxonomy, classification, filtered_posts, context, kw, lang)
# Render HTML
if generate_list and taxonomy.show_list_as_index:
yield self._generate_classification_page_as_index(taxonomy, classification, filtered_posts, context, kw, lang)
elif generate_list:
yield self._generate_classification_page_as_list(taxonomy, classification, filtered_posts, context, kw, lang)
def gen_tasks(self):
"""Render the tag pages and feeds."""
self.site.scan_posts()
yield self.group_task()
# Cache classification sets per language for taxonomies where
# add_other_languages_variable is True.
classification_set_per_lang = {}
for taxonomy in self.site.taxonomy_plugins.values():
if taxonomy.add_other_languages_variable:
lookup = self.site.posts_per_classification[taxonomy.classification_name]
cspl = {lang: set(lookup[lang].keys()) for lang in lookup}
classification_set_per_lang[taxonomy.classification_name] = cspl
# Collect post lists for classification pages and determine whether
# they should be generated.
post_lists_per_lang = {}
for taxonomy in self.site.taxonomy_plugins.values():
plpl = {}
for lang in self.site.config["TRANSLATIONS"]:
result = {}
for classification, posts in self.site.posts_per_classification[taxonomy.classification_name][lang].items():
# Filter list
filtered_posts = self._filter_list(posts, lang)
if len(filtered_posts) == 0 and taxonomy.omit_empty_classifications:
generate_list = generate_rss = generate_atom = False
else:
# Should we create this list?
generate_list = taxonomy.should_generate_classification_page(classification, filtered_posts, lang)
generate_rss = taxonomy.should_generate_rss_for_classification_page(classification, filtered_posts, lang)
generate_atom = taxonomy.should_generate_atom_for_classification_page(classification, filtered_posts, lang)
result[classification] = (filtered_posts, generate_list, generate_rss, generate_atom)
plpl[lang] = result
post_lists_per_lang[taxonomy.classification_name] = plpl
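        # post_lists_per_lang[classification_name][lang][classification] is a
        # (filtered_posts, generate_list, generate_rss, generate_atom) tuple.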
# Now generate pages
for lang in self.site.config["TRANSLATIONS"]:
# To support that tag and category classifications share the same overview,
# we explicitly detect this case:
ignore_plugins_for_overview = set()
if 'tag' in self.site.taxonomy_plugins and 'category' in self.site.taxonomy_plugins and self.site.link("tag_index", None, lang) == self.site.link("category_index", None, lang):
# Block both plugins from creating overviews
ignore_plugins_for_overview.add(self.site.taxonomy_plugins['tag'])
ignore_plugins_for_overview.add(self.site.taxonomy_plugins['category'])
for taxonomy in self.site.taxonomy_plugins.values():
if not taxonomy.is_enabled(lang):
continue
# Generate list of classifications (i.e. classification overview)
if taxonomy not in ignore_plugins_for_overview:
if taxonomy.template_for_classification_overview is not None:
for task in self._generate_classification_overview(taxonomy, lang):
yield task
# Process classifications
for classification, (filtered_posts, generate_list, generate_rss, generate_atom) in post_lists_per_lang[taxonomy.classification_name][lang].items():
for task in self._generate_classification_page(taxonomy, classification, filtered_posts,
generate_list, generate_rss, generate_atom, lang,
post_lists_per_lang[taxonomy.classification_name],
classification_set_per_lang.get(taxonomy.classification_name)):
yield task
# In case we are ignoring plugins for overview, we must have a collision for
# tags and categories. Handle this special case with extra code.
if ignore_plugins_for_overview:
for task in self._generate_tag_and_category_overview(self.site.taxonomy_plugins['tag'], self.site.taxonomy_plugins['category'], lang):
yield task
|
__docformat__ = "restructuredtext en"
import sys
import re
import os.path as osp
from warnings import warn
from unicodedata import normalize as _uninormalize
try:
from os import linesep
except ImportError:
linesep = '\n' # gae
from logilab.common.deprecation import deprecated
MANUAL_UNICODE_MAP = {
u'\xa1': u'!', # INVERTED EXCLAMATION MARK
u'\u0142': u'l', # LATIN SMALL LETTER L WITH STROKE
u'\u2044': u'/', # FRACTION SLASH
u'\xc6': u'AE', # LATIN CAPITAL LETTER AE
u'\xa9': u'(c)', # COPYRIGHT SIGN
u'\xab': u'"', # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xe6': u'ae', # LATIN SMALL LETTER AE
u'\xae': u'(r)', # REGISTERED SIGN
u'\u0153': u'oe', # LATIN SMALL LIGATURE OE
u'\u0152': u'OE', # LATIN CAPITAL LIGATURE OE
u'\xd8': u'O', # LATIN CAPITAL LETTER O WITH STROKE
u'\xf8': u'o', # LATIN SMALL LETTER O WITH STROKE
u'\xbb': u'"', # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xdf': u'ss', # LATIN SMALL LETTER SHARP S
    u'\u2013': u'-', # EN DASH
    u'\u2019': u"'", # RIGHT SINGLE QUOTATION MARK
}
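# e.g. (illustrative) unormalize(u'\xc6on \u2013 caf\xe9') == u'AEon - cafe'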
def unormalize(ustring, ignorenonascii=None, substitute=None):
"""replace diacritical characters with their corresponding ascii characters
    Convert the unicode string to its long normalized form (unicode characters
    will be transformed into several characters) and keep the first one only.
The normal form KD (NFKD) will apply the compatibility decomposition, i.e.
replace all compatibility characters with their equivalents.
:type substitute: str
:param substitute: replacement character to use if decomposition fails
:see: Another project about ASCII transliterations of Unicode text
http://pypi.python.org/pypi/Unidecode
"""
# backward compatibility, ignorenonascii was a boolean
if ignorenonascii is not None:
warn("ignorenonascii is deprecated, use substitute named parameter instead",
DeprecationWarning, stacklevel=2)
if ignorenonascii:
substitute = ''
res = []
for letter in ustring[:]:
try:
replacement = MANUAL_UNICODE_MAP[letter]
except KeyError:
replacement = _uninormalize('NFKD', letter)[0]
if ord(replacement) >= 2 ** 7:
if substitute is None:
raise ValueError("can't deal with non-ascii based characters")
replacement = substitute
res.append(replacement)
return u''.join(res)
def unquote(string):
"""remove optional quotes (simple or double) from the string
:type string: str or unicode
:param string: an optionally quoted string
:rtype: str or unicode
:return: the unquoted string (or the input string if it wasn't quoted)
"""
if not string:
return string
if string[0] in '"\'':
string = string[1:]
if string[-1] in '"\'':
string = string[:-1]
return string
_BLANKLINES_RGX = re.compile('\r?\n\r?\n')
_NORM_SPACES_RGX = re.compile(r'\s+')
def normalize_text(text, line_len=80, indent='', rest=False):
"""normalize a text to display it with a maximum line size and
optionally arbitrary indentation. Line jumps are normalized but blank
lines are kept. The indentation string may be used to insert a
comment (#) or a quoting (>) mark for instance.
:type text: str or unicode
:param text: the input text to normalize
:type line_len: int
:param line_len: expected maximum line's length, default to 80
:type indent: str or unicode
:param indent: optional string to use as indentation
:rtype: str or unicode
:return:
the input text normalized to fit on lines with a maximized size
inferior to `line_len`, and optionally prefixed by an
indentation string
"""
if rest:
normp = normalize_rest_paragraph
else:
normp = normalize_paragraph
result = []
for text in _BLANKLINES_RGX.split(text):
result.append(normp(text, line_len, indent))
return ('%s%s%s' % (linesep, indent, linesep)).join(result)
def normalize_paragraph(text, line_len=80, indent=''):
"""normalize a text to display it with a maximum line size and
optionally arbitrary indentation. Line jumps are normalized. The
    indentation string may be used to insert a comment mark for
instance.
:type text: str or unicode
:param text: the input text to normalize
:type line_len: int
:param line_len: expected maximum line's length, default to 80
:type indent: str or unicode
:param indent: optional string to use as indentation
:rtype: str or unicode
:return:
the input text normalized to fit on lines with a maximized size
inferior to `line_len`, and optionally prefixed by an
indentation string
"""
text = _NORM_SPACES_RGX.sub(' ', text)
line_len = line_len - len(indent)
lines = []
while text:
aline, text = splittext(text.strip(), line_len)
lines.append(indent + aline)
return linesep.join(lines)
def normalize_rest_paragraph(text, line_len=80, indent=''):
"""normalize a ReST text to display it with a maximum line size and
optionally arbitrary indentation. Line jumps are normalized. The
    indentation string may be used to insert a comment mark for
instance.
:type text: str or unicode
:param text: the input text to normalize
:type line_len: int
:param line_len: expected maximum line's length, default to 80
:type indent: str or unicode
:param indent: optional string to use as indentation
:rtype: str or unicode
:return:
the input text normalized to fit on lines with a maximized size
inferior to `line_len`, and optionally prefixed by an
indentation string
"""
toreport = ''
lines = []
line_len = line_len - len(indent)
for line in text.splitlines():
line = toreport + _NORM_SPACES_RGX.sub(' ', line.strip())
toreport = ''
while len(line) > line_len:
# too long line, need split
line, toreport = splittext(line, line_len)
lines.append(indent + line)
if toreport:
line = toreport + ' '
toreport = ''
else:
line = ''
if line:
lines.append(indent + line.strip())
return linesep.join(lines)
def splittext(text, line_len):
"""split the given text on space according to the given max line size
return a 2-uple:
* a line <= line_len if possible
* the rest of the text which has to be reported on another line
"""
if len(text) <= line_len:
return text, ''
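    # prefer the last space at or before line_len; if there is none,
    # extend forward to the first space after line_len instead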
pos = min(len(text)-1, line_len)
while pos > 0 and text[pos] != ' ':
pos -= 1
if pos == 0:
pos = min(len(text), line_len)
while len(text) > pos and text[pos] != ' ':
pos += 1
return text[:pos], text[pos+1:].strip()
def splitstrip(string, sep=','):
"""return a list of stripped string by splitting the string given as
argument on `sep` (',' by default). Empty string are discarded.
>>> splitstrip('a, b, c , 4,,')
['a', 'b', 'c', '4']
>>> splitstrip('a')
['a']
>>>
:type string: str or unicode
:param string: a csv line
:type sep: str or unicode
:param sep: field separator, default to the comma (',')
    :rtype: list of str or unicode
    :return: the list of stripped, non-empty substrings
"""
return [word.strip() for word in string.split(sep) if word.strip()]
get_csv = deprecated('get_csv is deprecated, use splitstrip')(splitstrip)
def split_url_or_path(url_or_path):
"""return the latest component of a string containing either an url of the
form <scheme>://<path> or a local file system path
"""
if '://' in url_or_path:
return url_or_path.rstrip('/').rsplit('/', 1)
return osp.split(url_or_path.rstrip(osp.sep))
def text_to_dict(text):
"""parse multilines text containing simple 'key=value' lines and return a
dict of {'key': 'value'}. When the same key is encountered multiple time,
value is turned into a list containing all values.
>>> d = text_to_dict('''multiple=1
... multiple= 2
... single =3
... ''')
>>> d['single']
'3'
>>> d['multiple']
['1', '2']
"""
res = {}
if not text:
return res
for line in text.splitlines():
line = line.strip()
if line and not line.startswith('#'):
key, value = [w.strip() for w in line.split('=', 1)]
if key in res:
try:
res[key].append(value)
except AttributeError:
res[key] = [res[key], value]
else:
res[key] = value
return res
_BLANK_URE = r'(\s|,)+'
_BLANK_RE = re.compile(_BLANK_URE)
__VALUE_URE = r'-?(([0-9]+\.[0-9]*)|((0x?)?[0-9]+))'
__UNITS_URE = r'[a-zA-Z]+'
_VALUE_RE = re.compile(r'(?P<value>%s)(?P<unit>%s)?'%(__VALUE_URE, __UNITS_URE))
_VALIDATION_RE = re.compile(r'^((%s)(%s))*(%s)?$' % (__VALUE_URE, __UNITS_URE,
__VALUE_URE))
BYTE_UNITS = {
"b": 1,
"kb": 1024,
"mb": 1024 ** 2,
"gb": 1024 ** 3,
"tb": 1024 ** 4,
}
TIME_UNITS = {
"ms": 0.0001,
"s": 1,
"min": 60,
"h": 60 * 60,
"d": 60 * 60 *24,
}
def apply_units(string, units, inter=None, final=float, blank_reg=_BLANK_RE,
value_reg=_VALUE_RE):
"""Parse the string applying the units defined in units
(e.g.: "1.5m",{'m',60} -> 80).
:type string: str or unicode
:param string: the string to parse
:type units: dict (or any object with __getitem__ using basestring key)
:param units: a dict mapping a unit string repr to its value
:type inter: type
:param inter: used to parse every intermediate value (need __sum__)
:type blank_reg: regexp
:param blank_reg: should match every blank char to ignore.
:type value_reg: regexp with "value" and optional "unit" group
    :param value_reg: regexp used to match each value together with its optional unit
"""
if inter is None:
inter = final
    fstring = blank_reg.sub('', string)
if not (fstring and _VALIDATION_RE.match(fstring)):
raise ValueError("Invalid unit string: %r." % string)
values = []
for match in value_reg.finditer(fstring):
dic = match.groupdict()
lit, unit = dic["value"], dic.get("unit")
value = inter(lit)
if unit is not None:
try:
value *= units[unit.lower()]
except KeyError:
raise KeyError('invalid unit %s. valid units are %s' %
(unit, units.keys()))
values.append(value)
return final(sum(values))
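# e.g. (illustrative) apply_units('1h30min', TIME_UNITS) == 5400.0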
_LINE_RGX = re.compile('\r\n|\r+|\n')
def pretty_match(match, string, underline_char='^'):
"""return a string with the match location underlined:
>>> import re
>>> print(pretty_match(re.search('mange', 'il mange du bacon'), 'il mange du bacon'))
il mange du bacon
^^^^^
>>>
:type match: _sre.SRE_match
:param match: object returned by re.match, re.search or re.finditer
:type string: str or unicode
:param string:
the string on which the regular expression has been applied to
obtain the `match` object
:type underline_char: str or unicode
:param underline_char:
character to use to underline the matched section, default to the
        caret '^'
:rtype: str or unicode
:return:
the original string with an inserted line to underline the match
location
"""
start = match.start()
end = match.end()
string = _LINE_RGX.sub(linesep, string)
start_line_pos = string.rfind(linesep, 0, start)
if start_line_pos == -1:
start_line_pos = 0
result = []
else:
result = [string[:start_line_pos]]
start_line_pos += len(linesep)
offset = start - start_line_pos
underline = ' ' * offset + underline_char * (end - start)
end_line_pos = string.find(linesep, end)
if end_line_pos == -1:
string = string[start_line_pos:]
result.append(string)
result.append(underline)
else:
end = string[end_line_pos + len(linesep):]
string = string[start_line_pos:end_line_pos]
result.append(string)
result.append(underline)
result.append(end)
return linesep.join(result).rstrip()
# Ansi colorization ###########################################################
ANSI_PREFIX = '\033['
ANSI_END = 'm'
ANSI_RESET = '\033[0m'
ANSI_STYLES = {
'reset': "0",
'bold': "1",
'italic': "3",
'underline': "4",
'blink': "5",
'inverse': "7",
'strike': "9",
}
ANSI_COLORS = {
'reset': "0",
'black': "30",
'red': "31",
'green': "32",
'yellow': "33",
'blue': "34",
'magenta': "35",
'cyan': "36",
'white': "37",
}
def _get_ansi_code(color=None, style=None):
"""return ansi escape code corresponding to color and style
:type color: str or None
:param color:
the color name (see `ANSI_COLORS` for available values)
or the color number when 256 colors are available
:type style: str or None
:param style:
        style string (see `ANSI_STYLES` for available values). To get
        several style effects at the same time, use a comma as separator.
    :raise KeyError: if a nonexistent color or style identifier is given
:rtype: str
:return: the built escape code
"""
ansi_code = []
if style:
style_attrs = splitstrip(style)
for effect in style_attrs:
ansi_code.append(ANSI_STYLES[effect])
if color:
if color.isdigit():
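            # numeric color selects from the 256-color palette (ESC[38;5;<n>m)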
ansi_code.extend(['38', '5'])
ansi_code.append(color)
else:
ansi_code.append(ANSI_COLORS[color])
if ansi_code:
return ANSI_PREFIX + ';'.join(ansi_code) + ANSI_END
return ''
def colorize_ansi(msg, color=None, style=None):
"""colorize message by wrapping it with ansi escape codes
:type msg: str or unicode
:param msg: the message string to colorize
:type color: str or None
:param color:
the color identifier (see `ANSI_COLORS` for available values)
:type style: str or None
:param style:
        style string (see `ANSI_STYLES` for available values). To get
        several style effects at the same time, use a comma as separator.
    :raise KeyError: if a nonexistent color or style identifier is given
:rtype: str or unicode
:return: the ansi escaped string
"""
# If both color and style are not defined, then leave the text as is
if color is None and style is None:
return msg
escape_code = _get_ansi_code(color, style)
# If invalid (or unknown) color, don't wrap msg with ansi codes
if escape_code:
return '%s%s%s' % (escape_code, msg, ANSI_RESET)
return msg
DIFF_STYLE = {'separator': 'cyan', 'remove': 'red', 'add': 'green'}
def diff_colorize_ansi(lines, out=sys.stdout, style=DIFF_STYLE):
for line in lines:
        if line[:4] in ('--- ', '+++ '):
            out.write(colorize_ansi(line, style['separator']))
        elif line.startswith('-'):
            out.write(colorize_ansi(line, style['remove']))
        elif line.startswith('+'):
            out.write(colorize_ansi(line, style['add']))
else:
out.write(line)
|
import os.path as op
import numpy as np
from numpy.testing import assert_array_equal
from mne.io import read_info
from mne.io.constants import FIFF
base_dir = op.join(op.dirname(__file__), 'data')
raw_fname = op.join(base_dir, 'test_chpi_raw_sss.fif')
def test_maxfilter_io():
"""Test maxfilter io."""
info = read_info(raw_fname)
mf = info['proc_history'][1]['max_info']
assert mf['sss_info']['frame'] == FIFF.FIFFV_COORD_HEAD
# based on manual 2.0, rev. 5.0 page 23
assert 5 <= mf['sss_info']['in_order'] <= 11
assert mf['sss_info']['out_order'] <= 5
assert mf['sss_info']['nchan'] > len(mf['sss_info']['components'])
assert (info['ch_names'][:mf['sss_info']['nchan']] ==
mf['sss_ctc']['proj_items_chs'])
assert (mf['sss_ctc']['decoupler'].shape ==
(mf['sss_info']['nchan'], mf['sss_info']['nchan']))
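    # the cross-talk decoupler matrix is expected to have a unit diagonal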
assert_array_equal(
np.unique(np.diag(mf['sss_ctc']['decoupler'].toarray())),
np.array([1.], dtype=np.float32))
assert mf['sss_cal']['cal_corrs'].shape == (306, 14)
assert mf['sss_cal']['cal_chans'].shape == (306, 2)
vv_coils = [v for k, v in FIFF.items() if 'FIFFV_COIL_VV' in k]
assert all(k in vv_coils for k in set(mf['sss_cal']['cal_chans'][:, 1]))
|
from __future__ import print_function
import os
import sys
import rospkg
import rosunit
NAME = 'check_test_ran.py'
def usage():
print("""Usage:
\t%s test-file.xml
or
\t%s --rostest pkg-name test-file.xml
""" % (NAME, NAME), file=sys.stderr)
print(sys.argv)
sys.exit(getattr(os, 'EX_USAGE', 1))
def check_main():
if len(sys.argv) < 2:
usage()
if '--rostest' in sys.argv[1:]:
if len(sys.argv) != 4:
usage()
test_pkg, test_file = [a for a in sys.argv[1:] if a != '--rostest']
# this logic derives the output filename that rostest uses
r = rospkg.RosPack()
pkg_name = rospkg.get_package_name(test_file)
pkg_dir = r.get_path(pkg_name)
# compute test name for friendlier reporting
outname = rosunit.rostest_name_from_path(pkg_dir, test_file)
test_file = rosunit.xml_results_file(test_pkg, outname, is_rostest=True)
else:
if len(sys.argv) != 2:
usage()
test_file = sys.argv[1]
print('Checking for test results in %s' % test_file)
if not os.path.exists(test_file):
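        # no results were produced: synthesize a failing JUnit XML file so the
        # missing test run is reported as a failure rather than silently ignored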
if not os.path.exists(os.path.dirname(test_file)):
os.makedirs(os.path.dirname(test_file))
print('Cannot find results, writing failure results to', test_file)
with open(test_file, 'w') as f:
test_name = os.path.basename(test_file)
d = {'test': test_name, 'test_file': test_file}
f.write("""<?xml version="1.0" encoding="UTF-8"?>
<testsuite tests="1" failures="1" time="1" errors="0" name="%(test)s">
<testcase name="test_ran" status="run" time="1" classname="Results">
<failure message="Unable to find test results for %(test)s, test did not run.\nExpected results in %(test_file)s" type=""/>
</testcase>
</testsuite>""" % d)
if __name__ == '__main__':
check_main()
|
import logging
import unittest
import os
import zlib
from gensim.corpora.hashdictionary import HashDictionary
from gensim.test.utils import get_tmpfile, common_texts
class TestHashDictionary(unittest.TestCase):
def setUp(self):
self.texts = common_texts
def testDocFreqOneDoc(self):
texts = [['human', 'interface', 'computer']]
d = HashDictionary(texts, myhash=zlib.adler32)
expected = {10608: 1, 12466: 1, 31002: 1}
self.assertEqual(d.dfs, expected)
def testDocFreqAndToken2IdForSeveralDocsWithOneWord(self):
# two docs
texts = [['human'], ['human']]
d = HashDictionary(texts, myhash=zlib.adler32)
expected = {31002: 2}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 31002}
self.assertEqual(d.token2id['human'], expected['human'])
self.assertEqual(d.token2id.keys(), expected.keys())
# three docs
texts = [['human'], ['human'], ['human']]
d = HashDictionary(texts, myhash=zlib.adler32)
expected = {31002: 3}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 31002}
self.assertEqual(d.token2id['human'], expected['human'])
self.assertEqual(d.token2id.keys(), expected.keys())
# four docs
texts = [['human'], ['human'], ['human'], ['human']]
d = HashDictionary(texts, myhash=zlib.adler32)
expected = {31002: 4}
self.assertEqual(d.dfs, expected)
# only one token (human) should exist
expected = {'human': 31002}
self.assertEqual(d.token2id['human'], expected['human'])
self.assertEqual(d.token2id.keys(), expected.keys())
    def testDocFreqForOneDocWithSeveralWords(self):
# two words
texts = [['human', 'cat']]
d = HashDictionary(texts, myhash=zlib.adler32)
expected = {9273: 1, 31002: 1}
self.assertEqual(d.dfs, expected)
# three words
texts = [['human', 'cat', 'minors']]
d = HashDictionary(texts, myhash=zlib.adler32)
expected = {9273: 1, 15001: 1, 31002: 1}
self.assertEqual(d.dfs, expected)
def testDebugMode(self):
# two words
texts = [['human', 'cat']]
d = HashDictionary(texts, debug=True, myhash=zlib.adler32)
expected = {9273: {'cat'}, 31002: {'human'}}
self.assertEqual(d.id2token, expected)
# now the same thing, with debug off
texts = [['human', 'cat']]
d = HashDictionary(texts, debug=False, myhash=zlib.adler32)
expected = {}
self.assertEqual(d.id2token, expected)
def testRange(self):
# all words map to the same id
d = HashDictionary(self.texts, id_range=1, debug=True)
dfs = {0: 9}
id2token = {
0: {
'minors', 'graph', 'system', 'trees', 'eps', 'computer',
'survey', 'user', 'human', 'time', 'interface', 'response'
}
}
token2id = {
'minors': 0, 'graph': 0, 'system': 0, 'trees': 0,
'eps': 0, 'computer': 0, 'survey': 0, 'user': 0,
'human': 0, 'time': 0, 'interface': 0, 'response': 0
}
self.assertEqual(d.dfs, dfs)
self.assertEqual(d.id2token, id2token)
self.assertEqual(d.token2id, token2id)
# 2 ids: 0/1 for even/odd number of bytes in the word
d = HashDictionary(self.texts, id_range=2, myhash=lambda key: len(key))
dfs = {0: 7, 1: 7}
id2token = {
0: {'minors', 'system', 'computer', 'survey', 'user', 'time', 'response'},
1: {'interface', 'graph', 'trees', 'eps', 'human'}
}
token2id = {
'minors': 0, 'graph': 1, 'system': 0, 'trees': 1, 'eps': 1, 'computer': 0,
'survey': 0, 'user': 0, 'human': 1, 'time': 0, 'interface': 1, 'response': 0
}
self.assertEqual(d.dfs, dfs)
self.assertEqual(d.id2token, id2token)
self.assertEqual(d.token2id, token2id)
def testBuild(self):
d = HashDictionary(self.texts, myhash=zlib.adler32)
expected = {
5232: 2, 5798: 3, 10608: 2, 12466: 2, 12736: 3, 15001: 2,
18451: 3, 23844: 3, 28591: 2, 29104: 2, 31002: 2, 31049: 2
}
self.assertEqual(d.dfs, expected)
expected = {
'minors': 15001, 'graph': 18451, 'system': 5798, 'trees': 23844,
'eps': 31049, 'computer': 10608, 'survey': 28591, 'user': 12736,
'human': 31002, 'time': 29104, 'interface': 12466, 'response': 5232
}
for ex in expected:
self.assertEqual(d.token2id[ex], expected[ex])
def testFilter(self):
d = HashDictionary(self.texts, myhash=zlib.adler32)
d.filter_extremes()
expected = {}
self.assertEqual(d.dfs, expected)
d = HashDictionary(self.texts, myhash=zlib.adler32)
d.filter_extremes(no_below=0, no_above=0.3)
expected = {
29104: 2, 31049: 2, 28591: 2, 5232: 2,
10608: 2, 12466: 2, 15001: 2, 31002: 2
}
self.assertEqual(d.dfs, expected)
d = HashDictionary(self.texts, myhash=zlib.adler32)
d.filter_extremes(no_below=3, no_above=1.0, keep_n=4)
expected = {5798: 3, 12736: 3, 18451: 3, 23844: 3}
self.assertEqual(d.dfs, expected)
def test_saveAsText(self):
""" `HashDictionary` can be saved as textfile. """
tmpf = get_tmpfile('dict_test.txt')
# use some utf8 strings, to test encoding serialization
d = HashDictionary(['žloťoučký koníček'.split(), 'Малйж обльйквюэ ат эжт'.split()])
d.save_as_text(tmpf)
self.assertTrue(os.path.exists(tmpf))
def test_saveAsTextBz2(self):
""" `HashDictionary` can be saved & loaded as compressed pickle. """
tmpf = get_tmpfile('dict_test.txt.bz2')
# use some utf8 strings, to test encoding serialization
d = HashDictionary(['žloťoučký koníček'.split(), 'Малйж обльйквюэ ат эжт'.split()])
d.save(tmpf)
self.assertTrue(os.path.exists(tmpf))
d2 = d.load(tmpf)
self.assertEqual(len(d), len(d2))
if __name__ == '__main__':
logging.basicConfig(level=logging.WARNING)
unittest.main()
|
from homeassistant.helpers import intent
import homeassistant.helpers.config_validation as cv
from .const import CONF_BOT, DOMAIN, INTENT_HELP
class HelpIntent(intent.IntentHandler):
"""Handle Help intents."""
intent_type = INTENT_HELP
slot_schema = {"conv_id": cv.string}
def __init__(self, hass):
"""Set up the intent."""
self.hass = hass
async def async_handle(self, intent_obj):
"""Handle the intent."""
slots = self.async_validate_slots(intent_obj.slots)
conv_id = slots["conv_id"]["value"]
intents = self.hass.data[DOMAIN][CONF_BOT].get_intents(conv_id)
response = intent_obj.create_response()
help_text = "I understand the following sentences:"
for intent_data in intents.values():
for sentence in intent_data["sentences"]:
help_text += f"\n'{sentence}'"
response.async_set_speech(help_text)
return response
|
try:
import json
except ImportError:
import simplejson as json
import glob
import os
import subprocess
import diamond.collector
def flatten_dictionary(input, sep='.', prefix=None):
"""Produces iterator of pairs where the first value is
the joined key names and the second value is the value
associated with the lowest level key. For example::
{'a': {'b': 10},
'c': 20,
}
produces::
[('a.b', 10), ('c', 20)]
"""
for name, value in sorted(input.items()):
fullname = sep.join(filter(None, [prefix, name]))
if isinstance(value, dict):
for result in flatten_dictionary(value, sep, fullname):
yield result
else:
yield (fullname, value)
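# Illustrative usage of flatten_dictionary (a sketch, not part of the original
# module; the input dict below is made up):
#
#     >>> dict(flatten_dictionary({'osd': {'op_latency': {'sum': 1.5}}, 'up': 1}))
#     {'osd.op_latency.sum': 1.5, 'up': 1}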
class CephCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(CephCollector, self).get_default_config_help()
config_help.update({
'socket_path': 'The location of the ceph monitoring sockets.'
' Defaults to "/var/run/ceph"',
'socket_prefix': 'The first part of all socket names.'
' Defaults to "ceph-"',
'socket_ext': 'Extension for socket filenames.'
' Defaults to "asok"',
'ceph_binary': 'Path to "ceph" executable. '
'Defaults to /usr/bin/ceph.',
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(CephCollector, self).get_default_config()
config.update({
'socket_path': '/var/run/ceph',
'socket_prefix': 'ceph-',
'socket_ext': 'asok',
'ceph_binary': '/usr/bin/ceph',
})
return config
def _get_socket_paths(self):
"""Return a sequence of paths to sockets for communicating
with ceph daemons.
"""
socket_pattern = os.path.join(self.config['socket_path'],
(self.config['socket_prefix'] +
'*.' + self.config['socket_ext']))
return glob.glob(socket_pattern)
def _get_counter_prefix_from_socket_name(self, name):
"""Given the name of a UDS socket, return the prefix
for counters coming from that source.
"""
base = os.path.splitext(os.path.basename(name))[0]
if base.startswith(self.config['socket_prefix']):
base = base[len(self.config['socket_prefix']):]
return 'ceph.' + base
def _get_stats_from_socket(self, name):
"""Return the parsed JSON data returned when ceph is told to
dump the stats from the named socket.
        In the event of an error, the exception is logged and
        an empty result set is returned.
"""
try:
json_blob = subprocess.check_output(
[self.config['ceph_binary'],
'--admin-daemon',
name,
'perf',
'dump',
])
except subprocess.CalledProcessError as err:
self.log.info('Could not get stats from %s: %s',
name, err)
self.log.exception('Could not get stats from %s' % name)
return {}
try:
json_data = json.loads(json_blob)
except Exception as err:
self.log.info('Could not parse stats from %s: %s',
name, err)
self.log.exception('Could not parse stats from %s' % name)
return {}
return json_data
def _publish_stats(self, counter_prefix, stats):
"""Given a stats dictionary from _get_stats_from_socket,
publish the individual values.
"""
for stat_name, stat_value in flatten_dictionary(
stats,
prefix=counter_prefix,
):
self.publish_gauge(stat_name, stat_value)
def collect(self):
"""
Collect stats
"""
for path in self._get_socket_paths():
self.log.debug('checking %s', path)
counter_prefix = self._get_counter_prefix_from_socket_name(path)
stats = self._get_stats_from_socket(path)
self._publish_stats(counter_prefix, stats)
return
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import logging
from absl import flags
from perfkitbenchmarker import linux_virtual_machine as linux_vm
from perfkitbenchmarker import providers
from perfkitbenchmarker import virtual_machine
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.cloudstack import cloudstack_disk
from perfkitbenchmarker.providers.cloudstack import cloudstack_network
from perfkitbenchmarker.providers.cloudstack import util
from six.moves import range
FLAGS = flags.FLAGS
class CloudStackVirtualMachine(virtual_machine.BaseVirtualMachine):
"""Object representing a CloudStack Virtual Machine."""
CLOUD = providers.CLOUDSTACK
DEFAULT_ZONE = 'QC-1'
DEFAULT_MACHINE_TYPE = '1vCPU.1GB'
DEFAULT_IMAGE = None
DEFAULT_USER_NAME = 'cca-user'
DEFAULT_PROJECT = 'cloudops-Engineering'
def __init__(self, vm_spec):
"""Initialize a CloudStack virtual machine.
Args:
vm_spec: virtual_machine.BaseVirtualMachineSpec object of the vm.
"""
super(CloudStackVirtualMachine, self).__init__(vm_spec)
self.network = cloudstack_network.CloudStackNetwork.GetNetwork(self)
self.cs = util.CsClient(FLAGS.CS_API_URL,
FLAGS.CS_API_KEY,
FLAGS.CS_API_SECRET)
self.project_id = None
if FLAGS.project:
project = self.cs.get_project(FLAGS.project)
assert project, "Project not found"
self.project_id = project['id']
zone = self.cs.get_zone(self.zone)
assert zone, "Zone not found"
self.zone_id = zone['id']
self.user_name = self.DEFAULT_USER_NAME
self.image = self.image or self.DEFAULT_IMAGE
self.disk_counter = 0
@vm_util.Retry(max_retries=3)
def _CreateDependencies(self):
"""Create VM dependencies."""
# Create an ssh keypair
with open(self.ssh_public_key) as keyfd:
self.ssh_keypair_name = 'perfkit-sshkey-%s' % FLAGS.run_uri
pub_key = keyfd.read()
if not self.cs.get_ssh_keypair(self.ssh_keypair_name, self.project_id):
res = self.cs.register_ssh_keypair(self.ssh_keypair_name,
pub_key,
self.project_id)
assert res, "Unable to create ssh keypair"
# Allocate a public ip
network_id = self.network.id
if self.network.is_vpc:
network_id = self.network.vpc_id
public_ip = self.cs.alloc_public_ip(network_id, self.network.is_vpc)
if public_ip:
self.ip_address = public_ip['ipaddress']
self.ip_address_id = public_ip['id']
else:
            logging.warning("Unable to allocate public IP")
def _DeleteDependencies(self):
"""Delete VM dependencies."""
# Remove the keypair
if self.cs.get_ssh_keypair(self.ssh_keypair_name, self.project_id):
self.cs.unregister_ssh_keypair(self.ssh_keypair_name, self.project_id)
# Remove the IP
if self.ip_address_id:
self.cs.release_public_ip(self.ip_address_id)
@vm_util.Retry(max_retries=3)
def _Create(self):
"""Create a Cloudstack VM instance."""
service_offering = self.cs.get_serviceoffering(self.machine_type)
assert service_offering, "No service offering found"
template = self.cs.get_template(self.image, self.project_id)
assert template, "No template found"
network_id = self.network.id
vm = None
vm = self.cs.create_vm(self.name,
self.zone_id,
service_offering['id'],
template['id'],
[network_id],
self.ssh_keypair_name,
self.project_id)
assert vm, "Unable to create VM"
self._vm = vm
self.id = vm['virtualmachine']['id']
@vm_util.Retry(max_retries=3)
def _PostCreate(self):
"""Get the instance's data."""
        # Associate the previously allocated public IP with the VM id
network_interface = self._vm['virtualmachine']['nic'][0]
self.internal_ip = network_interface['ipaddress']
# Create a Static NAT rule
if not self.cs.snat_rule_exists(self.ip_address_id, self.id):
snat_rule = self.cs.enable_static_nat(self.ip_address_id,
self.id,
self.network.id)
assert snat_rule, "Unable to create static NAT"
def _Delete(self):
"""Delete the VM instance."""
# Delete the VM
self.cs.delete_vm(self.id)
def _Exists(self):
"""Returns true if the VM exists."""
        # Check if the VM exists
vm = self.cs.get_virtual_machine(self.name, self.project_id)
if vm and 'id' in vm:
return True
return False
def CreateScratchDisk(self, disk_spec):
"""Create a VM's scratch disk.
Args:
disk_spec: virtual_machine.BaseDiskSpec object of the disk.
"""
        # CloudStack doesn't really have a concept of local or remote disks.
        # A VM starts with one disk and all other volumes have to be attached
        # via the API.
self.disks = []
for i in range(disk_spec.num_striped_disks):
name = 'disk-%s-%d-%d' % (self.name, i + 1, self.disk_counter)
scratch_disk = cloudstack_disk.CloudStackDisk(disk_spec,
name,
self.zone_id,
self.project_id)
self.disks.append(scratch_disk)
self.disk_counter += 1
self._CreateScratchDiskFromDisks(disk_spec, self.disks)
class CentOs7BasedCloudStackVirtualMachine(CloudStackVirtualMachine,
linux_vm.CentOs7Mixin):
DEFAULT_IMAGE = 'CentOS 7 HVM base (64bit)'
|
import pyzerproc
from homeassistant import config_entries, setup
from homeassistant.components.zerproc.config_flow import DOMAIN
from tests.async_mock import patch
async def test_flow_success(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.zerproc.config_flow.pyzerproc.discover",
return_value=["Light1", "Light2"],
), patch(
"homeassistant.components.zerproc.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zerproc.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "Zerproc"
assert result2["data"] == {}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_flow_no_devices_found(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.zerproc.config_flow.pyzerproc.discover",
return_value=[],
), patch(
"homeassistant.components.zerproc.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zerproc.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
async def test_flow_exceptions_caught(hass):
"""Test we get the form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] is None
with patch(
"homeassistant.components.zerproc.config_flow.pyzerproc.discover",
side_effect=pyzerproc.ZerprocException("TEST"),
), patch(
"homeassistant.components.zerproc.async_setup", return_value=True
) as mock_setup, patch(
"homeassistant.components.zerproc.async_setup_entry",
return_value=True,
) as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{},
)
assert result2["type"] == "abort"
assert result2["reason"] == "no_devices_found"
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
|
import uuid
from lxml.html import builder, tostring
from pygal.util import template
class HTML(object):
"""Lower case adapter of lxml builder"""
def __getattribute__(self, attr):
"""Get the uppercase builder attribute"""
return getattr(builder, attr.upper())
class Table(object):
"""Table generator class"""
_dual = None
def __init__(self, chart):
"""Init the table"""
self.chart = chart
def render(self, total=False, transpose=False, style=False):
"""Render the HTMTL table of the chart.
`total` can be specified to include data sums
`transpose` make labels becomes columns
`style` include scoped style for the table
"""
self.chart.setup()
ln = self.chart._len
html = HTML()
attrs = {}
if style:
attrs['id'] = 'table-%s' % uuid.uuid4()
table = []
_ = lambda x: x if x is not None else ''
if self.chart.x_labels:
labels = [None] + list(self.chart.x_labels)
if len(labels) < ln:
labels += [None] * (ln + 1 - len(labels))
if len(labels) > ln + 1:
labels = labels[:ln + 1]
table.append(labels)
if total:
if len(table):
table[0].append('Total')
else:
table.append([None] * (ln + 1) + ['Total'])
acc = [0] * (ln + 1)
for i, serie in enumerate(self.chart.all_series):
row = [serie.title]
if total:
sum_ = 0
for j, value in enumerate(serie.values):
if total:
v = value or 0
acc[j] += v
sum_ += v
row.append(self.chart._format(serie, j))
if total:
acc[-1] += sum_
row.append(self.chart._serie_format(serie, sum_))
table.append(row)
width = ln + 1
if total:
width += 1
table.append(['Total'])
for val in acc:
table[-1].append(self.chart._serie_format(serie, val))
# Align values
len_ = max([len(r) for r in table] or [0])
for i, row in enumerate(table[:]):
len_ = len(row)
if len_ < width:
table[i] = row + [None] * (width - len_)
if not transpose:
table = list(zip(*table))
thead = []
tbody = []
tfoot = []
if not transpose or self.chart.x_labels:
# There's always series title but not always x_labels
thead = [table[0]]
tbody = table[1:]
else:
tbody = table
if total:
tfoot = [tbody[-1]]
tbody = tbody[:-1]
parts = []
if thead:
parts.append(
html.thead(
*[html.tr(*[html.th(_(col)) for col in r]) for r in thead]
)
)
if tbody:
parts.append(
html.tbody(
*[html.tr(*[html.td(_(col)) for col in r]) for r in tbody]
)
)
if tfoot:
parts.append(
html.tfoot(
*[html.tr(*[html.th(_(col)) for col in r]) for r in tfoot]
)
)
table = tostring(html.table(*parts, **attrs))
if style:
if style is True:
css = '''
#{{ id }} {
border-collapse: collapse;
border-spacing: 0;
empty-cells: show;
border: 1px solid #cbcbcb;
}
#{{ id }} td, #{{ id }} th {
border-left: 1px solid #cbcbcb;
border-width: 0 0 0 1px;
margin: 0;
padding: 0.5em 1em;
}
#{{ id }} td:first-child, #{{ id }} th:first-child {
border-left-width: 0;
}
#{{ id }} thead, #{{ id }} tfoot {
color: #000;
text-align: left;
vertical-align: bottom;
}
#{{ id }} thead {
background: #e0e0e0;
}
#{{ id }} tfoot {
background: #ededed;
}
#{{ id }} tr:nth-child(2n-1) td {
background-color: #f2f2f2;
}
'''
else:
css = style
table = tostring(
html.style(template(css, **attrs), scoped='scoped')
) + table
table = table.decode('utf-8')
self.chart.teardown()
return table
|
from __future__ import print_function
import tests
from pyVim import connect
class ManagedObjectTests(tests.VCRTestBase):
@tests.VCRTestBase.my_vcr.use_cassette('root_folder_parent.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='once')
def test_root_folder_parent(self):
# see: http://python3porting.com/noconv.html
si = connect.SmartConnect(host='vcsa',
user='my_user',
pwd='my_password')
root_folder = si.content.rootFolder
self.assertTrue(hasattr(root_folder, 'parent'))
# NOTE (hartsock): assertIsNone does not work in Python 2.6
self.assertTrue(root_folder.parent is None)
|
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from .const import DATA_SDM
from .sensor_legacy import async_setup_legacy_entry
from .sensor_sdm import async_setup_sdm_entry
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the sensors."""
if DATA_SDM not in entry.data:
await async_setup_legacy_entry(hass, entry, async_add_entities)
return
await async_setup_sdm_entry(hass, entry, async_add_entities)
|
from datetime import timedelta
from logging import getLogger
from os import remove
from os.path import exists
import time
import unittest
from unittest import mock
from homeassistant.components import feedreader
from homeassistant.components.feedreader import (
CONF_MAX_ENTRIES,
CONF_URLS,
DEFAULT_MAX_ENTRIES,
DEFAULT_SCAN_INTERVAL,
EVENT_FEEDREADER,
FeedManager,
StoredData,
)
from homeassistant.const import CONF_SCAN_INTERVAL, EVENT_HOMEASSISTANT_START
from homeassistant.core import callback
from homeassistant.setup import setup_component
from tests.async_mock import patch
from tests.common import get_test_home_assistant, load_fixture
_LOGGER = getLogger(__name__)
URL = "http://some.rss.local/rss_feed.xml"
VALID_CONFIG_1 = {feedreader.DOMAIN: {CONF_URLS: [URL]}}
VALID_CONFIG_2 = {feedreader.DOMAIN: {CONF_URLS: [URL], CONF_SCAN_INTERVAL: 60}}
VALID_CONFIG_3 = {feedreader.DOMAIN: {CONF_URLS: [URL], CONF_MAX_ENTRIES: 100}}
class TestFeedreaderComponent(unittest.TestCase):
"""Test the feedreader component."""
def setUp(self):
"""Initialize values for this testcase class."""
self.hass = get_test_home_assistant()
self.addCleanup(self.tear_down_cleanup)
def tear_down_cleanup(self):
"""Clean up files and stop Home Assistant."""
data_file = self.hass.config.path(f"{feedreader.DOMAIN}.pickle")
if exists(data_file):
remove(data_file)
self.hass.stop()
def test_setup_one_feed(self):
"""Test the general setup of this component."""
with patch(
"homeassistant.components.feedreader.track_time_interval"
) as track_method:
assert setup_component(self.hass, feedreader.DOMAIN, VALID_CONFIG_1)
track_method.assert_called_once_with(
self.hass, mock.ANY, DEFAULT_SCAN_INTERVAL
)
def test_setup_scan_interval(self):
"""Test the setup of this component with scan interval."""
with patch(
"homeassistant.components.feedreader.track_time_interval"
) as track_method:
assert setup_component(self.hass, feedreader.DOMAIN, VALID_CONFIG_2)
track_method.assert_called_once_with(
self.hass, mock.ANY, timedelta(seconds=60)
)
def test_setup_max_entries(self):
"""Test the setup of this component with max entries."""
assert setup_component(self.hass, feedreader.DOMAIN, VALID_CONFIG_3)
def setup_manager(self, feed_data, max_entries=DEFAULT_MAX_ENTRIES):
"""Set up feed manager."""
events = []
@callback
def record_event(event):
"""Add recorded event to set."""
events.append(event)
self.hass.bus.listen(EVENT_FEEDREADER, record_event)
        # Loading raw data from the fixture and passing it in as the URL
        # works since the third-party feedparser library accepts a URL
        # as well as the actual data.
data_file = self.hass.config.path(f"{feedreader.DOMAIN}.pickle")
storage = StoredData(data_file)
with patch(
"homeassistant.components.feedreader.track_time_interval"
) as track_method:
manager = FeedManager(
feed_data, DEFAULT_SCAN_INTERVAL, max_entries, self.hass, storage
)
# Can't use 'assert_called_once' here because it's not available
# in Python 3.5 yet.
track_method.assert_called_once_with(
self.hass, mock.ANY, DEFAULT_SCAN_INTERVAL
)
# Artificially trigger update.
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
# Collect events.
self.hass.block_till_done()
return manager, events
def test_feed(self):
"""Test simple feed with valid data."""
feed_data = load_fixture("feedreader.xml")
manager, events = self.setup_manager(feed_data)
assert len(events) == 1
assert events[0].data.title == "Title 1"
assert events[0].data.description == "Description 1"
assert events[0].data.link == "http://www.example.com/link/1"
assert events[0].data.id == "GUID 1"
assert events[0].data.published_parsed.tm_year == 2018
assert events[0].data.published_parsed.tm_mon == 4
assert events[0].data.published_parsed.tm_mday == 30
assert events[0].data.published_parsed.tm_hour == 5
assert events[0].data.published_parsed.tm_min == 10
assert manager.last_update_successful is True
def test_feed_updates(self):
"""Test feed updates."""
# 1. Run
feed_data = load_fixture("feedreader.xml")
manager, events = self.setup_manager(feed_data)
assert len(events) == 1
# 2. Run
feed_data2 = load_fixture("feedreader1.xml")
# Must patch 'get_timestamp' method because the timestamp is stored
# with the URL which in these tests is the raw XML data.
with patch(
"homeassistant.components.feedreader.StoredData.get_timestamp",
return_value=time.struct_time((2018, 4, 30, 5, 10, 0, 0, 120, 0)),
):
manager2, events2 = self.setup_manager(feed_data2)
assert len(events2) == 1
# 3. Run
feed_data3 = load_fixture("feedreader1.xml")
with patch(
"homeassistant.components.feedreader.StoredData.get_timestamp",
return_value=time.struct_time((2018, 4, 30, 5, 11, 0, 0, 120, 0)),
):
manager3, events3 = self.setup_manager(feed_data3)
assert len(events3) == 0
def test_feed_default_max_length(self):
"""Test long feed beyond the default 20 entry limit."""
feed_data = load_fixture("feedreader2.xml")
manager, events = self.setup_manager(feed_data)
assert len(events) == 20
def test_feed_max_length(self):
"""Test long feed beyond a configured 5 entry limit."""
feed_data = load_fixture("feedreader2.xml")
manager, events = self.setup_manager(feed_data, max_entries=5)
assert len(events) == 5
def test_feed_without_publication_date_and_title(self):
"""Test simple feed with entry without publication date and title."""
feed_data = load_fixture("feedreader3.xml")
manager, events = self.setup_manager(feed_data)
assert len(events) == 3
def test_feed_with_unrecognized_publication_date(self):
"""Test simple feed with entry with unrecognized publication date."""
feed_data = load_fixture("feedreader4.xml")
manager, events = self.setup_manager(feed_data)
assert len(events) == 1
def test_feed_invalid_data(self):
"""Test feed with invalid data."""
feed_data = "INVALID DATA"
manager, events = self.setup_manager(feed_data)
assert len(events) == 0
assert manager.last_update_successful is True
@mock.patch("feedparser.parse", return_value=None)
def test_feed_parsing_failed(self, mock_parse):
"""Test feed where parsing fails."""
data_file = self.hass.config.path(f"{feedreader.DOMAIN}.pickle")
storage = StoredData(data_file)
manager = FeedManager(
"FEED DATA", DEFAULT_SCAN_INTERVAL, DEFAULT_MAX_ENTRIES, self.hass, storage
)
# Artificially trigger update.
self.hass.bus.fire(EVENT_HOMEASSISTANT_START)
# Collect events.
self.hass.block_till_done()
assert manager.last_update_successful is False
|
from asyncio import run_coroutine_threadsafe
import pyatmo
from homeassistant import config_entries, core
from homeassistant.helpers import config_entry_oauth2_flow
class ConfigEntryNetatmoAuth(pyatmo.auth.NetatmoOAuth2):
"""Provide Netatmo authentication tied to an OAuth2 based config entry."""
def __init__(
self,
hass: core.HomeAssistant,
config_entry: config_entries.ConfigEntry,
implementation: config_entry_oauth2_flow.AbstractOAuth2Implementation,
):
"""Initialize Netatmo Auth."""
self.hass = hass
self.session = config_entry_oauth2_flow.OAuth2Session(
hass, config_entry, implementation
)
super().__init__(token=self.session.token)
def refresh_tokens(
self,
) -> dict:
"""Refresh and return new Netatmo tokens using Home Assistant OAuth2 session."""
run_coroutine_threadsafe(
self.session.async_ensure_token_valid(), self.hass.loop
).result()
return self.session.token
|
from __future__ import print_function
from __future__ import unicode_literals
import gzip
import os.path as P
import subprocess
import unittest
import mock
import smart_open.hdfs
CURR_DIR = P.dirname(P.abspath(__file__))
#
# We want our mocks to emulate the real implementation as close as possible,
# so we use a Popen call during each test. If we mocked using io.BytesIO, then
# it is possible the mocks would behave differently to what we expect in real
# use.
#
# Since these tests use cat, they will not work in an environment without cat,
# such as Windows.
#
class CliRawInputBaseTest(unittest.TestCase):
def test_read(self):
path = P.join(CURR_DIR, 'test_data/crime-and-punishment.txt')
cat = subprocess.Popen(['cat', path], stdout=subprocess.PIPE)
with mock.patch('subprocess.Popen', return_value=cat):
reader = smart_open.hdfs.CliRawInputBase('hdfs://dummy/url')
as_bytes = reader.read()
as_text = as_bytes.decode('utf-8')
self.assertTrue(as_text.startswith('В начале июля, в чрезвычайно жаркое время'))
self.assertTrue(as_text.endswith('улизнуть, чтобы никто не видал.\n'))
def test_read_100(self):
path = P.join(CURR_DIR, 'test_data/crime-and-punishment.txt')
cat = subprocess.Popen(['cat', path], stdout=subprocess.PIPE)
with mock.patch('subprocess.Popen', return_value=cat):
reader = smart_open.hdfs.CliRawInputBase('hdfs://dummy/url')
as_bytes = reader.read(75)
as_text = as_bytes.decode('utf-8')
expected = 'В начале июля, в чрезвычайно жаркое время'
self.assertEqual(expected, as_text)
def test_unzip(self):
path = P.join(CURR_DIR, 'test_data/crime-and-punishment.txt.gz')
cat = subprocess.Popen(['cat', path], stdout=subprocess.PIPE)
with mock.patch('subprocess.Popen', return_value=cat):
with gzip.GzipFile(fileobj=smart_open.hdfs.CliRawInputBase('hdfs://dummy/url')) as fin:
as_bytes = fin.read()
as_text = as_bytes.decode('utf-8')
self.assertTrue(as_text.startswith('В начале июля, в чрезвычайно жаркое время'))
self.assertTrue(as_text.endswith('улизнуть, чтобы никто не видал.\n'))
def test_context_manager(self):
path = P.join(CURR_DIR, 'test_data/crime-and-punishment.txt')
cat = subprocess.Popen(['cat', path], stdout=subprocess.PIPE)
with mock.patch('subprocess.Popen', return_value=cat):
with smart_open.hdfs.CliRawInputBase('hdfs://dummy/url') as fin:
as_bytes = fin.read()
as_text = as_bytes.decode('utf-8')
self.assertTrue(as_text.startswith('В начале июля, в чрезвычайно жаркое время'))
self.assertTrue(as_text.endswith('улизнуть, чтобы никто не видал.\n'))
class CliRawOutputBaseTest(unittest.TestCase):
def test_write(self):
cat = subprocess.Popen(['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
as_text = 'мы в ответе за тех, кого приручили'
with mock.patch('subprocess.Popen', return_value=cat):
with smart_open.hdfs.CliRawOutputBase('hdfs://dummy/url') as fout:
fout.write(as_text.encode('utf-8'))
actual = cat.stdout.read().decode('utf-8')
self.assertEqual(as_text, actual)
def test_zip(self):
cat = subprocess.Popen(['cat'], stdin=subprocess.PIPE, stdout=subprocess.PIPE)
as_text = 'мы в ответе за тех, кого приручили'
with mock.patch('subprocess.Popen', return_value=cat):
with smart_open.hdfs.CliRawOutputBase('hdfs://dummy/url') as fout:
with gzip.GzipFile(fileobj=fout, mode='wb') as gz_fout:
gz_fout.write(as_text.encode('utf-8'))
with gzip.GzipFile(fileobj=cat.stdout) as fin:
actual = fin.read().decode('utf-8')
self.assertEqual(as_text, actual)
|
import logging
import unittest
import numpy as np
from gensim.corpora.mmcorpus import MmCorpus
from gensim.models import tfidfmodel
from gensim.test.utils import datapath, get_tmpfile, common_dictionary, common_corpus
from gensim.corpora import Dictionary
texts = [
['complier', 'system', 'computer'],
['eulerian', 'node', 'cycle', 'graph', 'tree', 'path'],
['graph', 'flow', 'network', 'graph'],
['loading', 'computer', 'system'],
['user', 'server', 'system'],
['tree', 'hamiltonian'],
['graph', 'trees'],
['computer', 'kernel', 'malfunction', 'computer'],
['server', 'system', 'computer'],
]
dictionary = Dictionary(texts)
corpus = [dictionary.doc2bow(text) for text in texts]
class TestTfidfModel(unittest.TestCase):
def setUp(self):
self.corpus = MmCorpus(datapath('testcorpus.mm'))
def test_transform(self):
# create the transformation model
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
# transform one document
doc = list(self.corpus)[0]
transformed = model[doc]
expected = [(0, 0.57735026918962573), (1, 0.57735026918962573), (2, 0.57735026918962573)]
self.assertTrue(np.allclose(transformed, expected))
def test_init(self):
# create the transformation model by analyzing a corpus
# uses the global `corpus`!
model1 = tfidfmodel.TfidfModel(common_corpus)
dfs = common_dictionary.dfs
# make sure the dfs<->idfs transformation works
self.assertEqual(model1.dfs, dfs)
self.assertEqual(model1.idfs, tfidfmodel.precompute_idfs(model1.wglobal, dfs, len(common_corpus)))
# create the transformation model by directly supplying a term->docfreq
# mapping from the global var `dictionary`.
model2 = tfidfmodel.TfidfModel(dictionary=common_dictionary)
self.assertEqual(model1.idfs, model2.idfs)
def test_persistence(self):
# Test persistence without using `smartirs`
fname = get_tmpfile('gensim_models.tst')
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
self.assertTrue(np.allclose(model[[]], model2[[]])) # try projecting an empty vector
# Test persistence with using `smartirs`
fname = get_tmpfile('gensim_models_smartirs.tst')
model = tfidfmodel.TfidfModel(self.corpus, smartirs="nfc")
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
self.assertTrue(np.allclose(model[[]], model2[[]])) # try projecting an empty vector
# Test persistence between Gensim v3.2.0 and current model.
model3 = tfidfmodel.TfidfModel(self.corpus, smartirs="nfc")
model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst'))
idfs3 = [model3.idfs[key] for key in sorted(model3.idfs.keys())]
idfs4 = [model4.idfs[key] for key in sorted(model4.idfs.keys())]
self.assertTrue(np.allclose(idfs3, idfs4))
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
self.assertTrue(np.allclose(model3[[]], model4[[]])) # try projecting an empty vector
# Test persistence with using pivoted normalization
fname = get_tmpfile('gensim_models_smartirs.tst')
model = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
        # Test persistence between Gensim v3.2.0 and a pivoted normalization model.
model3 = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst'))
idfs3 = [model3.idfs[key] for key in sorted(model3.idfs.keys())]
idfs4 = [model4.idfs[key] for key in sorted(model4.idfs.keys())]
self.assertTrue(np.allclose(idfs3, idfs4))
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
def test_persistence_compressed(self):
# Test persistence without using `smartirs`
fname = get_tmpfile('gensim_models.tst.gz')
model = tfidfmodel.TfidfModel(self.corpus, normalize=True)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
self.assertTrue(np.allclose(model[[]], model2[[]])) # try projecting an empty vector
# Test persistence with using `smartirs`
fname = get_tmpfile('gensim_models_smartirs.tst.gz')
model = tfidfmodel.TfidfModel(self.corpus, smartirs="nfc")
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
self.assertTrue(np.allclose(model[[]], model2[[]])) # try projecting an empty vector
# Test persistence between Gensim v3.2.0 and current compressed model.
model3 = tfidfmodel.TfidfModel(self.corpus, smartirs="nfc")
model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst.bz2'))
idfs3 = [model3.idfs[key] for key in sorted(model3.idfs.keys())]
idfs4 = [model4.idfs[key] for key in sorted(model4.idfs.keys())]
self.assertTrue(np.allclose(idfs3, idfs4))
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
self.assertTrue(np.allclose(model3[[]], model4[[]])) # try projecting an empty vector
# Test persistence with using pivoted normalization
fname = get_tmpfile('gensim_models_smartirs.tst.gz')
model = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
model.save(fname)
model2 = tfidfmodel.TfidfModel.load(fname, mmap=None)
self.assertTrue(model.idfs == model2.idfs)
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model[tstvec[0]], model2[tstvec[0]]))
self.assertTrue(np.allclose(model[tstvec[1]], model2[tstvec[1]]))
# Test persistence between Gensim v3.2.0 and pivoted normalization compressed model.
model3 = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
model4 = tfidfmodel.TfidfModel.load(datapath('tfidf_model.tst.bz2'))
idfs3 = [model3.idfs[key] for key in sorted(model3.idfs.keys())]
idfs4 = [model4.idfs[key] for key in sorted(model4.idfs.keys())]
self.assertTrue(np.allclose(idfs3, idfs4))
tstvec = [corpus[1], corpus[2]]
self.assertTrue(np.allclose(model3[tstvec[0]], model4[tstvec[0]]))
self.assertTrue(np.allclose(model3[tstvec[1]], model4[tstvec[1]]))
def test_consistency(self):
docs = [corpus[1], corpus[2]]
        # Test if `nfc` yields the default docs.
model = tfidfmodel.TfidfModel(corpus, smartirs='nfc')
transformed_docs = [model[docs[0]], model[docs[1]]]
model = tfidfmodel.TfidfModel(corpus)
expected_docs = [model[docs[0]], model[docs[1]]]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# Testing all the variations of `wlocal`
# tnn
model = tfidfmodel.TfidfModel(corpus, smartirs='tnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = docs[:]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nnn
model = tfidfmodel.TfidfModel(corpus, smartirs='nnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = docs[:]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# lnn
model = tfidfmodel.TfidfModel(corpus, smartirs='lnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0), (8, 1.0)],
[(5, 2.0), (9, 1.0), (10, 1.0)]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# dnn
model = tfidfmodel.TfidfModel(corpus, smartirs='dnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0), (8, 1.0)],
[(5, 2.0), (9, 1.0), (10, 1.0)]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# ann
model = tfidfmodel.TfidfModel(corpus, smartirs='ann')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0), (7, 1.0), (8, 1.0)],
[(5, 1.0), (9, 0.75), (10, 0.75)]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# bnn
model = tfidfmodel.TfidfModel(corpus, smartirs='bnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(3, 1), (4, 1), (5, 1), (6, 1), (7, 1), (8, 1)],
[(5, 1), (9, 1), (10, 1)]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# Lnn
model = tfidfmodel.TfidfModel(corpus, smartirs='Lnn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 1.0), (4, 1.0), (5, 1.0), (6, 1.0),
(7, 1.0), (8, 1.0)
],
[
(5, 1.4133901052), (9, 0.7066950526), (10, 0.7066950526)
]
        ]
        self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
        self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
        # Testing all the variations of `wglobal`
# nxn
model = tfidfmodel.TfidfModel(corpus, smartirs='nxn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = docs[:]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nfn
model = tfidfmodel.TfidfModel(corpus, smartirs='nfn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 3.169925001442312), (4, 3.169925001442312), (5, 1.584962500721156), (6, 3.169925001442312),
(7, 3.169925001442312), (8, 2.169925001442312)
],
[
(5, 3.169925001442312), (9, 3.169925001442312), (10, 3.169925001442312)
]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# ntn
model = tfidfmodel.TfidfModel(corpus, smartirs='ntn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 3.321928094887362), (4, 3.321928094887362), (5, 1.736965594166206), (6, 3.321928094887362),
(7, 3.321928094887362), (8, 2.321928094887362)
],
[
(5, 3.473931188332412), (9, 3.321928094887362), (10, 3.321928094887362)
]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# npn
model = tfidfmodel.TfidfModel(corpus, smartirs='npn')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 3.0), (4, 3.0), (5, 1.0), (6, 3.0),
(7, 3.0), (8, 1.8073549220576042)
],
[
(5, 2.0), (9, 3.0), (10, 3.0)
]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# Testing all the variations of `normalize`
# nnx
model = tfidfmodel.TfidfModel(corpus, smartirs='nnx')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = docs[:]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nnc
model = tfidfmodel.TfidfModel(corpus, smartirs='nnc')
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(3, 0.4082482905), (4, 0.4082482905), (5, 0.4082482905), (6, 0.4082482905),
(7, 0.4082482905), (8, 0.4082482905)
],
[
(5, 0.81649658092772603), (9, 0.40824829046386302), (10, 0.40824829046386302)
]
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
model = tfidfmodel.TfidfModel(corpus, wlocal=lambda x: x, wglobal=lambda x, y: x * x, smartirs='nnc')
transformed_docs = [model[docs[0]], model[docs[1]]]
model = tfidfmodel.TfidfModel(corpus, wlocal=lambda x: x * x, wglobal=lambda x, y: x, smartirs='nnc')
expected_docs = [model[docs[0]], model[docs[1]]]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nnu
slope = 0.2
model = tfidfmodel.TfidfModel(corpus, smartirs='nnu', slope=slope)
transformed_docs = [model[docs[0]], model[docs[1]]]
average_unique_length = 1.0 * sum(len(set(text)) for text in texts) / len(texts)
vector_norms = [
(1.0 - slope) * average_unique_length + slope * 6.0,
(1.0 - slope) * average_unique_length + slope * 3.0,
]
expected_docs = [
[(termid, weight / vector_norms[0]) for termid, weight in docs[0]],
[(termid, weight / vector_norms[1]) for termid, weight in docs[1]],
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
# nnb
slope = 0.2
model = tfidfmodel.TfidfModel(dictionary=dictionary, smartirs='nnb', slope=slope)
transformed_docs = [model[docs[0]], model[docs[1]]]
average_character_length = sum(len(word) + 1.0 for text in texts for word in text) / len(texts)
vector_norms = [
(1.0 - slope) * average_character_length + slope * 36.0,
(1.0 - slope) * average_character_length + slope * 25.0,
]
expected_docs = [
[(termid, weight / vector_norms[0]) for termid, weight in docs[0]],
[(termid, weight / vector_norms[1]) for termid, weight in docs[1]],
]
self.assertTrue(np.allclose(transformed_docs[0], expected_docs[0]))
self.assertTrue(np.allclose(transformed_docs[1], expected_docs[1]))
def test_pivoted_normalization(self):
docs = [corpus[1], corpus[2]]
# Test if slope=1 yields the default docs for pivoted normalization.
model = tfidfmodel.TfidfModel(self.corpus)
transformed_docs = [model[docs[0]], model[docs[1]]]
model = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=1)
expected_docs = [model[docs[0]], model[docs[1]]]
self.assertTrue(np.allclose(sorted(transformed_docs[0]), sorted(expected_docs[0])))
self.assertTrue(np.allclose(sorted(transformed_docs[1]), sorted(expected_docs[1])))
# Test if pivoted model is consistent
model = tfidfmodel.TfidfModel(self.corpus, pivot=0, slope=0.5)
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[
(8, 0.8884910505493495), (7, 0.648974041227711), (6, 0.8884910505493495),
(5, 0.648974041227711), (4, 0.8884910505493495), (3, 0.8884910505493495)
],
[
(10, 0.8164965809277263), (9, 0.8164965809277263), (5, 1.6329931618554525)
]
]
self.assertTrue(np.allclose(sorted(transformed_docs[0]), sorted(expected_docs[0])))
self.assertTrue(np.allclose(sorted(transformed_docs[1]), sorted(expected_docs[1])))
def test_wlocal_wglobal(self):
def wlocal(tf):
assert isinstance(tf, np.ndarray)
return iter(tf + 1)
def wglobal(df, total_docs):
return 1
docs = [corpus[1], corpus[2]]
model = tfidfmodel.TfidfModel(corpus, wlocal=wlocal, wglobal=wglobal, normalize=False)
transformed_docs = [model[docs[0]], model[docs[1]]]
expected_docs = [
[(termid, weight + 1) for termid, weight in docs[0]],
[(termid, weight + 1) for termid, weight in docs[1]],
]
self.assertTrue(np.allclose(sorted(transformed_docs[0]), sorted(expected_docs[0])))
self.assertTrue(np.allclose(sorted(transformed_docs[1]), sorted(expected_docs[1])))
def test_backwards_compatibility(self):
model = tfidfmodel.TfidfModel.load(datapath('tfidf_model_3_2.tst'))
# attrs ensured by load method
attrs = ['pivot', 'slope', 'smartirs']
for a in attrs:
self.assertTrue(hasattr(model, a))
# __getitem__: assumes smartirs attr is present
self.assertEqual(len(model[corpus]), len(corpus))
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
import logging
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CURRENCY_EURO,
)
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import DOMAIN, NAME
_LOGGER = logging.getLogger(__name__)
ATTR_BRAND = "brand"
ATTR_CITY = "city"
ATTR_FUEL_TYPE = "fuel_type"
ATTR_HOUSE_NUMBER = "house_number"
ATTR_IS_OPEN = "is_open"
ATTR_POSTCODE = "postcode"
ATTR_STATION_NAME = "station_name"
ATTR_STREET = "street"
ATTRIBUTION = "Data provided by https://creativecommons.tankerkoenig.de"
ICON = "mdi:gas-station"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the tankerkoenig sensors."""
if discovery_info is None:
return
tankerkoenig = hass.data[DOMAIN]
async def async_update_data():
"""Fetch data from API endpoint."""
try:
return await tankerkoenig.fetch_data()
except LookupError as err:
raise UpdateFailed("Failed to fetch data") from err
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name=NAME,
update_method=async_update_data,
update_interval=tankerkoenig.update_interval,
)
# Fetch initial data so we have data when entities subscribe
await coordinator.async_refresh()
stations = discovery_info.values()
entities = []
for station in stations:
for fuel in tankerkoenig.fuel_types:
if fuel not in station:
_LOGGER.warning(
"Station %s does not offer %s fuel", station["id"], fuel
)
continue
sensor = FuelPriceSensor(
fuel,
station,
coordinator,
f"{NAME}_{station['name']}_{fuel}",
tankerkoenig.show_on_map,
)
entities.append(sensor)
_LOGGER.debug("Added sensors %s", entities)
async_add_entities(entities)
class FuelPriceSensor(CoordinatorEntity):
"""Contains prices for fuel in a given station."""
def __init__(self, fuel_type, station, coordinator, name, show_on_map):
"""Initialize the sensor."""
super().__init__(coordinator)
self._station = station
self._station_id = station["id"]
self._fuel_type = fuel_type
self._name = name
self._latitude = station["lat"]
self._longitude = station["lng"]
self._city = station["place"]
self._house_number = station["houseNumber"]
self._postcode = station["postCode"]
self._street = station["street"]
self._price = station[fuel_type]
self._show_on_map = show_on_map
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend."""
return ICON
@property
def unit_of_measurement(self):
"""Return unit of measurement."""
return CURRENCY_EURO
@property
def state(self):
"""Return the state of the device."""
        # The fuel type key is not present when the station is closed, so use
        # .get() instead of [] to avoid a KeyError.
return self.coordinator.data[self._station_id].get(self._fuel_type)
@property
def unique_id(self) -> str:
"""Return a unique identifier for this entity."""
return f"{self._station_id}_{self._fuel_type}"
@property
def device_state_attributes(self):
"""Return the attributes of the device."""
data = self.coordinator.data[self._station_id]
attrs = {
ATTR_ATTRIBUTION: ATTRIBUTION,
ATTR_BRAND: self._station["brand"],
ATTR_FUEL_TYPE: self._fuel_type,
ATTR_STATION_NAME: self._station["name"],
ATTR_STREET: self._street,
ATTR_HOUSE_NUMBER: self._house_number,
ATTR_POSTCODE: self._postcode,
ATTR_CITY: self._city,
}
if self._show_on_map:
attrs[ATTR_LATITUDE] = self._latitude
attrs[ATTR_LONGITUDE] = self._longitude
if data is not None and "status" in data:
attrs[ATTR_IS_OPEN] = data["status"] == "open"
return attrs
|
import numpy as np
import pytest
from keras import backend as K
from matchzoo import layers
from matchzoo.contrib.layers import SpatialGRU
from matchzoo.contrib.layers import MatchingTensorLayer
def test_matching_layers():
s1_value = np.array([[[1, 2], [2, 3], [3, 4]],
[[0.1, 0.2], [0.2, 0.3], [0.3, 0.4]]
])
s2_value = np.array([[[1, 2], [2, 3]],
[[0.1, 0.2], [0.2, 0.3]]
])
s3_value = np.array([[[1, 2], [2, 3]],
[[0.1, 0.2], [0.2, 0.3]],
[[0.1, 0.2], [0.2, 0.3]]
])
s1_tensor = K.variable(s1_value)
s2_tensor = K.variable(s2_value)
s3_tensor = K.variable(s3_value)
for matching_type in ['dot', 'mul', 'plus', 'minus', 'concat']:
model = layers.MatchingLayer(matching_type=matching_type)([s1_tensor, s2_tensor])
ret = K.eval(model)
with pytest.raises(ValueError):
layers.MatchingLayer(matching_type='error')
with pytest.raises(ValueError):
layers.MatchingLayer()([s1_tensor, s3_tensor])
def test_spatial_gru():
s_value = K.variable(np.array([[[[1, 2], [2, 3], [3, 4]],
[[4, 5], [5, 6], [6, 7]]],
[[[0.1, 0.2], [0.2, 0.3], [0.3, 0.4]],
[[0.4, 0.5], [0.5, 0.6], [0.6, 0.7]]]]))
for direction in ['lt', 'rb']:
model = SpatialGRU(direction=direction)
_ = K.eval(model(s_value))
with pytest.raises(ValueError):
SpatialGRU(direction='lr')(s_value)
def test_matching_tensor_layer():
s1_value = np.array([[[1, 2], [2, 3], [3, 4]],
[[0.1, 0.2], [0.2, 0.3], [0.3, 0.4]]])
s2_value = np.array([[[1, 2], [2, 3]],
[[0.1, 0.2], [0.2, 0.3]]])
s3_value = np.array([[[1, 2], [2, 3]],
[[0.1, 0.2], [0.2, 0.3]],
[[0.1, 0.2], [0.2, 0.3]]])
s1_tensor = K.variable(s1_value)
s2_tensor = K.variable(s2_value)
s3_tensor = K.variable(s3_value)
for init_diag in [True, False]:
model = MatchingTensorLayer(init_diag=init_diag)
_ = K.eval(model([s1_tensor, s2_tensor]))
with pytest.raises(ValueError):
MatchingTensorLayer()([s1_tensor, s3_tensor])
|
from trashcli.trash import TrashDirs
from mock import Mock, call
class TestListTrashinfo:
def test_howto_list_trashdirs(self):
out = Mock()
environ = {'HOME':'/home/user'}
trashdirs = TrashDirs(
environ = environ,
getuid = lambda:123,
list_volumes = lambda:['/vol', '/vol2'],
top_trashdir_rules = Mock(),
)
trashdirs.on_trash_dir_found = out
trashdirs.list_trashdirs()
assert ([call('/home/user/.local/share/Trash', '/'),
call('/vol/.Trash-123', '/vol'),
call('/vol2/.Trash-123', '/vol2')] ==
out.mock_calls)
|
import unittest
import pandas as pd
from pgmpy.models import BayesianModel
from pgmpy.estimators import BicScore
class TestBicScore(unittest.TestCase):
def setUp(self):
self.d1 = pd.DataFrame(
data={"A": [0, 0, 1], "B": [0, 1, 0], "C": [1, 1, 0], "D": ["X", "Y", "Z"]}
)
self.m1 = BayesianModel([("A", "C"), ("B", "C"), ("D", "B")])
self.m2 = BayesianModel([("C", "A"), ("C", "B"), ("A", "D")])
# data_link - "https://www.kaggle.com/c/titanic/download/train.csv"
self.titanic_data = pd.read_csv(
"pgmpy/tests/test_estimators/testdata/titanic_train.csv"
)
self.titanic_data2 = self.titanic_data[["Survived", "Sex", "Pclass"]]
def test_score(self):
self.assertAlmostEqual(BicScore(self.d1).score(self.m1), -10.698440814229318)
self.assertEqual(BicScore(self.d1).score(BayesianModel()), 0)
def test_score_titanic(self):
scorer = BicScore(self.titanic_data2)
titanic = BayesianModel([("Sex", "Survived"), ("Pclass", "Survived")])
self.assertAlmostEqual(scorer.score(titanic), -1896.7250012840179)
titanic2 = BayesianModel([("Pclass", "Sex")])
titanic2.add_nodes_from(["Sex", "Survived", "Pclass"])
self.assertLess(scorer.score(titanic2), scorer.score(titanic))
def tearDown(self):
del self.d1
del self.m1
del self.m2
del self.titanic_data
del self.titanic_data2
|
from absl import flags
from csapi import API
from six.moves import urllib
try:
from requests.packages import urllib3
urllib3.disable_warnings()
except ImportError:
pass
FLAGS = flags.FLAGS
class CsClient(object):
def __init__(self, url, api_key, secret):
self._cs = API(api_key,
secret,
url,
logging=False)
def get_zone(self, zone_name):
cs_args = {
'command': 'listZones'
}
zones = self._cs.request(cs_args)
if zones and 'zone' in zones:
for zone in zones['zone']:
if zone['name'] == zone_name:
return zone
return None
def get_template(self, template_name, project_id=None):
cs_args = {
'command': 'listTemplates',
'templatefilter': 'executable'
}
if project_id:
cs_args.update({'projectid': project_id})
templates = self._cs.request(cs_args)
if templates and 'template' in templates:
for templ in templates['template']:
if templ['name'] == template_name:
return templ
return None
def get_serviceoffering(self, service_offering_name):
cs_args = {
'command': 'listServiceOfferings',
}
service_offerings = self._cs.request(cs_args)
if service_offerings and 'serviceoffering' in service_offerings:
for servo in service_offerings['serviceoffering']:
if servo['name'] == service_offering_name:
return servo
return None
def get_project(self, project_name):
cs_args = {
'command': 'listProjects'
}
projects = self._cs.request(cs_args)
if projects and 'project' in projects:
for proj in projects['project']:
if proj['name'] == project_name:
return proj
return None
def get_network(self, network_name, project_id=None, vpc_id=None):
cs_args = {
'command': 'listNetworks',
}
if project_id:
cs_args.update({"projectid": project_id})
if vpc_id:
cs_args.update({"vpcid": vpc_id})
networks = self._cs.request(cs_args)
if networks and 'network' in networks:
for network in networks['network']:
if network['name'] == network_name:
return network
return None
def get_network_offering(self, network_offering_name, project_id):
cs_args = {
'command': 'listNetworkOfferings',
}
nw_offerings = self._cs.request(cs_args)
if nw_offerings and 'networkoffering' in nw_offerings:
for nw_off in nw_offerings['networkoffering']:
if nw_off['name'] == network_offering_name:
return nw_off
return None
def get_vpc(self, vpc_name, project_id=None):
cs_args = {
'command': 'listVPCs',
}
if project_id:
cs_args.update({"projectid": project_id})
vpcs = self._cs.request(cs_args)
if vpcs and 'vpc' in vpcs:
for vpc in vpcs['vpc']:
if vpc['name'] == vpc_name:
return vpc
return None
def get_vpc_offering(self, vpc_offering_name):
cs_args = {
'command': 'listVPCOfferings',
}
vpc_offerings = self._cs.request(cs_args)
if vpc_offerings and 'vpcoffering' in vpc_offerings:
for vpc_off in vpc_offerings['vpcoffering']:
if vpc_off['name'] == vpc_offering_name:
return vpc_off
return None
def get_virtual_machine(self, vm_name, project_id=None):
cs_args = {
'command': 'listVirtualMachines',
}
if project_id:
cs_args.update({"projectid": project_id})
vms = self._cs.request(cs_args)
if vms and 'virtualmachine' in vms:
for vm in vms['virtualmachine']:
if vm['name'] == vm_name:
return vm
return None
def create_vm(self,
name,
zone_id,
service_offering_id,
template_id,
network_ids=None,
keypair=None,
project_id=None):
create_vm_args = {
'command': 'deployVirtualMachine',
'serviceofferingid': service_offering_id,
'templateid': template_id,
'zoneid': zone_id,
'name': name,
}
if network_ids:
create_vm_args.update({"networkids": network_ids})
if keypair:
create_vm_args.update({'keypair': keypair})
if project_id:
create_vm_args.update({"projectid": project_id})
vm = self._cs.request(create_vm_args)
return vm
def delete_vm(self, vm_id):
cs_args = {
'command': 'destroyVirtualMachine',
'id': vm_id,
            'expunge': 'true' # Requires root/domain admin
}
res = self._cs.request(cs_args)
return res
def create_vpc(self, name, zone_id, cidr, vpc_offering_id, project_id=None):
cs_args = {
'command': 'createVPC',
'name': name,
'displaytext': name,
'vpcofferingid': vpc_offering_id,
'cidr': cidr,
'zoneid': zone_id,
}
if project_id:
cs_args.update({"projectid": project_id})
vpc = self._cs.request(cs_args)
if vpc and 'vpc' in vpc:
return vpc['vpc']
return None
def delete_vpc(self, vpc_id):
cs_args = {
'command': 'deleteVPC',
'id': vpc_id
}
res = self._cs.request(cs_args)
return res
def create_network(self,
name,
network_offering_id,
zone_id,
project_id=None,
vpc_id=None,
gateway=None,
netmask=None,
acl_id=None):
cs_args = {
'command': 'createNetwork',
'name': name,
'displaytext': name,
'zoneid': zone_id,
'networkofferingid': network_offering_id,
}
if project_id:
cs_args.update({"projectid": project_id})
if vpc_id:
cs_args.update({
'vpcid': vpc_id,
'gateway': gateway,
'netmask': netmask,
'aclid': acl_id
})
nw = self._cs.request(cs_args)
if nw and 'network' in nw:
return nw['network']
return nw
def delete_network(self, network_id):
cs_args = {
'command': 'deleteNetwork',
'id': network_id,
}
res = self._cs.request(cs_args)
return res
def alloc_public_ip(self, network_id, is_vpc=False):
cs_args = {
'command': 'associateIpAddress',
}
if is_vpc:
cs_args.update({'vpcid': network_id})
else:
cs_args.update({'networkid': network_id})
res = self._cs.request(cs_args)
if res and 'ipaddress' in res:
return res['ipaddress']
return None
def release_public_ip(self, ipaddress_id):
cs_args = {
'command': 'disassociateIpAddress',
'id': ipaddress_id
}
res = self._cs.request(cs_args)
return res
def enable_static_nat(self, ip_address_id, vm_id, network_id):
cs_args = {
'command': 'enableStaticNat',
'ipaddressid': ip_address_id,
'virtualmachineid': vm_id
}
if network_id:
cs_args.update({'networkid': network_id})
res = self._cs.request(cs_args)
if res and 'success' in res:
return res['success']
return None
def snat_rule_exists(self, ip_address_id, vm_id):
cs_args = {
'command': 'listPublicIpAddresses',
'id': ip_address_id
}
res = self._cs.request(cs_args)
assert 'publicipaddress' in res, "No public IP address found"
        assert len(res['publicipaddress']) == 1, \
            "More than one public IP address found"
res = res['publicipaddress'][0]
if res and 'virtualmachineid' in res and \
res['virtualmachineid'] == vm_id:
return True
return False
def register_ssh_keypair(self, name, public_key, project_id=None):
cs_args = {
'command': 'registerSSHKeyPair',
'name': name,
'publickey': urllib.parse.quote(public_key),
}
if project_id:
cs_args.update({"projectid": project_id})
res = self._cs.request(cs_args, method='post')
return res
def unregister_ssh_keypair(self, name, project_id=None):
cs_args = {
'command': 'deleteSSHKeyPair',
'name': name,
}
if project_id:
cs_args.update({"projectid": project_id})
res = self._cs.request(cs_args)
return res
def get_ssh_keypair(self, name, project_id=None):
cs_args = {
'command': 'listSSHKeyPairs',
'name': name,
}
if project_id:
cs_args.update({"projectid": project_id})
kps = self._cs.request(cs_args)
if kps and 'sshkeypair' in kps:
for kp in kps['sshkeypair']:
if kp['name'] == name:
return kp
return None
def get_network_acl(self, name, project_id=None):
cs_args = {
'command': 'listNetworkACLLists',
}
if project_id:
cs_args.update({"projectid": project_id})
acllist = self._cs.request(cs_args)
for acl in acllist['networkacllist']:
if acl['name'] == name:
return acl
return None
def create_volume(self, name, diskoffering_id, zone_id, project_id=None):
cs_args = {
'command': 'createVolume',
'diskofferingid': diskoffering_id,
'zoneid': zone_id,
'name': name
}
if project_id:
cs_args.update({'projectid': project_id})
vol = self._cs.request(cs_args)
if vol and 'volume' in vol:
return vol['volume']
return None
def get_volume(self, name, project_id=None):
cs_args = {
'command': 'listVolumes',
}
if project_id:
cs_args.update({"projectid": project_id})
vols = self._cs.request(cs_args)
if vols and 'volume' in vols:
for v in vols['volume']:
if v['name'] == name:
return v
return None
def delete_volume(self, volume_id):
cs_args = {
'command': 'deleteVolume',
'id': volume_id
}
res = self._cs.request(cs_args)
return res
def attach_volume(self, vol_id, vm_id):
cs_args = {
'command': 'attachVolume',
'id': vol_id,
'virtualmachineid': vm_id
}
res = self._cs.request(cs_args)
if res and 'volume' in res:
return res['volume']
return None
def detach_volume(self, vol_id):
cs_args = {
'command': 'detachVolume',
'id': vol_id,
}
res = self._cs.request(cs_args)
return res
def list_disk_offerings(self):
cs_args = {
'command': 'listDiskOfferings',
}
disk_off = self._cs.request(cs_args)
if disk_off and 'diskoffering' in disk_off:
return disk_off['diskoffering']
return None
|
import logging
import time
from collections import defaultdict
from threading import RLock
from concurrent.futures import FIRST_COMPLETED
from ._workers_pool import LazySingletonTasksCoordinator
from .async_utils import AsyncRequestType, AsyncRequest
from ..decorators import mongo_retry
from ..exceptions import AsyncArcticException
def _arctic_task_exec(request):
request.start_time = time.time()
logging.debug("Executing asynchronous request for {}/{}".format(request.library, request.symbol))
result = None
try:
request.is_running = True
if request.mongo_retry:
result = mongo_retry(request.fun)(*request.args, **request.kwargs)
else:
result = request.fun(*request.args, **request.kwargs)
except Exception as e:
request.exception = e
finally:
request.data = result
request.end_time = time.time()
request.is_running = False
return result
class AsyncArctic(LazySingletonTasksCoordinator):
_instance = None
_SINGLETON_LOCK = RLock()
_POOL_LOCK = RLock()
def __init__(self, pool_size):
# Only allow creation via get_instance
if not type(self)._SINGLETON_LOCK._is_owned():
raise AsyncArcticException("AsyncArctic is a singleton, can't create a new instance")
# Enforce the singleton pattern
with type(self)._SINGLETON_LOCK:
super(AsyncArctic, self).__init__(pool_size)
self.requests_per_library = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
self.requests_by_id = dict()
self.local_shutdown = False
self.deferred_requests = list()
def __reduce__(self):
return "ASYNC_ARCTIC"
def _get_modifiers(self, library_name, symbol=None):
return self.requests_per_library[library_name][symbol][AsyncRequestType.MODIFIER]
def _get_accessors(self, library_name, symbol=None):
return self.requests_per_library[library_name][symbol][AsyncRequestType.ACCESSOR]
@staticmethod
def _verify_request(store, is_modifier, **kwargs):
library_name = None if store is None else store._arctic_lib.get_name()
symbol = kwargs.get('symbol')
kind = AsyncRequestType.MODIFIER if is_modifier else AsyncRequestType.ACCESSOR
callback = kwargs.get('async_callback')
mongo_retry = bool(kwargs.get('mongo_retry'))
return library_name, symbol, kind, callback, mongo_retry
def _is_clashing(self, request):
return bool(self._get_modifiers(request.library, request.symbol) or
request.kind is AsyncRequestType.MODIFIER and self._get_accessors(request.library, request.symbol))
def _add_request(self, request):
self.requests_per_library[request.library][request.symbol][request.kind].append(request)
self.requests_by_id[request.id] = request
def _remove_request(self, request):
self.requests_per_library[request.library][request.symbol][request.kind].remove(request)
if request.id in self.requests_by_id:
del self.requests_by_id[request.id]
def _schedule_request(self, request):
try:
new_id, new_future = self.submit_task(False, _arctic_task_exec, request)
request.id = new_id
# Update the state of tracked tasks
self._add_request(request)
request.future = new_future
request.future.add_done_callback(lambda the_future: self._request_finished(request))
except Exception:
# clean up the state
self._remove_request(request)
raise
def submit_arctic_request(self, store, fun, is_modifier, *args, **kwargs):
lib_name, symbol, kind, callback, mongo_retry = AsyncArctic._verify_request(store, is_modifier, **kwargs)
for k in ('async_callback', 'mongo_retry'):
kwargs.pop(k, None)
with type(self)._POOL_LOCK: # class level lock, since it is a Singleton
if self.local_shutdown:
raise AsyncArcticException("AsyncArctic has been shutdown and can no longer accept new requests.")
# Create the request object
request = AsyncRequest(kind, lib_name, fun, callback, *args, **kwargs)
if lib_name and self._is_clashing(request):
self.deferred_requests.append(request)
return request
self._schedule_request(request)
return request
def _reschedule_deferred(self):
picked = None
try:
for deferred in self.deferred_requests:
if not self._is_clashing(deferred):
picked = deferred
self._schedule_request(deferred)
break
except:
logging.exception("Failed to re-schedule a deferred task: {}".format(picked))
return
self.deferred_requests.remove(picked)
def _request_finished(self, request):
with type(self)._POOL_LOCK:
self._remove_request(request)
if self.deferred_requests:
self._reschedule_deferred()
elif self.local_shutdown:
# Deferred shutdown of the underlying pool until the deferred jobs have finished
super(AsyncArctic, self).shutdown()
request.is_completed = True
if callable(request.callback):
request.callback(request)
def reset(self, pool_size=None, timeout=None):
self.shutdown(timeout=timeout)
self.await_termination(timeout=timeout)
super(AsyncArctic, self).reset(pool_size, timeout)
with type(self)._SINGLETON_LOCK:
self.requests_per_library = defaultdict(lambda: defaultdict(lambda: defaultdict(list)))
self.requests_by_id = dict()
self.local_shutdown = False
self.deferred_requests = list()
def shutdown(self, timeout=None):
if self.local_shutdown:
return
with type(self)._POOL_LOCK:
self.local_shutdown = True
if self.total_pending_requests() == 0:
# safe to use this non-atomic check here, as self.deferred_requests
# is updated only after the deferred request is scheduled
super(AsyncArctic, self).shutdown(timeout=timeout)
def await_termination(self, timeout=None):
while self.total_pending_requests() > 0:
            AsyncArctic.wait_requests(list(self.requests_by_id.values()) + self.deferred_requests,
                                      do_raise=False, timeout=timeout)
# super(AsyncArctic, self).await_termination(timeout)
def total_pending_requests(self):
with type(self)._POOL_LOCK: # the lock here is "really" necessary
return len(self.requests_by_id) + len(self.deferred_requests)
@staticmethod
def _wait_until_scheduled(requests, timeout=None, check_interval=0.1):
start = time.time()
while True:
if any(r for r in requests if r.future is None):
time.sleep(check_interval)
else:
return True
if timeout is not None and time.time() - start >= timeout:
break
return False
@staticmethod
def wait_request(request, do_raise=False, timeout=None):
if request is None:
return
if not AsyncArctic._wait_until_scheduled((request,), timeout):
raise AsyncArcticException("Timed-out while waiting for request to be scheduled")
        while not request.is_completed:
AsyncArctic.wait_tasks((request.future,), timeout=timeout, raise_exceptions=do_raise)
@staticmethod
def wait_requests(requests, do_raise=False, timeout=None):
if not AsyncArctic._wait_until_scheduled(requests, timeout):
raise AsyncArcticException("Timed-out while waiting for request to be scheduled")
while requests and not all(r.is_completed for r in requests):
AsyncArctic.wait_tasks(tuple(r.future for r in requests if not r.is_completed and r.future is not None),
timeout=timeout, raise_exceptions=do_raise)
@staticmethod
def wait_any_request(requests, do_raise=False, timeout=None):
if not AsyncArctic._wait_until_scheduled(requests, timeout):
raise AsyncArcticException("Timed-out while waiting for request to be scheduled")
while requests and not any(r.is_completed for r in requests):
AsyncArctic.wait_tasks(tuple(r.future for r in requests if not r.is_completed and r.future is not None),
timeout=timeout, return_when=FIRST_COMPLETED, raise_exceptions=do_raise)
@staticmethod
def filter_finished_requests(requests, do_raise=True):
if not requests:
return requests, requests
alive_requests = [r for r in requests if not r.is_completed]
done_requests = [r for r in requests if r.is_completed]
if do_raise:
            AsyncArctic.raise_first_errored(done_requests)
return alive_requests, done_requests
@staticmethod
def raise_first_errored(requests):
errored = tuple(r for r in requests if r.is_completed and r.exception is not None)
if errored:
raise errored[0].exception
@staticmethod
def filter_errored(requests):
return tuple(r for r in requests if r.is_completed and r.exception is not None)
ASYNC_ARCTIC = AsyncArctic.get_instance()
async_arctic_submit = ASYNC_ARCTIC.submit_arctic_request
async_wait_request = ASYNC_ARCTIC.wait_request
async_wait_requests = ASYNC_ARCTIC.wait_requests
async_shutdown = ASYNC_ARCTIC.shutdown
async_await_termination = ASYNC_ARCTIC.await_termination
async_reset_pool = ASYNC_ARCTIC.reset
async_total_requests = ASYNC_ARCTIC.total_pending_requests
# def async_modifier(func):
# @wraps(func)
# def wrapper(self, *args, **kwargs):
# return async_arctic_submit(self, func, True, *args, **kwargs)
#
# return wrapper
#
#
# def async_accessor(func):
# @wraps(func)
# def wrapper(self, *args, **kwargs):
# return async_arctic_submit(self, func, False, *args, **kwargs)
#
# return wrapper
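# Usage sketch (not part of the original module): how a caller typically drives
# the module-level helpers above. The `library` argument, symbol name and
# payload are illustrative assumptions; any Arctic store exposing
# `_arctic_lib.get_name()` and a `write(symbol, data)` method would do.
def _async_arctic_usage_example(library):
    request = async_arctic_submit(library, library.write, True,
                                  symbol='EXAMPLE_SYMBOL', data=list(range(10)))
    # Block until the worker pool has executed the write.
    async_wait_request(request)
    if request.exception is not None:
        raise request.exception
    return request.data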
|
from datetime import datetime, timedelta
from typing import Tuple, List
from collections import namedtuple
Interval = Tuple[timedelta, int]
AntiSpamInterval = namedtuple("AntiSpamInterval", ["period", "frequency"])
class AntiSpam:
"""
    Custom class which is more flexible than using discord.py's
    `commands.cooldown()`.
    Can be initialized with a custom set of intervals,
    provided as a list of tuples in the form (timedelta, quantity),
    where quantity is the maximum number of times
    something should be allowed in an interval.
    See the usage sketch at the end of this module.
"""
# TODO : Decorator interface for command check using `spammy`
# with insertion of the antispam element into context
# for manual stamping on successful command completion
default_intervals = [
(timedelta(seconds=5), 3),
(timedelta(minutes=1), 5),
(timedelta(hours=1), 10),
(timedelta(days=1), 24),
]
def __init__(self, intervals: List[Interval]):
self.__event_timestamps = []
_itvs = intervals if intervals else self.default_intervals
self.__intervals = [AntiSpamInterval(*x) for x in _itvs]
self.__discard_after = max([x.period for x in self.__intervals])
def __interval_check(self, interval: AntiSpamInterval):
return (
len([t for t in self.__event_timestamps if (t + interval.period) > datetime.utcnow()])
>= interval.frequency
)
@property
def spammy(self):
"""
use this to check if any interval criteria are met
"""
return any(self.__interval_check(x) for x in self.__intervals)
def stamp(self):
"""
Use this to mark an event that counts against the intervals
as happening now
"""
self.__event_timestamps.append(datetime.utcnow())
self.__event_timestamps = [
t for t in self.__event_timestamps if t + self.__discard_after > datetime.utcnow()
]
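# Usage sketch (not part of the original class): a minimal driver showing how
# `spammy` and `stamp` cooperate. The single interval used here is an
# illustrative assumption, allowing at most 2 events per 10 seconds.
def _antispam_usage_example():
    guard = AntiSpam([(timedelta(seconds=10), 2)])
    for _ in range(3):
        if not guard.spammy:
            # ... perform the rate-limited action here ...
            guard.stamp()
    # After two stamped events inside the 10-second window, `spammy` is True.
    return guard.spammy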
|
from __future__ import unicode_literals, division
import sys
import os
import inspect
import codecs
import io
import argparse
import re
import warnings
import random
import tempfile
from multiprocessing import Pool, cpu_count
# hack for python2/3 compatibility
from io import open
argparse.open = open
class BPE(object):
def __init__(self, codes, merges=-1, separator='@@', vocab=None, glossaries=None):
codes.seek(0)
offset=1
# check version information
firstline = codes.readline()
if firstline.startswith('#version:'):
self.version = tuple([int(x) for x in re.sub(r'(\.0+)*$','', firstline.split()[-1]).split(".")])
offset += 1
else:
self.version = (0, 1)
codes.seek(0)
self.bpe_codes = [tuple(item.strip('\r\n ').split(' ')) for (n, item) in enumerate(codes.read().rstrip('\n').split('\n')) if (n < merges or merges == -1)]
for i, item in enumerate(self.bpe_codes):
if len(item) != 2:
sys.stderr.write('Error: invalid line {0} in BPE codes file: {1}\n'.format(i+offset, ' '.join(item)))
                sys.stderr.write('Each line should consist of exactly two subword units, separated by whitespace\n')
sys.exit(1)
# some hacking to deal with duplicates (only consider first instance)
self.bpe_codes = dict([(code,i) for (i,code) in reversed(list(enumerate(self.bpe_codes)))])
self.bpe_codes_reverse = dict([(pair[0] + pair[1], pair) for pair,i in self.bpe_codes.items()])
self.separator = separator
self.vocab = vocab
self.glossaries = glossaries if glossaries else []
self.glossaries_regex = re.compile('^({})$'.format('|'.join(glossaries))) if glossaries else None
self.cache = {}
def process_lines(self, filename, outfile, dropout=0, num_workers=1):
if sys.version_info < (3, 0):
print("Parallel mode is only supported in Python3.")
sys.exit(1)
if num_workers == 1:
_process_lines(self, filename, outfile, dropout, 0, 0)
elif num_workers > 1:
with open(filename, encoding="utf-8") as f:
size = os.fstat(f.fileno()).st_size
chunk_size = int(size / num_workers)
offsets = [0 for _ in range(num_workers + 1)]
for i in range(1, num_workers):
f.seek(chunk_size * i)
pos = f.tell()
while True:
try:
line = f.readline()
break
except UnicodeDecodeError:
pos -= 1
f.seek(pos)
offsets[i] = f.tell()
assert 0 <= offsets[i] < 1e20, "Bad new line separator, e.g. '\\r'"
res_files = []
pool = Pool(processes=num_workers)
for i in range(num_workers):
tmp = tempfile.NamedTemporaryFile(delete=False)
tmp.close()
res_files.append(tmp)
pool.apply_async(_process_lines, (self, filename, tmp.name, dropout, offsets[i], offsets[i + 1]))
pool.close()
pool.join()
for i in range(num_workers):
with open(res_files[i].name, encoding="utf-8") as fi:
for line in fi:
outfile.write(line)
os.remove(res_files[i].name)
else:
raise ValueError('`num_workers` is expected to be a positive number, but got {}.'.format(num_workers))
def process_line(self, line, dropout=0):
"""segment line, dealing with leading and trailing whitespace"""
out = ""
leading_whitespace = len(line)-len(line.lstrip('\r\n '))
if leading_whitespace:
out += line[:leading_whitespace]
out += self.segment(line, dropout)
trailing_whitespace = len(line)-len(line.rstrip('\r\n '))
if trailing_whitespace and trailing_whitespace != len(line):
out += line[-trailing_whitespace:]
return out
def segment(self, sentence, dropout=0):
"""segment single sentence (whitespace-tokenized string) with BPE encoding"""
segments = self.segment_tokens(sentence.strip('\r\n ').split(' '), dropout)
return ' '.join(segments)
def segment_tokens(self, tokens, dropout=0):
"""segment a sequence of tokens with BPE encoding"""
output = []
for word in tokens:
# eliminate double spaces
if not word:
continue
new_word = [out for segment in self._isolate_glossaries(word)
for out in encode(segment,
self.bpe_codes,
self.bpe_codes_reverse,
self.vocab,
self.separator,
self.version,
self.cache,
self.glossaries_regex,
dropout)]
for item in new_word[:-1]:
output.append(item + self.separator)
output.append(new_word[-1])
return output
def _isolate_glossaries(self, word):
word_segments = [word]
for gloss in self.glossaries:
word_segments = [out_segments for segment in word_segments
for out_segments in isolate_glossary(segment, gloss)]
return word_segments
def _process_lines(bpe, filename, outfile, dropout, begin, end):
if isinstance(outfile, str):
fo = open(outfile, "w", encoding="utf-8")
else:
fo = outfile
with open(filename, encoding="utf-8") as f:
f.seek(begin)
line = f.readline()
while line:
pos = f.tell()
assert 0 <= pos < 1e20, "Bad new line separator, e.g. '\\r'"
if end > 0 and pos > end:
break
fo.write(bpe.process_line(line, dropout))
line = f.readline()
if isinstance(outfile, str):
fo.close()
def create_parser(subparsers=None):
if subparsers:
parser = subparsers.add_parser('apply-bpe',
formatter_class=argparse.RawDescriptionHelpFormatter,
description="learn BPE-based word segmentation")
else:
parser = argparse.ArgumentParser(
formatter_class=argparse.RawDescriptionHelpFormatter,
description="learn BPE-based word segmentation")
parser.add_argument(
'--input', '-i', type=argparse.FileType('r'), default=sys.stdin,
metavar='PATH',
help="Input file (default: standard input).")
parser.add_argument(
'--codes', '-c', type=argparse.FileType('r'), metavar='PATH',
required=True,
help="File with BPE codes (created by learn_bpe.py).")
parser.add_argument(
'--merges', '-m', type=int, default=-1,
metavar='INT',
help="Use this many BPE operations (<= number of learned symbols)"+
"default: Apply all the learned merge operations")
parser.add_argument(
'--output', '-o', type=argparse.FileType('w'), default=sys.stdout,
metavar='PATH',
help="Output file (default: standard output)")
parser.add_argument(
'--separator', '-s', type=str, default='@@', metavar='STR',
help="Separator between non-final subword units (default: '%(default)s'))")
parser.add_argument(
'--vocabulary', type=argparse.FileType('r'), default=None,
metavar="PATH",
help="Vocabulary file (built with get_vocab.py). If provided, this script reverts any merge operations that produce an OOV.")
parser.add_argument(
'--vocabulary-threshold', type=int, default=None,
metavar="INT",
help="Vocabulary threshold. If vocabulary is provided, any word with frequency < threshold will be treated as OOV")
parser.add_argument(
'--dropout', type=float, default=0,
metavar="P",
help="Dropout BPE merge operations with probability P (Provilkov et al., 2019). Use this on training data only.")
parser.add_argument(
'--glossaries', type=str, nargs='+', default=None,
metavar="STR",
help="Glossaries. Words matching any of the words/regex provided in glossaries will not be affected "+
"by the BPE (i.e. they will neither be broken into subwords, nor concatenated with other subwords. "+
"Can be provided as a list of words/regex after the --glossaries argument. Enclose each regex in quotes.")
parser.add_argument(
'--seed', type=int, default=None,
metavar="S",
help="Random seed for the random number generators (e.g. for BPE dropout with --dropout).")
parser.add_argument(
'--num-workers', type=int, default=1,
help="Number of processors to process texts, only supported in Python3. If -1, set `multiprocessing.cpu_count()`. (default: %(default)s)")
return parser
def encode(orig, bpe_codes, bpe_codes_reverse, vocab, separator, version, cache, glossaries_regex=None, dropout=0):
"""Encode word based on list of BPE merge operations, which are applied consecutively
"""
if not dropout and orig in cache:
return cache[orig]
if glossaries_regex and glossaries_regex.match(orig):
cache[orig] = (orig,)
return (orig,)
if len(orig) == 1:
return orig
if version == (0, 1):
word = list(orig) + ['</w>']
elif version == (0, 2): # more consistent handling of word-final segments
word = list(orig[:-1]) + [orig[-1] + '</w>']
else:
raise NotImplementedError
while len(word) > 1:
# get list of symbol pairs; optionally apply dropout
pairs = [(bpe_codes[pair],i,pair) for (i,pair) in enumerate(zip(word, word[1:])) if (not dropout or random.random() > dropout) and pair in bpe_codes]
if not pairs:
break
#get first merge operation in list of BPE codes
bigram = min(pairs)[2]
# find start position of all pairs that we want to merge
positions = [i for (rank,i,pair) in pairs if pair == bigram]
i = 0
new_word = []
bigram = ''.join(bigram)
for j in positions:
# merges are invalid if they start before current position. This can happen if there are overlapping pairs: (x x x -> xx x)
if j < i:
continue
new_word.extend(word[i:j]) # all symbols before merged pair
new_word.append(bigram) # merged pair
i = j+2 # continue after merged pair
new_word.extend(word[i:]) # add all symbols until end of word
word = new_word
# don't print end-of-word symbols
if word[-1] == '</w>':
word = word[:-1]
elif word[-1].endswith('</w>'):
word[-1] = word[-1][:-4]
word = tuple(word)
if vocab:
word = check_vocab_and_split(word, bpe_codes_reverse, vocab, separator)
cache[orig] = word
return word
def recursive_split(segment, bpe_codes, vocab, separator, final=False):
"""Recursively split segment into smaller units (by reversing BPE merges)
    until all units are either in-vocabulary, or cannot be split further."""
try:
if final:
left, right = bpe_codes[segment + '</w>']
right = right[:-4]
else:
left, right = bpe_codes[segment]
except:
#sys.stderr.write('cannot split {0} further.\n'.format(segment))
yield segment
return
if left + separator in vocab:
yield left
else:
for item in recursive_split(left, bpe_codes, vocab, separator, False):
yield item
if (final and right in vocab) or (not final and right + separator in vocab):
yield right
else:
for item in recursive_split(right, bpe_codes, vocab, separator, final):
yield item
def check_vocab_and_split(orig, bpe_codes, vocab, separator):
"""Check for each segment in word if it is in-vocabulary,
and segment OOV segments into smaller units by reversing the BPE merge operations"""
out = []
for segment in orig[:-1]:
if segment + separator in vocab:
out.append(segment)
else:
#sys.stderr.write('OOV: {0}\n'.format(segment))
for item in recursive_split(segment, bpe_codes, vocab, separator, False):
out.append(item)
segment = orig[-1]
if segment in vocab:
out.append(segment)
else:
#sys.stderr.write('OOV: {0}\n'.format(segment))
for item in recursive_split(segment, bpe_codes, vocab, separator, True):
out.append(item)
return out
def read_vocabulary(vocab_file, threshold):
"""read vocabulary file produced by get_vocab.py, and filter according to frequency threshold.
"""
vocabulary = set()
for line in vocab_file:
word, freq = line.strip('\r\n ').split(' ')
freq = int(freq)
        if threshold is None or freq >= threshold:
vocabulary.add(word)
return vocabulary
def isolate_glossary(word, glossary):
"""
Isolate a glossary present inside a word.
    Returns a list of subwords in which all occurrences of 'glossary' are isolated.
For example, if 'USA' is the glossary and '1934USABUSA' the word, the return value is:
['1934', 'USA', 'B', 'USA']
"""
# regex equivalent of (if word == glossary or glossary not in word)
if re.match('^'+glossary+'$', word) or not re.search(glossary, word):
return [word]
else:
segments = re.split(r'({})'.format(glossary), word)
segments, ending = segments[:-1], segments[-1]
segments = list(filter(None, segments)) # Remove empty strings in regex group.
return segments + [ending.strip('\r\n ')] if ending != '' else segments
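# Usage sketch (not part of the original script): segmenting two words with a
# tiny, hand-written codes file. The two merge operations below are assumptions
# for illustration; real codes are produced by learn_bpe.py.
def _bpe_usage_example():
    codes = io.StringIO("#version: 0.2\nl o\nlo w</w>\n")
    bpe = BPE(codes)
    # 'low' is fully merged; 'lower' only matches the 'l o' merge, so its
    # remaining subword units are joined with the '@@' separator.
    return bpe.segment("low lower")  # -> 'low lo@@ w@@ e@@ r'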
if __name__ == '__main__':
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
newdir = os.path.join(currentdir, 'subword_nmt')
if os.path.isdir(newdir):
warnings.simplefilter('default')
warnings.warn(
"this script's location has moved to {0}. This symbolic link will be removed in a future version. Please point to the new location, or install the package and use the command 'subword-nmt'".format(newdir),
DeprecationWarning
)
# python 2/3 compatibility
if sys.version_info < (3, 0):
sys.stderr = codecs.getwriter('UTF-8')(sys.stderr)
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
sys.stdin = codecs.getreader('UTF-8')(sys.stdin)
else:
sys.stdin = io.TextIOWrapper(sys.stdin.buffer, encoding='utf-8')
sys.stderr = io.TextIOWrapper(sys.stderr.buffer, encoding='utf-8')
sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8', write_through=True, line_buffering=True)
parser = create_parser()
args = parser.parse_args()
if args.num_workers <= 0:
args.num_workers = cpu_count()
# read/write files as UTF-8
args.codes = codecs.open(args.codes.name, encoding='utf-8')
if args.input.name != '<stdin>':
args.input = codecs.open(args.input.name, encoding='utf-8')
if args.output.name != '<stdout>':
args.output = codecs.open(args.output.name, 'w', encoding='utf-8')
if args.vocabulary:
args.vocabulary = codecs.open(args.vocabulary.name, encoding='utf-8')
if args.vocabulary:
vocabulary = read_vocabulary(args.vocabulary, args.vocabulary_threshold)
else:
vocabulary = None
if sys.version_info < (3, 0):
args.separator = args.separator.decode('UTF-8')
if args.glossaries:
args.glossaries = [g.decode('UTF-8') for g in args.glossaries]
if args.num_workers > 1:
args.num_workers = 1
warnings.warn("Parallel mode is only supported in Python3. Using 1 processor instead.")
if args.seed is not None:
random.seed(args.seed)
bpe = BPE(args.codes, args.merges, args.separator, vocabulary, args.glossaries)
if args.input.name == '<stdin>' or args.num_workers == 1:
if args.num_workers > 1:
warnings.warn("In parallel mode, the input cannot be STDIN. Using 1 processor instead.")
for line in args.input:
args.output.write(bpe.process_line(line, args.dropout))
else:
bpe.process_lines(args.input.name, args.output, args.dropout, args.num_workers)
|
from homeassistant.const import (
CONF_NAME,
CONF_TEMPERATURE_UNIT,
POWER_WATT,
TIME_HOURS,
TIME_MINUTES,
TIME_SECONDS,
VOLT,
)
from homeassistant.helpers.entity import Entity
from . import (
CONF_COUNTED_QUANTITY,
CONF_COUNTED_QUANTITY_PER_PULSE,
CONF_MONITOR_SERIAL_NUMBER,
CONF_NET_METERING,
CONF_NUMBER,
CONF_SENSOR_TYPE,
CONF_TIME_UNIT,
DATA_GREENEYE_MONITOR,
SENSOR_TYPE_CURRENT,
SENSOR_TYPE_PULSE_COUNTER,
SENSOR_TYPE_TEMPERATURE,
SENSOR_TYPE_VOLTAGE,
)
DATA_PULSES = "pulses"
DATA_WATT_SECONDS = "watt_seconds"
UNIT_WATTS = POWER_WATT
COUNTER_ICON = "mdi:counter"
CURRENT_SENSOR_ICON = "mdi:flash"
TEMPERATURE_ICON = "mdi:thermometer"
VOLTAGE_ICON = "mdi:current-ac"
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up a single GEM temperature sensor."""
if not discovery_info:
return
entities = []
for sensor in discovery_info:
sensor_type = sensor[CONF_SENSOR_TYPE]
if sensor_type == SENSOR_TYPE_CURRENT:
entities.append(
CurrentSensor(
sensor[CONF_MONITOR_SERIAL_NUMBER],
sensor[CONF_NUMBER],
sensor[CONF_NAME],
sensor[CONF_NET_METERING],
)
)
elif sensor_type == SENSOR_TYPE_PULSE_COUNTER:
entities.append(
PulseCounter(
sensor[CONF_MONITOR_SERIAL_NUMBER],
sensor[CONF_NUMBER],
sensor[CONF_NAME],
sensor[CONF_COUNTED_QUANTITY],
sensor[CONF_TIME_UNIT],
sensor[CONF_COUNTED_QUANTITY_PER_PULSE],
)
)
elif sensor_type == SENSOR_TYPE_TEMPERATURE:
entities.append(
TemperatureSensor(
sensor[CONF_MONITOR_SERIAL_NUMBER],
sensor[CONF_NUMBER],
sensor[CONF_NAME],
sensor[CONF_TEMPERATURE_UNIT],
)
)
elif sensor_type == SENSOR_TYPE_VOLTAGE:
entities.append(
VoltageSensor(
sensor[CONF_MONITOR_SERIAL_NUMBER],
sensor[CONF_NUMBER],
sensor[CONF_NAME],
)
)
async_add_entities(entities)
class GEMSensor(Entity):
"""Base class for GreenEye Monitor sensors."""
def __init__(self, monitor_serial_number, name, sensor_type, number):
"""Construct the entity."""
self._monitor_serial_number = monitor_serial_number
self._name = name
self._sensor = None
self._sensor_type = sensor_type
self._number = number
@property
def should_poll(self):
"""GEM pushes changes, so this returns False."""
return False
@property
def unique_id(self):
"""Return a unique ID for this sensor."""
return f"{self._monitor_serial_number}-{self._sensor_type}-{self._number}"
@property
def name(self):
"""Return the name of the channel."""
return self._name
async def async_added_to_hass(self):
"""Wait for and connect to the sensor."""
monitors = self.hass.data[DATA_GREENEYE_MONITOR]
if not self._try_connect_to_monitor(monitors):
monitors.add_listener(self._on_new_monitor)
def _on_new_monitor(self, *args):
monitors = self.hass.data[DATA_GREENEYE_MONITOR]
if self._try_connect_to_monitor(monitors):
monitors.remove_listener(self._on_new_monitor)
async def async_will_remove_from_hass(self):
"""Remove listener from the sensor."""
if self._sensor:
self._sensor.remove_listener(self.async_write_ha_state)
else:
monitors = self.hass.data[DATA_GREENEYE_MONITOR]
monitors.remove_listener(self._on_new_monitor)
def _try_connect_to_monitor(self, monitors):
monitor = monitors.monitors.get(self._monitor_serial_number)
if not monitor:
return False
self._sensor = self._get_sensor(monitor)
self._sensor.add_listener(self.async_write_ha_state)
return True
def _get_sensor(self, monitor):
raise NotImplementedError()
class CurrentSensor(GEMSensor):
"""Entity showing power usage on one channel of the monitor."""
def __init__(self, monitor_serial_number, number, name, net_metering):
"""Construct the entity."""
super().__init__(monitor_serial_number, name, "current", number)
self._net_metering = net_metering
def _get_sensor(self, monitor):
return monitor.channels[self._number - 1]
@property
def icon(self):
"""Return the icon that should represent this sensor in the UI."""
return CURRENT_SENSOR_ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement used by this sensor."""
return UNIT_WATTS
@property
def state(self):
"""Return the current number of watts being used by the channel."""
if not self._sensor:
return None
return self._sensor.watts
@property
def device_state_attributes(self):
"""Return total wattseconds in the state dictionary."""
if not self._sensor:
return None
if self._net_metering:
watt_seconds = self._sensor.polarized_watt_seconds
else:
watt_seconds = self._sensor.absolute_watt_seconds
return {DATA_WATT_SECONDS: watt_seconds}
class PulseCounter(GEMSensor):
"""Entity showing rate of change in one pulse counter of the monitor."""
def __init__(
self,
monitor_serial_number,
number,
name,
counted_quantity,
time_unit,
counted_quantity_per_pulse,
):
"""Construct the entity."""
super().__init__(monitor_serial_number, name, "pulse", number)
self._counted_quantity = counted_quantity
self._counted_quantity_per_pulse = counted_quantity_per_pulse
self._time_unit = time_unit
def _get_sensor(self, monitor):
return monitor.pulse_counters[self._number - 1]
@property
def icon(self):
"""Return the icon that should represent this sensor in the UI."""
return COUNTER_ICON
@property
def state(self):
"""Return the current rate of change for the given pulse counter."""
if not self._sensor or self._sensor.pulses_per_second is None:
return None
return (
self._sensor.pulses_per_second
* self._counted_quantity_per_pulse
* self._seconds_per_time_unit
)
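    # Worked example (illustrative, not taken from the original source): a
    # counter reporting 0.5 pulses/s with 0.1 gallons per pulse and a display
    # unit of minutes yields 0.5 * 0.1 * 60 = 3.0 gallons per minute.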
@property
def _seconds_per_time_unit(self):
"""Return the number of seconds in the given display time unit."""
if self._time_unit == TIME_SECONDS:
return 1
if self._time_unit == TIME_MINUTES:
return 60
if self._time_unit == TIME_HOURS:
return 3600
@property
def unit_of_measurement(self):
"""Return the unit of measurement for this pulse counter."""
return f"{self._counted_quantity}/{self._time_unit}"
@property
def device_state_attributes(self):
"""Return total pulses in the data dictionary."""
if not self._sensor:
return None
return {DATA_PULSES: self._sensor.pulses}
class TemperatureSensor(GEMSensor):
"""Entity showing temperature from one temperature sensor."""
def __init__(self, monitor_serial_number, number, name, unit):
"""Construct the entity."""
super().__init__(monitor_serial_number, name, "temp", number)
self._unit = unit
def _get_sensor(self, monitor):
return monitor.temperature_sensors[self._number - 1]
@property
def icon(self):
"""Return the icon that should represent this sensor in the UI."""
return TEMPERATURE_ICON
@property
def state(self):
"""Return the current temperature being reported by this sensor."""
if not self._sensor:
return None
return self._sensor.temperature
@property
def unit_of_measurement(self):
"""Return the unit of measurement for this sensor (user specified)."""
return self._unit
class VoltageSensor(GEMSensor):
"""Entity showing voltage."""
def __init__(self, monitor_serial_number, number, name):
"""Construct the entity."""
super().__init__(monitor_serial_number, name, "volts", number)
def _get_sensor(self, monitor):
"""Wire the updates to the monitor itself, since there is no voltage element in the API."""
return monitor
@property
def icon(self):
"""Return the icon that should represent this sensor in the UI."""
return VOLTAGE_ICON
@property
def state(self):
"""Return the current voltage being reported by this sensor."""
if not self._sensor:
return None
return self._sensor.voltage
@property
def unit_of_measurement(self):
"""Return the unit of measurement for this sensor."""
return VOLT
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.tag import async_scan_tag
from homeassistant.components.tag.const import DOMAIN, TAG_ID
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service
@pytest.fixture
def tag_setup(hass, hass_storage):
"""Tag setup."""
async def _storage(items=None):
if items is None:
hass_storage[DOMAIN] = {
"key": DOMAIN,
"version": 1,
"data": {"items": [{"id": "test tag"}]},
}
else:
hass_storage[DOMAIN] = items
config = {DOMAIN: {}}
return await async_setup_component(hass, DOMAIN, config)
return _storage
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_triggers(hass, tag_setup, calls):
"""Test tag triggers."""
assert await tag_setup()
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"platform": DOMAIN, TAG_ID: "abc123"},
"action": {
"service": "test.automation",
"data": {"message": "service called"},
},
}
]
},
)
await hass.async_block_till_done()
await async_scan_tag(hass, "abc123", None)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["message"] == "service called"
async def test_exception_bad_trigger(hass, calls, caplog):
"""Test for exception on event triggers firing."""
await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {"trigger": {"platform": DOMAIN, "oops": "abc123"}},
"action": {
"service": "test.automation",
"data": {"message": "service called"},
},
}
]
},
)
await hass.async_block_till_done()
assert "Invalid config for [automation]" in caplog.text
|
from itertools import count
from .imports import symbol_by_name
__all__ = (
'FairCycle', 'priority_cycle', 'round_robin_cycle', 'sorted_cycle',
)
CYCLE_ALIASES = {
'priority': 'kombu.utils.scheduling:priority_cycle',
'round_robin': 'kombu.utils.scheduling:round_robin_cycle',
'sorted': 'kombu.utils.scheduling:sorted_cycle',
}
class FairCycle:
"""Cycle between resources.
Consume from a set of resources, where each resource gets
an equal chance to be consumed from.
Arguments:
fun (Callable): Callback to call.
resources (Sequence[Any]): List of resources.
predicate (type): Exception predicate.
"""
def __init__(self, fun, resources, predicate=Exception):
self.fun = fun
self.resources = resources
self.predicate = predicate
self.pos = 0
def _next(self):
while 1:
try:
resource = self.resources[self.pos]
self.pos += 1
return resource
except IndexError:
self.pos = 0
if not self.resources:
raise self.predicate()
def get(self, callback, **kwargs):
"""Get from next resource."""
for tried in count(0): # for infinity
resource = self._next()
try:
return self.fun(resource, callback, **kwargs)
except self.predicate:
                # reraise when retries are exhausted.
if tried >= len(self.resources) - 1:
raise
def close(self):
"""Close cycle."""
def __repr__(self):
"""``repr(cycle)``."""
return '<FairCycle: {self.pos}/{size} {self.resources}>'.format(
self=self, size=len(self.resources))
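# Usage sketch (not part of the original module): FairCycle only needs a
# callable taking (resource, callback); the queue names and the trivial
# `drain` function below are illustrative assumptions.
def _fair_cycle_usage_example():
    consumed = []
    def drain(resource, callback):
        callback(resource)
        return resource
    cycle = FairCycle(drain, ['queue_a', 'queue_b'])
    cycle.get(consumed.append)  # consumes from 'queue_a'
    cycle.get(consumed.append)  # consumes from 'queue_b'; a third call wraps around
    return consumed  # ['queue_a', 'queue_b']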
class round_robin_cycle:
"""Iterator that cycles between items in round-robin."""
def __init__(self, it=None):
self.items = it if it is not None else []
def update(self, it):
"""Update items from iterable."""
self.items[:] = it
def consume(self, n):
"""Consume n items."""
return self.items[:n]
def rotate(self, last_used):
"""Move most recently used item to end of list."""
items = self.items
try:
items.append(items.pop(items.index(last_used)))
except ValueError:
pass
return last_used
class priority_cycle(round_robin_cycle):
"""Cycle that repeats items in order."""
def rotate(self, last_used):
"""Unused in this implementation."""
class sorted_cycle(priority_cycle):
"""Cycle in sorted order."""
def consume(self, n):
"""Consume n items."""
return sorted(self.items[:n])
def cycle_by_name(name):
"""Get cycle class by name."""
return symbol_by_name(name, CYCLE_ALIASES)
|
from datetime import timedelta
import logging
from aiohttp.hdrs import AUTHORIZATION
import requests
import voluptuous as vol
from homeassistant.const import (
CONF_API_KEY,
HTTP_METHOD_NOT_ALLOWED,
HTTP_OK,
HTTP_UNAUTHORIZED,
)
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
BLOOMSKY_TYPE = ["camera", "binary_sensor", "sensor"]
DOMAIN = "bloomsky"
# The BloomSky only updates every 5-8 minutes as per the API spec so there's
# no point in polling the API more frequently
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=300)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_API_KEY): cv.string})}, extra=vol.ALLOW_EXTRA
)
def setup(hass, config):
"""Set up the BloomSky component."""
api_key = config[DOMAIN][CONF_API_KEY]
try:
bloomsky = BloomSky(api_key, hass.config.units.is_metric)
except RuntimeError:
return False
hass.data[DOMAIN] = bloomsky
for component in BLOOMSKY_TYPE:
discovery.load_platform(hass, component, DOMAIN, {}, config)
return True
class BloomSky:
"""Handle all communication with the BloomSky API."""
# API documentation at http://weatherlution.com/bloomsky-api/
API_URL = "http://api.bloomsky.com/api/skydata"
def __init__(self, api_key, is_metric):
"""Initialize the BookSky."""
self._api_key = api_key
self._endpoint_argument = "unit=intl" if is_metric else ""
self.devices = {}
self.is_metric = is_metric
_LOGGER.debug("Initial BloomSky device load...")
self.refresh_devices()
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def refresh_devices(self):
"""Use the API to retrieve a list of devices."""
_LOGGER.debug("Fetching BloomSky update")
response = requests.get(
f"{self.API_URL}?{self._endpoint_argument}",
headers={AUTHORIZATION: self._api_key},
timeout=10,
)
if response.status_code == HTTP_UNAUTHORIZED:
raise RuntimeError("Invalid API_KEY")
if response.status_code == HTTP_METHOD_NOT_ALLOWED:
_LOGGER.error("You have no bloomsky devices configured")
return
if response.status_code != HTTP_OK:
_LOGGER.error("Invalid HTTP response: %s", response.status_code)
return
# Create dictionary keyed off of the device unique id
self.devices.update({device["DeviceID"]: device for device in response.json()})
|
import pytest
import sh
from molecule import config
from molecule.provisioner import ansible_playbook
@pytest.fixture
def _instance(config_instance):
return ansible_playbook.AnsiblePlaybook('playbook', config_instance)
@pytest.fixture
def _inventory_directory(_instance):
return _instance._config.provisioner.inventory_directory
def test_ansible_command_private_member(_instance):
assert _instance._ansible_command is None
def test_ansible_playbook_private_member(_instance):
assert 'playbook' == _instance._playbook
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_bake(_inventory_directory, _instance):
pb = _instance._config.provisioner.playbooks.converge
_instance._playbook = pb
_instance.bake()
x = [
str(sh.ansible_playbook),
'--become',
'--inventory={}'.format(_inventory_directory),
'--skip-tags=molecule-notest,notest',
pb,
]
result = str(_instance._ansible_command).split()
assert sorted(x) == sorted(result)
def test_bake_removes_non_interactive_options_from_non_converge_playbooks(
_inventory_directory, _instance):
_instance.bake()
x = [
str(sh.ansible_playbook),
'--inventory={}'.format(_inventory_directory),
'--skip-tags=molecule-notest,notest',
'playbook',
]
result = str(_instance._ansible_command).split()
assert sorted(x) == sorted(result)
def test_bake_has_ansible_args(_inventory_directory, _instance):
_instance._config.ansible_args = ('foo', 'bar')
_instance._config.config['provisioner']['ansible_args'] = ('frob', 'nitz')
_instance.bake()
x = [
str(sh.ansible_playbook),
'--inventory={}'.format(_inventory_directory),
'--skip-tags=molecule-notest,notest',
'playbook',
'frob',
'nitz',
'foo',
'bar',
]
result = str(_instance._ansible_command).split()
assert sorted(x) == sorted(result)
def test_bake_does_not_have_ansible_args(_inventory_directory, _instance):
for action in ['create', 'destroy']:
_instance._config.ansible_args = ('foo', 'bar')
_instance._config.action = action
_instance.bake()
x = [
str(sh.ansible_playbook),
'--inventory={}'.format(_inventory_directory),
'--skip-tags=molecule-notest,notest',
'playbook',
]
result = str(_instance._ansible_command).split()
assert sorted(x) == sorted(result)
def test_bake_idem_does_have_skip_tag(_inventory_directory, _instance):
_instance._config.action = 'idempotence'
_instance.bake()
x = [
str(sh.ansible_playbook),
'--inventory={}'.format(_inventory_directory),
'--skip-tags=molecule-notest,notest,molecule-idempotence-notest',
'playbook',
]
result = str(_instance._ansible_command).split()
assert sorted(x) == sorted(result)
def test_execute(patched_run_command, _instance):
_instance._ansible_command = 'patched-command'
result = _instance.execute()
patched_run_command.assert_called_once_with('patched-command', debug=False)
assert 'patched-run-command-stdout' == result
def test_execute_bakes(_inventory_directory, patched_run_command, _instance):
_instance.execute()
assert _instance._ansible_command is not None
x = [
str(sh.ansible_playbook),
'--inventory={}'.format(_inventory_directory),
'--skip-tags=molecule-notest,notest',
'playbook',
]
result = str(patched_run_command.mock_calls[0][1][0]).split()
assert sorted(x) == sorted(result)
def test_execute_bakes_with_ansible_args(_inventory_directory,
patched_run_command, _instance):
_instance._config.ansible_args = ('--foo', '--bar')
_instance.execute()
assert _instance._ansible_command is not None
x = [
str(sh.ansible_playbook),
'--inventory={}'.format(_inventory_directory),
'--skip-tags=molecule-notest,notest',
'playbook',
'--foo',
'--bar',
]
result = str(patched_run_command.mock_calls[0][1][0]).split()
assert sorted(x) == sorted(result)
def test_executes_catches_and_exits_return_code_with_stdout(
patched_run_command, patched_logger_critical, _instance):
patched_run_command.side_effect = sh.ErrorReturnCode_1(
sh.ansible_playbook, b'out', b'err')
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
msg = 'out'
patched_logger_critical.assert_called_once_with(msg)
def test_add_cli_arg(_instance):
assert {} == _instance._cli
_instance.add_cli_arg('foo', 'bar')
assert {'foo': 'bar'} == _instance._cli
def test_add_env_arg(_instance):
assert 'foo' not in _instance._env
_instance.add_env_arg('foo', 'bar')
assert 'bar' == _instance._env['foo']
|
from typing import TYPE_CHECKING
import attr
if TYPE_CHECKING:
# pylint: disable=unused-import
from homeassistant.helpers import ( # noqa: F401
device_registry as dev_reg,
entity_registry as ent_reg,
)
@attr.s(slots=True)
class PermissionLookup:
"""Class to hold data for permission lookups."""
entity_registry: "ent_reg.EntityRegistry" = attr.ib()
device_registry: "dev_reg.DeviceRegistry" = attr.ib()
|
import logging
import voluptuous as vol
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TIME,
ATTR_WEATHER_HUMIDITY,
ATTR_WEATHER_PRESSURE,
ATTR_WEATHER_TEMPERATURE,
ATTR_WEATHER_WIND_BEARING,
ATTR_WEATHER_WIND_SPEED,
PLATFORM_SCHEMA,
WeatherEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import (
CONF_ELEVATION,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
LENGTH_KILOMETERS,
LENGTH_MILES,
PRESSURE_HPA,
PRESSURE_INHG,
TEMP_CELSIUS,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util.distance import convert as convert_distance
from homeassistant.util.pressure import convert as convert_pressure
from .const import ATTR_MAP, CONDITIONS_MAP, CONF_TRACK_HOME, DOMAIN, FORECAST_MAP
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = (
"Weather forecast from met.no, delivered by the Norwegian "
"Meteorological Institute."
)
DEFAULT_NAME = "Met.no"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Inclusive(
CONF_LATITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.latitude,
vol.Inclusive(
CONF_LONGITUDE, "coordinates", "Latitude and longitude must exist together"
): cv.longitude,
vol.Optional(CONF_ELEVATION): int,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Met.no weather platform."""
_LOGGER.warning("Loading Met.no via platform config is deprecated")
# Add defaults.
config = {CONF_ELEVATION: hass.config.elevation, **config}
if config.get(CONF_LATITUDE) is None:
config[CONF_TRACK_HOME] = True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add a weather entity from a config_entry."""
coordinator = hass.data[DOMAIN][config_entry.entry_id]
async_add_entities(
[
MetWeather(
coordinator, config_entry.data, hass.config.units.is_metric, False
),
MetWeather(
coordinator, config_entry.data, hass.config.units.is_metric, True
),
]
)
def format_condition(condition: str) -> str:
"""Return condition from dict CONDITIONS_MAP."""
for key, value in CONDITIONS_MAP.items():
if condition in value:
return key
return condition
class MetWeather(CoordinatorEntity, WeatherEntity):
"""Implementation of a Met.no weather condition."""
def __init__(self, coordinator, config, is_metric, hourly):
"""Initialise the platform with a data instance and site."""
super().__init__(coordinator)
self._config = config
self._is_metric = is_metric
self._hourly = hourly
@property
def track_home(self):
"""Return if we are tracking home."""
return self._config.get(CONF_TRACK_HOME, False)
@property
def unique_id(self):
"""Return unique ID."""
name_appendix = ""
if self._hourly:
name_appendix = "-hourly"
if self.track_home:
return f"home{name_appendix}"
return f"{self._config[CONF_LATITUDE]}-{self._config[CONF_LONGITUDE]}{name_appendix}"
@property
def name(self):
"""Return the name of the sensor."""
name = self._config.get(CONF_NAME)
name_appendix = ""
if self._hourly:
name_appendix = " Hourly"
if name is not None:
return f"{name}{name_appendix}"
if self.track_home:
return f"{self.hass.config.location_name}{name_appendix}"
return f"{DEFAULT_NAME}{name_appendix}"
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return not self._hourly
@property
def condition(self):
"""Return the current condition."""
condition = self.coordinator.data.current_weather_data.get("condition")
return format_condition(condition)
@property
def temperature(self):
"""Return the temperature."""
return self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_TEMPERATURE]
)
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def pressure(self):
"""Return the pressure."""
pressure_hpa = self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_PRESSURE]
)
if self._is_metric or pressure_hpa is None:
return pressure_hpa
return round(convert_pressure(pressure_hpa, PRESSURE_HPA, PRESSURE_INHG), 2)
@property
def humidity(self):
"""Return the humidity."""
return self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_HUMIDITY]
)
@property
def wind_speed(self):
"""Return the wind speed."""
speed_km_h = self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_WIND_SPEED]
)
if self._is_metric or speed_km_h is None:
return speed_km_h
speed_mi_h = convert_distance(speed_km_h, LENGTH_KILOMETERS, LENGTH_MILES)
return int(round(speed_mi_h))
@property
def wind_bearing(self):
"""Return the wind direction."""
return self.coordinator.data.current_weather_data.get(
ATTR_MAP[ATTR_WEATHER_WIND_BEARING]
)
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def forecast(self):
"""Return the forecast array."""
if self._hourly:
met_forecast = self.coordinator.data.hourly_forecast
else:
met_forecast = self.coordinator.data.daily_forecast
required_keys = {ATTR_FORECAST_TEMP, ATTR_FORECAST_TIME}
ha_forecast = []
for met_item in met_forecast:
if not set(met_item).issuperset(required_keys):
continue
ha_item = {
k: met_item[v] for k, v in FORECAST_MAP.items() if met_item.get(v)
}
if ha_item.get(ATTR_FORECAST_CONDITION):
ha_item[ATTR_FORECAST_CONDITION] = format_condition(
ha_item[ATTR_FORECAST_CONDITION]
)
ha_forecast.append(ha_item)
return ha_forecast
@property
def device_info(self):
"""Device info."""
return {
"identifiers": {(DOMAIN,)},
"manufacturer": "Met.no",
"model": "Forecast",
"default_name": "Forecast",
"entry_type": "service",
}
|
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
DOMAIN as BINARY_SENSOR_DOMAIN,
)
from homeassistant.components.ozw.const import DOMAIN
from homeassistant.const import ATTR_DEVICE_CLASS
from .common import setup_ozw
async def test_binary_sensor(hass, generic_data, binary_sensor_msg):
"""Test setting up config entry."""
receive_msg = await setup_ozw(hass, fixture=generic_data)
# Test Legacy sensor (disabled by default)
registry = await hass.helpers.entity_registry.async_get_registry()
entity_id = "binary_sensor.trisensor_sensor"
state = hass.states.get(entity_id)
assert state is None
entry = registry.async_get(entity_id)
assert entry
assert entry.disabled
assert entry.disabled_by == "integration"
# Test enabling legacy entity
updated_entry = registry.async_update_entity(
entry.entity_id, **{"disabled_by": None}
)
assert updated_entry != entry
assert updated_entry.disabled is False
# Test Sensor for Notification CC
state = hass.states.get("binary_sensor.trisensor_home_security_motion_detected")
assert state
assert state.state == "off"
assert state.attributes[ATTR_DEVICE_CLASS] == DEVICE_CLASS_MOTION
# Test incoming state change
receive_msg(binary_sensor_msg)
await hass.async_block_till_done()
state = hass.states.get("binary_sensor.trisensor_home_security_motion_detected")
assert state.state == "on"
async def test_sensor_enabled(hass, generic_data, binary_sensor_alt_msg):
"""Test enabling a legacy binary_sensor."""
registry = await hass.helpers.entity_registry.async_get_registry()
entry = registry.async_get_or_create(
BINARY_SENSOR_DOMAIN,
DOMAIN,
"1-37-625737744",
suggested_object_id="trisensor_sensor_instance_1_sensor",
disabled_by=None,
)
assert entry.disabled is False
receive_msg = await setup_ozw(hass, fixture=generic_data)
receive_msg(binary_sensor_alt_msg)
await hass.async_block_till_done()
state = hass.states.get(entry.entity_id)
assert state is not None
assert state.state == "on"
|
import json
from airly.exceptions import AirlyError
from homeassistant import data_entry_flow
from homeassistant.components.airly.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import (
CONF_API_KEY,
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
HTTP_FORBIDDEN,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry, load_fixture
CONFIG = {
CONF_NAME: "abcd",
CONF_API_KEY: "foo",
CONF_LATITUDE: 123,
CONF_LONGITUDE: 456,
}
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == SOURCE_USER
async def test_invalid_api_key(hass):
"""Test that errors are shown when API key is invalid."""
with patch(
"airly._private._RequestsHandler.get",
side_effect=AirlyError(
HTTP_FORBIDDEN, {"message": "Invalid authentication credentials"}
),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["errors"] == {"base": "invalid_api_key"}
async def test_invalid_location(hass):
"""Test that errors are shown when location is invalid."""
with patch(
"airly._private._RequestsHandler.get",
return_value=json.loads(load_fixture("airly_no_station.json")),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["errors"] == {"base": "wrong_location"}
async def test_duplicate_error(hass):
"""Test that errors are shown when duplicates are added."""
with patch(
"airly._private._RequestsHandler.get",
return_value=json.loads(load_fixture("airly_valid_station.json")),
):
MockConfigEntry(domain=DOMAIN, unique_id="123-456", data=CONFIG).add_to_hass(
hass
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
async def test_create_entry(hass):
"""Test that the user step works."""
with patch(
"airly._private._RequestsHandler.get",
return_value=json.loads(load_fixture("airly_valid_station.json")),
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=CONFIG
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == CONFIG[CONF_NAME]
assert result["data"][CONF_LATITUDE] == CONFIG[CONF_LATITUDE]
assert result["data"][CONF_LONGITUDE] == CONFIG[CONF_LONGITUDE]
assert result["data"][CONF_API_KEY] == CONFIG[CONF_API_KEY]
|
from weblate.lang.models import Language
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
help = "Populates language definitions"
def add_arguments(self, parser):
parser.add_argument(
"--no-update",
action="store_false",
dest="update",
default=True,
help="Prevents updates to existing language definitions",
)
def handle(self, *args, **options):
"""Create default set of languages."""
kwargs = {}
if options["verbosity"] >= 1:
kwargs["logger"] = self.stdout.write
Language.objects.setup(options["update"], **kwargs)
|
import numpy as np
from scipy.sparse import csr_matrix
class CSRMatrixFactory:
''' Factory class to create a csr_matrix.
'''
def __init__(self, dtype=np.int32):
self.rows = []
self.cols = []
self.data = []
self._max_col = 0
self._max_row = 0
self._dtype = dtype
def __setitem__(self, row_col, datum):
'''Insert a value into the matrix
Parameters
----------
row_col : tuple
Row and column indices
datum : float or int
Numeric value to insert into the matrix
>>> mat_fact = CSRMatrixFactory()
>>> mat_fact[3,1] = 1
Returns
-------
        None
'''
row, col = row_col
self.rows.append(row)
self.cols.append(col)
self.data.append(datum)
if row > self._max_row: self._max_row = row
if col > self._max_col: self._max_col = col
if isinstance(datum, float):
self._dtype = type(datum)
def set_last_col_idx(self, last_col_idx):
'''
Parameters
----------
param last_col_idx : int
number of columns
'''
assert last_col_idx >= self._max_col
self._max_col = last_col_idx
return self
def set_last_row_idx(self, last_row_idx):
'''
Parameters
----------
param last_row_idx : int
number of rows
'''
assert last_row_idx >= self._max_row
self._max_row = last_row_idx
return self
def get_csr_matrix(self, dtype = None, make_square = False):
shape = (self._max_row + 1, self._max_col + 1)
if make_square:
shape = (max(shape), max(shape))
return csr_matrix((self.data, (self.rows, self.cols)),
shape=shape,
dtype=self._dtype if dtype is None else dtype)
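# Usage sketch (not part of the original module): incrementally filling a
# factory and materialising the sparse matrix. Indices and values are
# arbitrary illustrations.
def _csr_matrix_factory_example():
    mat_fact = CSRMatrixFactory()
    mat_fact[0, 2] = 1
    mat_fact[3, 1] = 2
    # Shape is inferred from the largest indices seen: (4, 3) here.
    return mat_fact.get_csr_matrix().todense()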
def delete_columns(mat, columns_to_delete):
'''
    >>> a = csr_matrix(np.array([[0, 1, 3, 0, 1, 0],
    ...                          [0, 0, 1, 0, 1, 1]]))
>>> delete_columns(a, [1,2]).todense()
matrix([[0, 0, 1, 0],
[0, 0, 1, 1]])
Parameters
----------
mat : csr_matrix
columns_to_delete : list[int]
Returns
-------
    csr_matrix with the columns at the indices in columns_to_delete removed
'''
column_mask = np.ones(mat.shape[1], dtype=bool)
column_mask[columns_to_delete] = 0
return mat.tocsc()[:, column_mask].tocsr()
|
import os
import os.path as op
from setuptools import setup
# get the version (don't import mne here, so dependencies are not needed)
version = None
with open(op.join('mne', '_version.py'), 'r') as fid:
for line in (line.strip() for line in fid):
if line.startswith('__version__'):
version = line.split('=')[1].strip().strip('\'')
break
if version is None:
raise RuntimeError('Could not determine version')
descr = """MNE python project for MEG and EEG data analysis."""
DISTNAME = 'mne'
DESCRIPTION = descr
MAINTAINER = 'Alexandre Gramfort'
MAINTAINER_EMAIL = '[email protected]'
URL = 'https://mne.tools/dev/'
LICENSE = 'BSD (3-clause)'
DOWNLOAD_URL = 'http://github.com/mne-tools/mne-python'
VERSION = version
def package_tree(pkgroot):
"""Get the submodule list."""
# Adapted from VisPy
path = op.dirname(__file__)
subdirs = [op.relpath(i[0], path).replace(op.sep, '.')
for i in os.walk(op.join(path, pkgroot))
if '__init__.py' in i[2]]
return sorted(subdirs)
if __name__ == "__main__":
if op.exists('MANIFEST'):
os.remove('MANIFEST')
with open('README.rst', 'r') as fid:
long_description = fid.read()
setup(name=DISTNAME,
maintainer=MAINTAINER,
include_package_data=True,
maintainer_email=MAINTAINER_EMAIL,
description=DESCRIPTION,
license=LICENSE,
url=URL,
version=VERSION,
download_url=DOWNLOAD_URL,
long_description=long_description,
long_description_content_type='text/x-rst',
zip_safe=False, # the package can run out of an .egg file
classifiers=['Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Topic :: Software Development',
'Topic :: Scientific/Engineering',
'Operating System :: Microsoft :: Windows',
'Operating System :: POSIX',
'Operating System :: Unix',
'Operating System :: MacOS',
'Programming Language :: Python :: 3',
],
keywords='neuroscience neuroimaging MEG EEG ECoG fNIRS brain',
project_urls={
'Documentation': 'https://mne.tools/',
'Source': 'https://github.com/mne-tools/mne-python/',
'Tracker': 'https://github.com/mne-tools/mne-python/issues/',
},
platforms='any',
python_requires='>=3.6',
install_requires=['numpy>=1.11.3', 'scipy>=0.17.1'],
packages=package_tree('mne'),
package_data={'mne': [
op.join('data', '*.sel'),
op.join('data', 'icos.fif.gz'),
op.join('data', 'coil_def*.dat'),
op.join('data', 'helmets', '*.fif.gz'),
op.join('data', 'FreeSurferColorLUT.txt'),
op.join('data', 'image', '*gif'),
op.join('data', 'image', '*lout'),
op.join('data', 'fsaverage', '*.fif'),
op.join('channels', 'data', 'layouts', '*.lout'),
op.join('channels', 'data', 'layouts', '*.lay'),
op.join('channels', 'data', 'montages', '*.sfp'),
op.join('channels', 'data', 'montages', '*.txt'),
op.join('channels', 'data', 'montages', '*.elc'),
op.join('channels', 'data', 'neighbors', '*.mat'),
op.join('datasets', 'sleep_physionet', 'SHA1SUMS'),
op.join('gui', 'help', '*.json'),
op.join('html', '*.js'),
op.join('html', '*.css'),
op.join('icons', '*.svg'),
op.join('icons', '*.png'),
op.join('io', 'artemis123', 'resources', '*.csv'),
op.join('io', 'edf', 'gdf_encodes.txt')
]},
entry_points={'console_scripts': [
'mne = mne.commands.utils:main',
]})
|
import speech_recognition as sr
from kalliope.core import Utils
from kalliope.stt.Utils import SpeechRecognition
class Apiai(SpeechRecognition):
def __init__(self, callback=None, **kwargs):
"""
Start recording the microphone and analyse audio with Apiai api
:param callback: The callback function to call to send the text
:param kwargs:
"""
# pass the audio file path to process directly to the parent class if one exists
SpeechRecognition.__init__(self, kwargs.get('audio_file_path', None))
# callback function to call after the speech-to-text translation
self.main_controller_callback = callback
self.key = kwargs.get('key', None)
self.language = kwargs.get('language', "en")
self.session_id = kwargs.get('session_id', None)
self.show_all = kwargs.get('show_all', False)
# start listening in the background
self.set_callback(self.apiai_callback)
# start processing, record a sample from the microphone if no audio file path provided, else read the file
self.start_processing()
def apiai_callback(self, recognizer, audio):
"""
called from the background thread
:param recognizer:
:param audio:
:return:
"""
try:
captured_audio = recognizer.recognize_api(audio,
client_access_token=self.key,
language=self.language,
session_id=self.session_id,
show_all=self.show_all)
Utils.print_success("Apiai Speech Recognition thinks you said %s" % captured_audio)
self._analyse_audio(captured_audio)
except sr.UnknownValueError as e:
Utils.print_warning("Apiai Speech Recognition could not understand audio; {0}".format(e))
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except sr.RequestError as e:
Utils.print_danger("Could not request results from Apiai Speech Recognition service; {0}".format(e))
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except AssertionError:
Utils.print_warning("No audio caught from microphone")
self._analyse_audio(audio_to_text=None)
def _analyse_audio(self, audio_to_text):
"""
Confirm the audio exists and run it in a Callback
:param audio_to_text: the captured audio
"""
if self.main_controller_callback is not None:
self.main_controller_callback(audio_to_text)
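# A minimal usage sketch (not part of the original module): wire the Apiai STT
# engine to a callback. The key value is a placeholder, not a real api.ai
# client access token.
if __name__ == '__main__':
    def order_received(text):
        print("Captured order: %s" % text)

    Apiai(callback=order_received, key="YOUR_CLIENT_ACCESS_TOKEN", language="en")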
|
import numpy as np
from scipy import linalg
from numpy.testing import assert_almost_equal, assert_array_almost_equal
from mne.time_frequency import stft, istft, stftfreq
from mne.time_frequency._stft import stft_norm2
def test_stft():
"""Test stft and istft tight frame property."""
sfreq = 1000. # Hz
f = 7. # Hz
for T in [127, 128]: # try with even and odd numbers
# Test with low frequency signal
t = np.arange(T).astype(np.float64)
x = np.sin(2 * np.pi * f * t / sfreq)
x = np.array([x, x + 1.])
wsize = 128
tstep = 4
X = stft(x, wsize, tstep)
xp = istft(X, tstep, Tx=T)
freqs = stftfreq(wsize, sfreq=1000)
max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]
assert X.shape[1] == len(freqs)
assert np.all(freqs >= 0.)
assert np.abs(max_freq - f) < 1.
assert_array_almost_equal(x, xp, decimal=6)
# norm conservation thanks to tight frame property
assert_almost_equal(np.sqrt(stft_norm2(X)),
[linalg.norm(xx) for xx in x], decimal=6)
# Test with random signal
x = np.random.randn(2, T)
wsize = 16
tstep = 8
X = stft(x, wsize, tstep)
xp = istft(X, tstep, Tx=T)
freqs = stftfreq(wsize, sfreq=1000)
max_freq = freqs[np.argmax(np.sum(np.abs(X[0]) ** 2, axis=1))]
assert X.shape[1] == len(freqs)
assert np.all(freqs >= 0.)
assert_array_almost_equal(x, xp, decimal=6)
# norm conservation thanks to tight frame property
assert_almost_equal(np.sqrt(stft_norm2(X)),
[linalg.norm(xx) for xx in x],
decimal=6)
# Try with empty array
x = np.zeros((0, T))
X = stft(x, wsize, tstep)
xp = istft(X, tstep, T)
assert xp.shape == x.shape
|
from tests.async_mock import patch
async def test_get_id_empty(hass, hass_storage):
"""Get unique ID."""
uuid = await hass.helpers.instance_id.async_get()
assert uuid is not None
# Assert it's stored
assert hass_storage["core.uuid"]["data"]["uuid"] == uuid
async def test_get_id_migrate(hass, hass_storage):
"""Migrate existing file."""
with patch(
"homeassistant.util.json.load_json", return_value={"uuid": "1234"}
), patch("os.path.isfile", return_value=True), patch("os.remove") as mock_remove:
uuid = await hass.helpers.instance_id.async_get()
assert uuid == "1234"
# Assert it's stored
assert hass_storage["core.uuid"]["data"]["uuid"] == uuid
# assert old deleted
assert len(mock_remove.mock_calls) == 1
|
class AlexaGlobalCatalog:
"""The Global Alexa catalog.
https://developer.amazon.com/docs/device-apis/resources-and-assets.html#global-alexa-catalog
You can use the global Alexa catalog for pre-defined names of devices, settings, values, and units.
This catalog is localized into all the languages that Alexa supports.
You can reference the following catalog of pre-defined friendly names.
Each item in the following list is an asset identifier followed by its supported friendly names.
The first friendly name for each identifier is the one displayed in the Alexa mobile app.
"""
# Air Purifier, Air Cleaner, Clean Air Machine
DEVICE_NAME_AIR_PURIFIER = "Alexa.DeviceName.AirPurifier"
# Fan, Blower
DEVICE_NAME_FAN = "Alexa.DeviceName.Fan"
# Router, Internet Router, Network Router, Wifi Router, Net Router
DEVICE_NAME_ROUTER = "Alexa.DeviceName.Router"
# Shade, Blind, Curtain, Roller, Shutter, Drape, Awning, Window shade, Interior blind
DEVICE_NAME_SHADE = "Alexa.DeviceName.Shade"
# Shower
DEVICE_NAME_SHOWER = "Alexa.DeviceName.Shower"
# Space Heater, Portable Heater
DEVICE_NAME_SPACE_HEATER = "Alexa.DeviceName.SpaceHeater"
# Washer, Washing Machine
DEVICE_NAME_WASHER = "Alexa.DeviceName.Washer"
# 2.4G Guest Wi-Fi, 2.4G Guest Network, Guest Network 2.4G, 2G Guest Wifi
SETTING_2G_GUEST_WIFI = "Alexa.Setting.2GGuestWiFi"
# 5G Guest Wi-Fi, 5G Guest Network, Guest Network 5G, 5G Guest Wifi
SETTING_5G_GUEST_WIFI = "Alexa.Setting.5GGuestWiFi"
# Auto, Automatic, Automatic Mode, Auto Mode
SETTING_AUTO = "Alexa.Setting.Auto"
# Direction
SETTING_DIRECTION = "Alexa.Setting.Direction"
# Dry Cycle, Dry Preset, Dry Setting, Dryer Cycle, Dryer Preset, Dryer Setting
SETTING_DRY_CYCLE = "Alexa.Setting.DryCycle"
# Fan Speed, Airflow speed, Wind Speed, Air speed, Air velocity
SETTING_FAN_SPEED = "Alexa.Setting.FanSpeed"
# Guest Wi-fi, Guest Network, Guest Net
SETTING_GUEST_WIFI = "Alexa.Setting.GuestWiFi"
# Heat
SETTING_HEAT = "Alexa.Setting.Heat"
# Mode
SETTING_MODE = "Alexa.Setting.Mode"
# Night, Night Mode
SETTING_NIGHT = "Alexa.Setting.Night"
# Opening, Height, Lift, Width
SETTING_OPENING = "Alexa.Setting.Opening"
# Oscillate, Swivel, Oscillation, Spin, Back and forth
SETTING_OSCILLATE = "Alexa.Setting.Oscillate"
# Preset, Setting
SETTING_PRESET = "Alexa.Setting.Preset"
# Quiet, Quiet Mode, Noiseless, Silent
SETTING_QUIET = "Alexa.Setting.Quiet"
# Temperature, Temp
SETTING_TEMPERATURE = "Alexa.Setting.Temperature"
# Wash Cycle, Wash Preset, Wash setting
SETTING_WASH_CYCLE = "Alexa.Setting.WashCycle"
# Water Temperature, Water Temp, Water Heat
SETTING_WATER_TEMPERATURE = "Alexa.Setting.WaterTemperature"
# Handheld Shower, Shower Wand, Hand Shower
SHOWER_HAND_HELD = "Alexa.Shower.HandHeld"
# Rain Head, Overhead shower, Rain Shower, Rain Spout, Rain Faucet
SHOWER_RAIN_HEAD = "Alexa.Shower.RainHead"
# Degrees, Degree
UNIT_ANGLE_DEGREES = "Alexa.Unit.Angle.Degrees"
# Radians, Radian
UNIT_ANGLE_RADIANS = "Alexa.Unit.Angle.Radians"
# Feet, Foot
UNIT_DISTANCE_FEET = "Alexa.Unit.Distance.Feet"
# Inches, Inch
UNIT_DISTANCE_INCHES = "Alexa.Unit.Distance.Inches"
# Kilometers
UNIT_DISTANCE_KILOMETERS = "Alexa.Unit.Distance.Kilometers"
# Meters, Meter, m
UNIT_DISTANCE_METERS = "Alexa.Unit.Distance.Meters"
# Miles, Mile
UNIT_DISTANCE_MILES = "Alexa.Unit.Distance.Miles"
# Yards, Yard
UNIT_DISTANCE_YARDS = "Alexa.Unit.Distance.Yards"
# Grams, Gram, g
UNIT_MASS_GRAMS = "Alexa.Unit.Mass.Grams"
# Kilograms, Kilogram, kg
UNIT_MASS_KILOGRAMS = "Alexa.Unit.Mass.Kilograms"
# Percent
UNIT_PERCENT = "Alexa.Unit.Percent"
# Celsius, Degrees Celsius, Degrees, C, Centigrade, Degrees Centigrade
UNIT_TEMPERATURE_CELSIUS = "Alexa.Unit.Temperature.Celsius"
# Degrees, Degree
UNIT_TEMPERATURE_DEGREES = "Alexa.Unit.Temperature.Degrees"
# Fahrenheit, Degrees Fahrenheit, Degrees F, Degrees, F
UNIT_TEMPERATURE_FAHRENHEIT = "Alexa.Unit.Temperature.Fahrenheit"
# Kelvin, Degrees Kelvin, Degrees K, Degrees, K
UNIT_TEMPERATURE_KELVIN = "Alexa.Unit.Temperature.Kelvin"
# Cubic Feet, Cubic Foot
UNIT_VOLUME_CUBIC_FEET = "Alexa.Unit.Volume.CubicFeet"
# Cubic Meters, Cubic Meter, Meters Cubed
UNIT_VOLUME_CUBIC_METERS = "Alexa.Unit.Volume.CubicMeters"
# Gallons, Gallon
UNIT_VOLUME_GALLONS = "Alexa.Unit.Volume.Gallons"
# Liters, Liter, L
UNIT_VOLUME_LITERS = "Alexa.Unit.Volume.Liters"
# Pints, Pint
UNIT_VOLUME_PINTS = "Alexa.Unit.Volume.Pints"
# Quarts, Quart
UNIT_VOLUME_QUARTS = "Alexa.Unit.Volume.Quarts"
# Ounces, Ounce, oz
UNIT_WEIGHT_OUNCES = "Alexa.Unit.Weight.Ounces"
# Pounds, Pound, lbs
UNIT_WEIGHT_POUNDS = "Alexa.Unit.Weight.Pounds"
# Close
VALUE_CLOSE = "Alexa.Value.Close"
# Delicates, Delicate
VALUE_DELICATE = "Alexa.Value.Delicate"
# High
VALUE_HIGH = "Alexa.Value.High"
# Low
VALUE_LOW = "Alexa.Value.Low"
# Maximum, Max
VALUE_MAXIMUM = "Alexa.Value.Maximum"
# Medium, Mid
VALUE_MEDIUM = "Alexa.Value.Medium"
# Minimum, Min
VALUE_MINIMUM = "Alexa.Value.Minimum"
# Open
VALUE_OPEN = "Alexa.Value.Open"
# Quick Wash, Fast Wash, Wash Quickly, Speed Wash
VALUE_QUICK_WASH = "Alexa.Value.QuickWash"
class AlexaCapabilityResource:
"""Base class for Alexa capabilityResources, modeResources, and presetResources objects.
Resource object labels must be unique across all modeResources and presetResources within the same device.
To provide support for all supported locales, include one label from the AlexaGlobalCatalog in the labels array.
You cannot use any words from the following list as friendly names:
https://developer.amazon.com/docs/alexa/device-apis/resources-and-assets.html#names-you-cannot-use
https://developer.amazon.com/docs/device-apis/resources-and-assets.html#capability-resources
"""
def __init__(self, labels):
"""Initialize an Alexa resource."""
self._resource_labels = []
for label in labels:
self._resource_labels.append(label)
def serialize_capability_resources(self):
"""Return capabilityResources object serialized for an API response."""
return self.serialize_labels(self._resource_labels)
@staticmethod
def serialize_configuration():
"""Return ModeResources, PresetResources friendlyNames serialized for an API response."""
return []
@staticmethod
def serialize_labels(resources):
"""Return resource label objects for friendlyNames serialized for an API response."""
labels = []
for label in resources:
if label in AlexaGlobalCatalog.__dict__.values():
label = {"@type": "asset", "value": {"assetId": label}}
else:
label = {"@type": "text", "value": {"text": label, "locale": "en-US"}}
labels.append(label)
return {"friendlyNames": labels}
class AlexaModeResource(AlexaCapabilityResource):
"""Implements Alexa ModeResources.
https://developer.amazon.com/docs/device-apis/resources-and-assets.html#capability-resources
"""
def __init__(self, labels, ordered=False):
"""Initialize an Alexa modeResource."""
super().__init__(labels)
self._supported_modes = []
self._mode_ordered = ordered
def add_mode(self, value, labels):
"""Add mode to the supportedModes object."""
self._supported_modes.append({"value": value, "labels": labels})
def serialize_configuration(self):
"""Return configuration for ModeResources friendlyNames serialized for an API response."""
mode_resources = []
for mode in self._supported_modes:
result = {
"value": mode["value"],
"modeResources": self.serialize_labels(mode["labels"]),
}
mode_resources.append(result)
return {"ordered": self._mode_ordered, "supportedModes": mode_resources}
class AlexaPresetResource(AlexaCapabilityResource):
"""Implements Alexa PresetResources.
Use presetResources with RangeController to provide a set of friendlyNames for each RangeController preset.
https://developer.amazon.com/docs/device-apis/resources-and-assets.html#presetresources
"""
def __init__(self, labels, min_value, max_value, precision, unit=None):
"""Initialize an Alexa presetResource."""
super().__init__(labels)
self._presets = []
self._minimum_value = min_value
self._maximum_value = max_value
self._precision = precision
self._unit_of_measure = None
if unit in AlexaGlobalCatalog.__dict__.values():
self._unit_of_measure = unit
def add_preset(self, value, labels):
"""Add preset to configuration presets array."""
self._presets.append({"value": value, "labels": labels})
def serialize_configuration(self):
"""Return configuration for PresetResources friendlyNames serialized for an API response."""
configuration = {
"supportedRange": {
"minimumValue": self._minimum_value,
"maximumValue": self._maximum_value,
"precision": self._precision,
}
}
if self._unit_of_measure:
configuration["unitOfMeasure"] = self._unit_of_measure
if self._presets:
preset_resources = [
{
"rangeValue": preset["value"],
"presetResources": self.serialize_labels(preset["labels"]),
}
for preset in self._presets
]
configuration["presets"] = preset_resources
return configuration
class AlexaSemantics:
"""Class for Alexa Semantics Object.
You can optionally enable additional utterances by using semantics. When you use semantics,
you manually map the phrases "open", "close", "raise", and "lower" to directives.
Semantics is supported for the following interfaces only: ModeController, RangeController, and ToggleController.
Semantics stateMappings are only supported for one interface of the same type on the same device. If a device has
multiple RangeControllers only one interface may use stateMappings otherwise discovery will fail.
You can support semantics actionMappings on different controllers for the same device; however, each controller must
support different phrases. For example, you can support "raise" on a RangeController, and "open" on a ModeController,
but you can't support "open" on both RangeController and ModeController. Semantics stateMappings are only supported
for one interface on the same device.
https://developer.amazon.com/docs/device-apis/alexa-discovery.html#semantics-object
"""
MAPPINGS_ACTION = "actionMappings"
MAPPINGS_STATE = "stateMappings"
ACTIONS_TO_DIRECTIVE = "ActionsToDirective"
STATES_TO_VALUE = "StatesToValue"
STATES_TO_RANGE = "StatesToRange"
ACTION_CLOSE = "Alexa.Actions.Close"
ACTION_LOWER = "Alexa.Actions.Lower"
ACTION_OPEN = "Alexa.Actions.Open"
ACTION_RAISE = "Alexa.Actions.Raise"
STATES_OPEN = "Alexa.States.Open"
STATES_CLOSED = "Alexa.States.Closed"
DIRECTIVE_RANGE_SET_VALUE = "SetRangeValue"
DIRECTIVE_RANGE_ADJUST_VALUE = "AdjustRangeValue"
DIRECTIVE_TOGGLE_TURN_ON = "TurnOn"
DIRECTIVE_TOGGLE_TURN_OFF = "TurnOff"
DIRECTIVE_MODE_SET_MODE = "SetMode"
DIRECTIVE_MODE_ADJUST_MODE = "AdjustMode"
def __init__(self):
"""Initialize an Alexa modeResource."""
self._action_mappings = []
self._state_mappings = []
def _add_action_mapping(self, semantics):
"""Add action mapping between actions and interface directives."""
self._action_mappings.append(semantics)
def _add_state_mapping(self, semantics):
"""Add state mapping between states and interface directives."""
self._state_mappings.append(semantics)
def add_states_to_value(self, states, value):
"""Add StatesToValue stateMappings."""
self._add_state_mapping(
{"@type": self.STATES_TO_VALUE, "states": states, "value": value}
)
def add_states_to_range(self, states, min_value, max_value):
"""Add StatesToRange stateMappings."""
self._add_state_mapping(
{
"@type": self.STATES_TO_RANGE,
"states": states,
"range": {"minimumValue": min_value, "maximumValue": max_value},
}
)
def add_action_to_directive(self, actions, directive, payload):
"""Add ActionsToDirective actionMappings."""
self._add_action_mapping(
{
"@type": self.ACTIONS_TO_DIRECTIVE,
"actions": actions,
"directive": {"name": directive, "payload": payload},
}
)
def serialize_semantics(self):
"""Return semantics object serialized for an API response."""
semantics = {}
if self._action_mappings:
semantics[self.MAPPINGS_ACTION] = self._action_mappings
if self._state_mappings:
semantics[self.MAPPINGS_STATE] = self._state_mappings
return semantics
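# A minimal usage sketch (not part of the original module): build a fan-speed
# mode resource and inspect what discovery would serialize. The mode values
# "speed.low" / "speed.high" are illustrative.
if __name__ == "__main__":
    fan_speed = AlexaModeResource([AlexaGlobalCatalog.SETTING_FAN_SPEED], ordered=True)
    fan_speed.add_mode("speed.low", [AlexaGlobalCatalog.VALUE_LOW, "Low"])
    fan_speed.add_mode("speed.high", [AlexaGlobalCatalog.VALUE_HIGH, "High"])
    print(fan_speed.serialize_capability_resources())
    print(fan_speed.serialize_configuration())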
|
import io
import contextlib
import urllib.parse
from sys import exc_info as _exc_info
from traceback import format_exception as _format_exception
from xml.sax import saxutils
import html
from more_itertools import always_iterable
import cherrypy
from cherrypy._cpcompat import ntob
from cherrypy._cpcompat import tonative
from cherrypy._helper import classproperty
from cherrypy.lib import httputil as _httputil
class CherryPyException(Exception):
"""A base class for CherryPy exceptions."""
pass
class InternalRedirect(CherryPyException):
"""Exception raised to switch to the handler for a different URL.
This exception will redirect processing to another path within the site
(without informing the client). Provide the new path as an argument when
raising the exception. Provide any params in the querystring for the new
URL.
"""
def __init__(self, path, query_string=''):
self.request = cherrypy.serving.request
self.query_string = query_string
if '?' in path:
# Separate any params included in the path
path, self.query_string = path.split('?', 1)
# Note that urljoin will "do the right thing" whether url is:
# 1. a URL relative to root (e.g. "/dummy")
# 2. a URL relative to the current path
# Note that any query string will be discarded.
path = urllib.parse.urljoin(self.request.path_info, path)
# Set a 'path' member attribute so that code which traps this
# error can have access to it.
self.path = path
CherryPyException.__init__(self, path, self.query_string)
class HTTPRedirect(CherryPyException):
"""Exception raised when the request should be redirected.
This exception will force an HTTP redirect to the URL or URLs you give it.
The new URL must be passed as the first argument to the Exception,
e.g., HTTPRedirect(newUrl). Multiple URLs are allowed in a list.
If a URL is absolute, it will be used as-is. If it is relative, it is
assumed to be relative to the current cherrypy.request.path_info.
If one of the provided URLs is a unicode object, it will be encoded
using the default encoding or the one passed in parameter.
There are multiple types of redirect, from which you can select via the
``status`` argument. If you do not provide a ``status`` arg, it defaults to
303 (or 302 if responding with HTTP/1.0).
Examples::
raise cherrypy.HTTPRedirect("")
raise cherrypy.HTTPRedirect("/abs/path", 307)
raise cherrypy.HTTPRedirect(["path1", "path2?a=1&b=2"], 301)
See :ref:`redirectingpost` for additional caveats.
"""
urls = None
"""The list of URL's to emit."""
encoding = 'utf-8'
"""The encoding when passed urls are not native strings"""
def __init__(self, urls, status=None, encoding=None):
self.urls = abs_urls = [
# Note that urljoin will "do the right thing" whether url is:
# 1. a complete URL with host (e.g. "http://www.example.com/test")
# 2. a URL relative to root (e.g. "/dummy")
# 3. a URL relative to the current path
# Note that any query string in cherrypy.request is discarded.
urllib.parse.urljoin(
cherrypy.url(),
tonative(url, encoding or self.encoding),
)
for url in always_iterable(urls)
]
status = (
int(status)
if status is not None
else self.default_status
)
if not 300 <= status <= 399:
raise ValueError('status must be between 300 and 399.')
CherryPyException.__init__(self, abs_urls, status)
@classproperty
def default_status(cls):
"""
The default redirect status for the request.
RFC 2616 indicates a 301 response code fits our goal; however,
browser support for 301 is quite messy. Use 302/303 instead. See
http://www.alanflavell.org.uk/www/post-redirect.html
"""
return 303 if cherrypy.serving.request.protocol >= (1, 1) else 302
@property
def status(self):
"""The integer HTTP status code to emit."""
_, status = self.args[:2]
return status
def set_response(self):
"""Modify cherrypy.response status, headers, and body to represent
self.
CherryPy uses this internally, but you can also use it to create an
HTTPRedirect object and set its output without *raising* the exception.
"""
response = cherrypy.serving.response
response.status = status = self.status
if status in (300, 301, 302, 303, 307, 308):
response.headers['Content-Type'] = 'text/html;charset=utf-8'
# "The ... URI SHOULD be given by the Location field
# in the response."
response.headers['Location'] = self.urls[0]
# "Unless the request method was HEAD, the entity of the response
# SHOULD contain a short hypertext note with a hyperlink to the
# new URI(s)."
msg = {
300: 'This resource can be found at ',
301: 'This resource has permanently moved to ',
302: 'This resource resides temporarily at ',
303: 'This resource can be found at ',
307: 'This resource has moved temporarily to ',
308: 'This resource has been moved to ',
}[status]
msg += '<a href=%s>%s</a>.'
msgs = [
msg % (saxutils.quoteattr(u), html.escape(u, quote=False))
for u in self.urls
]
response.body = ntob('<br />\n'.join(msgs), 'utf-8')
# Previous code may have set C-L, so we have to reset it
# (allow finalize to set it).
response.headers.pop('Content-Length', None)
elif status == 304:
# Not Modified.
# "The response MUST include the following header fields:
# Date, unless its omission is required by section 14.18.1"
# The "Date" header should have been set in Response.__init__
# "...the response SHOULD NOT include other entity-headers."
for key in ('Allow', 'Content-Encoding', 'Content-Language',
'Content-Length', 'Content-Location', 'Content-MD5',
'Content-Range', 'Content-Type', 'Expires',
'Last-Modified'):
if key in response.headers:
del response.headers[key]
# "The 304 response MUST NOT contain a message-body."
response.body = None
# Previous code may have set C-L, so we have to reset it.
response.headers.pop('Content-Length', None)
elif status == 305:
# Use Proxy.
# self.urls[0] should be the URI of the proxy.
response.headers['Location'] = ntob(self.urls[0], 'utf-8')
response.body = None
# Previous code may have set C-L, so we have to reset it.
response.headers.pop('Content-Length', None)
else:
raise ValueError('The %s status code is unknown.' % status)
def __call__(self):
"""Use this exception as a request.handler (raise self)."""
raise self
def clean_headers(status):
"""Remove any headers which should not apply to an error response."""
response = cherrypy.serving.response
# Remove headers which applied to the original content,
# but do not apply to the error page.
respheaders = response.headers
for key in ['Accept-Ranges', 'Age', 'ETag', 'Location', 'Retry-After',
'Vary', 'Content-Encoding', 'Content-Length', 'Expires',
'Content-Location', 'Content-MD5', 'Last-Modified']:
if key in respheaders:
del respheaders[key]
if status != 416:
# A server sending a response with status code 416 (Requested
# range not satisfiable) SHOULD include a Content-Range field
# with a byte-range-resp-spec of "*". The instance-length
# specifies the current length of the selected resource.
# A response with status code 206 (Partial Content) MUST NOT
# include a Content-Range field with a byte-range-resp-spec of "*".
if 'Content-Range' in respheaders:
del respheaders['Content-Range']
class HTTPError(CherryPyException):
"""Exception used to return an HTTP error code (4xx-5xx) to the client.
This exception can be used to automatically send a response using an
HTTP status code, with an appropriate error page. It takes an optional
``status`` argument (which must be between 400 and 599); it defaults to 500
("Internal Server Error"). It also takes an optional ``message`` argument,
which will be returned in the response body. See
`RFC2616 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4>`_
for a complete list of available error codes and when to use them.
Examples::
raise cherrypy.HTTPError(403)
raise cherrypy.HTTPError(
"403 Forbidden", "You are not allowed to access this resource.")
"""
status = None
"""The HTTP status code. May be of type int or str (with a Reason-Phrase).
"""
code = None
"""The integer HTTP status code."""
reason = None
"""The HTTP Reason-Phrase string."""
def __init__(self, status=500, message=None):
self.status = status
try:
self.code, self.reason, defaultmsg = _httputil.valid_status(status)
except ValueError:
raise self.__class__(500, _exc_info()[1].args[0])
if self.code < 400 or self.code > 599:
raise ValueError('status must be between 400 and 599.')
# See http://www.python.org/dev/peps/pep-0352/
# self.message = message
self._message = message or defaultmsg
CherryPyException.__init__(self, status, message)
def set_response(self):
"""Modify cherrypy.response status, headers, and body to represent
self.
CherryPy uses this internally, but you can also use it to create an
HTTPError object and set its output without *raising* the exception.
"""
response = cherrypy.serving.response
clean_headers(self.code)
# In all cases, finalize will be called after this method,
# so don't bother cleaning up response values here.
response.status = self.status
tb = None
if cherrypy.serving.request.show_tracebacks:
tb = format_exc()
response.headers.pop('Content-Length', None)
content = self.get_error_page(self.status, traceback=tb,
message=self._message)
response.body = content
_be_ie_unfriendly(self.code)
def get_error_page(self, *args, **kwargs):
return get_error_page(*args, **kwargs)
def __call__(self):
"""Use this exception as a request.handler (raise self)."""
raise self
@classmethod
@contextlib.contextmanager
def handle(cls, exception, status=500, message=''):
"""Translate exception into an HTTPError."""
try:
yield
except exception as exc:
raise cls(status, message or str(exc))
class NotFound(HTTPError):
"""Exception raised when a URL could not be mapped to any handler (404).
This is equivalent to raising
:class:`HTTPError("404 Not Found") <cherrypy._cperror.HTTPError>`.
"""
def __init__(self, path=None):
if path is None:
request = cherrypy.serving.request
path = request.script_name + request.path_info
self.args = (path,)
HTTPError.__init__(self, 404, "The path '%s' was not found." % path)
_HTTPErrorTemplate = '''<!DOCTYPE html PUBLIC
"-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"></meta>
<title>%(status)s</title>
<style type="text/css">
#powered_by {
margin-top: 20px;
border-top: 2px solid black;
font-style: italic;
}
#traceback {
color: red;
}
</style>
</head>
<body>
<h2>%(status)s</h2>
<p>%(message)s</p>
<pre id="traceback">%(traceback)s</pre>
<div id="powered_by">
<span>
Powered by <a href="http://www.cherrypy.org">CherryPy %(version)s</a>
</span>
</div>
</body>
</html>
'''
def get_error_page(status, **kwargs):
"""Return an HTML page, containing a pretty error response.
status should be an int or a str.
kwargs will be interpolated into the page template.
"""
try:
code, reason, message = _httputil.valid_status(status)
except ValueError:
raise cherrypy.HTTPError(500, _exc_info()[1].args[0])
# We can't use setdefault here, because some
# callers send None for kwarg values.
if kwargs.get('status') is None:
kwargs['status'] = '%s %s' % (code, reason)
if kwargs.get('message') is None:
kwargs['message'] = message
if kwargs.get('traceback') is None:
kwargs['traceback'] = ''
if kwargs.get('version') is None:
kwargs['version'] = cherrypy.__version__
for k, v in kwargs.items():
if v is None:
kwargs[k] = ''
else:
kwargs[k] = html.escape(kwargs[k], quote=False)
# Use a custom template or callable for the error page?
pages = cherrypy.serving.request.error_page
error_page = pages.get(code) or pages.get('default')
# Default template, can be overridden below.
template = _HTTPErrorTemplate
if error_page:
try:
if hasattr(error_page, '__call__'):
# The caller function may be setting headers manually,
# so we delegate to it completely. We may be returning
# an iterator as well as a string here.
#
# We *must* make sure any content is not unicode.
result = error_page(**kwargs)
if cherrypy.lib.is_iterator(result):
from cherrypy.lib.encoding import UTF8StreamEncoder
return UTF8StreamEncoder(result)
elif isinstance(result, str):
return result.encode('utf-8')
else:
if not isinstance(result, bytes):
raise ValueError(
'error page function did not '
'return a bytestring, str or an '
'iterator - returned object of type %s.'
% (type(result).__name__))
return result
else:
# Load the template from this path.
template = io.open(error_page, newline='').read()
except Exception:
e = _format_exception(*_exc_info())[-1]
m = kwargs['message']
if m:
m += '<br />'
m += 'In addition, the custom error page failed:\n<br />%s' % e
kwargs['message'] = m
response = cherrypy.serving.response
response.headers['Content-Type'] = 'text/html;charset=utf-8'
result = template % kwargs
return result.encode('utf-8')
_ie_friendly_error_sizes = {
400: 512, 403: 256, 404: 512, 405: 256,
406: 512, 408: 512, 409: 512, 410: 256,
500: 512, 501: 512, 505: 512,
}
def _be_ie_unfriendly(status):
response = cherrypy.serving.response
# For some statuses, Internet Explorer 5+ shows "friendly error
# messages" instead of our response.body if the body is smaller
# than a given size. Fix this by returning a body over that size
# (by adding whitespace).
# See http://support.microsoft.com/kb/q218155/
s = _ie_friendly_error_sizes.get(status, 0)
if s:
s += 1
# Since we are issuing an HTTP error status, we assume that
# the entity is short, and we should just collapse it.
content = response.collapse_body()
content_length = len(content)
if content_length and content_length < s:
# IN ADDITION: the response must be written to IE
# in one chunk or it will still get replaced! Bah.
content = content + (b' ' * (s - content_length))
response.body = content
response.headers['Content-Length'] = str(len(content))
def format_exc(exc=None):
"""Return exc (or sys.exc_info if None), formatted."""
try:
if exc is None:
exc = _exc_info()
if exc == (None, None, None):
return ''
import traceback
return ''.join(traceback.format_exception(*exc))
finally:
del exc
def bare_error(extrabody=None):
"""Produce status, headers, body for a critical error.
Returns a triple without calling any other questionable functions,
so it should be as error-free as possible. Call it from an HTTP server
if you get errors outside of the request.
If extrabody is None, a friendly but rather unhelpful error message
is set in the body. If extrabody is a string, it will be appended
as-is to the body.
"""
# The whole point of this function is to be a last line-of-defense
# in handling errors. That is, it must not raise any errors itself;
# it cannot be allowed to fail. Therefore, don't add to it!
# In particular, don't call any other CP functions.
body = b'Unrecoverable error in the server.'
if extrabody is not None:
if not isinstance(extrabody, bytes):
extrabody = extrabody.encode('utf-8')
body += b'\n' + extrabody
return (b'500 Internal Server Error',
[(b'Content-Type', b'text/plain'),
(b'Content-Length', ntob(str(len(body)), 'ISO-8859-1'))],
[body])
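# A minimal usage sketch (not part of the original module): how a handler would
# typically raise these exceptions. Root and ITEMS are illustrative names.
if __name__ == '__main__':
    ITEMS = {'1': 'first item'}

    class Root:
        @cherrypy.expose
        def old(self):
            # Permanent redirect to the new location.
            raise HTTPRedirect('/new', 301)

        @cherrypy.expose
        def item(self, item_id='1'):
            # Translate a missing key into a 404 response.
            with HTTPError.handle(KeyError, 404, 'No such item'):
                return ITEMS[item_id]

    # cherrypy.quickstart(Root())  # would serve the handlers above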
|
import os.path as op
import numpy as np
from numpy.testing import assert_array_almost_equal, assert_equal
import pytest
from mne import io, Epochs, read_events, pick_types
from mne.utils import requires_sklearn, run_tests_if_main
from mne.decoding import compute_ems, EMS
data_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
curdir = op.join(op.dirname(__file__))
raw_fname = op.join(data_dir, 'test_raw.fif')
event_name = op.join(data_dir, 'test-eve.fif')
tmin, tmax = -0.2, 0.5
event_id = dict(aud_l=1, vis_l=3)
@requires_sklearn
def test_ems():
"""Test event-matched spatial filters."""
from sklearn.model_selection import StratifiedKFold
raw = io.read_raw_fif(raw_fname, preload=False)
# create unequal number of events
events = read_events(event_name)
events[-2, 2] = 3
picks = pick_types(raw.info, meg=True, stim=False, ecg=False,
eog=False, exclude='bads')
picks = picks[1:13:3]
epochs = Epochs(raw, events, event_id, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
pytest.raises(ValueError, compute_ems, epochs, ['aud_l', 'vis_l'])
epochs.equalize_event_counts(epochs.event_id)
pytest.raises(KeyError, compute_ems, epochs, ['blah', 'hahah'])
surrogates, filters, conditions = compute_ems(epochs)
assert_equal(list(set(conditions)), [1, 3])
events = read_events(event_name)
event_id2 = dict(aud_l=1, aud_r=2, vis_l=3)
epochs = Epochs(raw, events, event_id2, tmin, tmax, picks=picks,
baseline=(None, 0), preload=True)
epochs.equalize_event_counts(epochs.event_id)
n_expected = sum([len(epochs[k]) for k in ['aud_l', 'vis_l']])
pytest.raises(ValueError, compute_ems, epochs)
surrogates, filters, conditions = compute_ems(epochs, ['aud_r', 'vis_l'])
assert_equal(n_expected, len(surrogates))
assert_equal(n_expected, len(conditions))
assert_equal(list(set(conditions)), [2, 3])
# test compute_ems cv
epochs = epochs['aud_r', 'vis_l']
epochs.equalize_event_counts(epochs.event_id)
cv = StratifiedKFold(n_splits=3)
compute_ems(epochs, cv=cv)
compute_ems(epochs, cv=2)
pytest.raises(ValueError, compute_ems, epochs, cv='foo')
pytest.raises(ValueError, compute_ems, epochs, cv=len(epochs) + 1)
raw.close()
# EMS transformer, check that identical to compute_ems
X = epochs.get_data()
y = epochs.events[:, 2]
X = X / np.std(X) # X scaled outside cv in compute_ems
Xt, coefs = list(), list()
ems = EMS()
assert_equal(ems.__repr__(), '<EMS: not fitted.>')
# manual leave-one-out to avoid sklearn version problem
for test in range(len(y)):
train = np.setdiff1d(range(len(y)), np.atleast_1d(test))
ems.fit(X[train], y[train])
coefs.append(ems.filters_)
Xt.append(ems.transform(X[[test]]))
assert_equal(ems.__repr__(), '<EMS: fitted with 4 filters on 2 classes.>')
assert_array_almost_equal(filters, np.mean(coefs, axis=0))
assert_array_almost_equal(surrogates, np.vstack(Xt))
run_tests_if_main()
|
import inspect
import json
import logging
import os
import sys
import tempfile
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict
import appdirs
from discord.utils import deprecated
from . import commands
__all__ = [
"create_temp_config",
"load_basic_configuration",
"cog_data_path",
"core_data_path",
"load_bundled_data",
"bundled_data_path",
"storage_details",
"storage_type",
]
log = logging.getLogger("red.data_manager")
basic_config = None
instance_name = None
basic_config_default: Dict[str, Any] = {
"DATA_PATH": None,
"COG_PATH_APPEND": "cogs",
"CORE_PATH_APPEND": "core",
}
config_dir = None
appdir = appdirs.AppDirs("Red-DiscordBot")
if sys.platform == "linux":
if 0 < os.getuid() < 1000: # pylint: disable=no-member
config_dir = Path(appdir.site_data_dir)
if not config_dir:
config_dir = Path(appdir.user_config_dir)
config_file = config_dir / "config.json"
def create_temp_config():
"""
Creates a default instance for Red, so it can be run
without creating an instance first.
.. warning:: The data of this instance will be removed
on next system restart.
"""
name = "temporary_red"
default_dirs = deepcopy(basic_config_default)
default_dirs["DATA_PATH"] = tempfile.mkdtemp()
default_dirs["STORAGE_TYPE"] = "JSON"
default_dirs["STORAGE_DETAILS"] = {}
with config_file.open("r", encoding="utf-8") as fs:
config = json.load(fs)
config[name] = default_dirs
with config_file.open("w", encoding="utf-8") as fs:
json.dump(config, fs, indent=4)
def load_basic_configuration(instance_name_: str):
"""Loads the basic bootstrap configuration necessary for `Config`
to know where to store or look for data.
.. important::
It is necessary to call this function BEFORE getting any `Config`
objects!
Parameters
----------
instance_name_ : str
The instance name given by CLI argument and created during
redbot setup.
"""
global basic_config
global instance_name
instance_name = instance_name_
try:
with config_file.open(encoding="utf-8") as fs:
config = json.load(fs)
except FileNotFoundError:
print(
"You need to configure the bot instance using `redbot-setup`"
" prior to running the bot."
)
sys.exit(1)
try:
basic_config = config[instance_name]
except KeyError:
print(
"Instance with this name doesn't exist."
" You can create new instance using `redbot-setup` prior to running the bot."
)
sys.exit(1)
def _base_data_path() -> Path:
if basic_config is None:
raise RuntimeError("You must load the basic config before you can get the base data path.")
path = basic_config["DATA_PATH"]
return Path(path).resolve()
def cog_data_path(cog_instance=None, raw_name: str = None) -> Path:
"""Gets the base cog data path. If you want to get the folder with
which to store your own cog's data please pass in an instance
of your cog class.
Either ``cog_instance`` or ``raw_name`` will be used, not both.
Parameters
----------
cog_instance
The instance of the cog you wish to get a data path for.
If calling from a command or method of your cog, this should be ``self``.
raw_name : str
The name of the cog to get a data path for.
Returns
-------
pathlib.Path
If ``cog_instance`` is provided it will return a path to a folder
dedicated to a given cog. Otherwise it will return a path to the
folder that contains data for all cogs.
"""
try:
base_data_path = Path(_base_data_path())
except RuntimeError as e:
raise RuntimeError(
"You must load the basic config before you can get the cog data path."
) from e
cog_path = base_data_path / basic_config["COG_PATH_APPEND"]
if raw_name is not None:
cog_path = cog_path / raw_name
elif cog_instance is not None:
cog_path = cog_path / cog_instance.__class__.__name__
cog_path.mkdir(exist_ok=True, parents=True)
return cog_path.resolve()
def core_data_path() -> Path:
try:
base_data_path = Path(_base_data_path())
except RuntimeError as e:
raise RuntimeError(
"You must load the basic config before you can get the core data path."
) from e
core_path = base_data_path / basic_config["CORE_PATH_APPEND"]
core_path.mkdir(exist_ok=True, parents=True)
return core_path.resolve()
# noinspection PyUnusedLocal
@deprecated("bundled_data_path() without calling this function")
def load_bundled_data(cog_instance, init_location: str):
pass
def bundled_data_path(cog_instance: commands.Cog) -> Path:
"""
Get the path to the "data" directory bundled with this cog.
The bundled data folder must be located alongside the ``.py`` file
which contains the cog class.
.. important::
You should *NEVER* write to this directory.
Parameters
----------
cog_instance
An instance of your cog. If calling from a command or method of
your cog, this should be ``self``.
Returns
-------
pathlib.Path
Path object to the bundled data folder.
Raises
------
FileNotFoundError
If no bundled data folder exists.
"""
bundled_path = Path(inspect.getfile(cog_instance.__class__)).parent / "data"
if not bundled_path.is_dir():
raise FileNotFoundError("No such directory {}".format(bundled_path))
return bundled_path
def storage_type() -> str:
"""Gets the storage type as a string.
Returns
-------
str
Storage type.
"""
try:
return basic_config["STORAGE_TYPE"]
except KeyError as e:
raise RuntimeError("Bot basic config has not been loaded yet.") from e
def storage_details() -> dict:
"""Gets any details necessary for config drivers to load.
These are set on setup.
Returns
-------
dict
Storage details.
"""
return basic_config.get("STORAGE_DETAILS", {})
|
from enum import Enum
from homeassistant.components.climate.const import (
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_COMFORT,
PRESET_ECO,
PRESET_SLEEP,
)
DOMAIN = "knx"
CONF_STATE_ADDRESS = "state_address"
CONF_SYNC_STATE = "sync_state"
class ColorTempModes(Enum):
"""Color temperature modes for config validation."""
absolute = "DPT-7.600"
relative = "DPT-5.001"
class SupportedPlatforms(Enum):
"""Supported platforms."""
cover = "cover"
light = "light"
binary_sensor = "binary_sensor"
climate = "climate"
switch = "switch"
notify = "notify"
scene = "scene"
sensor = "sensor"
weather = "weather"
# Map KNX operation modes to HA modes. This list might not be complete.
OPERATION_MODES = {
# Map DPT 20.105 HVAC control modes
"Auto": HVAC_MODE_AUTO,
"Heat": HVAC_MODE_HEAT,
"Cool": HVAC_MODE_COOL,
"Off": HVAC_MODE_OFF,
"Fan only": HVAC_MODE_FAN_ONLY,
"Dry": HVAC_MODE_DRY,
}
PRESET_MODES = {
# Map DPT 20.102 HVAC operating modes to HA presets
"Frost Protection": PRESET_ECO,
"Night": PRESET_SLEEP,
"Standby": PRESET_AWAY,
"Comfort": PRESET_COMFORT,
}
ATTR_COUNTER = "counter"
|
from pprint import pprint
from riko import get_path
from riko.bado import coroutine
from riko.collections import SyncPipe, AsyncPipe
p1_conf = {'url': get_path('gigs.json'), 'path': 'value.items'}
p2_conf = {'uniq_key': 'link'}
p3_conf = {
'combine': 'or',
'mode': 'block',
'rule': [{'field': 'title', 'value': 'php', 'op': 'contains'}]}
p4_conf = {'rule': [{'sort_key': 'pubDate', 'sort_dir': 'desc'}]}
def pipe(test=False):
stream = (SyncPipe('fetchdata', conf=p1_conf, test=test)
.uniq(conf=p2_conf)
.filter(conf=p3_conf)
.sort(conf=p4_conf)
.list)
for i in stream:
pprint(i)
return stream
@coroutine
def async_pipe(reactor, test=False):
stream = yield (AsyncPipe('fetchdata', conf=p1_conf, test=test)
.uniq(conf=p2_conf)
.filter(conf=p3_conf)
.sort(conf=p4_conf)
.output)
for i in stream:
pprint(i)
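# A minimal usage note (not part of the original example): the synchronous
# pipeline can be run directly; the asynchronous variant expects a Twisted
# reactor to be supplied by the caller.
if __name__ == '__main__':
    pipe()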
|
import voluptuous as vol
from homeassistant.const import CONF_ICON, CONF_URL
import homeassistant.helpers.config_validation as cv
DOMAIN = "panel_iframe"
CONF_TITLE = "title"
CONF_RELATIVE_URL_ERROR_MSG = "Invalid relative URL. Absolute path required."
CONF_RELATIVE_URL_REGEX = r"\A/"
CONF_REQUIRE_ADMIN = "require_admin"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: cv.schema_with_slug_keys(
vol.Schema(
{
# pylint: disable=no-value-for-parameter
vol.Optional(CONF_TITLE): cv.string,
vol.Optional(CONF_ICON): cv.icon,
vol.Optional(CONF_REQUIRE_ADMIN, default=False): cv.boolean,
vol.Required(CONF_URL): vol.Any(
vol.Match(
CONF_RELATIVE_URL_REGEX, msg=CONF_RELATIVE_URL_ERROR_MSG
),
vol.Url(),
),
}
)
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass, config):
"""Set up the iFrame frontend panels."""
for url_path, info in config[DOMAIN].items():
hass.components.frontend.async_register_built_in_panel(
"iframe",
info.get(CONF_TITLE),
info.get(CONF_ICON),
url_path,
{"url": info[CONF_URL]},
require_admin=info[CONF_REQUIRE_ADMIN],
)
return True
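# A configuration sketch (illustrative, not taken verbatim from the docs): a
# YAML entry this schema would accept. The panel name and URL are placeholders.
#
# panel_iframe:
#   router:
#     title: "Router"
#     icon: mdi:router-wireless
#     url: "http://192.168.1.1"
#     require_admin: true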
|
from collections import OrderedDict, defaultdict
import json
from typing import Dict
from .model import Config, Integration
BASE = """
\"\"\"Automatically generated by hassfest.
To update, run python3 -m script.hassfest
\"\"\"
# fmt: off
ZEROCONF = {}
HOMEKIT = {}
""".strip()
def generate_and_validate(integrations: Dict[str, Integration]):
"""Validate and generate zeroconf data."""
service_type_dict = defaultdict(list)
homekit_dict = {}
for domain in sorted(integrations):
integration = integrations[domain]
if not integration.manifest:
continue
service_types = integration.manifest.get("zeroconf", [])
homekit = integration.manifest.get("homekit", {})
homekit_models = homekit.get("models", [])
if not (service_types or homekit_models):
continue
for entry in service_types:
data = {"domain": domain}
if isinstance(entry, dict):
typ = entry["type"]
entry_without_type = entry.copy()
del entry_without_type["type"]
data.update(entry_without_type)
else:
typ = entry
service_type_dict[typ].append(data)
for model in homekit_models:
if model in homekit_dict:
integration.add_error(
"zeroconf",
f"Integrations {domain} and {homekit_dict[model]} "
"have overlapping HomeKit models",
)
break
homekit_dict[model] = domain
# HomeKit models are matched on starting string, make sure none overlap.
warned = set()
for key in homekit_dict:
if key in warned:
continue
# n^2 yoooo
for key_2 in homekit_dict:
if key == key_2 or key_2 in warned:
continue
if key.startswith(key_2) or key_2.startswith(key):
integration.add_error(
"zeroconf",
f"Integrations {homekit_dict[key]} and {homekit_dict[key_2]} "
"have overlapping HomeKit models",
)
warned.add(key)
warned.add(key_2)
break
zeroconf = OrderedDict(
(key, service_type_dict[key]) for key in sorted(service_type_dict)
)
homekit = OrderedDict((key, homekit_dict[key]) for key in sorted(homekit_dict))
return BASE.format(json.dumps(zeroconf, indent=4), json.dumps(homekit, indent=4))
def validate(integrations: Dict[str, Integration], config: Config):
"""Validate zeroconf file."""
zeroconf_path = config.root / "homeassistant/generated/zeroconf.py"
config.cache["zeroconf"] = content = generate_and_validate(integrations)
if config.specific_integrations:
return
with open(str(zeroconf_path)) as fp:
current = fp.read().strip()
if current != content:
config.add_error(
"zeroconf",
"File zeroconf.py is not up to date. Run python3 -m script.hassfest",
fixable=True,
)
return
def generate(integrations: Dict[str, Integration], config: Config):
"""Generate zeroconf file."""
zeroconf_path = config.root / "homeassistant/generated/zeroconf.py"
with open(str(zeroconf_path), "w") as fp:
fp.write(f"{config.cache['zeroconf']}\n")
|
from typing import cast
import os.path
from PyQt5.QtCore import QUrl
from PyQt5.QtGui import QFont
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import QWebPage
from qutebrowser.config import config, websettings
from qutebrowser.config.websettings import AttributeInfo as Attr
from qutebrowser.utils import standarddir, urlutils
from qutebrowser.browser import shared
# The global WebKitSettings object
global_settings = cast('WebKitSettings', None)
parsed_user_agent = None
class WebKitSettings(websettings.AbstractSettings):
"""A wrapper for the config for QWebSettings."""
_ATTRIBUTES = {
'content.images':
Attr(QWebSettings.AutoLoadImages),
'content.javascript.enabled':
Attr(QWebSettings.JavascriptEnabled),
'content.javascript.can_open_tabs_automatically':
Attr(QWebSettings.JavascriptCanOpenWindows),
'content.javascript.can_close_tabs':
Attr(QWebSettings.JavascriptCanCloseWindows),
'content.javascript.can_access_clipboard':
Attr(QWebSettings.JavascriptCanAccessClipboard),
'content.plugins':
Attr(QWebSettings.PluginsEnabled),
'content.webgl':
Attr(QWebSettings.WebGLEnabled),
'content.hyperlink_auditing':
Attr(QWebSettings.HyperlinkAuditingEnabled),
'content.local_content_can_access_remote_urls':
Attr(QWebSettings.LocalContentCanAccessRemoteUrls),
'content.local_content_can_access_file_urls':
Attr(QWebSettings.LocalContentCanAccessFileUrls),
'content.dns_prefetch':
Attr(QWebSettings.DnsPrefetchEnabled),
'content.frame_flattening':
Attr(QWebSettings.FrameFlatteningEnabled),
'content.cache.appcache':
Attr(QWebSettings.OfflineWebApplicationCacheEnabled),
'content.local_storage':
Attr(QWebSettings.LocalStorageEnabled,
QWebSettings.OfflineStorageDatabaseEnabled),
'content.print_element_backgrounds':
Attr(QWebSettings.PrintElementBackgrounds),
'content.xss_auditing':
Attr(QWebSettings.XSSAuditingEnabled),
'content.site_specific_quirks':
Attr(QWebSettings.SiteSpecificQuirksEnabled),
'input.spatial_navigation':
Attr(QWebSettings.SpatialNavigationEnabled),
'input.links_included_in_focus_chain':
Attr(QWebSettings.LinksIncludedInFocusChain),
'zoom.text_only':
Attr(QWebSettings.ZoomTextOnly),
'scrolling.smooth':
Attr(QWebSettings.ScrollAnimatorEnabled),
}
_FONT_SIZES = {
'fonts.web.size.minimum':
QWebSettings.MinimumFontSize,
'fonts.web.size.minimum_logical':
QWebSettings.MinimumLogicalFontSize,
'fonts.web.size.default':
QWebSettings.DefaultFontSize,
'fonts.web.size.default_fixed':
QWebSettings.DefaultFixedFontSize,
}
_FONT_FAMILIES = {
'fonts.web.family.standard': QWebSettings.StandardFont,
'fonts.web.family.fixed': QWebSettings.FixedFont,
'fonts.web.family.serif': QWebSettings.SerifFont,
'fonts.web.family.sans_serif': QWebSettings.SansSerifFont,
'fonts.web.family.cursive': QWebSettings.CursiveFont,
'fonts.web.family.fantasy': QWebSettings.FantasyFont,
}
# Mapping from QWebSettings::QWebSettings() in
# qtwebkit/Source/WebKit/qt/Api/qwebsettings.cpp
_FONT_TO_QFONT = {
QWebSettings.StandardFont: QFont.Serif,
QWebSettings.FixedFont: QFont.Monospace,
QWebSettings.SerifFont: QFont.Serif,
QWebSettings.SansSerifFont: QFont.SansSerif,
QWebSettings.CursiveFont: QFont.Cursive,
QWebSettings.FantasyFont: QFont.Fantasy,
}
def _set_user_stylesheet(settings):
"""Set the generated user-stylesheet."""
stylesheet = shared.get_user_stylesheet().encode('utf-8')
url = urlutils.data_url('text/css;charset=utf-8', stylesheet)
settings.setUserStyleSheetUrl(url)
def _set_cookie_accept_policy(settings):
"""Update the content.cookies.accept setting."""
mapping = {
'all': QWebSettings.AlwaysAllowThirdPartyCookies,
'no-3rdparty': QWebSettings.AlwaysBlockThirdPartyCookies,
'never': QWebSettings.AlwaysBlockThirdPartyCookies,
'no-unknown-3rdparty': QWebSettings.AllowThirdPartyWithExistingCookies,
}
value = config.val.content.cookies.accept
settings.setThirdPartyCookiePolicy(mapping[value])
def _set_cache_maximum_pages(settings):
"""Update the content.cache.maximum_pages setting."""
value = config.val.content.cache.maximum_pages
settings.setMaximumPagesInCache(value)
def _update_settings(option):
"""Update global settings when qwebsettings changed."""
global_settings.update_setting(option)
settings = QWebSettings.globalSettings()
if option in ['scrollbar.hide', 'content.user_stylesheets']:
_set_user_stylesheet(settings)
elif option == 'content.cookies.accept':
_set_cookie_accept_policy(settings)
elif option == 'content.cache.maximum_pages':
_set_cache_maximum_pages(settings)
def _init_user_agent():
global parsed_user_agent
ua = QWebPage().userAgentForUrl(QUrl())
parsed_user_agent = websettings.UserAgent.parse(ua)
def init():
"""Initialize the global QWebSettings."""
cache_path = standarddir.cache()
data_path = standarddir.data()
QWebSettings.setIconDatabasePath(standarddir.cache())
QWebSettings.setOfflineWebApplicationCachePath(
os.path.join(cache_path, 'application-cache'))
QWebSettings.globalSettings().setLocalStoragePath(
os.path.join(data_path, 'local-storage'))
QWebSettings.setOfflineStoragePath(
os.path.join(data_path, 'offline-storage'))
settings = QWebSettings.globalSettings()
_set_user_stylesheet(settings)
_set_cookie_accept_policy(settings)
_set_cache_maximum_pages(settings)
_init_user_agent()
config.instance.changed.connect(_update_settings)
global global_settings
global_settings = WebKitSettings(QWebSettings.globalSettings())
global_settings.init_settings()
def shutdown():
"""Disable storage so removing tmpdir will work."""
QWebSettings.setIconDatabasePath('')
QWebSettings.setOfflineWebApplicationCachePath('')
QWebSettings.globalSettings().setLocalStoragePath('')
|
from test import CollectorTestCase
from test import get_collector_config
from test import run_only
from mock import patch
from slony import SlonyCollector
def run_only_if_psycopg2_is_available(func):
try:
import psycopg2
except ImportError:
psycopg2 = None
pred = lambda: psycopg2 is not None
return run_only(func, pred)
class TestSlonyCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SlonyCollector', {})
self.collector = SlonyCollector(config, None)
def test_import(self):
self.assertTrue(SlonyCollector)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
@patch.object(SlonyCollector, 'publish')
def test_default(self, publish, _get_stats_by_database):
_get_stats_by_database.return_value = [('foo', 7)]
self.collector.collect()
_get_stats_by_database.assert_called_with(
'localhost',
5432,
'postgres',
'postgres',
'postgres',
'_postgres',
'Node [0-9]+ - postgres@localhost',
)
self.assertPublished(publish, 'foo', 7)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
@patch.object(SlonyCollector, 'publish')
def test_instances(self, publish, _get_stats_by_database):
def side_effect(host, port, user, pwd, slony_db, slony_schema, node):
if (slony_db, slony_schema) == ('postgres', '_postgres'):
return [('foo', 7)]
elif (slony_db, slony_schema) == ('data', '_data'):
return [('bar', 14)]
_get_stats_by_database.side_effect = side_effect
config = get_collector_config('SlonyCollector', {
'instances': {
'alpha': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
},
'beta': {
'slony_db': 'data',
'slony_schema': '_data',
},
}
})
collector = SlonyCollector(config, None)
collector.collect()
self.assertPublished(publish, 'foo', 7)
self.assertPublished(publish, 'bar', 14)
@run_only_if_psycopg2_is_available
@patch.object(SlonyCollector, '_get_stats_by_database')
def test_override_user_password_nodestr(self, _get_stats_by_database):
config = get_collector_config('SlonyCollector', {
'instances': {
'alpha': {
'slony_db': 'postgres',
'slony_schema': '_postgres',
'user': 'postgres',
'password': 'postgres',
'slony_node_string': '(.*)',
},
'beta': {
'slony_db': 'data',
'slony_schema': '_data',
'user': 'data',
'password': 'data',
'slony_node_string': 'Node (.*)',
},
}
})
collector = SlonyCollector(config, None)
collector.collect()
_get_stats_by_database.assert_any_call(
'localhost', 5432, 'postgres', 'postgres',
'postgres', '_postgres', '(.*)'
)
_get_stats_by_database.assert_any_call(
'localhost', 5432, 'data', 'data',
'data', '_data', 'Node (.*)'
)
|
from typing import Callable, Dict, Union
from pyisy.constants import ISY_VALUE_UNKNOWN
from homeassistant.components.sensor import DOMAIN as SENSOR
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
_LOGGER,
DOMAIN as ISY994_DOMAIN,
ISY994_NODES,
ISY994_VARIABLES,
UOM_DOUBLE_TEMP,
UOM_FRIENDLY_NAME,
UOM_INDEX,
UOM_ON_OFF,
UOM_TO_STATES,
)
from .entity import ISYEntity, ISYNodeEntity
from .helpers import convert_isy_value_to_hass, migrate_old_unique_ids
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 sensor platform."""
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
devices = []
for node in hass_isy_data[ISY994_NODES][SENSOR]:
_LOGGER.debug("Loading %s", node.name)
devices.append(ISYSensorEntity(node))
for vname, vobj in hass_isy_data[ISY994_VARIABLES]:
devices.append(ISYSensorVariableEntity(vname, vobj))
await migrate_old_unique_ids(hass, SENSOR, devices)
async_add_entities(devices)
class ISYSensorEntity(ISYNodeEntity):
"""Representation of an ISY994 sensor device."""
@property
def raw_unit_of_measurement(self) -> Union[dict, str]:
"""Get the raw unit of measurement for the ISY994 sensor device."""
uom = self._node.uom
# Backwards compatibility for ISYv4 Firmware:
if isinstance(uom, list):
return UOM_FRIENDLY_NAME.get(uom[0], uom[0])
# Special cases for ISY UOM index units:
isy_states = UOM_TO_STATES.get(uom)
if isy_states:
return isy_states
if uom in [UOM_ON_OFF, UOM_INDEX]:
return uom
return UOM_FRIENDLY_NAME.get(uom)
@property
def state(self) -> str:
"""Get the state of the ISY994 sensor device."""
value = self._node.status
if value == ISY_VALUE_UNKNOWN:
return None
# Get the translated ISY Unit of Measurement
uom = self.raw_unit_of_measurement
# Check if this is a known index pair UOM
if isinstance(uom, dict):
return uom.get(value, value)
if uom in [UOM_INDEX, UOM_ON_OFF]:
return self._node.formatted
# Handle ISY precision and rounding
value = convert_isy_value_to_hass(value, uom, self._node.prec)
# Convert temperatures to Home Assistant's unit
if uom in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
value = self.hass.config.units.temperature(value, uom)
return value
@property
def unit_of_measurement(self) -> str:
"""Get the Home Assistant unit of measurement for the device."""
raw_units = self.raw_unit_of_measurement
# Check if this is a known index pair UOM
if isinstance(raw_units, dict) or raw_units in [UOM_ON_OFF, UOM_INDEX]:
return None
if raw_units in (TEMP_FAHRENHEIT, TEMP_CELSIUS, UOM_DOUBLE_TEMP):
return self.hass.config.units.temperature_unit
return raw_units
class ISYSensorVariableEntity(ISYEntity):
"""Representation of an ISY994 variable as a sensor device."""
def __init__(self, vname: str, vobj: object) -> None:
"""Initialize the ISY994 binary sensor program."""
super().__init__(vobj)
self._name = vname
@property
def state(self):
"""Return the state of the variable."""
return convert_isy_value_to_hass(self._node.status, "", self._node.prec)
@property
def device_state_attributes(self) -> Dict:
"""Get the state attributes for the device."""
return {
"init_value": convert_isy_value_to_hass(
self._node.init, "", self._node.prec
)
}
@property
def icon(self):
"""Return the icon."""
return "mdi:counter"
|
import io
import pkgutil
from collections import Counter
from re import split
from sys import version_info
import pandas as pd
from scattertext.features.FeatsFromSpacyDoc import FeatsFromSpacyDoc
class FeatsFromMoralFoundationsDictionary(FeatsFromSpacyDoc):
def __init__(self,
use_lemmas=False,
entity_types_to_censor=set(),
tag_types_to_censor=set(),
strip_final_period=False,
**kwargs):
'''
Parameters
----------
Other parameters from FeatsFromSpacyDoc.__init__
'''
self._lexicon_df = self._load_mfd()
super(FeatsFromMoralFoundationsDictionary, self).__init__(use_lemmas,
entity_types_to_censor,
tag_types_to_censor,
strip_final_period)
def _load_mfd(self):
return pd.read_csv(
io.StringIO(pkgutil.get_data('scattertext', 'data/mfd2.0.csv').decode('utf-8'))
).set_index('term')
def _analyze(self, doc):
text_df = (pd.DataFrame(pd.Series(Counter(t for t in split(r"(\W)", doc.lower()) if t.strip())))
.join(self._lexicon_df)
.dropna()
.groupby('cat')
.sum()
)
return text_df
def get_definitions(self):
'''
These definitions are from https://osf.io/xakyw/
:return: dict
'''
return {
'care.virtue': '...acted with kindness, compassion, or empathy, or nurtured another person.',
'care.vice': '...acted with cruelty, or hurt or harmed another person/animal and caused suffering.',
'fairness.virtue': '...acted in a fair manner, promoting equality, justice, or rights.',
'fairness.vice': '...was unfair or cheated, or caused an injustice or engaged in fraud.',
'loyalty.virtue': '...acted with fidelity, or as a team player, or was loyal or patriotic.',
'loyalty.vice': '...acted disloyal, betrayed someone, was disloyal, or was a traitor.',
'authority.virtue': '...obeyed, or acted with respect for authority or tradition.',
'authority.vice': '...disobeyed or showed disrespect, or engaged in subversion or caused chaos',
'sanctity.virtue': '...acted in a way that was wholesome or sacred, or displayed purity or sanctity',
'sanctity.vice': '...was depraved, degrading, impure, or unnatural.'
}
def get_doc_metadata(self, doc, prefix=''):
topic_counter = Counter()
if version_info[0] >= 3:
doc = str(doc)
for topic_category, score in self._analyze(doc).to_dict()[0].items():
topic_counter[prefix + topic_category] = int(score)
return topic_counter
def has_metadata_term_list(self):
return True
def get_top_model_term_lists(self):
return self._lexicon_df.reset_index().groupby('cat')['term'].apply(list).to_dict()
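if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module. get_doc_metadata()
    # casts its argument to str and tokenizes on non-word characters, so a plain
    # string is enough to see per-category counts against the bundled MFD 2.0
    # lexicon. The sentence below and the categories it hits are illustrative;
    # actual counts depend on the lexicon contents.
    mfd_feats = FeatsFromMoralFoundationsDictionary()
    print(mfd_feats.get_doc_metadata('the judge acted with kindness and justice'))
    print(sorted(mfd_feats.get_definitions()))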
|
import logging
import pytest
def test_log_debug():
logging.debug('foo')
def test_log_warning():
with pytest.raises(pytest.fail.Exception):
logging.warning('foo')
def test_log_expected(caplog):
with caplog.at_level(logging.ERROR):
logging.error('foo')
def test_log_expected_logger(caplog):
logger = 'logfail_test_logger'
with caplog.at_level(logging.ERROR, logger):
logging.getLogger(logger).error('foo')
def test_log_expected_wrong_level(caplog):
with pytest.raises(pytest.fail.Exception):
with caplog.at_level(logging.ERROR):
logging.critical('foo')
def test_log_expected_logger_wrong_level(caplog):
logger = 'logfail_test_logger'
with pytest.raises(pytest.fail.Exception):
with caplog.at_level(logging.ERROR, logger):
logging.getLogger(logger).critical('foo')
def test_log_expected_wrong_logger(caplog):
logger = 'logfail_test_logger'
with pytest.raises(pytest.fail.Exception):
with caplog.at_level(logging.ERROR, logger):
logging.error('foo')
|
from typing import Callable, Sequence, Type, Any, Iterable
from dedupe import predicates
class Variable(object):
def __len__(self):
return 1
def __repr__(self):
return self.name
def __hash__(self):
return hash(self.name)
def __eq__(self, other):
return self.name == other.name
def __init__(self, definition):
if definition.get('has missing', False):
self.has_missing = True
try:
exists_pred = predicates.ExistsPredicate(definition['field'])
self.predicates.append(exists_pred)
except KeyError:
pass
else:
self.has_missing = False
class DerivedType(Variable):
type = "Derived"
def __init__(self, definition):
self.name = "(%s: %s)" % (str(definition['name']),
str(definition['type']))
super(DerivedType, self).__init__(definition)
class MissingDataType(Variable):
type = "MissingData"
def __init__(self, name):
self.name = "(%s: Not Missing)" % name
self.has_missing = False
class FieldType(Variable):
_index_thresholds: Sequence[float] = []
_index_predicates: Sequence[Type[predicates.IndexPredicate]] = []
_predicate_functions: Sequence[Callable[[Any], Iterable[str]]] = ()
_Predicate = predicates.SimplePredicate
def __init__(self, definition):
self.field = definition['field']
if 'variable name' in definition:
self.name = definition['variable name']
else:
self.name = "(%s: %s)" % (self.field, self.type)
self.predicates = [self._Predicate(pred, self.field)
for pred in self._predicate_functions]
self.predicates += indexPredicates(self._index_predicates,
self._index_thresholds,
self.field)
super(FieldType, self).__init__(definition)
class CustomType(FieldType):
type = "Custom"
def __init__(self, definition):
super(CustomType, self).__init__(definition)
try:
self.comparator = definition["comparator"]
except KeyError:
raise KeyError("For 'Custom' field types you must define "
"a 'comparator' function in the field "
"definition. ")
if 'variable name' not in definition:
self.name = "(%s: %s, %s)" % (self.field,
self.type,
self.comparator.__name__)
def allSubclasses(cls):
for q in cls.__subclasses__():
yield q.type, q
for p in allSubclasses(q):
yield p
def indexPredicates(predicates, thresholds, field):
index_predicates = []
for predicate in predicates:
for threshold in thresholds:
index_predicates.append(predicate(threshold, field))
return index_predicates
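if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: a 'Custom' field
    # definition needs a 'field' name and a 'comparator' callable; the variable
    # name is derived from the field, the type and the comparator unless a
    # 'variable name' key is supplied. The field and comparator below are
    # illustrative only.
    def same_length(x, y):
        return 1.0 if len(x) == len(y) else 0.0
    custom = CustomType({'field': 'name', 'type': 'Custom', 'comparator': same_length})
    print(custom)                                  # (name: Custom, same_length)
    print(len(custom), custom.has_missing, custom.predicates)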
|
import logging
import sounddevice as sd
import soundfile as sf
from kalliope.core.PlayerModule import PlayerModule
logging.basicConfig()
logger = logging.getLogger("kalliope")
FS = 48000
class Sounddeviceplayer(PlayerModule):
"""
    This class represents the player object used to play all the sounds of the system.
"""
def __init__(self, **kwargs):
super(Sounddeviceplayer, self).__init__(**kwargs)
logger.debug("[Sounddeviceplayer.__init__] instance")
logger.debug("[Sounddeviceplayer.__init__] args : %s " % str(kwargs))
def play(self, file_path):
if self.convert:
self.convert_mp3_to_wav(file_path_mp3=file_path)
data, fs = sf.read(file_path)
sd.play(data, fs)
sd.wait()
|
from typing import Optional
from homeassistant.components.device_tracker import SOURCE_TYPE_ROUTER
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import callback
from homeassistant.helpers import entity_registry
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import (
API_ACCESS_POINT,
API_CLIENTS,
API_NAME,
COORDINATOR,
DOMAIN,
MANUFACTURER,
UNDO_UPDATE_LISTENERS,
)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up device tracker for Ruckus Unleashed component."""
coordinator = hass.data[DOMAIN][entry.entry_id][COORDINATOR]
tracked = set()
@callback
def router_update():
"""Update the values of the router."""
add_new_entities(coordinator, async_add_entities, tracked)
router_update()
hass.data[DOMAIN][entry.entry_id][UNDO_UPDATE_LISTENERS].append(
coordinator.async_add_listener(router_update)
)
registry = await entity_registry.async_get_registry(hass)
restore_entities(registry, coordinator, entry, async_add_entities, tracked)
@callback
def add_new_entities(coordinator, async_add_entities, tracked):
"""Add new tracker entities from the router."""
new_tracked = []
for mac in coordinator.data[API_CLIENTS]:
if mac in tracked:
continue
device = coordinator.data[API_CLIENTS][mac]
new_tracked.append(RuckusUnleashedDevice(coordinator, mac, device[API_NAME]))
tracked.add(mac)
if new_tracked:
async_add_entities(new_tracked)
@callback
def restore_entities(registry, coordinator, entry, async_add_entities, tracked):
"""Restore clients that are not a part of active clients list."""
missing = []
for entity in registry.entities.values():
if entity.config_entry_id == entry.entry_id and entity.platform == DOMAIN:
if entity.unique_id not in coordinator.data[API_CLIENTS]:
missing.append(
RuckusUnleashedDevice(
coordinator, entity.unique_id, entity.original_name
)
)
tracked.add(entity.unique_id)
if missing:
async_add_entities(missing)
class RuckusUnleashedDevice(CoordinatorEntity, ScannerEntity):
"""Representation of a Ruckus Unleashed client."""
def __init__(self, coordinator, mac, name) -> None:
"""Initialize a Ruckus Unleashed client."""
super().__init__(coordinator)
self._mac = mac
self._name = name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._mac
@property
def name(self) -> str:
"""Return the name."""
if self.is_connected:
return (
self.coordinator.data[API_CLIENTS][self._mac][API_NAME]
or f"{MANUFACTURER} {self._mac}"
)
return self._name
@property
def is_connected(self) -> bool:
"""Return true if the device is connected to the network."""
return self._mac in self.coordinator.data[API_CLIENTS]
@property
def source_type(self) -> str:
"""Return the source type."""
return SOURCE_TYPE_ROUTER
@property
def device_info(self) -> Optional[dict]:
"""Return the device information."""
if self.is_connected:
return {
"name": self.name,
"connections": {(CONNECTION_NETWORK_MAC, self._mac)},
"via_device": (
CONNECTION_NETWORK_MAC,
self.coordinator.data[API_CLIENTS][self._mac][API_ACCESS_POINT],
),
}
return None
|
from datetime import timedelta
import logging
from pyephember.pyephember import (
EphEmber,
ZoneMode,
zone_current_temperature,
zone_is_active,
zone_is_boost_active,
zone_is_hot_water,
zone_mode,
zone_name,
zone_target_temperature,
)
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SUPPORT_AUX_HEAT,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_PASSWORD,
CONF_USERNAME,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
# Return cached results if last scan was less than this time ago
SCAN_INTERVAL = timedelta(seconds=120)
OPERATION_LIST = [HVAC_MODE_HEAT_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
EPH_TO_HA_STATE = {
"AUTO": HVAC_MODE_HEAT_COOL,
"ON": HVAC_MODE_HEAT,
"OFF": HVAC_MODE_OFF,
}
HA_STATE_TO_EPH = {value: key for key, value in EPH_TO_HA_STATE.items()}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ephember thermostat."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
ember = EphEmber(username, password)
zones = ember.get_zones()
for zone in zones:
add_entities([EphEmberThermostat(ember, zone)])
except RuntimeError:
_LOGGER.error("Cannot connect to EphEmber")
return
return
class EphEmberThermostat(ClimateEntity):
"""Representation of a EphEmber thermostat."""
def __init__(self, ember, zone):
"""Initialize the thermostat."""
self._ember = ember
self._zone_name = zone_name(zone)
self._zone = zone
self._hot_water = zone_is_hot_water(zone)
@property
def supported_features(self):
"""Return the list of supported features."""
if self._hot_water:
return SUPPORT_AUX_HEAT
return SUPPORT_TARGET_TEMPERATURE | SUPPORT_AUX_HEAT
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self._zone_name
@property
def temperature_unit(self):
"""Return the unit of measurement which this thermostat uses."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return zone_current_temperature(self._zone)
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return zone_target_temperature(self._zone)
@property
def target_temperature_step(self):
"""Return the supported step of target temperature."""
if self._hot_water:
return None
return 0.5
@property
def hvac_action(self):
"""Return current HVAC action."""
if zone_is_active(self._zone):
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
mode = zone_mode(self._zone)
return self.map_mode_eph_hass(mode)
@property
def hvac_modes(self):
"""Return the supported operations."""
return OPERATION_LIST
def set_hvac_mode(self, hvac_mode):
"""Set the operation mode."""
mode = self.map_mode_hass_eph(hvac_mode)
if mode is not None:
self._ember.set_mode_by_name(self._zone_name, mode)
else:
_LOGGER.error("Invalid operation mode provided %s", hvac_mode)
@property
def is_aux_heat(self):
"""Return true if aux heater."""
return zone_is_boost_active(self._zone)
def turn_aux_heat_on(self):
"""Turn auxiliary heater on."""
self._ember.activate_boost_by_name(
self._zone_name, zone_target_temperature(self._zone)
)
def turn_aux_heat_off(self):
"""Turn auxiliary heater off."""
self._ember.deactivate_boost_by_name(self._zone_name)
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
if self._hot_water:
return
if temperature == self.target_temperature:
return
if temperature > self.max_temp or temperature < self.min_temp:
return
self._ember.set_target_temperture_by_name(self._zone_name, temperature)
@property
def min_temp(self):
"""Return the minimum temperature."""
# Hot water temp doesn't support being changed
if self._hot_water:
return zone_target_temperature(self._zone)
return 5.0
@property
def max_temp(self):
"""Return the maximum temperature."""
if self._hot_water:
return zone_target_temperature(self._zone)
return 35.0
def update(self):
"""Get the latest data."""
self._zone = self._ember.get_zone(self._zone_name)
@staticmethod
def map_mode_hass_eph(operation_mode):
"""Map from Home Assistant mode to eph mode."""
return getattr(ZoneMode, HA_STATE_TO_EPH.get(operation_mode), None)
@staticmethod
def map_mode_eph_hass(operation_mode):
"""Map from eph mode to Home Assistant mode."""
return EPH_TO_HA_STATE.get(operation_mode.name, HVAC_MODE_HEAT_COOL)
|
revision = 'b33c838cb669'
down_revision = '318b66568358'
from alembic import op
import sqlalchemy as sa
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_index('ix_certificates_not_after', 'certificates', [sa.text('not_after DESC')], unique=False)
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_index('ix_certificates_not_after', table_name='certificates')
# ### end Alembic commands ###
|
import asyncio
import logging
from spiderpy.spiderapi import SpiderApi, UnauthorizedException
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_PASSWORD, CONF_SCAN_INTERVAL, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from .const import DEFAULT_SCAN_INTERVAL, DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(
CONF_SCAN_INTERVAL, default=DEFAULT_SCAN_INTERVAL
): cv.time_period,
}
)
},
extra=vol.ALLOW_EXTRA,
)
def _spider_startup_wrapper(entry):
"""Startup wrapper for spider."""
api = SpiderApi(
entry.data[CONF_USERNAME],
entry.data[CONF_PASSWORD],
entry.data[CONF_SCAN_INTERVAL],
)
return api
async def async_setup(hass, config):
"""Set up a config entry."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
conf = config[DOMAIN]
if not hass.config_entries.async_entries(DOMAIN):
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up Spider via config entry."""
try:
hass.data[DOMAIN][entry.entry_id] = await hass.async_add_executor_job(
_spider_startup_wrapper, entry
)
except UnauthorizedException:
_LOGGER.error("Can't connect to the Spider API")
return False
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass, entry):
"""Unload Spider entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if not unload_ok:
return False
hass.data[DOMAIN].pop(entry.entry_id)
return True
|
import urllib2
import re
import diamond.collector
import json
class NginxCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(NginxCollector, self).get_default_config_help()
config_help.update({
'precision': 'Number of decimal places to report to',
'req_host': 'Hostname',
'req_port': 'Port',
'req_path': 'Path',
'req_ssl': 'SSL Support',
'req_host_header': 'HTTP Host header (required for SSL)',
})
return config_help
def get_default_config(self):
default_config = super(NginxCollector, self).get_default_config()
default_config['precision'] = 0
default_config['req_host'] = 'localhost'
default_config['req_port'] = 8080
default_config['req_path'] = '/nginx_status'
default_config['req_ssl'] = False
default_config['req_host_header'] = None
default_config['path'] = 'nginx'
return default_config
def collect_nginx(self, status):
activeConnectionsRE = re.compile(r'Active connections: (?P<conn>\d+)')
totalConnectionsRE = re.compile('^\s+(?P<conn>\d+)\s+' +
'(?P<acc>\d+)\s+(?P<req>\d+)')
connectionStatusRE = re.compile('Reading: (?P<reading>\d+) ' +
'Writing: (?P<writing>\d+) ' +
'Waiting: (?P<waiting>\d+)')
precision = int(self.config['precision'])
for l in status.readlines():
l = l.rstrip('\r\n')
if activeConnectionsRE.match(l):
self.publish_gauge(
'active_connections',
int(activeConnectionsRE.match(l).group('conn')),
precision)
elif totalConnectionsRE.match(l):
m = totalConnectionsRE.match(l)
req_per_conn = float(m.group('req')) / \
float(m.group('acc'))
self.publish_counter('conn_accepted',
int(m.group('conn')),
precision)
self.publish_counter('conn_handled',
int(m.group('acc')),
precision)
self.publish_counter('req_handled',
int(m.group('req')),
precision)
self.publish_gauge('req_per_conn',
float(req_per_conn),
precision)
elif connectionStatusRE.match(l):
m = connectionStatusRE.match(l)
self.publish_gauge('act_reads',
int(m.group('reading')),
precision)
self.publish_gauge('act_writes',
int(m.group('writing')),
precision)
self.publish_gauge('act_waits',
int(m.group('waiting')),
precision)
def collect_nginx_plus(self, status):
# Collect standard stats
self.collect_connections(status['connections'])
self.collect_requests(status['requests'])
# Collect specialty stats, if present
if 'server_zones' in status:
self.collect_server_zones(status['server_zones'])
if 'ssl' in status:
self.collect_ssl(status['ssl'])
if 'upstreams' in status:
self.collect_upstreams(status['upstreams'])
def collect_connections(self, status):
self.publish_gauge('conn.active', status['active'])
self.publish_counter('conn.accepted', status['accepted'])
self.publish_counter('conn.dropped', status['dropped'])
self.publish_gauge('conn.idle', status['idle'])
def collect_requests(self, status):
self.publish_gauge('req.current', status['current'])
self.publish_counter('req.total', status['total'])
def collect_server_zones(self, status):
for zone in status:
prefix = 'servers.%s' % re.sub('\.', '_', zone)
self.publish_gauge('%s.processing' % (prefix),
status[zone]['processing'])
for counter in ['requests', 'discarded', 'received', 'sent']:
self.publish_counter('%s.%s' % (prefix, counter),
status[zone][counter])
for code in status[zone]['responses']:
self.publish_counter('%s.responses.%s' % (prefix, code),
status[zone]['responses'][code])
def collect_ssl(self, status):
for stat in ['handshakes', 'session_reuses', 'handshakes_failed']:
self.publish_counter('ssl.%s' % stat, status[stat])
def collect_upstreams(self, status):
for upstream in status:
prefix = 'upstreams.%s' % re.sub('\.', '_', upstream)
self.publish_gauge('%s.keepalive' % prefix,
status[upstream]['keepalive'])
for peer in status[upstream]['peers']:
peer_prefix = '%s.peers.%s' % (prefix, re.sub(':', "-",
re.sub('\.', '_',
peer['server'])))
self.publish_gauge('%s.active' % peer_prefix, peer['active'])
if 'max_conns' in peer:
self.publish_gauge('%s.max_conns' % peer_prefix,
peer['max_conns'])
for counter in ['downtime', 'fails', 'received', 'requests',
'sent', 'unavail']:
self.publish_counter('%s.%s' %
(peer_prefix, counter), peer[counter])
for code in peer['responses']:
self.publish_counter('%s.responses.%s' %
(peer_prefix, code),
peer['responses'][code])
def collect(self):
# Determine what HTTP scheme to use based on SSL usage or not
if str(self.config['req_ssl']).lower() == 'true':
scheme = 'https'
else:
scheme = 'http'
# Add host headers if present (Required for SSL cert validation)
if self.config['req_host_header'] is not None:
headers = {'Host': str(self.config['req_host_header'])}
else:
headers = {}
url = '%s://%s:%i%s' % (scheme,
self.config['req_host'],
int(self.config['req_port']),
self.config['req_path'])
req = urllib2.Request(url=url, headers=headers)
try:
handle = urllib2.urlopen(req)
# Test for json payload; indicates nginx+
if handle.info().gettype() == 'application/json':
self.collect_nginx_plus(json.load(handle))
# Plain payload; indicates open source nginx
else:
self.collect_nginx(handle)
except IOError:
self.log.error("Unable to open %s" % url)
except Exception as e:
self.log.error("Unknown error opening url: %s", e)
|
from homeassistant.components.smappee.const import DOMAIN
from homeassistant.config_entries import SOURCE_ZEROCONF
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_unload_config_entry(hass):
"""Test unload config entry flow."""
with patch("pysmappee.api.SmappeeLocalApi.logon", return_value={}), patch(
"pysmappee.api.SmappeeLocalApi.load_advanced_config",
return_value=[{"key": "mdnsHostName", "value": "Smappee1006000212"}],
), patch(
"pysmappee.api.SmappeeLocalApi.load_command_control_config", return_value=[]
), patch(
"pysmappee.api.SmappeeLocalApi.load_instantaneous",
return_value=[{"key": "phase0ActivePower", "value": 0}],
):
config_entry = MockConfigEntry(
domain=DOMAIN,
data={"host": "1.2.3.4"},
unique_id="smappee1006000212",
source=SOURCE_ZEROCONF,
)
config_entry.add_to_hass(hass)
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
entry = hass.config_entries.async_entries(DOMAIN)[0]
await hass.config_entries.async_unload(entry.entry_id)
await hass.async_block_till_done()
assert not hass.data.get(DOMAIN)
|
class PortfolioEvent(object):
"""
Stores an individual instance of a portfolio event used to create
an event trail to track all changes to a portfolio through time.
Parameters
----------
dt : `datetime`
Datetime of the event.
type : `str`
The type of portfolio event, e.g. 'subscription', 'withdrawal'.
    description : `str`
Human-readable portfolio event type.
debit : `float`
A debit to the cash balance of the portfolio.
credit : `float`
A credit to the cash balance of the portfolio.
balance : `float`
The current cash balance of the portfolio.
"""
def __init__(
self,
dt,
type,
description,
debit,
credit,
balance
):
self.dt = dt
self.type = type
self.description = description
self.debit = debit
self.credit = credit
self.balance = balance
def __eq__(self, other):
if self.dt != other.dt:
return False
if self.type != other.type:
return False
if self.description != other.description:
return False
if self.debit != other.debit:
return False
if self.credit != other.credit:
return False
if self.balance != other.balance:
return False
return True
def __repr__(self):
return (
"PortfolioEvent(dt=%s, type=%s, description=%s, "
"debit=%s, credit=%s, balance=%s)" % (
self.dt, self.type, self.description,
self.debit, self.credit, self.balance
)
)
@classmethod
def create_subscription(cls, dt, credit, balance):
return cls(
dt, type='subscription', description='SUBSCRIPTION',
debit=0.0, credit=round(credit, 2), balance=round(balance, 2)
)
@classmethod
def create_withdrawal(cls, dt, debit, balance):
return cls(
dt, type='withdrawal', description='WITHDRAWAL',
debit=round(debit, 2), credit=0.0, balance=round(balance, 2)
)
def to_dict(self):
return {
'dt': self.dt,
'type': self.type,
'description': self.description,
'debit': self.debit,
'credit': self.credit,
'balance': self.balance
}
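if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: the two factory
    # methods record rounded cash movements together with the running balance,
    # and to_dict() yields the row stored in the portfolio event trail. The
    # dates and amounts below are illustrative only.
    from datetime import datetime
    subscription = PortfolioEvent.create_subscription(datetime(2020, 1, 1), 1000.0, 1000.0)
    withdrawal = PortfolioEvent.create_withdrawal(datetime(2020, 2, 1), 250.0, 750.0)
    print(subscription)
    print(withdrawal.to_dict())
    print(subscription == withdrawal)  # False: type, amounts and dates differ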
|
import logging
import unittest
import os
import os.path
import tempfile
from collections import defaultdict
from gensim.corpora import Dictionary
import gensim.models.wrappers.ldavowpalwabbit as ldavowpalwabbit
from gensim.models.wrappers.ldavowpalwabbit import LdaVowpalWabbit
from gensim.test.utils import datapath
# set up vars used in testing ("Deerwester" from the web tutorial)
TOPIC_WORDS = [
'cat lion leopard mouse jaguar lynx cheetah tiger kitten puppy'.split(),
'engine car wheel brakes tyre motor suspension cylinder exhaust clutch'.split(),
'alice bob robert tim sue rachel dave harry alex jim'.split(),
'c cplusplus go python haskell scala java ruby csharp erlang'.split(),
'eggs ham mushrooms cereal coffee beans tea juice sausages bacon'.split()
]
def get_corpus():
text_path = datapath('ldavowpalwabbit.txt')
dict_path = datapath('ldavowpalwabbit.dict.txt')
dictionary = Dictionary.load_from_text(dict_path)
with open(text_path) as fhandle:
corpus = [dictionary.doc2bow(line.strip().split()) for line in fhandle]
return corpus, dictionary
class TestLdaVowpalWabbit(unittest.TestCase):
def setUp(self):
vw_path = os.environ.get('VOWPAL_WABBIT_PATH', None)
if not vw_path:
msg = "Environment variable 'VOWPAL_WABBIT_PATH' not specified, skipping tests"
try:
raise unittest.SkipTest(msg)
except AttributeError:
# couldn't find a way of skipping tests in python 2.6
self.vw_path = None
corpus, dictionary = get_corpus()
self.vw_path = vw_path
self.corpus = corpus
self.dictionary = dictionary
def test_save_load(self):
"""Test loading/saving LdaVowpalWabbit model."""
if not self.vw_path: # for python 2.6
return
lda = LdaVowpalWabbit(
self.vw_path, corpus=self.corpus, passes=10, chunksize=256,
id2word=self.dictionary, cleanup_files=True, alpha=0.1,
eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1
)
with tempfile.NamedTemporaryFile() as fhandle:
lda.save(fhandle.name)
lda2 = LdaVowpalWabbit.load(fhandle.name)
# ensure public fields are saved/loaded correctly
saved_fields = [
lda.alpha, lda.chunksize, lda.cleanup_files,
lda.decay, lda.eta, lda.gamma_threshold,
lda.id2word, lda.num_terms, lda.num_topics,
lda.passes, lda.random_seed, lda.vw_path
]
loaded_fields = [
lda2.alpha, lda2.chunksize, lda2.cleanup_files,
lda2.decay, lda2.eta, lda2.gamma_threshold,
lda2.id2word, lda2.num_terms, lda2.num_topics,
lda2.passes, lda2.random_seed, lda2.vw_path
]
self.assertEqual(saved_fields, loaded_fields)
# ensure topic matrices are saved/loaded correctly
saved_topics = lda.show_topics(num_topics=5, num_words=10)
loaded_topics = lda2.show_topics(num_topics=5, num_words=10)
self.assertEqual(loaded_topics, saved_topics)
def test_model_update(self):
"""Test updating existing LdaVowpalWabbit model."""
if not self.vw_path: # for python 2.6
return
lda = LdaVowpalWabbit(
self.vw_path, corpus=[self.corpus[0]], passes=10, chunksize=256,
id2word=self.dictionary, cleanup_files=True, alpha=0.1,
eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1
)
lda.update(self.corpus[1:])
result = lda.log_perplexity(self.corpus)
self.assertTrue(result < -1)
self.assertTrue(result > -5)
def test_perplexity(self):
"""Test LdaVowpalWabbit perplexity is within expected range."""
if not self.vw_path: # for python 2.6
return
lda = LdaVowpalWabbit(
self.vw_path, corpus=self.corpus, passes=10, chunksize=256,
id2word=self.dictionary, cleanup_files=True, alpha=0.1,
eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1)
# varies, but should be between -1 and -5
result = lda.log_perplexity(self.corpus)
self.assertTrue(result < -1)
self.assertTrue(result > -5)
def test_topic_coherence(self):
"""Test LdaVowpalWabbit topic coherence."""
if not self.vw_path: # for python 2.6
return
corpus, dictionary = get_corpus()
lda = LdaVowpalWabbit(
self.vw_path, corpus=corpus, passes=10, chunksize=256,
id2word=dictionary, cleanup_files=True, alpha=0.1,
eta=0.1, num_topics=len(TOPIC_WORDS), random_seed=1
)
lda.print_topics(5, 10)
# map words in known topic to an ID
topic_map = {}
for i, words in enumerate(TOPIC_WORDS):
topic_map[frozenset(words)] = i
n_coherent = 0
for topic_id in range(lda.num_topics):
topic = lda.show_topic(topic_id, topn=20)
# get all words from LDA topic
topic_words = [w[1] for w in topic]
# get list of original topics that each word actually belongs to
ids = []
for word in topic_words:
for src_topic_words, src_topic_id in topic_map.items():
if word in src_topic_words:
ids.append(src_topic_id)
# count the number of times each original topic appears
counts = defaultdict(int)
for found_topic_id in ids:
counts[found_topic_id] += 1
# if at least 6/10 words assigned to same topic, consider it coherent
max_count = 0
for count in counts.values():
max_count = max(max_count, count)
if max_count >= 6:
n_coherent += 1
# not 100% deterministic, but should always get 3+ coherent topics
self.assertTrue(n_coherent >= 3)
def test_corpus_to_vw(self):
"""Test corpus to Vowpal Wabbit format conversion."""
if not self.vw_path: # for python 2.6
return
corpus = [
[(0, 5), (7, 1), (5, 3), (0, 2)],
[(7, 2), (2, 1), (3, 11)],
[(1, 1)],
[],
[(5, 2), (0, 1)]
]
expected = """
| 0:5 7:1 5:3 0:2
| 7:2 2:1 3:11
| 1:1
|
| 5:2 0:1
""".strip()
result = '\n'.join(ldavowpalwabbit.corpus_to_vw(corpus))
self.assertEqual(result, expected)
def testvwmodel2ldamodel(self):
"""Test copying of VWModel to LdaModel"""
if not self.vw_path:
return
tm1 = LdaVowpalWabbit(vw_path=self.vw_path, corpus=self.corpus, num_topics=2, id2word=self.dictionary)
tm2 = ldavowpalwabbit.vwmodel2ldamodel(tm1)
for document in self.corpus:
element1_1, element1_2 = tm1[document][0]
element2_1, element2_2 = tm2[document][0]
self.assertAlmostEqual(element1_1, element2_1)
self.assertAlmostEqual(element1_2, element2_2, 5)
logging.debug('%d %d', element1_1, element2_1)
            logging.debug('%f %f', element1_2, element2_2)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
import logging
import subprocess
import threading
# pylint: disable=import-error
from openrazer_daemon.keyboard import XTE_MAPPING
# This determines if the macro keys are executed with their natural spacing
XTE_SLEEP = False
class MacroObject(object):
"""
Macro base object
"""
def to_dict(self):
"""
Convert the object to a dict to be sent over DBus
:return: Dictionary
:rtype: dict
"""
raise NotImplementedError()
@classmethod
def from_dict(cls, values_dict):
"""
Create class from dict
:param values_dict: Dictionary of values (and a type key)
:type values_dict: dict
:return: Class object
"""
del values_dict['type']
return cls(**values_dict)
class MacroKey(MacroObject):
"""
Is an object of a key event used in macros
"""
def __init__(self, key_id, pre_pause, state):
self.key_id = key_id
self.pre_pause = pre_pause
self.state = state
def __repr__(self):
return '{0} {1}'.format(self.key_id, self.state)
def __str__(self):
return 'MacroKey|{0}|{1}|{2}'.format(self.key_id, self.pre_pause, self.state)
def to_dict(self):
return {
'type': 'MacroKey',
'key_id': self.key_id,
'pre_pause': self.pre_pause,
'state': self.state
}
@property
def xte_key(self):
"""
Convert key to XTE compatible name
:return: XTE Name
:rtype: str
"""
return XTE_MAPPING.get(self.key_id, self.key_id)
# If it only opens a new tab in chroma - https://askubuntu.com/questions/540939/xdg-open-only-opens-a-new-tab-in-a-new-chromium-window-despite-passing-it-a-url
class MacroURL(MacroObject):
"""
    Is an object of a URL to be opened, used in macros
"""
def __init__(self, url):
self.url = url
def __repr__(self):
return '{0}'.format(self.url)
def __str__(self):
return 'MacroURL|{0}'.format(self.url)
def to_dict(self):
return {
'type': 'MacroURL',
'url': self.url,
}
def execute(self):
"""
Open URL in the browser
"""
proc = subprocess.Popen(['xdg-open', self.url], stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
proc.communicate()
class MacroScript(MacroObject):
"""
    Is an object of a script to be run, used in macros
"""
def __init__(self, script, args=None):
self.script = script
if isinstance(args, str):
self.args = ' ' + args
else:
self.args = ''
def __repr__(self):
return '{0}'.format(self.script)
def __str__(self):
return 'MacroScript|{0}'.format(self.script)
def to_dict(self):
return {
'type': 'MacroScript',
'script': self.script,
'args': self.args
}
def execute(self):
"""
Run script
"""
proc = subprocess.Popen(self.script + self.args, shell=True, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
proc.communicate()
class MacroRunner(threading.Thread):
"""
Thread to run macros
"""
def __init__(self, device_id, macro_bind, macro_data):
super(MacroRunner, self).__init__()
self._logger = logging.getLogger('razer.device{0}.macro{1}'.format(device_id, macro_bind))
self._macro_data = macro_data
self._macro_bind = macro_bind
@staticmethod
def xte_line(key_event):
"""
        Generate a line to be fed into XTE
:param key_event: Key event object
:type key_event: MacroKey
:return: String XTE script
:rtype: str
"""
        # Save key here to prevent 5-odd dictionary lookups
key = key_event.xte_key
cmd = ''
if key is not None:
if XTE_SLEEP:
cmd += 'usleep {0}\n'.format(key_event.pre_pause)
if key_event.state == 'UP':
cmd += 'keyup {0}\n'.format(key)
else:
cmd += 'keydown {0}\n'.format(key)
return cmd
def run(self):
"""
Main thread function
"""
# TODO move the xte-munging to the init
xte = ''
for event in self._macro_data:
if isinstance(event, MacroKey):
xte += self.xte_line(event)
else:
if xte != '':
proc = subprocess.Popen(['xte'], stdin=subprocess.PIPE)
proc.communicate(input=xte.encode('ascii'))
xte = ''
                # Now run everything else (this just allows for fewer calls to xte)
if not isinstance(event, MacroKey):
event.execute()
if xte != '':
proc = subprocess.Popen(['xte'], stdin=subprocess.PIPE)
proc.communicate(input=xte.encode('ascii'))
self._logger.debug("Finished running macro %s", self._macro_bind)
def macro_dict_to_obj(macro_dict):
"""
    Converts a macro dict to its relevant object
    :param macro_dict: Macro dict
:type macro_dict: dict
:return: Macro Object
:rtype: object
:raises ValueError: When a type isn't known
"""
if macro_dict['type'] == 'MacroKey':
result = MacroKey.from_dict(macro_dict)
elif macro_dict['type'] == 'MacroURL':
result = MacroURL.from_dict(macro_dict)
elif macro_dict['type'] == 'MacroScript':
result = MacroScript.from_dict(macro_dict)
else:
raise ValueError("unknown type")
return result
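if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: macro objects
    # round-trip through plain dicts (the form sent over DBus), with the 'type'
    # key selecting which class to rebuild. The key id and URL below are
    # illustrative only.
    key_down = MacroKey('KEY_M', 0, 'DOWN')
    print(macro_dict_to_obj(key_down.to_dict()))                    # KEY_M DOWN
    print(macro_dict_to_obj({'type': 'MacroURL', 'url': 'https://example.com'}))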
|
from xknx.devices import Sensor as XknxSensor
from homeassistant.components.sensor import DEVICE_CLASSES
from homeassistant.helpers.entity import Entity
from .const import DOMAIN
from .knx_entity import KnxEntity
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up sensor(s) for KNX platform."""
entities = []
for device in hass.data[DOMAIN].xknx.devices:
if isinstance(device, XknxSensor):
entities.append(KNXSensor(device))
async_add_entities(entities)
class KNXSensor(KnxEntity, Entity):
"""Representation of a KNX sensor."""
def __init__(self, device: XknxSensor):
"""Initialize of a KNX sensor."""
super().__init__(device)
@property
def state(self):
"""Return the state of the sensor."""
return self._device.resolve_state()
@property
def unit_of_measurement(self):
"""Return the unit this state is expressed in."""
return self._device.unit_of_measurement()
@property
def device_class(self):
"""Return the device class of the sensor."""
device_class = self._device.ha_device_class()
if device_class in DEVICE_CLASSES:
return device_class
return None
|
import itertools
import random
import time
import unittest
import mock
from perfkitbenchmarker.scripts.object_storage_api_test_scripts import object_storage_api_tests
class TestSizeDistributionIterator(unittest.TestCase):
def testPointDistribution(self):
dist = {10: 100.0}
iter = object_storage_api_tests.SizeDistributionIterator(dist)
values = list(itertools.islice(iter, 5))
self.assertEqual(values, [10, 10, 10, 10, 10])
def testTwoElementDistribution(self):
dist = {1: 50.0, 10: 50.0}
iter = object_storage_api_tests.SizeDistributionIterator(dist)
with mock.patch(random.__name__ + '.random') as rand:
rand.side_effect = [0.2, 0.7, 0.2]
values = list(itertools.islice(iter, 3))
self.assertTrue(values == [1, 10, 1])
def testNonTerminatingBinaryPercent(self):
# 20/100 = 1/5 does not terminate in binary
dist = {1: 20.0, 10: 80.0}
iter = object_storage_api_tests.SizeDistributionIterator(dist)
with mock.patch(random.__name__ + '.random') as rand:
rand.side_effect = [0.1, 0.9]
values = list(itertools.islice(iter, 2))
self.assertTrue(values == [1, 10])
class TestMaxSizeInDistribution(unittest.TestCase):
def testPointDistribution(self):
dist = {10: 100.0}
dist[10] = 100.0
self.assertEqual(object_storage_api_tests.MaxSizeInDistribution(dist),
10)
def testTwoElementDistribution(self):
dist = {1: 50.0, 10: 50.0}
self.assertEqual(object_storage_api_tests.MaxSizeInDistribution(dist),
10)
class TestPrefixCounterIterator(unittest.TestCase):
def testIterator(self):
iterator = object_storage_api_tests.PrefixCounterIterator('foo')
values = list(itertools.islice(iterator, 3))
self.assertEqual(values, ['foo_0', 'foo_1', 'foo_2'])
class TestPrefixTimestampSuffixIterator(unittest.TestCase):
def testIterator(self):
iterator = object_storage_api_tests.PrefixTimestampSuffixIterator(
'foo', 'bar')
with mock.patch(time.__name__ + '.time',
side_effect=[0, 1, 2]):
values = list(itertools.islice(iterator, 3))
self.assertEqual(values, ['foo_0.000000_bar',
'foo_1.000000_bar',
'foo_2.000000_bar'])
if __name__ == '__main__':
unittest.main()
|
from math import ceil
import numpy as np
from ..fixes import fft, ifft, fftfreq
from ..utils import logger, verbose
@verbose
def stft(x, wsize, tstep=None, verbose=None):
"""STFT Short-Term Fourier Transform using a sine window.
The transformation is designed to be a tight frame that can be
perfectly inverted. It only returns the positive frequencies.
Parameters
----------
x : array, shape (n_signals, n_times)
Containing multi-channels signal.
wsize : int
Length of the STFT window in samples (must be a multiple of 4).
tstep : int
Step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2).
%(verbose)s
Returns
-------
X : array, shape (n_signals, wsize // 2 + 1, n_step)
STFT coefficients for positive frequencies with
``n_step = ceil(T / tstep)``.
See Also
--------
istft
stftfreq
"""
if not np.isrealobj(x):
raise ValueError("x is not a real valued array")
if x.ndim == 1:
x = x[None, :]
n_signals, T = x.shape
wsize = int(wsize)
# Errors and warnings
if wsize % 4:
raise ValueError('The window length must be a multiple of 4.')
if tstep is None:
tstep = wsize / 2
tstep = int(tstep)
if (wsize % tstep) or (tstep % 2):
raise ValueError('The step size must be a multiple of 2 and a '
'divider of the window length.')
if tstep > wsize / 2:
raise ValueError('The step size must be smaller than half the '
'window length.')
n_step = int(ceil(T / float(tstep)))
n_freq = wsize // 2 + 1
logger.info("Number of frequencies: %d" % n_freq)
logger.info("Number of time steps: %d" % n_step)
X = np.zeros((n_signals, n_freq, n_step), dtype=np.complex128)
if n_signals == 0:
return X
# Defining sine window
win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
win2 = win ** 2
swin = np.zeros((n_step - 1) * tstep + wsize)
for t in range(n_step):
swin[t * tstep:t * tstep + wsize] += win2
swin = np.sqrt(wsize * swin)
# Zero-padding and Pre-processing for edges
xp = np.zeros((n_signals, wsize + (n_step - 1) * tstep),
dtype=x.dtype)
xp[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T] = x
x = xp
for t in range(n_step):
# Framing
wwin = win / swin[t * tstep: t * tstep + wsize]
frame = x[:, t * tstep: t * tstep + wsize] * wwin[None, :]
# FFT
fframe = fft(frame)
X[:, :, t] = fframe[:, :n_freq]
return X
def istft(X, tstep=None, Tx=None):
"""ISTFT Inverse Short-Term Fourier Transform using a sine window.
Parameters
----------
    X : array, shape (n_signals, wsize // 2 + 1, n_step)
The STFT coefficients for positive frequencies.
tstep : int
Step between successive windows in samples (must be a multiple of 2,
a divider of wsize and smaller than wsize/2) (default: wsize/2).
Tx : int
Length of returned signal. If None Tx = n_step * tstep.
Returns
-------
x : array, shape (Tx,)
Array containing the inverse STFT signal.
See Also
--------
stft
"""
# Errors and warnings
n_signals, n_win, n_step = X.shape
    if n_win % 2 == 0:
        raise ValueError('The number of rows of the STFT matrix must be odd.')
wsize = 2 * (n_win - 1)
if tstep is None:
tstep = wsize / 2
if wsize % tstep:
raise ValueError('The step size must be a divider of two times the '
'number of rows of the STFT matrix minus two.')
    if tstep % 2:
        raise ValueError('The step size must be a multiple of 2.')
if tstep > wsize / 2:
raise ValueError('The step size must be smaller than the number of '
'rows of the STFT matrix minus one.')
if Tx is None:
Tx = n_step * tstep
T = n_step * tstep
x = np.zeros((n_signals, T + wsize - tstep), dtype=np.float64)
if n_signals == 0:
return x[:, :Tx]
# Defining sine window
win = np.sin(np.arange(.5, wsize + .5) / wsize * np.pi)
# win = win / norm(win);
# Pre-processing for edges
swin = np.zeros(T + wsize - tstep, dtype=np.float64)
for t in range(n_step):
swin[t * tstep:t * tstep + wsize] += win ** 2
swin = np.sqrt(swin / wsize)
fframe = np.empty((n_signals, n_win + wsize // 2 - 1), dtype=X.dtype)
for t in range(n_step):
# IFFT
fframe[:, :n_win] = X[:, :, t]
fframe[:, n_win:] = np.conj(X[:, wsize // 2 - 1: 0: -1, t])
frame = ifft(fframe)
wwin = win / swin[t * tstep:t * tstep + wsize]
# Overlap-add
x[:, t * tstep: t * tstep + wsize] += np.real(np.conj(frame) * wwin)
# Truncation
x = x[:, (wsize - tstep) // 2: (wsize - tstep) // 2 + T + 1][:, :Tx].copy()
return x
def stftfreq(wsize, sfreq=None): # noqa: D401
"""Compute frequencies of stft transformation.
Parameters
----------
wsize : int
Size of stft window.
sfreq : float
Sampling frequency. If None the frequencies are given between 0 and pi
otherwise it's given in Hz.
Returns
-------
freqs : array
The positive frequencies returned by stft.
See Also
--------
stft
istft
"""
n_freq = wsize // 2 + 1
freqs = fftfreq(wsize)
freqs = np.abs(freqs[:n_freq])
if sfreq is not None:
freqs *= float(sfreq)
return freqs
def stft_norm2(X):
"""Compute L2 norm of STFT transform.
It takes into account that stft only return positive frequencies.
As we use tight frame this quantity is conserved by the stft.
Parameters
----------
X : 3D complex array
The STFT transforms
Returns
-------
norms2 : array
The squared L2 norm of every row of X.
"""
X2 = (X * X.conj()).real
# compute all L2 coefs and remove first and last frequency once.
norms2 = (2. * X2.sum(axis=2).sum(axis=1) - np.sum(X2[:, 0, :], axis=1) -
np.sum(X2[:, -1, :], axis=1))
return norms2
def stft_norm1(X):
"""Compute L1 norm of STFT transform.
It takes into account that stft only return positive frequencies.
Parameters
----------
X : 3D complex array
The STFT transforms
Returns
-------
norms : array
The L1 norm of every row of X.
"""
X_abs = np.abs(X)
# compute all L1 coefs and remove first and last frequency once.
norms = (2. * X_abs.sum(axis=(1, 2)) -
np.sum(X_abs[:, 0, :], axis=1) - np.sum(X_abs[:, -1, :], axis=1))
return norms
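if __name__ == '__main__':
    # Minimal usage sketch, not part of the original module: the sine window
    # forms a tight frame, so stft followed by istft should reconstruct the
    # signal up to numerical precision, provided wsize is a multiple of 4 and
    # tstep is an even divider of wsize no larger than wsize / 2.
    rng = np.random.RandomState(42)
    sig = rng.randn(2, 1000)
    coefs = stft(sig, wsize=128, tstep=64)
    sig_rec = istft(coefs, tstep=64, Tx=sig.shape[1])
    print(np.allclose(sig, sig_rec))            # expected: True
    print(stftfreq(128, sfreq=250.)[:5])        # lowest positive frequency bins in Hz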
|
from datetime import timedelta
import logging
import async_timeout
from pyowm.exceptions.api_call_error import APICallError
from pyowm.exceptions.api_response_error import UnauthorizedError
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
ATTR_API_CLOUDS,
ATTR_API_CONDITION,
ATTR_API_HUMIDITY,
ATTR_API_PRESSURE,
ATTR_API_RAIN,
ATTR_API_SNOW,
ATTR_API_TEMPERATURE,
ATTR_API_WEATHER,
ATTR_API_WEATHER_CODE,
ATTR_API_WIND_BEARING,
ATTR_API_WIND_SPEED,
CONDITION_CLASSES,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
WEATHER_UPDATE_INTERVAL = timedelta(minutes=10)
class WeatherUpdateCoordinator(DataUpdateCoordinator):
"""Weather data update coordinator."""
def __init__(self, owm, latitude, longitude, hass):
"""Initialize coordinator."""
self._owm_client = owm
self._latitude = latitude
self._longitude = longitude
super().__init__(
hass, _LOGGER, name=DOMAIN, update_interval=WEATHER_UPDATE_INTERVAL
)
async def _async_update_data(self):
data = {}
with async_timeout.timeout(20):
try:
weather_response = await self._get_owm_weather()
data = self._convert_weather_response(weather_response)
except (APICallError, UnauthorizedError) as error:
raise UpdateFailed(error) from error
return data
async def _get_owm_weather(self):
weather = await self.hass.async_add_executor_job(
self._owm_client.weather_at_coords, self._latitude, self._longitude
)
return weather.get_weather()
def _convert_weather_response(self, weather_response):
return {
ATTR_API_TEMPERATURE: weather_response.get_temperature("celsius").get(
"temp"
),
ATTR_API_PRESSURE: weather_response.get_pressure().get("press"),
ATTR_API_HUMIDITY: weather_response.get_humidity(),
ATTR_API_WIND_BEARING: weather_response.get_wind().get("deg"),
ATTR_API_WIND_SPEED: weather_response.get_wind().get("speed"),
ATTR_API_CLOUDS: weather_response.get_clouds(),
ATTR_API_RAIN: self._get_rain(weather_response.get_rain()),
ATTR_API_SNOW: self._get_snow(weather_response.get_snow()),
ATTR_API_WEATHER: weather_response.get_detailed_status(),
ATTR_API_CONDITION: self._get_condition(
weather_response.get_weather_code()
),
ATTR_API_WEATHER_CODE: weather_response.get_weather_code(),
}
@staticmethod
def _get_rain(rain):
if "1h" in rain:
return round(rain["1h"], 0)
return "not raining"
@staticmethod
def _get_snow(snow):
if snow:
return round(snow, 0)
return "not snowing"
@staticmethod
def _get_condition(weather_code):
return [k for k, v in CONDITION_CLASSES.items() if weather_code in v][0]
|
from datetime import date
import logging
import pysaj
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_TYPE,
CONF_USERNAME,
DEVICE_CLASS_POWER,
DEVICE_CLASS_TEMPERATURE,
ENERGY_KILO_WATT_HOUR,
EVENT_HOMEASSISTANT_START,
EVENT_HOMEASSISTANT_STOP,
MASS_KILOGRAMS,
POWER_WATT,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TIME_HOURS,
)
from homeassistant.core import CALLBACK_TYPE, callback
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
MIN_INTERVAL = 5
MAX_INTERVAL = 300
INVERTER_TYPES = ["ethernet", "wifi"]
SAJ_UNIT_MAPPINGS = {
"": None,
"h": TIME_HOURS,
"kg": MASS_KILOGRAMS,
"kWh": ENERGY_KILO_WATT_HOUR,
"W": POWER_WATT,
"°C": TEMP_CELSIUS,
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_TYPE, default=INVERTER_TYPES[0]): vol.In(INVERTER_TYPES),
vol.Inclusive(CONF_USERNAME, "credentials"): cv.string,
vol.Inclusive(CONF_PASSWORD, "credentials"): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the SAJ sensors."""
remove_interval_update = None
wifi = config[CONF_TYPE] == INVERTER_TYPES[1]
# Init all sensors
sensor_def = pysaj.Sensors(wifi)
# Use all sensors by default
hass_sensors = []
kwargs = {}
if wifi:
kwargs["wifi"] = True
if config.get(CONF_USERNAME) and config.get(CONF_PASSWORD):
kwargs["username"] = config[CONF_USERNAME]
kwargs["password"] = config[CONF_PASSWORD]
try:
saj = pysaj.SAJ(config[CONF_HOST], **kwargs)
done = await saj.read(sensor_def)
except pysaj.UnauthorizedException:
_LOGGER.error("Username and/or password is wrong")
return
except pysaj.UnexpectedResponseException as err:
_LOGGER.error(
"Error in SAJ, please check host/ip address. Original error: %s", err
)
return
if not done:
raise PlatformNotReady
for sensor in sensor_def:
if sensor.enabled:
hass_sensors.append(
SAJsensor(saj.serialnumber, sensor, inverter_name=config.get(CONF_NAME))
)
async_add_entities(hass_sensors)
async def async_saj():
"""Update all the SAJ sensors."""
values = await saj.read(sensor_def)
for sensor in hass_sensors:
state_unknown = False
if not values:
# SAJ inverters are powered by DC via solar panels and thus are
# offline after the sun has set. If a sensor resets on a daily
# basis like "today_yield", this reset won't happen automatically.
# Code below checks if today > day when sensor was last updated
# and if so: set state to None.
# Sensors with live values like "temperature" or "current_power"
# will also be reset to None.
if (sensor.per_day_basis and date.today() > sensor.date_updated) or (
not sensor.per_day_basis and not sensor.per_total_basis
):
state_unknown = True
sensor.async_update_values(unknown_state=state_unknown)
return values
def start_update_interval(event):
"""Start the update interval scheduling."""
nonlocal remove_interval_update
remove_interval_update = async_track_time_interval_backoff(hass, async_saj)
def stop_update_interval(event):
"""Properly cancel the scheduled update."""
remove_interval_update() # pylint: disable=not-callable
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_START, start_update_interval)
hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, stop_update_interval)
@callback
def async_track_time_interval_backoff(hass, action) -> CALLBACK_TYPE:
"""Add a listener that fires repetitively and increases the interval when failed."""
remove = None
interval = MIN_INTERVAL
async def interval_listener(now=None):
"""Handle elapsed interval with backoff."""
nonlocal interval, remove
try:
if await action():
interval = MIN_INTERVAL
else:
interval = min(interval * 2, MAX_INTERVAL)
finally:
remove = async_call_later(hass, interval, interval_listener)
hass.async_create_task(interval_listener())
def remove_listener():
"""Remove interval listener."""
if remove:
remove() # pylint: disable=not-callable
return remove_listener
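# Note on the helper above (not part of the original integration): with
# MIN_INTERVAL = 5 and MAX_INTERVAL = 300, a failed update reschedules the next
# attempt after 10 s, then 20, 40, 80, 160 and finally 300 s (the cap) for
# repeated failures, while any successful update drops the delay back to 5 s.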
class SAJsensor(Entity):
"""Representation of a SAJ sensor."""
def __init__(self, serialnumber, pysaj_sensor, inverter_name=None):
"""Initialize the SAJ sensor."""
self._sensor = pysaj_sensor
self._inverter_name = inverter_name
self._serialnumber = serialnumber
self._state = self._sensor.value
@property
def name(self):
"""Return the name of the sensor."""
if self._inverter_name:
return f"saj_{self._inverter_name}_{self._sensor.name}"
return f"saj_{self._sensor.name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return SAJ_UNIT_MAPPINGS[self._sensor.unit]
@property
def device_class(self):
"""Return the device class the sensor belongs to."""
if self.unit_of_measurement == POWER_WATT:
return DEVICE_CLASS_POWER
if (
self.unit_of_measurement == TEMP_CELSIUS
or self._sensor.unit == TEMP_FAHRENHEIT
):
return DEVICE_CLASS_TEMPERATURE
@property
def should_poll(self) -> bool:
"""SAJ sensors are updated & don't poll."""
return False
@property
def per_day_basis(self) -> bool:
"""Return if the sensors value is on daily basis or not."""
return self._sensor.per_day_basis
@property
def per_total_basis(self) -> bool:
"""Return if the sensors value is cumulative or not."""
return self._sensor.per_total_basis
@property
def date_updated(self) -> date:
"""Return the date when the sensor was last updated."""
return self._sensor.date
@callback
def async_update_values(self, unknown_state=False):
"""Update this sensor."""
update = False
if self._sensor.value != self._state:
update = True
self._state = self._sensor.value
if unknown_state and self._state is not None:
update = True
self._state = None
if update:
self.async_write_ha_state()
@property
def unique_id(self):
"""Return a unique identifier for this sensor."""
return f"{self._serialnumber}_{self._sensor.name}"
|