from datetime import timedelta
import glob
import logging
import os
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import DATA_MEGABYTES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_FOLDER_PATHS = "folder"
CONF_FILTER = "filter"
DEFAULT_FILTER = "*"
SCAN_INTERVAL = timedelta(minutes=1)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_FOLDER_PATHS): cv.isdir,
vol.Optional(CONF_FILTER, default=DEFAULT_FILTER): cv.string,
}
)
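# Example configuration.yaml entry matching the schema above (illustrative
# values; the platform key "folder" is an assumption, not taken from this file):
#   sensor:
#     - platform: folder
#       folder: /config/downloads
#       filter: "*.mp4"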
def get_files_list(folder_path, filter_term):
"""Return the list of files, applying filter."""
query = folder_path + filter_term
files_list = glob.glob(query)
return files_list
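# For example, folder_path "/config/downloads/" with filter_term "*.mp4"
# builds the glob query "/config/downloads/*.mp4" (illustrative paths).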
def get_size(files_list):
"""Return the sum of the size in bytes of files in the list."""
size_list = [os.stat(f).st_size for f in files_list if os.path.isfile(f)]
return sum(size_list)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the folder sensor."""
path = config.get(CONF_FOLDER_PATHS)
if not hass.config.is_allowed_path(path):
_LOGGER.error("folder %s is not valid or allowed", path)
else:
folder = Folder(path, config.get(CONF_FILTER))
add_entities([folder], True)
class Folder(Entity):
"""Representation of a folder."""
ICON = "mdi:folder"
def __init__(self, folder_path, filter_term):
"""Initialize the data object."""
folder_path = os.path.join(folder_path, "")  # Add trailing separator if missing
self._folder_path = folder_path  # Need to check it's a valid path
self._filter_term = filter_term
self._number_of_files = None
self._size = None
self._name = os.path.split(os.path.split(folder_path)[0])[1]
self._unit_of_measurement = DATA_MEGABYTES
self._file_list = None
def update(self):
"""Update the sensor."""
files_list = get_files_list(self._folder_path, self._filter_term)
self._file_list = files_list
self._number_of_files = len(files_list)
self._size = get_size(files_list)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
decimals = 2
size_mb = round(self._size / 1e6, decimals)
return size_mb
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return self.ICON
@property
def device_state_attributes(self):
"""Return other details about the sensor state."""
return {
"path": self._folder_path,
"filter": self._filter_term,
"number_of_files": self._number_of_files,
"bytes": self._size,
"file_list": self._file_list,
}
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
|
from abc import ABC, abstractmethod
import logging
from broadlink.exceptions import BroadlinkException
import voluptuous as vol
from homeassistant.components.switch import (
DEVICE_CLASS_OUTLET,
DEVICE_CLASS_SWITCH,
PLATFORM_SCHEMA,
SwitchEntity,
)
from homeassistant.const import (
CONF_COMMAND_OFF,
CONF_COMMAND_ON,
CONF_FRIENDLY_NAME,
CONF_HOST,
CONF_MAC,
CONF_NAME,
CONF_SWITCHES,
CONF_TIMEOUT,
CONF_TYPE,
STATE_ON,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.restore_state import RestoreEntity
from .const import DOMAIN, SWITCH_DOMAIN
from .helpers import data_packet, import_device, mac_address
_LOGGER = logging.getLogger(__name__)
CONF_SLOTS = "slots"
SWITCH_SCHEMA = vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_COMMAND_OFF): data_packet,
vol.Optional(CONF_COMMAND_ON): data_packet,
}
)
OLD_SWITCH_SCHEMA = vol.Schema(
{
vol.Optional(CONF_COMMAND_OFF): data_packet,
vol.Optional(CONF_COMMAND_ON): data_packet,
vol.Optional(CONF_FRIENDLY_NAME): cv.string,
}
)
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_HOST),
cv.deprecated(CONF_SLOTS),
cv.deprecated(CONF_TIMEOUT),
cv.deprecated(CONF_TYPE),
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): mac_address,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_SWITCHES, default=[]): vol.Any(
cv.schema_with_slug_keys(OLD_SWITCH_SCHEMA),
vol.All(cv.ensure_list, [SWITCH_SCHEMA]),
),
}
),
)
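# Example configuration.yaml entry matching the schema above (illustrative MAC
# address and base64 packet placeholders; not taken from this file):
#   switch:
#     - platform: broadlink
#       mac: "34:EA:34:XX:XX:XX"
#       switches:
#         - name: TV
#           command_on: JgBGAJKVETkRORE5...
#           command_off: JgBGAJKVETkRORE5...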
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Import the device and set up custom switches.
This is for backward compatibility.
Do not use this method.
"""
mac_addr = config[CONF_MAC]
host = config.get(CONF_HOST)
switches = config.get(CONF_SWITCHES)
if not isinstance(switches, list):
switches = [
{CONF_NAME: switch.pop(CONF_FRIENDLY_NAME, name), **switch}
for name, switch in switches.items()
]
_LOGGER.warning(
"Your configuration for the switch platform is deprecated. "
"Please refer to the Broadlink documentation to catch up"
)
if switches:
platform_data = hass.data[DOMAIN].platforms.setdefault(SWITCH_DOMAIN, {})
platform_data.setdefault(mac_addr, []).extend(switches)
else:
_LOGGER.warning(
"The switch platform is deprecated, except for custom IR/RF "
"switches. Please refer to the Broadlink documentation to "
"catch up"
)
if host:
import_device(hass, host)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Broadlink switch."""
device = hass.data[DOMAIN].devices[config_entry.entry_id]
if device.api.type in {"RM2", "RM4"}:
platform_data = hass.data[DOMAIN].platforms.get(SWITCH_DOMAIN, {})
user_defined_switches = platform_data.get(device.api.mac, {})
switches = [
BroadlinkRMSwitch(device, config) for config in user_defined_switches
]
elif device.api.type == "SP1":
switches = [BroadlinkSP1Switch(device)]
elif device.api.type == "SP2":
switches = [BroadlinkSP2Switch(device)]
elif device.api.type == "MP1":
switches = [BroadlinkMP1Slot(device, slot) for slot in range(1, 5)]
async_add_entities(switches)
class BroadlinkSwitch(SwitchEntity, RestoreEntity, ABC):
"""Representation of a Broadlink switch."""
def __init__(self, device, command_on, command_off):
"""Initialize the switch."""
self._device = device
self._command_on = command_on
self._command_off = command_off
self._coordinator = device.update_manager.coordinator
self._device_class = None
self._state = None
@property
def name(self):
"""Return the name of the switch."""
return f"{self._device.name} Switch"
@property
def assumed_state(self):
"""Return True if unable to access real state of the switch."""
return True
@property
def available(self):
"""Return True if the switch is available."""
return self._device.update_manager.available
@property
def is_on(self):
"""Return True if the switch is on."""
return self._state
@property
def should_poll(self):
"""Return True if the switch has to be polled for state."""
return False
@property
def device_class(self):
"""Return device class."""
return self._device_class
@property
def device_info(self):
"""Return device info."""
return {
"identifiers": {(DOMAIN, self._device.unique_id)},
"manufacturer": self._device.api.manufacturer,
"model": self._device.api.model,
"name": self._device.name,
"sw_version": self._device.fw_version,
}
@callback
def update_data(self):
"""Update data."""
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Call when the switch is added to hass."""
if self._state is None:
state = await self.async_get_last_state()
self._state = state is not None and state.state == STATE_ON
self.async_on_remove(self._coordinator.async_add_listener(self.update_data))
async def async_update(self):
"""Update the switch."""
await self._coordinator.async_request_refresh()
async def async_turn_on(self, **kwargs):
"""Turn on the switch."""
if await self._async_send_packet(self._command_on):
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs):
"""Turn off the switch."""
if await self._async_send_packet(self._command_off):
self._state = False
self.async_write_ha_state()
@abstractmethod
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
class BroadlinkRMSwitch(BroadlinkSwitch):
"""Representation of a Broadlink RM switch."""
def __init__(self, device, config):
"""Initialize the switch."""
super().__init__(
device, config.get(CONF_COMMAND_ON), config.get(CONF_COMMAND_OFF)
)
self._name = config[CONF_NAME]
@property
def name(self):
"""Return the name of the switch."""
return self._name
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
if packet is None:
return True
try:
await self._device.async_request(self._device.api.send_data, packet)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
class BroadlinkSP1Switch(BroadlinkSwitch):
"""Representation of a Broadlink SP1 switch."""
def __init__(self, device):
"""Initialize the switch."""
super().__init__(device, 1, 0)
self._device_class = DEVICE_CLASS_OUTLET
@property
def unique_id(self):
"""Return the unique id of the switch."""
return self._device.unique_id
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
try:
await self._device.async_request(self._device.api.set_power, packet)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
class BroadlinkSP2Switch(BroadlinkSP1Switch):
"""Representation of a Broadlink SP2 switch."""
def __init__(self, device, *args, **kwargs):
"""Initialize the switch."""
super().__init__(device, *args, **kwargs)
self._state = self._coordinator.data["state"]
self._load_power = self._coordinator.data["load_power"]
if device.api.model == "SC1":
self._device_class = DEVICE_CLASS_SWITCH
@property
def assumed_state(self):
"""Return True if unable to access real state of the switch."""
return False
@property
def current_power_w(self):
"""Return the current power usage in Watt."""
return self._load_power
@callback
def update_data(self):
"""Update data."""
if self._coordinator.last_update_success:
self._state = self._coordinator.data["state"]
self._load_power = self._coordinator.data["load_power"]
self.async_write_ha_state()
class BroadlinkMP1Slot(BroadlinkSwitch):
"""Representation of a Broadlink MP1 slot."""
def __init__(self, device, slot):
"""Initialize the switch."""
super().__init__(device, 1, 0)
self._slot = slot
self._state = self._coordinator.data[f"s{slot}"]
self._device_class = DEVICE_CLASS_OUTLET
@property
def unique_id(self):
"""Return the unique id of the slot."""
return f"{self._device.unique_id}-s{self._slot}"
@property
def name(self):
"""Return the name of the switch."""
return f"{self._device.name} S{self._slot}"
@property
def assumed_state(self):
"""Return True if unable to access real state of the switch."""
return False
@callback
def update_data(self):
"""Update data."""
if self._coordinator.last_update_success:
self._state = self._coordinator.data[f"s{self._slot}"]
self.async_write_ha_state()
async def _async_send_packet(self, packet):
"""Send a packet to the device."""
try:
await self._device.async_request(
self._device.api.set_power, self._slot, packet
)
except (BroadlinkException, OSError) as err:
_LOGGER.error("Failed to send packet: %s", err)
return False
return True
|
import getpass
import logging
import urllib.parse
import warnings
import smart_open.utils
logger = logging.getLogger(__name__)
#
# Global storage for SSH connections.
#
_SSH = {}
SCHEMES = ("ssh", "scp", "sftp")
"""Supported URL schemes."""
DEFAULT_PORT = 22
URI_EXAMPLES = (
'ssh://username@host/path/file',
'ssh://username@host//path/file',
'scp://username@host/path/file',
'sftp://username@host/path/file',
)
def _unquote(text):
return text and urllib.parse.unquote(text)
def parse_uri(uri_as_string):
split_uri = urllib.parse.urlsplit(uri_as_string)
assert split_uri.scheme in SCHEMES
return dict(
scheme=split_uri.scheme,
uri_path=_unquote(split_uri.path),
user=_unquote(split_uri.username),
host=split_uri.hostname,
port=int(split_uri.port or DEFAULT_PORT),
password=_unquote(split_uri.password),
)
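# For example (illustrative URI):
#   parse_uri('ssh://user@example.com/tmp/file.txt')
#   -> {'scheme': 'ssh', 'uri_path': '/tmp/file.txt', 'user': 'user',
#       'host': 'example.com', 'port': 22, 'password': None}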
def open_uri(uri, mode, transport_params):
smart_open.utils.check_kwargs(open, transport_params)
parsed_uri = parse_uri(uri)
uri_path = parsed_uri.pop('uri_path')
parsed_uri.pop('scheme')
return open(uri_path, mode, transport_params=transport_params, **parsed_uri)
def _connect(hostname, username, port, password, transport_params):
try:
import paramiko
except ImportError:
warnings.warn(
'paramiko missing, opening SSH/SCP/SFTP paths will be disabled. '
'`pip install paramiko` to suppress'
)
raise
key = (hostname, username)
ssh = _SSH.get(key)
if ssh is None:
ssh = _SSH[key] = paramiko.client.SSHClient()
ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
kwargs = transport_params.get('connect_kwargs', {}).copy()
# if 'key_filename' is present in transport_params, then I do not
# overwrite the credentials.
if 'key_filename' not in kwargs:
kwargs.setdefault('password', password)
kwargs.setdefault('username', username)
ssh.connect(hostname, port, **kwargs)
return ssh
def open(path, mode='r', host=None, user=None, password=None, port=DEFAULT_PORT, transport_params=None):
"""Open a file on a remote machine over SSH.
Expects authentication to be already set up via existing keys on the local machine.
Parameters
----------
path: str
The path to the file to open on the remote machine.
mode: str, optional
The mode to use for opening the file.
host: str, optional
The hostname of the remote machine. May not be None.
user: str, optional
The username to use to login to the remote machine.
If None, defaults to the name of the current user.
password: str, optional
The password to use to login to the remote machine.
port: int, optional
The port to connect to.
transport_params: dict, optional
Any additional settings to be passed to paramiko.SSHClient.connect
Returns
-------
A file-like object.
Important
---------
If you specify a previously unseen host, then its host key will be added to
the local ~/.ssh/known_hosts *automatically*.
If ``username`` or ``password`` are specified in *both* the uri and
``transport_params``, ``transport_params`` will take precedence
"""
if not host:
raise ValueError('you must specify the host to connect to')
if not user:
user = getpass.getuser()
if not transport_params:
transport_params = {}
conn = _connect(host, user, port, password, transport_params)
sftp_client = conn.get_transport().open_sftp_client()
fobj = sftp_client.open(path, mode)
fobj.name = path
return fobj
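# Illustrative usage through the public smart_open API (assumes a reachable
# host and working SSH credentials; host name and path are made up):
#   import smart_open
#   with smart_open.open('ssh://user@example.com/tmp/file.txt') as fin:
#       print(fin.read())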
|
import os
import sys
import pytest
from coverage import env
from coverage.python import get_zip_bytes, source_for_file
from tests.coveragetest import CoverageTest
class GetZipBytesTest(CoverageTest):
"""Tests of `get_zip_bytes`."""
run_in_temp_dir = False
def test_get_encoded_zip_files(self):
# See igor.py, do_zipmods, for the text of these files.
zip_file = "tests/zipmods.zip"
sys.path.append(zip_file) # So we can import the files.
for encoding in ["utf8", "gb2312", "hebrew", "shift_jis", "cp1252"]:
filename = zip_file + "/encoded_" + encoding + ".py"
filename = filename.replace("/", os.sep)
zip_data = get_zip_bytes(filename)
zip_text = zip_data.decode(encoding)
self.assertIn('All OK', zip_text)
# Run the code to see that we really got it encoded properly.
__import__("encoded_"+encoding)
def test_source_for_file(tmpdir):
path = tmpdir.join("a.py")
src = str(path)
assert source_for_file(src) == src
assert source_for_file(src + 'c') == src
assert source_for_file(src + 'o') == src
unknown = src + 'FOO'
assert source_for_file(unknown) == unknown
@pytest.mark.skipif(not env.WINDOWS, reason="not windows")
def test_source_for_file_windows(tmpdir):
path = tmpdir.join("a.py")
src = str(path)
# On windows if a pyw exists, it is an acceptable source
path_windows = tmpdir.ensure("a.pyw")
assert str(path_windows) == source_for_file(src + 'c')
# If both pyw and py exist, py is preferred
path.ensure(file=True)
assert source_for_file(src + 'c') == src
def test_source_for_file_jython():
assert source_for_file("a$py.class") == "a.py"
|
from __future__ import unicode_literals
import os
import stat
import random
import shutil
import string
import traceback
from lib.data.data import paths, pystrs, pyoptions
from lib.fun.fun import range_compatible, get_subdir_files_path, cool
def rewrite(filepath):
try:
filesize = os.path.getsize(filepath)
with open(filepath, "w+b") as f:
# os.urandom returns bytes, so this also works on Python 3, where writing
# a str built from chr() to a binary file raises TypeError.
f.write(os.urandom(filesize))
except:
pass
def truncating(filepath):
# default: 2 times
for _ in range_compatible(0, 2):
try:
with open(filepath, "w"):
pass
except:
pass
def renamefile(filepath):
newname = os.path.join(os.path.dirname(filepath), "".join(random.sample(string.ascii_letters, random.randint(4, 8))))
try:
os.rename(filepath, newname)
except:
pass
return newname
def renamedir(dirpaths):
# Rename the deepest directories first so renaming a parent does not invalidate
# its children's paths (the Python 2 version used
# cmp=lambda x, y: y.count(os.path.sep) - x.count(os.path.sep)).
dirpaths.sort(key=lambda x: x.count(os.path.sep), reverse=True)
for dirpath in dirpaths:
try:
os.rename(dirpath, os.path.join(os.path.dirname(dirpath), "".join(random.sample(string.ascii_letters,
random.randint(4, 8)))))
except:
pass
def shreder_dir(directory, rewritecounts=pyoptions.dir_rewrite_count):
filepaths = []
dirpaths = []
print(pyoptions.CRLF + "[+] Shredding '%s' ..." % cool.orange(directory))
try:
newdirectoryname = os.path.join(os.path.dirname(directory), "".join(chr(random.randint(97, 122))
for _ in range_compatible(1, 6)))
os.rename(directory, newdirectoryname)
directory = newdirectoryname
except:
traceback.print_exc()
exit(pyoptions.CRLF + cool.red("[-] Error: cannot rename root directory, please check permissions"))
subdir_files_path = get_subdir_files_path(directory, only_file_path=False)
dirpaths.extend(subdir_files_path[0])
filepaths.extend(subdir_files_path[1])
for filepath in filepaths:
try:
os.chmod(filepath, stat.S_IREAD | stat.S_IWRITE)
except:
pass
for _ in range_compatible(0, rewritecounts):
print("[+] Rewrite count: %d" % (_+1))
for filepath in filepaths:
rewrite(filepath)
for filepath in filepaths:
truncating(filepath)
for filepath in filepaths:
renamefile(filepath)
renamedir(dirpaths)
os.chdir(os.path.join(directory, ".."))
try:
shutil.rmtree(directory)
except OSError as ex:
print(cool.fuchsia("[!] Error: Cannot remove directory: '%s'" % directory))
traceback.print_exc()
print(cool.orange("[+] Done"))
def shreder_file(filepath, rewritecounts=pyoptions.file_rewrite_count):
try:
os.chmod(filepath, stat.S_IREAD | stat.S_IWRITE)
except:
pass
for _ in range_compatible(0, rewritecounts):
rewrite(filepath)
truncating(filepath)
newname = renamefile(filepath)
os.remove(newname)
print("[+] Shredded %s Completely!" % cool.orange(filepath))
def shredder_magic(*args):
"""[file_or_dir]"""
args = list(args[0])
if len(args) == 1:
_ = paths.results_path
elif len(args) >= 2:
_ = args[1]
else:
exit(pyoptions.CRLF + cool.fuchsia("[!] Usage: {} {}".format(args[0], pyoptions.tools_info.get(args[0]))))
fnum = 0
if _ and os.path.isdir(_):
shreder_dir(_)
elif _ and os.path.isfile(_):
shreder_file(_)
elif _ and _.lower() in pyoptions.prefix_range:
for filename in os.listdir(paths.results_path):
if _.lower() in str(filename[0:10]).lower():
fnum += 1
shreder_file(os.path.join(paths.results_path, filename))
if fnum == 0:
exit(pyoptions.CRLF + cool.orange("[+] prefix %s files have been cleaned" % _.upper()))
else:
exit(pyoptions.CRLF + cool.fuchsia("[!] Usage: {} {}".format(args[0], pyoptions.tools_info.get(args[0]))))
|
import asyncio
from zigpy.exceptions import ZigbeeException
import zigpy.zcl.clusters.security as security
from homeassistant.core import callback
from .. import registries
from ..const import (
SIGNAL_ATTR_UPDATED,
WARNING_DEVICE_MODE_EMERGENCY,
WARNING_DEVICE_SOUND_HIGH,
WARNING_DEVICE_SQUAWK_MODE_ARMED,
WARNING_DEVICE_STROBE_HIGH,
WARNING_DEVICE_STROBE_YES,
)
from .base import ZigbeeChannel
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasAce.cluster_id)
class IasAce(ZigbeeChannel):
"""IAS Ancillary Control Equipment channel."""
@registries.CHANNEL_ONLY_CLUSTERS.register(security.IasWd.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasWd.cluster_id)
class IasWd(ZigbeeChannel):
"""IAS Warning Device channel."""
@staticmethod
def set_bit(destination_value, destination_bit, source_value, source_bit):
"""Set the specified bit in the value."""
if IasWd.get_bit(source_value, source_bit):
return destination_value | (1 << destination_bit)
return destination_value
@staticmethod
def get_bit(value, bit):
"""Get the specified bit from the value."""
return (value & (1 << bit)) != 0
async def issue_squawk(
self,
mode=WARNING_DEVICE_SQUAWK_MODE_ARMED,
strobe=WARNING_DEVICE_STROBE_YES,
squawk_level=WARNING_DEVICE_SOUND_HIGH,
):
"""Issue a squawk command.
This command uses the WD capabilities to emit a quick audible/visible pulse called a
"squawk". The squawk command has no effect if the WD is currently active
(warning in progress).
"""
value = 0
value = IasWd.set_bit(value, 0, squawk_level, 0)
value = IasWd.set_bit(value, 1, squawk_level, 1)
value = IasWd.set_bit(value, 3, strobe, 0)
value = IasWd.set_bit(value, 4, mode, 0)
value = IasWd.set_bit(value, 5, mode, 1)
value = IasWd.set_bit(value, 6, mode, 2)
value = IasWd.set_bit(value, 7, mode, 3)
await self.squawk(value)
async def issue_start_warning(
self,
mode=WARNING_DEVICE_MODE_EMERGENCY,
strobe=WARNING_DEVICE_STROBE_YES,
siren_level=WARNING_DEVICE_SOUND_HIGH,
warning_duration=5, # seconds
strobe_duty_cycle=0x00,
strobe_intensity=WARNING_DEVICE_STROBE_HIGH,
):
"""Issue a start warning command.
This command starts the WD operation. The WD alerts the surrounding area by audible
(siren) and visual (strobe) signals.
strobe_duty_cycle indicates the length of the flash cycle. This provides a means
of varying the flash duration for different alarm types (e.g., fire, police, burglar).
Valid range is 0-100 in increments of 10. All other values SHALL be rounded to the
nearest valid value. Strobe SHALL calculate duty cycle over a duration of one second.
The ON state SHALL precede the OFF state. For example, if Strobe Duty Cycle Field specifies
“40,” then the strobe SHALL flash ON for 4/10ths of a second and then turn OFF for
6/10ths of a second.
"""
value = 0
value = IasWd.set_bit(value, 0, siren_level, 0)
value = IasWd.set_bit(value, 1, siren_level, 1)
value = IasWd.set_bit(value, 2, strobe, 0)
value = IasWd.set_bit(value, 4, mode, 0)
value = IasWd.set_bit(value, 5, mode, 1)
value = IasWd.set_bit(value, 6, mode, 2)
value = IasWd.set_bit(value, 7, mode, 3)
await self.start_warning(
value, warning_duration, strobe_duty_cycle, strobe_intensity
)
@registries.BINARY_SENSOR_CLUSTERS.register(security.IasZone.cluster_id)
@registries.ZIGBEE_CHANNEL_REGISTRY.register(security.IasZone.cluster_id)
class IASZoneChannel(ZigbeeChannel):
"""Channel for the IASZone Zigbee cluster."""
@callback
def cluster_command(self, tsn, command_id, args):
"""Handle commands received to this cluster."""
if command_id == 0:
state = args[0] & 3
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}", 2, "zone_status", state
)
self.debug("Updated alarm state: %s", state)
elif command_id == 1:
self.debug("Enroll requested")
res = self._cluster.enroll_response(0, 0)
asyncio.create_task(res)
async def async_configure(self):
"""Configure IAS device."""
await self.get_attribute_value("zone_type", from_cache=False)
if self._ch_pool.skip_configuration:
self.debug("skipping IASZoneChannel configuration")
return
self.debug("started IASZoneChannel configuration")
await self.bind()
ieee = self.cluster.endpoint.device.application.ieee
try:
res = await self._cluster.write_attributes({"cie_addr": ieee})
self.debug(
"wrote cie_addr: %s to '%s' cluster: %s",
str(ieee),
self._cluster.ep_attribute,
res[0],
)
except ZigbeeException as ex:
self.debug(
"Failed to write cie_addr: %s to '%s' cluster: %s",
str(ieee),
self._cluster.ep_attribute,
str(ex),
)
try:
self.debug("Sending pro-active IAS enroll response")
await self._cluster.enroll_response(0, 0)
except ZigbeeException as ex:
self.debug(
"Failed to send pro-active IAS enroll response: %s",
str(ex),
)
self.debug("finished IASZoneChannel configuration")
@callback
def attribute_updated(self, attrid, value):
"""Handle attribute updates on this cluster."""
if attrid == 2:
value = value & 3
self.async_send_signal(
f"{self.unique_id}_{SIGNAL_ATTR_UPDATED}",
attrid,
self.cluster.attributes.get(attrid, [attrid])[0],
value,
)
async def async_initialize(self, from_cache):
"""Initialize channel."""
attributes = ["zone_status", "zone_state"]
await self.get_attributes(attributes, from_cache=from_cache)
await super().async_initialize(from_cache)
|
from typing import Union, Iterable, Optional, Text, Callable
import functools
import tensornetwork.backends.abstract_backend as abstract_backend
import tensornetwork.backends as backends
import tensornetwork.backend_contextmanager as backend_contextmanager
AbstractBackend = abstract_backend.AbstractBackend
def jit(fun: Callable,
backend: Union[Text, AbstractBackend] = None,
backend_argnum: Optional[int] = None,
static_argnums: Union[int, Iterable[int]] = (), device=None,
xla_backend: Optional[str] = None) -> Callable:
"""
Return a jitted or graph-compiled version of `fun`
for JAX backend. For all other backends returns `fun`.
Args:
fun: Callable
backend: The backend.
backend_argnum: Labels the argument of the decorated function which
specifies the backend.
This argument will be treated
as static in the sense of static_argnums.
If backend_argnum is specified, backend must be None.
static_argnums: Label the arguments which will be statically compiled
against.
xla_backend: Specifies the backend ('gpu', 'cpu'...) against which
XLA is to run.
device: The device on which the jitted function should run (forwarded to
the backend's `jit` implementation).
Raises:
ValueError: If backend_argnum is specified but backend is not None.
If backend_argnum is specified but the corresponding
argument neither is nor labels a backend.
Returns:
Callable: jitted/graph-compiled version of `fun`, or just `fun`.
"""
argnum_mode = False
if backend_argnum is not None:
if backend is not None:
raise ValueError("backend must be None if backend_argnum is specified.")
argnum_mode = True
static_argnums = tuple(list(static_argnums) + [backend_argnum,])
if not argnum_mode:
if backend is None:
backend = backend_contextmanager.get_default_backend()
backend_obj = backends.backend_factory.get_backend(backend)
@functools.wraps(fun)
def wrapper(*args, **kwargs):
jitted = backend_obj.jit(fun, static_argnums=static_argnums,
device=device, backend=xla_backend)
return jitted(*args, **kwargs)
else:
@functools.wraps(fun)
def wrapper(*args, **kwargs):
backend = args[backend_argnum]
try:
backend_obj = backends.backend_factory.get_backend(backend)
except ValueError as error:
errstr = (f"backend_argnum={backend_argnum} was specified"
f"but the corresponding argument {args[backend_argnum]}"
f"did not specify a backend.")
raise ValueError(errstr) from error
jitted = backend_obj.jit(fun, static_argnums=static_argnums,
device=device, backend=xla_backend)
return jitted(*args, **kwargs)
return wrapper
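# Illustrative usage (per the docstring above, only the JAX backend actually
# compiles; other backends effectively run `fun` unchanged):
#   @jit
#   def add(x, y):
#       return x + y
#   result = add(a, b)  # a, b are hypothetical tensors of the active backend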
|
import os.path as op
from collections import namedtuple
import numpy as np
import pytest
import matplotlib
import matplotlib.pyplot as plt
from mne import (read_events, Epochs, pick_channels_evoked, read_cov,
compute_proj_evoked)
from mne.channels import read_layout
from mne.io import read_raw_fif
from mne.time_frequency.tfr import AverageTFR
from mne.utils import run_tests_if_main
from mne.viz import (plot_topo_image_epochs, _get_presser,
mne_analyze_colormap, plot_evoked_topo)
from mne.viz.evoked import _line_plot_onselect
from mne.viz.utils import _fake_click
from mne.viz.topo import (_plot_update_evoked_topo_proj, iter_topography,
_imshow_tfr)
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
evoked_fname = op.join(base_dir, 'test-ave.fif')
raw_fname = op.join(base_dir, 'test_raw.fif')
event_name = op.join(base_dir, 'test-eve.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
event_id, tmin, tmax = 1, -0.2, 0.2
layout = read_layout('Vectorview-all')
def _get_events():
"""Get events."""
return read_events(event_name)
def _get_picks(raw):
"""Get picks."""
return [0, 1, 2, 6, 7, 8, 306, 340, 341, 342]  # take only a few channels
def _get_epochs():
"""Get epochs."""
raw = read_raw_fif(raw_fname)
raw.add_proj([], remove_existing=True)
events = _get_events()
picks = _get_picks(raw)
# bad proj warning
epochs = Epochs(raw, events[:10], event_id, tmin, tmax, picks=picks)
return epochs
def _get_epochs_delayed_ssp():
"""Get epochs with delayed SSP."""
raw = read_raw_fif(raw_fname)
events = _get_events()
picks = _get_picks(raw)
reject = dict(mag=4e-12)
with pytest.warns(RuntimeWarning, match='projection'):
epochs_delayed_ssp = Epochs(
raw, events[:10], event_id, tmin, tmax, picks=picks,
proj='delayed', reject=reject)
return epochs_delayed_ssp
def test_plot_joint():
"""Test joint plot."""
evoked = _get_epochs().average()
evoked.plot_joint(ts_args=dict(time_unit='s'),
topomap_args=dict(time_unit='s'))
def return_inds(d): # to test function kwarg to zorder arg of evoked.plot
return list(range(d.shape[0]))
evoked.plot_joint(title='test', topomap_args=dict(contours=0, res=8,
time_unit='ms'),
ts_args=dict(spatial_colors=True, zorder=return_inds,
time_unit='s'))
pytest.raises(ValueError, evoked.plot_joint, ts_args=dict(axes=True,
time_unit='s'))
axes = plt.subplots(nrows=3)[-1].flatten().tolist()
evoked.plot_joint(times=[0], picks=[6, 7, 8], ts_args=dict(axes=axes[0]),
topomap_args={"axes": axes[1:], "time_unit": "s"})
with pytest.raises(ValueError, match='array of length 6'):
evoked.plot_joint(picks=[6, 7, 8], ts_args=dict(axes=axes[0]),
topomap_args=dict(axes=axes[2:]))
plt.close('all')
# test proj options
assert len(evoked.info['projs']) == 0
evoked.pick_types(meg=True)
evoked.add_proj(compute_proj_evoked(
evoked, n_mag=1, n_grad=1, meg='combined'))
assert len(evoked.info['projs']) == 1
with pytest.raises(ValueError, match='must match ts_args'):
evoked.plot_joint(ts_args=dict(proj=True),
topomap_args=dict(proj=False))
evoked.plot_joint(ts_args=dict(proj='reconstruct'),
topomap_args=dict(proj='reconstruct'))
plt.close('all')
def test_plot_topo():
"""Test plotting of ERP topography."""
# Show topography
evoked = _get_epochs().average()
# should auto-find layout
plot_evoked_topo([evoked, evoked], merge_grads=True,
background_color='w')
picked_evoked = evoked.copy().pick_channels(evoked.ch_names[:3])
picked_evoked_eeg = evoked.copy().pick_types(meg=False, eeg=True)
picked_evoked_eeg.pick_channels(picked_evoked_eeg.ch_names[:3])
# test scaling
for ylim in [dict(mag=[-600, 600]), None]:
plot_evoked_topo([picked_evoked] * 2, layout, ylim=ylim)
for evo in [evoked, [evoked, picked_evoked]]:
pytest.raises(ValueError, plot_evoked_topo, evo, layout,
color=['y', 'b'])
evoked_delayed_ssp = _get_epochs_delayed_ssp().average()
ch_names = evoked_delayed_ssp.ch_names[:3] # make it faster
picked_evoked_delayed_ssp = pick_channels_evoked(evoked_delayed_ssp,
ch_names)
fig = plot_evoked_topo(picked_evoked_delayed_ssp, layout,
proj='interactive')
func = _get_presser(fig)
event = namedtuple('Event', ['inaxes', 'xdata', 'ydata'])
func(event(inaxes=fig.axes[0], xdata=fig.axes[0]._mne_axs[0].pos[0],
ydata=fig.axes[0]._mne_axs[0].pos[1]))
func(event(inaxes=fig.axes[0], xdata=0, ydata=0))
params = dict(evokeds=[picked_evoked_delayed_ssp],
times=picked_evoked_delayed_ssp.times,
fig=fig, projs=picked_evoked_delayed_ssp.info['projs'])
bools = [True] * len(params['projs'])
with pytest.warns(RuntimeWarning, match='projection'):
_plot_update_evoked_topo_proj(params, bools)
# should auto-generate layout
plot_evoked_topo(picked_evoked_eeg.copy(),
fig_background=np.zeros((4, 3, 3)), proj=True,
background_color='k')
# Test RMS plot of grad pairs
picked_evoked.plot_topo(merge_grads=True, background_color='w')
plt.close('all')
for ax, idx in iter_topography(evoked.info, legend=True):
ax.plot(evoked.data[idx], color='red')
# test status bar message
if idx != -1:
assert (evoked.ch_names[idx] in ax.format_coord(.5, .5))
assert idx == -1
plt.close('all')
cov = read_cov(cov_fname)
cov['projs'] = []
evoked.pick_types(meg=True).plot_topo(noise_cov=cov)
plt.close('all')
# test plot_topo
evoked.plot_topo() # should auto-find layout
_line_plot_onselect(0, 200, ['mag', 'grad'], evoked.info, evoked.data,
evoked.times)
plt.close('all')
for ax, idx in iter_topography(evoked.info): # brief test with false
ax.plot([0, 1, 2])
break
plt.close('all')
def test_plot_topo_nirs(fnirs_evoked):
"""Test plotting of ERP topography for nirs data."""
fnirs_evoked.pick(picks='hbo')
fig = plot_evoked_topo(fnirs_evoked)
assert len(fig.axes) == 1
plt.close('all')
def test_plot_topo_single_ch():
"""Test single channel topoplot with time cursor."""
evoked = _get_epochs().average()
evoked2 = evoked.copy()
# test plotting several evokeds on different time grids
evoked.crop(-.19, 0)
evoked2.crop(.05, .19)
fig = plot_evoked_topo([evoked, evoked2], background_color='w')
# test status bar message
ax = plt.gca()
assert ('MEG 0113' in ax.format_coord(.065, .63))
num_figures_before = len(plt.get_fignums())
_fake_click(fig, fig.axes[0], (0.08, 0.65))
assert num_figures_before + 1 == len(plt.get_fignums())
fig = plt.gcf()
ax = plt.gca()
_fake_click(fig, ax, (.5, .5), kind='motion') # cursor should appear
assert (isinstance(ax._cursorline, matplotlib.lines.Line2D))
_fake_click(fig, ax, (1.5, 1.5), kind='motion') # cursor should disappear
assert ax._cursorline is None
plt.close('all')
def test_plot_topo_image_epochs():
"""Test plotting of epochs image topography."""
title = 'ERF images - MNE sample data'
epochs = _get_epochs()
epochs.load_data()
cmap = mne_analyze_colormap(format='matplotlib')
data_min = epochs._data.min()
plt.close('all')
fig = plot_topo_image_epochs(epochs, sigma=0.5, vmin=-200, vmax=200,
colorbar=True, title=title, cmap=cmap)
assert epochs._data.min() == data_min
num_figures_before = len(plt.get_fignums())
_fake_click(fig, fig.axes[0], (0.08, 0.64))
assert num_figures_before + 1 == len(plt.get_fignums())
# test for auto-showing a colorbar when only 1 sensor type
ep = epochs.copy().pick_types(meg=False, eeg=True)
fig = plot_topo_image_epochs(ep, vmin=None, vmax=None, colorbar=None,
cmap=cmap)
ax = [x for x in fig.get_children() if isinstance(x, matplotlib.axes.Axes)]
qm_cmap = [y.cmap for x in ax for y in x.get_children()
if isinstance(y, matplotlib.collections.QuadMesh)]
assert qm_cmap[0] is cmap
plt.close('all')
def test_plot_tfr_topo():
"""Test plotting of TFR data."""
epochs = _get_epochs()
n_freqs = 3
nave = 1
data = np.random.RandomState(0).randn(len(epochs.ch_names),
n_freqs, len(epochs.times))
tfr = AverageTFR(epochs.info, data, epochs.times, np.arange(n_freqs), nave)
plt.close('all')
fig = tfr.plot_topo(baseline=(None, 0), mode='ratio',
title='Average power', vmin=0., vmax=14.)
# test opening tfr by clicking
num_figures_before = len(plt.get_fignums())
# could use np.reshape(fig.axes[-1].images[0].get_extent(), (2, 2)).mean(1)
with pytest.warns(RuntimeWarning, match='not masking'):
_fake_click(fig, fig.axes[0], (0.08, 0.65))
assert num_figures_before + 1 == len(plt.get_fignums())
plt.close('all')
tfr.plot([4], baseline=(None, 0), mode='ratio', show=False, title='foo')
pytest.raises(ValueError, tfr.plot, [4], yscale='lin', show=False)
# nonuniform freqs
freqs = np.logspace(*np.log10([3, 10]), num=3)
tfr = AverageTFR(epochs.info, data, epochs.times, freqs, nave)
fig = tfr.plot([4], baseline=(None, 0), mode='mean', vmax=14., show=False)
assert fig.axes[0].get_yaxis().get_scale() == 'log'
# one timesample
tfr = AverageTFR(epochs.info, data[:, :, [0]], epochs.times[[1]],
freqs, nave)
with pytest.warns(None): # matplotlib equal left/right
tfr.plot([4], baseline=None, vmax=14., show=False, yscale='linear')
# one frequency bin, log scale requested: as it doesn't make sense
# to plot log scale for one value, we test whether yscale is set to linear
vmin, vmax = 0., 2.
fig, ax = plt.subplots()
tmin, tmax = epochs.times[0], epochs.times[-1]
with pytest.warns(RuntimeWarning, match='not masking'):
_imshow_tfr(ax, 3, tmin, tmax, vmin, vmax, None, tfr=data[:, [0], :],
freq=freqs[[-1]], x_label=None, y_label=None,
colorbar=False, cmap=('RdBu_r', True), yscale='log')
fig = plt.gcf()
assert fig.axes[0].get_yaxis().get_scale() == 'linear'
# ValueError when freq[0] == 0 and yscale == 'log'
these_freqs = freqs[:3].copy()
these_freqs[0] = 0
with pytest.warns(RuntimeWarning, match='not masking'):
pytest.raises(ValueError, _imshow_tfr, ax, 3, tmin, tmax, vmin, vmax,
None, tfr=data[:, :3, :], freq=these_freqs, x_label=None,
y_label=None, colorbar=False, cmap=('RdBu_r', True),
yscale='log')
run_tests_if_main()
|
import asyncio
from collections import OrderedDict
import logging
from typing import Callable, Dict
import async_timeout
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.core import callback
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
from homeassistant.loader import bind_hass
_LOGGER = logging.getLogger(__name__)
DOMAIN = "system_health"
INFO_CALLBACK_TIMEOUT = 5
@bind_hass
@callback
def async_register_info(
hass: HomeAssistantType,
domain: str,
info_callback: Callable[[HomeAssistantType], Dict],
):
"""Register an info callback."""
data = hass.data.setdefault(DOMAIN, OrderedDict()).setdefault("info", OrderedDict())
data[domain] = info_callback
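# Illustrative registration from another integration (hypothetical domain and
# callback; the callback receives `hass` and returns a dict of info):
#   async def my_info(hass):
#       return {"version": "1.2.3"}
#   async_register_info(hass, "my_integration", my_info)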
async def async_setup(hass: HomeAssistantType, config: ConfigType):
"""Set up the System Health component."""
hass.components.websocket_api.async_register_command(handle_info)
return True
async def _info_wrapper(hass, info_callback):
"""Wrap info callback."""
try:
with async_timeout.timeout(INFO_CALLBACK_TIMEOUT):
return await info_callback(hass)
except asyncio.TimeoutError:
return {"error": "Fetching info timed out"}
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("Error fetching info")
return {"error": str(err)}
@websocket_api.async_response
@websocket_api.websocket_command({vol.Required("type"): "system_health/info"})
async def handle_info(
hass: HomeAssistantType, connection: websocket_api.ActiveConnection, msg: Dict
):
"""Handle an info request."""
info_callbacks = hass.data.get(DOMAIN, {}).get("info", {})
data = OrderedDict()
data["homeassistant"] = await hass.helpers.system_info.async_get_system_info()
if info_callbacks:
for domain, domain_data in zip(
info_callbacks,
await asyncio.gather(
*(
_info_wrapper(hass, info_callback)
for info_callback in info_callbacks.values()
)
),
):
data[domain] = domain_data
connection.send_message(websocket_api.result_message(msg["id"], data))
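# Illustrative websocket request handled by this command (the "id" field is
# part of the websocket API envelope; its value here is arbitrary):
#   {"id": 5, "type": "system_health/info"}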
|
import operator
import re
import collections
from collections import Counter
from functools import reduce
def get_id_pairs(track_list):
"""Create a list of (sid, eid) tuples from a list of tracks.
Tracks without an eid will have an eid of None."""
return [(t["id"], t.get("playlistEntryId")) for t in track_list]
def find_playlist_changes(orig_tracks, modified_tracks):
"""Finds the changes between two playlists.
Returns a tuple of (deletions, additions, staying).
Deletions and additions are both Counters of (sid, eid) tuples;
staying is a set of (sid, eid) tuples.
:param orig_tracks: the original playlist.
:param modified_tracks: the modified playlist."""
s_pairs = get_id_pairs(orig_tracks)
# Three cases for desired pairs:
# 1: (sid, eid from this playlist): either no action or add
# (if someone adds a dupe from the same playlist)
# 2: (sid, eid not from this playlist): add
# 3: (sid, None): add
d_pairs = get_id_pairs(modified_tracks)
# Counters are multisets.
s_count = Counter(s_pairs)
d_count = Counter(d_pairs)
to_del = s_count - d_count
to_add = d_count - s_count
to_keep = set(s_count & d_count) # guaranteed to be counts of 1
return (to_del, to_add, to_keep)
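# For example, with id pairs extracted from the tracks (illustrative ids):
#   original: [('s1', 'e1'), ('s2', 'e2')], modified: [('s1', 'e1'), ('s3', None)]
#   -> deletions Counter({('s2', 'e2'): 1}), additions Counter({('s3', None): 1}),
#      staying {('s1', 'e1')}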
def filter_song_md(song, md_list=['id'], no_singletons=True):
"""Returns a list of desired metadata from a song.
Does not modify the given song.
:param song: Dictionary representing a GM song.
:param md_list: (optional) the ordered list of metadata to select.
:param no_singletons: (optional) if md_list is of length 1, return the data,
not a singleton list.
"""
filtered = [song[md_type] for md_type in md_list]
if len(md_list) == 1 and no_singletons:
return filtered[0]
else:
return filtered
def build_song_rep(song, md_list=['title', 'artist', 'album'], divider=" - "):
"""Returns a string of the requested metadata types.
The order of md_list determines order in the string.
:param song: Dictionary representing a GM song.
:param md_list: (optional) list of valid GM metadata types.
:param divider: (optional) string to join the metadata.
"""
filtered = filter_song_md(song, md_list, no_singletons=False)
return divider.join(filtered)
def reorder_to(l, order):
"""Returns a list, reordered to a specific ordering.
:param l: the list to reorder. It is not modified.
:param order: a list containing the new ordering,
eg [2,1,0] to reverse a list of length 3
"""
# Zip on ordering, sort by it, then remove ordering.
return [el[1] for el in sorted(zip(order, l), key=lambda el: el[0])]
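# For example: reorder_to(['a', 'b', 'c'], [2, 1, 0]) -> ['c', 'b', 'a'].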
def build_queries_from(f, regex, cap_types, cap_pr, encoding='ascii'):
"""Returns a list of queries from the given file.
Queries have the form [(<query>, <metadata type>), ...]
:param f: opened file, ready to read.
:param regex: a compiled regex to capture query info from file lines.
:param cap_types: the GM metadata types of the regex captures.
:param cap_pr: the priority of the captures.
:param encoding: (optional) encoding of the file.
"""
queries = []
for line in f:
matches = regex.match(line)
if matches:
# Zip captures to their types and order by priority to build a query.
query = reorder_to(
list(zip(matches.groups(), cap_types)),
cap_pr)
queries.append(query)
return queries
def build_query_rep(query, divider=" - "):
"""Build a string representation of a query, without metadata types"""
return divider.join([el[0] for el in query])
# Not mine. From: http://en.wikipedia.org/wiki/Function_composition_(computer_science)
def compose(*funcs, **kfuncs):
"""Compose a group of functions (f(g(h(..)))) into (fogoh...)(...)"""
return reduce(lambda f, g: lambda *args, **kaargs: f(g(*args, **kaargs)), funcs)
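# For example: compose(str.upper, str.strip)("  hi  ") -> "HI"
# (the rightmost function, str.strip, is applied first).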
class SongMatcher:
"""Matches GM songs to user-provided metadata."""
def __init__(self, songs, log_metadata=['title', 'artist', 'album']):
"""Prepares songs for matching and determines logging options.
:param songs: list of GM songs to match against.
:param log_metadata: list of valid GM metadata types to show in the log.
order given will be order outputted.
"""
# If match times are a problem, could
# read to an indexed format here.
self.library = songs
# Lines of a log of how matching went.
self.log_lines = []
self.log_metadata = log_metadata
def build_log(self):
"""Returns a string built from the current log lines."""
# Keep the lines as text; joining str with bytes fails on Python 3.
return "\n".join(self.log_lines)
def build_song_for_log(self, song):
"""Returns a string built from a song using log options.
:param song:
"""
return build_song_rep(song, self.log_metadata)
class SearchModifier:
"""Controls how to query the library.
Implementations define a comparator, and 2 functions
(transformers) to modify the query and song data on the fly.
Sometimes it makes sense to chain implementations.
In this case, transformers are composed and the most
outward comparator is used.
"""
def __init__(self, q_t, s_t, comp):
# Comparator - defines how to compare query and song data.
# f(song data, query) -> truthy value
self.comp = comp
# Query and song transformers -
# manipulate query, song before comparison.
# f(unicode) -> unicode
self.q_t = q_t
self.s_t = s_t
# Some modifiers that are useful in my library:
# Ignore capitalization:
ignore_caps = SearchModifier(
# Change query and song to lowercase,
# before comparing with ==.
str.lower,
str.lower,
operator.eq
)
# Wildcard punctuation (also non ascii chars):
ignore_punc = SearchModifier(
# Replace query with a regex, where punc matches any (or no) characters.
lambda q: re.sub(r"[^a-zA-Z0-9\s]", ".*", q),
# Don't change the song.
lambda s: s,
# The comparator becomes regex matching.
lambda sd, q: re.search(q, sd)
)
implemented_modifiers = (ignore_caps, ignore_punc)
# The modifiers and order to be used in auto query mode.
auto_modifiers = implemented_modifiers
# Tiebreakers are used when there are multiple results from a query.
@staticmethod
def manual_tiebreak(query, results):
"""Prompts a user to choose a result from multiple.
For use with query_library as a tiebreaker.
Returns a singleton list or None.
:param query: the original query.
:param results: list of results.
"""
print()
print("Manual tiebreak for query:")
print(build_query_rep(query))
print()
print("Enter the number next to your choice:")
print()
print("0: None of these.")
menu_lines = []
key = 1
for song in results:
menu_lines.append(str(key) + ": " + build_song_rep(song))
key += 1
print("\n".join(menu_lines))
choice = -1
while not (0 <= choice <= len(results)):
try:
choice = int(input("Choice: "))
except ValueError:
pass
return None if choice == 0 else [results[choice - 1]]
# Tiebreaker which does nothing with results.
@staticmethod
def no_tiebreak(query, results):
return results
# Exception thrown when a tie is broken.
class TieBroken(Exception):
def __init__(self, results):
self.results = results
# A named tuple to hold the frozen args when querying recursively.
QueryState = collections.namedtuple('QueryState', 'orig t_breaker mods auto')
def query_library(self, query, tie_breaker=no_tiebreak, modifiers=None, auto=False):
"""Queries the library for songs.
returns a list of matches, or None.
"""
if not modifiers:
modifiers = []
try:
if not auto:
return self.query_library_rec(query, self.library,
self.QueryState(query, tie_breaker, modifiers, auto))
else:
# Auto mode attempts a search with the current modifiers.
# If we get 1 result, we return it.
# If we get no results, we add the next mod from auto_modifiers and try again.
# If we get many results, we branch and try with another modifier.
# On no results, we tiebreak our old results.
# Otherwise, we return the branched results.
current_mods = modifiers[:]
# Be ready to use any mods from the auto list which we aren't using already.
future_mods = (m for m in self.auto_modifiers if m not in modifiers)
while True: # broken when future_mods runs out
# will not break ties in auto mode
results = self.query_library_rec(
query, self.library,
self.QueryState(query, tie_breaker, current_mods, auto))
if not results:
try:
current_mods.append(next(future_mods))
except StopIteration:
return results
elif len(results) == 1:
return results
else:
# Received many results from our current search.
# Branch; try more modifiers to improve the results.
# If results, use them; otherwise tiebreak ours.
try:
current_mods.append(next(future_mods))
except StopIteration:
raise self.TieBroken(tie_breaker.__func__(query, results))
next_results = self.query_library(query, tie_breaker, current_mods, auto)
if not next_results:
raise self.TieBroken(tie_breaker.__func__(query, results))
else:
return next_results
except self.TieBroken as tie:
return tie.results
def query_library_rec(self, query, library, state):
"""Returns a list of matches, or None.
Recursive querying routine for query_library.
"""
if len(query) == 0:
return None
# Composing applies right to left; currently mods are left to right.
# Reverse then append the default modifier for proper compose order.
mods_to_apply = [sm for sm in reversed(state.mods)]
mods_to_apply.append(self.SearchModifier(
lambda q: q,
lambda sd: sd,
operator.eq))
# Create the transformers by composing all of them.
q_t = compose(*list(map((lambda sm: sm.q_t), mods_to_apply)))
s_t = compose(*list(map((lambda sm: sm.s_t), mods_to_apply)))
# Use the most outward comparator.
comp = mods_to_apply[0].comp
q, md_type = query[0]
# No need to repeatedly transform q.
q_transformed = q_t(q)
# GM limits libraries to 20k songs; this isn't a big performance hit.
results = [s for s in library if comp(s_t(s[md_type]), q_transformed)]
# Check for immediate return conditions.
if not results:
return None
if len(results) == 1:
return [results[0]]
# Try to refine results by querying them with the next metadata in the query.
next_query = query[1:]
next_results = self.query_library_rec(next_query, results, state)
if not next_results:
# Don't break ties in auto mode; it's handled a level up.
if not state.auto:
raise self.TieBroken(state.t_breaker(state.orig, results))
else:
return results
# Now we have multiple for both our query and the next.
# Always prefer the next query to ours.
return next_results
def match(self, queries, tie_breaker=manual_tiebreak, auto=True):
"""Runs queries against the library; returns a list of songs.
Match success is logged.
:param queries: list of queries, each a list of (query value, metadata type)
pairs in order of precedence,
eg [('The Car Song', 'title'), ('The Cat Empire', 'artist')]
:param tie_breaker: (optional) tie breaker to use.
:param auto: (optional) When True, automagically manage modifiers to find results.
"""
matches = []
self.log_lines.append("## Starting match of " + str(len(queries)) + " queries ##")
for query in queries:
res = self.query_library(query, tie_breaker, auto=auto)
if res:
matches += res
# Log the results.
# The alert precedes the information for a quick view of what happened.
alert = None
if res is None:
alert = "!!"
elif len(res) == 1:
alert = "=="
else:
alert = "??"
# Each query shows the alert and the query.
self.log_lines.append(alert + " " + build_query_rep(query))
# Displayed on the line below the alert (might be useful later).
extra_info = None
if res:
for song in res:
self.log_lines.append(
(extra_info if extra_info else (' ' * len(alert))) +
" " +
self.build_song_for_log(song))
elif extra_info:
self.log_lines.append(extra_info)
return matches
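# Illustrative usage (assumes `library` is a list of GM song dictionaries as
# described in SongMatcher.__init__; query values are made up):
#   matcher = SongMatcher(library)
#   queries = [[("The Car Song", "title"), ("The Cat Empire", "artist")]]
#   matches = matcher.match(queries)
#   print(matcher.build_log())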
|
import socket
import sys
import types
from unittest.mock import Mock, patch
from case import mock
from kombu.utils import compat
from kombu.utils.compat import entrypoints, maybe_fileno
def test_entrypoints():
with patch(
'kombu.utils.compat.importlib_metadata.entry_points', create=True
) as iterep:
eps = [Mock(), Mock()]
iterep.return_value = {'kombu.test': eps}
assert list(entrypoints('kombu.test'))
iterep.assert_called_with()
eps[0].load.assert_called_with()
eps[1].load.assert_called_with()
def test_maybe_fileno():
assert maybe_fileno(3) == 3
f = Mock(name='file')
assert maybe_fileno(f) is f.fileno()
f.fileno.side_effect = ValueError()
assert maybe_fileno(f) is None
class test_detect_environment:
def test_detect_environment(self):
try:
compat._environment = None
X = compat.detect_environment()
assert compat._environment == X
Y = compat.detect_environment()
assert Y == X
finally:
compat._environment = None
@mock.module_exists('eventlet', 'eventlet.patcher')
def test_detect_environment_eventlet(self):
with patch('eventlet.patcher.is_monkey_patched', create=True) as m:
assert sys.modules['eventlet']
m.return_value = True
env = compat._detect_environment()
m.assert_called_with(socket)
assert env == 'eventlet'
@mock.module_exists('gevent')
def test_detect_environment_gevent(self):
with patch('gevent.socket', create=True) as m:
prev, socket.socket = socket.socket, m.socket
try:
assert sys.modules['gevent']
env = compat._detect_environment()
assert env == 'gevent'
finally:
socket.socket = prev
def test_detect_environment_no_eventlet_or_gevent(self):
try:
sys.modules['eventlet'] = types.ModuleType('eventlet')
sys.modules['eventlet.patcher'] = types.ModuleType('patcher')
assert compat._detect_environment() == 'default'
finally:
sys.modules.pop('eventlet.patcher', None)
sys.modules.pop('eventlet', None)
compat._detect_environment()
try:
sys.modules['gevent'] = types.ModuleType('gevent')
assert compat._detect_environment() == 'default'
finally:
sys.modules.pop('gevent', None)
compat._detect_environment()
|
revision = "5bc47fa7cac4"
down_revision = "c05a8998b371"
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column(
"roles", sa.Column("third_party", sa.Boolean(), nullable=True, default=False)
)
def downgrade():
op.drop_column("roles", "third_party")
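# This revision can be applied or reverted with the standard Alembic CLI
# (assumes a configured Alembic environment):
#   alembic upgrade 5bc47fa7cac4
#   alembic downgrade c05a8998b371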
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from postqueue import PostqueueCollector
##########################################################################
class TestPostqueueCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('PostqueueCollector', {
})
self.collector = PostqueueCollector(config, {})
def test_import(self):
self.assertTrue(PostqueueCollector)
@patch.object(Collector, 'publish')
def test_should_work_with_emails_in_queue(self, publish_mock):
patch_collector = patch.object(
PostqueueCollector,
'get_postqueue_output',
Mock(return_value=self.getFixture(
'postqueue_emails').getvalue()))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
metrics = {
'count': 3
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_with_empty_queue(self, publish_mock):
patch_collector = patch.object(
PostqueueCollector,
'get_postqueue_output',
Mock(return_value=self.getFixture(
'postqueue_empty').getvalue()))
patch_collector.start()
self.collector.collect()
patch_collector.stop()
metrics = {
'count': 0
}
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import io
from PIL import Image
from pyzbar import pyzbar
from homeassistant.components.image_processing import (
CONF_ENTITY_ID,
CONF_NAME,
CONF_SOURCE,
ImageProcessingEntity,
)
from homeassistant.core import split_entity_id
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the QR code image processing platform."""
# pylint: disable=unused-argument
entities = []
for camera in config[CONF_SOURCE]:
entities.append(QrEntity(camera[CONF_ENTITY_ID], camera.get(CONF_NAME)))
add_entities(entities)
class QrEntity(ImageProcessingEntity):
"""A QR image processing entity."""
def __init__(self, camera_entity, name):
"""Initialize QR image processing entity."""
super().__init__()
self._camera = camera_entity
if name:
self._name = name
else:
self._name = f"QR {split_entity_id(camera_entity)[1]}"
self._state = None
@property
def camera_entity(self):
"""Return camera entity id from process pictures."""
return self._camera
@property
def state(self):
"""Return the state of the entity."""
return self._state
@property
def name(self):
"""Return the name of the entity."""
return self._name
def process_image(self, image):
"""Process image."""
stream = io.BytesIO(image)
img = Image.open(stream)
barcodes = pyzbar.decode(img)
if barcodes:
self._state = barcodes[0].data.decode("utf-8")
else:
self._state = None
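# Example configuration.yaml entry (illustrative; the platform key and camera
# entity id are assumptions, not taken from this file):
#   image_processing:
#     - platform: qrcode
#       source:
#         - entity_id: camera.front_door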
|
from __future__ import print_function
import argparse
import os
import sys
_stash = globals()['_stash']
collapseuser = _stash.libcore.collapseuser
def main(args):
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("-b", "--basename", action="store_true", help="show basename only")
p.add_argument('-f', '--fullname', action='store_true', help='show full path')
ns = p.parse_args(args)
status = 0
try:
if ns.fullname:
print(os.getcwd())
elif ns.basename:
print(os.path.basename(os.getcwd()))
else:
print(collapseuser(os.getcwd()))
except Exception as err:
print("pwd: {}: {!s}".format(type(err).__name__, err), file=sys.stderr)
status = 1
sys.exit(status)
if __name__ == "__main__":
main(sys.argv[1:])
|
from pyecobee import (
ECOBEE_API_KEY,
ECOBEE_CONFIG_FILENAME,
ECOBEE_REFRESH_TOKEN,
Ecobee,
)
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_API_KEY
from homeassistant.core import HomeAssistantError
from homeassistant.util.json import load_json
from .const import _LOGGER, CONF_REFRESH_TOKEN, DATA_ECOBEE_CONFIG, DOMAIN
class EcobeeFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle an ecobee config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize the ecobee flow."""
self._ecobee = None
async def async_step_user(self, user_input=None):
"""Handle a flow initiated by the user."""
if self._async_current_entries():
# Config entry already exists, only one allowed.
return self.async_abort(reason="single_instance_allowed")
errors = {}
stored_api_key = (
self.hass.data[DATA_ECOBEE_CONFIG].get(CONF_API_KEY)
if DATA_ECOBEE_CONFIG in self.hass.data
else ""
)
if user_input is not None:
# Use the user-supplied API key to attempt to obtain a PIN from ecobee.
self._ecobee = Ecobee(config={ECOBEE_API_KEY: user_input[CONF_API_KEY]})
if await self.hass.async_add_executor_job(self._ecobee.request_pin):
# We have a PIN; move to the next step of the flow.
return await self.async_step_authorize()
errors["base"] = "pin_request_failed"
return self.async_show_form(
step_id="user",
data_schema=vol.Schema(
{vol.Required(CONF_API_KEY, default=stored_api_key): str}
),
errors=errors,
)
async def async_step_authorize(self, user_input=None):
"""Present the user with the PIN so that the app can be authorized on ecobee.com."""
errors = {}
if user_input is not None:
# Attempt to obtain tokens from ecobee and finish the flow.
if await self.hass.async_add_executor_job(self._ecobee.request_tokens):
# Refresh token obtained; create the config entry.
config = {
CONF_API_KEY: self._ecobee.api_key,
CONF_REFRESH_TOKEN: self._ecobee.refresh_token,
}
return self.async_create_entry(title=DOMAIN, data=config)
errors["base"] = "token_request_failed"
return self.async_show_form(
step_id="authorize",
errors=errors,
description_placeholders={"pin": self._ecobee.pin},
)
async def async_step_import(self, import_data):
"""
Import ecobee config from configuration.yaml.
Triggered by async_setup only if a config entry doesn't already exist.
If ecobee.conf exists, we will attempt to validate the credentials
and create an entry if valid. Otherwise, we will delegate to the user
step so that the user can continue the config flow.
"""
try:
legacy_config = await self.hass.async_add_executor_job(
load_json, self.hass.config.path(ECOBEE_CONFIG_FILENAME)
)
config = {
ECOBEE_API_KEY: legacy_config[ECOBEE_API_KEY],
ECOBEE_REFRESH_TOKEN: legacy_config[ECOBEE_REFRESH_TOKEN],
}
except (HomeAssistantError, KeyError):
_LOGGER.debug(
"No valid ecobee.conf configuration found for import, delegating to user step"
)
return await self.async_step_user(
user_input={
CONF_API_KEY: self.hass.data[DATA_ECOBEE_CONFIG].get(CONF_API_KEY)
}
)
ecobee = Ecobee(config=config)
if await self.hass.async_add_executor_job(ecobee.refresh_tokens):
# Credentials found and validated; create the entry.
_LOGGER.debug(
"Valid ecobee configuration found for import, creating configuration entry"
)
return self.async_create_entry(
title=DOMAIN,
data={
CONF_API_KEY: ecobee.api_key,
CONF_REFRESH_TOKEN: ecobee.refresh_token,
},
)
return await self.async_step_user(
user_input={
CONF_API_KEY: self.hass.data[DATA_ECOBEE_CONFIG].get(CONF_API_KEY)
}
)
|
import argparse
import chainer
from chainercv.datasets import ade20k_semantic_segmentation_label_names
from chainercv.datasets import cityscapes_semantic_segmentation_label_names
from chainercv.datasets import voc_semantic_segmentation_label_names
from chainercv.links import DeepLabV3plusXception65
import tensorflow as tf
_n_class = {
'voc': len(voc_semantic_segmentation_label_names),
'cityscapes': len(cityscapes_semantic_segmentation_label_names),
'ade20k': len(ade20k_semantic_segmentation_label_names),
}
_model_class = {
'xception65': DeepLabV3plusXception65,
}
_last_layer = ('decoder', 'conv_logits')
_eliminated_channels = {
'voc': [],
'cityscapes': [],
'ade20k': [0],
}
def load_param(param, weight, transpose=None):
if isinstance(param, chainer.Variable):
param = param.array
if transpose is not None:
weight = weight.transpose(transpose)
param[:] = weight
def get_model(name, task):
n_class = _n_class[task]
model = _model_class[name](n_class, min_input_size=(513, 513),
scales=(1.0,), flip=False,
extractor_kwargs={},
aspp_kwargs={}, decoder_kwargs={})
return model
def get_session(graph_path):
# load graph
with tf.gfile.GFile(graph_path, "rb") as f:
graph_def = tf.GraphDef()
graph_def.ParseFromString(f.read())
with tf.Graph().as_default() as graph:
tf.import_graph_def(
graph_def,
input_map=None,
return_elements=None,
name='deeplab',
producer_op_list=None
)
    gpu_options = tf.GPUOptions(visible_device_list="0", allow_growth=True)
    config = tf.ConfigProto(gpu_options=gpu_options)
sess = tf.Session(graph=graph, config=config)
return sess
def get_weightmap(model):
weightmap = {}
if model == 'xception65':
weightmap[('feature_extractor', 'entryflow_conv1')] = (
'Conv2DBNActiv', 'deeplab/xception_65/entry_flow/conv1_1')
weightmap[('feature_extractor', 'entryflow_conv2')] = (
'Conv2DBNActiv', 'deeplab/xception_65/entry_flow/conv1_2')
weightmap[('feature_extractor', 'entryflow_block1')] = (
'XceptionBlock', 'deeplab/xception_65/entry_flow/block1/unit_1')
weightmap[('feature_extractor', 'entryflow_block2')] = (
'XceptionBlock', 'deeplab/xception_65/entry_flow/block2/unit_1')
weightmap[('feature_extractor', 'entryflow_block3')] = (
'XceptionBlock', 'deeplab/xception_65/entry_flow/block3/unit_1')
for i in range(1, 17):
weightmap[('feature_extractor',
'middleflow_block{}'.format(i))] = (
'XceptionBlock',
'deeplab/xception_65/middle_flow/block1/unit_{}'.format(i))
weightmap[('feature_extractor', 'exitflow_block1')] = (
'XceptionBlock', 'deeplab/xception_65/exit_flow/block1/unit_1')
weightmap[('feature_extractor', 'exitflow_block2')] = (
'XceptionBlock', 'deeplab/xception_65/exit_flow/block2/unit_1')
weightmap[('aspp', 'image_pooling_conv')] = (
'Conv2DBNActiv', 'deeplab/image_pooling')
weightmap[('aspp', 'conv1x1')] = ('Conv2DBNActiv', 'deeplab/aspp0')
weightmap[('aspp', 'atrous1')] = (
'SeparableConv2DBNActiv', 'deeplab/aspp1')
weightmap[('aspp', 'atrous2')] = (
'SeparableConv2DBNActiv', 'deeplab/aspp2')
weightmap[('aspp', 'atrous3')] = (
'SeparableConv2DBNActiv', 'deeplab/aspp3')
weightmap[('aspp', 'proj')] = (
'Conv2DBNActiv', 'deeplab/concat_projection')
weightmap[('decoder', 'feature_proj')] = (
'Conv2DBNActiv', 'deeplab/decoder/feature_projection0')
weightmap[('decoder', 'conv1')] = (
'SeparableConv2DBNActiv', 'deeplab/decoder/decoder_conv0')
weightmap[('decoder', 'conv2')] = (
'SeparableConv2DBNActiv', 'deeplab/decoder/decoder_conv1')
weightmap[('decoder', 'conv_logits')] = (
'Convolution2D', 'deeplab/logits/semantic')
    else:
        raise ValueError('Unsupported model: {}'.format(model))
return weightmap
def resolve(weightmap):
# resolve weightmap
changed = True
while changed:
changed = False
for key in list(weightmap.keys()):
layer, op = weightmap.pop(key)
if layer == 'Conv2DBNActiv':
weightmap[key+('conv',)] = ('Convolution2D', op)
weightmap[key+('bn',)] = (
'BatchNormalization', op + '/BatchNorm')
changed = True
elif layer == 'SeparableConv2DBNActiv':
weightmap[key+('depthwise',)] = (
'Convolution2D_depthwise', op + '_depthwise')
weightmap[key+('dw_bn',)] = (
'BatchNormalization', op + '_depthwise/BatchNorm')
weightmap[key+('pointwise',)] = (
'Convolution2D', op + '_pointwise')
weightmap[key+('pw_bn',)] = (
'BatchNormalization', op + '_pointwise/BatchNorm')
changed = True
elif layer == 'XceptionBlock':
weightmap[key+('separable1',)] = (
'SeparableConv2DBNActiv',
op + '/xception_module/separable_conv1')
weightmap[key+('separable2',)] = (
'SeparableConv2DBNActiv',
op + '/xception_module/separable_conv2')
weightmap[key+('separable3',)] = (
'SeparableConv2DBNActiv',
op + '/xception_module/separable_conv3')
weightmap[key+('conv',)] = (
'Conv2DBNActiv', op + '/xception_module/shortcut')
changed = True
else:
weightmap[key] = (layer, op)
return weightmap
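# Illustrative sketch (hypothetical entry): a composite weightmap entry such as
#   ('aspp', 'conv1x1') -> ('Conv2DBNActiv', 'deeplab/aspp0')
# is expanded by resolve() into the primitive entries
#   ('aspp', 'conv1x1', 'conv') -> ('Convolution2D', 'deeplab/aspp0')
#   ('aspp', 'conv1x1', 'bn')   -> ('BatchNormalization', 'deeplab/aspp0/BatchNorm')
# so that transfer() below only has to handle primitive layer types.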
def transfer(model, sess, weightmap, task):
for key, (layer, op) in weightmap.items():
link = model
try:
for sublink in key:
link = link[sublink]
except AttributeError:
continue
print('loading: {}'.format('/'.join(key)))
input_dict = {}
transpose = {}
if layer == 'Convolution2D':
input_dict['W'] = op + '/weights:0'
input_dict['b'] = op + '/biases:0'
transpose['W'] = (3, 2, 0, 1)
elif layer == 'Convolution2D_depthwise':
input_dict['W'] = op + '/depthwise_weights:0'
transpose['W'] = (2, 3, 0, 1)
elif layer == 'BatchNormalization':
input_dict['gamma'] = op + '/gamma:0'
input_dict['beta'] = op + '/beta:0'
input_dict['avg_mean'] = op + '/moving_mean:0'
input_dict['avg_var'] = op + '/moving_variance:0'
else:
raise ValueError('Invalid layer: {}'.format(layer))
for k in list(input_dict.keys()):
if not hasattr(link, k) or getattr(link, k) is None:
input_dict.pop(k)
weights = sess.run(input_dict)
for k in input_dict:
# eliminate some channels
if key == _last_layer:
mask = [i not in _eliminated_channels[task]
for i in range(weights[k].shape[-1])]
if k == 'W':
weights[k] = weights[k][:, :, :, mask]
elif k == 'b':
weights[k] = weights[k][mask]
load_param(getattr(link, k), weights[k], transpose.get(k))
def main():
parser = argparse.ArgumentParser()
# parser.add_argument('model', choices=list(_model_class.keys()))
parser.add_argument('task', choices=list(_n_class.keys()))
parser.add_argument('graph_path')
parser.add_argument('output')
args = parser.parse_args()
# currently, xception65 is only implemented.
# model_name = args.model
model_name = 'xception65'
model = get_model(model_name, args.task)
sess = get_session(args.graph_path)
weightmap = get_weightmap(model_name)
weightmap = resolve(weightmap)
transfer(model, sess, weightmap, args.task)
chainer.serializers.save_npz(args.output, model)
sess.close()
if __name__ == '__main__':
main()
|
from abc import ABC, abstractmethod
import asyncio
from dataclasses import dataclass
import logging
from typing import Any, Awaitable, Callable, Dict, Iterable, List, Optional, cast
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant.components import websocket_api
from homeassistant.const import CONF_ID
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import entity_registry
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.storage import Store
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import slugify
STORAGE_VERSION = 1
SAVE_DELAY = 10
CHANGE_ADDED = "added"
CHANGE_UPDATED = "updated"
CHANGE_REMOVED = "removed"
@dataclass
class CollectionChangeSet:
"""Class to represent a change set.
change_type: One of CHANGE_*
item_id: The id of the item
item: The item
"""
change_type: str
item_id: str
item: Any
ChangeListener = Callable[
[
# Change type
str,
# Item ID
str,
# New or removed config
dict,
],
Awaitable[None],
]
class CollectionError(HomeAssistantError):
"""Base class for collection related errors."""
class ItemNotFound(CollectionError):
"""Raised when an item is not found."""
def __init__(self, item_id: str):
"""Initialize item not found error."""
super().__init__(f"Item {item_id} not found.")
self.item_id = item_id
class IDManager:
"""Keep track of IDs across different collections."""
def __init__(self) -> None:
"""Initiate the ID manager."""
self.collections: List[Dict[str, Any]] = []
def add_collection(self, collection: Dict[str, Any]) -> None:
"""Add a collection to check for ID usage."""
self.collections.append(collection)
def has_id(self, item_id: str) -> bool:
"""Test if the ID exists."""
return any(item_id in collection for collection in self.collections)
def generate_id(self, suggestion: str) -> str:
"""Generate an ID."""
base = slugify(suggestion)
proposal = base
attempt = 1
while self.has_id(proposal):
attempt += 1
proposal = f"{base}_{attempt}"
return proposal
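    # Illustrative sketch (hypothetical state), doctest-style:
    #   >>> manager = IDManager()
    #   >>> manager.add_collection({"kitchen": {}})
    #   >>> manager.generate_id("Kitchen")
    #   'kitchen_2'
    #   >>> manager.generate_id("Dining Room")
    #   'dining_room'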
class ObservableCollection(ABC):
"""Base collection type that can be observed."""
def __init__(self, logger: logging.Logger, id_manager: Optional[IDManager] = None):
"""Initialize the base collection."""
self.logger = logger
self.id_manager = id_manager or IDManager()
self.data: Dict[str, dict] = {}
self.listeners: List[ChangeListener] = []
self.id_manager.add_collection(self.data)
@callback
def async_items(self) -> List[dict]:
"""Return list of items in collection."""
return list(self.data.values())
@callback
def async_add_listener(self, listener: ChangeListener) -> None:
"""Add a listener.
Will be called with (change_type, item_id, updated_config).
"""
self.listeners.append(listener)
async def notify_changes(self, change_sets: Iterable[CollectionChangeSet]) -> None:
"""Notify listeners of a change."""
await asyncio.gather(
*[
listener(change_set.change_type, change_set.item_id, change_set.item)
for listener in self.listeners
for change_set in change_sets
]
)
class YamlCollection(ObservableCollection):
"""Offer a collection based on static data."""
async def async_load(self, data: List[dict]) -> None:
"""Load the YAML collection. Overrides existing data."""
old_ids = set(self.data)
change_sets = []
for item in data:
item_id = item[CONF_ID]
if item_id in old_ids:
old_ids.remove(item_id)
event = CHANGE_UPDATED
elif self.id_manager.has_id(item_id):
self.logger.warning("Duplicate ID '%s' detected, skipping", item_id)
continue
else:
event = CHANGE_ADDED
self.data[item_id] = item
change_sets.append(CollectionChangeSet(event, item_id, item))
for item_id in old_ids:
change_sets.append(
CollectionChangeSet(CHANGE_REMOVED, item_id, self.data.pop(item_id))
)
if change_sets:
await self.notify_changes(change_sets)
class StorageCollection(ObservableCollection):
"""Offer a CRUD interface on top of JSON storage."""
def __init__(
self,
store: Store,
logger: logging.Logger,
id_manager: Optional[IDManager] = None,
):
"""Initialize the storage collection."""
super().__init__(logger, id_manager)
self.store = store
@property
def hass(self) -> HomeAssistant:
"""Home Assistant object."""
return self.store.hass
async def _async_load_data(self) -> Optional[dict]:
"""Load the data."""
return cast(Optional[dict], await self.store.async_load())
async def async_load(self) -> None:
"""Load the storage Manager."""
raw_storage = await self._async_load_data()
if raw_storage is None:
raw_storage = {"items": []}
for item in raw_storage["items"]:
self.data[item[CONF_ID]] = item
await self.notify_changes(
[
CollectionChangeSet(CHANGE_ADDED, item[CONF_ID], item)
for item in raw_storage["items"]
]
)
@abstractmethod
async def _process_create_data(self, data: dict) -> dict:
"""Validate the config is valid."""
@callback
@abstractmethod
def _get_suggested_id(self, info: dict) -> str:
"""Suggest an ID based on the config."""
@abstractmethod
async def _update_data(self, data: dict, update_data: dict) -> dict:
"""Return a new updated data object."""
async def async_create_item(self, data: dict) -> dict:
"""Create a new item."""
item = await self._process_create_data(data)
item[CONF_ID] = self.id_manager.generate_id(self._get_suggested_id(item))
self.data[item[CONF_ID]] = item
self._async_schedule_save()
await self.notify_changes(
[CollectionChangeSet(CHANGE_ADDED, item[CONF_ID], item)]
)
return item
async def async_update_item(self, item_id: str, updates: dict) -> dict:
"""Update item."""
if item_id not in self.data:
raise ItemNotFound(item_id)
if CONF_ID in updates:
raise ValueError("Cannot update ID")
current = self.data[item_id]
updated = await self._update_data(current, updates)
self.data[item_id] = updated
self._async_schedule_save()
await self.notify_changes(
[CollectionChangeSet(CHANGE_UPDATED, item_id, updated)]
)
return self.data[item_id]
async def async_delete_item(self, item_id: str) -> None:
"""Delete item."""
if item_id not in self.data:
raise ItemNotFound(item_id)
item = self.data.pop(item_id)
self._async_schedule_save()
await self.notify_changes([CollectionChangeSet(CHANGE_REMOVED, item_id, item)])
@callback
def _async_schedule_save(self) -> None:
"""Schedule saving the area registry."""
self.store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> dict:
"""Return data of area registry to store in a file."""
return {"items": list(self.data.values())}
class IDLessCollection(ObservableCollection):
"""A collection without IDs."""
counter = 0
async def async_load(self, data: List[dict]) -> None:
"""Load the collection. Overrides existing data."""
await self.notify_changes(
[
CollectionChangeSet(CHANGE_REMOVED, item_id, item)
for item_id, item in list(self.data.items())
]
)
self.data.clear()
for item in data:
self.counter += 1
item_id = f"fakeid-{self.counter}"
self.data[item_id] = item
await self.notify_changes(
[
CollectionChangeSet(CHANGE_ADDED, item_id, item)
for item_id, item in self.data.items()
]
)
@callback
def attach_entity_component_collection(
entity_component: EntityComponent,
collection: ObservableCollection,
create_entity: Callable[[dict], Entity],
) -> None:
"""Map a collection to an entity component."""
entities = {}
async def _collection_changed(change_type: str, item_id: str, config: dict) -> None:
"""Handle a collection change."""
if change_type == CHANGE_ADDED:
entity = create_entity(config)
await entity_component.async_add_entities([entity])
entities[item_id] = entity
return
if change_type == CHANGE_REMOVED:
entity = entities.pop(item_id)
await entity.async_remove()
return
# CHANGE_UPDATED
await entities[item_id].async_update_config(config) # type: ignore
collection.async_add_listener(_collection_changed)
@callback
def attach_entity_registry_cleaner(
hass: HomeAssistantType,
domain: str,
platform: str,
collection: ObservableCollection,
) -> None:
"""Attach a listener to clean up entity registry on collection changes."""
async def _collection_changed(change_type: str, item_id: str, config: Dict) -> None:
"""Handle a collection change: clean up entity registry on removals."""
if change_type != CHANGE_REMOVED:
return
ent_reg = await entity_registry.async_get_registry(hass)
ent_to_remove = ent_reg.async_get_entity_id(domain, platform, item_id)
if ent_to_remove is not None:
ent_reg.async_remove(ent_to_remove)
collection.async_add_listener(_collection_changed)
class StorageCollectionWebsocket:
"""Class to expose storage collection management over websocket."""
def __init__(
self,
storage_collection: StorageCollection,
api_prefix: str,
model_name: str,
create_schema: dict,
update_schema: dict,
):
"""Initialize a websocket CRUD."""
self.storage_collection = storage_collection
self.api_prefix = api_prefix
self.model_name = model_name
self.create_schema = create_schema
self.update_schema = update_schema
assert self.api_prefix[-1] != "/", "API prefix should not end in /"
@property
def item_id_key(self) -> str:
"""Return item ID key."""
return f"{self.model_name}_id"
@callback
def async_setup(
self,
hass: HomeAssistant,
*,
create_list: bool = True,
create_create: bool = True,
) -> None:
"""Set up the websocket commands."""
if create_list:
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/list",
self.ws_list_item,
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): f"{self.api_prefix}/list"}
),
)
if create_create:
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/create",
websocket_api.require_admin(
websocket_api.async_response(self.ws_create_item)
),
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
**self.create_schema,
vol.Required("type"): f"{self.api_prefix}/create",
}
),
)
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/update",
websocket_api.require_admin(
websocket_api.async_response(self.ws_update_item)
),
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
**self.update_schema,
vol.Required("type"): f"{self.api_prefix}/update",
vol.Required(self.item_id_key): str,
}
),
)
websocket_api.async_register_command(
hass,
f"{self.api_prefix}/delete",
websocket_api.require_admin(
websocket_api.async_response(self.ws_delete_item)
),
websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): f"{self.api_prefix}/delete",
vol.Required(self.item_id_key): str,
}
),
)
def ws_list_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""List items."""
connection.send_result(msg["id"], self.storage_collection.async_items())
async def ws_create_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Create a item."""
try:
data = dict(msg)
data.pop("id")
data.pop("type")
item = await self.storage_collection.async_create_item(data)
connection.send_result(msg["id"], item)
except vol.Invalid as err:
connection.send_error(
msg["id"],
websocket_api.const.ERR_INVALID_FORMAT,
humanize_error(data, err),
)
except ValueError as err:
connection.send_error(
msg["id"], websocket_api.const.ERR_INVALID_FORMAT, str(err)
)
async def ws_update_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Update a item."""
data = dict(msg)
msg_id = data.pop("id")
item_id = data.pop(self.item_id_key)
data.pop("type")
try:
item = await self.storage_collection.async_update_item(item_id, data)
connection.send_result(msg_id, item)
except ItemNotFound:
connection.send_error(
msg["id"],
websocket_api.const.ERR_NOT_FOUND,
f"Unable to find {self.item_id_key} {item_id}",
)
except vol.Invalid as err:
connection.send_error(
msg["id"],
websocket_api.const.ERR_INVALID_FORMAT,
humanize_error(data, err),
)
except ValueError as err:
connection.send_error(
msg_id, websocket_api.const.ERR_INVALID_FORMAT, str(err)
)
async def ws_delete_item(
self, hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg: dict
) -> None:
"""Delete a item."""
try:
await self.storage_collection.async_delete_item(msg[self.item_id_key])
except ItemNotFound:
connection.send_error(
msg["id"],
websocket_api.const.ERR_NOT_FOUND,
f"Unable to find {self.item_id_key} {msg[self.item_id_key]}",
)
connection.send_result(msg["id"])
|
import errno
import json
import os.path
import random
import shlex
import sys
import threading
from subprocess import PIPE
from subprocess import Popen
from subprocess import STDOUT
import yaml
def _timeout(process):
"""Helper function for _run. It terminates the process.
Doesn't raise OSError, if we try to terminate a non-existing
process as there can be a very small window between poll() and kill()
"""
if process.poll() is None:
try:
# sending SIGKILL to the process
process.kill()
except OSError as e:
# No such process error
# The process could have been terminated meanwhile
if e.errno != errno.ESRCH:
raise
def cmd(command):
stream = False
timeout = 60
output = []
try:
        # universal_newlines=True yields text lines so the "" sentinel below terminates at EOF
        process = Popen(shlex.split(command), stdout=PIPE, stderr=STDOUT, stdin=None, universal_newlines=True)
process.name = command
# start the timer if we specified a timeout
if timeout:
proctimer = threading.Timer(timeout, _timeout, (process,))
proctimer.start()
for line in iter(process.stdout.readline, ""):
if stream:
print(line.rstrip("\n"))
output.append(line.rstrip("\n"))
# when finished, get the exit code
returncode = process.wait()
except OSError as e:
output.append(e.strerror.rstrip("\n"))
returncode = e.errno
except (KeyboardInterrupt, SystemExit):
# need to clean up the timing thread here
if timeout:
proctimer.cancel()
raise
else:
# Stop the timer
if timeout:
proctimer.cancel()
if returncode == -9:
output.append(f"Command '{command}' timed out (longer than {timeout}s)")
return returncode, "\n".join(output)
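# Illustrative sketch (hypothetical command): cmd("sleep 120") is killed by the
# 60s timer, wait() then reports -9 (SIGKILL), a "timed out" line is appended to
# the captured output, and (-9, output) is returned to the caller.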
def abort(message):
print(message)
sys.exit(1)
def condquit(rc, message):
if rc != 0:
print(message)
sys.exit(rc)
def docker_env_to_dict(environment_array):
environment = {}
for kv in environment_array:
k, v = kv.split("=", 1)
environment[k] = v
return environment
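# Illustrative sketch (hypothetical values), doctest-style:
#   >>> docker_env_to_dict(["PAASTA_SERVICE=myservice", "MARATHON_PORT=31337"])
#   {'PAASTA_SERVICE': 'myservice', 'MARATHON_PORT': '31337'}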
def get_proxy_port(service_name, instance_name):
smartstack_yaml = "/nail/etc/services/%s/smartstack.yaml" % service_name
proxy_port = None
if os.path.exists(smartstack_yaml):
with open(smartstack_yaml, "r") as stream:
data = yaml.safe_load(stream)
if instance_name in data:
proxy_port = data[instance_name].get("proxy_port", None)
return proxy_port
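# Illustrative sketch (hypothetical file contents): a smartstack.yaml such as
#   main:
#     proxy_port: 20973
#   canary:
#     proxy_port: 20974
# makes get_proxy_port("myservice", "main") return 20973; an unknown instance
# name (or a missing file) returns None.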
def get_last_killed(drained_apps, service, instance):
"""look "back" in drained_apps, find at what time
the given (service, instance) was last killed"""
last_killed_t = -1000
for drained_app in reversed(drained_apps):
dt, dservice, dinstance = drained_app
if dservice == service and dinstance == instance:
last_killed_t = dt
break
return last_killed_t
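# Illustrative sketch (hypothetical values), doctest-style:
#   >>> drained = [(0, "svc", "main"), (30, "svc", "canary")]
#   >>> get_last_killed(drained, "svc", "main")
#   0
#   >>> get_last_killed(drained, "other", "main")
#   -1000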
def has_all_paasta_env(environment):
for k in ("PAASTA_SERVICE", "PAASTA_INSTANCE", "MARATHON_PORT"):
if k not in environment:
return False
return True
def main():
rc, output = cmd("sudo docker ps -q")
condquit(rc, "docker ps")
lines = output.split("\n")
if len(lines) == 0:
abort("no containers running")
running_container_ids = []
for line in lines:
if len(line) != 12:
abort("%s doesn't look like a container ID" % line)
running_container_ids.append(line.rstrip())
random.shuffle(running_container_ids)
drained_apps = [] # ( t_killed, service, instance )
smartstack_grace_sleep = 10
between_containers_grace_sleep = 10
min_kill_interval = 60 # minimum time to wait between same service.instance kills
hadown_expire_in_seconds = 120
t = 0
for container_id in running_container_ids:
rc, output = cmd("sudo docker inspect %s" % container_id)
condquit(rc, "docker inspect %s" % container_id)
docker_inspect_data = json.loads(output)
environment = docker_env_to_dict(docker_inspect_data[0]["Config"]["Env"])
if not has_all_paasta_env(environment):
print("# WARNING: %s is not a paasta container, skipping)" % (container_id))
continue
service = environment["PAASTA_SERVICE"]
instance = environment["PAASTA_INSTANCE"]
print(f"# {service}.{instance}")
marathon_port = int(environment["MARATHON_PORT"])
proxy_port = get_proxy_port(service, instance)
print(f"# {container_id},{service},{instance},{proxy_port},{marathon_port}")
print(
"sudo hadown -P {} -e $((`date +'%s'`+{})) {}.{}".format(
marathon_port, hadown_expire_in_seconds, service, instance
)
)
print("sleep %s" % smartstack_grace_sleep)
t += smartstack_grace_sleep
print("sudo docker kill %s" % container_id)
print(f"sudo haup -P {marathon_port} {service}.{instance}")
last_killed_t = get_last_killed(drained_apps, service, instance)
drained_apps.append((t, service, instance))
# print "t:%s last_killed_t:%s" % (t, last_killed_t)
sleep_amount = between_containers_grace_sleep
if (t - last_killed_t) < min_kill_interval:
sleep_amount = (
min_kill_interval - (t - last_killed_t) + between_containers_grace_sleep
)
print("sleep %s" % sleep_amount)
t += sleep_amount
print()
if __name__ == "__main__":
main()
|
import logging
import os
import re
import socket
import sys
if "PATH" not in os.environ:
# This command is sometimes executed in a sanitized environment
# which does not have the path, which causes the following imports
# to fail.
# To compensate, we set a minimal path to get off the ground.
os.environ["PATH"] = "/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
LOCK_DIRECTORY = "/var/lib/paasta/mac-address"
ENV_MATCH_RE = re.compile(r"^(-\w*e\w*|--env(?P<file>-file)?)(=(?P<arg>\S.*))?$")
MAX_HOSTNAME_LENGTH = 60
def parse_env_args(args):
result = dict(os.environ.items())
in_env = False
in_file = False
for arg in args:
if not in_env:
match = ENV_MATCH_RE.match(arg)
if not match:
continue
arg = match.group("arg") or ""
in_file = bool(match.group("file"))
if not arg:
in_env = True
continue
in_env = False
if in_file:
result.update(read_env_file(arg))
in_file = False
continue
try:
k, v = arg.split("=", 1)
except ValueError:
continue
result[k] = v
return result
def read_env_file(filename):
# Parse a file where each line is KEY=VALUE
# return contents in dictionary form
result = {}
with open(filename) as f:
for line in f:
try:
k, v = line.split("=", 1)
except ValueError:
continue
result[k] = v.strip()
return result
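# Illustrative sketch (hypothetical file contents): a file containing
#   PAASTA_SERVICE=myservice
#   PAASTA_INSTANCE=main
# makes read_env_file() return
#   {"PAASTA_SERVICE": "myservice", "PAASTA_INSTANCE": "main"};
# lines without an "=" are silently skipped.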
def can_add_hostname(args):
# return False if --hostname is already specified or if --network=host
if is_network_host(args):
return False
for index, arg in enumerate(args):
# Check for --hostname and variants
if arg == "-h":
return False
if arg.startswith("--hostname"):
return False
if len(arg) > 1 and arg[0] == "-" and arg[1] != "-":
# several short args
arg = arg.partition("=")[0]
if "h" in arg:
return False
return True
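# Illustrative sketch (hypothetical argv): ["docker", "run", "-it", "ubuntu"]
# allows adding a hostname, while ["docker", "run", "-h", "foo", "ubuntu"],
# ["docker", "run", "--hostname=foo", "ubuntu"] and
# ["docker", "run", "--net=host", "ubuntu"] all return False; the short-arg
# scan above also catches combined flags such as "-ith".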
def is_network_host(args):
for index, arg in enumerate(args):
# Check for --network=host and variants
if arg in ("--net=host", "--network=host"):
return True
try:
if arg in ("--net", "--network") and args[index + 1] == "host":
return True
except IndexError:
pass
return False
def is_run(args):
try:
list(args).index("run")
return True
except ValueError:
return False
def can_add_mac_address(args):
# return False if --mac-address is already specified or if --network=host
if is_network_host(args) or not is_run(args):
return False
for index, arg in enumerate(args):
# Check for --mac-address
if arg.startswith("--mac-address"):
return False
return True
def generate_hostname_task_id(hostname, mesos_task_id):
task_id = mesos_task_id.rpartition(".")[2]
hostname_task_id = hostname + "-" + task_id
# hostnames can only contain alphanumerics and dashes and must be no more
# than 60 characters
hostname_task_id = re.sub("[^a-zA-Z0-9-]+", "-", hostname_task_id)[
:MAX_HOSTNAME_LENGTH
]
# hostnames can also not end with dashes as per RFC952
hostname_task_id = hostname_task_id.rstrip("-")
return hostname_task_id
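# Illustrative sketch (hypothetical values), doctest-style:
#   >>> generate_hostname_task_id("box-1", "svc.main.gitdeadbeef.config1234.4f3e")
#   'box-1-4f3e'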
def add_argument(args, argument):
# Add an argument immediately after 'run' command if it exists
args = list(args)
try:
run_index = args.index("run")
except ValueError:
pass
else:
args.insert(run_index + 1, argument)
return args
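# Illustrative sketch (hypothetical argv), doctest-style:
#   >>> add_argument(["docker", "run", "ubuntu"], "--mac-address=02:00:00:00:00:01")
#   ['docker', 'run', '--mac-address=02:00:00:00:00:01', 'ubuntu']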
def get_cpumap():
# Return a dict containing the core numbers per physical CPU
core = 0
cpumap = {}
try:
with open("/proc/cpuinfo", "r") as f:
for line in f:
m = re.match(r"physical\sid.*(\d)", line)
if m:
cpuid = int(m.group(1))
if cpuid not in cpumap:
cpumap[cpuid] = []
cpumap[cpuid].append(core)
core += 1
except IOError:
logging.warning("Error while trying to read cpuinfo")
return cpumap
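# Illustrative sketch (hypothetical /proc/cpuinfo): on a two-socket host whose
# cpuinfo lists processors 0-3 with "physical id : 0" and processors 4-7 with
# "physical id : 1", get_cpumap() returns {0: [0, 1, 2, 3], 1: [4, 5, 6, 7]}.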
def get_numa_memsize(nb_nodes):
    # Return memory size in MB per NUMA node, assuming memory is split evenly
# TODO: calculate and return real memory map
try:
with open("/proc/meminfo", "r") as f:
for line in f:
m = re.match(r"MemTotal:\s*(\d+)\skB", line)
if m:
return int(m.group(1)) / 1024 / int(nb_nodes)
except IOError:
logging.warning("Error while trying to read meminfo")
return 0
def arg_collision(new_args, current_args):
# Returns True if one of the new arguments is already in the
# current argument list.
cur_arg_keys = []
for c in current_args:
cur_arg_keys.append(c.split("=")[0])
return bool(set(new_args).intersection(set(cur_arg_keys)))
def is_numa_enabled():
if os.path.exists("/proc/1/numa_maps"):
return True
else:
logging.warning("The system does not support NUMA")
return False
def get_cpu_requirements(env_args):
    # Ensure we return a float; if no requirement is set, return 0.0
try:
return float(env_args.get("MARATHON_APP_RESOURCE_CPUS"))
except (ValueError, TypeError):
logging.warning(
"Could not read {} as a float".format(
env_args.get("MARATHON_APP_RESOURCE_CPUS")
)
)
return 0.0
def get_mem_requirements(env_args):
    # Ensure we return a float; if no requirement is set, return 0.0
try:
return float(env_args.get("MARATHON_APP_RESOURCE_MEM"))
except (ValueError, TypeError):
logging.warning(
"Could not read {} as a float".format(
env_args.get("MARATHON_APP_RESOURCE_MEM")
)
)
return 0.0
def append_cpuset_args(argv, env_args):
# Enable log messages to stderr
logging.basicConfig(stream=sys.stderr, level=logging.INFO)
try:
pinned_numa_node = int(env_args.get("PIN_TO_NUMA_NODE"))
except (ValueError, TypeError):
logging.error(
"Could not read PIN_TO_NUMA_NODE value as an int: {}".format(
env_args.get("PIN_TO_NUMA_NODE")
)
)
return argv
cpumap = get_cpumap()
    if len(cpumap) < 2:
        logging.error("Fewer than 2 physical CPUs detected")
return argv
if pinned_numa_node not in cpumap:
logging.error(
"Specified NUMA node: {} does not exist on this system".format(
pinned_numa_node
)
)
return argv
if arg_collision(["--cpuset-cpus", "--cpuset-mems"], argv):
logging.error("--cpuset options are already set. Not overriding")
return argv
if not is_numa_enabled():
logging.error("Could not detect NUMA on the system")
return argv
if len(cpumap[pinned_numa_node]) < get_cpu_requirements(env_args):
logging.error("The NUMA node has less cores than requested")
return argv
if get_numa_memsize(len(cpumap)) <= get_mem_requirements(env_args):
logging.error(
"Requested memory:{} MB does not fit in one NUMA node: {} MB".format(
get_mem_requirements(env_args), get_numa_memsize(len(cpumap))
)
)
return argv
logging.info(f"Binding container to NUMA node {pinned_numa_node}")
argv = add_argument(
argv, ("--cpuset-cpus=" + ",".join(str(c) for c in cpumap[pinned_numa_node]))
)
argv = add_argument(argv, ("--cpuset-mems=" + str(pinned_numa_node)))
return argv
def add_firewall(argv, service, instance):
# Delayed import to improve performance when add_firewall is not used
from paasta_tools.docker_wrapper_imports import DEFAULT_SYNAPSE_SERVICE_DIR
from paasta_tools.docker_wrapper_imports import firewall_flock
from paasta_tools.docker_wrapper_imports import prepare_new_container
from paasta_tools.docker_wrapper_imports import reserve_unique_mac_address
from paasta_tools.docker_wrapper_imports import DEFAULT_SOA_DIR
output = ""
try:
mac_address, lockfile = reserve_unique_mac_address(LOCK_DIRECTORY)
except Exception as e:
output = f"Unable to add mac address: {e}"
else:
argv = add_argument(argv, f"--mac-address={mac_address}")
try:
with firewall_flock():
prepare_new_container(
DEFAULT_SOA_DIR,
DEFAULT_SYNAPSE_SERVICE_DIR,
service,
instance,
mac_address,
)
except Exception as e:
output = f"Unable to add firewall rules: {e}"
if output:
print(output, file=sys.stderr)
return argv
def main(argv=None):
argv = argv if argv is not None else sys.argv
env_args = parse_env_args(argv)
if env_args.get("PIN_TO_NUMA_NODE"):
argv = append_cpuset_args(argv, env_args)
# Marathon sets MESOS_TASK_ID
mesos_task_id = env_args.get("MESOS_TASK_ID")
if mesos_task_id and can_add_hostname(argv):
hostname = socket.getfqdn()
argv = add_argument(argv, f"-e=PAASTA_HOST={hostname}")
hostname_task_id = generate_hostname_task_id(
hostname.partition(".")[0], mesos_task_id
)
argv = add_argument(argv, f"--hostname={hostname_task_id }")
paasta_firewall = env_args.get("PAASTA_FIREWALL")
service = env_args.get("PAASTA_SERVICE")
instance = env_args.get("PAASTA_INSTANCE")
if paasta_firewall and service and instance and can_add_mac_address(argv):
try:
argv = add_firewall(argv, service, instance)
except Exception as e:
print(f"Unhandled exception in add_firewall: {e}", file=sys.stderr)
os.execlp("docker", "docker", *argv[1:])
|
import os
import shutil
import fnmatch
from invoke import task
from ._config import ROOT_DIR, NAME
@task
def clean(ctx):
""" clear all .pyc modules and __pycache__ dirs
"""
count1, count2 = 0, 0
for root, dirnames, filenames in os.walk(ROOT_DIR):
for dirname in dirnames:
if dirname == '__pycache__':
shutil.rmtree(os.path.join(root, dirname))
count1 += 1
print('removed %i __pycache__ dirs' % count1)
for root, dirnames, filenames in os.walk(ROOT_DIR):
for filename in fnmatch.filter(filenames, '*.pyc'):
os.remove(os.path.join(root, filename))
count2 += 1
print('removed %i .pyc files' % count2)
for dir in ['dist', 'build', NAME+'.egg-info', 'htmlcov']:
dirname = os.path.join(ROOT_DIR, dir)
if os.path.isdir(dirname):
shutil.rmtree(dirname)
print('Removed directory %r' % dir)
|
import logging
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Brain:
"""
    This class is a singleton representing the brain.yml file and its synapses.
    .. note:: the is_loaded boolean is True when the brain has been properly loaded.
"""
def __init__(self, synapses=None, brain_file=None, brain_yaml=None):
self.synapses = synapses
self.brain_file = brain_file
self.brain_yaml = brain_yaml
def get_synapse_by_name(self, synapse_name):
"""
Get the synapse, using its synapse name, from the synapse list
:param synapse_name: the name of the synapse to get
:type synapse_name: str
:return: The Synapse corresponding to the name
:rtype: Synapse
"""
synapse_launched = None
for synapse in self.synapses:
if synapse.name == synapse_name:
synapse_launched = synapse
# we found the synapse, we don't need to check the rest of the list
break
return synapse_launched
def disable_synapse_by_name(self, synapse_name):
"""
disable a synapse from the brain by its name
:param synapse_name: the name of the synapse to disable
:return: True if the synapse has been disabled
"""
for synapse in self.synapses:
if synapse.name == synapse_name:
logger.debug("[Brain] disable synapse name: %s" % synapse_name)
synapse.enabled = False
return True
logger.debug("[Brain] Cannot disable synapse name: %s. Synapse not found" % synapse_name)
return False
def enable_synapse_by_name(self, synapse_name):
"""
enable a synapse from the brain by its name
:param synapse_name: the name of the synapse to enable
:return: True if the synapse has been enabled
"""
for synapse in self.synapses:
if synapse.name == synapse_name:
logger.debug("[Brain] enable synapse name: %s" % synapse_name)
synapse.enabled = True
return True
logger.debug("[Brain] Cannot enable synapse name: %s. Synapse not found" % synapse_name)
return False
def __eq__(self, other):
"""
This is used to compare 2 objects
:param other:
:return:
"""
return self.__dict__ == other.__dict__
|
import logging
from pyubee import Ubee
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_MODEL = "model"
DEFAULT_MODEL = "detect"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_MODEL, default=DEFAULT_MODEL): vol.Any(
"EVW32C-0N",
"EVW320B",
"EVW321B",
"EVW3200-Wifi",
"EVW3226@UPC",
"DVW32CB",
"DDW36C",
),
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a Ubee scanner."""
info = config[DOMAIN]
host = info[CONF_HOST]
username = info[CONF_USERNAME]
password = info[CONF_PASSWORD]
model = info[CONF_MODEL]
ubee = Ubee(host, username, password, model)
if not ubee.login():
_LOGGER.error("Login failed")
return None
scanner = UbeeDeviceScanner(ubee)
return scanner
class UbeeDeviceScanner(DeviceScanner):
"""This class queries a wireless Ubee router."""
def __init__(self, ubee):
"""Initialize the Ubee scanner."""
self._ubee = ubee
self._mac2name = {}
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
devices = self._get_connected_devices()
self._mac2name = devices
return list(devices)
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
return self._mac2name.get(device)
def _get_connected_devices(self):
"""List connected devices with pyubee."""
if not self._ubee.session_active():
self._ubee.login()
return self._ubee.get_connected_devices()
|
import unittest
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
from torch.utils.data import DataLoader, TensorDataset
class LitDataModule(pl.LightningDataModule):
def __init__(self, batch_size=16):
super().__init__()
self.batch_size = batch_size
def setup(self, stage=None):
X_train = torch.rand(100, 1, 28, 28)
y_train = torch.randint(0, 10, size=(100,))
X_valid = torch.rand(20, 1, 28, 28)
y_valid = torch.randint(0, 10, size=(20,))
self.train_ds = TensorDataset(X_train, y_train)
self.valid_ds = TensorDataset(X_valid, y_valid)
def train_dataloader(self):
return DataLoader(self.train_ds, batch_size=self.batch_size, shuffle=True)
def val_dataloader(self):
return DataLoader(self.valid_ds, batch_size=self.batch_size, shuffle=False)
class LitClassifier(pl.LightningModule):
def __init__(self):
super().__init__()
self.l1 = torch.nn.Linear(28 * 28, 10)
def forward(self, x):
return F.relu(self.l1(x.view(x.size(0), -1)))
def training_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
self.log('train_loss', loss)
return loss
def validation_step(self, batch, batch_idx):
x, y = batch
y_hat = self(x)
loss = F.cross_entropy(y_hat, y)
self.log('val_loss', loss)
def configure_optimizers(self):
return torch.optim.Adam(self.parameters(), lr=1e-2)
class TestPytorchLightning(unittest.TestCase):
def test_version(self):
self.assertIsNotNone(pl.__version__)
def test_mnist(self):
dm = LitDataModule()
model = LitClassifier()
trainer = pl.Trainer(gpus=None, max_epochs=1)
result = trainer.fit(model, datamodule=dm)
self.assertTrue(result)
|
from typing import Callable, List
from canary.api import SensorType
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_SIGNAL_STRENGTH,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
TEMP_CELSIUS,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .const import DATA_COORDINATOR, DOMAIN, MANUFACTURER
from .coordinator import CanaryDataUpdateCoordinator
SENSOR_VALUE_PRECISION = 2
ATTR_AIR_QUALITY = "air_quality"
# Define variables to store the device names, as referred to by the Canary API.
# Note: If Canary changes the name of any of its devices (which it has done before),
# these variables will need updating; otherwise the sensors will stop working
# and disappear from Home Assistant.
CANARY_PRO = "Canary Pro"
CANARY_FLEX = "Canary Flex"
# Sensor types are defined like so:
# sensor type name, unit_of_measurement, icon, device class, products supported
SENSOR_TYPES = [
["temperature", TEMP_CELSIUS, None, DEVICE_CLASS_TEMPERATURE, [CANARY_PRO]],
["humidity", PERCENTAGE, None, DEVICE_CLASS_HUMIDITY, [CANARY_PRO]],
["air_quality", None, "mdi:weather-windy", None, [CANARY_PRO]],
[
"wifi",
SIGNAL_STRENGTH_DECIBELS_MILLIWATT,
None,
DEVICE_CLASS_SIGNAL_STRENGTH,
[CANARY_FLEX],
],
["battery", PERCENTAGE, None, DEVICE_CLASS_BATTERY, [CANARY_FLEX]],
]
STATE_AIR_QUALITY_NORMAL = "normal"
STATE_AIR_QUALITY_ABNORMAL = "abnormal"
STATE_AIR_QUALITY_VERY_ABNORMAL = "very_abnormal"
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
"""Set up Canary sensors based on a config entry."""
coordinator: CanaryDataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
sensors = []
for location in coordinator.data["locations"].values():
for device in location.devices:
if device.is_online:
device_type = device.device_type
for sensor_type in SENSOR_TYPES:
if device_type.get("name") in sensor_type[4]:
sensors.append(
CanarySensor(coordinator, sensor_type, location, device)
)
async_add_entities(sensors, True)
class CanarySensor(CoordinatorEntity, Entity):
"""Representation of a Canary sensor."""
def __init__(self, coordinator, sensor_type, location, device):
"""Initialize the sensor."""
super().__init__(coordinator)
self._sensor_type = sensor_type
self._device_id = device.device_id
self._device_name = device.name
self._device_type_name = device.device_type["name"]
sensor_type_name = sensor_type[0].replace("_", " ").title()
self._name = f"{location.name} {device.name} {sensor_type_name}"
canary_sensor_type = None
if self._sensor_type[0] == "air_quality":
canary_sensor_type = SensorType.AIR_QUALITY
elif self._sensor_type[0] == "temperature":
canary_sensor_type = SensorType.TEMPERATURE
elif self._sensor_type[0] == "humidity":
canary_sensor_type = SensorType.HUMIDITY
elif self._sensor_type[0] == "wifi":
canary_sensor_type = SensorType.WIFI
elif self._sensor_type[0] == "battery":
canary_sensor_type = SensorType.BATTERY
self._canary_type = canary_sensor_type
@property
def reading(self):
"""Return the device sensor reading."""
readings = self.coordinator.data["readings"][self._device_id]
value = next(
(
reading.value
for reading in readings
if reading.sensor_type == self._canary_type
),
None,
)
if value is not None:
return round(float(value), SENSOR_VALUE_PRECISION)
return None
@property
def name(self):
"""Return the name of the Canary sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self.reading
@property
def unique_id(self):
"""Return the unique ID of this sensor."""
return f"{self._device_id}_{self._sensor_type[0]}"
@property
def device_info(self):
"""Return the device_info of the device."""
return {
"identifiers": {(DOMAIN, str(self._device_id))},
"name": self._device_name,
"model": self._device_type_name,
"manufacturer": MANUFACTURER,
}
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._sensor_type[1]
@property
def device_class(self):
"""Device class for the sensor."""
return self._sensor_type[3]
@property
def icon(self):
"""Icon for the sensor."""
return self._sensor_type[2]
@property
def device_state_attributes(self):
"""Return the state attributes."""
reading = self.reading
if self._sensor_type[0] == "air_quality" and reading is not None:
air_quality = None
if reading <= 0.4:
air_quality = STATE_AIR_QUALITY_VERY_ABNORMAL
elif reading <= 0.59:
air_quality = STATE_AIR_QUALITY_ABNORMAL
elif reading <= 1.0:
air_quality = STATE_AIR_QUALITY_NORMAL
return {ATTR_AIR_QUALITY: air_quality}
return None
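# Illustrative sketch (hypothetical readings) for the air_quality attribute above:
# a reading of 0.35 is reported as "very_abnormal", 0.50 as "abnormal" and 0.80 as
# "normal"; readings above 1.0 leave the attribute set to None, and other sensor
# types expose no extra state attributes.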
|
import asyncio
from pysmappee import Smappee
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_CLIENT_ID,
CONF_CLIENT_SECRET,
CONF_IP_ADDRESS,
CONF_PLATFORM,
)
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow, config_validation as cv
from homeassistant.util import Throttle
from . import api, config_flow
from .const import (
AUTHORIZE_URL,
CONF_SERIALNUMBER,
DOMAIN,
MIN_TIME_BETWEEN_UPDATES,
SMAPPEE_PLATFORMS,
TOKEN_URL,
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_CLIENT_ID): cv.string,
vol.Required(CONF_CLIENT_SECRET): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Smappee component."""
hass.data[DOMAIN] = {}
if DOMAIN not in config:
return True
client_id = config[DOMAIN][CONF_CLIENT_ID]
hass.data[DOMAIN][client_id] = {}
# decide platform
platform = "PRODUCTION"
if client_id == "homeassistant_f2":
platform = "ACCEPTANCE"
elif client_id == "homeassistant_f3":
platform = "DEVELOPMENT"
hass.data[DOMAIN][CONF_PLATFORM] = platform
config_flow.SmappeeFlowHandler.async_register_implementation(
hass,
config_entry_oauth2_flow.LocalOAuth2Implementation(
hass,
DOMAIN,
config[DOMAIN][CONF_CLIENT_ID],
config[DOMAIN][CONF_CLIENT_SECRET],
AUTHORIZE_URL[platform],
TOKEN_URL[platform],
),
)
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Smappee from a zeroconf or config entry."""
if CONF_IP_ADDRESS in entry.data:
smappee_api = api.api.SmappeeLocalApi(ip=entry.data[CONF_IP_ADDRESS])
smappee = Smappee(api=smappee_api, serialnumber=entry.data[CONF_SERIALNUMBER])
await hass.async_add_executor_job(smappee.load_local_service_location)
else:
implementation = (
await config_entry_oauth2_flow.async_get_config_entry_implementation(
hass, entry
)
)
smappee_api = api.ConfigEntrySmappeeApi(hass, entry, implementation)
smappee = Smappee(api=smappee_api)
await hass.async_add_executor_job(smappee.load_service_locations)
hass.data[DOMAIN][entry.entry_id] = SmappeeBase(hass, smappee)
for component in SMAPPEE_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in SMAPPEE_PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id, None)
return unload_ok
class SmappeeBase:
"""An object to hold the PySmappee instance."""
def __init__(self, hass, smappee):
"""Initialize the Smappee API wrapper class."""
self.hass = hass
self.smappee = smappee
@Throttle(MIN_TIME_BETWEEN_UPDATES)
async def async_update(self):
"""Update all Smappee trends and appliance states."""
await self.hass.async_add_executor_job(
self.smappee.update_trends_and_appliance_states
)
|
import unittest
import mock
from kalliope.core.NeuronModule import MissingParameterException, InvalidParameterException
from kalliope.neurons.neurotransmitter import Neurotransmitter
class TestNeurotransmitter(unittest.TestCase):
def setUp(self):
self.from_answer_link = [
{
"synapse": "synapse2",
"answers": [
"answer one"
]
},
{
"synapse": "synapse3",
"answers": [
"answer two",
"answer three"
]
},
]
self.direct_link = "direct_link"
self.default = "default"
def testParameters(self):
"""
Testing the Parameters checking
"""
def run_test_invalid_parameter_exception(parameters_to_test):
with self.assertRaises(InvalidParameterException):
Neurotransmitter(**parameters_to_test)
def run_test_missing_parameter_exception(parameters_to_test):
with self.assertRaises(MissingParameterException):
Neurotransmitter(**parameters_to_test)
# empty
parameters = dict()
run_test_missing_parameter_exception(parameters)
# missing direct_link and from_answer_link
parameters = {
"default": self.default
}
run_test_missing_parameter_exception(parameters)
        # both direct_link and from_answer_link provided
parameters = {
"default": self.default,
"from_answer_link": self.from_answer_link,
"direct_link": self.direct_link
}
run_test_invalid_parameter_exception(parameters)
# missing default
parameters = {
"from_answer_link": self.from_answer_link,
"direct_link": self.direct_link
}
run_test_invalid_parameter_exception(parameters)
# Missing answer in from_answer_link
self.from_answer_link = [
{
"synapse": "synapse2",
}
]
parameters = {
"default": self.default,
"from_answer_link": self.from_answer_link
}
run_test_missing_parameter_exception(parameters)
# Missing synapse in from_answer_link
self.from_answer_link = [
{
"answer": "blablablbla",
}
]
parameters = {
"default": self.default,
"from_answer_link": self.from_answer_link
}
run_test_missing_parameter_exception(parameters)
def testCallback(self):
"""
Testing the callback provided when audio has been provided by the User as an answer.
"""
parameters = {
"default": self.default,
"from_answer_link": self.from_answer_link
}
with mock.patch("kalliope.core.NeuronModule.get_audio_from_stt") as mock_get_audio_from_stt:
with mock.patch("kalliope.core.NeuronModule.run_synapse_by_name") as mock_run_synapse_by_name:
                # instantiating the neuron with from_answer_link triggers a call to get_audio_from_stt
nt = Neurotransmitter(**parameters)
mock_get_audio_from_stt.assert_called_once()
mock_get_audio_from_stt.reset_mock()
# testing running the default when audio None
audio_text = None
nt.callback(audio=audio_text)
mock_run_synapse_by_name.assert_called_once_with(self.default, high_priority=True, is_api_call=False)
mock_run_synapse_by_name.reset_mock()
# testing running the default when no order matching
audio_text = "try test audio "
nt.callback(audio=audio_text)
mock_run_synapse_by_name.assert_called_once_with(self.default, high_priority=True, is_api_call=False)
mock_run_synapse_by_name.reset_mock()
# Testing calling the right synapse
audio_text = "answer one"
nt.callback(audio=audio_text)
mock_run_synapse_by_name.assert_called_once_with(synapse_name="synapse2",
user_order=audio_text,
synapse_order="answer one",
high_priority=True,
is_api_call=False)
def testInit(self):
"""
        Testing the init method of the neurotransmitter.
"""
with mock.patch("kalliope.core.NeuronModule.run_synapse_by_name") as mock_run_synapse_by_name:
# Test direct link
parameters = {
"default": self.default,
"direct_link": self.direct_link
}
Neurotransmitter(**parameters)
mock_run_synapse_by_name.assert_called_once_with(self.direct_link, high_priority=True)
with mock.patch("kalliope.core.NeuronModule.get_audio_from_stt") as mock_get_audio_from_stt:
# Test get_audio_from_stt
parameters = {
"default": self.default,
"from_answer_link": self.from_answer_link,
}
Neurotransmitter(**parameters)
mock_get_audio_from_stt.assert_called_once()
if __name__ == '__main__':
unittest.main()
|
import logging
import platform
import subprocess as sp
import voluptuous as vol
import wakeonlan
from homeassistant.components.switch import PLATFORM_SCHEMA, SwitchEntity
from homeassistant.const import (
CONF_BROADCAST_ADDRESS,
CONF_BROADCAST_PORT,
CONF_HOST,
CONF_MAC,
CONF_NAME,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
_LOGGER = logging.getLogger(__name__)
CONF_OFF_ACTION = "turn_off"
DEFAULT_NAME = "Wake on LAN"
DEFAULT_PING_TIMEOUT = 1
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_BROADCAST_ADDRESS): cv.string,
vol.Optional(CONF_BROADCAST_PORT): cv.port,
vol.Optional(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OFF_ACTION): cv.SCRIPT_SCHEMA,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a wake on lan switch."""
broadcast_address = config.get(CONF_BROADCAST_ADDRESS)
broadcast_port = config.get(CONF_BROADCAST_PORT)
host = config.get(CONF_HOST)
mac_address = config[CONF_MAC]
name = config[CONF_NAME]
off_action = config.get(CONF_OFF_ACTION)
add_entities(
[
WolSwitch(
hass,
name,
host,
mac_address,
off_action,
broadcast_address,
broadcast_port,
)
],
True,
)
class WolSwitch(SwitchEntity):
"""Representation of a wake on lan switch."""
def __init__(
self,
hass,
name,
host,
mac_address,
off_action,
broadcast_address,
broadcast_port,
):
"""Initialize the WOL switch."""
self._hass = hass
self._name = name
self._host = host
self._mac_address = mac_address
self._broadcast_address = broadcast_address
self._broadcast_port = broadcast_port
domain = __name__.split(".")[-2]
self._off_script = (
Script(hass, off_action, name, domain) if off_action else None
)
self._state = False
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
@property
def name(self):
"""Return the name of the switch."""
return self._name
def turn_on(self, **kwargs):
"""Turn the device on."""
service_kwargs = {}
if self._broadcast_address is not None:
service_kwargs["ip_address"] = self._broadcast_address
if self._broadcast_port is not None:
service_kwargs["port"] = self._broadcast_port
_LOGGER.info(
"Send magic packet to mac %s (broadcast: %s, port: %s)",
self._mac_address,
self._broadcast_address,
self._broadcast_port,
)
wakeonlan.send_magic_packet(self._mac_address, **service_kwargs)
def turn_off(self, **kwargs):
"""Turn the device off if an off action is present."""
if self._off_script is not None:
self._off_script.run(context=self._context)
def update(self):
"""Check if device is on and update the state."""
if platform.system().lower() == "windows":
ping_cmd = [
"ping",
"-n",
"1",
"-w",
str(DEFAULT_PING_TIMEOUT * 1000),
str(self._host),
]
else:
ping_cmd = [
"ping",
"-c",
"1",
"-W",
str(DEFAULT_PING_TIMEOUT),
str(self._host),
]
status = sp.call(ping_cmd, stdout=sp.DEVNULL, stderr=sp.DEVNULL)
self._state = not bool(status)
|
from datetime import datetime, timedelta
import logging
import re
from env_canada import ECData # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
ATTR_LOCATION,
CONF_LATITUDE,
CONF_LONGITUDE,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
SCAN_INTERVAL = timedelta(minutes=10)
ATTR_UPDATED = "updated"
ATTR_STATION = "station"
ATTR_TIME = "alert time"
CONF_ATTRIBUTION = "Data provided by Environment Canada"
CONF_STATION = "station"
CONF_LANGUAGE = "language"
def validate_station(station):
"""Check that the station ID is well-formed."""
if station is None:
return
if not re.fullmatch(r"[A-Z]{2}/s0000\d{3}", station):
raise vol.error.Invalid('Station ID must be of the form "XX/s0000###"')
return station
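# Illustrative sketch (hypothetical station IDs): "ON/s0000430" is accepted by
# validate_station, while "ON/430" or "on/s0000430" raise Invalid because they
# do not match the "XX/s0000###" pattern required above.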
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_LANGUAGE, default="english"): vol.In(["english", "french"]),
vol.Optional(CONF_STATION): validate_station,
vol.Inclusive(CONF_LATITUDE, "latlon"): cv.latitude,
vol.Inclusive(CONF_LONGITUDE, "latlon"): cv.longitude,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Environment Canada sensor."""
if config.get(CONF_STATION):
ec_data = ECData(
station_id=config[CONF_STATION], language=config.get(CONF_LANGUAGE)
)
else:
lat = config.get(CONF_LATITUDE, hass.config.latitude)
lon = config.get(CONF_LONGITUDE, hass.config.longitude)
ec_data = ECData(coordinates=(lat, lon), language=config.get(CONF_LANGUAGE))
sensor_list = list(ec_data.conditions) + list(ec_data.alerts)
add_entities([ECSensor(sensor_type, ec_data) for sensor_type in sensor_list], True)
class ECSensor(Entity):
"""Implementation of an Environment Canada sensor."""
def __init__(self, sensor_type, ec_data):
"""Initialize the sensor."""
self.sensor_type = sensor_type
self.ec_data = ec_data
self._unique_id = None
self._name = None
self._state = None
self._attr = None
self._unit = None
@property
def unique_id(self) -> str:
"""Return the unique ID of the sensor."""
return self._unique_id
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._attr
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit
def update(self):
"""Update current conditions."""
self.ec_data.update()
self.ec_data.conditions.update(self.ec_data.alerts)
conditions = self.ec_data.conditions
metadata = self.ec_data.metadata
sensor_data = conditions.get(self.sensor_type)
self._unique_id = f"{metadata['location']}-{self.sensor_type}"
self._attr = {}
self._name = sensor_data.get("label")
value = sensor_data.get("value")
if isinstance(value, list):
self._state = " | ".join([str(s.get("title")) for s in value])[:255]
self._attr.update(
{ATTR_TIME: " | ".join([str(s.get("date")) for s in value])}
)
elif self.sensor_type == "tendency":
self._state = str(value).capitalize()
elif value is not None and len(value) > 255:
self._state = value[:255]
_LOGGER.info("Value for %s truncated to 255 characters", self._unique_id)
else:
self._state = value
if sensor_data.get("unit") == "C" or self.sensor_type in [
"wind_chill",
"humidex",
]:
self._unit = TEMP_CELSIUS
else:
self._unit = sensor_data.get("unit")
timestamp = metadata.get("timestamp")
if timestamp:
updated_utc = datetime.strptime(timestamp, "%Y%m%d%H%M%S").isoformat()
else:
updated_utc = None
self._attr.update(
{
ATTR_ATTRIBUTION: CONF_ATTRIBUTION,
ATTR_UPDATED: updated_utc,
ATTR_LOCATION: metadata.get("location"),
ATTR_STATION: metadata.get("station"),
}
)
|
import os.path
import logging
import pytest
import pytest_bdd as bdd
bdd.scenarios('sessions.feature')
@pytest.fixture(autouse=True)
def turn_on_scroll_logging(quteproc):
quteproc.turn_on_scroll_logging()
@bdd.when(bdd.parsers.parse('I have a "{name}" session file:\n{contents}'))
def create_session_file(quteproc, name, contents):
filename = os.path.join(quteproc.basedir, 'data', 'sessions',
name + '.yml')
with open(filename, 'w', encoding='utf-8') as f:
f.write(contents)
@bdd.when(bdd.parsers.parse('I replace "{pattern}" by "{replacement}" in the '
'"{name}" session file'))
def session_replace(quteproc, server, pattern, replacement, name):
# First wait until the session was actually saved
quteproc.wait_for(category='message', loglevel=logging.INFO,
message='Saved session {}.'.format(name))
filename = os.path.join(quteproc.basedir, 'data', 'sessions',
name + '.yml')
replacement = replacement.replace('(port)', str(server.port)) # yo dawg
with open(filename, 'r', encoding='utf-8') as f:
data = f.read()
with open(filename, 'w', encoding='utf-8') as f:
f.write(data.replace(pattern, replacement))
@bdd.then(bdd.parsers.parse("the session {name} should exist"))
def session_should_exist(quteproc, name):
filename = os.path.join(quteproc.basedir, 'data', 'sessions',
name + '.yml')
assert os.path.exists(filename)
@bdd.then(bdd.parsers.parse("the session {name} should not exist"))
def session_should_not_exist(quteproc, name):
filename = os.path.join(quteproc.basedir, 'data', 'sessions',
name + '.yml')
assert not os.path.exists(filename)
|
import abodepy.helpers.constants as CONST
from homeassistant.components.switch import SwitchEntity
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import AbodeAutomation, AbodeDevice
from .const import DOMAIN
DEVICE_TYPES = [CONST.TYPE_SWITCH, CONST.TYPE_VALVE]
ICON = "mdi:robot"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Abode switch devices."""
data = hass.data[DOMAIN]
entities = []
for device_type in DEVICE_TYPES:
for device in data.abode.get_devices(generic_type=device_type):
entities.append(AbodeSwitch(data, device))
for automation in data.abode.get_automations():
entities.append(AbodeAutomationSwitch(data, automation))
async_add_entities(entities)
class AbodeSwitch(AbodeDevice, SwitchEntity):
"""Representation of an Abode switch."""
def turn_on(self, **kwargs):
"""Turn on the device."""
self._device.switch_on()
def turn_off(self, **kwargs):
"""Turn off the device."""
self._device.switch_off()
@property
def is_on(self):
"""Return true if device is on."""
return self._device.is_on
class AbodeAutomationSwitch(AbodeAutomation, SwitchEntity):
"""A switch implementation for Abode automations."""
async def async_added_to_hass(self):
"""Set up trigger automation service."""
await super().async_added_to_hass()
signal = f"abode_trigger_automation_{self.entity_id}"
self.async_on_remove(async_dispatcher_connect(self.hass, signal, self.trigger))
def turn_on(self, **kwargs):
"""Enable the automation."""
if self._automation.enable(True):
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Disable the automation."""
if self._automation.enable(False):
self.schedule_update_ha_state()
def trigger(self):
"""Trigger the automation."""
self._automation.trigger()
@property
def is_on(self):
"""Return True if the automation is enabled."""
return self._automation.is_enabled
@property
def icon(self):
"""Return the robot icon to match Home Assistant automations."""
return ICON
|
import cmd
import keychain
import sys
from stashutils import dbutils
_stash = globals()["_stash"]
class DropboxSetupCmd(cmd.Cmd):
"""The command loop for managing the dropbox"""
intro = _stash.text_color("Welcome to the Dropbox Setup. Type 'help' for help.", "yellow")
prompt = _stash.text_color("(dbs)", "red")
use_rawinput = False
def do_exit(self, cmd):
"""exit: quits the setup."""
sys.exit(0)
do_quit = do_EOF = do_exit
def do_list(self, cmd):
"""list: lists the dropbox usernames."""
self.stdout.write("\n")
for service, account in keychain.get_services():
if service == dbutils.DB_SERVICE:
self.stdout.write(account + "\n")
self.stdout.write("\n")
def do_del(self, cmd):
"""del USERNAME: resets the dropbox for USERNAME."""
dbutils.reset_dropbox(cmd)
do_reset = do_del
def do_add(self, cmd):
"""add USERNAME: starts the setup for USERNAME."""
if len(cmd) == 0:
self.stdout.write(_stash.text_color("Error: expected an username.\n", "red"))
return
try:
dbutils.dropbox_setup(cmd, self.stdin, self.stdout)
except KeyboardInterrupt:
self.stdout.write("\nSetup aborted.\n")
do_edit = do_add
if __name__ == "__main__":
cmdo = DropboxSetupCmd()
cmdo.cmdloop()
|
import pyvera as pv
from homeassistant.core import HomeAssistant
from .common import ComponentFactory, new_simple_controller_config
from tests.async_mock import MagicMock
async def test_scene(
hass: HomeAssistant, vera_component_factory: ComponentFactory
) -> None:
"""Test function."""
vera_scene = MagicMock(spec=pv.VeraScene) # type: pv.VeraScene
vera_scene.scene_id = 1
vera_scene.vera_scene_id = vera_scene.scene_id
vera_scene.name = "dev1"
entity_id = "scene.dev1_1"
await vera_component_factory.configure_component(
hass=hass,
controller_config=new_simple_controller_config(scenes=(vera_scene,)),
)
await hass.services.async_call(
"scene",
"turn_on",
{"entity_id": entity_id},
)
await hass.async_block_till_done()
|
import pytest
import voluptuous as vol
from homeassistant.components.wake_on_lan import DOMAIN, SERVICE_SEND_MAGIC_PACKET
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_send_magic_packet(hass):
"""Test of send magic packet service call."""
with patch("homeassistant.components.wake_on_lan.wakeonlan") as mocked_wakeonlan:
mac = "aa:bb:cc:dd:ee:ff"
bc_ip = "192.168.255.255"
bc_port = 999
await async_setup_component(hass, DOMAIN, {})
await hass.services.async_call(
DOMAIN,
SERVICE_SEND_MAGIC_PACKET,
{"mac": mac, "broadcast_address": bc_ip, "broadcast_port": bc_port},
blocking=True,
)
assert len(mocked_wakeonlan.mock_calls) == 1
assert mocked_wakeonlan.mock_calls[-1][1][0] == mac
assert mocked_wakeonlan.mock_calls[-1][2]["ip_address"] == bc_ip
assert mocked_wakeonlan.mock_calls[-1][2]["port"] == bc_port
await hass.services.async_call(
DOMAIN,
SERVICE_SEND_MAGIC_PACKET,
{"mac": mac, "broadcast_address": bc_ip},
blocking=True,
)
assert len(mocked_wakeonlan.mock_calls) == 2
assert mocked_wakeonlan.mock_calls[-1][1][0] == mac
assert mocked_wakeonlan.mock_calls[-1][2]["ip_address"] == bc_ip
assert "port" not in mocked_wakeonlan.mock_calls[-1][2]
await hass.services.async_call(
DOMAIN,
SERVICE_SEND_MAGIC_PACKET,
{"mac": mac, "broadcast_port": bc_port},
blocking=True,
)
assert len(mocked_wakeonlan.mock_calls) == 3
assert mocked_wakeonlan.mock_calls[-1][1][0] == mac
assert mocked_wakeonlan.mock_calls[-1][2]["port"] == bc_port
assert "ip_address" not in mocked_wakeonlan.mock_calls[-1][2]
with pytest.raises(vol.Invalid):
await hass.services.async_call(
DOMAIN,
SERVICE_SEND_MAGIC_PACKET,
{"broadcast_address": bc_ip},
blocking=True,
)
assert len(mocked_wakeonlan.mock_calls) == 3
await hass.services.async_call(
DOMAIN, SERVICE_SEND_MAGIC_PACKET, {"mac": mac}, blocking=True
)
assert len(mocked_wakeonlan.mock_calls) == 4
assert mocked_wakeonlan.mock_calls[-1][1][0] == mac
assert not mocked_wakeonlan.mock_calls[-1][2]
|
import copy
from typing import Any
from typing import cast
from typing import List
from typing import Optional
from mypy_extensions import TypedDict
from paasta_tools.long_running_service_tools import load_service_namespace_config
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfigDict
from paasta_tools.long_running_service_tools import ServiceNamespaceConfig
from paasta_tools.utils import BranchDictV2
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import Constraint # noqa, imported for typing.
from paasta_tools.utils import DEFAULT_SOA_DIR
from paasta_tools.utils import DockerParameter
from paasta_tools.utils import get_code_sha_from_dockerurl
from paasta_tools.utils import get_config_hash
from paasta_tools.utils import load_service_instance_config
from paasta_tools.utils import load_v2_deployments_json
from paasta_tools.utils import SystemPaastaConfig
MESOS_TASK_SPACER = "."
VolumeInfo = TypedDict(
"VolumeInfo", {"container_path": str, "host_path": str, "mode": str}
)
_Docker_PortMapping = TypedDict(
"_Docker_PortMapping", {"host_port": int, "container_port": int, "protocol": str}
)
DockerInfo = TypedDict(
"DockerInfo",
{
"image": str,
"network": str,
"port_mappings": List[_Docker_PortMapping],
"parameters": List[DockerParameter],
},
)
ContainerInfo = TypedDict(
"ContainerInfo", {"type": str, "docker": DockerInfo, "volumes": List[VolumeInfo]}
)
_CommandInfo_URI = TypedDict(
"_CommandInfo_URI",
{
"value": str,
"executable": bool,
"extract": bool,
"cache": bool,
"output_file": str,
},
total=False,
)
CommandInfo = TypedDict(
"CommandInfo",
{
"uris": List[_CommandInfo_URI],
"environment": Any,
"shell": bool,
"value": str,
"arguments": List[str],
"user": str,
},
total=False,
)
Value_Scalar = TypedDict("Value_Scalar", {"value": float})
Value_Range = TypedDict("Value_Range", {"begin": int, "end": int})
Value_Ranges = TypedDict("Value_Ranges", {"range": List[Value_Range]})
Resource = TypedDict(
"Resource",
{"name": str, "type": str, "scalar": Value_Scalar, "ranges": Value_Ranges},
total=False,
)
TaskID = TypedDict("TaskID", {"value": str})
SlaveID = TypedDict("SlaveID", {"value": str})
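# Illustrative sketch, not part of the original module: minimal literals showing
# the shapes of the Mesos resource TypedDicts declared above. The numbers are
# hypothetical and only demonstrate the nesting used later in base_task().
def _example_resources() -> List[Resource]:
    cpus: Resource = {"name": "cpus", "type": "SCALAR", "scalar": {"value": 0.25}}
    ports: Resource = {
        "name": "ports",
        "type": "RANGES",
        "ranges": {"range": [{"begin": 31000, "end": 31000}]},
    }
    return [cpus, ports]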
class TaskInfoBase(TypedDict):
name: str
task_id: TaskID
agent_id: SlaveID
resources: List[Resource]
class TaskInfo(TaskInfoBase):
container: ContainerInfo
command: CommandInfo
class NativeServiceConfigDict(LongRunningServiceConfigDict):
pass
class NativeServiceConfig(LongRunningServiceConfig):
config_dict: NativeServiceConfigDict
config_filename_prefix = "paasta_native"
def __init__(
self,
service: str,
instance: str,
cluster: str,
config_dict: NativeServiceConfigDict,
branch_dict: Optional[BranchDictV2],
soa_dir: str,
service_namespace_config: Optional[ServiceNamespaceConfig] = None,
) -> None:
super().__init__(
cluster=cluster,
instance=instance,
service=service,
config_dict=config_dict,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
# service_namespace_config may be omitted/set to None at first, then set
# after initializing. e.g. we do this in load_paasta_native_job_config
# so we can call get_nerve_namespace() to figure out what SNC to read.
# It may also be set to None if this service is not in nerve.
if service_namespace_config is not None:
self.service_namespace_config = service_namespace_config
else:
self.service_namespace_config = ServiceNamespaceConfig()
def task_name(self, base_task: TaskInfo) -> str:
code_sha = get_code_sha_from_dockerurl(
base_task["container"]["docker"]["image"]
)
filled_in_task = copy.deepcopy(base_task)
filled_in_task["name"] = ""
filled_in_task["task_id"] = {"value": ""}
filled_in_task["agent_id"] = {"value": ""}
config_hash = get_config_hash(
filled_in_task, force_bounce=self.get_force_bounce()
)
return compose_job_id(
self.service,
self.instance,
git_hash=code_sha,
config_hash=config_hash,
spacer=MESOS_TASK_SPACER,
)
def base_task(
self, system_paasta_config: SystemPaastaConfig, portMappings=True
) -> TaskInfo:
"""Return a TaskInfo Dict with all the fields corresponding to the
configuration filled in.
Does not include task.agent_id or a task.id; those need to be
computed separately.
"""
docker_volumes = self.get_volumes(
system_volumes=system_paasta_config.get_volumes()
)
task: TaskInfo = {
"name": "",
"task_id": {"value": ""},
"agent_id": {"value": ""},
"container": {
"type": "DOCKER",
"docker": {
"image": self.get_docker_url(),
"parameters": [
{"key": param["key"], "value": param["value"]}
for param in self.format_docker_parameters()
],
"network": self.get_mesos_network_mode(),
"port_mappings": [],
},
"volumes": [
{
"container_path": volume["containerPath"],
"host_path": volume["hostPath"],
"mode": volume["mode"].upper(),
}
for volume in docker_volumes
],
},
"command": {
"value": str(self.get_cmd()),
"uris": [
{
"value": system_paasta_config.get_dockercfg_location(),
"extract": False,
}
],
},
"resources": [
{
"name": "cpus",
"type": "SCALAR",
"scalar": {"value": self.get_cpus()},
},
{"name": "mem", "type": "SCALAR", "scalar": {"value": self.get_mem()}},
],
}
if portMappings:
task["container"]["docker"]["port_mappings"] = [
{
"container_port": self.get_container_port(),
# filled by tasks_and_state_for_offer()
"host_port": 0,
"protocol": "tcp",
}
]
task["resources"].append(
{
"name": "ports",
"type": "RANGES",
"ranges": {
# filled by tasks_and_state_for_offer
"range": [{"begin": 0, "end": 0}]
},
}
)
task["name"] = self.task_name(task)
return task
def get_mesos_network_mode(self) -> str:
return self.get_net().upper()
def load_paasta_native_job_config(
service: str,
instance: str,
cluster: str,
load_deployments: bool = True,
soa_dir: str = DEFAULT_SOA_DIR,
instance_type: str = "paasta_native",
config_overrides: Optional[NativeServiceConfigDict] = None,
) -> NativeServiceConfig:
instance_config_dict = cast(
NativeServiceConfigDict,
load_service_instance_config(
service=service,
instance=instance,
instance_type=instance_type,
cluster=cluster,
soa_dir=soa_dir,
),
)
branch_dict: Optional[BranchDictV2] = None
instance_config_dict.update(config_overrides or {})
if load_deployments:
deployments_json = load_v2_deployments_json(service, soa_dir=soa_dir)
temp_instance_config = NativeServiceConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=instance_config_dict,
branch_dict=None,
soa_dir=soa_dir,
)
branch = temp_instance_config.get_branch()
deploy_group = temp_instance_config.get_deploy_group()
branch_dict = deployments_json.get_branch_dict(service, branch, deploy_group)
service_config = NativeServiceConfig(
service=service,
cluster=cluster,
instance=instance,
config_dict=instance_config_dict,
branch_dict=branch_dict,
soa_dir=soa_dir,
)
service_namespace_config = load_service_namespace_config(
service=service, namespace=service_config.get_nerve_namespace(), soa_dir=soa_dir
)
service_config.service_namespace_config = service_namespace_config
return service_config
class UnknownNativeServiceError(Exception):
pass
|
import asyncio
from datetime import timedelta
from functools import partial
from pyiqvia import Client
from pyiqvia.errors import IQVIAError
from homeassistant.const import ATTR_ATTRIBUTION
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
UpdateFailed,
)
from .const import (
CONF_ZIP_CODE,
DATA_COORDINATOR,
DOMAIN,
LOGGER,
TYPE_ALLERGY_FORECAST,
TYPE_ALLERGY_INDEX,
TYPE_ALLERGY_OUTLOOK,
TYPE_ASTHMA_FORECAST,
TYPE_ASTHMA_INDEX,
TYPE_DISEASE_FORECAST,
TYPE_DISEASE_INDEX,
)
DEFAULT_ATTRIBUTION = "Data provided by IQVIA™"
DEFAULT_SCAN_INTERVAL = timedelta(minutes=30)
PLATFORMS = ["sensor"]
async def async_setup(hass, config):
"""Set up the IQVIA component."""
hass.data[DOMAIN] = {DATA_COORDINATOR: {}}
return True
async def async_setup_entry(hass, entry):
"""Set up IQVIA as config entry."""
hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id] = {}
if not entry.unique_id:
# If the config entry doesn't already have a unique ID, set one:
hass.config_entries.async_update_entry(
entry, **{"unique_id": entry.data[CONF_ZIP_CODE]}
)
websession = aiohttp_client.async_get_clientsession(hass)
client = Client(entry.data[CONF_ZIP_CODE], websession)
async def async_get_data_from_api(api_coro):
"""Get data from a particular API coroutine."""
try:
return await api_coro()
except IQVIAError as err:
raise UpdateFailed from err
init_data_update_tasks = []
for sensor_type, api_coro in [
(TYPE_ALLERGY_FORECAST, client.allergens.extended),
(TYPE_ALLERGY_INDEX, client.allergens.current),
(TYPE_ALLERGY_OUTLOOK, client.allergens.outlook),
(TYPE_ASTHMA_FORECAST, client.asthma.extended),
(TYPE_ASTHMA_INDEX, client.asthma.current),
(TYPE_DISEASE_FORECAST, client.disease.extended),
(TYPE_DISEASE_INDEX, client.disease.current),
]:
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id][
sensor_type
] = DataUpdateCoordinator(
hass,
LOGGER,
name=f"{entry.data[CONF_ZIP_CODE]} {sensor_type}",
update_interval=DEFAULT_SCAN_INTERVAL,
update_method=partial(async_get_data_from_api, api_coro),
)
init_data_update_tasks.append(coordinator.async_refresh())
await asyncio.gather(*init_data_update_tasks)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
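# Illustrative sketch, not part of the original integration: why async_setup_entry()
# wraps each API coroutine function with functools.partial. DataUpdateCoordinator
# expects a zero-argument update_method, so the coroutine function is pre-bound and
# every call of the partial produces a fresh awaitable. The fake API call below is
# purely hypothetical.
async def _example_partial_update_method():
    async def fake_api_call():
        return {"status": "ok"}

    async def get_data(api_coro):
        return await api_coro()

    update_method = partial(get_data, fake_api_call)
    return await update_method()  # -> {"status": "ok"}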
async def async_unload_entry(hass, entry):
"""Unload an OpenUV config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN][DATA_COORDINATOR].pop(entry.entry_id)
return unload_ok
class IQVIAEntity(CoordinatorEntity):
"""Define a base IQVIA entity."""
def __init__(self, coordinator, entry, sensor_type, name, icon):
"""Initialize."""
super().__init__(coordinator)
self._attrs = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
self._entry = entry
self._icon = icon
self._name = name
self._state = None
self._type = sensor_type
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return self._attrs
@property
def icon(self):
"""Return the icon."""
return self._icon
@property
def name(self):
"""Return the name."""
return self._name
@property
def state(self):
"""Return the state."""
return self._state
@property
def unique_id(self):
"""Return a unique, Home Assistant friendly identifier for this entity."""
return f"{self._entry.data[CONF_ZIP_CODE]}_{self._type}"
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return "index"
@callback
def _handle_coordinator_update(self) -> None:
"""Handle updated data from the coordinator."""
self.update_from_latest_data()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callbacks."""
await super().async_added_to_hass()
if self._type == TYPE_ALLERGY_FORECAST:
self.async_on_remove(
self.hass.data[DOMAIN][DATA_COORDINATOR][self._entry.entry_id][
TYPE_ALLERGY_OUTLOOK
].async_add_listener(self._handle_coordinator_update)
)
self.update_from_latest_data()
@callback
def update_from_latest_data(self):
"""Update the entity from the latest data."""
raise NotImplementedError
|
from unittest import TestCase
from scattertext.emojis.EmojiExtractor import extract_emoji
class TestExtract_emoji(TestCase):
def test_extract_emoji(self):
text_ords = [128589, 127998, 97, 102, 100, 115, 128077, 128077, 127998, 127873, 128175]
text = ''.join([chr(c) for c in text_ords])
result = [[ord(c) for c in pic] for pic in extract_emoji(text)]
self.assertEqual(result,
[[128589, 127998], [128077], [128077, 127998], [127873], [128175]])
def test_extract_emoji_ensure_no_numbers(self):
text_ords = [50, 49, 51, 52, 50, 51, 128587, 127995, 128587, 127995, 97, 32, 97, 106, 97, 107, 115, 100, 108, 32,
102, 97, 115, 108, 107, 51, 32, 107, 32, 51, 32, 35, 32, 94, 32, 64, 32, 33, 32, 32, 35, 32, 42, 32,
60, 32, 62, 32, 63, 32, 32, 34, 32, 46, 32, 44, 32, 32, 41, 32, 40, 32, 36]
text = ''.join([chr(c) for c in text_ords])
result = [[ord(c) for c in pic] for pic in extract_emoji(text)]
self.assertEqual(result, [[128587, 127995], [128587, 127995]])
|
from marshmallow import fields
from lemur.common.schema import LemurInputSchema, LemurOutputSchema
from lemur.schemas import AssociatedCertificateSchema
# from lemur.certificates.schemas import CertificateNestedOutputSchema
class DomainInputSchema(LemurInputSchema):
id = fields.Integer()
name = fields.String(required=True)
sensitive = fields.Boolean(missing=False)
certificates = fields.Nested(AssociatedCertificateSchema, many=True, missing=[])
class DomainOutputSchema(LemurOutputSchema):
id = fields.Integer()
name = fields.String()
sensitive = fields.Boolean()
# certificates = fields.Nested(CertificateNestedOutputSchema, many=True, missing=[])
class DomainNestedOutputSchema(DomainOutputSchema):
__envelope__ = False
domain_input_schema = DomainInputSchema()
domain_output_schema = DomainOutputSchema()
domains_output_schema = DomainOutputSchema(many=True)
|
from __future__ import unicode_literals
import os
from lib.data.data import pyoptions
from lib.fun.filter import filterforfun
from lib.fun.fun import walk_pure_file, cool, finishprinter, finalsavepath, fun_name
def comparer_magic(*args):
"""[minuend_file] [subtrahend_file]"""
args = list(args[0])
if len(args) != 3:
exit(pyoptions.CRLF + cool.fuchsia("[!] Usage: {} {}".format(args[0], pyoptions.tools_info.get(args[0]))))
storepath = finalsavepath(fun_name())
minuend_file = args[1]
subtrahend_file = args[2]
if not os.path.isfile(os.path.abspath(pyoptions.args_tool[1])):
exit(pyoptions.CRLF + cool.red("[-] file: {} don't exists".format(minuend_file)))
if not os.path.isfile(os.path.abspath(pyoptions.args_tool[2])):
exit(pyoptions.CRLF + cool.red("[-] file: {} don't exists".format(subtrahend_file)))
minuend_list = walk_pure_file(minuend_file)
subtrahend_list = walk_pure_file(subtrahend_file)
with open(storepath, "a") as f:
for item in minuend_list:
if item not in subtrahend_list:
item = filterforfun(item)
if item:
f.write(item + pyoptions.CRLF)
finishprinter(storepath)
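# Illustrative sketch, not part of the original tool: the membership test above is
# a plain set difference. Using a set for the subtrahend gives the same output for
# unique lines while avoiding the O(n*m) scan on large wordlists.
def _example_difference(minuend_lines, subtrahend_lines):
    subtrahend_set = set(subtrahend_lines)
    return [line for line in minuend_lines if line not in subtrahend_set]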
|
from smart_open import open
def read_lines(url, limit):
lines = []
with open(url, 'r', errors='ignore') as fin:
for i, l in enumerate(fin):
if i == limit:
break
lines.append(l)
return lines
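# Illustrative sketch, not part of the original benchmark: read_lines() accepts any
# URI that smart_open understands, so the same helper also works against a plain
# local path. The path below is a hypothetical example.
def _example_local_usage():
    first_ten = read_lines('/tmp/sample.txt', 10)
    print(len(first_ten), 'lines read')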
def test(benchmark):
#
# This file is around 850MB.
#
url = (
's3://commoncrawl/crawl-data/CC-MAIN-2019-51/segments/1575541319511.97'
'/warc/CC-MAIN-20191216093448-20191216121448-00559.warc.gz'
)
limit = 1000000
lines = benchmark(read_lines, url, limit)
assert len(lines) == limit
|
import os
import sys
try:
import docker
except ImportError:
docker = None
sys.path.append(os.path.join(os.path.dirname(os.path.dirname(__file__)),
'memory_cgroup'))
from memory_cgroup import MemoryCgroupCollector
class MemoryDockerCollector(MemoryCgroupCollector):
def collect(self):
if docker is None:
self.log.error('Unable to import docker')
return
self.containers = dict(
(c['Id'], c['Names'][0][1:])
for c in docker.Client().containers(all=True)
if c['Names'] is not None)
return super(MemoryDockerCollector, self).collect()
def publish(self, metric_name, value, metric_type):
for container_id, container_name in self.containers.items():
metric_name = metric_name.replace(
'docker.' + container_id + '.',
'docker.' + container_name + '.')
return super(MemoryDockerCollector, self).publish(
metric_name, value, metric_type)
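# Illustrative sketch, not part of the original collector: how publish() above
# rewrites a cgroup metric path, replacing the long container id segment with the
# human-readable container name. The id and name below are hypothetical.
def _example_metric_rename():
    containers = {'3f4e9c2a1b0d': 'web-frontend'}
    metric_name = 'docker.3f4e9c2a1b0d.memory.usage'
    for container_id, container_name in containers.items():
        metric_name = metric_name.replace(
            'docker.' + container_id + '.',
            'docker.' + container_name + '.')
    return metric_name  # -> 'docker.web-frontend.memory.usage'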
|
import os
import socket
import warnings
import librabbitmq as amqp
from librabbitmq import ChannelError, ConnectionError
from kombu.utils.amq_manager import get_manager
from kombu.utils.text import version_string_as_tuple
from . import base
from .base import to_rabbitmq_queue_arguments
W_VERSION = """
librabbitmq version too old to detect RabbitMQ version information
so make sure you are using librabbitmq 1.5 when using rabbitmq > 3.3
"""
DEFAULT_PORT = 5672
DEFAULT_SSL_PORT = 5671
NO_SSL_ERROR = """\
ssl not supported by librabbitmq, please use pyamqp:// or stunnel\
"""
class Message(base.Message):
"""AMQP Message (librabbitmq)."""
def __init__(self, channel, props, info, body):
super().__init__(
channel=channel,
body=body,
delivery_info=info,
properties=props,
delivery_tag=info.get('delivery_tag'),
content_type=props.get('content_type'),
content_encoding=props.get('content_encoding'),
headers=props.get('headers'))
class Channel(amqp.Channel, base.StdChannel):
"""AMQP Channel (librabbitmq)."""
Message = Message
def prepare_message(self, body, priority=None,
content_type=None, content_encoding=None,
headers=None, properties=None):
"""Encapsulate data into a AMQP message."""
properties = properties if properties is not None else {}
properties.update({'content_type': content_type,
'content_encoding': content_encoding,
'headers': headers})
# Don't include priority if it's not an integer.
# If that's the case librabbitmq will fail
# and raise an exception.
if priority is not None:
properties['priority'] = priority
return body, properties
def prepare_queue_arguments(self, arguments, **kwargs):
arguments = to_rabbitmq_queue_arguments(arguments, **kwargs)
return {k.encode('utf8'): v for k, v in arguments.items()}
class Connection(amqp.Connection):
"""AMQP Connection (librabbitmq)."""
Channel = Channel
Message = Message
class Transport(base.Transport):
"""AMQP Transport (librabbitmq)."""
Connection = Connection
default_port = DEFAULT_PORT
default_ssl_port = DEFAULT_SSL_PORT
connection_errors = (
base.Transport.connection_errors + (
ConnectionError, socket.error, IOError, OSError)
)
channel_errors = (
base.Transport.channel_errors + (ChannelError,)
)
driver_type = 'amqp'
driver_name = 'librabbitmq'
implements = base.Transport.implements.extend(
asynchronous=True,
heartbeats=False,
)
def __init__(self, client, **kwargs):
self.client = client
self.default_port = kwargs.get('default_port') or self.default_port
self.default_ssl_port = (kwargs.get('default_ssl_port') or
self.default_ssl_port)
self.__reader = None
def driver_version(self):
return amqp.__version__
def create_channel(self, connection):
return connection.channel()
def drain_events(self, connection, **kwargs):
return connection.drain_events(**kwargs)
def establish_connection(self):
"""Establish connection to the AMQP broker."""
conninfo = self.client
for name, default_value in self.default_connection_params.items():
if not getattr(conninfo, name, None):
setattr(conninfo, name, default_value)
if conninfo.ssl:
raise NotImplementedError(NO_SSL_ERROR)
opts = dict({
'host': conninfo.host,
'userid': conninfo.userid,
'password': conninfo.password,
'virtual_host': conninfo.virtual_host,
'login_method': conninfo.login_method,
'insist': conninfo.insist,
'ssl': conninfo.ssl,
'connect_timeout': conninfo.connect_timeout,
}, **conninfo.transport_options or {})
conn = self.Connection(**opts)
conn.client = self.client
self.client.drain_events = conn.drain_events
return conn
def close_connection(self, connection):
"""Close the AMQP broker connection."""
self.client.drain_events = None
connection.close()
def _collect(self, connection):
if connection is not None:
for channel in connection.channels.values():
channel.connection = None
try:
os.close(connection.fileno())
except (OSError, ValueError):
pass
connection.channels.clear()
connection.callbacks.clear()
self.client.drain_events = None
self.client = None
def verify_connection(self, connection):
return connection.connected
def register_with_event_loop(self, connection, loop):
loop.add_reader(
connection.fileno(), self.on_readable, connection, loop,
)
def get_manager(self, *args, **kwargs):
return get_manager(self.client, *args, **kwargs)
def qos_semantics_matches_spec(self, connection):
try:
props = connection.server_properties
except AttributeError:
warnings.warn(UserWarning(W_VERSION))
else:
if props.get('product') == 'RabbitMQ':
return version_string_as_tuple(props['version']) < (3, 3)
return True
@property
def default_connection_params(self):
return {
'userid': 'guest',
'password': 'guest',
'port': (self.default_ssl_port if self.client.ssl
else self.default_port),
'hostname': 'localhost',
'login_method': 'AMQPLAIN',
}
|
from gi.repository import Gio, GLib, GObject, Gtk, GtkSource
from meld.conf import _
from meld.filters import FilterEntry
from meld.settings import settings
from meld.ui.listwidget import EditableListWidget
@Gtk.Template(resource_path='/org/gnome/meld/ui/filter-list.ui')
class FilterList(Gtk.Box, EditableListWidget):
__gtype_name__ = "FilterList"
treeview = Gtk.Template.Child()
remove = Gtk.Template.Child()
move_up = Gtk.Template.Child()
move_down = Gtk.Template.Child()
pattern_column = Gtk.Template.Child()
validity_renderer = Gtk.Template.Child()
default_entry = [_("label"), False, _("pattern"), True]
filter_type = GObject.Property(
type=int,
flags=(
GObject.ParamFlags.READABLE |
GObject.ParamFlags.WRITABLE |
GObject.ParamFlags.CONSTRUCT_ONLY
),
)
settings_key = GObject.Property(
type=str,
flags=(
GObject.ParamFlags.READABLE |
GObject.ParamFlags.WRITABLE |
GObject.ParamFlags.CONSTRUCT_ONLY
),
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model = self.treeview.get_model()
self.pattern_column.set_cell_data_func(
self.validity_renderer, self.valid_icon_celldata)
for filter_params in settings.get_value(self.settings_key):
filt = FilterEntry.new_from_gsetting(
filter_params, self.filter_type)
if filt is None:
continue
valid = filt.filter is not None
self.model.append(
[filt.label, filt.active, filt.filter_string, valid])
for signal in ('row-changed', 'row-deleted', 'row-inserted',
'rows-reordered'):
self.model.connect(signal, self._update_filter_string)
self.setup_sensitivity_handling()
def valid_icon_celldata(self, col, cell, model, it, user_data=None):
is_valid = model.get_value(it, 3)
icon_name = "gtk-dialog-warning" if not is_valid else None
cell.set_property("stock-id", icon_name)
@Gtk.Template.Callback()
def on_add_clicked(self, button):
self.add_entry()
@Gtk.Template.Callback()
def on_remove_clicked(self, button):
self.remove_selected_entry()
@Gtk.Template.Callback()
def on_move_up_clicked(self, button):
self.move_up_selected_entry()
@Gtk.Template.Callback()
def on_move_down_clicked(self, button):
self.move_down_selected_entry()
@Gtk.Template.Callback()
def on_name_edited(self, ren, path, text):
self.model[path][0] = text
@Gtk.Template.Callback()
def on_cellrenderertoggle_toggled(self, ren, path):
self.model[path][1] = not ren.get_active()
@Gtk.Template.Callback()
def on_pattern_edited(self, ren, path, text):
valid = FilterEntry.check_filter(text, self.filter_type)
self.model[path][2] = text
self.model[path][3] = valid
def _update_filter_string(self, *args):
value = [(row[0], row[1], row[2]) for row in self.model]
settings.set_value(self.settings_key, GLib.Variant('a(sbs)', value))
@Gtk.Template(resource_path='/org/gnome/meld/ui/column-list.ui')
class ColumnList(Gtk.VBox, EditableListWidget):
__gtype_name__ = "ColumnList"
treeview = Gtk.Template.Child()
remove = Gtk.Template.Child()
move_up = Gtk.Template.Child()
move_down = Gtk.Template.Child()
default_entry = [_("label"), False, _("pattern"), True]
available_columns = {
"size": _("Size"),
"modification time": _("Modification time"),
"permissions": _("Permissions"),
}
settings_key = GObject.Property(
type=str,
flags=(
GObject.ParamFlags.READABLE |
GObject.ParamFlags.WRITABLE |
GObject.ParamFlags.CONSTRUCT_ONLY
),
)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model = self.treeview.get_model()
# Unwrap the variant
prefs_columns = [
(k, v) for k, v in settings.get_value(self.settings_key)
]
column_vis = {}
column_order = {}
for sort_key, (column_name, visibility) in enumerate(prefs_columns):
column_vis[column_name] = bool(int(visibility))
column_order[column_name] = sort_key
columns = [
(column_vis.get(name, True), name, label)
for name, label in self.available_columns.items()
]
columns = sorted(columns, key=lambda c: column_order.get(c[1], 0))
for visibility, name, label in columns:
self.model.append([visibility, name, label])
for signal in ('row-changed', 'row-deleted', 'row-inserted',
'rows-reordered'):
self.model.connect(signal, self._update_columns)
self.setup_sensitivity_handling()
@Gtk.Template.Callback()
def on_move_up_clicked(self, button):
self.move_up_selected_entry()
@Gtk.Template.Callback()
def on_move_down_clicked(self, button):
self.move_down_selected_entry()
@Gtk.Template.Callback()
def on_cellrenderertoggle_toggled(self, ren, path):
self.model[path][0] = not ren.get_active()
def _update_columns(self, *args):
value = [(c[1].lower(), c[0]) for c in self.model]
settings.set_value(self.settings_key, GLib.Variant('a(sb)', value))
class GSettingsComboBox(Gtk.ComboBox):
def __init__(self):
super().__init__()
self.connect('notify::gsettings-value', self._setting_changed)
self.connect('notify::active', self._active_changed)
def bind_to(self, key):
settings.bind(
key, self, 'gsettings-value', Gio.SettingsBindFlags.DEFAULT)
def _setting_changed(self, obj, val):
column = self.get_property('gsettings-column')
value = self.get_property('gsettings-value')
for row in self.get_model():
if value == row[column]:
idx = row.path[0]
break
else:
idx = 0
if self.get_property('active') != idx:
self.set_property('active', idx)
def _active_changed(self, obj, val):
active_iter = self.get_active_iter()
if active_iter is None:
return
column = self.get_property('gsettings-column')
value = self.get_model()[active_iter][column]
self.set_property('gsettings-value', value)
class GSettingsIntComboBox(GSettingsComboBox):
__gtype_name__ = "GSettingsIntComboBox"
gsettings_column = GObject.Property(type=int, default=0)
gsettings_value = GObject.Property(type=int)
class GSettingsBoolComboBox(GSettingsComboBox):
__gtype_name__ = "GSettingsBoolComboBox"
gsettings_column = GObject.Property(type=int, default=0)
gsettings_value = GObject.Property(type=bool, default=False)
class GSettingsStringComboBox(GSettingsComboBox):
__gtype_name__ = "GSettingsStringComboBox"
gsettings_column = GObject.Property(type=int, default=0)
gsettings_value = GObject.Property(type=str, default="")
@Gtk.Template(resource_path='/org/gnome/meld/ui/preferences.ui')
class PreferencesDialog(Gtk.Dialog):
__gtype_name__ = "PreferencesDialog"
checkbutton_break_commit_lines = Gtk.Template.Child()
checkbutton_default_font = Gtk.Template.Child()
checkbutton_folder_filter_text = Gtk.Template.Child()
checkbutton_highlight_current_line = Gtk.Template.Child()
checkbutton_ignore_blank_lines = Gtk.Template.Child()
checkbutton_ignore_symlinks = Gtk.Template.Child()
checkbutton_shallow_compare = Gtk.Template.Child()
checkbutton_show_commit_margin = Gtk.Template.Child()
checkbutton_show_line_numbers = Gtk.Template.Child()
checkbutton_show_overview_map = Gtk.Template.Child()
checkbutton_show_whitespace = Gtk.Template.Child()
checkbutton_spaces_instead_of_tabs = Gtk.Template.Child()
checkbutton_use_syntax_highlighting = Gtk.Template.Child()
checkbutton_wrap_text = Gtk.Template.Child()
checkbutton_wrap_word = Gtk.Template.Child()
column_list_vbox = Gtk.Template.Child()
combo_file_order = Gtk.Template.Child()
combo_merge_order = Gtk.Template.Child()
combo_overview_map = Gtk.Template.Child()
combo_timestamp = Gtk.Template.Child()
combobox_style_scheme = Gtk.Template.Child()
custom_edit_command_entry = Gtk.Template.Child()
file_filters_vbox = Gtk.Template.Child()
fontpicker = Gtk.Template.Child()
spinbutton_commit_margin = Gtk.Template.Child()
spinbutton_tabsize = Gtk.Template.Child()
syntaxschemestore = Gtk.Template.Child()
system_editor_checkbutton = Gtk.Template.Child()
text_filters_vbox = Gtk.Template.Child()
def __init__(self, **kwargs):
super().__init__(**kwargs)
bindings = [
('use-system-font', self.checkbutton_default_font, 'active'),
('custom-font', self.fontpicker, 'font'),
('indent-width', self.spinbutton_tabsize, 'value'),
('insert-spaces-instead-of-tabs', self.checkbutton_spaces_instead_of_tabs, 'active'), # noqa: E501
('highlight-current-line', self.checkbutton_highlight_current_line, 'active'), # noqa: E501
('show-line-numbers', self.checkbutton_show_line_numbers, 'active'), # noqa: E501
('highlight-syntax', self.checkbutton_use_syntax_highlighting, 'active'), # noqa: E501
('enable-space-drawer', self.checkbutton_show_whitespace, 'active'), # noqa: E501
('use-system-editor', self.system_editor_checkbutton, 'active'),
('custom-editor-command', self.custom_edit_command_entry, 'text'),
('folder-shallow-comparison', self.checkbutton_shallow_compare, 'active'), # noqa: E501
('folder-filter-text', self.checkbutton_folder_filter_text, 'active'), # noqa: E501
('folder-ignore-symlinks', self.checkbutton_ignore_symlinks, 'active'), # noqa: E501
('vc-show-commit-margin', self.checkbutton_show_commit_margin, 'active'), # noqa: E501
('show-overview-map', self.checkbutton_show_overview_map, 'active'), # noqa: E501
('vc-commit-margin', self.spinbutton_commit_margin, 'value'),
('vc-break-commit-message', self.checkbutton_break_commit_lines, 'active'), # noqa: E501
('ignore-blank-lines', self.checkbutton_ignore_blank_lines, 'active'), # noqa: E501
# Sensitivity bindings must come after value bindings, or the key
# writability in gsettings overrides manual sensitivity setting.
('vc-show-commit-margin', self.spinbutton_commit_margin, 'sensitive'), # noqa: E501
('vc-show-commit-margin', self.checkbutton_break_commit_lines, 'sensitive'), # noqa: E501
]
for key, obj, attribute in bindings:
settings.bind(key, obj, attribute, Gio.SettingsBindFlags.DEFAULT)
invert_bindings = [
('use-system-editor', self.custom_edit_command_entry, 'sensitive'),
('use-system-font', self.fontpicker, 'sensitive'),
('folder-shallow-comparison', self.checkbutton_folder_filter_text, 'sensitive'), # noqa: E501
]
for key, obj, attribute in invert_bindings:
settings.bind(
key, obj, attribute, Gio.SettingsBindFlags.DEFAULT |
Gio.SettingsBindFlags.INVERT_BOOLEAN)
self.checkbutton_wrap_text.bind_property(
'active', self.checkbutton_wrap_word, 'sensitive',
GObject.BindingFlags.DEFAULT)
wrap_mode = settings.get_enum('wrap-mode')
self.checkbutton_wrap_text.set_active(wrap_mode != Gtk.WrapMode.NONE)
self.checkbutton_wrap_word.set_active(wrap_mode == Gtk.WrapMode.WORD)
filefilter = FilterList(
filter_type=FilterEntry.SHELL,
settings_key="filename-filters",
)
self.file_filters_vbox.pack_start(filefilter, True, True, 0)
textfilter = FilterList(
filter_type=FilterEntry.REGEX,
settings_key="text-filters",
)
self.text_filters_vbox.pack_start(textfilter, True, True, 0)
columnlist = ColumnList(settings_key="folder-columns")
self.column_list_vbox.pack_start(columnlist, True, True, 0)
self.combo_timestamp.bind_to('folder-time-resolution')
self.combo_file_order.bind_to('vc-left-is-local')
self.combo_overview_map.bind_to('overview-map-style')
self.combo_merge_order.bind_to('vc-merge-file-order')
# Fill color schemes
manager = GtkSource.StyleSchemeManager.get_default()
for scheme_id in manager.get_scheme_ids():
scheme = manager.get_scheme(scheme_id)
self.syntaxschemestore.append([scheme_id, scheme.get_name()])
self.combobox_style_scheme.bind_to('style-scheme')
self.show()
@Gtk.Template.Callback()
def on_checkbutton_wrap_text_toggled(self, button):
if not self.checkbutton_wrap_text.get_active():
wrap_mode = Gtk.WrapMode.NONE
elif self.checkbutton_wrap_word.get_active():
wrap_mode = Gtk.WrapMode.WORD
else:
wrap_mode = Gtk.WrapMode.CHAR
settings.set_enum('wrap-mode', wrap_mode)
@Gtk.Template.Callback()
def on_response(self, dialog, response_id):
self.destroy()
|
import unittest
import os
import tensorflow_gcs_config
from unittest.mock import patch
from test.support import EnvironmentVarGuard
from kaggle_secrets import UserSecretsClient
class TestTensorflowCredentials(unittest.TestCase):
@patch('tensorflow_gcs_config.configure_gcs')
def test_set_tensorflow_credential(self, mock_configure_gcs):
credential = '{"client_id":"fake_client_id",' \
'"client_secret":"fake_client_secret",' \
'"refresh_token":"not a refresh token",' \
'"type":"authorized_user"}';
env = EnvironmentVarGuard()
env.set('HOME', '/tmp')
env.set('GOOGLE_APPLICATION_CREDENTIALS', '')
# These need to be set to make UserSecretsClient happy, but aren't
# pertinent to this test.
env.set('KAGGLE_USER_SECRETS_TOKEN', 'foobar')
env.set('KAGGLE_KERNEL_INTEGRATIONS', 'AUTOML')
user_secrets = UserSecretsClient()
user_secrets.set_tensorflow_credential(credential)
credential_path = '/tmp/gcloud_credential.json'
self.assertEqual(
credential_path, os.environ['GOOGLE_APPLICATION_CREDENTIALS'])
with open(credential_path, 'r') as f:
saved_cred = f.read()
self.assertEqual(credential, saved_cred)
mock_configure_gcs.assert_called_with(credentials=credential)
|
from rflink.parser import PACKET_FIELDS, UNITS
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
CONF_NAME,
CONF_UNIT_OF_MEASUREMENT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import (
CONF_ALIASES,
CONF_AUTOMATIC_ADD,
CONF_DEVICES,
DATA_DEVICE_REGISTER,
DATA_ENTITY_LOOKUP,
EVENT_KEY_ID,
EVENT_KEY_SENSOR,
EVENT_KEY_UNIT,
SIGNAL_AVAILABILITY,
SIGNAL_HANDLE_EVENT,
TMP_ENTITY,
RflinkDevice,
)
SENSOR_ICONS = {
"humidity": "mdi:water-percent",
"battery": "mdi:battery",
"temperature": "mdi:thermometer",
}
CONF_SENSOR_TYPE = "sensor_type"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_AUTOMATIC_ADD, default=True): cv.boolean,
vol.Optional(CONF_DEVICES, default={}): {
cv.string: vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_SENSOR_TYPE): cv.string,
vol.Optional(CONF_UNIT_OF_MEASUREMENT): cv.string,
vol.Optional(CONF_ALIASES, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
}
)
},
},
extra=vol.ALLOW_EXTRA,
)
def lookup_unit_for_sensor_type(sensor_type):
"""Get unit for sensor type.
Async friendly.
"""
field_abbrev = {v: k for k, v in PACKET_FIELDS.items()}
return UNITS.get(field_abbrev.get(sensor_type))
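# Illustrative sketch, not part of the original platform: the lookup above inverts
# the packet-field mapping so a friendly sensor type resolves back to its protocol
# abbreviation before the unit lookup. The tiny mappings below are hypothetical
# stand-ins for PACKET_FIELDS and UNITS.
def _example_unit_lookup(sensor_type="temperature"):
    packet_fields = {"temp": "temperature", "hum": "humidity"}
    units = {"temp": "°C", "hum": "%"}
    field_abbrev = {value: key for key, value in packet_fields.items()}
    return units.get(field_abbrev.get(sensor_type))  # -> "°C"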
def devices_from_config(domain_config):
"""Parse configuration and add Rflink sensor devices."""
devices = []
for device_id, config in domain_config[CONF_DEVICES].items():
if ATTR_UNIT_OF_MEASUREMENT not in config:
config[ATTR_UNIT_OF_MEASUREMENT] = lookup_unit_for_sensor_type(
config[CONF_SENSOR_TYPE]
)
device = RflinkSensor(device_id, **config)
devices.append(device)
return devices
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Rflink platform."""
async_add_entities(devices_from_config(config))
async def add_new_device(event):
"""Check if device is known, otherwise create device entity."""
device_id = event[EVENT_KEY_ID]
device = RflinkSensor(
device_id,
event[EVENT_KEY_SENSOR],
event[EVENT_KEY_UNIT],
initial_event=event,
)
# Add device entity
async_add_entities([device])
if config[CONF_AUTOMATIC_ADD]:
hass.data[DATA_DEVICE_REGISTER][EVENT_KEY_SENSOR] = add_new_device
class RflinkSensor(RflinkDevice):
"""Representation of a Rflink sensor."""
def __init__(
self, device_id, sensor_type, unit_of_measurement, initial_event=None, **kwargs
):
"""Handle sensor specific args and super init."""
self._sensor_type = sensor_type
self._unit_of_measurement = unit_of_measurement
super().__init__(device_id, initial_event=initial_event, **kwargs)
def _handle_event(self, event):
"""Domain specific event handler."""
self._state = event["value"]
async def async_added_to_hass(self):
"""Register update callback."""
# Remove temporary bogus entity_id if added
tmp_entity = TMP_ENTITY.format(self._device_id)
if (
tmp_entity
in self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][self._device_id]
):
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][
self._device_id
].remove(tmp_entity)
# Register id and aliases
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][self._device_id].append(
self.entity_id
)
if self._aliases:
for _id in self._aliases:
self.hass.data[DATA_ENTITY_LOOKUP][EVENT_KEY_SENSOR][_id].append(
self.entity_id
)
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_AVAILABILITY, self._availability_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_HANDLE_EVENT.format(self.entity_id),
self.handle_event_callback,
)
)
# Process the initial event now that the entity is created
if self._initial_event:
self.handle_event_callback(self._initial_event)
@property
def unit_of_measurement(self):
"""Return measurement unit."""
return self._unit_of_measurement
@property
def state(self):
"""Return value."""
return self._state
@property
def icon(self):
"""Return possible sensor specific icon."""
if self._sensor_type in SENSOR_ICONS:
return SENSOR_ICONS[self._sensor_type]
|
from datetime import datetime, timedelta
from pytest import mark
import homeassistant.components.sun as sun
from homeassistant.const import EVENT_STATE_CHANGED
import homeassistant.core as ha
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
async def test_setting_rising(hass, legacy_patchable_time):
"""Test retrieving sun setting and rising."""
utc_now = datetime(2016, 11, 1, 8, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=utc_now):
await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
await hass.async_block_till_done()
state = hass.states.get(sun.ENTITY_ID)
from astral import Astral
astral = Astral()
utc_today = utc_now.date()
latitude = hass.config.latitude
longitude = hass.config.longitude
mod = -1
while True:
next_dawn = astral.dawn_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_dawn > utc_now:
break
mod += 1
mod = -1
while True:
next_dusk = astral.dusk_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_dusk > utc_now:
break
mod += 1
mod = -1
while True:
next_midnight = astral.solar_midnight_utc(
utc_today + timedelta(days=mod), longitude
)
if next_midnight > utc_now:
break
mod += 1
mod = -1
while True:
next_noon = astral.solar_noon_utc(utc_today + timedelta(days=mod), longitude)
if next_noon > utc_now:
break
mod += 1
mod = -1
while True:
next_rising = astral.sunrise_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_rising > utc_now:
break
mod += 1
mod = -1
while True:
next_setting = astral.sunset_utc(
utc_today + timedelta(days=mod), latitude, longitude
)
if next_setting > utc_now:
break
mod += 1
assert next_dawn == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_DAWN]
)
assert next_dusk == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_DUSK]
)
assert next_midnight == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_MIDNIGHT]
)
assert next_noon == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_NOON]
)
assert next_rising == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_RISING]
)
assert next_setting == dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_SETTING]
)
async def test_state_change(hass, legacy_patchable_time):
"""Test if the state changes at next setting/rising."""
now = datetime(2016, 6, 1, 8, 0, 0, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=now):
await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
await hass.async_block_till_done()
test_time = dt_util.parse_datetime(
hass.states.get(sun.ENTITY_ID).attributes[sun.STATE_ATTR_NEXT_RISING]
)
assert test_time is not None
assert sun.STATE_BELOW_HORIZON == hass.states.get(sun.ENTITY_ID).state
patched_time = test_time + timedelta(seconds=5)
with patch(
"homeassistant.helpers.condition.dt_util.utcnow", return_value=patched_time
):
hass.bus.async_fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: patched_time})
await hass.async_block_till_done()
assert sun.STATE_ABOVE_HORIZON == hass.states.get(sun.ENTITY_ID).state
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=now):
await hass.config.async_update(longitude=hass.config.longitude + 90)
await hass.async_block_till_done()
assert sun.STATE_ABOVE_HORIZON == hass.states.get(sun.ENTITY_ID).state
async def test_norway_in_june(hass):
"""Test location in Norway where the sun doesn't set in summer."""
hass.config.latitude = 69.6
hass.config.longitude = 18.8
june = datetime(2016, 6, 1, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=june):
assert await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
state = hass.states.get(sun.ENTITY_ID)
assert state is not None
assert dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_RISING]
) == datetime(2016, 7, 25, 23, 23, 39, tzinfo=dt_util.UTC)
assert dt_util.parse_datetime(
state.attributes[sun.STATE_ATTR_NEXT_SETTING]
) == datetime(2016, 7, 26, 22, 19, 1, tzinfo=dt_util.UTC)
assert state.state == sun.STATE_ABOVE_HORIZON
@mark.skip
async def test_state_change_count(hass):
"""Count the number of state change events in a location."""
# Skipped because it's a bit slow. Has been validated with
    # multiple latitudes and dates
hass.config.latitude = 10
hass.config.longitude = 0
now = datetime(2016, 6, 1, tzinfo=dt_util.UTC)
with patch("homeassistant.helpers.condition.dt_util.utcnow", return_value=now):
assert await async_setup_component(
hass, sun.DOMAIN, {sun.DOMAIN: {sun.CONF_ELEVATION: 0}}
)
events = []
@ha.callback
def state_change_listener(event):
if event.data.get("entity_id") == "sun.sun":
events.append(event)
hass.bus.async_listen(EVENT_STATE_CHANGED, state_change_listener)
await hass.async_block_till_done()
for _ in range(24 * 60 * 60):
now += timedelta(seconds=1)
hass.bus.async_fire(ha.EVENT_TIME_CHANGED, {ha.ATTR_NOW: now})
await hass.async_block_till_done()
assert len(events) < 721
|
from datetime import timedelta
import logging
import dweepy
import voluptuous as vol
from homeassistant.const import (
ATTR_FRIENDLY_NAME,
CONF_NAME,
CONF_WHITELIST,
EVENT_STATE_CHANGED,
STATE_UNKNOWN,
)
from homeassistant.helpers import state as state_helper
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DOMAIN = "dweet"
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=1)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_WHITELIST, default=[]): vol.All(
cv.ensure_list, [cv.entity_id]
),
}
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the Dweet.io component."""
conf = config[DOMAIN]
name = conf.get(CONF_NAME)
whitelist = conf.get(CONF_WHITELIST)
json_body = {}
def dweet_event_listener(event):
"""Listen for new messages on the bus and sends them to Dweet.io."""
state = event.data.get("new_state")
if (
state is None
or state.state in (STATE_UNKNOWN, "")
or state.entity_id not in whitelist
):
return
try:
_state = state_helper.state_as_number(state)
except ValueError:
_state = state.state
json_body[state.attributes.get(ATTR_FRIENDLY_NAME)] = _state
send_data(name, json_body)
hass.bus.listen(EVENT_STATE_CHANGED, dweet_event_listener)
return True
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def send_data(name, msg):
"""Send the collected data to Dweet.io."""
try:
dweepy.dweet_for(name, msg)
except dweepy.DweepyError:
_LOGGER.error("Error saving data to Dweet.io: %s", msg)
|
import posixpath
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import ycsb
BENCHMARK_NAME = 'jdbc_ycsb'
BENCHMARK_CONFIG = """
jdbc_ycsb:
description: >
Run YCSB against relational databases that support JDBC.
Configure the number of VMs via --num-vms.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: 1"""
YCSB_BINDING_LIB_DIR = posixpath.join(ycsb.YCSB_DIR, 'jdbc-binding', 'lib')
CREATE_TABLE_SQL = ("CREATE TABLE usertable "
"(YCSB_KEY VARCHAR(255) PRIMARY KEY, "
"FIELD0 TEXT, FIELD1 TEXT, "
"FIELD2 TEXT, FIELD3 TEXT, "
"FIELD4 TEXT, FIELD5 TEXT, "
"FIELD6 TEXT, FIELD7 TEXT, "
"FIELD8 TEXT, FIELD9 TEXT);")
DROP_TABLE_SQL = "DROP TABLE IF EXISTS usertable;"
FLAGS = flags.FLAGS
flags.DEFINE_string('jdbc_ycsb_db_driver',
None,
'The class of JDBC driver that connects to DB.')
flags.DEFINE_string('jdbc_ycsb_db_url',
None,
                    'The URL that is used to connect to DB.')
flags.DEFINE_string('jdbc_ycsb_db_user',
None,
'The username of target DB.')
flags.DEFINE_string('jdbc_ycsb_db_passwd',
None,
'The password of specified DB user.')
flags.DEFINE_string('jdbc_ycsb_db_driver_path',
None,
'The path to JDBC driver jar file on local machine.')
flags.DEFINE_integer('jdbc_ycsb_db_batch_size',
0,
'The batch size for doing batched insert.')
flags.DEFINE_integer('jdbc_ycsb_fetch_size',
10,
                    'The JDBC fetch size hinted to the driver.')
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS['ycsb_client_vms'].present:
config['vm_groups']['default']['vm_count'] = FLAGS.ycsb_client_vms
return config
def CheckPrerequisites(benchmark_config):
  # The JDBC binding requires explicit connection settings, so always make
  # sure the driver, URL, and credential flags are set.
if not FLAGS.jdbc_ycsb_db_driver:
raise ValueError('"jdbc_ycsb_db_driver" must be set')
if not FLAGS.jdbc_ycsb_db_driver_path:
raise ValueError('"jdbc_ycsb_db_driver_path" must be set')
if not FLAGS.jdbc_ycsb_db_url:
raise ValueError('"jdbc_ycsb_db_url" must be set')
if not FLAGS.jdbc_ycsb_db_user:
raise ValueError('"jdbc_ycsb_db_user" must be set ')
if not FLAGS.jdbc_ycsb_db_passwd:
raise ValueError('"jdbc_ycsb_db_passwd" must be set ')
def Prepare(benchmark_spec):
benchmark_spec.always_call_cleanup = True
vms = benchmark_spec.vms
# Install required packages and copy credential files.
vm_util.RunThreaded(_Install, vms)
# Create benchmark table.
ExecuteSql(vms[0], DROP_TABLE_SQL)
ExecuteSql(vms[0], CREATE_TABLE_SQL)
benchmark_spec.executor = ycsb.YCSBExecutor('jdbc')
def ExecuteSql(vm, sql):
db_args = (
' -p db.driver={0}'
' -p db.url="{1}"'
' -p db.user={2}'
' -p db.passwd={3}').format(
FLAGS.jdbc_ycsb_db_driver,
FLAGS.jdbc_ycsb_db_url,
FLAGS.jdbc_ycsb_db_user,
FLAGS.jdbc_ycsb_db_passwd)
exec_cmd = 'java -cp "{0}/*" com.yahoo.ycsb.db.JdbcDBCli -c "{1}" ' \
.format(YCSB_BINDING_LIB_DIR, sql)
stdout, stderr = vm.RobustRemoteCommand(exec_cmd + db_args)
if 'successfully executed' not in stdout and not stderr:
raise errors.VirtualMachine.RemoteCommandError(stderr)
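# Illustrative sketch, not part of the original benchmark: the full command that
# ExecuteSql() above assembles and runs on the VM. The driver class, URL, and
# credentials below are hypothetical placeholder values.
def _ExampleExecuteSqlCommand(sql=DROP_TABLE_SQL):
  db_args = (
      ' -p db.driver={0}'
      ' -p db.url="{1}"'
      ' -p db.user={2}'
      ' -p db.passwd={3}').format(
          'org.postgresql.Driver',
          'jdbc:postgresql://10.0.0.5:5432/ycsb',
          'ycsb',
          'example-password')
  exec_cmd = 'java -cp "{0}/*" com.yahoo.ycsb.db.JdbcDBCli -c "{1}" ' \
      .format(YCSB_BINDING_LIB_DIR, sql)
  return exec_cmd + db_args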
def Run(benchmark_spec):
vms = benchmark_spec.vms
run_kwargs = {
'db.driver': FLAGS.jdbc_ycsb_db_driver,
'db.url': '"%s"' % FLAGS.jdbc_ycsb_db_url,
'db.user': FLAGS.jdbc_ycsb_db_user,
'db.passwd': FLAGS.jdbc_ycsb_db_passwd,
'db.batchsize': FLAGS.jdbc_ycsb_db_batch_size,
'jdbc.fetchsize': FLAGS.jdbc_ycsb_fetch_size,
}
load_kwargs = run_kwargs.copy()
if FLAGS['ycsb_preload_threads'].present:
load_kwargs['threads'] = FLAGS['ycsb_preload_threads']
samples = list(benchmark_spec.executor.LoadAndRun(
vms, load_kwargs=load_kwargs, run_kwargs=run_kwargs))
return samples
def Cleanup(benchmark_spec):
# support automatic cleanup.
ExecuteSql(benchmark_spec.vms[0], DROP_TABLE_SQL)
def _Install(vm):
vm.Install('ycsb')
# Copy driver jar to VM.
vm.RemoteCopy(FLAGS.jdbc_ycsb_db_driver_path, YCSB_BINDING_LIB_DIR)
|
import unittest
import mock
from pytest import raises
from paasta_tools.deployd.common import BounceTimers
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.deployd.workers import BounceResults
from paasta_tools.deployd.workers import PaastaDeployWorker
from paasta_tools.marathon_tools import DEFAULT_SOA_DIR
class TestPaastaDeployWorker(unittest.TestCase):
def setUp(self):
self.mock_instances_to_bounce = mock.Mock(
get=mock.Mock(
return_value=mock.Mock(__enter__=mock.Mock(), __exit__=mock.Mock())
)
)
self.mock_metrics = mock.Mock()
mock_config = mock.Mock(
get_cluster=mock.Mock(return_value="westeros-prod"),
get_deployd_worker_failure_backoff_factor=mock.Mock(return_value=30),
)
with mock.patch(
"paasta_tools.deployd.workers.PaastaDeployWorker.setup", autospec=True
):
self.worker = PaastaDeployWorker(
1, self.mock_instances_to_bounce, mock_config, self.mock_metrics
)
self.worker.max_failures = 20
def test_setup(self):
with mock.patch(
"paasta_tools.deployd.workers.load_system_paasta_config", autospec=True
), mock.patch(
"paasta_tools.deployd.workers.marathon_tools.get_marathon_clients",
autospec=True,
), mock.patch(
"paasta_tools.deployd.workers.marathon_tools.get_marathon_servers",
autospec=True,
):
self.worker.setup()
def test_setup_timers(self):
mock_si = mock.Mock(service="universe", instance="c137")
ret = self.worker.setup_timers(mock_si)
calls = [
mock.call(
"bounce_length_timer",
service="universe",
paasta_cluster="westeros-prod",
instance="c137",
),
mock.call(
"processed_by_worker",
service="universe",
paasta_cluster="westeros-prod",
instance="c137",
),
mock.call(
"setup_marathon_timer",
service="universe",
paasta_cluster="westeros-prod",
instance="c137",
),
]
self.mock_metrics.create_timer.assert_has_calls(calls)
assert ret == BounceTimers(
processed_by_worker=self.mock_metrics.create_timer.return_value,
setup_marathon=self.mock_metrics.create_timer.return_value,
bounce_length=self.mock_metrics.create_timer.return_value,
)
def test_run(self):
with mock.patch("time.time", autospec=True, return_value=1), mock.patch(
"time.sleep", autospec=True
) as mock_sleep, mock.patch(
"paasta_tools.deployd.workers.PaastaDeployWorker.process_service_instance",
autospec=True,
) as mock_process_service_instance:
mock_bounce_results = BounceResults(
bounce_again_in_seconds=None, return_code=0
)
mock_process_service_instance.return_value = mock_bounce_results
mock_sleep.side_effect = LoopBreak
mock_si = mock.Mock(
service="universe",
instance="c137",
failures=0,
processed_count=0,
bounce_start_time=123456789.0,
)
self.mock_instances_to_bounce.get().__enter__.return_value = mock_si
with raises(LoopBreak):
self.worker.run()
mock_process_service_instance.assert_called_with(self.worker, mock_si)
assert not self.mock_instances_to_bounce.put.called
mock_bounce_results = BounceResults(
bounce_again_in_seconds=60, return_code=1
)
mock_process_service_instance.return_value = mock_bounce_results
mock_queued_si = ServiceInstance(
service="universe",
instance="c137",
bounce_by=61,
wait_until=61,
watcher="Worker1",
failures=1,
processed_count=1,
bounce_start_time=123456789.0,
enqueue_time=1,
)
with raises(LoopBreak):
self.worker.run()
mock_process_service_instance.assert_called_with(self.worker, mock_si)
self.mock_instances_to_bounce.put.assert_called_with(mock_queued_si)
mock_si = mock.Mock(
service="universe",
instance="c137",
failures=0,
processed_count=0,
bounce_start_time=123456789.0,
)
self.mock_instances_to_bounce.get.return_value.__enter__.return_value = (
mock_si
)
mock_process_service_instance.side_effect = Exception
mock_queued_si = ServiceInstance(
service="universe",
instance="c137",
bounce_by=61,
wait_until=61,
watcher="Worker1",
failures=1,
processed_count=1,
bounce_start_time=123456789.0,
enqueue_time=1,
)
with raises(LoopBreak):
self.worker.run()
mock_process_service_instance.assert_called_with(self.worker, mock_si)
self.mock_instances_to_bounce.put.assert_called_with(mock_queued_si)
def test_process_service_instance(self):
mock_client = mock.Mock()
mock_app = mock.Mock()
with mock.patch(
"paasta_tools.deployd.workers.marathon_tools.get_all_marathon_apps",
autospec=True,
return_value=[mock_app],
), mock.patch(
"paasta_tools.deployd.workers.PaastaDeployWorker.setup_timers",
autospec=True,
) as mock_setup_timers, mock.patch(
"paasta_tools.deployd.workers.deploy_marathon_service", autospec=True
) as mock_deploy_marathon_service, mock.patch(
"time.time", autospec=True, return_value=1
):
self.worker.marathon_clients = mock.Mock(
get_all_clients=mock.Mock(return_value=[mock_client])
)
self.worker.marathon_config = mock.Mock()
mock_deploy_marathon_service.return_value = (0, None)
mock_si = mock.Mock(
service="universe",
instance="c137",
failures=0,
processed_count=0,
bounce_by=0,
enqueue_time=1.0,
bounce_start_time=1.0,
)
ret = self.worker.process_service_instance(mock_si)
expected = BounceResults(None, 0)
assert ret == expected
mock_setup_timers.assert_called_with(self.worker, mock_si)
assert mock_setup_timers.return_value.setup_marathon.start.called
mock_deploy_marathon_service.assert_called_with(
service="universe",
instance="c137",
clients=self.worker.marathon_clients,
soa_dir=DEFAULT_SOA_DIR,
marathon_apps_with_clients=None,
)
assert mock_setup_timers.return_value.setup_marathon.stop.called
mock_si = mock.Mock(
service="universe",
instance="c137",
failures=0,
processed_count=1,
bounce_by=0,
enqueue_time=1.0,
bounce_start_time=1.0,
)
ret = self.worker.process_service_instance(mock_si)
mock_si = mock.Mock(
service="universe",
instance="c137",
failures=0,
processed_count=1,
bounce_by=0,
enqueue_time=1.0,
bounce_start_time=1.0,
)
mock_deploy_marathon_service.return_value = (0, 60)
ret = self.worker.process_service_instance(mock_si)
expected = BounceResults(60, 0)
assert ret == expected
mock_setup_timers.assert_called_with(self.worker, mock_si)
assert mock_setup_timers.return_value.setup_marathon.start.called
mock_deploy_marathon_service.assert_called_with(
service="universe",
instance="c137",
clients=self.worker.marathon_clients,
soa_dir=DEFAULT_SOA_DIR,
marathon_apps_with_clients=None,
)
assert mock_setup_timers.return_value.setup_marathon.stop.called
class LoopBreak(Exception):
pass
|
import collections
import numpy as np
from .unit import Unit
class WordHashing(Unit):
"""
Word-hashing layer for DSSM-based models.
    The input of :class:`WordHashing` should be a list of word
    sub-letter lists extracted from one document. The output is
    the word-hashing representation of this document.
    :class:`NgramLetterUnit` and :class:`VocabularyUnit` are two
    essential prerequisites of :class:`WordHashing`.
Examples:
>>> letters = [['#te', 'tes','est', 'st#'], ['oov']]
>>> word_hashing = WordHashing(
... term_index={
... '_PAD': 0, 'OOV': 1, 'st#': 2, '#te': 3, 'est': 4, 'tes': 5
... })
>>> hashing = word_hashing.transform(letters)
>>> hashing[0]
[0.0, 0.0, 1.0, 1.0, 1.0, 1.0]
>>> hashing[1]
[0.0, 1.0, 0.0, 0.0, 0.0, 0.0]
"""
def __init__(
self,
term_index: dict,
):
"""
Class initialization.
:param term_index: term-index mapping generated by
:class:`VocabularyUnit`.
"""
self._term_index = term_index
def transform(self, input_: list) -> list:
"""
Transform list of :attr:`letters` into word hashing layer.
:param input_: list of `tri_letters` generated by
:class:`NgramLetterUnit`.
:return: Word hashing representation of `tri-letters`.
"""
        if any(isinstance(elem, list) for elem in input_):
# The input shape for CDSSM is
# [[word1 ngram, ngram], [word2, ngram, ngram], ...].
hashing = np.zeros((len(input_), len(self._term_index)))
for idx, word in enumerate(input_):
counted_letters = collections.Counter(word)
for key, value in counted_letters.items():
letter_id = self._term_index.get(key, 1)
hashing[idx, letter_id] = value
else:
# The input shape for DSSM model [ngram, ngram, ...].
hashing = np.zeros(len(self._term_index))
counted_letters = collections.Counter(input_)
for key, value in counted_letters.items():
letter_id = self._term_index.get(key, 1)
hashing[letter_id] = value
return hashing.tolist()
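# Shape reminder for WordHashing.transform (mirrors the two branches above):
#   DSSM input,  e.g. ['#te', 'tes', ...]               -> flat vector of length len(term_index)
#   CDSSM input, e.g. [['#te', 'tes'], ['est', 'st#']]  -> one row per word, shape (n_words, len(term_index))
# Tri-letters missing from term_index fall back to index 1 ('OOV' in the doctest).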
|
import importlib
import os
import re
import sys
import uuid
import logging
import pkg_resources
import yaml
from cerberus.validator import Validator
from yandextank.common.util import recursive_dict_update, read_resource
logger = logging.getLogger(__name__)
class ValidationError(Exception):
MSG_TEMPLATE = """Validation error:\n{}"""
def __init__(self, errors):
self.errors = errors
self.message = self.MSG_TEMPLATE.format(yaml.dump(self.errors))
def __str__(self):
return self.message
def load_yaml_schema(path):
file_content = read_resource(path)
return yaml.safe_load(file_content)
def load_py_schema(package):
schema_module = importlib.import_module(package + '.config.schema')
return schema_module.SCHEMA
def load_plugin_schema(package):
try:
return load_yaml_schema(
pkg_resources.resource_filename(
package, 'config/schema.yaml'))
except IOError:
try:
return load_py_schema(package)
except ImportError:
logger.error(
"Could not find schema for %s (should be located in config/ directory of a plugin)",
package)
raise IOError('No schema found for plugin %s' % package)
except ImportError:
if 'aggregator' in package.lower():
logger.exception('Plugin Aggregator is now deprecated, please remove this section from your config.')
raise ValidationError({'package': ['No module named {}'.format(package)]})
def load_schema(directory, filename=None):
try:
return load_yaml_schema(directory)
except IOError:
try:
return load_py_schema(directory)
except ImportError:
raise IOError(
'Neither .yaml nor .py schema found in %s' %
directory)
class PatchedValidator(Validator):
def _validate_description(self, description, field, value):
""" {'type': 'string'} """
pass
def _validate_values_description(self, values_description, field, value):
""" {'type': 'dict'} """
pass
def _validate_tutorial_link(self, tutorial_link, field, value):
""" {'type': 'string'} """
pass
def _validate_examples(self, examples, field, value):
""" {'type': 'dict'} """
pass
@staticmethod
def is_number(value):
try:
float(value)
return True
except ValueError:
return False
def validate_duration(self, field, duration):
'''
2h
2h5m
5m
180
1h4m3
:param duration:
:return:
'''
DURATION_RE = r'^(\d+d)?(\d+h)?(\d+m)?(\d+s?)?$'
if not re.match(DURATION_RE, duration):
self._error(field, 'Load duration examples: 2h30m; 5m15; 180')
def _validator_load_scheme(self, field, value):
'''
step(10,200,5,180)
step(5,50,2.5,5m)
line(22,154,2h5m)
step(5,50,2.5,5m) line(22,154,2h5m)
const(10,1h4m3s)
:param field:
:param value:
:return:
'''
# stpd file can be any value
        if self.document['load_type'] == 'stpd_file':
return
PRIMARY_RE = r'(step|line|const)\((.+?)\)'
N_OF_ARGS = {
'step': 4,
'line': 3,
'const': 2,
}
matches = re.findall(PRIMARY_RE, value)
if len(matches) == 0:
self._error(field, 'Should match one of the following patterns: step(...) / line(...) / const(...)')
else:
for match in matches:
curve, params_str = match
params = [v.strip() for v in params_str.split(',')]
# check number of arguments
if not len(params) == N_OF_ARGS[curve]:
self._error(field, '{} load scheme: expected {} arguments, found {}'.format(curve,
N_OF_ARGS[curve],
len(params)))
# check arguments' types
for param in params[:-1]:
if not self.is_number(param):
self._error(field, 'Argument {} in load scheme should be a number'.format(param))
self.validate_duration(field, params[-1])
class TankConfig(object):
DYNAMIC_OPTIONS = {
'uuid': lambda: str(uuid.uuid4()),
'pid': lambda: os.getpid(),
'cmdline': lambda: ' '.join(sys.argv)
}
def __init__(
self,
configs,
with_dynamic_options=True,
core_section='core',
error_output=None):
"""
:param configs: list of configs dicts
:param with_dynamic_options: insert uuid, pid, and other DYNAMIC_OPTIONS
:param core_section: name of core section in config
:param error_output: file to output error messages
"""
self._errors = None
if not isinstance(configs, list):
configs = [configs]
self.raw_config_dict = self.__load_multiple(
[config for config in configs if config is not None])
if self.raw_config_dict.get(core_section) is None:
self.raw_config_dict[core_section] = {}
self.with_dynamic_options = with_dynamic_options
self.CORE_SECTION = core_section
self._validated = None
self._plugins = None
self.ERROR_OUTPUT = error_output
self.BASE_SCHEMA = load_yaml_schema(pkg_resources.resource_filename('yandextank.core', 'config/schema.yaml'))
self.PLUGINS_SCHEMA = load_yaml_schema(pkg_resources.resource_filename('yandextank.core', 'config/plugins_schema.yaml'))
def get_configinitial(self):
return self.raw_config_dict
def validate(self):
if not self._validated:
try:
self._validated = ValidatedConfig(self.__validate(), self.BASE_SCHEMA)
self._errors = {}
except ValidationError as e:
self._validated = None
self._errors = e.errors
return self._validated, self._errors, self.raw_config_dict
@property
def validated(self):
if not self._validated:
try:
self._validated = self.__validate()
except ValidationError as e:
self._errors = e.errors
if self.ERROR_OUTPUT:
with open(self.ERROR_OUTPUT, 'w') as f:
yaml.dump(e.errors, f)
raise
return self._validated
def save_raw(self, filename):
with open(filename, 'w') as f:
yaml.dump(self.raw_config_dict, f)
def __load_multiple(self, configs):
logger.info('Configs: {}'.format(configs))
configs_count = len(configs)
if configs_count == 0:
return {}
elif configs_count == 1:
return configs[0]
elif configs_count == 2:
return recursive_dict_update(configs[0], configs[1])
else:
return self.__load_multiple(
[recursive_dict_update(configs[0], configs[1])] + configs[2:])
def __parse_enabled_plugins(self):
"""
:returns: [(plugin_name, plugin_package, plugin_config), ...]
:rtype: list of tuple
"""
        return [
            (plugin_name, plugin['package'], plugin)
            for plugin_name, plugin in self.raw_config_dict.items()
            if plugin_name not in self.BASE_SCHEMA
            and isinstance(plugin, dict)
            and plugin.get('enabled')
        ]
def __validate(self):
core_validated = self.__validate_core()
# plugins:
errors = {}
results = {}
for plugin_name, package, config in self.__parse_enabled_plugins():
try:
results[plugin_name] = \
self.__validate_plugin(config,
load_plugin_schema(package))
except ValidationError as e:
errors[plugin_name] = e.errors
if len(errors) > 0:
            raise ValidationError(dict(errors))
for plugin_name, plugin_conf in results.items():
core_validated[plugin_name] = plugin_conf
return core_validated
def __validate_core(self):
v = PatchedValidator(allow_unknown=self.PLUGINS_SCHEMA)
result = v.validate(self.raw_config_dict, self.BASE_SCHEMA)
if not result:
errors = v.errors
for key, value in tuple(errors.items()):
if 'must be of dict type' in value:
errors[key] = ['unknown field']
raise ValidationError(errors)
normalized = v.normalized(self.raw_config_dict)
return self.__set_core_dynamic_options(
normalized) if self.with_dynamic_options else normalized
def __validate_plugin(self, config, schema):
schema.update(self.PLUGINS_SCHEMA['schema'])
v = PatchedValidator(schema, allow_unknown=False)
# .validate() makes .errors as side effect if there's any
if not v.validate(config):
raise ValidationError(v.errors)
# .normalized() returns config with defaults
return v.normalized(config)
def __set_core_dynamic_options(self, config):
for option, setter in self.DYNAMIC_OPTIONS.items():
try:
config[self.CORE_SECTION][option] = setter()
except KeyError:
config[self.CORE_SECTION] = {option: setter()}
return config
def __str__(self):
return yaml.dump(self.raw_config_dict)
class ValidatedConfig(object):
def __init__(self, validated, base_schema):
"""
:type validated: dict
"""
self.validated = validated
self.base_schema = base_schema
self._plugins = None
@property
def plugins(self):
"""
:returns: [(plugin_name, plugin_package, plugin_config), ...]
:rtype: list of tuple
"""
if not self._plugins:
            self._plugins = [
                (plugin_name, plugin_cfg['package'], plugin_cfg)
                for plugin_name, plugin_cfg in self.validated.items()
                if plugin_name not in self.base_schema and plugin_cfg['enabled']
            ]
return self._plugins
def get_option(self, section, option, default=None):
try:
return self.validated[section][option]
except KeyError:
if default is not None:
return default
raise
def __bool__(self):
return len(self.validated) > 0
def dump(self, path):
with open(path, 'w') as f:
yaml.dump(self.validated, f)
def __str__(self):
return yaml.dump(self.validated)
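# Minimal usage sketch (the plugin name and package below are illustrative and
# not part of this module):
#
#   config = TankConfig([{
#       'core': {},
#       'phantom': {'package': 'yandextank.plugins.Phantom', 'enabled': True},
#   }])
#   validated, errors, raw = config.validate()
#   if errors:
#       raise ValidationError(errors)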
|
import logging
import btlewrap
from btlewrap.base import BluetoothBackendException
from mitemp_bt import mitemp_bt_poller
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_FORCE_UPDATE,
CONF_MAC,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
DEVICE_CLASS_BATTERY,
DEVICE_CLASS_HUMIDITY,
DEVICE_CLASS_TEMPERATURE,
PERCENTAGE,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
try:
import bluepy.btle # noqa: F401 pylint: disable=unused-import
BACKEND = btlewrap.BluepyBackend
except ImportError:
BACKEND = btlewrap.GatttoolBackend
_LOGGER = logging.getLogger(__name__)
CONF_ADAPTER = "adapter"
CONF_CACHE = "cache_value"
CONF_MEDIAN = "median"
CONF_RETRIES = "retries"
CONF_TIMEOUT = "timeout"
DEFAULT_ADAPTER = "hci0"
DEFAULT_UPDATE_INTERVAL = 300
DEFAULT_FORCE_UPDATE = False
DEFAULT_MEDIAN = 3
DEFAULT_NAME = "MiTemp BT"
DEFAULT_RETRIES = 2
DEFAULT_TIMEOUT = 10
# Sensor types are defined as: device class, name, unit of measurement
SENSOR_TYPES = {
"temperature": [DEVICE_CLASS_TEMPERATURE, "Temperature", TEMP_CELSIUS],
"humidity": [DEVICE_CLASS_HUMIDITY, "Humidity", PERCENTAGE],
"battery": [DEVICE_CLASS_BATTERY, "Battery", PERCENTAGE],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_MAC): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_MEDIAN, default=DEFAULT_MEDIAN): cv.positive_int,
vol.Optional(CONF_FORCE_UPDATE, default=DEFAULT_FORCE_UPDATE): cv.boolean,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Optional(CONF_RETRIES, default=DEFAULT_RETRIES): cv.positive_int,
vol.Optional(CONF_CACHE, default=DEFAULT_UPDATE_INTERVAL): cv.positive_int,
vol.Optional(CONF_ADAPTER, default=DEFAULT_ADAPTER): cv.string,
}
)
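# Example configuration.yaml entry (a sketch; the MAC address is illustrative
# and the platform key is assumed to be "mitemp_bt"):
#
#   sensor:
#     - platform: mitemp_bt
#       mac: "4C:65:A8:D0:AE:65"
#       monitored_conditions:
#         - temperature
#         - humidity
#         - battery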
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the MiTempBt sensor."""
backend = BACKEND
_LOGGER.debug("MiTempBt is using %s backend", backend.__name__)
cache = config.get(CONF_CACHE)
poller = mitemp_bt_poller.MiTempBtPoller(
config.get(CONF_MAC),
cache_timeout=cache,
adapter=config.get(CONF_ADAPTER),
backend=backend,
)
force_update = config.get(CONF_FORCE_UPDATE)
median = config.get(CONF_MEDIAN)
poller.ble_timeout = config.get(CONF_TIMEOUT)
poller.retries = config.get(CONF_RETRIES)
devs = []
for parameter in config[CONF_MONITORED_CONDITIONS]:
device = SENSOR_TYPES[parameter][0]
name = SENSOR_TYPES[parameter][1]
unit = SENSOR_TYPES[parameter][2]
prefix = config.get(CONF_NAME)
if prefix:
name = f"{prefix} {name}"
devs.append(
MiTempBtSensor(poller, parameter, device, name, unit, force_update, median)
)
add_entities(devs)
class MiTempBtSensor(Entity):
"""Implementing the MiTempBt sensor."""
def __init__(self, poller, parameter, device, name, unit, force_update, median):
"""Initialize the sensor."""
self.poller = poller
self.parameter = parameter
self._device = device
self._unit = unit
self._name = name
self._state = None
self.data = []
self._force_update = force_update
        # Median is used to filter out outliers. A median of 3 will filter
        # single outliers, while a median of 5 will filter double outliers.
        # Use median_count = 1 if no filtering is required.
self.median_count = median
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit
@property
def device_class(self):
"""Device class of this entity."""
return self._device
@property
def force_update(self):
"""Force update."""
return self._force_update
def update(self):
"""
Update current conditions.
This uses a rolling median over 3 values to filter out outliers.
"""
try:
_LOGGER.debug("Polling data for %s", self.name)
data = self.poller.parameter_value(self.parameter)
except OSError as ioerr:
_LOGGER.warning("Polling error %s", ioerr)
return
except BluetoothBackendException as bterror:
_LOGGER.warning("Polling error %s", bterror)
return
if data is not None:
_LOGGER.debug("%s = %s", self.name, data)
self.data.append(data)
else:
_LOGGER.warning(
"Did not receive any data from Mi Temp sensor %s", self.name
)
# Remove old data from median list or set sensor value to None
# if no data is available anymore
if self.data:
self.data = self.data[1:]
else:
self._state = None
return
if len(self.data) > self.median_count:
self.data = self.data[1:]
if len(self.data) == self.median_count:
median = sorted(self.data)[int((self.median_count - 1) / 2)]
_LOGGER.debug("Median is: %s", median)
self._state = median
else:
_LOGGER.debug("Not yet enough data for median calculation")
|
import voluptuous as vol
from homeassistant.const import CONF_TOKEN
from homeassistant.helpers import discovery
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_TOKEN): cv.string})}, extra=vol.ALLOW_EXTRA
)
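# Example configuration.yaml entry (a sketch; the token is illustrative and the
# top-level key is assumed to match DOMAIN, i.e. "notify_events"):
#
#   notify_events:
#     token: ABCDEFGHIJKLMN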
def setup(hass, config):
"""Set up the notify_events component."""
hass.data[DOMAIN] = config[DOMAIN]
discovery.load_platform(hass, "notify", DOMAIN, {}, config)
return True
|
import pytest
from homeassistant import data_entry_flow
from homeassistant.components import traccar, zone
from homeassistant.components.device_tracker import DOMAIN as DEVICE_TRACKER_DOMAIN
from homeassistant.components.traccar import DOMAIN, TRACKER_UPDATE
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import (
HTTP_OK,
HTTP_UNPROCESSABLE_ENTITY,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.helpers.dispatcher import DATA_DISPATCHER
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
HOME_LATITUDE = 37.239622
HOME_LONGITUDE = -115.815811
@pytest.fixture(autouse=True)
def mock_dev_track(mock_device_tracker_conf):
"""Mock device tracker config loading."""
@pytest.fixture(name="client")
async def traccar_client(loop, hass, aiohttp_client):
"""Mock client for Traccar (unauthenticated)."""
assert await async_setup_component(hass, "persistent_notification", {})
assert await async_setup_component(hass, DOMAIN, {DOMAIN: {}})
await hass.async_block_till_done()
with patch("homeassistant.components.device_tracker.legacy.update_config"):
return await aiohttp_client(hass.http.app)
@pytest.fixture(autouse=True)
async def setup_zones(loop, hass):
"""Set up Zone config in HA."""
assert await async_setup_component(
hass,
zone.DOMAIN,
{
"zone": {
"name": "Home",
"latitude": HOME_LATITUDE,
"longitude": HOME_LONGITUDE,
"radius": 100,
}
},
)
await hass.async_block_till_done()
@pytest.fixture(name="webhook_id")
async def webhook_id_fixture(hass, client):
"""Initialize the Traccar component and get the webhook_id."""
await async_process_ha_core_config(
hass,
{"external_url": "http://example.com"},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
return result["result"].data["webhook_id"]
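# The tests below POST plain query parameters to the generated webhook URL. The
# minimal payload is lat / lon / id (see the `data` dicts); optional attributes
# such as accuracy, batt, speed, bearing and altitude are covered in
# test_enter_with_attrs.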
async def test_missing_data(hass, client, webhook_id):
"""Test missing data."""
url = f"/api/webhook/{webhook_id}"
data = {"lat": "1.0", "lon": "1.1", "id": "123"}
# No data
req = await client.post(url)
await hass.async_block_till_done()
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No latitude
copy = data.copy()
del copy["lat"]
req = await client.post(url, params=copy)
await hass.async_block_till_done()
assert req.status == HTTP_UNPROCESSABLE_ENTITY
# No device
copy = data.copy()
del copy["id"]
req = await client.post(url, params=copy)
await hass.async_block_till_done()
assert req.status == HTTP_UNPROCESSABLE_ENTITY
async def test_enter_and_exit(hass, client, webhook_id):
"""Test when there is a known zone."""
url = f"/api/webhook/{webhook_id}"
data = {"lat": str(HOME_LATITUDE), "lon": str(HOME_LONGITUDE), "id": "123"}
# Enter the Home
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"])
).state
assert STATE_HOME == state_name
# Enter Home again
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"])
).state
assert STATE_HOME == state_name
data["lon"] = 0
data["lat"] = 0
# Enter Somewhere else
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"])
).state
assert STATE_NOT_HOME == state_name
dev_reg = await hass.helpers.device_registry.async_get_registry()
assert len(dev_reg.devices) == 1
ent_reg = await hass.helpers.entity_registry.async_get_registry()
assert len(ent_reg.entities) == 1
async def test_enter_with_attrs(hass, client, webhook_id):
"""Test when additional attributes are present."""
url = f"/api/webhook/{webhook_id}"
data = {
"timestamp": 123456789,
"lat": "1.0",
"lon": "1.1",
"id": "123",
"accuracy": "10.5",
"batt": 10,
"speed": 100,
"bearing": "105.32",
"altitude": 102,
}
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"]))
assert state.state == STATE_NOT_HOME
assert state.attributes["gps_accuracy"] == 10.5
assert state.attributes["battery_level"] == 10.0
assert state.attributes["speed"] == 100.0
assert state.attributes["bearing"] == 105.32
assert state.attributes["altitude"] == 102.0
data = {
"lat": str(HOME_LATITUDE),
"lon": str(HOME_LONGITUDE),
"id": "123",
"accuracy": 123,
"batt": 23,
"speed": 23,
"bearing": 123,
"altitude": 123,
}
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"]))
assert state.state == STATE_HOME
assert state.attributes["gps_accuracy"] == 123
assert state.attributes["battery_level"] == 23
assert state.attributes["speed"] == 23
assert state.attributes["bearing"] == 123
assert state.attributes["altitude"] == 123
async def test_two_devices(hass, client, webhook_id):
"""Test updating two different devices."""
url = f"/api/webhook/{webhook_id}"
data_device_1 = {"lat": "1.0", "lon": "1.1", "id": "device_1"}
# Exit Home
req = await client.post(url, params=data_device_1)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["id"]))
assert state.state == "not_home"
# Enter Home
data_device_2 = dict(data_device_1)
data_device_2["lat"] = str(HOME_LATITUDE)
data_device_2["lon"] = str(HOME_LONGITUDE)
data_device_2["id"] = "device_2"
req = await client.post(url, params=data_device_2)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_2["id"]))
assert state.state == "home"
state = hass.states.get("{}.{}".format(DEVICE_TRACKER_DOMAIN, data_device_1["id"]))
assert state.state == "not_home"
@pytest.mark.xfail(
reason="The device_tracker component does not support unloading yet."
)
async def test_load_unload_entry(hass, client, webhook_id):
"""Test that the appropriate dispatch signals are added and removed."""
url = f"/api/webhook/{webhook_id}"
data = {"lat": str(HOME_LATITUDE), "lon": str(HOME_LONGITUDE), "id": "123"}
# Enter the Home
req = await client.post(url, params=data)
await hass.async_block_till_done()
assert req.status == HTTP_OK
state_name = hass.states.get(
"{}.{}".format(DEVICE_TRACKER_DOMAIN, data["id"])
).state
assert STATE_HOME == state_name
assert len(hass.data[DATA_DISPATCHER][TRACKER_UPDATE]) == 1
entry = hass.config_entries.async_entries(DOMAIN)[0]
assert await traccar.async_unload_entry(hass, entry)
await hass.async_block_till_done()
assert not hass.data[DATA_DISPATCHER][TRACKER_UPDATE]
|
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.core import callback
from .const import (
CONF_AUTOHEAL,
CONF_DEBUG,
CONF_NETWORK_KEY,
CONF_POLLING_INTERVAL,
CONF_USB_STICK_PATH,
DATA_NETWORK,
DATA_ZWAVE_CONFIG,
)
TYPE = "type"
ID = "id"
@websocket_api.require_admin
@websocket_api.websocket_command({vol.Required(TYPE): "zwave/network_status"})
def websocket_network_status(hass, connection, msg):
"""Get Z-Wave network status."""
network = hass.data[DATA_NETWORK]
connection.send_result(msg[ID], {"state": network.state})
@websocket_api.require_admin
@websocket_api.websocket_command({vol.Required(TYPE): "zwave/get_config"})
def websocket_get_config(hass, connection, msg):
"""Get Z-Wave configuration."""
config = hass.data[DATA_ZWAVE_CONFIG]
connection.send_result(
msg[ID],
{
CONF_AUTOHEAL: config[CONF_AUTOHEAL],
CONF_DEBUG: config[CONF_DEBUG],
CONF_POLLING_INTERVAL: config[CONF_POLLING_INTERVAL],
CONF_USB_STICK_PATH: config[CONF_USB_STICK_PATH],
},
)
@websocket_api.require_admin
@websocket_api.websocket_command({vol.Required(TYPE): "zwave/get_migration_config"})
def websocket_get_migration_config(hass, connection, msg):
"""Get Z-Wave configuration for migration."""
config = hass.data[DATA_ZWAVE_CONFIG]
connection.send_result(
msg[ID],
{
CONF_USB_STICK_PATH: config[CONF_USB_STICK_PATH],
CONF_NETWORK_KEY: config[CONF_NETWORK_KEY],
},
)
@callback
def async_load_websocket_api(hass):
"""Set up the web socket API."""
websocket_api.async_register_command(hass, websocket_network_status)
websocket_api.async_register_command(hass, websocket_get_config)
websocket_api.async_register_command(hass, websocket_get_migration_config)
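# Example frontend messages for the commands registered above (ids are
# arbitrary; responses use the standard websocket_api result envelope):
#
#   {"id": 12, "type": "zwave/network_status"}
#   {"id": 13, "type": "zwave/get_config"}
#   {"id": 14, "type": "zwave/get_migration_config"}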
|
import diamond.collector
from diamond.metric import Metric
try:
import xml.etree.ElementTree as ET
except ImportError:
import cElementTree as ET
try:
from netappsdk.NaServer import *
from netappsdk.NaElement import *
except ImportError:
netappsdk = None
__author__ = '[email protected]'
class netapp_inodeCol(object):
""" Our netapp_inode Collector
"""
def __init__(self, device, ip, user, password, prefix, pm):
"""Instantiate _our_ stuff
"""
self.device = device
self.ip = ip
self.netapp_user = user
self.netapp_password = password
self.path_prefix = prefix
self.publish_metric = pm
self._netapp_login()
filers_xml = self.get_netapp_data()
for volume in filers_xml:
max_inodes = volume.find('files-total').text
used_inodes = volume.find('files-used').text
volume = volume.find('name').text
self.push('max_inodes', max_inodes, volume)
self.push('used_inodes', used_inodes, volume)
def push(self, metric_name=None, metric_value=None, volume=None):
""" Ship that shit off to graphite broski
"""
graphite_path = self.path_prefix
graphite_path += '.' + self.device + '.' + 'volume'
graphite_path += '.' + volume + '.' + metric_name
metric = Metric(graphite_path, metric_value, precision=4,
host=self.device)
self.publish_metric(metric)
def get_netapp_data(self):
""" Retrieve netapp volume information
returns ElementTree of netapp volume information
"""
netapp_data = self.server.invoke('volume-list-info')
if netapp_data.results_status() == 'failed':
self.log.error(
'While using netapp API failed to retrieve '
'volume-list-info for netapp filer %s' % self.device)
return
netapp_xml = ET.fromstring(netapp_data.sprintf()).find('volumes')
return netapp_xml
def _netapp_login(self):
""" Login to our netapp filer
"""
self.server = NaServer(self.ip, 1, 3)
self.server.set_transport_type('HTTPS')
self.server.set_style('LOGIN')
self.server.set_admin_user(self.netapp_user, self.netapp_password)
class netapp_inode(diamond.collector.Collector):
""" Netapp inode diamond collector
"""
running = set()
def collect(self, device, ip, user, password):
""" Collects metrics for our netapp filer --START HERE--
"""
if netappsdk is None:
self.log.error(
'Failed to import netappsdk.NaServer or netappsdk.NaElement')
return
if device in self.running:
return
self.running.add(device)
prefix = self.config['path_prefix']
pm = self.publish_metric
netapp_inodeCol(device, ip, user, password, prefix, pm)
self.running.remove(device)
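# Resulting Graphite paths (see netapp_inodeCol.push above) have the form:
#   <path_prefix>.<device>.volume.<volume>.max_inodes
#   <path_prefix>.<device>.volume.<volume>.used_inodes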
|
import logging
import unittest
import numpy as np
from gensim.models.phrases import Phrases, FrozenPhrases, _PhrasesTransformation
from gensim.models.phrases import original_scorer
from gensim.test.utils import common_texts, temporary_file, datapath
class TestPhraseAnalysis(unittest.TestCase):
class AnalysisTester(_PhrasesTransformation):
def __init__(self, scores, threshold):
super().__init__(connector_words={"a", "the", "with", "of"})
self.scores = scores
self.threshold = threshold
def score_candidate(self, word_a, word_b, in_between):
phrase = "_".join([word_a] + in_between + [word_b])
score = self.scores.get(phrase, -1)
if score > self.threshold:
return phrase, score
return None, None
def test_simple_analysis(self):
"""Test transformation with no phrases."""
sentence = ["simple", "sentence", "should", "pass"]
result = self.AnalysisTester({}, threshold=1)[sentence]
self.assertEqual(result, sentence)
sentence = ["a", "simple", "sentence", "with", "no", "bigram", "but", "common", "terms"]
result = self.AnalysisTester({}, threshold=1)[sentence]
self.assertEqual(result, sentence)
def test_analysis_bigrams(self):
scores = {
"simple_sentence": 2, "sentence_many": 2,
"many_possible": 2, "possible_bigrams": 2,
}
sentence = ["simple", "sentence", "many", "possible", "bigrams"]
result = self.AnalysisTester(scores, threshold=1)[sentence]
self.assertEqual(result, ["simple_sentence", "many_possible", "bigrams"])
sentence = ["some", "simple", "sentence", "many", "bigrams"]
result = self.AnalysisTester(scores, threshold=1)[sentence]
self.assertEqual(result, ["some", "simple_sentence", "many", "bigrams"])
sentence = ["some", "unrelated", "simple", "words"]
result = self.AnalysisTester(scores, threshold=1)[sentence]
self.assertEqual(result, sentence)
def test_analysis_connector_words(self):
scores = {
"simple_sentence": 2, "sentence_many": 2,
"many_possible": 2, "possible_bigrams": 2,
}
sentence = ["a", "simple", "sentence", "many", "the", "possible", "bigrams"]
result = self.AnalysisTester(scores, threshold=1)[sentence]
self.assertEqual(result, ["a", "simple_sentence", "many", "the", "possible_bigrams"])
sentence = ["simple", "the", "sentence", "and", "many", "possible", "bigrams", "with", "a"]
result = self.AnalysisTester(scores, threshold=1)[sentence]
self.assertEqual(
result,
["simple", "the", "sentence", "and", "many_possible", "bigrams", "with", "a"],
)
def test_analysis_connector_words_in_between(self):
scores = {
"simple_sentence": 2, "sentence_with_many": 2,
"many_possible": 2, "many_of_the_possible": 2, "possible_bigrams": 2,
}
sentence = ["sentence", "with", "many", "possible", "bigrams"]
result = self.AnalysisTester(scores, threshold=1)[sentence]
self.assertEqual(result, ["sentence_with_many", "possible_bigrams"])
sentence = ["a", "simple", "sentence", "with", "many", "of", "the", "possible", "bigrams", "with"]
result = self.AnalysisTester(scores, threshold=1)[sentence]
self.assertEqual(
result, ["a", "simple_sentence", "with", "many_of_the_possible", "bigrams", "with"])
class PhrasesData:
sentences = common_texts + [
['graph', 'minors', 'survey', 'human', 'interface'],
]
connector_words = frozenset()
bigram1 = u'response_time'
bigram2 = u'graph_minors'
bigram3 = u'human_interface'
def gen_sentences(self):
return ((w for w in sentence) for sentence in self.sentences)
class PhrasesCommon(PhrasesData):
"""Tests for both Phrases and FrozenPhrases classes."""
def setUp(self):
self.bigram = Phrases(self.sentences, min_count=1, threshold=1, connector_words=self.connector_words)
self.bigram_default = Phrases(self.sentences, connector_words=self.connector_words)
def testEmptyPhrasifiedSentencesIterator(self):
bigram_phrases = Phrases(self.sentences)
bigram_phraser = FrozenPhrases(bigram_phrases)
trigram_phrases = Phrases(bigram_phraser[self.sentences])
trigram_phraser = FrozenPhrases(trigram_phrases)
trigrams = trigram_phraser[bigram_phraser[self.sentences]]
fst, snd = list(trigrams), list(trigrams)
self.assertEqual(fst, snd)
self.assertNotEqual(snd, [])
def testEmptyInputsOnBigramConstruction(self):
"""Test that empty inputs don't throw errors and return the expected result."""
# Empty list -> empty list
self.assertEqual(list(self.bigram_default[[]]), [])
# Empty iterator -> empty list
self.assertEqual(list(self.bigram_default[iter(())]), [])
# List of empty list -> list of empty list
self.assertEqual(list(self.bigram_default[[[], []]]), [[], []])
# Iterator of empty list -> list of empty list
self.assertEqual(list(self.bigram_default[iter([[], []])]), [[], []])
# Iterator of empty iterator -> list of empty list
self.assertEqual(list(self.bigram_default[(iter(()) for i in range(2))]), [[], []])
def testSentenceGeneration(self):
"""Test basic bigram using a dummy corpus."""
# test that we generate the same amount of sentences as the input
self.assertEqual(
len(self.sentences),
len(list(self.bigram_default[self.sentences])),
)
def testSentenceGenerationWithGenerator(self):
"""Test basic bigram production when corpus is a generator."""
self.assertEqual(
len(list(self.gen_sentences())),
len(list(self.bigram_default[self.gen_sentences()])),
)
def testBigramConstruction(self):
"""Test Phrases bigram construction."""
# with this setting we should get response_time and graph_minors
bigram1_seen = False
bigram2_seen = False
for sentence in self.bigram[self.sentences]:
if not bigram1_seen and self.bigram1 in sentence:
bigram1_seen = True
if not bigram2_seen and self.bigram2 in sentence:
bigram2_seen = True
if bigram1_seen and bigram2_seen:
break
self.assertTrue(bigram1_seen and bigram2_seen)
# check the same thing, this time using single doc transformation
# last sentence should contain both graph_minors and human_interface
self.assertTrue(self.bigram1 in self.bigram[self.sentences[1]])
self.assertTrue(self.bigram1 in self.bigram[self.sentences[4]])
self.assertTrue(self.bigram2 in self.bigram[self.sentences[-2]])
self.assertTrue(self.bigram2 in self.bigram[self.sentences[-1]])
self.assertTrue(self.bigram3 in self.bigram[self.sentences[-1]])
def testBigramConstructionFromGenerator(self):
"""Test Phrases bigram construction building when corpus is a generator."""
bigram1_seen = False
bigram2_seen = False
for s in self.bigram[self.gen_sentences()]:
if not bigram1_seen and self.bigram1 in s:
bigram1_seen = True
if not bigram2_seen and self.bigram2 in s:
bigram2_seen = True
if bigram1_seen and bigram2_seen:
break
self.assertTrue(bigram1_seen and bigram2_seen)
def testBigramConstructionFromArray(self):
"""Test Phrases bigram construction building when corpus is a numpy array."""
bigram1_seen = False
bigram2_seen = False
for s in self.bigram[np.array(self.sentences, dtype=object)]:
if not bigram1_seen and self.bigram1 in s:
bigram1_seen = True
if not bigram2_seen and self.bigram2 in s:
bigram2_seen = True
if bigram1_seen and bigram2_seen:
break
self.assertTrue(bigram1_seen and bigram2_seen)
# Scorer used by the testCustomScorer tests below. It must live at module level
# (outside the test classes) so that Phrases objects using it stay picklable;
# it simply assigns a score of 1 to every candidate bigram.
def dumb_scorer(worda_count, wordb_count, bigram_count, len_vocab, min_count, corpus_word_count):
return 1
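# For reference, the default scorer exercised by the testScoringDefault tests
# below computes, for each candidate bigram:
#   (bigram_count - min_count) / worda_count / wordb_count * len_vocab
# TestPhrasesModelCommonTerms.testScoringDefault recomputes exactly this from
# the vocabulary counts.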
class TestPhrasesModel(PhrasesCommon, unittest.TestCase):
def testExportPhrases(self):
"""Test Phrases bigram export phrases."""
bigram = Phrases(self.sentences, min_count=1, threshold=1, delimiter=' ')
seen_bigrams = set(bigram.find_phrases(self.sentences).keys())
assert seen_bigrams == {
'response time',
'graph minors',
'human interface',
}
def testMultipleBigramsSingleEntry(self):
"""Test a single entry produces multiple bigrams."""
bigram = Phrases(self.sentences, min_count=1, threshold=1, delimiter=' ')
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
seen_bigrams = set(bigram.find_phrases(test_sentences).keys())
assert seen_bigrams == {'graph minors', 'human interface'}
def testScoringDefault(self):
"""Test the default scoring, from the mikolov word2vec paper."""
bigram = Phrases(self.sentences, min_count=1, threshold=1, delimiter=' ')
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
seen_scores = set(round(score, 3) for score in bigram.find_phrases(test_sentences).values())
assert seen_scores == {
5.167, # score for graph minors
3.444 # score for human interface
}
def test__getitem__(self):
"""Test Phrases[sentences] with a single sentence."""
bigram = Phrases(self.sentences, min_count=1, threshold=1)
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
phrased_sentence = next(bigram[test_sentences].__iter__())
assert phrased_sentence == ['graph_minors', 'survey', 'human_interface']
def testScoringNpmi(self):
"""Test normalized pointwise mutual information scoring."""
bigram = Phrases(self.sentences, min_count=1, threshold=.5, scoring='npmi')
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface']]
seen_scores = set(round(score, 3) for score in bigram.find_phrases(test_sentences).values())
assert seen_scores == {
.882, # score for graph minors
.714 # score for human interface
}
def testCustomScorer(self):
"""Test using a custom scoring function."""
bigram = Phrases(self.sentences, min_count=1, threshold=.001, scoring=dumb_scorer)
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
seen_scores = list(bigram.find_phrases(test_sentences).values())
assert all(score == 1 for score in seen_scores)
assert len(seen_scores) == 3 # 'graph minors' and 'survey human' and 'interface system'
def testBadParameters(self):
"""Test the phrases module with bad parameters."""
# should fail with something less or equal than 0
self.assertRaises(ValueError, Phrases, self.sentences, min_count=0)
# threshold should be positive
self.assertRaises(ValueError, Phrases, self.sentences, threshold=-1)
def testPruning(self):
"""Test that max_vocab_size parameter is respected."""
bigram = Phrases(self.sentences, max_vocab_size=5)
self.assertTrue(len(bigram.vocab) <= 5)
# endclass TestPhrasesModel
class TestPhrasesPersistence(PhrasesData, unittest.TestCase):
def testSaveLoadCustomScorer(self):
"""Test saving and loading a Phrases object with a custom scorer."""
with temporary_file("test.pkl") as fpath:
bigram = Phrases(self.sentences, min_count=1, threshold=.001, scoring=dumb_scorer)
bigram.save(fpath)
bigram_loaded = Phrases.load(fpath)
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
seen_scores = list(bigram_loaded.find_phrases(test_sentences).values())
assert all(score == 1 for score in seen_scores)
assert len(seen_scores) == 3 # 'graph minors' and 'survey human' and 'interface system'
def testSaveLoad(self):
"""Test saving and loading a Phrases object."""
with temporary_file("test.pkl") as fpath:
bigram = Phrases(self.sentences, min_count=1, threshold=1)
bigram.save(fpath)
bigram_loaded = Phrases.load(fpath)
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
seen_scores = set(round(score, 3) for score in bigram_loaded.find_phrases(test_sentences).values())
assert seen_scores == set([
5.167, # score for graph minors
3.444 # score for human interface
])
def testSaveLoadStringScoring(self):
"""Test backwards compatibility with a previous version of Phrases with custom scoring."""
bigram_loaded = Phrases.load(datapath("phrases-scoring-str.pkl"))
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
seen_scores = set(round(score, 3) for score in bigram_loaded.find_phrases(test_sentences).values())
assert seen_scores == set([
5.167, # score for graph minors
3.444 # score for human interface
])
def testSaveLoadNoScoring(self):
"""Test backwards compatibility with old versions of Phrases with no scoring parameter."""
bigram_loaded = Phrases.load(datapath("phrases-no-scoring.pkl"))
test_sentences = [['graph', 'minors', 'survey', 'human', 'interface', 'system']]
seen_scores = set(round(score, 3) for score in bigram_loaded.find_phrases(test_sentences).values())
assert seen_scores == set([
5.167, # score for graph minors
3.444 # score for human interface
])
def testSaveLoadNoCommonTerms(self):
"""Ensure backwards compatibility with old versions of Phrases, before connector_words."""
bigram_loaded = Phrases.load(datapath("phrases-no-common-terms.pkl"))
self.assertEqual(bigram_loaded.connector_words, frozenset())
# can make a phraser, cf #1751
phraser = FrozenPhrases(bigram_loaded) # does not raise
phraser[["human", "interface", "survey"]] # does not raise
class TestFrozenPhrasesPersistence(PhrasesData, unittest.TestCase):
def testSaveLoadCustomScorer(self):
"""Test saving and loading a FrozenPhrases object with a custom scorer."""
with temporary_file("test.pkl") as fpath:
bigram = FrozenPhrases(
Phrases(self.sentences, min_count=1, threshold=.001, scoring=dumb_scorer))
bigram.save(fpath)
bigram_loaded = FrozenPhrases.load(fpath)
self.assertEqual(bigram_loaded.scoring, dumb_scorer)
def testSaveLoad(self):
"""Test saving and loading a FrozenPhrases object."""
with temporary_file("test.pkl") as fpath:
bigram = FrozenPhrases(Phrases(self.sentences, min_count=1, threshold=1))
bigram.save(fpath)
bigram_loaded = FrozenPhrases.load(fpath)
self.assertEqual(
bigram_loaded[['graph', 'minors', 'survey', 'human', 'interface', 'system']],
['graph_minors', 'survey', 'human_interface', 'system'])
def testSaveLoadStringScoring(self):
"""Test saving and loading a FrozenPhrases object with a string scoring parameter.
This should ensure backwards compatibility with the previous version of FrozenPhrases"""
bigram_loaded = FrozenPhrases.load(datapath("phraser-scoring-str.pkl"))
        # we don't do much with scoring, just verify it's the one expected
self.assertEqual(bigram_loaded.scoring, original_scorer)
def testSaveLoadNoScoring(self):
"""Test saving and loading a FrozenPhrases object with no scoring parameter.
This should ensure backwards compatibility with old versions of FrozenPhrases"""
bigram_loaded = FrozenPhrases.load(datapath("phraser-no-scoring.pkl"))
        # we don't do much with scoring, just verify it's the one expected
self.assertEqual(bigram_loaded.scoring, original_scorer)
def testSaveLoadNoCommonTerms(self):
"""Ensure backwards compatibility with old versions of FrozenPhrases, before connector_words."""
bigram_loaded = FrozenPhrases.load(datapath("phraser-no-common-terms.pkl"))
self.assertEqual(bigram_loaded.connector_words, frozenset())
class TestFrozenPhrasesModel(PhrasesCommon, unittest.TestCase):
"""Test FrozenPhrases models."""
def setUp(self):
"""Set up FrozenPhrases models for the tests."""
bigram_phrases = Phrases(
self.sentences, min_count=1, threshold=1, connector_words=self.connector_words)
self.bigram = FrozenPhrases(bigram_phrases)
bigram_default_phrases = Phrases(self.sentences, connector_words=self.connector_words)
self.bigram_default = FrozenPhrases(bigram_default_phrases)
class CommonTermsPhrasesData:
"""This mixin permits to reuse tests with the connector_words option."""
sentences = [
['human', 'interface', 'with', 'computer'],
['survey', 'of', 'user', 'computer', 'system', 'lack', 'of', 'interest'],
['eps', 'user', 'interface', 'system'],
['system', 'and', 'human', 'system', 'eps'],
['user', 'lack', 'of', 'interest'],
['trees'],
['graph', 'of', 'trees'],
['data', 'and', 'graph', 'of', 'trees'],
['data', 'and', 'graph', 'survey'],
['data', 'and', 'graph', 'survey', 'for', 'human', 'interface'] # test bigrams within same sentence
]
connector_words = ['of', 'and', 'for']
bigram1 = u'lack_of_interest'
bigram2 = u'data_and_graph'
bigram3 = u'human_interface'
expression1 = u'lack of interest'
expression2 = u'data and graph'
expression3 = u'human interface'
def gen_sentences(self):
return ((w for w in sentence) for sentence in self.sentences)
class TestPhrasesModelCommonTerms(CommonTermsPhrasesData, TestPhrasesModel):
"""Test Phrases models with connector words."""
def testMultipleBigramsSingleEntry(self):
"""Test a single entry produces multiple bigrams."""
bigram = Phrases(self.sentences, min_count=1, threshold=1, connector_words=self.connector_words, delimiter=' ')
test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
seen_bigrams = set(bigram.find_phrases(test_sentences).keys())
assert seen_bigrams == set([
'data and graph',
'human interface',
])
def testExportPhrases(self):
"""Test Phrases bigram export phrases."""
bigram = Phrases(self.sentences, min_count=1, threshold=1, connector_words=self.connector_words, delimiter=' ')
seen_bigrams = set(bigram.find_phrases(self.sentences).keys())
assert seen_bigrams == set([
'human interface',
'graph of trees',
'data and graph',
'lack of interest',
])
def testScoringDefault(self):
""" test the default scoring, from the mikolov word2vec paper """
bigram = Phrases(self.sentences, min_count=1, threshold=1, connector_words=self.connector_words)
test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
seen_scores = set(round(score, 3) for score in bigram.find_phrases(test_sentences).values())
min_count = float(bigram.min_count)
len_vocab = float(len(bigram.vocab))
graph = float(bigram.vocab["graph"])
data = float(bigram.vocab["data"])
data_and_graph = float(bigram.vocab["data_and_graph"])
human = float(bigram.vocab["human"])
interface = float(bigram.vocab["interface"])
human_interface = float(bigram.vocab["human_interface"])
assert seen_scores == set([
# score for data and graph
round((data_and_graph - min_count) / data / graph * len_vocab, 3),
# score for human interface
round((human_interface - min_count) / human / interface * len_vocab, 3),
])
def testScoringNpmi(self):
"""Test normalized pointwise mutual information scoring."""
bigram = Phrases(
self.sentences, min_count=1, threshold=.5,
scoring='npmi', connector_words=self.connector_words,
)
test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
seen_scores = set(round(score, 3) for score in bigram.find_phrases(test_sentences).values())
assert seen_scores == set([
.74, # score for data and graph
.894 # score for human interface
])
def testCustomScorer(self):
"""Test using a custom scoring function."""
bigram = Phrases(
self.sentences, min_count=1, threshold=.001,
scoring=dumb_scorer, connector_words=self.connector_words,
)
test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
seen_scores = list(bigram.find_phrases(test_sentences).values())
assert all(seen_scores) # all scores 1
assert len(seen_scores) == 2 # 'data and graph' 'survey for human'
def test__getitem__(self):
"""Test Phrases[sentences] with a single sentence."""
bigram = Phrases(self.sentences, min_count=1, threshold=1, connector_words=self.connector_words)
test_sentences = [['data', 'and', 'graph', 'survey', 'for', 'human', 'interface']]
phrased_sentence = next(bigram[test_sentences].__iter__())
assert phrased_sentence == ['data_and_graph', 'survey', 'for', 'human_interface']
class TestFrozenPhrasesModelCompatibilty(unittest.TestCase):
def testCompatibilty(self):
phrases = Phrases.load(datapath("phrases-3.6.0.model"))
phraser = FrozenPhrases.load(datapath("phraser-3.6.0.model"))
test_sentences = ['trees', 'graph', 'minors']
self.assertEqual(phrases[test_sentences], ['trees', 'graph_minors'])
self.assertEqual(phraser[test_sentences], ['trees', 'graph_minors'])
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
import numpy as np
from tensornetwork.backends.pytorch import pytorch_backend
import torch
import pytest
from unittest.mock import Mock
torch_dtypes = [torch.float32, torch.float64, torch.int32]
torch_eye_dtypes = [torch.float32, torch.float64, torch.int32, torch.int64]
torch_randn_dtypes = [torch.float32, torch.float64]
def test_tensordot():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(2 * np.ones((2, 3, 4)))
b = backend.convert_to_tensor(np.ones((2, 3, 4)))
actual = backend.tensordot(a, b, ((1, 2), (1, 2)))
expected = np.array([[24.0, 24.0], [24.0, 24.0]])
np.testing.assert_allclose(expected, actual)
def test_tensordot_int():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(2 * np.ones((3, 3, 3)))
b = backend.convert_to_tensor(np.ones((3, 3, 3)))
actual = backend.tensordot(a, b, 1)
expected = torch.tensordot(a, b, 1)
np.testing.assert_allclose(expected, actual)
def test_reshape():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(np.ones((2, 3, 4)))
actual = backend.shape_tuple(backend.reshape(a, (6, 4, 1)))
assert actual == (6, 4, 1)
def test_transpose():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(
np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]]))
actual = backend.transpose(a, [2, 0, 1])
expected = np.array([[[1.0, 3.0], [5.0, 7.0]], [[2.0, 4.0], [6.0, 8.0]]])
np.testing.assert_allclose(expected, actual)
def test_transpose_noperm():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(
np.array([[[1., 2.], [3., 4.]], [[5., 6.], [7., 8.]]]))
actual = backend.transpose(a) # [2, 1, 0]
actual = backend.transpose(actual, perm=[0, 2, 1])
expected = np.array([[[1.0, 3.0], [5.0, 7.0]], [[2.0, 4.0], [6.0, 8.0]]])
np.testing.assert_allclose(expected, actual)
def test_shape_concat():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(2 * np.ones((1, 3, 1)))
b = backend.convert_to_tensor(np.ones((1, 2, 1)))
  actual = backend.shape_concat((a, b), axis=1)
  expected = np.array([[[2.0], [2.0], [2.0], [1.0], [1.0]]])
np.testing.assert_allclose(expected, actual)
def test_slice():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(
np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]))
actual = backend.slice(a, (1, 1), (2, 2))
expected = np.array([[5., 6.], [8., 9.]])
np.testing.assert_allclose(expected, actual)
def test_slice_raises_error():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(
np.array([[1., 2., 3.], [4., 5., 6.], [7., 8., 9.]]))
with pytest.raises(ValueError):
backend.slice(a, (1, 1), (2, 2, 2))
def test_shape_tensor():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(np.ones([2, 3, 4]))
assert isinstance(backend.shape_tensor(a), torch.Tensor)
actual = backend.shape_tensor(a)
expected = np.array([2, 3, 4])
np.testing.assert_allclose(expected, actual)
def test_shape_tuple():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(np.ones([2, 3, 4]))
actual = backend.shape_tuple(a)
assert actual == (2, 3, 4)
def test_shape_prod():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(2 * np.ones([1, 2, 3, 4]))
actual = np.array(backend.shape_prod(a))
assert actual == 2**24
def test_sqrt():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(np.array([4.0, 9.0]))
actual = backend.sqrt(a)
expected = np.array([2, 3])
np.testing.assert_allclose(expected, actual)
def test_convert_to_tensor():
backend = pytorch_backend.PyTorchBackend()
array = np.ones((2, 3, 4))
actual = backend.convert_to_tensor(array)
expected = torch.ones((2, 3, 4))
assert isinstance(actual, type(expected))
np.testing.assert_allclose(expected, actual)
def test_outer_product():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(2 * np.ones((2, 1)))
b = backend.convert_to_tensor(np.ones((1, 2, 2)))
actual = backend.outer_product(a, b)
expected = np.ones((2, 1, 1, 2, 2)) * 2
np.testing.assert_allclose(expected, actual)
def test_norm():
backend = pytorch_backend.PyTorchBackend()
a = backend.convert_to_tensor(np.ones((2, 2)))
assert backend.norm(a) == 2
@pytest.mark.parametrize("dtype", torch_eye_dtypes)
def test_eye(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.eye(N=4, M=5, dtype=dtype)
np.testing.assert_allclose(torch.eye(n=4, m=5, dtype=dtype), a)
@pytest.mark.parametrize("dtype", torch_dtypes)
def test_ones(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.ones((4, 4), dtype=dtype)
np.testing.assert_allclose(torch.ones((4, 4), dtype=dtype), a)
@pytest.mark.parametrize("dtype", torch_dtypes)
def test_zeros(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.zeros((4, 4), dtype=dtype)
np.testing.assert_allclose(torch.zeros((4, 4), dtype=dtype), a)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_randn(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.randn((4, 4), dtype=dtype)
assert a.shape == (4, 4)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_random_uniform(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.random_uniform((4, 4), dtype=dtype)
assert a.shape == (4, 4)
@pytest.mark.parametrize("dtype", torch_eye_dtypes)
def test_eye_dtype(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.eye(N=4, M=4, dtype=dtype)
assert a.dtype == dtype
@pytest.mark.parametrize("dtype", torch_dtypes)
def test_ones_dtype(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.ones((4, 4), dtype=dtype)
assert a.dtype == dtype
@pytest.mark.parametrize("dtype", torch_dtypes)
def test_zeros_dtype(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.zeros((4, 4), dtype=dtype)
assert a.dtype == dtype
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_randn_dtype(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.randn((4, 4), dtype=dtype)
assert a.dtype == dtype
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_random_uniform_dtype(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.random_uniform((4, 4), dtype=dtype)
assert a.dtype == dtype
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_randn_seed(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.randn((4, 4), seed=10, dtype=dtype)
b = backend.randn((4, 4), seed=10, dtype=dtype)
np.testing.assert_allclose(a, b)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_random_uniform_seed(dtype):
backend = pytorch_backend.PyTorchBackend()
a = backend.random_uniform((4, 4), seed=10, dtype=dtype)
b = backend.random_uniform((4, 4), seed=10, dtype=dtype)
  assert torch.allclose(a, b)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_random_uniform_boundaries(dtype):
lb = 1.2
ub = 4.8
backend = pytorch_backend.PyTorchBackend()
a = backend.random_uniform((4, 4), seed=10, dtype=dtype)
b = backend.random_uniform((4, 4), (lb, ub), seed=10, dtype=dtype)
assert (torch.ge(a, 0).byte().all() and torch.le(a, 1).byte().all() and
torch.ge(b, lb).byte().all() and torch.le(b, ub).byte().all())
def test_random_uniform_behavior():
backend = pytorch_backend.PyTorchBackend()
a = backend.random_uniform((4, 4), seed=10)
torch.manual_seed(10)
b = torch.empty((4, 4), dtype=torch.float64).uniform_()
  assert torch.allclose(a, b)
def test_conj():
backend = pytorch_backend.PyTorchBackend()
real = np.random.rand(2, 2, 2)
a = backend.convert_to_tensor(real)
actual = backend.conj(a)
expected = real
np.testing.assert_allclose(expected, actual)
def test_eigsh_lanczos_0():
  # This test should just not crash.
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
D = 4
init = backend.randn((2, 2, 2), dtype=dtype)
tmp = backend.randn((8, 8), dtype=dtype)
H = tmp + backend.transpose(backend.conj(tmp), (1, 0))
H = H.reshape([2, 2, 2, 2, 2, 2])
def mv(x, mat):
return torch.tensordot(mat, x, ([0, 3, 5], [2, 0, 1])).permute([2, 0, 1])
backend.eigsh_lanczos(mv, [H], init, num_krylov_vecs=D)
def test_eigsh_lanczos_1():
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
D = 24
init = backend.randn((D,), dtype=dtype)
tmp = backend.randn((D, D), dtype=dtype)
H = tmp + backend.transpose(backend.conj(tmp), (1, 0))
def mv(x, mat):
return mat.mv(x)
eta1, U1 = backend.eigsh_lanczos(mv, [H], init, num_krylov_vecs=D)
eta2, U2 = H.symeig(eigenvectors=True)
v2 = U2[:, 0]
v2 = v2 / sum(v2)
v1 = np.reshape(U1[0], (D))
v1 = v1 / sum(v1)
np.testing.assert_allclose(eta1[0], min(eta2))
np.testing.assert_allclose(v1, v2)
def test_eigsh_small_number_krylov_vectors():
backend = pytorch_backend.PyTorchBackend()
init = backend.convert_to_tensor(np.array([1, 1], dtype=np.float64))
H = backend.convert_to_tensor(np.array([[1, 2], [3, 4]], dtype=np.float64))
def mv(x, mat):
return mat.mv(x)
eta1, _ = backend.eigsh_lanczos(mv, [H], init, num_krylov_vecs=1)
np.testing.assert_allclose(eta1[0], 5)
@pytest.mark.parametrize("numeig", [1, 2, 3, 4])
def test_eigsh_lanczos_reorthogonalize(numeig):
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
D = 24
np.random.seed(10)
tmp = backend.randn((D, D), dtype=dtype, seed=10)
H = tmp + backend.transpose(backend.conj(tmp), (1, 0))
def mv(x, mat):
return mat.mv(x)
eta1, U1 = backend.eigsh_lanczos(
mv, [H],
shape=(D,),
dtype=dtype,
numeig=numeig,
num_krylov_vecs=D,
reorthogonalize=True,
ndiag=1,
tol=10**(-12),
delta=10**(-12))
eta2, U2 = np.linalg.eigh(H)
np.testing.assert_allclose(eta1[0:numeig], eta2[0:numeig])
for n in range(numeig):
v2 = U2[:, n]
    v2 /= np.sum(v2)  # fix phases
v1 = np.reshape(U1[n], (D))
v1 /= torch.sum(v1)
np.testing.assert_allclose(v1, v2, rtol=10**(-5), atol=10**(-5))
def test_eigsh_lanczos_2():
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
D = 16
tmp = backend.randn((D, D), dtype=dtype)
H = tmp + backend.transpose(backend.conj(tmp), (1, 0))
def mv(x, mat):
return mat.mv(x)
eta1, U1 = backend.eigsh_lanczos(
mv, [H],
shape=(D,),
dtype=dtype,
reorthogonalize=True,
ndiag=1,
tol=10**(-12),
delta=10**(-12))
eta2, U2 = H.symeig(eigenvectors=True)
v2 = U2[:, 0]
v2 = v2 / sum(v2)
v1 = np.reshape(U1[0], (D))
v1 = v1 / sum(v1)
np.testing.assert_allclose(eta1[0], min(eta2))
np.testing.assert_allclose(v1, v2, rtol=10**(-5), atol=10**(-5))
def test_eigsh_lanczos_raises():
backend = pytorch_backend.PyTorchBackend()
with pytest.raises(
ValueError, match='`num_krylov_vecs` >= `numeig` required!'):
backend.eigsh_lanczos(lambda x: x, numeig=10, num_krylov_vecs=9)
with pytest.raises(
ValueError,
match="Got numeig = 2 > 1 and `reorthogonalize = False`. "
"Use `reorthogonalize=True` for `numeig > 1`"):
backend.eigsh_lanczos(lambda x: x, numeig=2, reorthogonalize=False)
with pytest.raises(
ValueError,
match="if no `initial_state` is passed, then `shape` and"
"`dtype` have to be provided"):
backend.eigsh_lanczos(lambda x: x, shape=(10,), dtype=None)
with pytest.raises(
ValueError,
match="if no `initial_state` is passed, then `shape` and"
"`dtype` have to be provided"):
backend.eigsh_lanczos(lambda x: x, shape=None, dtype=torch.float64)
with pytest.raises(
ValueError,
match="if no `initial_state` is passed, then `shape` and"
"`dtype` have to be provided"):
backend.eigsh_lanczos(lambda x: x)
with pytest.raises(
TypeError, match="Expected a `torch.Tensor`. Got <class 'list'>"):
backend.eigsh_lanczos(lambda x: x, initial_state=[1, 2, 3])
@pytest.mark.parametrize("a, b, expected", [
pytest.param(1, 1, 2),
pytest.param(
np.ones((1, 2, 3)), np.ones((1, 2, 3)), 2. * np.ones((1, 2, 3))),
])
def test_addition(a, b, expected):
backend = pytorch_backend.PyTorchBackend()
tensor1 = backend.convert_to_tensor(a)
tensor2 = backend.convert_to_tensor(b)
result = backend.addition(tensor1, tensor2)
np.testing.assert_allclose(result, expected)
assert tensor1.dtype == tensor2.dtype == result.dtype
@pytest.mark.parametrize("a, b, expected", [
pytest.param(1, 1, 0),
pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.zeros((1, 2, 3))),
])
def test_subtraction(a, b, expected):
backend = pytorch_backend.PyTorchBackend()
tensor1 = backend.convert_to_tensor(a)
tensor2 = backend.convert_to_tensor(b)
result = backend.subtraction(tensor1, tensor2)
np.testing.assert_allclose(result, expected)
assert tensor1.dtype == tensor2.dtype == result.dtype
@pytest.mark.parametrize("a, b, expected", [
pytest.param(1, 1, 1),
pytest.param(np.ones((1, 2, 3)), np.ones((1, 2, 3)), np.ones((1, 2, 3))),
])
def test_multiply(a, b, expected):
backend = pytorch_backend.PyTorchBackend()
tensor1 = backend.convert_to_tensor(a)
tensor2 = backend.convert_to_tensor(b)
result = backend.multiply(tensor1, tensor2)
np.testing.assert_allclose(result, expected)
assert tensor1.dtype == tensor2.dtype == result.dtype
@pytest.mark.parametrize("a, b, expected", [
pytest.param(2., 2., 1.),
pytest.param(
np.ones(()), 2. * np.ones((1, 2, 3)), 0.5 * np.ones((1, 2, 3))),
])
def test_divide(a, b, expected):
backend = pytorch_backend.PyTorchBackend()
tensor1 = backend.convert_to_tensor(a)
tensor2 = backend.convert_to_tensor(b)
result = backend.divide(tensor1, tensor2)
np.testing.assert_allclose(result, expected)
assert tensor1.dtype == tensor2.dtype == result.dtype
def test_eigh():
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
H = backend.randn((4, 4), dtype)
H = H + np.conj(np.transpose(H))
eta, U = backend.eigh(H)
eta_ac, _ = np.linalg.eigh(H)
M = U.transpose(1, 0).mm(H).mm(U)
np.testing.assert_allclose(eta, eta_ac)
np.testing.assert_almost_equal(np.diag(eta), M)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_index_update(dtype):
backend = pytorch_backend.PyTorchBackend()
tensor = backend.randn((4, 2, 3), dtype=dtype, seed=10)
out = backend.index_update(tensor, tensor > 0.1, 0.0)
tensor[tensor > 0.1] = 0.0
np.testing.assert_allclose(out, tensor)
def test_matrix_inv():
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
matrix = backend.randn((4, 4), dtype=dtype, seed=10)
inverse = backend.inv(matrix)
m1 = matrix.mm(inverse)
m2 = inverse.mm(matrix)
np.testing.assert_almost_equal(m1, np.eye(4))
np.testing.assert_almost_equal(m2, np.eye(4))
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_matrix_inv_raises(dtype):
backend = pytorch_backend.PyTorchBackend()
matrix = backend.randn((4, 4, 4), dtype=dtype, seed=10)
with pytest.raises(ValueError):
backend.inv(matrix)
def test_eigs_not_implemented():
backend = pytorch_backend.PyTorchBackend()
with pytest.raises(NotImplementedError):
backend.eigs(np.ones((2, 2)))
def test_gmres_not_implemented():
backend = pytorch_backend.PyTorchBackend()
dummy = backend.zeros(2)
with pytest.raises(NotImplementedError):
backend.gmres(lambda x: x, dummy)
def test_broadcast_right_multiplication():
backend = pytorch_backend.PyTorchBackend()
tensor1 = backend.randn((2, 4, 3), dtype=torch.float64, seed=10)
tensor2 = backend.randn((3,), dtype=torch.float64, seed=10)
out = backend.broadcast_right_multiplication(tensor1, tensor2)
np.testing.assert_allclose(out, tensor1 * tensor2)
def test_broadcast_right_multiplication_raises():
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
tensor1 = backend.randn((2, 4, 3), dtype=dtype, seed=10)
tensor2 = backend.randn((3, 3), dtype=dtype, seed=10)
with pytest.raises(ValueError):
backend.broadcast_right_multiplication(tensor1, tensor2)
def test_broadcast_left_multiplication():
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
tensor1 = backend.randn((3,), dtype=dtype, seed=10)
tensor2 = backend.randn((3, 4, 2), dtype=dtype, seed=10)
out = backend.broadcast_left_multiplication(tensor1, tensor2)
np.testing.assert_allclose(out, np.reshape(tensor1, (3, 1, 1)) * tensor2)
def test_broadcast_left_multiplication_raises():
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
tensor1 = backend.randn((3, 3), dtype=dtype, seed=10)
tensor2 = backend.randn((2, 4, 3), dtype=dtype, seed=10)
with pytest.raises(ValueError):
backend.broadcast_left_multiplication(tensor1, tensor2)
def test_sparse_shape():
dtype = torch.float64
backend = pytorch_backend.PyTorchBackend()
tensor = backend.randn((2, 3, 4), dtype=dtype, seed=10)
np.testing.assert_allclose(backend.sparse_shape(tensor), tensor.shape)
def test_sum():
np.random.seed(10)
backend = pytorch_backend.PyTorchBackend()
tensor = np.random.rand(2, 3, 4)
a = backend.convert_to_tensor(tensor)
actual = backend.sum(a, axis=(1, 2))
expected = np.sum(tensor, axis=(1, 2))
np.testing.assert_allclose(expected, actual)
def test_matmul():
np.random.seed(10)
backend = pytorch_backend.PyTorchBackend()
t1 = np.random.rand(10, 2, 3)
t2 = np.random.rand(10, 3, 4)
a = backend.convert_to_tensor(t1)
b = backend.convert_to_tensor(t2)
actual = backend.matmul(a, b)
expected = np.matmul(t1, t2)
np.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
@pytest.mark.parametrize("offset", range(-2, 2))
@pytest.mark.parametrize("axis1", [-2, 0])
@pytest.mark.parametrize("axis2", [-1, 0])
def test_diagonal(dtype, offset, axis1, axis2):
shape = (5, 5, 5, 5)
backend = pytorch_backend.PyTorchBackend()
array = backend.randn(shape, dtype=dtype, seed=10)
if axis1 == axis2:
with pytest.raises(ValueError):
actual = backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
else:
actual = backend.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
expected = np.diagonal(array, offset=offset, axis1=axis1, axis2=axis2)
np.testing.assert_allclose(actual, expected)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
@pytest.mark.parametrize("k", range(-2, 2))
def test_diagflat(dtype, k):
backend = pytorch_backend.PyTorchBackend()
array = backend.randn((16,), dtype=dtype, seed=10)
actual = backend.diagflat(array, k=k)
expected = torch.diag_embed(array, offset=k)
np.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_abs(dtype):
shape = (4, 3, 2)
backend = pytorch_backend.PyTorchBackend()
tensor = backend.randn(shape, dtype=dtype, seed=10)
actual = backend.abs(tensor)
expected = torch.abs(tensor)
np.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_sign(dtype):
shape = (4, 3, 2)
backend = pytorch_backend.PyTorchBackend()
tensor = backend.randn(shape, dtype=dtype, seed=10)
actual = backend.sign(tensor)
expected = torch.sign(tensor)
np.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
@pytest.mark.parametrize("offset", [0, 1])
@pytest.mark.parametrize("axis1", range(0, 3))
@pytest.mark.parametrize("axis2", range(0, 3))
def test_trace(dtype, offset, axis1, axis2):
shape = (5, 5, 5, 5)
backend = pytorch_backend.PyTorchBackend()
array = backend.randn(shape, dtype=dtype, seed=10)
if offset != 0:
with pytest.raises(NotImplementedError):
actual = backend.trace(array, offset=offset, axis1=axis1, axis2=axis2)
elif axis1 == axis2:
with pytest.raises(ValueError):
actual = backend.trace(array, offset=offset, axis1=axis1, axis2=axis2)
else:
actual = backend.trace(array, offset=offset, axis1=axis1, axis2=axis2)
expected = np.trace(array, axis1=axis1, axis2=axis2)
np.testing.assert_allclose(actual, expected, atol=1e-6, rtol=1e-6)
def test_trace_raises():
shape = tuple([1] * 30)
backend = pytorch_backend.PyTorchBackend()
array = backend.randn(shape, seed=10)
with pytest.raises(ValueError):
_ = backend.trace(array)
@pytest.mark.parametrize("pivot_axis", [-1, 1, 2])
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_pivot(dtype, pivot_axis):
shape = (4, 3, 2, 8)
pivot_shape = (np.prod(shape[:pivot_axis]), np.prod(shape[pivot_axis:]))
backend = pytorch_backend.PyTorchBackend()
tensor = backend.randn(shape, dtype=dtype, seed=10)
expected = torch.reshape(tensor, pivot_shape)
actual = backend.pivot(tensor, pivot_axis=pivot_axis)
np.testing.assert_allclose(expected, actual)
def test_matmul_rank2():
np.random.seed(10)
backend = pytorch_backend.PyTorchBackend()
t1 = np.random.rand(10, 4)
t2 = np.random.rand(4, 10)
a = backend.convert_to_tensor(t1)
b = backend.convert_to_tensor(t2)
actual = backend.matmul(a, b)
expected = np.matmul(t1, t2)
np.testing.assert_allclose(expected, actual)
@pytest.mark.parametrize("dtype", torch_randn_dtypes)
def test_item(dtype):
backend = pytorch_backend.PyTorchBackend()
tensor = backend.randn((1,), dtype=dtype, seed=10)
assert backend.item(tensor) == tensor.item()
|
import argparse
import sys
from paasta_tools.marathon_tools import DEFAULT_SOA_DIR
from paasta_tools.marathon_tools import get_marathon_apps_with_clients
from paasta_tools.marathon_tools import get_marathon_clients
from paasta_tools.marathon_tools import get_marathon_servers
from paasta_tools.marathon_tools import get_num_at_risk_tasks
from paasta_tools.marathon_tools import load_marathon_service_config
from paasta_tools.mesos_maintenance import get_draining_hosts
from paasta_tools.utils import _log
from paasta_tools.utils import compose_job_id
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import long_job_id_to_short_job_id
from paasta_tools.utils import use_requests_cache
def parse_args():
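    """Parse command-line arguments."""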
parser = argparse.ArgumentParser(
description="Lists marathon instances for a service."
)
parser.add_argument(
"-c",
"--cluster",
dest="cluster",
metavar="CLUSTER",
default=None,
help="define a specific cluster to read from",
)
parser.add_argument(
"-d",
"--soa-dir",
dest="soa_dir",
metavar="SOA_DIR",
default=DEFAULT_SOA_DIR,
help="define a different soa config directory",
)
parser.add_argument(
"-m",
"--minimal",
dest="minimal",
action="store_true",
help="show only service instances that need bouncing",
)
args = parser.parse_args()
return args
def get_desired_marathon_configs(soa_dir):
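    """Load the marathon app dict and job config for every marathon instance
    configured in this cluster, both keyed by the formatted app id."""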
cluster = load_system_paasta_config().get_cluster()
instances = get_services_for_cluster(
instance_type="marathon", cluster=cluster, soa_dir=soa_dir
)
job_configs = dict()
formatted_marathon_configs = dict()
for service, instance in instances:
try:
job_config = load_marathon_service_config(
service=service, instance=instance, cluster=cluster, soa_dir=soa_dir
)
formatted_config = job_config.format_marathon_app_dict()
formatted_marathon_configs[
formatted_config["id"].lstrip("/")
] = formatted_config
job_configs[formatted_config["id"].lstrip("/")] = job_config
# Not ideal but we rely on a lot of user input to create the app dict
# and we really can't afford to bail if just one app definition is malformed
except Exception as errormsg:
_log(
service=service,
line=str(errormsg),
component="deploy",
level="debug",
cluster=cluster,
instance=instance,
)
return formatted_marathon_configs, job_configs
@use_requests_cache("list_marathon_services")
def get_service_instances_that_need_bouncing(marathon_clients, soa_dir):
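    """Compare the apps currently running in marathon with the desired configs
    and return the short job ids of service instances that need bouncing."""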
(
desired_marathon_configs_formatted,
desired_job_configs,
) = get_desired_marathon_configs(soa_dir)
desired_ids_and_clients = set()
for app_id, job_config in desired_job_configs.items():
desired_ids_and_clients.add(
(app_id, marathon_clients.get_current_client_for_service(job_config))
)
current_apps_with_clients = {
(app.id.lstrip("/"), client): app
for app, client in get_marathon_apps_with_clients(
marathon_clients.get_all_clients()
)
}
actual_ids_and_clients = set(current_apps_with_clients.keys())
undesired_apps_and_clients = actual_ids_and_clients.symmetric_difference(
desired_ids_and_clients
)
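    # App ids present in only one of the two sets are either not yet deployed
    # or no longer desired, so their instances need a bounce.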
apps_that_need_bouncing = {
long_job_id_to_short_job_id(app_id)
for app_id, client in undesired_apps_and_clients
}
draining_hosts = get_draining_hosts()
for (app_id, client), app in current_apps_with_clients.items():
short_app_id = long_job_id_to_short_job_id(app_id)
if short_app_id not in apps_that_need_bouncing:
if (
app.instances != desired_marathon_configs_formatted[app_id]["instances"]
or get_num_at_risk_tasks(app, draining_hosts) != 0
):
apps_that_need_bouncing.add(short_app_id)
return (app_id.replace("--", "_") for app_id in apps_that_need_bouncing)
def main():
args = parse_args()
soa_dir = args.soa_dir
cluster = args.cluster
if args.minimal:
system_paasta_config = load_system_paasta_config()
marathon_servers = get_marathon_servers(system_paasta_config)
marathon_clients = get_marathon_clients(marathon_servers)
service_instances = get_service_instances_that_need_bouncing(
marathon_clients=marathon_clients, soa_dir=soa_dir
)
else:
instances = get_services_for_cluster(
cluster=cluster, instance_type="marathon", soa_dir=soa_dir
)
service_instances = []
for name, instance in instances:
service_instances.append(compose_job_id(name, instance))
print("\n".join(service_instances))
sys.exit(0)
if __name__ == "__main__":
main()
|
import json
import logging
import voluptuous as vol
from homeassistant.components import mqtt
from homeassistant.components.mqtt import (
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
subscription,
)
from homeassistant.components.vacuum import (
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATUS,
SUPPORT_STOP,
StateVacuumEntity,
)
from homeassistant.const import (
ATTR_SUPPORTED_FEATURES,
CONF_DEVICE,
CONF_NAME,
CONF_UNIQUE_ID,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from ..debug_info import log_messages
from .schema import MQTT_VACUUM_SCHEMA, services_to_strings, strings_to_services
_LOGGER = logging.getLogger(__name__)
SERVICE_TO_STRING = {
SUPPORT_START: "start",
SUPPORT_PAUSE: "pause",
SUPPORT_STOP: "stop",
SUPPORT_RETURN_HOME: "return_home",
SUPPORT_FAN_SPEED: "fan_speed",
SUPPORT_BATTERY: "battery",
SUPPORT_STATUS: "status",
SUPPORT_SEND_COMMAND: "send_command",
SUPPORT_LOCATE: "locate",
SUPPORT_CLEAN_SPOT: "clean_spot",
}
STRING_TO_SERVICE = {v: k for k, v in SERVICE_TO_STRING.items()}
DEFAULT_SERVICES = (
SUPPORT_START
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_STATUS
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
)
ALL_SERVICES = (
DEFAULT_SERVICES
| SUPPORT_PAUSE
| SUPPORT_LOCATE
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
)
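# Keys looked up in the JSON payload received on the state topic.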
BATTERY = "battery_level"
FAN_SPEED = "fan_speed"
STATE = "state"
POSSIBLE_STATES = {
STATE_IDLE: STATE_IDLE,
STATE_DOCKED: STATE_DOCKED,
STATE_ERROR: STATE_ERROR,
STATE_PAUSED: STATE_PAUSED,
STATE_RETURNING: STATE_RETURNING,
STATE_CLEANING: STATE_CLEANING,
}
CONF_SUPPORTED_FEATURES = ATTR_SUPPORTED_FEATURES
CONF_PAYLOAD_TURN_ON = "payload_turn_on"
CONF_PAYLOAD_TURN_OFF = "payload_turn_off"
CONF_PAYLOAD_RETURN_TO_BASE = "payload_return_to_base"
CONF_PAYLOAD_STOP = "payload_stop"
CONF_PAYLOAD_CLEAN_SPOT = "payload_clean_spot"
CONF_PAYLOAD_LOCATE = "payload_locate"
CONF_PAYLOAD_START = "payload_start"
CONF_PAYLOAD_PAUSE = "payload_pause"
CONF_SET_FAN_SPEED_TOPIC = "set_fan_speed_topic"
CONF_FAN_SPEED_LIST = "fan_speed_list"
CONF_SEND_COMMAND_TOPIC = "send_command_topic"
DEFAULT_NAME = "MQTT State Vacuum"
DEFAULT_RETAIN = False
DEFAULT_SERVICE_STRINGS = services_to_strings(DEFAULT_SERVICES, SERVICE_TO_STRING)
DEFAULT_PAYLOAD_RETURN_TO_BASE = "return_to_base"
DEFAULT_PAYLOAD_STOP = "stop"
DEFAULT_PAYLOAD_CLEAN_SPOT = "clean_spot"
DEFAULT_PAYLOAD_LOCATE = "locate"
DEFAULT_PAYLOAD_START = "start"
DEFAULT_PAYLOAD_PAUSE = "pause"
PLATFORM_SCHEMA_STATE = (
mqtt.MQTT_BASE_PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICE): mqtt.MQTT_ENTITY_DEVICE_INFO_SCHEMA,
vol.Optional(CONF_FAN_SPEED_LIST, default=[]): vol.All(
cv.ensure_list, [cv.string]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(
CONF_PAYLOAD_CLEAN_SPOT, default=DEFAULT_PAYLOAD_CLEAN_SPOT
): cv.string,
vol.Optional(
CONF_PAYLOAD_LOCATE, default=DEFAULT_PAYLOAD_LOCATE
): cv.string,
vol.Optional(
CONF_PAYLOAD_RETURN_TO_BASE, default=DEFAULT_PAYLOAD_RETURN_TO_BASE
): cv.string,
vol.Optional(CONF_PAYLOAD_START, default=DEFAULT_PAYLOAD_START): cv.string,
vol.Optional(CONF_PAYLOAD_PAUSE, default=DEFAULT_PAYLOAD_PAUSE): cv.string,
vol.Optional(CONF_PAYLOAD_STOP, default=DEFAULT_PAYLOAD_STOP): cv.string,
vol.Optional(CONF_SEND_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_SET_FAN_SPEED_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_STATE_TOPIC): mqtt.valid_publish_topic,
vol.Optional(
CONF_SUPPORTED_FEATURES, default=DEFAULT_SERVICE_STRINGS
): vol.All(cv.ensure_list, [vol.In(STRING_TO_SERVICE.keys())]),
vol.Optional(CONF_UNIQUE_ID): cv.string,
vol.Optional(CONF_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_RETAIN, default=DEFAULT_RETAIN): cv.boolean,
}
)
.extend(mqtt.MQTT_AVAILABILITY_SCHEMA.schema)
.extend(mqtt.MQTT_JSON_ATTRS_SCHEMA.schema)
.extend(MQTT_VACUUM_SCHEMA.schema)
)
async def async_setup_entity_state(
config, async_add_entities, config_entry, discovery_data
):
"""Set up a State MQTT Vacuum."""
async_add_entities([MqttStateVacuum(config, config_entry, discovery_data)])
class MqttStateVacuum(
MqttAttributes,
MqttAvailability,
MqttDiscoveryUpdate,
MqttEntityDeviceInfo,
StateVacuumEntity,
):
"""Representation of a MQTT-controlled state vacuum."""
def __init__(self, config, config_entry, discovery_info):
"""Initialize the vacuum."""
self._state = None
self._state_attrs = {}
self._fan_speed_list = []
self._sub_state = None
self._unique_id = config.get(CONF_UNIQUE_ID)
# Load config
self._setup_from_config(config)
device_config = config.get(CONF_DEVICE)
MqttAttributes.__init__(self, config)
MqttAvailability.__init__(self, config)
MqttDiscoveryUpdate.__init__(self, discovery_info, self.discovery_update)
MqttEntityDeviceInfo.__init__(self, device_config, config_entry)
def _setup_from_config(self, config):
self._config = config
self._name = config[CONF_NAME]
supported_feature_strings = config[CONF_SUPPORTED_FEATURES]
self._supported_features = strings_to_services(
supported_feature_strings, STRING_TO_SERVICE
)
self._fan_speed_list = config[CONF_FAN_SPEED_LIST]
self._command_topic = config.get(mqtt.CONF_COMMAND_TOPIC)
self._set_fan_speed_topic = config.get(CONF_SET_FAN_SPEED_TOPIC)
self._send_command_topic = config.get(CONF_SEND_COMMAND_TOPIC)
self._payloads = {
key: config.get(key)
for key in (
CONF_PAYLOAD_START,
CONF_PAYLOAD_PAUSE,
CONF_PAYLOAD_STOP,
CONF_PAYLOAD_RETURN_TO_BASE,
CONF_PAYLOAD_CLEAN_SPOT,
CONF_PAYLOAD_LOCATE,
)
}
async def discovery_update(self, discovery_payload):
"""Handle updated discovery message."""
config = PLATFORM_SCHEMA_STATE(discovery_payload)
self._setup_from_config(config)
await self.attributes_discovery_update(config)
await self.availability_discovery_update(config)
await self.device_info_discovery_update(config)
await self._subscribe_topics()
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Subscribe MQTT events."""
await super().async_added_to_hass()
await self._subscribe_topics()
async def async_will_remove_from_hass(self):
"""Unsubscribe when removed."""
self._sub_state = await subscription.async_unsubscribe_topics(
self.hass, self._sub_state
)
await MqttAttributes.async_will_remove_from_hass(self)
await MqttAvailability.async_will_remove_from_hass(self)
await MqttDiscoveryUpdate.async_will_remove_from_hass(self)
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
@callback
@log_messages(self.hass, self.entity_id)
def state_message_received(msg):
"""Handle state MQTT message."""
payload = json.loads(msg.payload)
if STATE in payload and payload[STATE] in POSSIBLE_STATES:
self._state = POSSIBLE_STATES[payload[STATE]]
del payload[STATE]
self._state_attrs.update(payload)
self.async_write_ha_state()
if self._config.get(CONF_STATE_TOPIC):
topics["state_position_topic"] = {
"topic": self._config.get(CONF_STATE_TOPIC),
"msg_callback": state_message_received,
"qos": self._config[CONF_QOS],
}
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state, topics
)
@property
def name(self):
"""Return the name of the vacuum."""
return self._name
@property
def state(self):
"""Return state of vacuum."""
return self._state
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def fan_speed(self):
"""Return fan speed of the vacuum."""
return self._state_attrs.get(FAN_SPEED, 0)
@property
def fan_speed_list(self):
"""Return fan speed list of the vacuum."""
return self._fan_speed_list
@property
def battery_level(self):
"""Return battery level of the vacuum."""
return max(0, min(100, self._state_attrs.get(BATTERY, 0)))
@property
def supported_features(self):
"""Flag supported features."""
return self._supported_features
async def async_start(self):
"""Start the vacuum."""
if self.supported_features & SUPPORT_START == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_START],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_pause(self):
"""Pause the vacuum."""
if self.supported_features & SUPPORT_PAUSE == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_PAUSE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_stop(self, **kwargs):
"""Stop the vacuum."""
if self.supported_features & SUPPORT_STOP == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_STOP],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if (self.supported_features & SUPPORT_FAN_SPEED == 0) or (
fan_speed not in self._fan_speed_list
):
return None
mqtt.async_publish(
self.hass,
self._set_fan_speed_topic,
fan_speed,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_return_to_base(self, **kwargs):
"""Tell the vacuum to return to its dock."""
if self.supported_features & SUPPORT_RETURN_HOME == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_RETURN_TO_BASE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
if self.supported_features & SUPPORT_CLEAN_SPOT == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_CLEAN_SPOT],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_locate(self, **kwargs):
"""Locate the vacuum (usually by playing a song)."""
if self.supported_features & SUPPORT_LOCATE == 0:
return None
mqtt.async_publish(
self.hass,
self._command_topic,
self._config[CONF_PAYLOAD_LOCATE],
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
async def async_send_command(self, command, params=None, **kwargs):
"""Send a command to a vacuum cleaner."""
if self.supported_features & SUPPORT_SEND_COMMAND == 0:
return None
if params:
message = {"command": command}
message.update(params)
message = json.dumps(message)
else:
message = command
mqtt.async_publish(
self.hass,
self._send_command_topic,
message,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
|
import pytest
import matchzoo as mz
@pytest.fixture(scope='module')
def tuner():
model = mz.models.DenseBaseline()
prpr = model.get_default_preprocessor()
train_raw = mz.datasets.toy.load_data('train')
dev_raw = mz.datasets.toy.load_data('dev')
prpr.fit(train_raw)
model.params.update(prpr.context)
model.guess_and_fill_missing_params()
return mz.auto.Tuner(
params=model.params,
train_data=prpr.transform(train_raw, verbose=0),
test_data=prpr.transform(dev_raw, verbose=0)
)
@pytest.mark.parametrize('attr', [
'params',
'train_data',
'test_data',
'fit_kwargs',
'evaluate_kwargs',
'metric',
'mode',
'num_runs',
'callbacks',
'verbose'
])
def test_getters_setters(tuner, attr):
val = getattr(tuner, attr)
setattr(tuner, attr, val)
assert getattr(tuner, attr) is val
def test_tuning(tuner):
tuner.num_runs = 1
assert tuner.tune()
|
import logging
from homeassistant.components.vacuum import SUPPORT_FAN_SPEED
from .irobot_base import SUPPORT_IROBOT, IRobotVacuum
_LOGGER = logging.getLogger(__name__)
ATTR_DETECTED_PAD = "detected_pad"
ATTR_LID_CLOSED = "lid_closed"
ATTR_TANK_PRESENT = "tank_present"
ATTR_TANK_LEVEL = "tank_level"
ATTR_PAD_WETNESS = "spray_amount"
OVERLAP_STANDARD = 67
OVERLAP_DEEP = 85
OVERLAP_EXTENDED = 25
MOP_STANDARD = "Standard"
MOP_DEEP = "Deep"
MOP_EXTENDED = "Extended"
BRAAVA_MOP_BEHAVIORS = [MOP_STANDARD, MOP_DEEP, MOP_EXTENDED]
BRAAVA_SPRAY_AMOUNT = [1, 2, 3]
# Braava Jets can set mopping behavior through fan speed
SUPPORT_BRAAVA = SUPPORT_IROBOT | SUPPORT_FAN_SPEED
class BraavaJet(IRobotVacuum):
"""Braava Jet."""
def __init__(self, roomba, blid):
"""Initialize the Roomba handler."""
super().__init__(roomba, blid)
# Initialize fan speed list
speed_list = []
for behavior in BRAAVA_MOP_BEHAVIORS:
for spray in BRAAVA_SPRAY_AMOUNT:
speed_list.append(f"{behavior}-{spray}")
self._speed_list = speed_list
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_BRAAVA
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
# Mopping behavior and spray amount as fan speed
rank_overlap = self.vacuum_state.get("rankOverlap", {})
behavior = None
if rank_overlap == OVERLAP_STANDARD:
behavior = MOP_STANDARD
elif rank_overlap == OVERLAP_DEEP:
behavior = MOP_DEEP
elif rank_overlap == OVERLAP_EXTENDED:
behavior = MOP_EXTENDED
pad_wetness = self.vacuum_state.get("padWetness", {})
# "disposable" and "reusable" values are always the same
pad_wetness_value = pad_wetness.get("disposable")
return f"{behavior}-{pad_wetness_value}"
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return self._speed_list
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
try:
split = fan_speed.split("-", 1)
behavior = split[0]
spray = int(split[1])
if behavior.capitalize() in BRAAVA_MOP_BEHAVIORS:
behavior = behavior.capitalize()
except IndexError:
_LOGGER.error(
"Fan speed error: expected {behavior}-{spray_amount}, got '%s'",
fan_speed,
)
return
except ValueError:
_LOGGER.error("Spray amount error: expected integer, got '%s'", split[1])
return
if behavior not in BRAAVA_MOP_BEHAVIORS:
_LOGGER.error(
"Mop behavior error: expected one of %s, got '%s'",
str(BRAAVA_MOP_BEHAVIORS),
behavior,
)
return
if spray not in BRAAVA_SPRAY_AMOUNT:
_LOGGER.error(
"Spray amount error: expected one of %s, got '%d'",
str(BRAAVA_SPRAY_AMOUNT),
spray,
)
return
overlap = 0
if behavior == MOP_STANDARD:
overlap = OVERLAP_STANDARD
elif behavior == MOP_DEEP:
overlap = OVERLAP_DEEP
else:
overlap = OVERLAP_EXTENDED
await self.hass.async_add_executor_job(
self.vacuum.set_preference, "rankOverlap", overlap
)
await self.hass.async_add_executor_job(
self.vacuum.set_preference,
"padWetness",
{"disposable": spray, "reusable": spray},
)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
state_attrs = super().device_state_attributes
# Get Braava state
state = self.vacuum_state
detected_pad = state.get("detectedPad")
mop_ready = state.get("mopReady", {})
lid_closed = mop_ready.get("lidClosed")
tank_present = mop_ready.get("tankPresent")
tank_level = state.get("tankLvl")
state_attrs[ATTR_DETECTED_PAD] = detected_pad
state_attrs[ATTR_LID_CLOSED] = lid_closed
state_attrs[ATTR_TANK_PRESENT] = tank_present
state_attrs[ATTR_TANK_LEVEL] = tank_level
return state_attrs
|
from homeassistant.components.light import LightEntity
from homeassistant.const import STATE_OFF, STATE_ON
from tests.common import MockToggleEntity
ENTITIES = []
def init(empty=False):
"""Initialize the platform with entities."""
global ENTITIES
ENTITIES = (
[]
if empty
else [
MockLight("Ceiling", STATE_ON),
MockLight("Ceiling", STATE_OFF),
MockLight(None, STATE_OFF),
]
)
async def async_setup_platform(
hass, config, async_add_entities_callback, discovery_info=None
):
"""Return mock entities."""
async_add_entities_callback(ENTITIES)
class MockLight(MockToggleEntity, LightEntity):
"""Mock light class."""
brightness = None
supported_features = 0
|
from homeassistant import data_entry_flow
from homeassistant.components import ifttt
from homeassistant.core import callback
from tests.async_mock import patch
async def test_config_flow_registers_webhook(hass, aiohttp_client):
"""Test setting up IFTTT and sending webhook."""
with patch("homeassistant.util.get_local_ip", return_value="example.com"):
result = await hass.config_entries.flow.async_init(
"ifttt", context={"source": "user"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM, result
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
webhook_id = result["result"].data["webhook_id"]
ifttt_events = []
@callback
def handle_event(event):
"""Handle IFTTT event."""
ifttt_events.append(event)
hass.bus.async_listen(ifttt.EVENT_RECEIVED, handle_event)
client = await aiohttp_client(hass.http.app)
await client.post(f"/api/webhook/{webhook_id}", json={"hello": "ifttt"})
assert len(ifttt_events) == 1
assert ifttt_events[0].data["webhook_id"] == webhook_id
assert ifttt_events[0].data["hello"] == "ifttt"
# Invalid JSON
await client.post(f"/api/webhook/{webhook_id}", data="not a dict")
assert len(ifttt_events) == 1
# Not a dict
await client.post(f"/api/webhook/{webhook_id}", json="not a dict")
assert len(ifttt_events) == 1
|
import asyncio
import logging
import aiohttp
import tibber
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_NAME, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import discovery
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util
from .const import DATA_HASS_CONFIG, DOMAIN
PLATFORMS = [
"sensor",
]
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_ACCESS_TOKEN): cv.string})},
extra=vol.ALLOW_EXTRA,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up the Tibber component."""
hass.data[DATA_HASS_CONFIG] = config
if DOMAIN not in config:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data=config[DOMAIN],
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
tibber_connection = tibber.Tibber(
access_token=entry.data[CONF_ACCESS_TOKEN],
websession=async_get_clientsession(hass),
time_zone=dt_util.DEFAULT_TIME_ZONE,
)
hass.data[DOMAIN] = tibber_connection
async def _close(event):
await tibber_connection.rt_disconnect()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _close)
try:
await tibber_connection.update_info()
except asyncio.TimeoutError as err:
raise ConfigEntryNotReady from err
except aiohttp.ClientError as err:
_LOGGER.error("Error connecting to Tibber: %s ", err)
return False
except tibber.InvalidLogin as exp:
_LOGGER.error("Failed to login. %s", exp)
return False
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
# set up notify platform, no entry support for notify component yet,
# have to use discovery to load platform.
hass.async_create_task(
discovery.async_load_platform(
hass, "notify", DOMAIN, {CONF_NAME: DOMAIN}, hass.data[DATA_HASS_CONFIG]
)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
tibber_connection = hass.data.get(DOMAIN)
await tibber_connection.rt_disconnect()
return unload_ok
|
import argparse
import glob
import os
import time
import random
COLOURS = (b'\xFF\x00\x00', b'\x00\xFF\x00', b'\x00\x00\xFF',
b'\xFF\xFF\x00', b'\xFF\x00\xFF', b'\x00\xFF\xFF')
def write_binary(driver_path, device_file, payload):
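    """Write raw bytes to a device attribute file under the driver path."""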
with open(os.path.join(driver_path, device_file), 'wb') as open_file:
open_file.write(payload)
def read_string(driver_path, device_file):
with open(os.path.join(driver_path, device_file), 'r') as open_file:
return open_file.read().rstrip('\n')
def write_string(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'w') as open_file:
open_file.write(payload)
def find_devices(vid, pid):
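    """Yield driver paths of razeraccessory devices matching the given vendor and product IDs."""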
driver_paths = glob.glob(os.path.join(
'/sys/bus/hid/drivers/razeraccessory', '*:{0:04X}:{1:04X}.*'.format(vid, pid)))
for driver_path in driver_paths:
device_type_path = os.path.join(driver_path, 'device_type')
if os.path.exists(device_type_path):
yield driver_path
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--skip-standard', action='store_true')
parser.add_argument('--skip-custom', action='store_true')
parser.add_argument('--skip-game-led', action='store_true')
parser.add_argument('--skip-macro-led', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
found_chroma = False
for index, driver_path in enumerate(find_devices(0x1532, 0x0C02), start=1):
found_chroma = True
print("Razer Goliathus {0}\n".format(index))
print("Driver version: {0}".format(
read_string(driver_path, 'version')))
print("Driver firmware version: {0}".format(
read_string(driver_path, 'firmware_version')))
print("Device serial: {0}".format(
read_string(driver_path, 'device_serial')))
print("Device type: {0}".format(
read_string(driver_path, 'device_type')))
print("Device mode: {0}".format(
read_string(driver_path, 'device_mode')))
# Set to static red so that we have something standard
write_binary(driver_path, 'matrix_effect_static', b'\xFF\x00\x00')
if not args.skip_standard:
print("Starting brightness test. Press enter to begin.")
input()
print("Max brightness...", end='')
write_string(driver_path, 'matrix_brightness', '255')
time.sleep(1)
print("brightness ({0})".format(
read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Half brightness...", end='')
write_string(driver_path, 'matrix_brightness', '128')
time.sleep(1)
print("brightness ({0})".format(
read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Zero brightness...", end='')
write_string(driver_path, 'matrix_brightness', '0')
time.sleep(1)
print("brightness ({0})".format(
read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
write_string(driver_path, 'matrix_brightness', '255')
print("Starting reactive tests. Press enter to begin.")
input()
print("Reactive blue")
write_binary(driver_path, 'matrix_effect_reactive', b'\x01\x00\x00\xFF')
time.sleep(2)
print("Trigger reactive")
write_string(driver_path, 'matrix_reactive_trigger', '1')
time.sleep(2)
print("Trigger reactive")
write_string(driver_path, 'matrix_reactive_trigger', '1')
time.sleep(2)
print("Trigger reactive")
write_string(driver_path, 'matrix_reactive_trigger', '1')
time.sleep(2)
print("Trigger reactive")
write_string(driver_path, 'matrix_reactive_trigger', '1')
time.sleep(2)
print("Trigger reactive")
write_string(driver_path, 'matrix_reactive_trigger', '1')
print("Starting other colour effect tests. Press enter to begin.")
input()
print("Green Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\x00')
time.sleep(5)
print("Cyan Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\xFF')
time.sleep(5)
print("Spectrum")
write_binary(driver_path, 'matrix_effect_spectrum', b'\x00')
time.sleep(10)
print("None")
write_binary(driver_path, 'matrix_effect_none', b'\x00')
time.sleep(5)
print("Breathing random")
write_binary(driver_path, 'matrix_effect_breath', b'\x00')
time.sleep(10)
print("Breathing red")
write_binary(driver_path, 'matrix_effect_breath', b'\xFF\x00\x00')
time.sleep(10)
print("Breathing blue-green")
write_binary(driver_path, 'matrix_effect_breath',
b'\x00\xFF\x00\x00\x00\xFF')
time.sleep(10)
if not args.skip_custom:
# row, start_col, end_col
payload_all = b'\x00\x00\x00'
            # add 1 column (end_col + 1 - start_col == 1) of LEDs (1 LED)
payload_all += random.choice(COLOURS)
payload_white = b'\x00\x00\x00'
payload_white += b'\xFF\xFF\xFF'
print("Custom LED matrix colours test")
print("Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_all)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
print("Custom LED matrix partial colours test")
print("Set LED to white. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_white)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
time.sleep(0.5)
print("Finished")
if not found_chroma:
print("No Goliathus found")
|
import time
import pathlib
import os
import logging
from PyQt5.QtCore import QProcess
import pytest
from qutebrowser.misc import editor as editormod
from qutebrowser.utils import usertypes
@pytest.fixture(autouse=True)
def patch_things(config_stub, monkeypatch, stubs):
monkeypatch.setattr(editormod.guiprocess, 'QProcess',
stubs.fake_qprocess())
@pytest.fixture(params=[True, False])
def editor(caplog, qtbot, request):
ed = editormod.ExternalEditor(watch=request.param)
yield ed
with caplog.at_level(logging.ERROR):
ed._remove_file = True
ed._cleanup()
class TestArg:
"""Test argument handling.
Attributes:
editor: The ExternalEditor instance to test.
"""
def test_placeholder(self, config_stub, editor):
"""Test starting editor with placeholder argument."""
config_stub.val.editor.command = ['bin', 'foo', '{}', 'bar']
editor.edit("")
editor._proc._proc.start.assert_called_with(
"bin", ["foo", editor._filename, "bar"])
def test_placeholder_inline(self, config_stub, editor):
"""Test starting editor with placeholder arg inside of another arg."""
config_stub.val.editor.command = ['bin', 'foo{}', 'bar']
editor.edit("")
editor._proc._proc.start.assert_called_with(
"bin", ["foo" + editor._filename, "bar"])
class TestFileHandling:
"""Test creation/deletion of tempfile."""
def test_ok(self, editor):
"""Test file handling when closing with an exit status == 0."""
editor.edit("")
filename = pathlib.Path(editor._filename)
assert filename.exists()
assert filename.name.startswith('qutebrowser-editor-')
editor._proc.finished.emit(0, QProcess.NormalExit)
assert not filename.exists()
@pytest.mark.parametrize('touch', [True, False])
def test_with_filename(self, editor, tmp_path, touch):
"""Test editing a file with an explicit path."""
path = tmp_path / 'foo.txt'
if touch:
path.touch()
editor.edit_file(str(path))
editor._proc.finished.emit(0, QProcess.NormalExit)
assert path.exists()
def test_error(self, editor):
"""Test file handling when closing with an exit status != 0."""
editor.edit("")
filename = pathlib.Path(editor._filename)
assert filename.exists()
editor._proc._proc.exitStatus = lambda: QProcess.CrashExit
editor._proc.finished.emit(1, QProcess.NormalExit)
assert filename.exists()
filename.unlink()
def test_crash(self, editor):
"""Test file handling when closing with a crash."""
editor.edit("")
filename = pathlib.Path(editor._filename)
assert filename.exists()
editor._proc._proc.exitStatus = lambda: QProcess.CrashExit
editor._proc.error.emit(QProcess.Crashed)
editor._proc.finished.emit(0, QProcess.CrashExit)
assert filename.exists()
filename.unlink()
def test_unreadable(self, message_mock, editor, caplog, qtbot):
"""Test file handling when closing with an unreadable file."""
editor.edit("")
filename = pathlib.Path(editor._filename)
assert filename.exists()
filename.chmod(0o277)
if os.access(str(filename), os.R_OK):
# Docker container or similar
pytest.skip("File was still readable")
with caplog.at_level(logging.ERROR):
editor._proc.finished.emit(0, QProcess.NormalExit)
assert not filename.exists()
msg = message_mock.getmsg(usertypes.MessageLevel.error)
assert msg.text.startswith("Failed to read back edited file: ")
def test_unwritable(self, monkeypatch, message_mock, editor,
unwritable_tmp_path, caplog):
"""Test file handling when the initial file is not writable."""
monkeypatch.setattr(editormod.tempfile, 'tempdir',
str(unwritable_tmp_path))
with caplog.at_level(logging.ERROR):
editor.edit("")
msg = message_mock.getmsg(usertypes.MessageLevel.error)
assert msg.text.startswith("Failed to create initial file: ")
assert editor._proc is None
def test_double_edit(self, editor):
editor.edit("")
with pytest.raises(ValueError):
editor.edit("")
def test_backup(self, qtbot, message_mock):
editor = editormod.ExternalEditor(watch=True)
editor.edit('foo')
with qtbot.wait_signal(editor.file_updated, timeout=5000):
_update_file(editor._filename, 'bar')
editor.backup()
msg = message_mock.getmsg(usertypes.MessageLevel.info)
prefix = 'Editor backup at '
assert msg.text.startswith(prefix)
fname = msg.text[len(prefix):]
with qtbot.wait_signal(editor.editing_finished):
editor._proc.finished.emit(0, QProcess.NormalExit)
with open(fname, 'r', encoding='utf-8') as f:
assert f.read() == 'bar'
def test_backup_no_content(self, qtbot, message_mock):
editor = editormod.ExternalEditor(watch=True)
editor.edit('foo')
editor.backup()
# content has not changed, so no backup should be created
assert not message_mock.messages
def test_backup_error(self, qtbot, message_mock, mocker, caplog):
editor = editormod.ExternalEditor(watch=True)
editor.edit('foo')
with qtbot.wait_signal(editor.file_updated):
_update_file(editor._filename, 'bar')
mocker.patch('tempfile.NamedTemporaryFile', side_effect=OSError)
with caplog.at_level(logging.ERROR):
editor.backup()
msg = message_mock.getmsg(usertypes.MessageLevel.error)
assert msg.text.startswith('Failed to create editor backup:')
@pytest.mark.parametrize('initial_text, edited_text', [
('', 'Hello'),
('Hello', 'World'),
('Hällö Wörld', 'Überprüfung'),
('\u2603', '\u2601') # Unicode snowman -> cloud
])
def test_modify(qtbot, editor, initial_text, edited_text):
"""Test if inputs get modified correctly."""
editor.edit(initial_text)
with open(editor._filename, 'r', encoding='utf-8') as f:
assert f.read() == initial_text
with open(editor._filename, 'w', encoding='utf-8') as f:
f.write(edited_text)
with qtbot.wait_signal(editor.file_updated) as blocker:
editor._proc.finished.emit(0, QProcess.NormalExit)
assert blocker.args == [edited_text]
def _update_file(filename, contents):
"""Update the given file and make sure its mtime changed.
This might write the file multiple times, but different systems have
different mtime's, so we can't be sure how long to wait otherwise.
"""
old_mtime = new_mtime = os.stat(filename).st_mtime
while old_mtime == new_mtime:
time.sleep(0.1)
with open(filename, 'w', encoding='utf-8') as f:
f.write(contents)
new_mtime = os.stat(filename).st_mtime
def test_modify_watch(qtbot):
"""Test that saving triggers file_updated when watch=True."""
editor = editormod.ExternalEditor(watch=True)
editor.edit('foo')
with qtbot.wait_signal(editor.file_updated, timeout=3000) as blocker:
_update_file(editor._filename, 'bar')
assert blocker.args == ['bar']
with qtbot.wait_signal(editor.file_updated) as blocker:
_update_file(editor._filename, 'baz')
assert blocker.args == ['baz']
with qtbot.assert_not_emitted(editor.file_updated):
editor._proc.finished.emit(0, QProcess.NormalExit)
def test_failing_watch(qtbot, caplog, monkeypatch):
"""When watching failed, an error should be logged.
Also, updating should still work when closing the process.
"""
editor = editormod.ExternalEditor(watch=True)
monkeypatch.setattr(editor._watcher, 'addPath', lambda _path: False)
with caplog.at_level(logging.ERROR):
editor.edit('foo')
with qtbot.assert_not_emitted(editor.file_updated):
_update_file(editor._filename, 'bar')
with qtbot.wait_signal(editor.file_updated) as blocker:
editor._proc.finished.emit(0, QProcess.NormalExit)
assert blocker.args == ['bar']
message = 'Failed to watch path: {}'.format(editor._filename)
assert caplog.messages[0] == message
def test_failing_unwatch(qtbot, caplog, monkeypatch):
"""When unwatching failed, an error should be logged."""
editor = editormod.ExternalEditor(watch=True)
monkeypatch.setattr(editor._watcher, 'addPath', lambda _path: True)
monkeypatch.setattr(editor._watcher, 'files', lambda: [editor._filename])
monkeypatch.setattr(editor._watcher, 'removePaths', lambda paths: paths)
editor.edit('foo')
with caplog.at_level(logging.ERROR):
editor._proc.finished.emit(0, QProcess.NormalExit)
message = 'Failed to unwatch paths: [{!r}]'.format(editor._filename)
assert caplog.messages[-1] == message
@pytest.mark.parametrize('text, caret_position, result', [
('', 0, (1, 1)),
('a', 0, (1, 1)),
('a\nb', 1, (1, 2)),
('a\nb', 2, (2, 1)),
('a\nb', 3, (2, 2)),
('a\nbb\nccc', 4, (2, 3)),
('a\nbb\nccc', 5, (3, 1)),
('a\nbb\nccc', 8, (3, 4)),
('', None, (1, 1)),
])
def test_calculation(editor, text, caret_position, result):
"""Test calculation for line and column given text and caret_position."""
assert editor._calc_line_and_column(text, caret_position) == result
|
import io
import json
import logging
import re
import ssl
from urllib.parse import urlencode
import certifi
import urllib3
from paasta_tools.paastaapi.exceptions import ApiException, ApiValueError
logger = logging.getLogger(__name__)
class RESTResponse(io.IOBase):
def __init__(self, resp):
self.urllib3_response = resp
self.status = resp.status
self.reason = resp.reason
self.data = resp.data
def getheaders(self):
"""Returns a dictionary of the response headers."""
return self.urllib3_response.getheaders()
def getheader(self, name, default=None):
"""Returns a given response header."""
return self.urllib3_response.getheader(name, default)
class RESTClientObject(object):
def __init__(self, configuration, pools_size=4, maxsize=None):
# urllib3.PoolManager will pass all kw parameters to connectionpool
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/poolmanager.py#L75 # noqa: E501
# https://github.com/shazow/urllib3/blob/f9409436f83aeb79fbaf090181cd81b784f1b8ce/urllib3/connectionpool.py#L680 # noqa: E501
# maxsize is the number of requests to host that are allowed in parallel # noqa: E501
# Custom SSL certificates and client certificates: http://urllib3.readthedocs.io/en/latest/advanced-usage.html # noqa: E501
# cert_reqs
if configuration.verify_ssl:
cert_reqs = ssl.CERT_REQUIRED
else:
cert_reqs = ssl.CERT_NONE
# ca_certs
if configuration.ssl_ca_cert:
ca_certs = configuration.ssl_ca_cert
else:
            # if no CA certificate file is set, use Mozilla's root certificates.
ca_certs = certifi.where()
addition_pool_args = {}
if configuration.assert_hostname is not None:
addition_pool_args['assert_hostname'] = configuration.assert_hostname # noqa: E501
if configuration.retries is not None:
addition_pool_args['retries'] = configuration.retries
if maxsize is None:
if configuration.connection_pool_maxsize is not None:
maxsize = configuration.connection_pool_maxsize
else:
maxsize = 4
# https pool manager
if configuration.proxy:
self.pool_manager = urllib3.ProxyManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
proxy_url=configuration.proxy,
proxy_headers=configuration.proxy_headers,
**addition_pool_args
)
else:
self.pool_manager = urllib3.PoolManager(
num_pools=pools_size,
maxsize=maxsize,
cert_reqs=cert_reqs,
ca_certs=ca_certs,
cert_file=configuration.cert_file,
key_file=configuration.key_file,
**addition_pool_args
)
def request(self, method, url, query_params=None, headers=None,
body=None, post_params=None, _preload_content=True,
_request_timeout=None):
"""Perform requests.
:param method: http request method
:param url: http request url
:param query_params: query parameters in the url
:param headers: http request headers
:param body: request json body, for `application/json`
:param post_params: request post parameters,
`application/x-www-form-urlencoded`
and `multipart/form-data`
:param _preload_content: if False, the urllib3.HTTPResponse object will
be returned without reading/decoding response
data. Default is True.
:param _request_timeout: timeout setting for this request. If one
number provided, it will be total request
timeout. It can also be a pair (tuple) of
(connection, read) timeouts.
"""
method = method.upper()
assert method in ['GET', 'HEAD', 'DELETE', 'POST', 'PUT',
'PATCH', 'OPTIONS']
if post_params and body:
raise ApiValueError(
"body parameter cannot be used with post_params parameter."
)
post_params = post_params or {}
headers = headers or {}
timeout = None
if _request_timeout:
if isinstance(_request_timeout, (int, float)): # noqa: E501,F821
timeout = urllib3.Timeout(total=_request_timeout)
elif (isinstance(_request_timeout, tuple) and
len(_request_timeout) == 2):
timeout = urllib3.Timeout(
connect=_request_timeout[0], read=_request_timeout[1])
if 'Content-Type' not in headers:
headers['Content-Type'] = 'application/json'
try:
# For `POST`, `PUT`, `PATCH`, `OPTIONS`, `DELETE`
if method in ['POST', 'PUT', 'PATCH', 'OPTIONS', 'DELETE']:
if query_params:
url += '?' + urlencode(query_params)
if re.search('json', headers['Content-Type'], re.IGNORECASE):
request_body = None
if body is not None:
request_body = json.dumps(body)
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'application/x-www-form-urlencoded': # noqa: E501
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=False,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
elif headers['Content-Type'] == 'multipart/form-data':
                    # must del headers['Content-Type'], or the correct
                    # Content-Type generated by urllib3 will be
                    # overwritten.
del headers['Content-Type']
r = self.pool_manager.request(
method, url,
fields=post_params,
encode_multipart=True,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
# Pass a `string` parameter directly in the body to support
# other content types than Json when `body` argument is
# provided in serialized form
elif isinstance(body, str) or isinstance(body, bytes):
request_body = body
r = self.pool_manager.request(
method, url,
body=request_body,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
else:
# Cannot generate the request from given parameters
msg = """Cannot prepare a request message for provided
arguments. Please check that your arguments match
declared content type."""
raise ApiException(status=0, reason=msg)
# For `GET`, `HEAD`
else:
r = self.pool_manager.request(method, url,
fields=query_params,
preload_content=_preload_content,
timeout=timeout,
headers=headers)
except urllib3.exceptions.SSLError as e:
msg = "{0}\n{1}".format(type(e).__name__, str(e))
raise ApiException(status=0, reason=msg)
if _preload_content:
r = RESTResponse(r)
# log response body
logger.debug("response body: %s", r.data)
if not 200 <= r.status <= 299:
raise ApiException(http_resp=r)
return r
def GET(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("GET", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def HEAD(self, url, headers=None, query_params=None, _preload_content=True,
_request_timeout=None):
return self.request("HEAD", url,
headers=headers,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
query_params=query_params)
def OPTIONS(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("OPTIONS", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def DELETE(self, url, headers=None, query_params=None, body=None,
_preload_content=True, _request_timeout=None):
return self.request("DELETE", url,
headers=headers,
query_params=query_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def POST(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("POST", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PUT(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PUT", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
def PATCH(self, url, headers=None, query_params=None, post_params=None,
body=None, _preload_content=True, _request_timeout=None):
return self.request("PATCH", url,
headers=headers,
query_params=query_params,
post_params=post_params,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
body=body)
|
from hass_nabucasa import Cloud
import voluptuous as vol
from homeassistant.components.alexa import const as alexa_const
from homeassistant.components.google_assistant import const as ga_c
from homeassistant.const import (
CONF_MODE,
CONF_NAME,
CONF_REGION,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_validation as cv, entityfilter
from homeassistant.loader import bind_hass
from homeassistant.util.aiohttp import MockRequest
from . import account_link, http_api
from .client import CloudClient
from .const import (
CONF_ACCOUNT_LINK_URL,
CONF_ACME_DIRECTORY_SERVER,
CONF_ALEXA,
CONF_ALEXA_ACCESS_TOKEN_URL,
CONF_ALIASES,
CONF_CLOUDHOOK_CREATE_URL,
CONF_COGNITO_CLIENT_ID,
CONF_ENTITY_CONFIG,
CONF_FILTER,
CONF_GOOGLE_ACTIONS,
CONF_GOOGLE_ACTIONS_REPORT_STATE_URL,
CONF_RELAYER,
CONF_REMOTE_API_URL,
CONF_SUBSCRIPTION_INFO_URL,
CONF_USER_POOL_ID,
CONF_VOICE_API_URL,
DOMAIN,
MODE_DEV,
MODE_PROD,
)
from .prefs import CloudPreferences
DEFAULT_MODE = MODE_PROD
SERVICE_REMOTE_CONNECT = "remote_connect"
SERVICE_REMOTE_DISCONNECT = "remote_disconnect"
ALEXA_ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(alexa_const.CONF_DESCRIPTION): cv.string,
vol.Optional(alexa_const.CONF_DISPLAY_CATEGORIES): cv.string,
vol.Optional(CONF_NAME): cv.string,
}
)
GOOGLE_ENTITY_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(CONF_ALIASES): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(ga_c.CONF_ROOM_HINT): cv.string,
}
)
ASSISTANT_SCHEMA = vol.Schema(
{vol.Optional(CONF_FILTER, default=dict): entityfilter.FILTER_SCHEMA}
)
ALEXA_SCHEMA = ASSISTANT_SCHEMA.extend(
{vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: ALEXA_ENTITY_SCHEMA}}
)
GACTIONS_SCHEMA = ASSISTANT_SCHEMA.extend(
{vol.Optional(CONF_ENTITY_CONFIG): {cv.entity_id: GOOGLE_ENTITY_SCHEMA}}
)
# pylint: disable=no-value-for-parameter
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Optional(CONF_MODE, default=DEFAULT_MODE): vol.In(
[MODE_DEV, MODE_PROD]
),
vol.Optional(CONF_COGNITO_CLIENT_ID): str,
vol.Optional(CONF_USER_POOL_ID): str,
vol.Optional(CONF_REGION): str,
vol.Optional(CONF_RELAYER): str,
vol.Optional(CONF_SUBSCRIPTION_INFO_URL): vol.Url(),
vol.Optional(CONF_CLOUDHOOK_CREATE_URL): vol.Url(),
vol.Optional(CONF_REMOTE_API_URL): vol.Url(),
vol.Optional(CONF_ACME_DIRECTORY_SERVER): vol.Url(),
vol.Optional(CONF_ALEXA): ALEXA_SCHEMA,
vol.Optional(CONF_GOOGLE_ACTIONS): GACTIONS_SCHEMA,
vol.Optional(CONF_ALEXA_ACCESS_TOKEN_URL): vol.Url(),
vol.Optional(CONF_GOOGLE_ACTIONS_REPORT_STATE_URL): vol.Url(),
vol.Optional(CONF_ACCOUNT_LINK_URL): vol.Url(),
vol.Optional(CONF_VOICE_API_URL): vol.Url(),
}
)
},
extra=vol.ALLOW_EXTRA,
)
class CloudNotAvailable(HomeAssistantError):
"""Raised when an action requires the cloud but it's not available."""
@bind_hass
@callback
def async_is_logged_in(hass) -> bool:
"""Test if user is logged in."""
return DOMAIN in hass.data and hass.data[DOMAIN].is_logged_in
@bind_hass
@callback
def async_active_subscription(hass) -> bool:
"""Test if user has an active subscription."""
return async_is_logged_in(hass) and not hass.data[DOMAIN].subscription_expired
@bind_hass
async def async_create_cloudhook(hass, webhook_id: str) -> str:
"""Create a cloudhook."""
if not async_is_logged_in(hass):
raise CloudNotAvailable
hook = await hass.data[DOMAIN].cloudhooks.async_create(webhook_id, True)
return hook["cloudhook_url"]
@bind_hass
async def async_delete_cloudhook(hass, webhook_id: str) -> None:
"""Delete a cloudhook."""
if DOMAIN not in hass.data:
raise CloudNotAvailable
await hass.data[DOMAIN].cloudhooks.async_delete(webhook_id)
@bind_hass
@callback
def async_remote_ui_url(hass) -> str:
"""Get the remote UI URL."""
if not async_is_logged_in(hass):
raise CloudNotAvailable
if not hass.data[DOMAIN].client.prefs.remote_enabled:
raise CloudNotAvailable
if not hass.data[DOMAIN].remote.instance_domain:
raise CloudNotAvailable
return f"https://{hass.data[DOMAIN].remote.instance_domain}"
def is_cloudhook_request(request):
"""Test if a request came from a cloudhook.
Async friendly.
"""
return isinstance(request, MockRequest)
async def async_setup(hass, config):
"""Initialize the Home Assistant cloud."""
# Process configs
if DOMAIN in config:
kwargs = dict(config[DOMAIN])
else:
kwargs = {CONF_MODE: DEFAULT_MODE}
# Alexa/Google custom config
alexa_conf = kwargs.pop(CONF_ALEXA, None) or ALEXA_SCHEMA({})
google_conf = kwargs.pop(CONF_GOOGLE_ACTIONS, None) or GACTIONS_SCHEMA({})
# Cloud settings
prefs = CloudPreferences(hass)
await prefs.async_initialize()
# Initialize Cloud
websession = hass.helpers.aiohttp_client.async_get_clientsession()
client = CloudClient(hass, prefs, websession, alexa_conf, google_conf)
cloud = hass.data[DOMAIN] = Cloud(client, **kwargs)
async def _shutdown(event):
"""Shutdown event."""
await cloud.stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, _shutdown)
async def _service_handler(service):
"""Handle service for cloud."""
if service.service == SERVICE_REMOTE_CONNECT:
await cloud.remote.connect()
await prefs.async_update(remote_enabled=True)
elif service.service == SERVICE_REMOTE_DISCONNECT:
await cloud.remote.disconnect()
await prefs.async_update(remote_enabled=False)
hass.helpers.service.async_register_admin_service(
DOMAIN, SERVICE_REMOTE_CONNECT, _service_handler
)
hass.helpers.service.async_register_admin_service(
DOMAIN, SERVICE_REMOTE_DISCONNECT, _service_handler
)
loaded = False
async def _on_connect():
"""Discover RemoteUI binary sensor."""
nonlocal loaded
# Prevent multiple discovery
if loaded:
return
loaded = True
await hass.helpers.discovery.async_load_platform(
"binary_sensor", DOMAIN, {}, config
)
await hass.helpers.discovery.async_load_platform("stt", DOMAIN, {}, config)
await hass.helpers.discovery.async_load_platform("tts", DOMAIN, {}, config)
cloud.iot.register_on_connect(_on_connect)
await cloud.start()
await http_api.async_setup(hass)
account_link.async_setup(hass)
return True
|
import asyncio
from datetime import timedelta
import logging
import requests
from tesla_powerwall import MissingAttributeError, Powerwall, PowerwallUnreachableError
import voluptuous as vol
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import CONF_IP_ADDRESS
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import entity_registry
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import (
DOMAIN,
POWERWALL_API_CHANGED,
POWERWALL_API_CHARGE,
POWERWALL_API_DEVICE_TYPE,
POWERWALL_API_GRID_STATUS,
POWERWALL_API_METERS,
POWERWALL_API_SERIAL_NUMBERS,
POWERWALL_API_SITE_INFO,
POWERWALL_API_SITEMASTER,
POWERWALL_API_STATUS,
POWERWALL_COORDINATOR,
POWERWALL_HTTP_SESSION,
POWERWALL_OBJECT,
UPDATE_INTERVAL,
)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({vol.Required(CONF_IP_ADDRESS): cv.string})},
extra=vol.ALLOW_EXTRA,
)
PLATFORMS = ["binary_sensor", "sensor"]
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Tesla Powerwall component."""
hass.data.setdefault(DOMAIN, {})
conf = config.get(DOMAIN)
if not conf:
return True
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN,
context={"source": SOURCE_IMPORT},
data=conf,
)
)
return True
async def _migrate_old_unique_ids(hass, entry_id, powerwall_data):
serial_numbers = powerwall_data[POWERWALL_API_SERIAL_NUMBERS]
site_info = powerwall_data[POWERWALL_API_SITE_INFO]
@callback
def _async_migrator(entity_entry: entity_registry.RegistryEntry):
parts = entity_entry.unique_id.split("_")
# Check if the unique_id starts with the serial_numbers of the powerwalls
if parts[0 : len(serial_numbers)] != serial_numbers:
# The old unique_id ended with the nominal_system_energy_kWh so we can use that
# to find the old base unique_id and extract the device_suffix.
normalized_energy_index = (
len(parts) - 1 - parts[::-1].index(str(site_info.nominal_system_energy))
)
device_suffix = parts[normalized_energy_index + 1 :]
new_unique_id = "_".join([*serial_numbers, *device_suffix])
_LOGGER.info(
"Migrating unique_id from [%s] to [%s]",
entity_entry.unique_id,
new_unique_id,
)
return {"new_unique_id": new_unique_id}
return None
await entity_registry.async_migrate_entries(hass, entry_id, _async_migrator)
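# Worked example of the migration above (values are illustrative): an old
# unique_id such as "TG0123_TG0456_13500_battery" with serial_numbers
# ["ABC123", "DEF456"] and nominal_system_energy 13500 is rewritten to
# "ABC123_DEF456_battery".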
async def _async_handle_api_changed_error(
hass: HomeAssistant, error: MissingAttributeError
):
# The error might include some important information about what exactly changed.
_LOGGER.error(str(error))
hass.components.persistent_notification.async_create(
"It seems like your powerwall uses an unsupported version. "
"Please update the software of your powerwall or if it is "
"already the newest consider reporting this issue.\nSee logs for more information",
title="Unknown powerwall software version",
)
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Tesla Powerwall from a config entry."""
entry_id = entry.entry_id
hass.data[DOMAIN].setdefault(entry_id, {})
http_session = requests.Session()
power_wall = Powerwall(entry.data[CONF_IP_ADDRESS], http_session=http_session)
try:
await hass.async_add_executor_job(power_wall.detect_and_pin_version)
await hass.async_add_executor_job(_fetch_powerwall_data, power_wall)
powerwall_data = await hass.async_add_executor_job(call_base_info, power_wall)
except PowerwallUnreachableError as err:
http_session.close()
raise ConfigEntryNotReady from err
except MissingAttributeError as err:
http_session.close()
await _async_handle_api_changed_error(hass, err)
return False
await _migrate_old_unique_ids(hass, entry_id, powerwall_data)
async def async_update_data():
"""Fetch data from API endpoint."""
# Check if we had an error before
_LOGGER.debug("Checking if update failed")
if not hass.data[DOMAIN][entry.entry_id][POWERWALL_API_CHANGED]:
_LOGGER.debug("Updating data")
try:
return await hass.async_add_executor_job(
_fetch_powerwall_data, power_wall
)
except PowerwallUnreachableError as err:
raise UpdateFailed("Unable to fetch data from powerwall") from err
except MissingAttributeError as err:
await _async_handle_api_changed_error(hass, err)
hass.data[DOMAIN][entry.entry_id][POWERWALL_API_CHANGED] = True
# Returns the cached data. This data can also be None
return hass.data[DOMAIN][entry.entry_id][POWERWALL_COORDINATOR].data
else:
return hass.data[DOMAIN][entry.entry_id][POWERWALL_COORDINATOR].data
coordinator = DataUpdateCoordinator(
hass,
_LOGGER,
name="Powerwall site",
update_method=async_update_data,
update_interval=timedelta(seconds=UPDATE_INTERVAL),
)
hass.data[DOMAIN][entry.entry_id] = powerwall_data
hass.data[DOMAIN][entry.entry_id].update(
{
POWERWALL_OBJECT: power_wall,
POWERWALL_COORDINATOR: coordinator,
POWERWALL_HTTP_SESSION: http_session,
POWERWALL_API_CHANGED: False,
}
)
await coordinator.async_refresh()
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
def call_base_info(power_wall):
"""Wrap powerwall properties to be a callable."""
serial_numbers = power_wall.get_serial_numbers()
# Make sure the serial numbers always have the same order
serial_numbers.sort()
return {
POWERWALL_API_SITE_INFO: power_wall.get_site_info(),
POWERWALL_API_STATUS: power_wall.get_status(),
POWERWALL_API_DEVICE_TYPE: power_wall.get_device_type(),
POWERWALL_API_SERIAL_NUMBERS: serial_numbers,
}
def _fetch_powerwall_data(power_wall):
"""Process and update powerwall data."""
return {
POWERWALL_API_CHARGE: power_wall.get_charge(),
POWERWALL_API_SITEMASTER: power_wall.get_sitemaster(),
POWERWALL_API_METERS: power_wall.get_meters(),
POWERWALL_API_GRID_STATUS: power_wall.get_grid_status(),
}
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
hass.data[DOMAIN][entry.entry_id][POWERWALL_HTTP_SESSION].close()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
|
from __future__ import division
from __future__ import print_function
from distutils.util import strtobool
import hashlib
import os
import shutil
import tarfile
import tempfile
import zipfile
import filelock
from six.moves.urllib import request
import sys
import time
from chainer.dataset.download import get_dataset_directory
from chainer.dataset.download import get_dataset_root
def _reporthook(count, block_size, total_size):
global start_time
if count == 0:
start_time = time.time()
print(' % Total Recv Speed Time left')
return
duration = time.time() - start_time
progress_size = count * block_size
try:
speed = progress_size / duration
except ZeroDivisionError:
speed = float('inf')
percent = progress_size / total_size * 100
eta = int((total_size - progress_size) / speed)
sys.stdout.write(
'\r{:3.0f} {:4.0f}MiB {:4.0f}MiB {:6.0f}KiB/s {:4d}:{:02d}:{:02d}'
.format(
percent, total_size / (1 << 20), progress_size / (1 << 20),
speed / (1 << 10), eta // 60 // 60, (eta // 60) % 60, eta % 60))
sys.stdout.flush()
def cached_download(url):
"""Downloads a file and caches it.
This is different from the original
:func:`~chainer.dataset.cached_download` in that the download
progress is reported. Note that this progress report can be disabled
by setting the environment variable `CHAINERCV_DOWNLOAD_REPORT` to `'OFF'`.
It downloads a file from the URL if there is no corresponding cache. After
the download, this function stores a cache to the directory under the
dataset root (see :func:`set_dataset_root`). If there is already a cache
for the given URL, it just returns the path to the cache without
downloading the same file.
Args:
url (string): URL to download from.
Returns:
string: Path to the downloaded file.
"""
cache_root = os.path.join(get_dataset_root(), '_dl_cache')
try:
os.makedirs(cache_root)
except OSError:
if not os.path.exists(cache_root):
raise
lock_path = os.path.join(cache_root, '_dl_lock')
urlhash = hashlib.md5(url.encode('utf-8')).hexdigest()
cache_path = os.path.join(cache_root, urlhash)
with filelock.FileLock(lock_path):
if os.path.exists(cache_path):
return cache_path
temp_root = tempfile.mkdtemp(dir=cache_root)
try:
temp_path = os.path.join(temp_root, 'dl')
if strtobool(os.getenv('CHAINERCV_DOWNLOAD_REPORT', 'ON')):
print('Downloading ...')
print('From: {:s}'.format(url))
print('To: {:s}'.format(cache_path))
request.urlretrieve(url, temp_path, _reporthook)
else:
request.urlretrieve(url, temp_path)
with filelock.FileLock(lock_path):
shutil.move(temp_path, cache_path)
finally:
shutil.rmtree(temp_root)
return cache_path
def download_model(url):
"""Downloads a model file and puts it under model directory.
It downloads a file from the URL and puts it under model directory.
For example, if :obj:`url` is `http://example.com/subdir/model.npz`,
the pretrained weights file will be saved to
`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/model.npz`.
If there is already a file at the destination path,
it just returns the path without downloading the same file.
Args:
url (string): URL to download from.
Returns:
string: Path to the downloaded file.
"""
# To support ChainerMN, the target directory should be locked.
with filelock.FileLock(os.path.join(
get_dataset_directory(os.path.join('pfnet', 'chainercv', '.lock')),
'models.lock')):
root = get_dataset_directory(
os.path.join('pfnet', 'chainercv', 'models'))
basename = os.path.basename(url)
path = os.path.join(root, basename)
if not os.path.exists(path):
cache_path = cached_download(url)
os.rename(cache_path, path)
return path
def extractall(file_path, destination, ext):
"""Extracts an archive file.
This function extracts an archive file to a destination.
Args:
file_path (string): The path of a file to be extracted.
destination (string): A directory path. The archive file
will be extracted under this directory.
ext (string): An extension suffix of the archive file.
This function supports :obj:`'.zip'`, :obj:`'.tar'`,
:obj:`'.gz'` and :obj:`'.tgz'`.
"""
if ext == '.zip':
with zipfile.ZipFile(file_path, 'r') as z:
z.extractall(destination)
elif ext == '.tar':
with tarfile.TarFile(file_path, 'r') as t:
t.extractall(destination)
elif ext == '.gz' or ext == '.tgz':
with tarfile.open(file_path, 'r:gz') as t:
t.extractall(destination)
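# A minimal usage sketch (illustrative only; the URL and paths below are
# hypothetical):
#
#     path = cached_download('http://example.com/datasets/sample.tar.gz')
#     extractall(path, '/tmp/sample_dataset', '.gz')
#
# download_model() follows the same pattern but stores the file under
# $CHAINER_DATASET_ROOT/pfnet/chainercv/models/ instead of extracting it.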
|
from os import environ
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks
from autobahn.wamp.types import SubscribeOptions
from autobahn.twisted.wamp import ApplicationSession, ApplicationRunner
class Component(ApplicationSession):
"""
An application component that subscribes and receives events
of no payload and of complex payload, and stops after 5 seconds.
"""
@inlineCallbacks
def onJoin(self, details):
print("session attached")
self.received = 0
def on_heartbeat(details=None):
print("heartbeat (publication ID {})".format(details.publication))
yield self.subscribe(
on_heartbeat, 'com.myapp.heartbeat',
options=SubscribeOptions(details_arg='details')
)
def on_topic2(a, b, c=None, d=None):
print("Got event: {} {} {} {}".format(a, b, c, d))
yield self.subscribe(on_topic2, 'com.myapp.topic2')
reactor.callLater(5, self.leave)
def onDisconnect(self):
print("disconnected")
reactor.stop()
if __name__ == '__main__':
url = environ.get("AUTOBAHN_DEMO_ROUTER", "ws://127.0.0.1:8080/ws")
realm = "crossbardemo"
runner = ApplicationRunner(url, realm)
runner.run(Component)
|
class SettingsEntry(object):
"""
This is an interface representing an entry in the settings file
"""
def __init__(self, name=None):
self.name = name
def __str__(self):
return str(self.serialize())
def serialize(self):
return {
'name': self.name
}
def __eq__(self, other):
"""
Compare two objects for equality based on their attributes
:param other: the object to compare against
:return: True if both objects' attribute dictionaries are equal
"""
return self.__dict__ == other.__dict__
|
import boto3
import os
import pickle
from src.config import CACHE_PATH, FINDINGS_S3_BUCKET
from src.single_layer_network import list_findings
from src.training_data import load_all_training_tiles, tag_with_locations
from src.training_visualization import render_results_for_analysis
def post_findings_to_s3(raster_data_paths, model, training_info, bands, render_results):
"""Aggregate findings from all NAIPs into a pickled list, post to S3."""
findings = []
for path in raster_data_paths:
labels, images = load_all_training_tiles(path, bands)
if len(labels) == 0 or len(images) == 0:
print("WARNING, there is a borked naip image file")
continue
false_positives, fp_images = list_findings(labels, images, model)
path_parts = path.split('/')
filename = path_parts[len(path_parts) - 1]
print("FINDINGS: {} false pos of {} tiles, from {}".format(
len(false_positives), len(images), filename))
if render_results:
# render JPEGs showing findings
render_results_for_analysis([path], false_positives, fp_images, training_info['bands'],
training_info['tile_size'])
# combine findings for all NAIP images analyzed for the region
[findings.append(f) for f in tag_with_locations(fp_images, false_positives,
training_info['tile_size'],
training_info['naip_state'])]
# dump combined findings to disk as a pickle
try:
os.mkdir(CACHE_PATH + training_info['naip_state'])
except OSError:
pass
naip_path_in_cache_dir = training_info['naip_state'] + '/' + 'findings.pickle'
local_path = CACHE_PATH + naip_path_in_cache_dir
with open(local_path, 'wb') as outfile:
pickle.dump(findings, outfile)
# push pickle to S3
s3_client = boto3.client('s3')
s3_client.upload_file(local_path, FINDINGS_S3_BUCKET, naip_path_in_cache_dir)
|
import urllib2
import time
from datetime import datetime
import diamond.collector
class WebsiteMonitorCollector(diamond.collector.Collector):
"""
Gather HTTP response code and Duration of HTTP request
"""
def get_default_config_help(self):
config_help = super(WebsiteMonitorCollector,
self).get_default_config_help()
config_help.update({
'URL': "FQDN of HTTP endpoint to test",
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
default_config = super(WebsiteMonitorCollector,
self).get_default_config()
default_config['URL'] = ''
default_config['path'] = 'websitemonitor'
return default_config
def collect(self):
req = urllib2.Request('%s' % (self.config['URL']))
try:
# time in seconds since epoch as a floating number
start_time = time.time()
# human-readable time e.g November 25, 2013 18:15:56
st = datetime.fromtimestamp(start_time
).strftime('%B %d, %Y %H:%M:%S')
self.log.debug('Start time: %s' % (st))
resp = urllib2.urlopen(req)
# time in seconds since epoch as a floating number
end_time = time.time()
# human-readable end time e.g. November 25, 2013 18:15:56
et = datetime.fromtimestamp(end_time).strftime('%B %d, %Y %H:%M:%S')
self.log.debug('End time: %s' % (et))
# Response time in milliseconds
rt = int(format((end_time - start_time) * 1000, '.0f'))
# Publish metrics
self.publish('response_time.%s' % (resp.code), rt,
metric_type='COUNTER')
# urllib2 raises HTTPError for non-2xx responses; it carries the status code
except urllib2.HTTPError as e:
if e.code != 200:
# time in seconds since epoch as a floating number
end_time = time.time()
# Response time in milliseconds
rt = int(format((end_time - start_time) * 1000, '.0f'))
# Publish metrics -- this records a failure; rt captures the
# elapsed time until the error response, along with the HTTP status code
self.publish('response_time.%s' % (e.code), rt,
metric_type='COUNTER')
except IOError as e:
self.log.error('Unable to open %s' % (self.config['URL']))
except Exception as e:
self.log.error("Unknown error opening url: %s", e)
|
from functools import wraps
def endpoint(interface_name, function_name, in_sig=None, out_sig=None, byte_arrays=False):
"""
DBus Endpoint
:param interface_name: DBus Interface name
:type interface_name: str
:param function_name: DBus Method name
:type function_name: str
:param in_sig: DBus parameter signature
:type in_sig: str
:param out_sig: DBus return signature
:type out_sig: str
:param byte_arrays: is Byte Array
:type byte_arrays: bool
:return: Function
:rtype: callable
"""
# pylint: disable=missing-docstring
def inner_render(func):
def wrapped(*args, **kwargs):
return func(*args, **kwargs)
wrapped.endpoint = True
wrapped.interface = interface_name
wrapped.name = function_name
wrapped.in_sig = in_sig
wrapped.out_sig = out_sig
wrapped.byte_arrays = byte_arrays
wrapped.code = func.__code__
wrapped.globals = func.__globals__
wrapped.defaults = func.__defaults__
wrapped.closure = func.__closure__
return wraps(func)(wrapped)
return inner_render
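# A minimal usage sketch (hypothetical interface/method names, not part of
# this module):
#
#     class ExampleService(object):
#         @endpoint('org.example.Service', 'Echo', in_sig='s', out_sig='s')
#         def echo(self, value):
#             return value
#
# The wrapped method keeps working as a normal callable, while the metadata
# (wrapped.interface, wrapped.name, wrapped.in_sig, ...) can later be read
# when registering the method on a DBus object.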
|
import pytest
from jinja2 import Environment
from jinja2.bccache import Bucket
from jinja2.bccache import FileSystemBytecodeCache
from jinja2.bccache import MemcachedBytecodeCache
from jinja2.exceptions import TemplateNotFound
@pytest.fixture
def env(package_loader, tmp_path):
bytecode_cache = FileSystemBytecodeCache(str(tmp_path))
return Environment(loader=package_loader, bytecode_cache=bytecode_cache)
class TestByteCodeCache:
def test_simple(self, env):
tmpl = env.get_template("test.html")
assert tmpl.render().strip() == "BAR"
pytest.raises(TemplateNotFound, env.get_template, "missing.html")
class MockMemcached:
class Error(Exception):
pass
key = None
value = None
timeout = None
def get(self, key):
return self.value
def set(self, key, value, timeout=None):
self.key = key
self.value = value
self.timeout = timeout
def get_side_effect(self, key):
raise self.Error()
def set_side_effect(self, *args):
raise self.Error()
class TestMemcachedBytecodeCache:
def test_dump_load(self):
memcached = MockMemcached()
m = MemcachedBytecodeCache(memcached)
b = Bucket(None, "key", "")
b.code = "code"
m.dump_bytecode(b)
assert memcached.key == "jinja2/bytecode/key"
b = Bucket(None, "key", "")
m.load_bytecode(b)
assert b.code == "code"
def test_exception(self):
memcached = MockMemcached()
memcached.get = memcached.get_side_effect
memcached.set = memcached.set_side_effect
m = MemcachedBytecodeCache(memcached)
b = Bucket(None, "key", "")
b.code = "code"
m.dump_bytecode(b)
m.load_bytecode(b)
m.ignore_memcache_errors = False
with pytest.raises(MockMemcached.Error):
m.dump_bytecode(b)
with pytest.raises(MockMemcached.Error):
m.load_bytecode(b)
|
import copy as cp
import numpy as np
from ..epochs import Epochs
from ..proj import compute_proj_evoked, compute_proj_epochs
from ..utils import logger, verbose, warn
from ..io.pick import pick_types
from ..io import make_eeg_average_ref_proj
from .ecg import find_ecg_events
from .eog import find_eog_events
def _safe_del_key(dict_, key):
"""Aux function.
Use this function when preparing rejection parameters
instead of directly deleting keys.
"""
if key in dict_:
del dict_[key]
def _compute_exg_proj(mode, raw, raw_event, tmin, tmax,
n_grad, n_mag, n_eeg, l_freq, h_freq,
average, filter_length, n_jobs, ch_name,
reject, flat, bads, avg_ref, no_proj, event_id,
exg_l_freq, exg_h_freq, tstart, qrs_threshold,
filter_method, iir_params, return_drop_log, copy,
meg, verbose):
"""Compute SSP/PCA projections for ECG or EOG artifacts."""
raw = raw.copy() if copy else raw
del copy
raw.load_data() # we will filter it later
if no_proj:
projs = []
else:
projs = cp.deepcopy(raw.info['projs'])
logger.info('Including %d SSP projectors from raw file'
% len(projs))
if avg_ref:
eeg_proj = make_eeg_average_ref_proj(raw.info)
projs.append(eeg_proj)
if raw_event is None:
raw_event = raw
assert mode in ('ECG', 'EOG') # internal function
logger.info('Running %s SSP computation' % mode)
if mode == 'ECG':
events, _, _ = find_ecg_events(raw_event, ch_name=ch_name,
event_id=event_id, l_freq=exg_l_freq,
h_freq=exg_h_freq, tstart=tstart,
qrs_threshold=qrs_threshold,
filter_length=filter_length)
else: # mode == 'EOG':
events = find_eog_events(raw_event, event_id=event_id,
l_freq=exg_l_freq, h_freq=exg_h_freq,
filter_length=filter_length, ch_name=ch_name,
tstart=tstart)
# Check to make sure we actually got at least one usable event
if events.shape[0] < 1:
warn('No %s events found, returning None for projs' % mode)
return (None, events) + (([],) if return_drop_log else ())
logger.info('Computing projector')
my_info = cp.deepcopy(raw.info)
my_info['bads'] += bads
# Handle rejection parameters
if reject is not None: # make sure they didn't pass None
if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'grad')
if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'mag')
if len(pick_types(my_info, meg=False, eeg=True, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'eeg')
if len(pick_types(my_info, meg=False, eeg=False, eog=True,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(reject, 'eog')
if flat is not None: # make sure they didn't pass None
if len(pick_types(my_info, meg='grad', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'grad')
if len(pick_types(my_info, meg='mag', eeg=False, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'mag')
if len(pick_types(my_info, meg=False, eeg=True, eog=False,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'eeg')
if len(pick_types(my_info, meg=False, eeg=False, eog=True,
ref_meg=False, exclude='bads')) == 0:
_safe_del_key(flat, 'eog')
# exclude bad channels from projection
# keep reference channels if compensation channels are present
ref_meg = len(my_info['comps']) > 0
picks = pick_types(my_info, meg=True, eeg=True, eog=True, ecg=True,
ref_meg=ref_meg, exclude='bads')
raw.filter(l_freq, h_freq, picks=picks, filter_length=filter_length,
n_jobs=n_jobs, method=filter_method, iir_params=iir_params,
l_trans_bandwidth=0.5, h_trans_bandwidth=0.5,
phase='zero-double', fir_design='firwin2')
epochs = Epochs(raw, events, None, tmin, tmax, baseline=None, preload=True,
picks=picks, reject=reject, flat=flat, proj=True)
drop_log = epochs.drop_log
if epochs.events.shape[0] < 1:
warn('No good epochs found, returning None for projs')
return (None, events) + ((drop_log,) if return_drop_log else ())
if average:
evoked = epochs.average()
ev_projs = compute_proj_evoked(evoked, n_grad=n_grad, n_mag=n_mag,
n_eeg=n_eeg, meg=meg)
else:
ev_projs = compute_proj_epochs(epochs, n_grad=n_grad, n_mag=n_mag,
n_eeg=n_eeg, n_jobs=n_jobs, meg=meg)
for p in ev_projs:
p['desc'] = mode + "-" + p['desc']
projs.extend(ev_projs)
logger.info('Done.')
return (projs, events) + ((drop_log,) if return_drop_log else ())
@verbose
def compute_proj_ecg(raw, raw_event=None, tmin=-0.2, tmax=0.4,
n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
average=True, filter_length='10s', n_jobs=1,
ch_name=None, reject=dict(grad=2000e-13, mag=3000e-15,
eeg=50e-6, eog=250e-6),
flat=None, bads=[], avg_ref=False,
no_proj=False, event_id=999, ecg_l_freq=5, ecg_h_freq=35,
tstart=0., qrs_threshold='auto', filter_method='fir',
iir_params=None, copy=True, return_drop_log=False,
meg='separate', verbose=None):
"""Compute SSP (signal-space projection) vectors for ECG artifacts.
%(compute_proj_ecg)s
.. note:: Raw data will be loaded if it hasn't been preloaded already.
Parameters
----------
raw : mne.io.Raw
Raw input file.
raw_event : mne.io.Raw or None
Raw file to use for event detection (if None, raw is used).
tmin : float
Time before event in seconds.
tmax : float
Time after event in seconds.
n_grad : int
Number of SSP vectors for gradiometers.
n_mag : int
Number of SSP vectors for magnetometers.
n_eeg : int
Number of SSP vectors for EEG.
l_freq : float | None
Filter low cut-off frequency for the data channels in Hz.
h_freq : float | None
Filter high cut-off frequency for the data channels in Hz.
average : bool
Compute SSP after averaging. Default is True.
filter_length : str | int | None
Number of taps to use for filtering.
%(n_jobs)s
ch_name : str | None
Channel to use for ECG detection (Required if no ECG found).
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
bads : list
List with (additional) bad channels.
avg_ref : bool
Add EEG average reference proj.
no_proj : bool
Exclude the SSP projectors currently in the fiff file.
event_id : int
ID to use for events.
ecg_l_freq : float
Low pass frequency applied to the ECG channel for event detection.
ecg_h_freq : float
High pass frequency applied to the ECG channel for event detection.
tstart : float
Start artifact detection after tstart seconds.
qrs_threshold : float | str
Between 0 and 1. qrs detection threshold. Can also be "auto" to
automatically choose the threshold that generates a reasonable
number of heartbeats (40-160 beats / min).
filter_method : str
Method for filtering ('iir' or 'fir').
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
copy : bool
If False, filtering raw data is done in place. Defaults to True.
return_drop_log : bool
If True, return the drop log.
.. versionadded:: 0.15
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
proj : list
Computed SSP projectors.
ecg_events : ndarray
Detected ECG events.
drop_log : list
The drop log, if requested.
See Also
--------
find_ecg_events
create_ecg_epochs
Notes
-----
Filtering is applied to the ECG channel while finding events using
``ecg_l_freq`` and ``ecg_h_freq``, and then to the ``raw`` instance
using ``l_freq`` and ``h_freq`` before creation of the epochs used to
create the projectors.
"""
return _compute_exg_proj(
'ECG', raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg,
l_freq, h_freq, average, filter_length, n_jobs, ch_name, reject, flat,
bads, avg_ref, no_proj, event_id, ecg_l_freq, ecg_h_freq, tstart,
qrs_threshold, filter_method, iir_params, return_drop_log, copy,
meg, verbose)
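# A short usage sketch (illustrative; ``raw`` is any preloaded mne.io.Raw):
#
#     projs, ecg_events = compute_proj_ecg(raw, n_grad=1, n_mag=1, n_eeg=0)
#     raw.add_proj(projs)
#
# The returned projectors are prefixed with "ECG-" in their description by
# _compute_exg_proj above.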
@verbose
def compute_proj_eog(raw, raw_event=None, tmin=-0.2, tmax=0.2,
n_grad=2, n_mag=2, n_eeg=2, l_freq=1.0, h_freq=35.0,
average=True, filter_length='10s', n_jobs=1,
reject=dict(grad=2000e-13, mag=3000e-15, eeg=500e-6,
eog=np.inf), flat=None, bads=[],
avg_ref=False, no_proj=False, event_id=998, eog_l_freq=1,
eog_h_freq=10, tstart=0., filter_method='fir',
iir_params=None, ch_name=None, copy=True,
return_drop_log=False, meg='separate', verbose=None):
"""Compute SSP (signal-space projection) vectors for EOG artifacts.
%(compute_proj_eog)s
.. note:: Raw data must be preloaded.
Parameters
----------
raw : mne.io.Raw
Raw input file.
raw_event : mne.io.Raw or None
Raw file to use for event detection (if None, raw is used).
tmin : float
Time before event in seconds.
tmax : float
Time after event in seconds.
n_grad : int
Number of SSP vectors for gradiometers.
n_mag : int
Number of SSP vectors for magnetometers.
n_eeg : int
Number of SSP vectors for EEG.
l_freq : float | None
Filter low cut-off frequency for the data channels in Hz.
h_freq : float | None
Filter high cut-off frequency for the data channels in Hz.
average : bool
Compute SSP after averaging. Default is True.
filter_length : str | int | None
Number of taps to use for filtering.
%(n_jobs)s
reject : dict | None
Epoch rejection configuration (see Epochs).
flat : dict | None
Epoch flat configuration (see Epochs).
bads : list
List with (additional) bad channels.
avg_ref : bool
Add EEG average reference proj.
no_proj : bool
Exclude the SSP projectors currently in the fiff file.
event_id : int
ID to use for events.
eog_l_freq : float
Low pass frequency applied to the EOG channel for event detection.
eog_h_freq : float
High pass frequency applied to the EOG channel for event detection.
tstart : float
Start artifact detection after tstart seconds.
filter_method : str
Method for filtering ('iir' or 'fir').
iir_params : dict | None
Dictionary of parameters to use for IIR filtering.
See mne.filter.construct_iir_filter for details. If iir_params
is None and method="iir", 4th order Butterworth will be used.
ch_name : str | None
If not None, specify EOG channel name.
copy : bool
If False, filtering raw data is done in place. Defaults to True.
return_drop_log : bool
If True, return the drop log.
.. versionadded:: 0.15
meg : str
Can be 'separate' (default) or 'combined' to compute projectors
for magnetometers and gradiometers separately or jointly.
If 'combined', ``n_mag == n_grad`` is required and the number of
projectors computed for MEG will be ``n_mag``.
.. versionadded:: 0.18
%(verbose)s
Returns
-------
proj : list
Computed SSP projectors.
eog_events : ndarray
Detected EOG events.
drop_log : list
The drop log, if requested.
See Also
--------
find_eog_events
create_eog_epochs
Notes
-----
Filtering is applied to the EOG channel while finding events using
``eog_l_freq`` and ``eog_h_freq``, and then to the ``raw`` instance
using ``l_freq`` and ``h_freq`` before creation of the epochs used to
create the projectors.
"""
return _compute_exg_proj(
'EOG', raw, raw_event, tmin, tmax, n_grad, n_mag, n_eeg,
l_freq, h_freq, average, filter_length, n_jobs, ch_name, reject, flat,
bads, avg_ref, no_proj, event_id, eog_l_freq, eog_h_freq, tstart,
'auto', filter_method, iir_params, return_drop_log, copy, meg,
verbose)
|
import logging
from circuit_webhook import Circuit
from homeassistant.components.notify import BaseNotificationService
from homeassistant.const import CONF_URL
_LOGGER = logging.getLogger(__name__)
def get_service(hass, config, discovery_info=None):
"""Get the Unify Circuit notification service."""
if discovery_info is None:
return None
return CircuitNotificationService(discovery_info)
class CircuitNotificationService(BaseNotificationService):
"""Implement the notification service for Unify Circuit."""
def __init__(self, config):
"""Initialize the service."""
self.webhook_url = config[CONF_URL]
def send_message(self, message=None, **kwargs):
"""Send a message to the webhook."""
webhook_url = self.webhook_url
if webhook_url and message:
try:
circuit_message = Circuit(url=webhook_url)
circuit_message.post(text=message)
except RuntimeError as err:
_LOGGER.error("Could not send notification. Error: %s", err)
|
import os
import sys
from pathlib import Path
PY3 = sys.version_info >= (3, 0)
test_root = os.path.abspath(os.path.dirname(__file__))
tmp_dir = Path(test_root, "tmp")
test_dict = {
"key1": "value1",
"not$allowed": "fine_value",
"BigCamel": "hi",
"alist": [{"a": 1}],
"Key 2": {"Key 3": "Value 3", "Key4": {"Key5": "Value5"}},
}
extended_test_dict = {
3: "howdy",
"not": "true",
(3, 4): "test",
"_box_config": True,
"CamelCase": "21",
"321CamelCase": 321,
False: "tree",
"tuples_galore": ({"item": 3}, ({"item": 4}, 5)),
}
extended_test_dict.update(test_dict) # type: ignore
data_json_file = os.path.join(test_root, "data", "json_file.json")
data_yaml_file = os.path.join(test_root, "data", "yaml_file.yaml")
tmp_json_file = os.path.join(test_root, "tmp", "tmp_json_file.json")
tmp_yaml_file = os.path.join(test_root, "tmp", "tmp_yaml_file.yaml")
tmp_msgpack_file = os.path.join(test_root, "tmp", "tmp_msgpack_file.msgpack")
movie_data = {
"movies": {
"Spaceballs": {
"imdb_stars": 7.1,
"rating": "PG",
"length": 96,
"Director": "Mel Brooks",
"Stars": [
{"name": "Mel Brooks", "imdb": "nm0000316", "role": "President Skroob"},
{"name": "John Candy", "imdb": "nm0001006", "role": "Barf"},
{"name": "Rick Moranis", "imdb": "nm0001548", "role": "Dark Helmet"},
],
},
"Robin Hood: Men in Tights": {
"imdb_stars": 6.7,
"rating": "PG-13",
"length": 104,
"Director": "Mel Brooks",
"Stars": [
{"name": "Cary Elwes", "imdb": "nm0000144", "role": "Robin Hood"},
{"name": "Richard Lewis", "imdb": "nm0507659", "role": "Prince John"},
{"name": "Roger Rees", "imdb": "nm0715953", "role": "Sheriff of Rottingham"},
{"name": "Amy Yasbeck", "imdb": "nm0001865", "role": "Marian"},
],
},
}
}
def function_example(value):
yield value
class ClassExample(object):
def __init__(self):
self.a = "a"
self.b = 2
python_example_objects = (
None, # type: ignore
True,
False,
1,
3.14,
"abc",
[1, 2, 3],
{},
([], {}),
lambda x: x ** 2,
function_example,
ClassExample(),
) # type: ignore
|
import logging
from fortiosapi import FortiOSAPI
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_TOKEN, CONF_VERIFY_SSL
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_VERIFY_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): cv.string,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a FortiOSDeviceScanner."""
host = config[DOMAIN][CONF_HOST]
verify_ssl = config[DOMAIN][CONF_VERIFY_SSL]
token = config[DOMAIN][CONF_TOKEN]
fgt = FortiOSAPI()
try:
fgt.tokenlogin(host, token, verify_ssl)
except ConnectionError as ex:
_LOGGER.error("ConnectionError to FortiOS API: %s", ex)
return None
except Exception as ex: # pylint: disable=broad-except
_LOGGER.error("Failed to login to FortiOS API: %s", ex)
return None
return FortiOSDeviceScanner(fgt)
class FortiOSDeviceScanner(DeviceScanner):
"""This class queries a FortiOS unit for connected devices."""
def __init__(self, fgt) -> None:
"""Initialize the scanner."""
self._clients = {}
self._clients_json = {}
self._fgt = fgt
def update(self):
"""Update clients from the device."""
clients_json = self._fgt.monitor("user/device/select", "")
self._clients_json = clients_json
self._clients = []
if clients_json:
for client in clients_json["results"]:
if client["last_seen"] < 180:
self._clients.append(client["mac"].upper())
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self.update()
return self._clients
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
_LOGGER.debug("Getting name of device %s", device)
device = device.lower()
data = self._clients_json
if not data:
_LOGGER.error("No json results to get device names")
return None
for client in data["results"]:
if client["mac"] == device:
try:
name = client["host"]["name"]
_LOGGER.debug("Getting device name=%s", name)
return name
except KeyError as kex:
_LOGGER.error("Name not found in client data: %s", kex)
return None
return None
|
from datetime import timedelta
import logging
from adafruit_bmp280 import Adafruit_BMP280_I2C
import board
from busio import I2C
import voluptuous as vol
from homeassistant.components.sensor import (
DEVICE_CLASS_PRESSURE,
DEVICE_CLASS_TEMPERATURE,
PLATFORM_SCHEMA,
)
from homeassistant.const import CONF_NAME, PRESSURE_HPA, TEMP_CELSIUS
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "BMP280"
SCAN_INTERVAL = timedelta(seconds=15)
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=3)
MIN_I2C_ADDRESS = 0x76
MAX_I2C_ADDRESS = 0x77
CONF_I2C_ADDRESS = "i2c_address"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_I2C_ADDRESS): vol.All(
vol.Coerce(int), vol.Range(min=MIN_I2C_ADDRESS, max=MAX_I2C_ADDRESS)
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the sensor platform."""
try:
# initializing I2C bus using the auto-detected pins
i2c = I2C(board.SCL, board.SDA)
# initializing the sensor
bmp280 = Adafruit_BMP280_I2C(i2c, address=config[CONF_I2C_ADDRESS])
except ValueError as error:
# this usually happens when the board is I2C capable, but the device can't be found at the configured address
if str(error.args[0]).startswith("No I2C device at address"):
_LOGGER.error(
"%s. Hint: Check wiring and make sure that the SDO pin is tied to either ground (0x76) or VCC (0x77)",
error.args[0],
)
raise PlatformNotReady() from error
_LOGGER.error(error)
return
# use custom name if there's any
name = config[CONF_NAME]
# BMP280 has both temperature and pressure sensing capability
add_entities(
[Bmp280TemperatureSensor(bmp280, name), Bmp280PressureSensor(bmp280, name)]
)
class Bmp280Sensor(Entity):
"""Base class for BMP280 entities."""
def __init__(
self,
bmp280: Adafruit_BMP280_I2C,
name: str,
unit_of_measurement: str,
device_class: str,
):
"""Initialize the sensor."""
self._bmp280 = bmp280
self._name = name
self._unit_of_measurement = unit_of_measurement
self._device_class = device_class
self._state = None
self._errored = False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def device_class(self):
"""Return the device class."""
return self._device_class
@property
def available(self) -> bool:
"""Return if the device is currently available."""
return not self._errored
class Bmp280TemperatureSensor(Bmp280Sensor):
"""Representation of a Bosch BMP280 Temperature Sensor."""
def __init__(self, bmp280: Adafruit_BMP280_I2C, name: str):
"""Initialize the entity."""
super().__init__(
bmp280, f"{name} Temperature", TEMP_CELSIUS, DEVICE_CLASS_TEMPERATURE
)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Fetch new state data for the sensor."""
try:
self._state = round(self._bmp280.temperature, 1)
if self._errored:
_LOGGER.warning("Communication restored with temperature sensor")
self._errored = False
except OSError:
# this is thrown when a working sensor is unplugged between two updates
_LOGGER.warning(
"Unable to read temperature data due to a communication problem"
)
self._errored = True
class Bmp280PressureSensor(Bmp280Sensor):
"""Representation of a Bosch BMP280 Barometric Pressure Sensor."""
def __init__(self, bmp280: Adafruit_BMP280_I2C, name: str):
"""Initialize the entity."""
super().__init__(
bmp280, f"{name} Pressure", PRESSURE_HPA, DEVICE_CLASS_PRESSURE
)
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Fetch new state data for the sensor."""
try:
self._state = round(self._bmp280.pressure)
if self._errored:
_LOGGER.warning("Communication restored with pressure sensor")
self._errored = False
except OSError:
# this is thrown when a working sensor is unplugged between two updates
_LOGGER.warning(
"Unable to read pressure data due to a communication problem"
)
self._errored = True
|
from __future__ import division
__docformat__ = "restructuredtext en"
import math
import re
import sys
from locale import getlocale, LC_TIME
from datetime import date, time, datetime, timedelta
from time import strptime as time_strptime
from calendar import monthrange, timegm
from six.moves import range
try:
from mx.DateTime import RelativeDateTime, Date, DateTimeType
except ImportError:
endOfMonth = None
DateTimeType = datetime
else:
endOfMonth = RelativeDateTime(months=1, day=-1)
# NOTE: should we implement a compatibility layer between date representations
# as we have in lgc.db ?
FRENCH_FIXED_HOLIDAYS = {
'jour_an': '%s-01-01',
'fete_travail': '%s-05-01',
'armistice1945': '%s-05-08',
'fete_nat': '%s-07-14',
'assomption': '%s-08-15',
'toussaint': '%s-11-01',
'armistice1918': '%s-11-11',
'noel': '%s-12-25',
}
FRENCH_MOBILE_HOLIDAYS = {
'paques2004': '2004-04-12',
'ascension2004': '2004-05-20',
'pentecote2004': '2004-05-31',
'paques2005': '2005-03-28',
'ascension2005': '2005-05-05',
'pentecote2005': '2005-05-16',
'paques2006': '2006-04-17',
'ascension2006': '2006-05-25',
'pentecote2006': '2006-06-05',
'paques2007': '2007-04-09',
'ascension2007': '2007-05-17',
'pentecote2007': '2007-05-28',
'paques2008': '2008-03-24',
'ascension2008': '2008-05-01',
'pentecote2008': '2008-05-12',
'paques2009': '2009-04-13',
'ascension2009': '2009-05-21',
'pentecote2009': '2009-06-01',
'paques2010': '2010-04-05',
'ascension2010': '2010-05-13',
'pentecote2010': '2010-05-24',
'paques2011': '2011-04-25',
'ascension2011': '2011-06-02',
'pentecote2011': '2011-06-13',
'paques2012': '2012-04-09',
'ascension2012': '2012-05-17',
'pentecote2012': '2012-05-28',
}
# XXX this implementation cries for multimethod dispatching
def get_step(dateobj, nbdays=1):
# assume date is either a python datetime or a mx.DateTime object
if isinstance(dateobj, date):
return ONEDAY * nbdays
return nbdays # mx.DateTime is ok with integers
def datefactory(year, month, day, sampledate):
# assume date is either a python datetime or a mx.DateTime object
if isinstance(sampledate, datetime):
return datetime(year, month, day)
if isinstance(sampledate, date):
return date(year, month, day)
return Date(year, month, day)
def weekday(dateobj):
# assume date is either a python datetime or a mx.DateTime object
if isinstance(dateobj, date):
return dateobj.weekday()
return dateobj.day_of_week
def str2date(datestr, sampledate):
# NOTE: datetime.strptime is not an option until we drop py2.4 compat
year, month, day = [int(chunk) for chunk in datestr.split('-')]
return datefactory(year, month, day, sampledate)
def days_between(start, end):
if isinstance(start, date):
delta = end - start
# datetime.timedelta.days is always an integer (floored)
if delta.seconds:
return delta.days + 1
return delta.days
else:
return int(math.ceil((end - start).days))
def get_national_holidays(begin, end):
"""return french national days off between begin and end"""
begin = datefactory(begin.year, begin.month, begin.day, begin)
end = datefactory(end.year, end.month, end.day, end)
holidays = [str2date(datestr, begin)
for datestr in FRENCH_MOBILE_HOLIDAYS.values()]
for year in range(begin.year, end.year+1):
for datestr in FRENCH_FIXED_HOLIDAYS.values():
holiday = str2date(datestr % year, begin)
if holiday not in holidays:
holidays.append(holiday)
return [day for day in holidays if begin <= day < end]
def add_days_worked(start, days):
"""adds date but try to only take days worked into account"""
step = get_step(start)
weeks, plus = divmod(days, 5)
end = start + ((weeks * 7) + plus) * step
if weekday(end) >= 5: # saturday or sunday
end += (2 * step)
end += len([x for x in get_national_holidays(start, end + step)
if weekday(x) < 5]) * step
if weekday(end) >= 5: # saturday or sunday
end += (2 * step)
return end
def nb_open_days(start, end):
assert start <= end
step = get_step(start)
days = days_between(start, end)
weeks, plus = divmod(days, 7)
if weekday(start) > weekday(end):
plus -= 2
elif weekday(end) == 6:
plus -= 1
open_days = weeks * 5 + plus
nb_week_holidays = len([x for x in get_national_holidays(start, end+step)
if weekday(x) < 5 and x < end])
open_days -= nb_week_holidays
if open_days < 0:
return 0
return open_days
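# Worked example (illustrative, no French holidays fall in the range):
# starting on Monday 2020-01-06, add_days_worked(date(2020, 1, 6), 5) lands
# on the next Monday, 2020-01-13, and
# nb_open_days(date(2020, 1, 6), date(2020, 1, 13)) returns 5.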
def date_range(begin, end, incday=None, incmonth=None):
"""yields each date between begin and end
:param begin: the start date
:param end: the end date
:param incday: the number of days to use as the iteration step.
Default is one day.
:param incmonth: if given, iterate by months instead of days, using
this number of months as the step.
When using mx datetime, you should *NOT* use the incmonth argument; use instead
oneDay, oneHour, oneMinute, oneSecond, oneWeek or endOfMonth (to enumerate
months) as the `incday` argument
"""
assert not (incday and incmonth)
begin = todate(begin)
end = todate(end)
if incmonth:
while begin < end:
yield begin
begin = next_month(begin, incmonth)
else:
incr = get_step(begin, incday or 1)
while begin < end:
yield begin
begin += incr
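# For instance (illustrative), with plain datetime.date objects:
#
#     list(date_range(date(2020, 1, 1), date(2020, 1, 4)))
#     # -> [date(2020, 1, 1), date(2020, 1, 2), date(2020, 1, 3)]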
# makes py datetime usable #####################################################
ONEDAY = timedelta(days=1)
ONEWEEK = timedelta(days=7)
try:
strptime = datetime.strptime
except AttributeError: # py < 2.5
from time import strptime as time_strptime
def strptime(value, format):
return datetime(*time_strptime(value, format)[:6])
def strptime_time(value, format='%H:%M'):
return time(*time_strptime(value, format)[3:6])
def todate(somedate):
"""return a date from a date (leaving unchanged) or a datetime"""
if isinstance(somedate, datetime):
return date(somedate.year, somedate.month, somedate.day)
assert isinstance(somedate, (date, DateTimeType)), repr(somedate)
return somedate
def totime(somedate):
"""return a time from a time (leaving unchanged), date or datetime"""
# XXX mx compat
if not isinstance(somedate, time):
return time(somedate.hour, somedate.minute, somedate.second)
assert isinstance(somedate, (time)), repr(somedate)
return somedate
def todatetime(somedate):
"""return a date from a date (leaving unchanged) or a datetime"""
# take care, datetime is a subclass of date
if isinstance(somedate, datetime):
return somedate
assert isinstance(somedate, (date, DateTimeType)), repr(somedate)
return datetime(somedate.year, somedate.month, somedate.day)
def datetime2ticks(somedate):
return timegm(somedate.timetuple()) * 1000 + int(getattr(somedate, 'microsecond', 0) / 1000)
def ticks2datetime(ticks):
milliseconds, microseconds = divmod(ticks, 1000)
try:
return datetime.fromtimestamp(milliseconds)
except (ValueError, OverflowError):
epoch = datetime.fromtimestamp(0)
nb_days, seconds = divmod(int(milliseconds), 86400)
delta = timedelta(nb_days, seconds=seconds, microseconds=microseconds)
try:
return epoch + delta
except (ValueError, OverflowError):
raise
def days_in_month(somedate):
return monthrange(somedate.year, somedate.month)[1]
def days_in_year(somedate):
feb = date(somedate.year, 2, 1)
if days_in_month(feb) == 29:
return 366
else:
return 365
def previous_month(somedate, nbmonth=1):
while nbmonth:
somedate = first_day(somedate) - ONEDAY
nbmonth -= 1
return somedate
def next_month(somedate, nbmonth=1):
while nbmonth:
somedate = last_day(somedate) + ONEDAY
nbmonth -= 1
return somedate
def first_day(somedate):
return date(somedate.year, somedate.month, 1)
def last_day(somedate):
return date(somedate.year, somedate.month, days_in_month(somedate))
def ustrftime(somedate, fmt='%Y-%m-%d'):
"""like strftime, but returns a unicode string instead of an encoded
string which may be problematic with localized date.
"""
if sys.version_info >= (3, 3):
# datetime.date.strftime() supports dates since year 1 in Python >=3.3.
return somedate.strftime(fmt)
else:
try:
if sys.version_info < (3, 0):
encoding = getlocale(LC_TIME)[1] or 'ascii'
return unicode(somedate.strftime(str(fmt)), encoding)
else:
return somedate.strftime(fmt)
except ValueError:
if somedate.year >= 1900:
raise
# datetime is not happy with dates before 1900
# we try to work around this, assuming a simple
# format string
fields = {'Y': somedate.year,
'm': somedate.month,
'd': somedate.day,
}
if isinstance(somedate, datetime):
fields.update({'H': somedate.hour,
'M': somedate.minute,
'S': somedate.second})
fmt = re.sub('%([YmdHMS])', r'%(\1)02d', fmt)
return unicode(fmt) % fields
def utcdatetime(dt):
if dt.tzinfo is None:
return dt
return (dt.replace(tzinfo=None) - dt.utcoffset())
def utctime(dt):
if dt.tzinfo is None:
return dt
return (dt + dt.utcoffset() + dt.dst()).replace(tzinfo=None)
def datetime_to_seconds(date):
"""return the number of seconds since the beginning of the day for that date
"""
return date.second+60*date.minute + 3600*date.hour
def timedelta_to_days(delta):
"""return the time delta as a fractional number of days"""
return delta.days + delta.seconds / (3600*24)
def timedelta_to_seconds(delta):
"""return the time delta as a number of seconds"""
return delta.days*(3600*24) + delta.seconds
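# Quick sanity check of the three helpers above (illustrative):
#
#     d = datetime(2020, 1, 1, 1, 30, 15)
#     datetime_to_seconds(d)                                   # -> 5415
#     timedelta_to_days(timedelta(days=1, seconds=43200))      # -> 1.5
#     timedelta_to_seconds(timedelta(days=1, seconds=43200))   # -> 129600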
|
import logging
from pycomfoconnect import (
CMD_FAN_MODE_AWAY,
CMD_FAN_MODE_HIGH,
CMD_FAN_MODE_LOW,
CMD_FAN_MODE_MEDIUM,
SENSOR_FAN_SPEED_MODE,
)
from homeassistant.components.fan import (
SPEED_HIGH,
SPEED_LOW,
SPEED_MEDIUM,
SPEED_OFF,
SUPPORT_SET_SPEED,
FanEntity,
)
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DOMAIN, SIGNAL_COMFOCONNECT_UPDATE_RECEIVED, ComfoConnectBridge
_LOGGER = logging.getLogger(__name__)
SPEED_MAPPING = {0: SPEED_OFF, 1: SPEED_LOW, 2: SPEED_MEDIUM, 3: SPEED_HIGH}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ComfoConnect fan platform."""
ccb = hass.data[DOMAIN]
add_entities([ComfoConnectFan(ccb.name, ccb)], True)
class ComfoConnectFan(FanEntity):
"""Representation of the ComfoConnect fan platform."""
def __init__(self, name, ccb: ComfoConnectBridge) -> None:
"""Initialize the ComfoConnect fan."""
self._ccb = ccb
self._name = name
async def async_added_to_hass(self):
"""Register for sensor updates."""
_LOGGER.debug("Registering for fan speed")
self.async_on_remove(
async_dispatcher_connect(
self.hass,
SIGNAL_COMFOCONNECT_UPDATE_RECEIVED.format(SENSOR_FAN_SPEED_MODE),
self._handle_update,
)
)
await self.hass.async_add_executor_job(
self._ccb.comfoconnect.register_sensor, SENSOR_FAN_SPEED_MODE
)
def _handle_update(self, value):
"""Handle update callbacks."""
_LOGGER.debug(
"Handle update for fan speed (%d): %s", SENSOR_FAN_SPEED_MODE, value
)
self._ccb.data[SENSOR_FAN_SPEED_MODE] = value
self.schedule_update_ha_state()
@property
def should_poll(self) -> bool:
"""Do not poll."""
return False
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self._ccb.unique_id
@property
def name(self):
"""Return the name of the fan."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend."""
return "mdi:air-conditioner"
@property
def supported_features(self) -> int:
"""Flag supported features."""
return SUPPORT_SET_SPEED
@property
def speed(self):
"""Return the current fan mode."""
try:
speed = self._ccb.data[SENSOR_FAN_SPEED_MODE]
return SPEED_MAPPING[speed]
except KeyError:
return None
@property
def speed_list(self):
"""List of available fan modes."""
return [SPEED_OFF, SPEED_LOW, SPEED_MEDIUM, SPEED_HIGH]
def turn_on(self, speed: str = None, **kwargs) -> None:
"""Turn on the fan."""
if speed is None:
speed = SPEED_LOW
self.set_speed(speed)
def turn_off(self, **kwargs) -> None:
"""Turn off the fan (to away)."""
self.set_speed(SPEED_OFF)
def set_speed(self, speed: str):
"""Set fan speed."""
_LOGGER.debug("Changing fan speed to %s", speed)
if speed == SPEED_OFF:
self._ccb.comfoconnect.cmd_rmi_request(CMD_FAN_MODE_AWAY)
elif speed == SPEED_LOW:
self._ccb.comfoconnect.cmd_rmi_request(CMD_FAN_MODE_LOW)
elif speed == SPEED_MEDIUM:
self._ccb.comfoconnect.cmd_rmi_request(CMD_FAN_MODE_MEDIUM)
elif speed == SPEED_HIGH:
self._ccb.comfoconnect.cmd_rmi_request(CMD_FAN_MODE_HIGH)
# Update current mode
self.schedule_update_ha_state()
|
from datetime import datetime, timedelta
import logging
from requests.exceptions import ConnectTimeout, HTTPError
from rova.rova import Rova
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_NAME,
DEVICE_CLASS_TIMESTAMP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
# Config for rova requests.
CONF_ZIP_CODE = "zip_code"
CONF_HOUSE_NUMBER = "house_number"
CONF_HOUSE_NUMBER_SUFFIX = "house_number_suffix"
UPDATE_DELAY = timedelta(hours=12)
SCAN_INTERVAL = timedelta(hours=12)
# Supported sensor types:
# Key: [json_key, name, icon]
SENSOR_TYPES = {
"bio": ["gft", "Biowaste", "mdi:recycle"],
"paper": ["papier", "Paper", "mdi:recycle"],
"plastic": ["pmd", "PET", "mdi:recycle"],
"residual": ["restafval", "Residual", "mdi:recycle"],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ZIP_CODE): cv.string,
vol.Required(CONF_HOUSE_NUMBER): cv.string,
vol.Optional(CONF_HOUSE_NUMBER_SUFFIX, default=""): cv.string,
vol.Optional(CONF_NAME, default="Rova"): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=["bio"]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
}
)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Create the Rova data service and sensors."""
zip_code = config[CONF_ZIP_CODE]
house_number = config[CONF_HOUSE_NUMBER]
house_number_suffix = config[CONF_HOUSE_NUMBER_SUFFIX]
platform_name = config[CONF_NAME]
# Create new Rova object to retrieve data
api = Rova(zip_code, house_number, house_number_suffix)
try:
if not api.is_rova_area():
_LOGGER.error("ROVA does not collect garbage in this area")
return
except (ConnectTimeout, HTTPError):
_LOGGER.error("Could not retrieve details from ROVA API")
return
# Create rova data service which will retrieve and update the data.
data_service = RovaData(api)
# Create a new sensor for each garbage type.
entities = []
for sensor_key in config[CONF_MONITORED_CONDITIONS]:
sensor = RovaSensor(platform_name, sensor_key, data_service)
entities.append(sensor)
add_entities(entities, True)
class RovaSensor(Entity):
"""Representation of a Rova sensor."""
def __init__(self, platform_name, sensor_key, data_service):
"""Initialize the sensor."""
self.sensor_key = sensor_key
self.platform_name = platform_name
self.data_service = data_service
self._state = None
self._json_key = SENSOR_TYPES[self.sensor_key][0]
@property
def name(self):
"""Return the name."""
return f"{self.platform_name}_{self.sensor_key}"
@property
def icon(self):
"""Return the sensor icon."""
return SENSOR_TYPES[self.sensor_key][2]
@property
def device_class(self):
"""Return the class of this sensor."""
return DEVICE_CLASS_TIMESTAMP
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Get the latest data from the sensor and update the state."""
self.data_service.update()
pickup_date = self.data_service.data.get(self._json_key)
if pickup_date is not None:
self._state = pickup_date.isoformat()
class RovaData:
"""Get and update the latest data from the Rova API."""
def __init__(self, api):
"""Initialize the data object."""
self.api = api
self.data = {}
@Throttle(UPDATE_DELAY)
def update(self):
"""Update the data from the Rova API."""
try:
items = self.api.get_calendar_items()
except (ConnectTimeout, HTTPError):
_LOGGER.error("Could not retrieve data, retry again later")
return
self.data = {}
for item in items:
date = datetime.strptime(item["Date"], "%Y-%m-%dT%H:%M:%S")
code = item["GarbageTypeCode"].lower()
if code not in self.data and date > datetime.now():
self.data[code] = date
_LOGGER.debug("Updated Rova calendar: %s", self.data)
|
import sys
import argparse
from PyQt5.QtCore import QUrl
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebKit import QWebSettings
from PyQt5.QtWebKitWidgets import QWebView
def parse_args():
"""Parse commandline arguments."""
parser = argparse.ArgumentParser()
parser.add_argument('url', help='The URL to open')
parser.add_argument('--plugins', '-p', help='Enable plugins',
default=False, action='store_true')
return parser.parse_known_args()[0]
if __name__ == '__main__':
args = parse_args()
app = QApplication(sys.argv)
wv = QWebView()
wv.loadStarted.connect(lambda: print("Loading started"))
wv.loadProgress.connect(lambda p: print("Loading progress: {}%".format(p)))
wv.loadFinished.connect(lambda: print("Loading finished"))
if args.plugins:
wv.settings().setAttribute(QWebSettings.PluginsEnabled, True)
wv.load(QUrl.fromUserInput(args.url))
wv.show()
app.exec_()
|
import pytest
from lemur.destinations.views import * # noqa
from .vectors import (
VALID_ADMIN_API_TOKEN,
VALID_ADMIN_HEADER_TOKEN,
VALID_USER_HEADER_TOKEN,
)
def test_destination_input_schema(client, destination_plugin, destination):
from lemur.destinations.schemas import DestinationInputSchema
input_data = {
"label": "destination1",
"options": {},
"description": "my destination",
"active": True,
"plugin": {"slug": "test-destination"},
}
data, errors = DestinationInputSchema().load(input_data)
assert not errors
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 404),
(VALID_ADMIN_HEADER_TOKEN, 404),
(VALID_ADMIN_API_TOKEN, 404),
("", 401),
],
)
def test_destination_get(client, token, status):
assert (
client.get(
api.url_for(Destinations, destination_id=1), headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_destination_post_(client, token, status):
assert (
client.post(
api.url_for(Destinations, destination_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_destination_put(client, token, status):
assert (
client.put(
api.url_for(Destinations, destination_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_destination_delete(client, token, status):
assert (
client.delete(
api.url_for(Destinations, destination_id=1), headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_destination_patch(client, token, status):
assert (
client.patch(
api.url_for(Destinations, destination_id=1), data={}, headers=token
).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 403),
(VALID_ADMIN_HEADER_TOKEN, 400),
(VALID_ADMIN_API_TOKEN, 400),
("", 401),
],
)
def test_destination_list_post_(client, token, status):
assert (
client.post(api.url_for(DestinationsList), data={}, headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 200),
(VALID_ADMIN_HEADER_TOKEN, 200),
(VALID_ADMIN_API_TOKEN, 200),
("", 401),
],
)
def test_destination_list_get(client, token, status):
assert (
client.get(api.url_for(DestinationsList), headers=token).status_code == status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_destination_list_delete(client, token, status):
assert (
client.delete(api.url_for(DestinationsList), headers=token).status_code
== status
)
@pytest.mark.parametrize(
"token,status",
[
(VALID_USER_HEADER_TOKEN, 405),
(VALID_ADMIN_HEADER_TOKEN, 405),
(VALID_ADMIN_API_TOKEN, 405),
("", 405),
],
)
def test_destination_list_patch(client, token, status):
assert (
client.patch(api.url_for(DestinationsList), data={}, headers=token).status_code
== status
)
|
from pybotvac.exceptions import NeatoLoginException
import pytest
from homeassistant.components.neato.const import CONF_VENDOR, NEATO_DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
USERNAME = "myUsername"
PASSWORD = "myPassword"
VENDOR_NEATO = "neato"
VENDOR_VORWERK = "vorwerk"
VENDOR_INVALID = "invalid"
VALID_CONFIG = {
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_VENDOR: VENDOR_NEATO,
}
DIFFERENT_CONFIG = {
CONF_USERNAME: "anotherUsername",
CONF_PASSWORD: "anotherPassword",
CONF_VENDOR: VENDOR_VORWERK,
}
INVALID_CONFIG = {
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_VENDOR: VENDOR_INVALID,
}
@pytest.fixture(name="config_flow")
def mock_config_flow_login():
"""Mock a successful login."""
with patch("homeassistant.components.neato.config_flow.Account", return_value=True):
yield
@pytest.fixture(name="hub")
def mock_controller_login():
"""Mock a successful login."""
with patch("homeassistant.components.neato.Account", return_value=True):
yield
async def test_no_config_entry(hass):
"""There is nothing in configuration.yaml."""
res = await async_setup_component(hass, NEATO_DOMAIN, {})
assert res is True
async def test_create_valid_config_entry(hass, config_flow, hub):
"""There is something in configuration.yaml."""
assert hass.config_entries.async_entries(NEATO_DOMAIN) == []
assert await async_setup_component(hass, NEATO_DOMAIN, {NEATO_DOMAIN: VALID_CONFIG})
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(NEATO_DOMAIN)
assert entries
assert entries[0].data[CONF_USERNAME] == USERNAME
assert entries[0].data[CONF_PASSWORD] == PASSWORD
assert entries[0].data[CONF_VENDOR] == VENDOR_NEATO
async def test_config_entries_in_sync(hass, hub):
"""The config entry and configuration.yaml are in sync."""
MockConfigEntry(domain=NEATO_DOMAIN, data=VALID_CONFIG).add_to_hass(hass)
assert hass.config_entries.async_entries(NEATO_DOMAIN)
assert await async_setup_component(hass, NEATO_DOMAIN, {NEATO_DOMAIN: VALID_CONFIG})
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(NEATO_DOMAIN)
assert entries
assert entries[0].data[CONF_USERNAME] == USERNAME
assert entries[0].data[CONF_PASSWORD] == PASSWORD
assert entries[0].data[CONF_VENDOR] == VENDOR_NEATO
async def test_config_entries_not_in_sync(hass, config_flow, hub):
"""The config entry and configuration.yaml are not in sync."""
MockConfigEntry(domain=NEATO_DOMAIN, data=DIFFERENT_CONFIG).add_to_hass(hass)
assert hass.config_entries.async_entries(NEATO_DOMAIN)
assert await async_setup_component(hass, NEATO_DOMAIN, {NEATO_DOMAIN: VALID_CONFIG})
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(NEATO_DOMAIN)
assert entries
assert entries[0].data[CONF_USERNAME] == USERNAME
assert entries[0].data[CONF_PASSWORD] == PASSWORD
assert entries[0].data[CONF_VENDOR] == VENDOR_NEATO
async def test_config_entries_not_in_sync_error(hass):
"""The config entry and configuration.yaml are not in sync, the new configuration is wrong."""
MockConfigEntry(domain=NEATO_DOMAIN, data=VALID_CONFIG).add_to_hass(hass)
assert hass.config_entries.async_entries(NEATO_DOMAIN)
with patch(
"homeassistant.components.neato.config_flow.Account",
side_effect=NeatoLoginException(),
):
assert not await async_setup_component(
hass, NEATO_DOMAIN, {NEATO_DOMAIN: DIFFERENT_CONFIG}
)
await hass.async_block_till_done()
entries = hass.config_entries.async_entries(NEATO_DOMAIN)
assert entries
assert entries[0].data[CONF_USERNAME] == USERNAME
assert entries[0].data[CONF_PASSWORD] == PASSWORD
assert entries[0].data[CONF_VENDOR] == VENDOR_NEATO
|
from __future__ import division, print_function
import logging
from .video import Episode, Movie
logger = logging.getLogger(__name__)
#: Scores for episodes
episode_scores = {'hash': 809, 'series': 405, 'year': 135, 'country': 135, 'season': 45, 'episode': 45,
'release_group': 15, 'streaming_service': 15, 'source': 7, 'audio_codec': 3, 'resolution': 2,
'video_codec': 2, 'hearing_impaired': 1}
#: Scores for movies
movie_scores = {'hash': 269, 'title': 135, 'year': 45, 'country': 45, 'release_group': 15, 'streaming_service': 15,
'source': 7, 'audio_codec': 3, 'resolution': 2, 'video_codec': 2, 'hearing_impaired': 1}
#: All scores names
score_keys = set([s for s in episode_scores.keys()] + [s for s in movie_scores.keys()])
#: Equivalent release groups
equivalent_release_groups = ({'LOL', 'DIMENSION'}, {'ASAP', 'IMMERSE', 'FLEET'}, {'AVS', 'SVA'})
def get_equivalent_release_groups(release_group):
"""Get all the equivalents of the given release group.
:param str release_group: the release group to get the equivalents of.
:return: the equivalent release groups.
:rtype: set
"""
for equivalent_release_group in equivalent_release_groups:
if release_group in equivalent_release_group:
return equivalent_release_group
return {release_group}
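# Illustrative note (not part of subliminal): get_equivalent_release_groups('LOL')
# returns {'LOL', 'DIMENSION'}, while an unknown group such as 'FOO' falls back
# to the singleton set {'FOO'}.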
def get_scores(video):
"""Get the scores dict for the given `video`.
This will return either :data:`episode_scores` or :data:`movie_scores` based on the type of the `video`.
:param video: the video to compute the score against.
:type video: :class:`~subliminal.video.Video`
:return: the scores dict.
:rtype: dict
"""
if isinstance(video, Episode):
return episode_scores
elif isinstance(video, Movie):
return movie_scores
raise ValueError('video must be an instance of Episode or Movie')
def compute_score(subtitle, video, hearing_impaired=None):
"""Compute the score of the `subtitle` against the `video` with `hearing_impaired` preference.
:func:`compute_score` uses the :meth:`Subtitle.get_matches <subliminal.subtitle.Subtitle.get_matches>` method and
applies the scores (either from :data:`episode_scores` or :data:`movie_scores`) after some processing.
:param subtitle: the subtitle to compute the score of.
:type subtitle: :class:`~subliminal.subtitle.Subtitle`
:param video: the video to compute the score against.
:type video: :class:`~subliminal.video.Video`
:param bool hearing_impaired: hearing impaired preference.
:return: score of the subtitle.
:rtype: int
"""
logger.info('Computing score of %r for video %r with %r', subtitle, video, dict(hearing_impaired=hearing_impaired))
# get the scores dict
scores = get_scores(video)
logger.debug('Using scores %r', scores)
# get the matches
matches = subtitle.get_matches(video)
logger.debug('Found matches %r', matches)
# on hash match, discard everything else
if 'hash' in matches:
logger.debug('Keeping only hash match')
matches &= {'hash'}
# handle equivalent matches
if isinstance(video, Episode):
if 'title' in matches:
logger.debug('Adding title match equivalent')
matches.add('episode')
if 'series_imdb_id' in matches:
logger.debug('Adding series_imdb_id match equivalent')
matches |= {'series', 'year', 'country'}
if 'imdb_id' in matches:
logger.debug('Adding imdb_id match equivalents')
matches |= {'series', 'year', 'country', 'season', 'episode'}
if 'tvdb_id' in matches:
logger.debug('Adding tvdb_id match equivalents')
matches |= {'series', 'year', 'country', 'season', 'episode'}
if 'series_tvdb_id' in matches:
logger.debug('Adding series_tvdb_id match equivalents')
matches |= {'series', 'year', 'country'}
elif isinstance(video, Movie):
if 'imdb_id' in matches:
logger.debug('Adding imdb_id match equivalents')
matches |= {'title', 'year', 'country'}
# handle hearing impaired
if hearing_impaired is not None and subtitle.hearing_impaired == hearing_impaired:
logger.debug('Matched hearing_impaired')
matches.add('hearing_impaired')
# compute the score
score = sum((scores.get(match, 0) for match in matches))
logger.info('Computed score %r with final matches %r', score, matches)
# ensure score is within valid bounds
assert 0 <= score <= scores['hash'] + scores['hearing_impaired']
return score
def solve_episode_equations():
from sympy import Eq, solve, symbols
hash, series, year, country, season, episode = symbols('hash series year country season episode')
release_group, streaming_service, source = symbols('release_group streaming_service source')
audio_codec, resolution, video_codec = symbols('audio_codec resolution video_codec')
hearing_impaired = symbols('hearing_impaired')
equations = [
# hash is best
Eq(hash, series + year + country + season + episode +
release_group + streaming_service + source + audio_codec + resolution + video_codec),
# series counts for the most part in the total score
Eq(series, year + country + season + episode + release_group + streaming_service + source +
audio_codec + resolution + video_codec + 1),
# year is the second most important part
Eq(year, season + episode + release_group + streaming_service + source +
audio_codec + resolution + video_codec + 1),
# year counts as much as country
Eq(year, country),
# season is important too
Eq(season, release_group + streaming_service + source + audio_codec + resolution + video_codec + 1),
# episode is equally important to season
Eq(episode, season),
# release group is the next most wanted match
Eq(release_group, source + audio_codec + resolution + video_codec + 1),
# streaming service counts as much as release group
Eq(release_group, streaming_service),
# source counts as much as audio_codec, resolution and video_codec
Eq(source, audio_codec + resolution + video_codec),
# audio_codec is more valuable than video_codec
Eq(audio_codec, video_codec + 1),
# resolution counts as much as video_codec
Eq(resolution, video_codec),
# video_codec is the least valuable match but counts more than the sum of all scoring increasing matches
Eq(video_codec, hearing_impaired + 1),
# hearing impaired is only used for score increasing, so put it to 1
Eq(hearing_impaired, 1),
]
return solve(equations, [hash, series, year, country, season, episode, release_group, streaming_service, source,
audio_codec, resolution, hearing_impaired, video_codec])
def solve_movie_equations():
from sympy import Eq, solve, symbols
hash, title, year, country, release_group = symbols('hash title year country release_group')
streaming_service, source, audio_codec, resolution = symbols('streaming_service source audio_codec resolution')
video_codec, hearing_impaired = symbols('video_codec hearing_impaired')
equations = [
# hash is best
Eq(hash, title + year + country + release_group + streaming_service +
source + audio_codec + resolution + video_codec),
# title counts for the most part in the total score
Eq(title, year + country + release_group + streaming_service +
source + audio_codec + resolution + video_codec + 1),
# year is the second most important part
Eq(year, release_group + streaming_service + source + audio_codec + resolution + video_codec + 1),
# year counts as much as country
Eq(year, country),
# release group is the next most wanted match
Eq(release_group, source + audio_codec + resolution + video_codec + 1),
# streaming service counts as much as release group
Eq(release_group, streaming_service),
# source counts as much as audio_codec, resolution and video_codec
Eq(source, audio_codec + resolution + video_codec),
# audio_codec is more valuable than video_codec
Eq(audio_codec, video_codec + 1),
# resolution counts as much as video_codec
Eq(resolution, video_codec),
# video_codec is the least valuable match but counts more than the sum of all scoring increasing matches
Eq(video_codec, hearing_impaired + 1),
# hearing impaired is only used for score increasing, so put it to 1
Eq(hearing_impaired, 1),
]
return solve(equations, [hash, title, year, country, release_group, streaming_service, source, audio_codec,
resolution, hearing_impaired, video_codec])
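# Illustrative sketch (not part of subliminal): how the per-match scores above
# add up.  An episode subtitle matching 'series', 'year', 'season' and
# 'episode' scores 405 + 135 + 45 + 45 = 630; a 'hash' match alone is worth
# 809, and compute_score() discards every other match when 'hash' is present.
if __name__ == '__main__':
    example_matches = {'series', 'year', 'season', 'episode'}
    assert sum(episode_scores[match] for match in example_matches) == 630
    assert episode_scores['hash'] == 809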
|
import logging
from homeconnect.api import HomeConnectError
from homeassistant.components.switch import SwitchEntity
from .const import (
BSH_ACTIVE_PROGRAM,
BSH_OPERATION_STATE,
BSH_POWER_ON,
BSH_POWER_STATE,
DOMAIN,
)
from .entity import HomeConnectEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Home Connect switch."""
def get_entities():
"""Get a list of entities."""
entities = []
hc_api = hass.data[DOMAIN][config_entry.entry_id]
for device_dict in hc_api.devices:
entity_dicts = device_dict.get("entities", {}).get("switch", [])
entity_list = [HomeConnectProgramSwitch(**d) for d in entity_dicts]
entity_list += [HomeConnectPowerSwitch(device_dict["device"])]
entities += entity_list
return entities
async_add_entities(await hass.async_add_executor_job(get_entities), True)
class HomeConnectProgramSwitch(HomeConnectEntity, SwitchEntity):
"""Switch class for Home Connect."""
def __init__(self, device, program_name):
"""Initialize the entity."""
desc = " ".join(["Program", program_name.split(".")[-1]])
super().__init__(device, desc)
self.program_name = program_name
self._state = None
self._remote_allowed = None
@property
def is_on(self):
"""Return true if the switch is on."""
return bool(self._state)
@property
def available(self):
"""Return true if the entity is available."""
return True
async def async_turn_on(self, **kwargs):
"""Start the program."""
_LOGGER.debug("Tried to turn on program %s", self.program_name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.start_program, self.program_name
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to start program: %s", err)
self.async_entity_update()
async def async_turn_off(self, **kwargs):
"""Stop the program."""
_LOGGER.debug("Tried to stop program %s", self.program_name)
try:
await self.hass.async_add_executor_job(self.device.appliance.stop_program)
except HomeConnectError as err:
_LOGGER.error("Error while trying to stop program: %s", err)
self.async_entity_update()
async def async_update(self):
"""Update the switch's status."""
state = self.device.appliance.status.get(BSH_ACTIVE_PROGRAM, {})
if state.get("value") == self.program_name:
self._state = True
else:
self._state = False
_LOGGER.debug("Updated, new state: %s", self._state)
class HomeConnectPowerSwitch(HomeConnectEntity, SwitchEntity):
"""Power switch class for Home Connect."""
def __init__(self, device):
"""Inititialize the entity."""
super().__init__(device, "Power")
self._state = None
@property
def is_on(self):
"""Return true if the switch is on."""
return bool(self._state)
async def async_turn_on(self, **kwargs):
"""Switch the device on."""
_LOGGER.debug("Tried to switch on %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting,
BSH_POWER_STATE,
BSH_POWER_ON,
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn on device: %s", err)
self._state = False
self.async_entity_update()
async def async_turn_off(self, **kwargs):
"""Switch the device off."""
_LOGGER.debug("tried to switch off %s", self.name)
try:
await self.hass.async_add_executor_job(
self.device.appliance.set_setting,
BSH_POWER_STATE,
self.device.power_off_state,
)
except HomeConnectError as err:
_LOGGER.error("Error while trying to turn off device: %s", err)
self._state = True
self.async_entity_update()
async def async_update(self):
"""Update the switch's status."""
if (
self.device.appliance.status.get(BSH_POWER_STATE, {}).get("value")
== BSH_POWER_ON
):
self._state = True
elif (
self.device.appliance.status.get(BSH_POWER_STATE, {}).get("value")
== self.device.power_off_state
):
self._state = False
elif self.device.appliance.status.get(BSH_OPERATION_STATE, {}).get(
"value", None
) in [
"BSH.Common.EnumType.OperationState.Ready",
"BSH.Common.EnumType.OperationState.DelayedStart",
"BSH.Common.EnumType.OperationState.Run",
"BSH.Common.EnumType.OperationState.Pause",
"BSH.Common.EnumType.OperationState.ActionRequired",
"BSH.Common.EnumType.OperationState.Aborting",
"BSH.Common.EnumType.OperationState.Finished",
]:
self._state = True
elif (
self.device.appliance.status.get(BSH_OPERATION_STATE, {}).get("value")
== "BSH.Common.EnumType.OperationState.Inactive"
):
self._state = False
else:
self._state = None
_LOGGER.debug("Updated, new state: %s", self._state)
|
import json
import os
import subprocess
import threading
import unittest
from http.server import BaseHTTPRequestHandler, HTTPServer
from test.support import EnvironmentVarGuard
from urllib.parse import urlparse
from datetime import datetime, timedelta
import mock
from google.auth.exceptions import DefaultCredentialsError
from google.cloud import bigquery
from kaggle_secrets import (GcpTarget, UserSecretsClient,
NotFoundError, ValidationError)
from kaggle_web_client import (_KAGGLE_URL_BASE_ENV_VAR_NAME,
_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME,
CredentialError, BackendError)
_TEST_JWT = 'test-secrets-key'
class UserSecretsHTTPHandler(BaseHTTPRequestHandler):
def set_request(self):
raise NotImplementedError()
def get_response(self):
raise NotImplementedError()
def do_HEAD(s):
s.send_response(200)
def do_POST(s):
s.set_request()
s.send_response(200)
s.send_header("Content-type", "application/json")
s.end_headers()
s.wfile.write(json.dumps(s.get_response()).encode("utf-8"))
class TestUserSecrets(unittest.TestCase):
SERVER_ADDRESS = urlparse(os.getenv(_KAGGLE_URL_BASE_ENV_VAR_NAME, default="http://127.0.0.1:8001"))
def _test_client(self, client_func, expected_path, expected_body, secret=None, success=True):
_request = {}
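        # Stand up a throwaway handler that records the request the client sends
        # and replies with a canned secrets payload, so the assertions below can
        # inspect exactly what was posted.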
class AccessTokenHandler(UserSecretsHTTPHandler):
def set_request(self):
_request['path'] = self.path
content_len = int(self.headers.get('Content-Length'))
_request['body'] = json.loads(self.rfile.read(content_len))
_request['headers'] = self.headers
def get_response(self):
if success:
return {'result': {'secret': secret, 'secretType': 'refreshToken', 'secretProvider': 'google', 'expiresInSeconds': 3600}, 'wasSuccessful': "true"}
else:
return {'wasSuccessful': "false", 'errors': ['No user secrets exist for kernel']}
env = EnvironmentVarGuard()
env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
with env:
with HTTPServer((self.SERVER_ADDRESS.hostname, self.SERVER_ADDRESS.port), AccessTokenHandler) as httpd:
threading.Thread(target=httpd.serve_forever).start()
try:
client_func()
finally:
httpd.shutdown()
path, headers, body = _request['path'], _request['headers'], _request['body']
self.assertEqual(
path,
expected_path,
msg="Fake server did not receive the right request from the UserSecrets client.")
self.assertEqual(
body,
expected_body,
msg="Fake server did not receive the right body from the UserSecrets client.")
def test_no_token_fails(self):
env = EnvironmentVarGuard()
env.unset(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME)
with env:
with self.assertRaises(CredentialError):
client = UserSecretsClient()
def test_get_secret_succeeds(self):
secret = '12345'
def call_get_secret():
client = UserSecretsClient()
secret_response = client.get_secret("secret_label")
self.assertEqual(secret_response, secret)
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "secret_label"},
secret=secret)
def test_get_secret_handles_unsuccessful(self):
def call_get_secret():
client = UserSecretsClient()
with self.assertRaises(BackendError):
secret_response = client.get_secret("secret_label")
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "secret_label"},
success=False)
def test_get_secret_validates_label(self):
env = EnvironmentVarGuard()
env.set(_KAGGLE_USER_SECRETS_TOKEN_ENV_VAR_NAME, _TEST_JWT)
with env:
client = UserSecretsClient()
with self.assertRaises(ValidationError):
secret_response = client.get_secret("")
def test_get_gcloud_secret_succeeds(self):
secret = '{"client_id":"gcloud","type":"authorized_user"}'
def call_get_secret():
client = UserSecretsClient()
secret_response = client.get_gcloud_credential()
self.assertEqual(secret_response, secret)
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"},
secret=secret)
def test_get_gcloud_secret_handles_unsuccessful(self):
def call_get_secret():
client = UserSecretsClient()
with self.assertRaises(NotFoundError):
secret_response = client.get_gcloud_credential()
self._test_client(call_get_secret,
'/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"},
success=False)
def test_set_gcloud_credentials_succeeds(self):
secret = '{"client_id":"gcloud","type":"authorized_user","refresh_token":"refresh_token"}'
project = 'foo'
account = 'bar'
def get_gcloud_config_value(field):
result = subprocess.run(['gcloud', 'config', 'get-value', field], capture_output=True)
result.check_returncode()
return result.stdout.strip().decode('ascii')
def test_fn():
client = UserSecretsClient()
client.set_gcloud_credentials(project=project, account=account)
self.assertEqual(project, os.environ['GOOGLE_CLOUD_PROJECT'])
self.assertEqual(project, get_gcloud_config_value('project'))
self.assertEqual(account, os.environ['GOOGLE_ACCOUNT'])
self.assertEqual(account, get_gcloud_config_value('account'))
expected_creds_file = '/tmp/gcloud_credential.json'
self.assertEqual(expected_creds_file, os.environ['GOOGLE_APPLICATION_CREDENTIALS'])
self.assertEqual(expected_creds_file, get_gcloud_config_value('auth/credential_file_override'))
with open(expected_creds_file, 'r') as f:
self.assertEqual(secret, '\n'.join(f.readlines()))
self._test_client(test_fn, '/requests/GetUserSecretByLabelRequest', {'Label': "__gcloud_sdk_auth__"}, secret=secret)
@mock.patch('kaggle_secrets.datetime')
def test_get_access_token_succeeds(self, mock_dt):
secret = '12345'
now = datetime(1993, 4, 24)
mock_dt.utcnow = mock.Mock(return_value=now)
def call_get_bigquery_access_token():
client = UserSecretsClient()
secret_response = client.get_bigquery_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
def call_get_gcs_access_token():
client = UserSecretsClient()
secret_response = client._get_gcs_access_token()
self.assertEqual(secret_response, (secret, now + timedelta(seconds=3600)))
self._test_client(call_get_bigquery_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.BIGQUERY.target},
secret=secret)
self._test_client(call_get_gcs_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.GCS.target},
secret=secret)
def test_get_access_token_handles_unsuccessful(self):
def call_get_access_token():
client = UserSecretsClient()
with self.assertRaises(BackendError):
client.get_bigquery_access_token()
self._test_client(call_get_access_token,
'/requests/GetUserSecretRequest', {'Target': GcpTarget.BIGQUERY.target}, success=False)
|
from __future__ import with_statement
import logging
import csv
import itertools
from gensim import interfaces, utils
logger = logging.getLogger(__name__)
class CsvCorpus(interfaces.CorpusABC):
"""Corpus in CSV format.
Notes
-----
The CSV delimiter, headers etc. are guessed automatically based on the file content.
All row values are expected to be ints/floats.
"""
def __init__(self, fname, labels):
"""
Parameters
----------
fname : str
Path to corpus.
labels : bool
If True - ignore first column (class labels).
"""
logger.info("loading corpus from %s", fname)
self.fname = fname
self.length = None
self.labels = labels
# load the first few lines, to guess the CSV dialect
with utils.open(self.fname, 'rb') as f:
head = ''.join(itertools.islice(f, 5))
self.headers = csv.Sniffer().has_header(head)
self.dialect = csv.Sniffer().sniff(head)
logger.info("sniffed CSV delimiter=%r, headers=%s", self.dialect.delimiter, self.headers)
def __iter__(self):
"""Iterate over the corpus, returning one BoW vector at a time.
Yields
------
list of (int, float)
Document in BoW format.
"""
with utils.open(self.fname, 'rb') as f:
reader = csv.reader(f, self.dialect)
if self.headers:
next(reader) # skip the headers
line_no = -1
for line_no, line in enumerate(reader):
if self.labels:
line.pop(0) # ignore the first column = class label
yield list(enumerate(float(x) for x in line))
self.length = line_no + 1 # store the total number of CSV rows = documents
|
import sys
import timeit
from random import sample
from pygal import CHARTS, CHARTS_BY_NAME
from pygal.etree import etree
from pygal.test import adapt
sizes = (1, 5, 10, 50, 100, 500, 1000)
rands = list(zip(
sample(range(1000), 1000),
sample(range(1000), 1000)))
def perf(chart_name, length, series):
chart = CHARTS_BY_NAME.get(chart_name)()
for i in range(series):
chart.add('s %d' % i, adapt(chart, rands[:length]))
return chart
if '--bench' in sys.argv:
bench = True
def prt(s):
pass
def cht(s):
sys.stdout.write(s)
else:
bench = False
def prt(s):
sys.stdout.write(s)
sys.stdout.flush()
def cht(s):
pass
if '--profile' in sys.argv:
import cProfile
c = perf('Line', 500, 500)
cProfile.run("c.render()")
sys.exit(0)
if '--mem' in sys.argv:
_TWO_20 = float(2 ** 20)
import os
import psutil
import linecache
pid = os.getpid()
process = psutil.Process(pid)
import gc
gc.set_debug(
gc.DEBUG_UNCOLLECTABLE | gc.DEBUG_INSTANCES | gc.DEBUG_OBJECTS)
def print_mem():
mem = process.get_memory_info()[0] / _TWO_20
f = sys._getframe(1)
line = linecache.getline(
f.f_code.co_filename, f.f_lineno - 1).replace('\n', '')
print('%s:%d \t| %.6f \t| %s' % (
f.f_code.co_name, f.f_lineno, mem, line))
c = perf('Line', 100, 500)
print_mem()
a = c.render()
print_mem()
import objgraph
objgraph.show_refs([c], filename='sample-graph.png')
gc.collect()
print_mem()
print(gc.garbage)
print_mem()
del a
print_mem()
del c
print_mem()
sys.exit(0)
charts = CHARTS if '--all' in sys.argv else ('Line',)
for impl in ['lxml', 'etree']:
if impl == 'lxml':
etree.to_lxml()
else:
etree.to_etree()
for chart in charts:
prt('%s\n' % chart)
prt('s\\l\t1\t10\t100')
v = sys.version.split(' ')[0]
if hasattr(sys, 'subversion'):
v += ' ' + sys.subversion[0]
v += ' ' + impl
if len(charts) > 1:
v += ' ' + chart
cht('bench.add("%s", ' % v)
diag = []
for series in sizes:
prt('\n%d\t' % series)
for length in sizes:
times = []
if series == length or not bench:
time = timeit.timeit(
"c.render()",
setup="from __main__ import perf; "
"c = perf('%s', %d, %d)" % (
chart, length, series),
number=10)
if series == length:
diag.append(1000 * time)
prt('%d\t' % (1000 * time))
cht(repr(diag))
cht(')\n')
prt('\n')
|
from homeassistant import config_entries, core
from homeassistant.const import CONF_HOST, CONF_TOKEN
from homeassistant.helpers import device_registry as dr
from .config_flow import CONF_FLOW_TYPE, CONF_GATEWAY
from .const import DOMAIN
from .gateway import ConnectXiaomiGateway
GATEWAY_PLATFORMS = ["alarm_control_panel", "sensor", "light"]
async def async_setup(hass: core.HomeAssistant, config: dict):
"""Set up the Xiaomi Miio component."""
return True
async def async_setup_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up the Xiaomi Miio components from a config entry."""
hass.data[DOMAIN] = {}
if entry.data[CONF_FLOW_TYPE] == CONF_GATEWAY:
if not await async_setup_gateway_entry(hass, entry):
return False
return True
async def async_setup_gateway_entry(
hass: core.HomeAssistant, entry: config_entries.ConfigEntry
):
"""Set up the Xiaomi Gateway component from a config entry."""
host = entry.data[CONF_HOST]
token = entry.data[CONF_TOKEN]
name = entry.title
gateway_id = entry.unique_id
# For backwards compat
if entry.unique_id.endswith("-gateway"):
hass.config_entries.async_update_entry(entry, unique_id=entry.data["mac"])
# Connect to gateway
gateway = ConnectXiaomiGateway(hass)
if not await gateway.async_connect_gateway(host, token):
return False
gateway_info = gateway.gateway_info
hass.data[DOMAIN][entry.entry_id] = gateway.gateway_device
gateway_model = f"{gateway_info.model}-{gateway_info.hardware_version}"
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
connections={(dr.CONNECTION_NETWORK_MAC, gateway_info.mac_address)},
identifiers={(DOMAIN, gateway_id)},
manufacturer="Xiaomi",
name=name,
model=gateway_model,
sw_version=gateway_info.firmware_version,
)
for component in GATEWAY_PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
|
from homeassistant.components.cover import (
ATTR_POSITION,
DEVICE_CLASS_DAMPER,
SUPPORT_CLOSE,
SUPPORT_OPEN,
SUPPORT_SET_POSITION,
CoverEntity,
)
from .const import (
ADVANTAGE_AIR_STATE_CLOSE,
ADVANTAGE_AIR_STATE_OPEN,
DOMAIN as ADVANTAGE_AIR_DOMAIN,
)
from .entity import AdvantageAirEntity
PARALLEL_UPDATES = 0
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up AdvantageAir cover platform."""
instance = hass.data[ADVANTAGE_AIR_DOMAIN][config_entry.entry_id]
entities = []
for ac_key, ac_device in instance["coordinator"].data["aircons"].items():
for zone_key, zone in ac_device["zones"].items():
# Only add zone vent controls when zone in vent control mode.
if zone["type"] == 0:
entities.append(AdvantageAirZoneVent(instance, ac_key, zone_key))
async_add_entities(entities)
class AdvantageAirZoneVent(AdvantageAirEntity, CoverEntity):
"""Advantage Air Cover Class."""
@property
def name(self):
"""Return the name."""
return f'{self._zone["name"]}'
@property
def unique_id(self):
"""Return a unique id."""
return f'{self.coordinator.data["system"]["rid"]}-{self.ac_key}-{self.zone_key}'
@property
def device_class(self):
"""Return the device class of the vent."""
return DEVICE_CLASS_DAMPER
@property
def supported_features(self):
"""Return the supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_SET_POSITION
@property
def is_closed(self):
"""Return if vent is fully closed."""
return self._zone["state"] == ADVANTAGE_AIR_STATE_CLOSE
@property
def current_cover_position(self):
"""Return vents current position as a percentage."""
if self._zone["state"] == ADVANTAGE_AIR_STATE_OPEN:
return self._zone["value"]
return 0
async def async_open_cover(self, **kwargs):
"""Fully open zone vent."""
await self.async_change(
{
self.ac_key: {
"zones": {
self.zone_key: {"state": ADVANTAGE_AIR_STATE_OPEN, "value": 100}
}
}
}
)
async def async_close_cover(self, **kwargs):
"""Fully close zone vent."""
await self.async_change(
{
self.ac_key: {
"zones": {self.zone_key: {"state": ADVANTAGE_AIR_STATE_CLOSE}}
}
}
)
async def async_set_cover_position(self, **kwargs):
"""Change vent position."""
position = round(kwargs[ATTR_POSITION] / 5) * 5
if position == 0:
await self.async_change(
{
self.ac_key: {
"zones": {self.zone_key: {"state": ADVANTAGE_AIR_STATE_CLOSE}}
}
}
)
else:
await self.async_change(
{
self.ac_key: {
"zones": {
self.zone_key: {
"state": ADVANTAGE_AIR_STATE_OPEN,
"value": position,
}
}
}
}
)
|
import pytest
from homeassistant.components import frontend
from homeassistant.components.lovelace import const, dashboard
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import (
assert_setup_component,
async_capture_events,
get_system_health_info,
)
async def test_lovelace_from_storage(hass, hass_ws_client, hass_storage):
"""Test we load lovelace config from storage."""
assert await async_setup_component(hass, "lovelace", {})
assert hass.data[frontend.DATA_PANELS]["lovelace"].config == {"mode": "storage"}
client = await hass_ws_client(hass)
# Fetch data
await client.send_json({"id": 5, "type": "lovelace/config"})
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "config_not_found"
# Store new config
events = async_capture_events(hass, const.EVENT_LOVELACE_UPDATED)
await client.send_json(
{"id": 6, "type": "lovelace/config/save", "config": {"yo": "hello"}}
)
response = await client.receive_json()
assert response["success"]
assert hass_storage[dashboard.CONFIG_STORAGE_KEY_DEFAULT]["data"] == {
"config": {"yo": "hello"}
}
assert len(events) == 1
# Load new config
await client.send_json({"id": 7, "type": "lovelace/config"})
response = await client.receive_json()
assert response["success"]
assert response["result"] == {"yo": "hello"}
# Test with safe mode
hass.config.safe_mode = True
await client.send_json({"id": 8, "type": "lovelace/config"})
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "config_not_found"
await client.send_json(
{"id": 9, "type": "lovelace/config/save", "config": {"yo": "hello"}}
)
response = await client.receive_json()
assert not response["success"]
await client.send_json({"id": 10, "type": "lovelace/config/delete"})
response = await client.receive_json()
assert not response["success"]
async def test_lovelace_from_storage_save_before_load(
hass, hass_ws_client, hass_storage
):
"""Test we can load lovelace config from storage."""
assert await async_setup_component(hass, "lovelace", {})
client = await hass_ws_client(hass)
# Store new config
await client.send_json(
{"id": 6, "type": "lovelace/config/save", "config": {"yo": "hello"}}
)
response = await client.receive_json()
assert response["success"]
assert hass_storage[dashboard.CONFIG_STORAGE_KEY_DEFAULT]["data"] == {
"config": {"yo": "hello"}
}
async def test_lovelace_from_storage_delete(hass, hass_ws_client, hass_storage):
"""Test we delete lovelace config from storage."""
assert await async_setup_component(hass, "lovelace", {})
client = await hass_ws_client(hass)
# Store new config
await client.send_json(
{"id": 6, "type": "lovelace/config/save", "config": {"yo": "hello"}}
)
response = await client.receive_json()
assert response["success"]
assert hass_storage[dashboard.CONFIG_STORAGE_KEY_DEFAULT]["data"] == {
"config": {"yo": "hello"}
}
# Delete config
await client.send_json({"id": 7, "type": "lovelace/config/delete"})
response = await client.receive_json()
assert response["success"]
assert dashboard.CONFIG_STORAGE_KEY_DEFAULT not in hass_storage
# Fetch data
await client.send_json({"id": 8, "type": "lovelace/config"})
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "config_not_found"
async def test_lovelace_from_yaml(hass, hass_ws_client):
"""Test we load lovelace config from yaml."""
assert await async_setup_component(hass, "lovelace", {"lovelace": {"mode": "YAML"}})
assert hass.data[frontend.DATA_PANELS]["lovelace"].config == {"mode": "yaml"}
client = await hass_ws_client(hass)
# Fetch data
await client.send_json({"id": 5, "type": "lovelace/config"})
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "config_not_found"
# Store new config not allowed
await client.send_json(
{"id": 6, "type": "lovelace/config/save", "config": {"yo": "hello"}}
)
response = await client.receive_json()
assert not response["success"]
# Patch data
events = async_capture_events(hass, const.EVENT_LOVELACE_UPDATED)
with patch(
"homeassistant.components.lovelace.dashboard.load_yaml",
return_value={"hello": "yo"},
):
await client.send_json({"id": 7, "type": "lovelace/config"})
response = await client.receive_json()
assert response["success"]
assert response["result"] == {"hello": "yo"}
assert len(events) == 0
# Fake new data to see we fire event
with patch(
"homeassistant.components.lovelace.dashboard.load_yaml",
return_value={"hello": "yo2"},
):
await client.send_json({"id": 8, "type": "lovelace/config", "force": True})
response = await client.receive_json()
assert response["success"]
assert response["result"] == {"hello": "yo2"}
assert len(events) == 1
async def test_system_health_info_autogen(hass):
"""Test system health info endpoint."""
assert await async_setup_component(hass, "lovelace", {})
info = await get_system_health_info(hass, "lovelace")
assert info == {"dashboards": 1, "mode": "auto-gen", "resources": 0}
async def test_system_health_info_storage(hass, hass_storage):
"""Test system health info endpoint."""
hass_storage[dashboard.CONFIG_STORAGE_KEY_DEFAULT] = {
"key": "lovelace",
"version": 1,
"data": {"config": {"resources": [], "views": []}},
}
assert await async_setup_component(hass, "lovelace", {})
info = await get_system_health_info(hass, "lovelace")
assert info == {"dashboards": 1, "mode": "storage", "resources": 0, "views": 0}
async def test_system_health_info_yaml(hass):
"""Test system health info endpoint."""
assert await async_setup_component(hass, "lovelace", {"lovelace": {"mode": "YAML"}})
with patch(
"homeassistant.components.lovelace.dashboard.load_yaml",
return_value={"views": [{"cards": []}]},
):
info = await get_system_health_info(hass, "lovelace")
assert info == {"dashboards": 1, "mode": "yaml", "resources": 0, "views": 1}
async def test_system_health_info_yaml_not_found(hass):
"""Test system health info endpoint."""
assert await async_setup_component(hass, "lovelace", {"lovelace": {"mode": "YAML"}})
info = await get_system_health_info(hass, "lovelace")
assert info == {
"dashboards": 1,
"mode": "yaml",
"error": "{} not found".format(hass.config.path("ui-lovelace.yaml")),
"resources": 0,
}
@pytest.mark.parametrize("url_path", ("test-panel", "test-panel-no-sidebar"))
async def test_dashboard_from_yaml(hass, hass_ws_client, url_path):
"""Test we load lovelace dashboard config from yaml."""
assert await async_setup_component(
hass,
"lovelace",
{
"lovelace": {
"dashboards": {
"test-panel": {
"mode": "yaml",
"filename": "bla.yaml",
"title": "Test Panel",
"icon": "mdi:test-icon",
"show_in_sidebar": False,
"require_admin": True,
},
"test-panel-no-sidebar": {
"title": "Title No Sidebar",
"mode": "yaml",
"filename": "bla2.yaml",
},
}
}
},
)
assert hass.data[frontend.DATA_PANELS]["test-panel"].config == {"mode": "yaml"}
assert hass.data[frontend.DATA_PANELS]["test-panel-no-sidebar"].config == {
"mode": "yaml"
}
client = await hass_ws_client(hass)
# List dashboards
await client.send_json({"id": 4, "type": "lovelace/dashboards/list"})
response = await client.receive_json()
assert response["success"]
assert len(response["result"]) == 2
with_sb, without_sb = response["result"]
assert with_sb["mode"] == "yaml"
assert with_sb["filename"] == "bla.yaml"
assert with_sb["title"] == "Test Panel"
assert with_sb["icon"] == "mdi:test-icon"
assert with_sb["show_in_sidebar"] is False
assert with_sb["require_admin"] is True
assert with_sb["url_path"] == "test-panel"
assert without_sb["mode"] == "yaml"
assert without_sb["filename"] == "bla2.yaml"
assert without_sb["show_in_sidebar"] is True
assert without_sb["require_admin"] is False
assert without_sb["url_path"] == "test-panel-no-sidebar"
# Fetch data
await client.send_json({"id": 5, "type": "lovelace/config", "url_path": url_path})
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "config_not_found"
# Store new config not allowed
await client.send_json(
{
"id": 6,
"type": "lovelace/config/save",
"config": {"yo": "hello"},
"url_path": url_path,
}
)
response = await client.receive_json()
assert not response["success"]
# Patch data
events = async_capture_events(hass, const.EVENT_LOVELACE_UPDATED)
with patch(
"homeassistant.components.lovelace.dashboard.load_yaml",
return_value={"hello": "yo"},
):
await client.send_json(
{"id": 7, "type": "lovelace/config", "url_path": url_path}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == {"hello": "yo"}
assert len(events) == 0
# Fake new data to see we fire event
with patch(
"homeassistant.components.lovelace.dashboard.load_yaml",
return_value={"hello": "yo2"},
):
await client.send_json(
{"id": 8, "type": "lovelace/config", "force": True, "url_path": url_path}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == {"hello": "yo2"}
assert len(events) == 1
async def test_wrong_key_dashboard_from_yaml(hass):
"""Test we don't load lovelace dashboard without hyphen config from yaml."""
with assert_setup_component(0):
assert not await async_setup_component(
hass,
"lovelace",
{
"lovelace": {
"dashboards": {
"testpanel": {
"mode": "yaml",
"filename": "bla.yaml",
"title": "Test Panel",
"icon": "mdi:test-icon",
"show_in_sidebar": False,
"require_admin": True,
}
}
}
},
)
async def test_storage_dashboards(hass, hass_ws_client, hass_storage):
"""Test we load lovelace config from storage."""
assert await async_setup_component(hass, "lovelace", {})
assert hass.data[frontend.DATA_PANELS]["lovelace"].config == {"mode": "storage"}
client = await hass_ws_client(hass)
# Fetch data
await client.send_json({"id": 5, "type": "lovelace/dashboards/list"})
response = await client.receive_json()
assert response["success"]
assert response["result"] == []
# Add a wrong dashboard
await client.send_json(
{
"id": 6,
"type": "lovelace/dashboards/create",
"url_path": "path",
"title": "Test path without hyphen",
}
)
response = await client.receive_json()
assert not response["success"]
# Add a dashboard
await client.send_json(
{
"id": 7,
"type": "lovelace/dashboards/create",
"url_path": "created-url-path",
"require_admin": True,
"title": "New Title",
"icon": "mdi:map",
}
)
response = await client.receive_json()
assert response["success"]
assert response["result"]["require_admin"] is True
assert response["result"]["title"] == "New Title"
assert response["result"]["icon"] == "mdi:map"
dashboard_id = response["result"]["id"]
assert "created-url-path" in hass.data[frontend.DATA_PANELS]
await client.send_json({"id": 8, "type": "lovelace/dashboards/list"})
response = await client.receive_json()
assert response["success"]
assert len(response["result"]) == 1
assert response["result"][0]["mode"] == "storage"
assert response["result"][0]["title"] == "New Title"
assert response["result"][0]["icon"] == "mdi:map"
assert response["result"][0]["show_in_sidebar"] is True
assert response["result"][0]["require_admin"] is True
# Fetch config
await client.send_json(
{"id": 9, "type": "lovelace/config", "url_path": "created-url-path"}
)
response = await client.receive_json()
assert not response["success"]
assert response["error"]["code"] == "config_not_found"
# Store new config
events = async_capture_events(hass, const.EVENT_LOVELACE_UPDATED)
await client.send_json(
{
"id": 10,
"type": "lovelace/config/save",
"url_path": "created-url-path",
"config": {"yo": "hello"},
}
)
response = await client.receive_json()
assert response["success"]
assert hass_storage[dashboard.CONFIG_STORAGE_KEY.format(dashboard_id)]["data"] == {
"config": {"yo": "hello"}
}
assert len(events) == 1
assert events[0].data["url_path"] == "created-url-path"
await client.send_json(
{"id": 11, "type": "lovelace/config", "url_path": "created-url-path"}
)
response = await client.receive_json()
assert response["success"]
assert response["result"] == {"yo": "hello"}
# Update a dashboard
await client.send_json(
{
"id": 12,
"type": "lovelace/dashboards/update",
"dashboard_id": dashboard_id,
"require_admin": False,
"icon": "mdi:updated",
"show_in_sidebar": False,
"title": "Updated Title",
}
)
response = await client.receive_json()
assert response["success"]
assert response["result"]["mode"] == "storage"
assert response["result"]["url_path"] == "created-url-path"
assert response["result"]["title"] == "Updated Title"
assert response["result"]["icon"] == "mdi:updated"
assert response["result"]["show_in_sidebar"] is False
assert response["result"]["require_admin"] is False
# List dashboards again and make sure we see latest config
await client.send_json({"id": 13, "type": "lovelace/dashboards/list"})
response = await client.receive_json()
assert response["success"]
assert len(response["result"]) == 1
assert response["result"][0]["mode"] == "storage"
assert response["result"][0]["url_path"] == "created-url-path"
assert response["result"][0]["title"] == "Updated Title"
assert response["result"][0]["icon"] == "mdi:updated"
assert response["result"][0]["show_in_sidebar"] is False
assert response["result"][0]["require_admin"] is False
# Add dashboard with existing url path
await client.send_json(
{"id": 14, "type": "lovelace/dashboards/create", "url_path": "created-url-path"}
)
response = await client.receive_json()
assert not response["success"]
# Delete dashboards
await client.send_json(
{"id": 15, "type": "lovelace/dashboards/delete", "dashboard_id": dashboard_id}
)
response = await client.receive_json()
assert response["success"]
assert "created-url-path" not in hass.data[frontend.DATA_PANELS]
assert dashboard.CONFIG_STORAGE_KEY.format(dashboard_id) not in hass_storage
async def test_storage_dashboard_migrate(hass, hass_ws_client, hass_storage):
"""Test changing url path from storage config."""
hass_storage[dashboard.DASHBOARDS_STORAGE_KEY] = {
"key": "lovelace_dashboards",
"version": 1,
"data": {
"items": [
{
"icon": "mdi:tools",
"id": "tools",
"mode": "storage",
"require_admin": True,
"show_in_sidebar": True,
"title": "Tools",
"url_path": "tools",
},
{
"icon": "mdi:tools",
"id": "tools2",
"mode": "storage",
"require_admin": True,
"show_in_sidebar": True,
"title": "Tools",
"url_path": "dashboard-tools",
},
]
},
}
assert await async_setup_component(hass, "lovelace", {})
client = await hass_ws_client(hass)
# Fetch data
await client.send_json({"id": 5, "type": "lovelace/dashboards/list"})
response = await client.receive_json()
assert response["success"]
without_hyphen, with_hyphen = response["result"]
assert without_hyphen["icon"] == "mdi:tools"
assert without_hyphen["id"] == "tools"
assert without_hyphen["mode"] == "storage"
assert without_hyphen["require_admin"]
assert without_hyphen["show_in_sidebar"]
assert without_hyphen["title"] == "Tools"
assert without_hyphen["url_path"] == "lovelace-tools"
assert (
with_hyphen
== hass_storage[dashboard.DASHBOARDS_STORAGE_KEY]["data"]["items"][1]
)
async def test_websocket_list_dashboards(hass, hass_ws_client):
"""Test listing dashboards both storage + YAML."""
assert await async_setup_component(
hass,
"lovelace",
{
"lovelace": {
"dashboards": {
"test-panel-no-sidebar": {
"title": "Test YAML",
"mode": "yaml",
"filename": "bla.yaml",
},
}
}
},
)
client = await hass_ws_client(hass)
# Create a storage dashboard
await client.send_json(
{
"id": 6,
"type": "lovelace/dashboards/create",
"url_path": "created-url-path",
"title": "Test Storage",
}
)
response = await client.receive_json()
assert response["success"]
# List dashboards
await client.send_json({"id": 8, "type": "lovelace/dashboards/list"})
response = await client.receive_json()
assert response["success"]
assert len(response["result"]) == 2
with_sb, without_sb = response["result"]
assert with_sb["mode"] == "yaml"
assert with_sb["title"] == "Test YAML"
assert with_sb["filename"] == "bla.yaml"
assert with_sb["url_path"] == "test-panel-no-sidebar"
assert without_sb["mode"] == "storage"
assert without_sb["title"] == "Test Storage"
assert without_sb["url_path"] == "created-url-path"
|
from typing import Any, Optional, cast
from homeassistant.core import HomeAssistant
from homeassistant.helpers import config_entry_oauth2_flow
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from . import config_flow
def register_oauth2_implementations(
hass: HomeAssistant, client_id: str, client_secret: str
) -> None:
"""Register Toon OAuth2 implementations."""
config_flow.ToonFlowHandler.async_register_implementation(
hass,
ToonLocalOAuth2Implementation(
hass,
client_id=client_id,
client_secret=client_secret,
name="Eneco Toon",
tenant_id="eneco",
issuer="identity.toon.eu",
),
)
config_flow.ToonFlowHandler.async_register_implementation(
hass,
ToonLocalOAuth2Implementation(
hass,
client_id=client_id,
client_secret=client_secret,
name="Engie Electrabel Boxx",
tenant_id="electrabel",
),
)
config_flow.ToonFlowHandler.async_register_implementation(
hass,
ToonLocalOAuth2Implementation(
hass,
client_id=client_id,
client_secret=client_secret,
name="Viesgo",
tenant_id="viesgo",
),
)
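# Usage sketch (hypothetical credentials, for illustration only): a setup step
# would typically call this once, after which each tenant above is available
# as a selectable OAuth2 implementation in the Toon config flow, e.g.:
#
#     register_oauth2_implementations(
#         hass, client_id="example-client-id", client_secret="example-secret"
#     )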
class ToonLocalOAuth2Implementation(config_entry_oauth2_flow.LocalOAuth2Implementation):
"""Local OAuth2 implementation for Toon."""
def __init__(
self,
hass: HomeAssistant,
client_id: str,
client_secret: str,
name: str,
tenant_id: str,
issuer: Optional[str] = None,
):
"""Local Toon Oauth Implementation."""
self._name = name
self.tenant_id = tenant_id
self.issuer = issuer
super().__init__(
hass=hass,
domain=tenant_id,
client_id=client_id,
client_secret=client_secret,
authorize_url="https://api.toon.eu/authorize",
token_url="https://api.toon.eu/token",
)
@property
def name(self) -> str:
"""Name of the implementation."""
return f"{self._name} via Configuration.yaml"
@property
def extra_authorize_data(self) -> dict:
"""Extra data that needs to be appended to the authorize url."""
data = {"tenant_id": self.tenant_id}
if self.issuer is not None:
data["issuer"] = self.issuer
return data
async def async_resolve_external_data(self, external_data: Any) -> dict:
"""Initialize local Toon auth implementation."""
data = {
"grant_type": "authorization_code",
"code": external_data,
"redirect_uri": self.redirect_uri,
"tenant_id": self.tenant_id,
}
if self.issuer is not None:
data["issuer"] = self.issuer
return await self._token_request(data)
async def _async_refresh_token(self, token: dict) -> dict:
"""Refresh tokens."""
data = {
"grant_type": "refresh_token",
"client_id": self.client_id,
"refresh_token": token["refresh_token"],
"tenant_id": self.tenant_id,
}
new_token = await self._token_request(data)
return {**token, **new_token}
async def _token_request(self, data: dict) -> dict:
"""Make a token request."""
session = async_get_clientsession(self.hass)
headers = {}
data["client_id"] = self.client_id
data["tenant_id"] = self.tenant_id
if self.client_secret is not None:
data["client_secret"] = self.client_secret
if self.issuer is not None:
data["issuer"] = self.issuer
headers["issuer"] = self.issuer
resp = await session.post(self.token_url, data=data, headers=headers)
resp.raise_for_status()
resp_json = cast(dict, await resp.json())
# The Toon API returns "expires_in" as a string for some tenants.
# This is not according to OAuth specifications.
resp_json["expires_in"] = float(resp_json["expires_in"])
return resp_json
|
from __future__ import print_function
__docformat__ = "restructuredtext en"
from warnings import warn
warn('lgc.optparser module is deprecated, use lgc.clcommands instead', DeprecationWarning,
stacklevel=2)
import sys
import optparse
class OptionParser(optparse.OptionParser):
def __init__(self, *args, **kwargs):
optparse.OptionParser.__init__(self, *args, **kwargs)
self._commands = {}
self.min_args, self.max_args = 0, 1
def add_command(self, name, mod_or_funcs, help=''):
"""name of the command, name of module or tuple of functions
(run, add_options)
"""
assert isinstance(mod_or_funcs, str) or isinstance(mod_or_funcs, tuple), \
"mod_or_funcs has to be a module name or a tuple of functions"
self._commands[name] = (mod_or_funcs, help)
def print_main_help(self):
optparse.OptionParser.print_help(self)
print('\ncommands:')
for cmdname, (_, help) in self._commands.items():
print('% 10s - %s' % (cmdname, help))
def parse_command(self, args):
if len(args) == 0:
self.print_main_help()
sys.exit(1)
cmd = args[0]
args = args[1:]
if cmd not in self._commands:
if cmd in ('-h', '--help'):
self.print_main_help()
sys.exit(0)
elif self.version is not None and cmd == "--version":
self.print_version()
sys.exit(0)
self.error('unknown command')
self.prog = '%s %s' % (self.prog, cmd)
mod_or_f, help = self._commands[cmd]
# optparse inserts self.description between usage and options help
self.description = help
        if isinstance(mod_or_f, str):
            # exec() does not reliably bind local names on Python 3, so import
            # the command module explicitly instead.
            module = __import__(mod_or_f, fromlist=['run', 'add_options'])
            run, add_options = module.run, module.add_options
        else:
            run, add_options = mod_or_f
add_options(self)
(options, args) = self.parse_args(args)
if not (self.min_args <= len(args) <= self.max_args):
self.error('incorrect number of arguments')
return run, options, args
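# Usage sketch (hypothetical command names and modules, for illustration only):
#
#     parser = OptionParser(usage='%prog COMMAND [options] <args>...')
#     parser.add_command('build', 'mypkg.commands.build', help='build the project')
#     parser.add_command('clean', (run_clean, add_clean_options), help='remove artifacts')
#     run, options, args = parser.parse_command(sys.argv[1:])
#     run(options, args)
#
# How the returned ``run`` callable consumes ``options`` and ``args`` is left
# to the command module; this parser only dispatches to it.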
|
from django.db.models import Count
from django.shortcuts import get_object_or_404
from django.views.generic.list import BaseListView
from django.views.generic.list import ListView
from zinnia.models.author import Author
from zinnia.settings import PAGINATION
from zinnia.views.mixins.prefetch_related import PrefetchCategoriesAuthorsMixin
from zinnia.views.mixins.templates import EntryQuerysetTemplateResponseMixin
class AuthorList(ListView):
"""
View returning a list of all published authors.
"""
def get_queryset(self):
"""
Return a queryset of published authors,
        with a count of their published entries.
"""
return Author.published.all().annotate(
count_entries_published=Count('entries'))
class BaseAuthorDetail(object):
"""
Mixin providing the behavior of the author detail view,
by returning in the context the current author and a
    queryset containing the entries written by the author.
"""
def get_queryset(self):
"""
Retrieve the author by his username and
build a queryset of his published entries.
"""
self.author = get_object_or_404(
Author, **{Author.USERNAME_FIELD: self.kwargs['username']})
return self.author.entries_published()
def get_context_data(self, **kwargs):
"""
Add the current author in context.
"""
context = super(BaseAuthorDetail, self).get_context_data(**kwargs)
context['author'] = self.author
return context
class AuthorDetail(EntryQuerysetTemplateResponseMixin,
PrefetchCategoriesAuthorsMixin,
BaseAuthorDetail,
BaseListView):
"""
    Detailed view for an Author combining these mixins:
- EntryQuerysetTemplateResponseMixin to provide custom templates
for the author display page.
    - PrefetchCategoriesAuthorsMixin to prefetch the related Categories
      and Authors belonging to the entry list.
- BaseAuthorDetail to provide the behavior of the view.
- BaseListView to implement the ListView.
"""
model_type = 'author'
paginate_by = PAGINATION
def get_model_name(self):
"""
The model name is the author's username.
"""
return self.author.get_username()
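# Hypothetical URL configuration (not part of this module, for illustration
# only); ``AuthorDetail`` expects a ``username`` keyword argument in the URL:
#
#     from django.conf.urls import url
#
#     urlpatterns = [
#         url(r'^authors/$', AuthorList.as_view(), name='author_list'),
#         url(r'^authors/(?P<username>[\w.@+-]+)/$',
#             AuthorDetail.as_view(), name='author_detail'),
#     ]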
|
import abc
import functools
import typing
from pathlib import Path
import dill
import matchzoo as mz
def validate_context(func):
"""Validate context in the preprocessor."""
@functools.wraps(func)
def transform_wrapper(self, *args, **kwargs):
if not self.context:
raise ValueError('Please call `fit` before calling `transform`.')
return func(self, *args, **kwargs)
return transform_wrapper
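# Illustrative sketch (hypothetical subclass): a concrete preprocessor guards
# its ``transform`` with this decorator so that calling ``transform`` before
# ``fit`` fails fast with a clear error:
#
#     class MyPreprocessor(BasePreprocessor):
#         @validate_context
#         def transform(self, data_pack, verbose=1):
#             ...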
class BasePreprocessor(metaclass=abc.ABCMeta):
"""
    :class:`BasePreprocessor` to handle input data.
A preprocessor should be used in two steps. First, `fit`, then,
`transform`. `fit` collects information into `context`, which includes
everything the preprocessor needs to `transform` together with other
useful information for later use. `fit` will only change the
preprocessor's inner state but not the input data. In contrast,
`transform` returns a modified copy of the input data without changing
the preprocessor's inner state.
"""
DATA_FILENAME = 'preprocessor.dill'
def __init__(self):
"""Initialization."""
self._context = {}
@property
def context(self):
"""Return context."""
return self._context
@abc.abstractmethod
def fit(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'BasePreprocessor':
"""
Fit parameters on input data.
        This method is an abstract base method and needs to be
        implemented in the child class.
        This method is expected to return the preprocessor itself,
        so that calls can be chained.
        :param data_pack: :class:`DataPack` object to be fitted.
:param verbose: Verbosity.
"""
@abc.abstractmethod
def transform(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'mz.DataPack':
"""
        Transform input data into the expected form.
        This method is an abstract base method and needs to be
        implemented in the child class.
        :param data_pack: :class:`DataPack` object to be transformed,
            or a list of text-left, text-right tuples.
        :param verbose: Verbosity.
"""
def fit_transform(
self,
data_pack: 'mz.DataPack',
verbose: int = 1
) -> 'mz.DataPack':
"""
Call fit-transform.
:param data_pack: :class:`DataPack` object to be processed.
:param verbose: Verbosity.
"""
return self.fit(data_pack, verbose=verbose) \
.transform(data_pack, verbose=verbose)
def save(self, dirpath: typing.Union[str, Path]):
"""
        Save the preprocessor object.
        A saved preprocessor is represented as a directory holding the
        `context` object (parameters fitted on training data), serialized
        with `dill`.
        :param dirpath: directory path of the saved preprocessor.
"""
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(self.DATA_FILENAME)
if data_file_path.exists():
raise FileExistsError(
                f'{data_file_path} already exists, failed to save.')
elif not dirpath.exists():
dirpath.mkdir()
        with open(data_file_path, mode='wb') as data_file:
            dill.dump(self, data_file)
@classmethod
def _default_units(cls) -> list:
"""Prepare needed process units."""
return [
mz.preprocessors.units.tokenize.Tokenize(),
mz.preprocessors.units.lowercase.Lowercase(),
mz.preprocessors.units.punc_removal.PuncRemoval(),
]
def load_preprocessor(dirpath: typing.Union[str, Path]) -> 'BasePreprocessor':
"""
Load the fitted `context`. The reverse function of :meth:`save`.
:param dirpath: directory path of the saved model.
    :return: a :class:`BasePreprocessor` instance.
"""
dirpath = Path(dirpath)
data_file_path = dirpath.joinpath(BasePreprocessor.DATA_FILENAME)
    with open(data_file_path, 'rb') as data_file:
        return dill.load(data_file)
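# End-to-end usage sketch (assumes a concrete subclass such as
# ``mz.preprocessors.BasicPreprocessor``; the directory name is arbitrary):
#
#     preprocessor = mz.preprocessors.BasicPreprocessor()
#     train_processed = preprocessor.fit_transform(train_pack)
#     preprocessor.save('my_preprocessor')
#     ...
#     restored = load_preprocessor('my_preprocessor')
#     test_processed = restored.transform(test_pack)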
|