import io
import os
from setuptools import setup, find_packages
def _get_version():
curr_dir = os.path.dirname(os.path.abspath(__file__))
with open(os.path.join(curr_dir, 'smart_open', 'version.py')) as fin:
line = fin.readline().strip()
parts = line.split(' ')
assert len(parts) == 3
assert parts[0] == '__version__'
assert parts[1] == '='
return parts[2].strip('\'"')
#
# We cannot do "from smart_open.version import __version__" because that will
# require the dependencies for smart_open to already be in place, and that is
# not necessarily the case when running setup.py for the first time.
#
__version__ = _get_version()
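#
# Illustrative sketch (the version number below is an assumption, not taken
# from this repo) of the single line _get_version() expects to find in
# smart_open/version.py:
#
#     __version__ = '2.0.0'
#
# Splitting that line on spaces yields ['__version__', '=', "'2.0.0'"], which
# is exactly what the three asserts above check before the quotes are stripped.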
def read(fname):
return io.open(os.path.join(os.path.dirname(__file__), fname), encoding='utf-8').read()
aws_deps = ['boto3']
gcp_deps = ['google-cloud-storage']
azure_deps = ['azure-storage-blob', 'azure-common', 'azure-core']
http_deps = ['requests']
all_deps = aws_deps + gcp_deps + azure_deps + http_deps
tests_require = all_deps + [
'mock',
'moto[server]==1.3.14', # Older versions of moto appear broken
'pathlib2',
'responses',
'boto3',
'paramiko',
'parameterizedtestcase',
'pytest',
'pytest-rerunfailures'
]
setup(
name='smart_open',
version=__version__,
description='Utils for streaming large files (S3, HDFS, GCS, Azure Blob Storage, gzip, bz2...)',
long_description=read('README.rst'),
packages=find_packages(),
package_data={
"smart_open.tests": ["test_data/*"],
},
author='Radim Rehurek',
author_email='[email protected]',
maintainer='Radim Rehurek',
maintainer_email='[email protected]',
url='https://github.com/piskvorky/smart_open',
download_url='http://pypi.python.org/pypi/smart_open',
keywords='file streaming, s3, hdfs, gcs, azure blob storage',
license='MIT',
platforms='any',
tests_require=tests_require,
extras_require={
'test': tests_require,
's3': aws_deps,
'gcp': gcp_deps,
'azure': azure_deps,
'all': all_deps,
'http': http_deps,
'webhdfs': http_deps,
},
python_requires=">=3.6",
test_suite="smart_open.tests",
classifiers=[
'Development Status :: 4 - Beta',
'Environment :: Console',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Topic :: System :: Distributed Computing',
'Topic :: Database :: Front-Ends',
],
)
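#
# The extras_require table above maps pip "extras" to the optional backends.
# A brief usage sketch (extra names taken from the table; the commands are
# standard pip syntax, not part of this file):
#
#     pip install smart_open[s3]      # pulls in boto3
#     pip install smart_open[gcp]     # pulls in google-cloud-storage
#     pip install smart_open[all]     # pulls in every optional backend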
|
from asyncio import Event, gather
from collections import OrderedDict
from typing import Dict, Iterable, List, MutableMapping, Optional, cast
import attr
from homeassistant.core import callback
from homeassistant.loader import bind_hass
import homeassistant.util.uuid as uuid_util
from .typing import HomeAssistantType
DATA_REGISTRY = "area_registry"
EVENT_AREA_REGISTRY_UPDATED = "area_registry_updated"
STORAGE_KEY = "core.area_registry"
STORAGE_VERSION = 1
SAVE_DELAY = 10
@attr.s(slots=True, frozen=True)
class AreaEntry:
"""Area Registry Entry."""
name: Optional[str] = attr.ib(default=None)
id: str = attr.ib(factory=uuid_util.random_uuid_hex)
class AreaRegistry:
"""Class to hold a registry of areas."""
def __init__(self, hass: HomeAssistantType) -> None:
"""Initialize the area registry."""
self.hass = hass
self.areas: MutableMapping[str, AreaEntry] = {}
self._store = hass.helpers.storage.Store(STORAGE_VERSION, STORAGE_KEY)
@callback
def async_get_area(self, area_id: str) -> Optional[AreaEntry]:
"""Get all areas."""
return self.areas.get(area_id)
@callback
def async_list_areas(self) -> Iterable[AreaEntry]:
"""Get all areas."""
return self.areas.values()
@callback
def async_create(self, name: str) -> AreaEntry:
"""Create a new area."""
if self._async_is_registered(name):
raise ValueError("Name is already in use")
area = AreaEntry()
self.areas[area.id] = area
created = self._async_update(area.id, name=name)
self.hass.bus.async_fire(
EVENT_AREA_REGISTRY_UPDATED, {"action": "create", "area_id": created.id}
)
return created
async def async_delete(self, area_id: str) -> None:
"""Delete area."""
device_registry, entity_registry = await gather(
self.hass.helpers.device_registry.async_get_registry(),
self.hass.helpers.entity_registry.async_get_registry(),
)
device_registry.async_clear_area_id(area_id)
entity_registry.async_clear_area_id(area_id)
del self.areas[area_id]
self.hass.bus.async_fire(
EVENT_AREA_REGISTRY_UPDATED, {"action": "remove", "area_id": area_id}
)
self.async_schedule_save()
@callback
def async_update(self, area_id: str, name: str) -> AreaEntry:
"""Update name of area."""
updated = self._async_update(area_id, name)
self.hass.bus.async_fire(
EVENT_AREA_REGISTRY_UPDATED, {"action": "update", "area_id": area_id}
)
return updated
@callback
def _async_update(self, area_id: str, name: str) -> AreaEntry:
"""Update name of area."""
old = self.areas[area_id]
changes = {}
if name == old.name:
return old
if self._async_is_registered(name):
raise ValueError("Name is already in use")
changes["name"] = name
new = self.areas[area_id] = attr.evolve(old, **changes)
self.async_schedule_save()
return new
@callback
def _async_is_registered(self, name: str) -> Optional[AreaEntry]:
"""Check if a name is currently registered."""
for area in self.areas.values():
if name == area.name:
return area
return None
async def async_load(self) -> None:
"""Load the area registry."""
data = await self._store.async_load()
areas: MutableMapping[str, AreaEntry] = OrderedDict()
if data is not None:
for area in data["areas"]:
areas[area["id"]] = AreaEntry(name=area["name"], id=area["id"])
self.areas = areas
@callback
def async_schedule_save(self) -> None:
"""Schedule saving the area registry."""
self._store.async_delay_save(self._data_to_save, SAVE_DELAY)
@callback
def _data_to_save(self) -> Dict[str, List[Dict[str, Optional[str]]]]:
"""Return data of area registry to store in a file."""
data = {}
data["areas"] = [
{"name": entry.name, "id": entry.id} for entry in self.areas.values()
]
return data
@bind_hass
async def async_get_registry(hass: HomeAssistantType) -> AreaRegistry:
"""Return area registry instance."""
reg_or_evt = hass.data.get(DATA_REGISTRY)
if not reg_or_evt:
evt = hass.data[DATA_REGISTRY] = Event()
reg = AreaRegistry(hass)
await reg.async_load()
hass.data[DATA_REGISTRY] = reg
evt.set()
return reg
if isinstance(reg_or_evt, Event):
evt = reg_or_evt
await evt.wait()
return cast(AreaRegistry, hass.data.get(DATA_REGISTRY))
return cast(AreaRegistry, reg_or_evt)
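#
# A minimal usage sketch, assuming code running inside a Home Assistant
# coroutine with a ``hass`` instance in scope (only names defined in this
# module are used; nothing else is implied about the surrounding integration):
#
#     registry = await async_get_registry(hass)
#     kitchen = registry.async_create("Kitchen")          # fires "create" event
#     registry.async_update(kitchen.id, name="Cooking")   # fires "update" event
#     await registry.async_delete(kitchen.id)             # fires "remove" event
#
# Concurrent callers that arrive while the first load is still in flight find
# an asyncio.Event in hass.data and simply await it, which is why
# async_get_registry checks ``isinstance(reg_or_evt, Event)`` before returning.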
|
from __future__ import print_function
import json
import os.path
import time
import vim # noqa
from ._compat import PY2
class VimPymodeEnviroment(object):
"""Vim User interface."""
prefix = '[Pymode]'
def __init__(self):
"""Init VIM environment."""
self.current = vim.current
self.options = dict(encoding=vim.eval('&enc'))
self.options['debug'] = self.var('g:pymode_debug', True)
@property
def curdir(self):
"""Return current working directory."""
return self.var('getcwd()')
@property
def curbuf(self):
"""Return current buffer."""
return self.current.buffer
@property
def cursor(self):
"""Return current window position.
:return tuple: (row, col)
"""
return self.current.window.cursor
@property
def source(self):
"""Return source of current buffer."""
return "\n".join(self.lines)
@property
def lines(self):
"""Iterate by lines in current file.
:return list:
"""
if not PY2:
return self.curbuf
return [l.decode(self.options.get('encoding')) for l in self.curbuf]
@staticmethod
def var(name, to_bool=False, silence=False, default=None):
"""Get vim variable.
:return vimobj:
"""
try:
value = vim.eval(name)
except vim.error:
if silence:
return default
raise
if to_bool:
try:
value = bool(int(value))
except ValueError:
pass
return value
@staticmethod
def message(msg, history=False):
"""Show message to user.
:return: None
"""
if history:
return vim.command('echom "%s"' % str(msg))
return vim.command('call pymode#wide_message("%s")' % str(msg))
def user_input(self, msg, default=''):
"""Return user input or default.
:return str:
"""
msg = '%s %s ' % (self.prefix, msg)
if default != '':
msg += '[%s] ' % default
try:
vim.command('echohl Debug')
input_str = vim.eval('input("%s> ")' % msg)
vim.command('echohl none')
except KeyboardInterrupt:
input_str = ''
return input_str or default
def user_confirm(self, msg, yes=False):
"""Get user confirmation.
:return bool:
"""
default = 'yes' if yes else 'no'
action = self.user_input(msg, default)
return action and 'yes'.startswith(action)
def user_input_choices(self, msg, *options):
"""Get one of many options.
:return str: A chosen option
"""
choices = ['%s %s' % (self.prefix, msg)]
choices += [
"%s. %s" % (num, opt) for num, opt in enumerate(options, 1)]
try:
input_str = int(
vim.eval('inputlist(%s)' % self.prepare_value(choices)))
except (KeyboardInterrupt, ValueError):
input_str = 0
if not input_str:
self.message('Cancelled!')
return False
try:
return options[input_str - 1]
except (IndexError, ValueError):
self.error('Invalid option: %s' % input_str)
return self.user_input_choices(msg, *options)
@staticmethod
def error(msg):
"""Show error to user."""
vim.command('call pymode#error("%s")' % str(msg))
def debug(self, msg, *args):
"""Print debug information."""
if self.options.get('debug'):
print("%s %s [%s]" % (
int(time.time()), msg, ', '.join([str(a) for a in args])))
def stop(self, value=None):
"""Break Vim function."""
cmd = 'return'
if value is not None:
cmd += ' ' + self.prepare_value(value)
vim.command(cmd)
def catch_exceptions(self, func):
"""Decorator. Make execution more silence.
:return func:
"""
def _wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except (Exception, vim.error) as e: # noqa
if self.options.get('debug'):
raise
self.error(e)
return None
return _wrapper
def run(self, name, *args):
"""Run vim function."""
vim.command('call %s(%s)' % (name, ", ".join([
self.prepare_value(a) for a in args
])))
def let(self, name, value):
"""Set variable."""
cmd = 'let %s = %s' % (name, self.prepare_value(value))
self.debug(cmd)
vim.command(cmd)
def prepare_value(self, value, dumps=True):
"""Decode bstr to vim encoding.
:return unicode string:
"""
if dumps:
value = json.dumps(value)
if PY2:
value = value.decode('utf-8').encode(self.options.get('encoding'))
return value
def get_offset_params(self, cursor=None, base=""):
"""Calculate current offset.
:return tuple: (source, offset)
"""
row, col = cursor or env.cursor
source = ""
offset = 0
for i, line in enumerate(self.lines, 1):
if i == row:
source += line[:col] + base
offset = len(source)
source += line[col:]
else:
source += line
source += '\n'
env.debug('Get offset', base or None, row, col, offset)
return source, offset
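# Worked example (hypothetical buffer contents, not from any real session):
# with lines ["abc", "def"] and cursor (2, 1), the loop builds "abc\n" from the
# first line, then appends "d" from the second line before recording
# offset == len("abc\nd") == 5 and appending the rest, so get_offset_params
# returns ("abc\ndef\n", 5).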
@staticmethod
def goto_line(line):
"""Go to line."""
vim.command('normal %sggzz' % line)
def goto_file(self, path, cmd='e', force=False):
"""Open file by path."""
if force or os.path.abspath(path) != self.curbuf.name:
self.debug('read', path)
if ' ' in path and os.name == 'posix':
path = path.replace(' ', '\\ ')
vim.command("%s %s" % (cmd, path))
@staticmethod
def goto_buffer(bufnr):
"""Open buffer."""
if str(bufnr) != '-1':
vim.command('buffer %s' % bufnr)
env = VimPymodeEnviroment()
|
import numpy as np
import unittest
import chainer
from chainer import optimizers
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.ssd import GradientScaling
class SimpleLink(chainer.Link):
def __init__(self, w, g):
super(SimpleLink, self).__init__()
with self.init_scope():
self.param = chainer.Parameter(w)
self.param.grad = g
class TestGradientScaling(unittest.TestCase):
def setUp(self):
self.target = SimpleLink(
np.arange(6, dtype=np.float32).reshape((2, 3)),
np.arange(3, -3, -1, dtype=np.float32).reshape((2, 3)))
def check_gradient_scaling(self):
w = self.target.param.array
g = self.target.param.grad
rate = 0.2
expect = w - g * rate
opt = optimizers.SGD(lr=1)
opt.setup(self.target)
opt.add_hook(GradientScaling(rate))
opt.update()
testing.assert_allclose(expect, w)
def test_gradient_scaling_cpu(self):
self.check_gradient_scaling()
@attr.gpu
def test_gradient_scaling_gpu(self):
self.target.to_gpu()
self.check_gradient_scaling()
testing.run_module(__name__, __file__)
|
from homeassistant.components.device_tracker import SOURCE_TYPE_GPS
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.util import slugify
from . import DATA_KEY, SIGNAL_STATE_UPDATED
async def async_setup_scanner(hass, config, async_see, discovery_info=None):
"""Set up the Volvo tracker."""
if discovery_info is None:
return
vin, component, attr = discovery_info
data = hass.data[DATA_KEY]
instrument = data.instrument(vin, component, attr)
async def see_vehicle():
"""Handle the reporting of the vehicle position."""
host_name = instrument.vehicle_name
dev_id = "volvo_{}".format(slugify(host_name))
await async_see(
dev_id=dev_id,
host_name=host_name,
source_type=SOURCE_TYPE_GPS,
gps=instrument.state,
icon="mdi:car",
)
async_dispatcher_connect(hass, SIGNAL_STATE_UPDATED, see_vehicle)
return True
|
import os
import pytest
import sh
from molecule import config
from molecule import util
from molecule.verifier import testinfra
from molecule.verifier.lint import flake8
@pytest.fixture
def _patched_testinfra_get_tests(mocker):
m = mocker.patch('molecule.verifier.testinfra.Testinfra._get_tests')
m.return_value = [
'foo.py',
'bar.py',
]
return m
@pytest.fixture
def _verifier_section_data():
return {
'verifier': {
'name':
'testinfra',
'options': {
'foo': 'bar',
'v': True,
'verbose': True,
},
'additional_files_or_dirs': [
'file1.py',
'file2.py',
'match*.py',
'dir/*',
],
'env': {
'FOO': 'bar',
},
'lint': {
'name': 'flake8',
},
}
}
# NOTE(retr0h): The use of the `patched_config_validate` fixture disables
# config.Config._validate from executing, thus preventing odd side-effects
# throughout patched.assert_called unit tests.
@pytest.fixture
def _instance(patched_config_validate, config_instance):
return testinfra.Testinfra(config_instance)
@pytest.fixture
def inventory_file(_instance):
return _instance._config.provisioner.inventory_file
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(inventory_file, _instance):
x = {
'connection': 'ansible',
'ansible-inventory': inventory_file,
'p': 'no:cacheprovider',
}
assert x == _instance.default_options
def test_default_options_property_updates_debug(inventory_file, _instance):
_instance._config.args = {'debug': True}
x = {
'connection': 'ansible',
'ansible-inventory': inventory_file,
'debug': True,
'vvv': True,
'p': 'no:cacheprovider',
}
assert x == _instance.default_options
def test_default_options_property_updates_sudo(inventory_file, _instance,
_patched_testinfra_get_tests):
_instance._config.args = {'sudo': True}
x = {
'connection': 'ansible',
'ansible-inventory': inventory_file,
'sudo': True,
'p': 'no:cacheprovider',
}
assert x == _instance.default_options
def test_default_env_property(_instance):
assert 'MOLECULE_FILE' in _instance.default_env
assert 'MOLECULE_INVENTORY_FILE' in _instance.default_env
assert 'MOLECULE_SCENARIO_DIRECTORY' in _instance.default_env
assert 'MOLECULE_INSTANCE_CONFIG' in _instance.default_env
@pytest.mark.parametrize(
'config_instance', ['_verifier_section_data'], indirect=True)
def test_additional_files_or_dirs_property(_instance):
tests_directory = _instance._config.verifier.directory
file1_file = os.path.join(tests_directory, 'file1.py')
file2_file = os.path.join(tests_directory, 'file2.py')
match1_file = os.path.join(tests_directory, 'match1.py')
match2_file = os.path.join(tests_directory, 'match2.py')
test_subdir = os.path.join(tests_directory, 'dir')
test_subdir_file = os.path.join(test_subdir, 'test_subdir_file.py')
os.mkdir(tests_directory)
os.mkdir(test_subdir)
for f in [
file1_file,
file2_file,
match1_file,
match2_file,
test_subdir_file,
]:
util.write_file(f, '')
x = [
file1_file,
file2_file,
match1_file,
match2_file,
test_subdir_file,
]
assert sorted(x) == sorted(_instance.additional_files_or_dirs)
@pytest.mark.parametrize(
'config_instance', ['_verifier_section_data'], indirect=True)
def test_env_property(_instance):
assert 'bar' == _instance.env['FOO']
assert 'ANSIBLE_CONFIG' in _instance.env
assert 'ANSIBLE_ROLES_PATH' in _instance.env
assert 'ANSIBLE_LIBRARY' in _instance.env
assert 'ANSIBLE_FILTER_PLUGINS' in _instance.env
def test_lint_property(_instance):
assert isinstance(_instance.lint, flake8.Flake8)
def test_name_property(_instance):
assert 'testinfra' == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
def test_directory_property(_instance):
parts = _instance.directory.split(os.path.sep)
assert ['molecule', 'default', 'tests'] == parts[-3:]
@pytest.fixture
def _verifier_testinfra_directory_section_data():
return {
'verifier': {
'name': 'testinfra',
'directory': '/tmp/foo/bar'
},
}
@pytest.mark.parametrize(
'config_instance', ['_verifier_testinfra_directory_section_data'],
indirect=True)
def test_directory_property_overriden(_instance):
assert '/tmp/foo/bar' == _instance.directory
@pytest.mark.parametrize(
'config_instance', ['_verifier_section_data'], indirect=True)
def test_options_property(inventory_file, _instance):
x = {
'connection': 'ansible',
'ansible-inventory': inventory_file,
'foo': 'bar',
'v': True,
'verbose': True,
'p': 'no:cacheprovider',
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_verifier_section_data'], indirect=True)
def test_options_property_handles_cli_args(inventory_file, _instance):
_instance._config.args = {'debug': True}
x = {
'connection': 'ansible',
'ansible-inventory': inventory_file,
'foo': 'bar',
'debug': True,
'vvv': True,
'verbose': True,
'p': 'no:cacheprovider',
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_verifier_section_data'], indirect=True)
def test_bake(_patched_testinfra_get_tests, inventory_file, _instance):
tests_directory = _instance._config.verifier.directory
file1_file = os.path.join(tests_directory, 'file1.py')
os.mkdir(tests_directory)
util.write_file(file1_file, '')
_instance.bake()
x = [
str(sh.Command('py.test')),
'--ansible-inventory={}'.format(inventory_file),
'--connection=ansible',
'-v',
'--foo=bar',
'foo.py',
'bar.py',
'-p',
'no:cacheprovider',
file1_file,
]
result = str(_instance._testinfra_command).split()
assert sorted(x) == sorted(result)
def test_execute(patched_logger_info, patched_run_command,
_patched_testinfra_get_tests, patched_logger_success,
_instance):
_instance._testinfra_command = 'patched-command'
_instance.execute()
patched_run_command.assert_called_once_with('patched-command', debug=False)
msg = 'Executing Testinfra tests found in {}/...'.format(
_instance.directory)
patched_logger_info.assert_called_once_with(msg)
msg = 'Verifier completed successfully.'
patched_logger_success.assert_called_once_with(msg)
def test_execute_does_not_execute(patched_run_command, patched_logger_warn,
_instance):
_instance._config.config['verifier']['enabled'] = False
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, verifier is disabled.'
patched_logger_warn.assert_called_once_with(msg)
def test_does_not_execute_without_tests(patched_run_command,
patched_logger_warn, _instance):
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, no tests found.'
patched_logger_warn.assert_called_once_with(msg)
def test_execute_bakes(patched_run_command, _patched_testinfra_get_tests,
_instance):
_instance.execute()
assert _instance._testinfra_command is not None
assert 1 == patched_run_command.call_count
def test_executes_catches_and_exits_return_code(
patched_run_command, _patched_testinfra_get_tests, _instance):
patched_run_command.side_effect = sh.ErrorReturnCode_1(
sh.testinfra, b'', b'')
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
|
import pytest
from yandextank.validator.validator import TankConfig, ValidationError, PatchedValidator
CFG_VER_I_0 = {
"version": "1.9.3",
"core": {
'operator': 'fomars',
'artifacts_base_dir': './'
},
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 'monitoring.xml',
'disguise_hostnames': True
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'header_http': '1.1',
'uris': ['/'],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1, 10, 10m)'}
},
'lunapark': {
'package': 'yandextank.plugins.DataUploader',
'enabled': True,
'api_address': 'https://lunapark.test.yandex-team.ru/'
},
'overload': {
'package': 'yandextank.plugins.DataUploader',
'enabled': True,
'api_address': 'https://overload.yandex.net/',
'token_file': 'token.txt'
}
}
PHANTOM_SCHEMA_V_G = {
'phantom_path': {
'type': 'string',
'default': 'phantom'
},
'buffered_seconds': {
'type': 'integer',
'default': 2
},
'address': {
'type': 'string',
'required': True
},
'header_http': {
'type': 'string'
},
'uris': {
'type': 'string'
},
'headers': {
'type': 'string',
'regex': r'(\[[\w\d\.]+:\s[\w\d\.]+\]\s*)+'
},
'rps_schedule': {
'type': 'string'
},
'writelog': {
'type': 'string'
}
}
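#
# PHANTOM_SCHEMA_V_G above is written in Cerberus schema syntax (type /
# default / required / regex rules). A hedged sketch of how a plain Cerberus
# validator would apply it (assumption: ``cerberus`` is installed; this is
# illustrative only and not part of the yandextank test suite):
#
#     from cerberus import Validator
#     v = Validator(PHANTOM_SCHEMA_V_G)
#     v.validate({'address': 'nodejs.load.yandex.net', 'header_http': '1.1'})  # True
#     v.normalized({'address': 'example.net'})  # fills defaults such as phantom_path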
@pytest.mark.parametrize('config, expected', [
({
"version": "1.9.3",
"core": {
'operator': 'fomars',
'artifacts_base_dir': './',
'ignore_lock': False
},
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 'monitoring.xml',
'disguise_hostnames': True,
'kill_old': True
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'header_http': '1.1',
'uris': ['/'],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1, 10, 10m)'}}
},
{
"version": "1.9.3",
"core": {
'operator': 'fomars',
'artifacts_base_dir': './',
'lock_dir': '/var/lock/',
'taskset_path': 'taskset',
'affinity': '',
'ignore_lock': False,
'debug': False
},
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 'monitoring.xml',
'disguise_hostnames': True,
'default_target': 'localhost',
'ssh_timeout': '5s',
'kill_old': True
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'header_http': '1.1',
'uris': ['/'],
'buffered_seconds': 2,
'phantom_path': 'phantom',
'affinity': '',
'enum_ammo': False,
'phout_file': '',
'phantom_modules_path': '/usr/lib/phantom',
'threads': None,
'writelog': '0',
'timeout': '11s',
'additional_libs': [],
'config': '',
'gatling_ip': '',
'instances': 1000,
'method_options': '',
'method_prefix': 'method_stream',
'phantom_http_entity': '8M',
'phantom_http_field': '8K',
'phantom_http_field_num': 128,
'phantom_http_line': '1K',
'source_log_prefix': '',
'ssl': False,
'tank_type': 'http',
'ammo_limit': -1,
'ammo_type': 'phantom',
'ammofile': '',
'autocases': 0,
'cache_dir': None,
'chosen_cases': '',
'client_certificate': '',
'client_cipher_suites': '',
'client_key': '',
'connection_test': True,
'file_cache': 8192,
'force_stepping': 0,
'headers': [],
'loop': -1,
'port': '',
'use_caching': True,
'multi': [],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1, 10, 10m)'},
}
}
),
({'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'load_profile': {
'load_type': 'rps',
'schedule': 'const(2,1m)'},
'timeout': '5s',
'uris': ['/'],
'loop': 1000,
'address': 'centurion.tanks.yandex.net'}},
{'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'load_profile': {
'load_type': 'rps',
'schedule': 'const(2,1m)'},
'timeout': '5s',
'uris': ['/'],
'loop': 1000,
'address': 'centurion.tanks.yandex.net',
'buffered_seconds': 2,
'phantom_path': 'phantom',
'affinity': '',
'enum_ammo': False,
'phout_file': '',
'phantom_modules_path': '/usr/lib/phantom',
'threads': None,
'writelog': '0',
'additional_libs': [],
'config': '',
'gatling_ip': '',
'instances': 1000,
'method_options': '',
'method_prefix': 'method_stream',
'phantom_http_entity': '8M',
'phantom_http_field': '8K',
'phantom_http_field_num': 128,
'phantom_http_line': '1K',
'source_log_prefix': '',
'ssl': False,
'tank_type': 'http',
'ammo_limit': -1,
'ammo_type': 'phantom',
'ammofile': '',
'autocases': 0,
'cache_dir': None,
'chosen_cases': '',
'client_certificate': '',
'client_cipher_suites': '',
'client_key': '',
'connection_test': True,
'file_cache': 8192,
'force_stepping': 0,
'headers': [],
'port': '',
'use_caching': True,
'header_http': '1.0',
'multi': []},
'core': {
'artifacts_base_dir': './logs',
'lock_dir': '/var/lock/',
'taskset_path': 'taskset',
'affinity': '',
'ignore_lock': False,
'debug': False}}
)
])
def test_validate_core(config, expected):
validated, errors, initial = TankConfig(config, False).validate()
assert validated.validated == expected, errors
@pytest.mark.parametrize('config, expected', [
# plugins: no package
({
"version": "1.9.3",
"core": {
'operator': 'fomars'
},
'telegraf': {
'enabled': True,
'config': 'monitoring.xml',
'disguise_hostnames': 1
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'header_http': '1.1',
'uris': ['/'],
'affinity': '',
'enum_ammo': False,
'phout_file': '',
'config': '',
'gatling_ip': '',
'instances': 1000,
'method_options': '',
'method_prefix': 'method_stream',
'phantom_http_entity': '8M',
'phantom_http_field': '8K',
'phantom_http_field_num': 128,
'phantom_http_line': '1K',
'source_log_prefix': '',
'ssl': False,
'tank_type': 'http',
'multi': [],
}
}, {'telegraf': [{'package': ['required field']}]}),
# plugins: empty package
({
"version": "1.9.3",
"core": {
'operator': 'fomars'
},
'telegraf': {
'package': '',
'enabled': True,
'config': 'monitoring.xml',
'disguise_hostnames': 1
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'header_http': '1.1',
'uris': ['/']}
}, {'telegraf': [{'package': ['empty values not allowed']}]})
])
def test_validate_core_error(config, expected):
with pytest.raises(Exception) as e:
TankConfig(config).validated
print('exception value:\n', str(e.value))
assert expected == e.value.errors
@pytest.mark.parametrize('configs, expected', [
# test disable plugin
([{
"version": "1.9.3",
"core": {
'operator': 'fomars'
},
"plugins": {
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 'mon.xml'
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'foo.bar'
}
}
},
{
"version": "1.9.3",
"core": {
'operator': 'fomars'
},
"plugins": {
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': False,
'config': 'mon.xml'
},
}
}
],
{"version": "1.9.3",
"core": {
'operator': 'fomars'
},
"plugins": {
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': False,
'config': 'mon.xml'
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'foo.bar'
}
}
}
)
])
def test_load_multiple(configs, expected):
assert TankConfig(configs).raw_config_dict == expected
@pytest.mark.parametrize('config, expected', [
(
{
"version": "1.9.3",
"core": {
'operator': 'fomars',
'artifacts_base_dir': './'
},
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 'monitoring.xml',
'disguise_hostnames': True
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'header_http': '1.1',
'uris': ['/'],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1, 10, 10m)'},
'multi': [{'name': 'foo'}],
}
},
{
"version": "1.9.3",
"core": {
'operator': 'fomars',
'artifacts_base_dir': './',
'lock_dir': '/var/lock/',
'taskset_path': 'taskset',
'affinity': '',
'ignore_lock': False,
'debug': False
},
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 'monitoring.xml',
'disguise_hostnames': True,
'ssh_timeout': '5s',
'default_target': 'localhost',
'kill_old': False
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'header_http': '1.1',
'uris': ['/'],
'buffered_seconds': 2,
'phantom_path': 'phantom',
'affinity': '',
'enum_ammo': False,
'phout_file': '',
'phantom_modules_path': '/usr/lib/phantom',
'threads': None,
'writelog': '0',
'timeout': '11s',
'additional_libs': [],
'config': '',
'gatling_ip': '',
'instances': 1000,
'method_options': '',
'method_prefix': 'method_stream',
'phantom_http_entity': '8M',
'phantom_http_field': '8K',
'phantom_http_field_num': 128,
'phantom_http_line': '1K',
'source_log_prefix': '',
'ssl': False,
'tank_type': 'http',
'ammo_limit': -1,
'ammo_type': 'phantom',
'ammofile': '',
'autocases': 0,
'cache_dir': None,
'chosen_cases': '',
'client_certificate': '',
'client_cipher_suites': '',
'client_key': '',
'connection_test': True,
'file_cache': 8192,
'force_stepping': 0,
'headers': [],
'loop': -1,
'port': '',
'use_caching': True,
'multi': [{'name': 'foo'}],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1, 10, 10m)'}
}
}
),
])
def test_validate_all(config, expected):
assert TankConfig(config, False).validated == expected
@pytest.mark.parametrize('config, expected', [
# core errors
(
{
"version": 5,
"core": {},
'telegraf': {
'package': 'yandextank/plugins/Telegraf',
'enabled': True,
'config': 'monitoring.xml',
'disguise_hostnames': True
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'header_http': '1.1',
'uris': ['/'],
}
},
{'telegraf': [{'package': ["value does not match regex '[^/]+'"]}],
'version': ['must be of string type']}),
# plugins errors
(
{
"version": "1.9.3",
"core": {
'operator': 'fomars',
'artifacts_base_dir': './'
},
'telegraf': {
'package': 'yandextank.plugins.Telegraf',
'enabled': True,
'config': 0,
'disguise_hostnames': True
},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'header_http': '1.1',
'uris': ['/'],
}
},
{'phantom': {'address': ['required field'], 'load_profile': ['required field']},
'telegraf': {'config': ['must be of string type']}}),
(
{
"core": {},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'uris': ['/'],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1, 20, 2, 10m)'}
}
},
{'phantom': {'load_profile': [{'schedule': ['line load scheme: expected 3 arguments, found 4']}]}}),
(
{
"core": {},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'uris': ['/'],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1, 20, 10m5m)'}
}
},
{'phantom': {'load_profile': [{'schedule': ['Load duration examples: 2h30m; 5m15; 180']}]}}),
(
{
"core": {},
'phantom': {
'package': 'yandextank.plugins.Phantom',
'enabled': True,
'address': 'nodejs.load.yandex.net',
'uris': ['/'],
'load_profile': {'load_type': 'rps', 'schedule': 'line(1n,20,100)'}
}
},
{'phantom': {'load_profile': [{'schedule': ['Argument 1n in load scheme should be a number']}]}})
])
def test_validate_all_error(config, expected):
with pytest.raises(ValidationError) as e:
TankConfig(config).validated
assert e.value.errors == expected
@pytest.mark.parametrize('config, expected', [
(
CFG_VER_I_0,
{
(
'telegraf',
'yandextank.plugins.Telegraf',
),
(
'phantom',
'yandextank.plugins.Phantom',
),
(
'lunapark',
'yandextank.plugins.DataUploader',
),
(
'overload',
'yandextank.plugins.DataUploader',
)
}
)
])
def test_get_plugins(config, expected):
validated, errors, raw = TankConfig(config).validate()
assert {(name, pack) for name, pack, cfg in validated.plugins} == expected
@pytest.mark.parametrize('value', [
'step(10,200,5,180)',
'step(5,50,2.5,5m)',
'line(22,154,2h5m)',
'step(5,50,2.5,5m) line(22,154,2h5m)',
'const(10,1h4m3s)',
'const(2.5,150)',
'const(100, 1d2h)',
'line(10, 120, 300s)',
])
def test_load_scheme_validator(value):
validator = PatchedValidator({'load_type': {'type': 'string'}, 'schedule': {'validator': 'load_scheme'}})
cfg = {'load_type': 'rps', 'schedule': value}
assert validator.validate(cfg)
@pytest.mark.parametrize('value', [
'step(10,5,180)',
'step(5,50,2.5,5m,30s)',
'lien(22,154,2h5m)',
'step(5,50,2.5,5m) line(22,154,2h5m) const(10, 20, 3m)',
'const(10,1.5h)',
])
def test_negative_load_scheme_validator(value):
validator = PatchedValidator({'load_type': {'type': 'string'}, 'schedule': {'validator': 'load_scheme'}})
cfg = {'load_type': 'rps', 'schedule': value}
assert not validator.validate(cfg)
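#
# The positive and negative cases above outline the load-scheme grammar the
# validator accepts (inferred from these tests; the argument names below are
# descriptive labels, not official ones):
#
#     const(rps, duration)                    e.g. const(10, 1h4m3s)
#     line(from_rps, to_rps, duration)        e.g. line(22, 154, 2h5m)
#     step(from_rps, to_rps, step, duration)  e.g. step(5, 50, 2.5, 5m)
#
# Several space-separated terms may be chained, rps values may be fractional
# (const(2.5, 150)), and durations are written like 2h30m, 300s or a bare
# number of seconds; wrong argument counts, misspelled keywords and fractional
# durations such as "1.5h" are rejected.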
|
import logging
from typing import Callable
from .board import FirmataBoard, FirmataPinType
from .const import PIN_MODE_INPUT, PIN_MODE_PULLUP, PIN_TYPE_ANALOG
_LOGGER = logging.getLogger(__name__)
class FirmataPinUsedException(Exception):
"""Represents an exception when a pin is already in use."""
class FirmataBoardPin:
"""Manages a single Firmata board pin."""
def __init__(self, board: FirmataBoard, pin: FirmataPinType, pin_mode: str):
"""Initialize the pin."""
self.board = board
self._pin = pin
self._pin_mode = pin_mode
self._pin_type, self._firmata_pin = self.board.get_pin_type(self._pin)
self._state = None
if self._pin_type == PIN_TYPE_ANALOG:
# Pymata wants the analog pin formatted as the # from "A#"
self._analog_pin = int(self._pin[1:])
def setup(self):
"""Set up a pin and make sure it is valid."""
if not self.board.mark_pin_used(self._pin):
raise FirmataPinUsedException(f"Pin {self._pin} already used!")
class FirmataBinaryDigitalOutput(FirmataBoardPin):
"""Representation of a Firmata Digital Output Pin."""
def __init__(
self,
board: FirmataBoard,
pin: FirmataPinType,
pin_mode: str,
initial: bool,
negate: bool,
):
"""Initialize the digital output pin."""
self._initial = initial
self._negate = negate
super().__init__(board, pin, pin_mode)
async def start_pin(self) -> None:
"""Set initial state on a pin."""
_LOGGER.debug(
"Setting initial state for digital output pin %s on board %s",
self._pin,
self.board.name,
)
api = self.board.api
# Only PIN_MODE_OUTPUT mode is supported as binary digital output
await api.set_pin_mode_digital_output(self._firmata_pin)
if self._initial:
new_pin_state = not self._negate
else:
new_pin_state = self._negate
await api.digital_pin_write(self._firmata_pin, int(new_pin_state))
self._state = self._initial
@property
def is_on(self) -> bool:
"""Return true if digital output is on."""
return self._state
async def turn_on(self) -> None:
"""Turn on digital output."""
_LOGGER.debug("Turning digital output on pin %s on", self._pin)
new_pin_state = not self._negate
await self.board.api.digital_pin_write(self._firmata_pin, int(new_pin_state))
self._state = True
async def turn_off(self) -> None:
"""Turn off digital output."""
_LOGGER.debug("Turning digital output on pin %s off", self._pin)
new_pin_state = self._negate
await self.board.api.digital_pin_write(self._firmata_pin, int(new_pin_state))
self._state = False
class FirmataPWMOutput(FirmataBoardPin):
"""Representation of a Firmata PWM/analog Output Pin."""
def __init__(
self,
board: FirmataBoard,
pin: FirmataPinType,
pin_mode: str,
initial: bool,
minimum: int,
maximum: int,
):
"""Initialize the PWM/analog output pin."""
self._initial = initial
self._min = minimum
self._max = maximum
self._range = self._max - self._min
super().__init__(board, pin, pin_mode)
async def start_pin(self) -> None:
"""Set initial state on a pin."""
_LOGGER.debug(
"Setting initial state for PWM/analog output pin %s on board %s to %d",
self._pin,
self.board.name,
self._initial,
)
api = self.board.api
await api.set_pin_mode_pwm_output(self._firmata_pin)
new_pin_state = round((self._initial * self._range) / 255) + self._min
await api.pwm_write(self._firmata_pin, new_pin_state)
self._state = self._initial
@property
def state(self) -> int:
"""Return PWM/analog state."""
return self._state
async def set_level(self, level: int) -> None:
"""Set PWM/analog output."""
_LOGGER.debug("Setting PWM/analog output on pin %s to %d", self._pin, level)
new_pin_state = round((level * self._range) / 255) + self._min
await self.board.api.pwm_write(self._firmata_pin, new_pin_state)
self._state = level
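# Worked example of the scaling above (hypothetical configuration): with
# minimum=0 and maximum=1023 the range is 1023, so level=255 maps to
# round(255 * 1023 / 255) + 0 == 1023 and level=128 maps to
# round(128 * 1023 / 255) == 514. The assumption is that ``level`` arrives on
# a 0-255 scale (e.g. a brightness value) and is stretched onto the pin's
# native PWM range.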
class FirmataBinaryDigitalInput(FirmataBoardPin):
"""Representation of a Firmata Digital Input Pin."""
def __init__(
self, board: FirmataBoard, pin: FirmataPinType, pin_mode: str, negate: bool
):
"""Initialize the digital input pin."""
self._negate = negate
self._forward_callback = None
super().__init__(board, pin, pin_mode)
async def start_pin(self, forward_callback: Callable[[], None]) -> None:
"""Get initial state and start reporting a pin."""
_LOGGER.debug(
"Starting reporting updates for digital input pin %s on board %s",
self._pin,
self.board.name,
)
self._forward_callback = forward_callback
api = self.board.api
if self._pin_mode == PIN_MODE_INPUT:
await api.set_pin_mode_digital_input(self._pin, self.latch_callback)
elif self._pin_mode == PIN_MODE_PULLUP:
await api.set_pin_mode_digital_input_pullup(self._pin, self.latch_callback)
new_state = bool((await self.board.api.digital_read(self._firmata_pin))[0])
if self._negate:
new_state = not new_state
self._state = new_state
await api.enable_digital_reporting(self._pin)
self._forward_callback()
async def stop_pin(self) -> None:
"""Stop reporting digital input pin."""
_LOGGER.debug(
"Stopping reporting updates for digital input pin %s on board %s",
self._pin,
self.board.name,
)
api = self.board.api
await api.disable_digital_reporting(self._pin)
@property
def is_on(self) -> bool:
"""Return true if digital input is on."""
return self._state
async def latch_callback(self, data: list) -> None:
"""Update pin state on callback."""
if data[1] != self._firmata_pin:
return
_LOGGER.debug(
"Received latch %d for digital input pin %d on board %s",
data[2],
self._firmata_pin,
self.board.name,
)
new_state = bool(data[2])
if self._negate:
new_state = not new_state
if self._state == new_state:
return
self._state = new_state
self._forward_callback()
class FirmataAnalogInput(FirmataBoardPin):
"""Representation of a Firmata Analog Input Pin."""
def __init__(
self, board: FirmataBoard, pin: FirmataPinType, pin_mode: str, differential: int
):
"""Initialize the analog input pin."""
self._differential = differential
self._forward_callback = None
super().__init__(board, pin, pin_mode)
async def start_pin(self, forward_callback: Callable[[], None]) -> None:
"""Get initial state and start reporting a pin."""
_LOGGER.debug(
"Starting reporting updates for analog input pin %s on board %s",
self._pin,
self.board.name,
)
self._forward_callback = forward_callback
api = self.board.api
# Only PIN_MODE_ANALOG_INPUT mode is supported as sensor input
await api.set_pin_mode_analog_input(
self._analog_pin, self.latch_callback, self._differential
)
self._state = (await self.board.api.analog_read(self._analog_pin))[0]
self._forward_callback()
async def stop_pin(self) -> None:
"""Stop reporting analog input pin."""
_LOGGER.debug(
"Stopping reporting updates for analog input pin %s on board %s",
self._pin,
self.board.name,
)
api = self.board.api
await api.disable_analog_reporting(self._analog_pin)
@property
def state(self) -> int:
"""Return sensor state."""
return self._state
async def latch_callback(self, data: list) -> None:
"""Update pin state on callback."""
if data[1] != self._analog_pin:
return
_LOGGER.debug(
"Received latch %d for analog input pin %s on board %s",
data[2],
self._pin,
self.board.name,
)
new_state = data[2]
if self._state == new_state:
_LOGGER.debug("stopping")
return
self._state = new_state
self._forward_callback()
|
from functools import partial
import logging
from miio import DeviceException, Vacuum # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.vacuum import (
ATTR_CLEANED_AREA,
PLATFORM_SCHEMA,
STATE_CLEANING,
STATE_DOCKED,
STATE_ERROR,
STATE_IDLE,
STATE_PAUSED,
STATE_RETURNING,
SUPPORT_BATTERY,
SUPPORT_CLEAN_SPOT,
SUPPORT_FAN_SPEED,
SUPPORT_LOCATE,
SUPPORT_PAUSE,
SUPPORT_RETURN_HOME,
SUPPORT_SEND_COMMAND,
SUPPORT_START,
SUPPORT_STATE,
SUPPORT_STOP,
StateVacuumEntity,
)
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_TOKEN, STATE_OFF, STATE_ON
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.util.dt import as_utc
from .const import (
SERVICE_CLEAN_SEGMENT,
SERVICE_CLEAN_ZONE,
SERVICE_GOTO,
SERVICE_MOVE_REMOTE_CONTROL,
SERVICE_MOVE_REMOTE_CONTROL_STEP,
SERVICE_START_REMOTE_CONTROL,
SERVICE_STOP_REMOTE_CONTROL,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "Xiaomi Vacuum cleaner"
DATA_KEY = "vacuum.xiaomi_miio"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(str, vol.Length(min=32, max=32)),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
},
extra=vol.ALLOW_EXTRA,
)
ATTR_CLEAN_START = "clean_start"
ATTR_CLEAN_STOP = "clean_stop"
ATTR_CLEANING_TIME = "cleaning_time"
ATTR_DO_NOT_DISTURB = "do_not_disturb"
ATTR_DO_NOT_DISTURB_START = "do_not_disturb_start"
ATTR_DO_NOT_DISTURB_END = "do_not_disturb_end"
ATTR_MAIN_BRUSH_LEFT = "main_brush_left"
ATTR_SIDE_BRUSH_LEFT = "side_brush_left"
ATTR_FILTER_LEFT = "filter_left"
ATTR_SENSOR_DIRTY_LEFT = "sensor_dirty_left"
ATTR_CLEANING_COUNT = "cleaning_count"
ATTR_CLEANED_TOTAL_AREA = "total_cleaned_area"
ATTR_CLEANING_TOTAL_TIME = "total_cleaning_time"
ATTR_ERROR = "error"
ATTR_RC_DURATION = "duration"
ATTR_RC_ROTATION = "rotation"
ATTR_RC_VELOCITY = "velocity"
ATTR_STATUS = "status"
ATTR_ZONE_ARRAY = "zone"
ATTR_ZONE_REPEATER = "repeats"
ATTR_TIMERS = "timers"
SUPPORT_XIAOMI = (
SUPPORT_STATE
| SUPPORT_PAUSE
| SUPPORT_STOP
| SUPPORT_RETURN_HOME
| SUPPORT_FAN_SPEED
| SUPPORT_SEND_COMMAND
| SUPPORT_LOCATE
| SUPPORT_BATTERY
| SUPPORT_CLEAN_SPOT
| SUPPORT_START
)
STATE_CODE_TO_STATE = {
1: STATE_IDLE, # "Starting"
2: STATE_IDLE, # "Charger disconnected"
3: STATE_IDLE, # "Idle"
4: STATE_CLEANING, # "Remote control active"
5: STATE_CLEANING, # "Cleaning"
6: STATE_RETURNING, # "Returning home"
7: STATE_CLEANING, # "Manual mode"
8: STATE_DOCKED, # "Charging"
9: STATE_ERROR, # "Charging problem"
10: STATE_PAUSED, # "Paused"
11: STATE_CLEANING, # "Spot cleaning"
12: STATE_ERROR, # "Error"
13: STATE_IDLE, # "Shutting down"
14: STATE_DOCKED, # "Updating"
15: STATE_RETURNING, # "Docking"
16: STATE_CLEANING, # "Going to target"
17: STATE_CLEANING, # "Zoned cleaning"
18: STATE_CLEANING, # "Segment cleaning"
100: STATE_DOCKED, # "Charging complete"
101: STATE_ERROR, # "Device offline"
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Xiaomi vacuum cleaner robot platform."""
if DATA_KEY not in hass.data:
hass.data[DATA_KEY] = {}
host = config[CONF_HOST]
token = config[CONF_TOKEN]
name = config[CONF_NAME]
# Create handler
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
vacuum = Vacuum(host, token)
mirobo = MiroboVacuum(name, vacuum)
hass.data[DATA_KEY][host] = mirobo
async_add_entities([mirobo], update_before_add=True)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_START_REMOTE_CONTROL,
{},
MiroboVacuum.async_remote_control_start.__name__,
)
platform.async_register_entity_service(
SERVICE_STOP_REMOTE_CONTROL,
{},
MiroboVacuum.async_remote_control_stop.__name__,
)
platform.async_register_entity_service(
SERVICE_MOVE_REMOTE_CONTROL,
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
},
MiroboVacuum.async_remote_control_move.__name__,
)
platform.async_register_entity_service(
SERVICE_MOVE_REMOTE_CONTROL_STEP,
{
vol.Optional(ATTR_RC_VELOCITY): vol.All(
vol.Coerce(float), vol.Clamp(min=-0.29, max=0.29)
),
vol.Optional(ATTR_RC_ROTATION): vol.All(
vol.Coerce(int), vol.Clamp(min=-179, max=179)
),
vol.Optional(ATTR_RC_DURATION): cv.positive_int,
},
MiroboVacuum.async_remote_control_move_step.__name__,
)
platform.async_register_entity_service(
SERVICE_CLEAN_ZONE,
{
vol.Required(ATTR_ZONE_ARRAY): vol.All(
list,
[
vol.ExactSequence(
[
vol.Coerce(int),
vol.Coerce(int),
vol.Coerce(int),
vol.Coerce(int),
]
)
],
),
vol.Required(ATTR_ZONE_REPEATER): vol.All(
vol.Coerce(int), vol.Clamp(min=1, max=3)
),
},
MiroboVacuum.async_clean_zone.__name__,
)
platform.async_register_entity_service(
SERVICE_GOTO,
{
vol.Required("x_coord"): vol.Coerce(int),
vol.Required("y_coord"): vol.Coerce(int),
},
MiroboVacuum.async_goto.__name__,
)
platform.async_register_entity_service(
SERVICE_CLEAN_SEGMENT,
{vol.Required("segments"): vol.Any(vol.Coerce(int), [vol.Coerce(int)])},
MiroboVacuum.async_clean_segment.__name__,
)
class MiroboVacuum(StateVacuumEntity):
"""Representation of a Xiaomi Vacuum cleaner robot."""
def __init__(self, name, vacuum):
"""Initialize the Xiaomi vacuum cleaner robot handler."""
self._name = name
self._vacuum = vacuum
self.vacuum_state = None
self._available = False
self.consumable_state = None
self.clean_history = None
self.dnd_state = None
self.last_clean = None
self._fan_speeds = None
self._fan_speeds_reverse = None
self._timers = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the status of the vacuum cleaner."""
if self.vacuum_state is not None:
# The vacuum reverts back to an idle state after erroring out.
# We want to keep returning an error until it has been cleared.
if self.vacuum_state.got_error:
return STATE_ERROR
try:
return STATE_CODE_TO_STATE[int(self.vacuum_state.state_code)]
except KeyError:
_LOGGER.error(
"STATE not supported: %s, state_code: %s",
self.vacuum_state.state,
self.vacuum_state.state_code,
)
return None
@property
def battery_level(self):
"""Return the battery level of the vacuum cleaner."""
if self.vacuum_state is not None:
return self.vacuum_state.battery
@property
def fan_speed(self):
"""Return the fan speed of the vacuum cleaner."""
if self.vacuum_state is not None:
speed = self.vacuum_state.fanspeed
if speed in self._fan_speeds_reverse:
return self._fan_speeds_reverse[speed]
_LOGGER.debug("Unable to find reverse for %s", speed)
return speed
@property
def fan_speed_list(self):
"""Get the list of available fan speed steps of the vacuum cleaner."""
return list(self._fan_speeds) if self._fan_speeds else []
@property
def timers(self):
"""Get the list of added timers of the vacuum cleaner."""
return [
{
"enabled": timer.enabled,
"cron": timer.cron,
"next_schedule": as_utc(timer.next_schedule),
}
for timer in self._timers
]
@property
def device_state_attributes(self):
"""Return the specific state attributes of this vacuum cleaner."""
attrs = {}
if self.vacuum_state is not None:
attrs.update(
{
ATTR_DO_NOT_DISTURB: STATE_ON
if self.dnd_state.enabled
else STATE_OFF,
ATTR_DO_NOT_DISTURB_START: str(self.dnd_state.start),
ATTR_DO_NOT_DISTURB_END: str(self.dnd_state.end),
# Not working --> 'Cleaning mode':
# STATE_ON if self.vacuum_state.in_cleaning else STATE_OFF,
ATTR_CLEANING_TIME: int(
self.vacuum_state.clean_time.total_seconds() / 60
),
ATTR_CLEANED_AREA: int(self.vacuum_state.clean_area),
ATTR_CLEANING_COUNT: int(self.clean_history.count),
ATTR_CLEANED_TOTAL_AREA: int(self.clean_history.total_area),
ATTR_CLEANING_TOTAL_TIME: int(
self.clean_history.total_duration.total_seconds() / 60
),
ATTR_MAIN_BRUSH_LEFT: int(
self.consumable_state.main_brush_left.total_seconds() / 3600
),
ATTR_SIDE_BRUSH_LEFT: int(
self.consumable_state.side_brush_left.total_seconds() / 3600
),
ATTR_FILTER_LEFT: int(
self.consumable_state.filter_left.total_seconds() / 3600
),
ATTR_SENSOR_DIRTY_LEFT: int(
self.consumable_state.sensor_dirty_left.total_seconds() / 3600
),
ATTR_STATUS: str(self.vacuum_state.state),
}
)
if self.last_clean:
attrs[ATTR_CLEAN_START] = self.last_clean.start
attrs[ATTR_CLEAN_STOP] = self.last_clean.end
if self.vacuum_state.got_error:
attrs[ATTR_ERROR] = self.vacuum_state.error
if self.timers:
attrs[ATTR_TIMERS] = self.timers
return attrs
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._available
@property
def supported_features(self):
"""Flag vacuum cleaner robot features that are supported."""
return SUPPORT_XIAOMI
async def _try_command(self, mask_error, func, *args, **kwargs):
"""Call a vacuum command handling error messages."""
try:
await self.hass.async_add_executor_job(partial(func, *args, **kwargs))
return True
except DeviceException as exc:
_LOGGER.error(mask_error, exc)
return False
async def async_start(self):
"""Start or resume the cleaning task."""
await self._try_command(
"Unable to start the vacuum: %s", self._vacuum.resume_or_start
)
async def async_pause(self):
"""Pause the cleaning task."""
await self._try_command("Unable to set start/pause: %s", self._vacuum.pause)
async def async_stop(self, **kwargs):
"""Stop the vacuum cleaner."""
await self._try_command("Unable to stop: %s", self._vacuum.stop)
async def async_set_fan_speed(self, fan_speed, **kwargs):
"""Set fan speed."""
if fan_speed in self._fan_speeds:
fan_speed = self._fan_speeds[fan_speed]
else:
try:
fan_speed = int(fan_speed)
except ValueError as exc:
_LOGGER.error(
"Fan speed step not recognized (%s). Valid speeds are: %s",
exc,
self.fan_speed_list,
)
return
await self._try_command(
"Unable to set fan speed: %s", self._vacuum.set_fan_speed, fan_speed
)
async def async_return_to_base(self, **kwargs):
"""Set the vacuum cleaner to return to the dock."""
await self._try_command("Unable to return home: %s", self._vacuum.home)
async def async_clean_spot(self, **kwargs):
"""Perform a spot clean-up."""
await self._try_command(
"Unable to start the vacuum for a spot clean-up: %s", self._vacuum.spot
)
async def async_locate(self, **kwargs):
"""Locate the vacuum cleaner."""
await self._try_command("Unable to locate the botvac: %s", self._vacuum.find)
async def async_send_command(self, command, params=None, **kwargs):
"""Send raw command."""
await self._try_command(
"Unable to send command to the vacuum: %s",
self._vacuum.raw_command,
command,
params,
)
async def async_remote_control_start(self):
"""Start remote control mode."""
await self._try_command(
"Unable to start remote control the vacuum: %s", self._vacuum.manual_start
)
async def async_remote_control_stop(self):
"""Stop remote control mode."""
await self._try_command(
"Unable to stop remote control the vacuum: %s", self._vacuum.manual_stop
)
async def async_remote_control_move(
self, rotation: int = 0, velocity: float = 0.3, duration: int = 1500
):
"""Move vacuum with remote control mode."""
await self._try_command(
"Unable to move with remote control the vacuum: %s",
self._vacuum.manual_control,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_remote_control_move_step(
self, rotation: int = 0, velocity: float = 0.2, duration: int = 1500
):
"""Move vacuum one step with remote control mode."""
await self._try_command(
"Unable to remote control the vacuum: %s",
self._vacuum.manual_control_once,
velocity=velocity,
rotation=rotation,
duration=duration,
)
async def async_goto(self, x_coord: int, y_coord: int):
"""Goto the specified coordinates."""
await self._try_command(
"Unable to send the vacuum cleaner to the specified coordinates: %s",
self._vacuum.goto,
x_coord=x_coord,
y_coord=y_coord,
)
async def async_clean_segment(self, segments):
"""Clean the specified segments(s)."""
if isinstance(segments, int):
segments = [segments]
await self._try_command(
"Unable to start cleaning of the specified segments: %s",
self._vacuum.segment_clean,
segments=segments,
)
def update(self):
"""Fetch state from the device."""
try:
state = self._vacuum.status()
self.vacuum_state = state
self._fan_speeds = self._vacuum.fan_speed_presets()
self._fan_speeds_reverse = {v: k for k, v in self._fan_speeds.items()}
self.consumable_state = self._vacuum.consumable_status()
self.clean_history = self._vacuum.clean_history()
self.last_clean = self._vacuum.last_clean_details()
self.dnd_state = self._vacuum.dnd_status()
self._available = True
except (OSError, DeviceException) as exc:
if self._available:
self._available = False
_LOGGER.warning("Got exception while fetching the state: %s", exc)
# Fetch timers separately, see #38285
try:
self._timers = self._vacuum.timer()
except DeviceException as exc:
_LOGGER.debug(
"Unable to fetch timers, this may happen on some devices: %s", exc
)
self._timers = []
async def async_clean_zone(self, zone, repeats=1):
"""Clean selected area for the number of repeats indicated."""
for _zone in zone:
_zone.append(repeats)
_LOGGER.debug("Zone with repeats: %s", zone)
try:
await self.hass.async_add_executor_job(self._vacuum.zoned_clean, zone)
except (OSError, DeviceException) as exc:
_LOGGER.error("Unable to send zoned_clean command to the vacuum: %s", exc)
|
from datetime import timedelta
from typing import Any, Dict
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
import homeassistant.util.dt as dt_util
from . import DOMAIN, GeniusDevice, GeniusEntity
GH_STATE_ATTR = "batteryLevel"
GH_LEVEL_MAPPING = {
"error": "Errors",
"warning": "Warnings",
"information": "Information",
}
async def async_setup_platform(
hass: HomeAssistantType, config: ConfigType, async_add_entities, discovery_info=None
) -> None:
"""Set up the Genius Hub sensor entities."""
if discovery_info is None:
return
broker = hass.data[DOMAIN]["broker"]
sensors = [
GeniusBattery(broker, d, GH_STATE_ATTR)
for d in broker.client.device_objs
if GH_STATE_ATTR in d.data["state"]
]
issues = [GeniusIssue(broker, i) for i in list(GH_LEVEL_MAPPING)]
async_add_entities(sensors + issues, update_before_add=True)
class GeniusBattery(GeniusDevice):
"""Representation of a Genius Hub sensor."""
def __init__(self, broker, device, state_attr) -> None:
"""Initialize the sensor."""
super().__init__(broker, device)
self._state_attr = state_attr
self._name = f"{device.type} {device.id}"
@property
def icon(self) -> str:
"""Return the icon of the sensor."""
if "_state" in self._device.data: # only for v3 API
interval = timedelta(
seconds=self._device.data["_state"].get("wakeupInterval", 30 * 60)
)
if self._last_comms < dt_util.utcnow() - interval * 3:
return "mdi:battery-unknown"
battery_level = self._device.data["state"][self._state_attr]
if battery_level == 255:
return "mdi:battery-unknown"
if battery_level < 40:
return "mdi:battery-alert"
icon = "mdi:battery"
if battery_level <= 95:
icon += f"-{int(round(battery_level / 10 - 0.01)) * 10}"
return icon
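# Worked example (hypothetical reading): battery_level == 73 gives
# round(73 / 10 - 0.01) == 7, so the icon is "mdi:battery-70"; a level of 255
# or a stale reading falls back to "mdi:battery-unknown", and anything below
# 40 becomes "mdi:battery-alert".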
@property
def device_class(self) -> str:
"""Return the device class of the sensor."""
return DEVICE_CLASS_BATTERY
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement of the sensor."""
return PERCENTAGE
@property
def state(self) -> str:
"""Return the state of the sensor."""
level = self._device.data["state"][self._state_attr]
return level if level != 255 else 0
class GeniusIssue(GeniusEntity):
"""Representation of a Genius Hub sensor."""
def __init__(self, broker, level) -> None:
"""Initialize the sensor."""
super().__init__()
self._hub = broker.client
self._unique_id = f"{broker.hub_uid}_{GH_LEVEL_MAPPING[level]}"
self._name = f"GeniusHub {GH_LEVEL_MAPPING[level]}"
self._level = level
self._issues = []
@property
def state(self) -> str:
"""Return the number of issues."""
return len(self._issues)
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the device state attributes."""
return {f"{self._level}_list": self._issues}
async def async_update(self) -> None:
"""Process the sensor's state data."""
self._issues = [
i["description"] for i in self._hub.issues if i["level"] == self._level
]
|
import math
from datetime import date, datetime, timedelta, timezone
from itertools import chain
from radicale import xmlutils
from radicale.log import logger
DAY = timedelta(days=1)
SECOND = timedelta(seconds=1)
DATETIME_MIN = datetime.min.replace(tzinfo=timezone.utc)
DATETIME_MAX = datetime.max.replace(tzinfo=timezone.utc)
TIMESTAMP_MIN = math.floor(DATETIME_MIN.timestamp())
TIMESTAMP_MAX = math.ceil(DATETIME_MAX.timestamp())
def date_to_datetime(date_):
"""Transform a date to a UTC datetime.
If date_ is a datetime without timezone, return as UTC datetime. If date_
is already a datetime with timezone, return as is.
"""
if not isinstance(date_, datetime):
date_ = datetime.combine(date_, datetime.min.time())
if not date_.tzinfo:
date_ = date_.replace(tzinfo=timezone.utc)
return date_
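# Worked example (arbitrary date): date(2024, 7, 1) becomes
# datetime(2024, 7, 1, 0, 0, tzinfo=timezone.utc), a naive datetime is tagged
# as UTC, and a datetime that already carries a timezone is returned unchanged.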
def comp_match(item, filter_, level=0):
"""Check whether the ``item`` matches the comp ``filter_``.
If ``level`` is ``0``, the filter is applied on the
item's collection. Otherwise, it's applied on the item.
See rfc4791-9.7.1.
"""
# TODO: Filtering VALARM and VFREEBUSY is not implemented
# HACK: the filters are tested separately against all components
if level == 0:
tag = item.name
elif level == 1:
tag = item.component_name
else:
logger.warning(
"Filters with three levels of comp-filter are not supported")
return True
if not tag:
return False
name = filter_.get("name").upper()
if len(filter_) == 0:
# Point #1 of rfc4791-9.7.1
return name == tag
if len(filter_) == 1:
if filter_[0].tag == xmlutils.make_clark("C:is-not-defined"):
# Point #2 of rfc4791-9.7.1
return name != tag
if name != tag:
return False
if (level == 0 and name != "VCALENDAR" or
level == 1 and name not in ("VTODO", "VEVENT", "VJOURNAL")):
logger.warning("Filtering %s is not supported", name)
return True
# Point #3 and #4 of rfc4791-9.7.1
components = ([item.vobject_item] if level == 0
else list(getattr(item.vobject_item,
"%s_list" % tag.lower())))
for child in filter_:
if child.tag == xmlutils.make_clark("C:prop-filter"):
if not any(prop_match(comp, child, "C")
for comp in components):
return False
elif child.tag == xmlutils.make_clark("C:time-range"):
if not time_range_match(item.vobject_item, filter_[0], tag):
return False
elif child.tag == xmlutils.make_clark("C:comp-filter"):
if not comp_match(item, child, level=level + 1):
return False
else:
raise ValueError("Unexpected %r in comp-filter" % child.tag)
return True
def prop_match(vobject_item, filter_, ns):
"""Check whether the ``item`` matches the prop ``filter_``.
See rfc4791-9.7.2 and rfc6352-10.5.1.
"""
name = filter_.get("name").lower()
if len(filter_) == 0:
# Point #1 of rfc4791-9.7.2
return name in vobject_item.contents
if len(filter_) == 1:
if filter_[0].tag == xmlutils.make_clark("C:is-not-defined"):
# Point #2 of rfc4791-9.7.2
return name not in vobject_item.contents
if name not in vobject_item.contents:
return False
# Point #3 and #4 of rfc4791-9.7.2
for child in filter_:
if ns == "C" and child.tag == xmlutils.make_clark("C:time-range"):
if not time_range_match(vobject_item, child, name):
return False
elif child.tag == xmlutils.make_clark("%s:text-match" % ns):
if not text_match(vobject_item, child, name, ns):
return False
elif child.tag == xmlutils.make_clark("%s:param-filter" % ns):
if not param_filter_match(vobject_item, child, name, ns):
return False
else:
raise ValueError("Unexpected %r in prop-filter" % child.tag)
return True
def time_range_match(vobject_item, filter_, child_name):
"""Check whether the component/property ``child_name`` of
``vobject_item`` matches the time-range ``filter_``."""
start = filter_.get("start")
end = filter_.get("end")
if not start and not end:
return False
if start:
start = datetime.strptime(start, "%Y%m%dT%H%M%SZ")
else:
start = datetime.min
if end:
end = datetime.strptime(end, "%Y%m%dT%H%M%SZ")
else:
end = datetime.max
start = start.replace(tzinfo=timezone.utc)
end = end.replace(tzinfo=timezone.utc)
matched = False
def range_fn(range_start, range_end, is_recurrence):
nonlocal matched
if start < range_end and range_start < end:
matched = True
return True
if end < range_start and not is_recurrence:
return True
return False
def infinity_fn(start):
return False
visit_time_ranges(vobject_item, child_name, range_fn, infinity_fn)
return matched
def visit_time_ranges(vobject_item, child_name, range_fn, infinity_fn):
"""Visit all time ranges in the component/property ``child_name`` of
    ``vobject_item`` with visitors ``range_fn`` and ``infinity_fn``.
``range_fn`` gets called for every time_range with ``start`` and ``end``
datetimes and ``is_recurrence`` as arguments. If the function returns True,
the operation is cancelled.
    ``infinity_fn`` gets called when an infinite recurrence rule is detected
with ``start`` datetime as argument. If the function returns True, the
operation is cancelled.
See rfc4791-9.9.
"""
    # HACK: According to rfc5545-3.8.4.4 a recurrence that is rescheduled
    # with Recurrence-ID affects the recurrence itself and all following
    # recurrences too. This is not respected and clients don't seem to bother
    # either.
def getrruleset(child, ignore=()):
if (hasattr(child, "rrule") and
";UNTIL=" not in child.rrule.value.upper() and
";COUNT=" not in child.rrule.value.upper()):
for dtstart in child.getrruleset(addRDate=True):
if dtstart in ignore:
continue
if infinity_fn(date_to_datetime(dtstart)):
return (), True
break
return filter(lambda dtstart: dtstart not in ignore,
child.getrruleset(addRDate=True)), False
def get_children(components):
main = None
recurrences = []
for comp in components:
if hasattr(comp, "recurrence_id") and comp.recurrence_id.value:
recurrences.append(comp.recurrence_id.value)
if comp.rruleset:
# Prevent possible infinite loop
raise ValueError("Overwritten recurrence with RRULESET")
yield comp, True, ()
else:
if main is not None:
raise ValueError("Multiple main components")
main = comp
if main is None:
raise ValueError("Main component missing")
yield main, False, recurrences
# Comments give the lines in the tables of the specification
if child_name == "VEVENT":
for child, is_recurrence, recurrences in get_children(
vobject_item.vevent_list):
# TODO: check if there's a timezone
dtstart = child.dtstart.value
if child.rruleset:
dtstarts, infinity = getrruleset(child, recurrences)
if infinity:
return
else:
dtstarts = (dtstart,)
dtend = getattr(child, "dtend", None)
if dtend is not None:
dtend = dtend.value
original_duration = (dtend - dtstart).total_seconds()
dtend = date_to_datetime(dtend)
duration = getattr(child, "duration", None)
if duration is not None:
original_duration = duration = duration.value
for dtstart in dtstarts:
dtstart_is_datetime = isinstance(dtstart, datetime)
dtstart = date_to_datetime(dtstart)
if dtend is not None:
# Line 1
dtend = dtstart + timedelta(seconds=original_duration)
if range_fn(dtstart, dtend, is_recurrence):
return
elif duration is not None:
if original_duration is None:
original_duration = duration.seconds
if duration.seconds > 0:
# Line 2
if range_fn(dtstart, dtstart + duration,
is_recurrence):
return
else:
# Line 3
if range_fn(dtstart, dtstart + SECOND, is_recurrence):
return
elif dtstart_is_datetime:
# Line 4
if range_fn(dtstart, dtstart + SECOND, is_recurrence):
return
else:
# Line 5
if range_fn(dtstart, dtstart + DAY, is_recurrence):
return
elif child_name == "VTODO":
for child, is_recurrence, recurrences in get_children(
vobject_item.vtodo_list):
dtstart = getattr(child, "dtstart", None)
duration = getattr(child, "duration", None)
due = getattr(child, "due", None)
completed = getattr(child, "completed", None)
created = getattr(child, "created", None)
if dtstart is not None:
dtstart = date_to_datetime(dtstart.value)
if duration is not None:
duration = duration.value
if due is not None:
due = date_to_datetime(due.value)
if dtstart is not None:
original_duration = (due - dtstart).total_seconds()
if completed is not None:
completed = date_to_datetime(completed.value)
if created is not None:
created = date_to_datetime(created.value)
original_duration = (completed - created).total_seconds()
elif created is not None:
created = date_to_datetime(created.value)
if child.rruleset:
reference_dates, infinity = getrruleset(child, recurrences)
if infinity:
return
else:
if dtstart is not None:
reference_dates = (dtstart,)
elif due is not None:
reference_dates = (due,)
elif completed is not None:
reference_dates = (completed,)
elif created is not None:
reference_dates = (created,)
else:
# Line 8
if range_fn(DATETIME_MIN, DATETIME_MAX, is_recurrence):
return
reference_dates = ()
for reference_date in reference_dates:
reference_date = date_to_datetime(reference_date)
if dtstart is not None and duration is not None:
# Line 1
if range_fn(reference_date,
reference_date + duration + SECOND,
is_recurrence):
return
if range_fn(reference_date + duration - SECOND,
reference_date + duration + SECOND,
is_recurrence):
return
elif dtstart is not None and due is not None:
# Line 2
due = reference_date + timedelta(seconds=original_duration)
if (range_fn(reference_date, due, is_recurrence) or
range_fn(reference_date,
reference_date + SECOND, is_recurrence) or
range_fn(due - SECOND, due, is_recurrence) or
range_fn(due - SECOND, reference_date + SECOND,
is_recurrence)):
return
elif dtstart is not None:
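                    # Line 3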
if range_fn(reference_date, reference_date + SECOND,
is_recurrence):
return
elif due is not None:
# Line 4
if range_fn(reference_date - SECOND, reference_date,
is_recurrence):
return
elif completed is not None and created is not None:
# Line 5
completed = reference_date + timedelta(
seconds=original_duration)
if (range_fn(reference_date - SECOND,
reference_date + SECOND,
is_recurrence) or
range_fn(completed - SECOND, completed + SECOND,
is_recurrence) or
range_fn(reference_date - SECOND,
reference_date + SECOND, is_recurrence) or
range_fn(completed - SECOND, completed + SECOND,
is_recurrence)):
return
elif completed is not None:
# Line 6
if range_fn(reference_date - SECOND,
reference_date + SECOND, is_recurrence):
return
elif created is not None:
# Line 7
if range_fn(reference_date, DATETIME_MAX, is_recurrence):
return
elif child_name == "VJOURNAL":
for child, is_recurrence, recurrences in get_children(
vobject_item.vjournal_list):
dtstart = getattr(child, "dtstart", None)
if dtstart is not None:
dtstart = dtstart.value
if child.rruleset:
dtstarts, infinity = getrruleset(child, recurrences)
if infinity:
return
else:
dtstarts = (dtstart,)
for dtstart in dtstarts:
dtstart_is_datetime = isinstance(dtstart, datetime)
dtstart = date_to_datetime(dtstart)
if dtstart_is_datetime:
# Line 1
if range_fn(dtstart, dtstart + SECOND, is_recurrence):
return
else:
# Line 2
if range_fn(dtstart, dtstart + DAY, is_recurrence):
return
else:
# Match a property
child = getattr(vobject_item, child_name.lower())
if isinstance(child, date):
child_is_datetime = isinstance(child, datetime)
child = date_to_datetime(child)
if child_is_datetime:
range_fn(child, child + SECOND, False)
else:
range_fn(child, child + DAY, False)
def text_match(vobject_item, filter_, child_name, ns, attrib_name=None):
"""Check whether the ``item`` matches the text-match ``filter_``.
See rfc4791-9.7.5.
"""
# TODO: collations are not supported, but the default ones needed
# for DAV servers are actually pretty useless. Texts are lowered to
# be case-insensitive, almost as the "i;ascii-casemap" value.
text = next(filter_.itertext()).lower()
match_type = "contains"
if ns == "CR":
match_type = filter_.get("match-type", match_type)
def match(value):
value = value.lower()
if match_type == "equals":
return value == text
if match_type == "contains":
return text in value
if match_type == "starts-with":
return value.startswith(text)
if match_type == "ends-with":
return value.endswith(text)
raise ValueError("Unexpected text-match match-type: %r" % match_type)
children = getattr(vobject_item, "%s_list" % child_name, [])
if attrib_name:
condition = any(
match(attrib) for child in children
for attrib in child.params.get(attrib_name, []))
else:
condition = any(match(child.value) for child in children)
if filter_.get("negate-condition") == "yes":
return not condition
return condition
def param_filter_match(vobject_item, filter_, parent_name, ns):
"""Check whether the ``item`` matches the param-filter ``filter_``.
See rfc4791-9.7.3.
"""
name = filter_.get("name").upper()
children = getattr(vobject_item, "%s_list" % parent_name, [])
condition = any(name in child.params for child in children)
if len(filter_) > 0:
if filter_[0].tag == xmlutils.make_clark("%s:text-match" % ns):
return condition and text_match(
vobject_item, filter_[0], parent_name, ns, name)
if filter_[0].tag == xmlutils.make_clark("%s:is-not-defined" % ns):
return not condition
return condition
def simplify_prefilters(filters, collection_tag="VCALENDAR"):
"""Creates a simplified condition from ``filters``.
Returns a tuple (``tag``, ``start``, ``end``, ``simple``) where ``tag`` is
a string or None (match all) and ``start`` and ``end`` are POSIX
timestamps (as int). ``simple`` is a bool that indicates that ``filters``
and the simplified condition are identical.
"""
flat_filters = tuple(chain.from_iterable(filters))
simple = len(flat_filters) <= 1
for col_filter in flat_filters:
if collection_tag != "VCALENDAR":
simple = False
break
if (col_filter.tag != xmlutils.make_clark("C:comp-filter") or
col_filter.get("name").upper() != "VCALENDAR"):
simple = False
continue
simple &= len(col_filter) <= 1
for comp_filter in col_filter:
if comp_filter.tag != xmlutils.make_clark("C:comp-filter"):
simple = False
continue
tag = comp_filter.get("name").upper()
if comp_filter.find(
xmlutils.make_clark("C:is-not-defined")) is not None:
simple = False
continue
simple &= len(comp_filter) <= 1
for time_filter in comp_filter:
if tag not in ("VTODO", "VEVENT", "VJOURNAL"):
simple = False
break
if time_filter.tag != xmlutils.make_clark("C:time-range"):
simple = False
continue
start = time_filter.get("start")
end = time_filter.get("end")
if start:
start = math.floor(datetime.strptime(
start, "%Y%m%dT%H%M%SZ").replace(
tzinfo=timezone.utc).timestamp())
else:
start = TIMESTAMP_MIN
if end:
end = math.ceil(datetime.strptime(
end, "%Y%m%dT%H%M%SZ").replace(
tzinfo=timezone.utc).timestamp())
else:
end = TIMESTAMP_MAX
return tag, start, end, simple
return tag, TIMESTAMP_MIN, TIMESTAMP_MAX, simple
return None, TIMESTAMP_MIN, TIMESTAMP_MAX, simple
|
from homeassistant.const import (
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
TEMPERATURE,
UNIT_NOT_RECOGNIZED_TEMPLATE,
)
def fahrenheit_to_celsius(fahrenheit: float, interval: bool = False) -> float:
"""Convert a temperature in Fahrenheit to Celsius."""
if interval:
return fahrenheit / 1.8
return (fahrenheit - 32.0) / 1.8
def celsius_to_fahrenheit(celsius: float, interval: bool = False) -> float:
"""Convert a temperature in Celsius to Fahrenheit."""
if interval:
return celsius * 1.8
return celsius * 1.8 + 32.0
def convert(
temperature: float, from_unit: str, to_unit: str, interval: bool = False
) -> float:
"""Convert a temperature from one unit to another."""
if from_unit not in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(from_unit, TEMPERATURE))
if to_unit not in (TEMP_CELSIUS, TEMP_FAHRENHEIT):
raise ValueError(UNIT_NOT_RECOGNIZED_TEMPLATE.format(to_unit, TEMPERATURE))
if from_unit == to_unit:
return temperature
if from_unit == TEMP_CELSIUS:
return celsius_to_fahrenheit(temperature, interval)
return fahrenheit_to_celsius(temperature, interval)
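# Minimal usage sketch (not part of the original Home Assistant module),
# runnable only when this file is executed directly:
if __name__ == "__main__":
    print(convert(212.0, TEMP_FAHRENHEIT, TEMP_CELSIUS))  # 100.0
    print(convert(100.0, TEMP_CELSIUS, TEMP_FAHRENHEIT))  # 212.0
    print(convert(10.0, TEMP_CELSIUS, TEMP_FAHRENHEIT, interval=True))  # 18.0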
|
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
import numpy as np
import unittest
from chainercv import functions
from tests.functions_tests.test_ps_roi_average_pooling_2d import _outsize
@testing.parameterize(*testing.product({
'sampling_ratio': [(np.int(1), np.int(2)), None, 1, 2, (None, 3), (1, 2)],
'spatial_scale': [np.float(0.6), np.int(1), 0.6, 1.0, 2.0],
'outsize': [(np.int(2), np.int(4), np.int(4)), (2, 4, 4), (4, 4), 4],
}))
class TestPSROIMaxAlign2D(unittest.TestCase):
def setUp(self):
self.N = 3
self.group_size = 2
self.out_c, self.out_h, self.out_w = _outsize(self.outsize)
if self.out_c is None:
self.out_c = 2
self.n_channels = self.group_size * self.group_size * self.out_c
self.x = np.arange(
self.N * self.n_channels * 10 * 12,
dtype=np.float32).reshape((self.N, self.n_channels, 10, 12))
np.random.shuffle(self.x)
self.x = 2 * self.x / self.x.size - 1
self.x = self.x.astype(np.float32)
self.rois = np.array(
[[0, 0, 7, 7],
[1, 0, 5, 12],
[0, 1, 10, 5],
[3, 3, 4, 4]],
dtype=np.float32
)
self.roi_indices = np.array([0, 2, 1, 0], dtype=np.int32)
self.n_roi = self.rois.shape[0]
self.out_h, self.out_w = 4, 4
self.gy = np.random.uniform(
-1, 1, (self.n_roi, self.out_c, self.out_h, self.out_w))
self.gy = self.gy.astype(np.float32)
self.check_backward_options = {'atol': 5e-4, 'rtol': 5e-3}
def check_forward(self, x_data, roi_data, roi_index_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
y = functions.ps_roi_max_align_2d(
x, rois, roi_indices, self.outsize,
self.spatial_scale, self.group_size,
sampling_ratio=self.sampling_ratio)
self.assertEqual(y.data.dtype, np.float32)
y_data = cuda.to_cpu(y.data)
self.assertEqual(
(self.n_roi, self.out_c, self.out_h, self.out_w), y_data.shape)
@condition.retry(3)
def test_forward_cpu(self):
self.check_forward(self.x, self.rois, self.roi_indices)
@attr.gpu
@condition.retry(3)
def test_forward_gpu(self):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices))
def check_backward(self, x_data, roi_data, roi_index_data, y_grad_data):
def f(x, rois, roi_indices):
y = functions.ps_roi_max_align_2d(
x, rois, roi_indices, self.outsize,
self.spatial_scale, self.group_size,
sampling_ratio=self.sampling_ratio)
xp = cuda.get_array_module(y)
y = F.where(
xp.isinf(y.array), xp.zeros(y.shape, dtype=y.dtype), y)
return y
gradient_check.check_backward(
f, (x_data, roi_data, roi_index_data), y_grad_data,
no_grads=[False, True, True], **self.check_backward_options)
@condition.retry(3)
def test_backward_cpu(self):
self.check_backward(self.x, self.rois, self.roi_indices, self.gy)
@attr.gpu
@condition.retry(3)
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
def apply_backward(self, x_data, roi_data, roi_index_data, y_grad_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
y = functions.ps_roi_max_align_2d(
x, rois, roi_indices, self.outsize,
self.spatial_scale, self.group_size,
sampling_ratio=self.sampling_ratio)
x.cleargrad()
y.grad = y_grad_data
y.backward()
return x, y
@attr.gpu
@condition.retry(3)
def test_consistency_with_gpu(self):
x_cpu, y_cpu = self.apply_backward(
self.x, self.rois, self.roi_indices, self.gy)
x_gpu, y_gpu = self.apply_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices), cuda.to_gpu(self.gy))
testing.assert_allclose(y_cpu.data, y_gpu.data)
testing.assert_allclose(x_cpu.grad, x_gpu.grad)
@testing.parameterize(*testing.product({
'outsize': [(2, 4, 4), (4, 4), 4]
}))
class TestPSROIMaxAlign2DFailure(unittest.TestCase):
def setUp(self):
self.N = 3
self.group_size = 2
self.spatial_scale = 0.6
out_c, _, _ = _outsize(self.outsize)
if out_c is None:
self.n_channels = self.group_size * self.group_size * 2 - 1
else:
self.n_channels = self.group_size * self.group_size * (out_c + 1)
self.x = np.arange(
self.N * self.n_channels * 10 * 12,
dtype=np.float32).reshape((self.N, self.n_channels, 10, 12))
np.random.shuffle(self.x)
self.x = 2 * self.x / self.x.size - 1
self.x = self.x.astype(np.float32)
self.rois = np.array(
[[0, 0, 7, 7],
[1, 0, 5, 12],
[0, 1, 10, 5],
[3, 3, 4, 4]],
dtype=np.float32
)
self.roi_indices = np.array([0, 2, 1, 0], dtype=np.int32)
self.n_roi = self.rois.shape[0]
def check_forward(self, x_data, roi_data, roi_index_data):
x = chainer.Variable(x_data)
rois = chainer.Variable(roi_data)
roi_indices = chainer.Variable(roi_index_data)
functions.ps_roi_max_align_2d(
x, rois, roi_indices, self.outsize,
self.spatial_scale, self.group_size)
@condition.retry(3)
def test_invalid_outsize_cpu(self):
with self.assertRaises(ValueError):
self.check_forward(self.x, self.rois, self.roi_indices)
@attr.gpu
@condition.retry(3)
def test_invalid_outsize_gpu(self):
with self.assertRaises(ValueError):
self.check_forward(
cuda.to_gpu(self.x), cuda.to_gpu(self.rois),
cuda.to_gpu(self.roi_indices))
testing.run_module(__name__, __file__)
|
import argparse
import glob
import os
import time
import random
COLOURS = (b'\xFF\x00\x00', b'\x00\xFF\x00', b'\x00\x00\xFF', b'\xFF\xFF\x00', b'\xFF\x00\xFF', b'\x00\xFF\xFF')
def write_binary(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'wb') as open_file:
open_file.write(payload)
def read_string(driver_path, device_file):
with open(os.path.join(driver_path, device_file), 'r') as open_file:
return open_file.read().rstrip('\n')
def write_string(driver_path, device_file, payload):
with open(os.path.join(driver_path, device_file), 'w') as open_file:
open_file.write(payload)
def find_devices(vid, pid):
driver_paths = glob.glob(os.path.join('/sys/bus/hid/drivers/razeraccessory', '*:{0:04X}:{1:04X}.*'.format(vid, pid)))
for driver_path in driver_paths:
device_type_path = os.path.join(driver_path, 'device_type')
if os.path.exists(device_type_path):
yield driver_path
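# Example (the path is illustrative, not taken from a real system) of a sysfs
# driver path the glob above would match for VID 0x1532 / PID 0x0068:
#   /sys/bus/hid/drivers/razeraccessory/0003:1532:0068.0003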
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('--skip-standard', action='store_true')
parser.add_argument('--skip-custom', action='store_true')
parser.add_argument('--skip-game-led', action='store_true')
parser.add_argument('--skip-macro-led', action='store_true')
return parser.parse_args()
if __name__ == '__main__':
args = parse_args()
found_chroma = False
for index, driver_path in enumerate(find_devices(0x1532, 0x0068), start=1):
found_chroma = True
print("Razer Firefly_hyperflux {0}\n".format(index))
print("Driver version: {0}".format(read_string(driver_path, 'version')))
print("Driver firmware version: {0}".format(read_string(driver_path, 'firmware_version')))
print("Device serial: {0}".format(read_string(driver_path, 'device_serial')))
print("Device type: {0}".format(read_string(driver_path, 'device_type')))
print("Device mode: {0}".format(read_string(driver_path, 'device_mode')))
# Set to static red so that we have something standard
write_binary(driver_path, 'matrix_effect_static', b'\xFF\x00\x00')
if not args.skip_standard:
print("Starting brightness test. Press enter to begin.")
input()
print("Max brightness...", end='')
write_string(driver_path, 'matrix_brightness', '255')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Half brightness...", end='')
write_string(driver_path, 'matrix_brightness', '128')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
print("Zero brightness...", end='')
write_string(driver_path, 'matrix_brightness', '0')
time.sleep(1)
print("brightness ({0})".format(read_string(driver_path, 'matrix_brightness')))
time.sleep(1)
write_string(driver_path, 'matrix_brightness', '255')
print("Starting other colour effect tests. Press enter to begin.")
input()
print("Green Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\x00')
time.sleep(5)
print("Cyan Static")
write_binary(driver_path, 'matrix_effect_static', b'\x00\xFF\xFF')
time.sleep(5)
print("Spectrum")
write_binary(driver_path, 'matrix_effect_spectrum', b'\x00')
time.sleep(10)
print("None")
write_binary(driver_path, 'matrix_effect_none', b'\x00')
time.sleep(5)
print("Wave Left")
write_string(driver_path, 'matrix_effect_wave', '1')
time.sleep(5)
print("Wave Right")
write_string(driver_path, 'matrix_effect_wave', '2')
time.sleep(5)
print("Breathing random")
write_binary(driver_path, 'matrix_effect_breath', b'\x00')
time.sleep(10)
print("Breathing red")
write_binary(driver_path, 'matrix_effect_breath', b'\xFF\x00\x00')
time.sleep(10)
print("Breathing blue-green")
write_binary(driver_path, 'matrix_effect_breath', b'\x00\xFF\x00\x00\x00\xFF')
time.sleep(10)
if not args.skip_custom:
# Custom LEDs all rows
payload_all = b'\x00\x00\x0E'
for i in range(0, 15): # 15 colours 0x00-0x0E
payload_all += random.choice(COLOURS)
payload_m1_5 = b''
for led in (0x00, 0x0E):
led_byte = led.to_bytes(1, byteorder='big')
payload_m1_5 += b'\x00' + led_byte + led_byte + b'\xFF\xFF\xFF'
print("Custom LED matrix colours test. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_all)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
print("Custom LED matrix partial colours test. First and last led to white. Press enter to begin.")
input()
write_binary(driver_path, 'matrix_custom_frame', payload_m1_5)
write_binary(driver_path, 'matrix_effect_custom', b'\x00')
time.sleep(0.5)
print("Finished")
if not found_chroma:
print("No Fireflies found")
|
from pybotvac.exceptions import NeatoLoginException, NeatoRobotException
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.neato import config_flow
from homeassistant.components.neato.const import CONF_VENDOR, NEATO_DOMAIN
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
from tests.async_mock import patch
from tests.common import MockConfigEntry
USERNAME = "myUsername"
PASSWORD = "myPassword"
VENDOR_NEATO = "neato"
VENDOR_VORWERK = "vorwerk"
VENDOR_INVALID = "invalid"
@pytest.fixture(name="account")
def mock_controller_login():
"""Mock a successful login."""
with patch("homeassistant.components.neato.config_flow.Account", return_value=True):
yield
def init_config_flow(hass):
"""Init a configuration flow."""
flow = config_flow.NeatoConfigFlow()
flow.hass = hass
return flow
async def test_user(hass, account):
"""Test user config."""
flow = init_config_flow(hass)
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
result = await flow.async_step_user(
{CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD, CONF_VENDOR: VENDOR_NEATO}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == USERNAME
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_VENDOR] == VENDOR_NEATO
result = await flow.async_step_user(
{CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD, CONF_VENDOR: VENDOR_VORWERK}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == USERNAME
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_VENDOR] == VENDOR_VORWERK
async def test_import(hass, account):
"""Test import step."""
flow = init_config_flow(hass)
result = await flow.async_step_import(
{CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD, CONF_VENDOR: VENDOR_NEATO}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == f"{USERNAME} (from configuration)"
assert result["data"][CONF_USERNAME] == USERNAME
assert result["data"][CONF_PASSWORD] == PASSWORD
assert result["data"][CONF_VENDOR] == VENDOR_NEATO
async def test_abort_if_already_setup(hass, account):
"""Test we abort if Neato is already setup."""
flow = init_config_flow(hass)
MockConfigEntry(
domain=NEATO_DOMAIN,
data={
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_VENDOR: VENDOR_NEATO,
},
).add_to_hass(hass)
# Should fail, same USERNAME (import)
result = await flow.async_step_import(
{CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD, CONF_VENDOR: VENDOR_NEATO}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
# Should fail, same USERNAME (flow)
result = await flow.async_step_user(
{CONF_USERNAME: USERNAME, CONF_PASSWORD: PASSWORD, CONF_VENDOR: VENDOR_NEATO}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_abort_on_invalid_credentials(hass):
"""Test when we have invalid credentials."""
flow = init_config_flow(hass)
with patch(
"homeassistant.components.neato.config_flow.Account",
side_effect=NeatoLoginException(),
):
result = await flow.async_step_user(
{
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_VENDOR: VENDOR_NEATO,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "invalid_auth"}
result = await flow.async_step_import(
{
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_VENDOR: VENDOR_NEATO,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "invalid_auth"
async def test_abort_on_unexpected_error(hass):
"""Test when we have an unexpected error."""
flow = init_config_flow(hass)
with patch(
"homeassistant.components.neato.config_flow.Account",
side_effect=NeatoRobotException(),
):
result = await flow.async_step_user(
{
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_VENDOR: VENDOR_NEATO,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "unknown"}
result = await flow.async_step_import(
{
CONF_USERNAME: USERNAME,
CONF_PASSWORD: PASSWORD,
CONF_VENDOR: VENDOR_NEATO,
}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "unknown"
|
import numpy as np
import pandas as pd
from . import parameterized
class MultiIndexSeries:
def setup(self, dtype, subset):
data = np.random.rand(100000).astype(dtype)
index = pd.MultiIndex.from_product(
[
list("abcdefhijk"),
list("abcdefhijk"),
pd.date_range(start="2000-01-01", periods=1000, freq="B"),
]
)
series = pd.Series(data, index)
if subset:
series = series[::3]
self.series = series
@parameterized(["dtype", "subset"], ([int, float], [True, False]))
def time_to_xarray(self, dtype, subset):
self.series.to_xarray()
|
from __future__ import print_function
import argparse
import locale
import os
import re
import sys
from distutils.version import StrictVersion
from socket import timeout as SocketTimeout
from six.moves import input
import paramiko
__version__ = '0.8.0'
def install_module_from_github(username, package_name, version):
"""
Install python module from github zip files
"""
cmd_string = """
echo Installing {1} {2} ...
wget https://github.com/{0}/{1}/archive/{2}.zip -o $TMPDIR/{1}.zip
mkdir $TMPDIR/{1}_src
unzip $TMPDIR/{1}.zip -d $TMPDIR/{1}_src
rm -f $TMPDIR/{1}.zip
mv $TMPDIR/{1}_src/{1} $STASH_ROOT/lib/
rm -rf $TMPDIR/{1}_src
echo Done
""".format(username,
package_name,
version)
globals()['_stash'](cmd_string)
if StrictVersion(paramiko.__version__) < StrictVersion('1.15'):
# Install paramiko 1.16.0 to fix a bug with version < 1.15
install_module_from_github('paramiko', 'paramiko', 'v1.16.0')
print('Please restart Pythonista for changes to take full effect')
sys.exit(0)
DEBUG = False
APP_DIR = os.environ['STASH_ROOT']
# this is quote from the shlex module, added in py3.3
_find_unsafe = re.compile(br'[^\w@%+=:,./~-]').search
def _sh_quote(s):
"""Return a shell-escaped version of the string `s`."""
if not s:
return b""
if _find_unsafe(s) is None:
return s
# use single quotes, and put single quotes into double quotes
# the string $'b is then quoted as '$'"'"'b'
return b"'" + s.replace(b"'", b"'\"'\"'") + b"'"
# Unicode conversion functions; assume UTF-8
def asbytes(s):
"""Turns unicode into bytes, if needed.
Assumes UTF-8.
"""
if isinstance(s, bytes):
return s
else:
return s.encode('utf-8')
def asunicode(s):
"""Turns bytes into unicode, if needed.
Uses UTF-8.
"""
if isinstance(s, bytes):
return s.decode('utf-8', 'replace')
else:
return s
# os.path.sep is unicode on Python 3, no matter the platform
bytes_sep = asbytes(os.path.sep)
# Unicode conversion function for Windows
# Used to convert local paths if the local machine is Windows
def asunicode_win(s):
"""Turns bytes into unicode, if needed.
"""
if isinstance(s, bytes):
return s.decode(locale.getpreferredencoding())
else:
return s
class SCPClient(object):
"""
An scp1 implementation, compatible with openssh scp.
Raises SCPException for all transport related errors. Local filesystem
and OS errors pass through.
Main public methods are .put and .get
The get method is controlled by the remote scp instance, and behaves
accordingly. This means that symlinks are resolved, and the transfer is
halted after too many levels of symlinks are detected.
The put method uses os.walk for recursion, and sends files accordingly.
Since scp doesn't support symlinks, we send file symlinks as the file
(matching scp behaviour), but we make no attempt at symlinked directories.
"""
def __init__(self, transport, buff_size=16384, socket_timeout=5.0, progress=None, sanitize=_sh_quote):
"""
Create an scp1 client.
@param transport: an existing paramiko L{Transport}
@type transport: L{Transport}
@param buff_size: size of the scp send buffer.
@type buff_size: int
@param socket_timeout: channel socket timeout in seconds
@type socket_timeout: float
@param progress: callback - called with (filename, size, sent) during
transfers
@param sanitize: function - called with filename, should return
safe or escaped string. Uses _sh_quote by default.
@type progress: function(string, int, int)
"""
self.transport = transport
self.buff_size = buff_size
self.socket_timeout = socket_timeout
self.channel = None
self.preserve_times = False
self._progress = progress
self._recv_dir = b''
self._rename = False
self._utime = None
self.sanitize = sanitize
self._dirtimes = {}
def put(self, files, remote_path=b'.', recursive=False, preserve_times=False):
"""
Transfer files to remote host.
        @param files: A single path, or a list of paths to be transferred.
recursive must be True to transfer directories.
@type files: string OR list of strings
@param remote_path: path in which to receive the files on the remote
host. defaults to '.'
@type remote_path: str
@param recursive: transfer files and directories recursively
@type recursive: bool
        @param preserve_times: preserve mtime and atime of transferred files
and directories.
@type preserve_times: bool
"""
self.preserve_times = preserve_times
self.channel = self.transport.open_session()
self.channel.settimeout(self.socket_timeout)
scp_command = (b'scp -t ', b'scp -r -t ')[recursive]
self.channel.exec_command(scp_command + self.sanitize(asbytes(remote_path)))
self._recv_confirm()
if not isinstance(files, (list, tuple)):
files = [files]
if recursive:
self._send_recursive(files)
else:
self._send_files(files)
if self.channel:
self.channel.close()
def get(self, remote_path, local_path='', recursive=False, preserve_times=False):
"""
Transfer files from remote host to localhost
        @param remote_path: path to retrieve from remote host. since this is
evaluated by scp on the remote host, shell wildcards and
environment variables may be used.
@type remote_path: str
@param local_path: path in which to receive files locally
@type local_path: str
@param recursive: transfer files and directories recursively
@type recursive: bool
        @param preserve_times: preserve mtime and atime of transferred files
and directories.
@type preserve_times: bool
"""
if not isinstance(remote_path, (list, tuple)):
remote_path = [remote_path]
remote_path = [self.sanitize(asbytes(r)) for r in remote_path]
self._recv_dir = local_path or os.getcwd()
self._rename = (len(remote_path) == 1 and not os.path.isdir(os.path.abspath(local_path)))
if len(remote_path) > 1:
if not os.path.exists(self._recv_dir):
raise SCPException("Local path '%s' does not exist" % asunicode(self._recv_dir))
elif not os.path.isdir(self._recv_dir):
raise SCPException("Local path '%s' is not a directory" % asunicode(self._recv_dir))
rcsv = (b'', b' -r')[recursive]
prsv = (b'', b' -p')[preserve_times]
self.channel = self.transport.open_session()
self.channel.settimeout(self.socket_timeout)
self.channel.exec_command(b"scp" + rcsv + prsv + b" -f " + b' '.join(remote_path))
self._recv_all()
if self.channel:
self.channel.close()
def _read_stats(self, name):
"""return just the file stats needed for scp"""
stats = os.stat(name)
mode = oct(stats.st_mode)[-4:]
size = stats.st_size
atime = int(stats.st_atime)
mtime = int(stats.st_mtime)
return (mode, size, mtime, atime)
def _send_files(self, files):
for name in files:
basename = asbytes(os.path.basename(name))
(mode, size, mtime, atime) = self._read_stats(name)
if self.preserve_times:
self._send_time(mtime, atime)
file_hdl = open(name, 'rb')
# The protocol can't handle \n in the filename.
# Quote them as the control sequence \^J for now,
# which is how openssh handles it.
self.channel.sendall(("C%s %d " % (mode, size)).encode('ascii') + basename.replace(b'\n', b'\\^J') + b"\n")
self._recv_confirm()
file_pos = 0
if self._progress:
if size == 0:
# avoid divide-by-zero
self._progress(basename, 1, 1)
else:
self._progress(basename, size, 0)
buff_size = self.buff_size
chan = self.channel
while file_pos < size:
chan.sendall(file_hdl.read(buff_size))
file_pos = file_hdl.tell()
if self._progress:
self._progress(basename, size, file_pos)
            chan.sendall(b'\x00')  # end-of-file marker must be bytes
file_hdl.close()
self._recv_confirm()
def _chdir(self, from_dir, to_dir):
# Pop until we're one level up from our next push.
# Push *once* into to_dir.
# This is dependent on the depth-first traversal from os.walk
# add path.sep to each when checking the prefix, so we can use
# path.dirname after
common = os.path.commonprefix([from_dir + bytes_sep, to_dir + bytes_sep])
# now take the dirname, since commonprefix is character based,
        # and we either have a separator, or a partial name
common = os.path.dirname(common)
cur_dir = from_dir.rstrip(bytes_sep)
while cur_dir != common:
cur_dir = os.path.split(cur_dir)[0]
self._send_popd()
# now we're in our common base directory, so on
self._send_pushd(to_dir)
def _send_recursive(self, files):
for base in files:
if not os.path.isdir(base):
# filename mixed into the bunch
self._send_files([base])
continue
last_dir = asbytes(base)
for root, dirs, fls in os.walk(base):
self._chdir(last_dir, asbytes(root))
self._send_files([os.path.join(root, f) for f in fls])
last_dir = asbytes(root)
# back out of the directory
for i in range(len(os.path.split(last_dir))):
self._send_popd()
def _send_pushd(self, directory):
(mode, size, mtime, atime) = self._read_stats(directory)
basename = asbytes(os.path.basename(directory))
if self.preserve_times:
self._send_time(mtime, atime)
self.channel.sendall(('D%s 0 ' % mode).encode('ascii') + basename.replace(b'\n', b'\\^J') + b'\n')
self._recv_confirm()
def _send_popd(self):
        self.channel.sendall(b'E\n')
self._recv_confirm()
def _send_time(self, mtime, atime):
self.channel.sendall(('T%d 0 %d 0\n' % (mtime, atime)).encode('ascii'))
self._recv_confirm()
def _recv_confirm(self):
# read scp response
msg = b''
try:
msg = self.channel.recv(512)
except SocketTimeout:
            raise SCPException('Timeout waiting for scp response')
# slice off the first byte, so this compare will work in python2 and python3
if msg and msg[0:1] == b'\x00':
return
elif msg and msg[0:1] == b'\x01':
raise SCPException(asunicode(msg[1:]))
elif self.channel.recv_stderr_ready():
msg = self.channel.recv_stderr(512)
raise SCPException(asunicode(msg))
elif not msg:
raise SCPException('No response from server')
else:
raise SCPException('Invalid response from server', msg)
def _recv_all(self):
# loop over scp commands, and receive as necessary
command = {b'C': self._recv_file, b'T': self._set_time, b'D': self._recv_pushd, b'E': self._recv_popd}
while not self.channel.closed:
# wait for command as long as we're open
            self.channel.sendall(b'\x00')
msg = self.channel.recv(1024)
if not msg: # chan closed while recving
break
assert msg[-1:] == b'\n'
msg = msg[:-1]
code = msg[0:1]
try:
command[code](msg[1:])
except KeyError:
raise SCPException(str(msg).strip())
# directory times can't be set until we're done writing files
self._set_dirtimes()
def _set_time(self, cmd):
try:
times = cmd.split(b' ')
mtime = int(times[0])
atime = int(times[2]) or mtime
except:
self.channel.send(b'\x01')
raise SCPException('Bad time format')
# save for later
self._utime = (atime, mtime)
def _recv_file(self, cmd):
chan = self.channel
parts = cmd.strip().split(b' ', 2)
try:
mode = int(parts[0], 8)
size = int(parts[1])
if self._rename:
path = self._recv_dir
self._rename = False
elif os.name == 'nt':
path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8'))
else:
path = os.path.join(asbytes(self._recv_dir), parts[2])
except:
            chan.send(b'\x01')
chan.close()
raise SCPException('Bad file format')
try:
file_hdl = open(path, 'wb')
except IOError as e:
chan.send(b'\x01' + str(e).encode('utf-8'))
chan.close()
raise
if self._progress:
if size == 0:
# avoid divide-by-zero
self._progress(path, 1, 1)
else:
self._progress(path, size, 0)
buff_size = self.buff_size
pos = 0
chan.send(b'\x00')
try:
while pos < size:
# we have to make sure we don't read the final byte
if size - pos <= buff_size:
buff_size = size - pos
file_hdl.write(chan.recv(buff_size))
pos = file_hdl.tell()
if self._progress:
self._progress(path, size, pos)
msg = chan.recv(512)
if msg and msg[0:1] != b'\x00':
raise SCPException(msg[1:])
except SocketTimeout:
chan.close()
raise SCPException('Error receiving, socket.timeout')
file_hdl.truncate()
try:
os.utime(path, self._utime)
self._utime = None
os.chmod(path, mode)
# should we notify the other end?
finally:
file_hdl.close()
# '\x00' confirmation sent in _recv_all
def _recv_pushd(self, cmd):
parts = cmd.split(b' ', 2)
try:
mode = int(parts[0], 8)
if self._rename:
path = self._recv_dir
self._rename = False
elif os.name == 'nt':
path = os.path.join(asunicode_win(self._recv_dir), parts[2].decode('utf-8'))
else:
path = os.path.join(asbytes(self._recv_dir), parts[2])
except:
self.channel.send(b'\x01')
raise SCPException('Bad directory format')
try:
if not os.path.exists(path):
os.mkdir(path, mode)
elif os.path.isdir(path):
os.chmod(path, mode)
else:
raise SCPException('%s: Not a directory' % path)
self._dirtimes[path] = (self._utime)
self._utime = None
self._recv_dir = path
except (OSError, SCPException) as e:
self.channel.send(b'\x01' + asbytes(str(e)))
raise
def _recv_popd(self, *cmd):
self._recv_dir = os.path.split(self._recv_dir)[0]
def _set_dirtimes(self):
try:
for d in self._dirtimes:
os.utime(d, self._dirtimes[d])
finally:
self._dirtimes = {}
class SCPException(Exception):
"""SCP exception class"""
pass
############################################
def find_ssh_keys():
#dir = os.path.expanduser('~/Documents/.ssh/')
files = []
try:
for file in os.listdir(APP_DIR + '/.ssh'):
if '.' not in file:
files.append(APP_DIR + '/.ssh/' + file)
except OSError:
pass
return files
def parse_host(arg):
user, temp = arg.split('@')
host, path = temp.split(':')
return host, user, path
def scp_callback(filename, size, sent):
if size == sent:
print(filename)
if __name__ == '__main__':
files = []
ap = argparse.ArgumentParser()
ap.add_argument('--password', help='login password')
ap.add_argument('-p', '--port', action='store', default=22, type=int, help='port for ssh default: 22')
ap.add_argument('files', nargs='*', help='file or module name')
args = ap.parse_args()
#scp_mode 0 put 1 get
if '@' in args.files[0]:
scp_mode = 1
else:
scp_mode = 0
for file in args.files:
if '@' in file:
host, user, host_path = parse_host(file)
else:
files.append(file)
ssh = paramiko.SSHClient()
#ssh.load_system_host_keys()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
key_filename = find_ssh_keys()
if args.password is not None:
ssh.connect(host, username=user, password=args.password, port=args.port)
else:
if len(key_filename) == 0: # no key file found
            password = input('Enter password:')
ssh.connect(host, username=user, password=password, port=args.port)
else:
ssh.connect(host, username=user, key_filename=key_filename, port=args.port)
# SCPCLient takes a paramiko transport as its only argument
scp = SCPClient(ssh.get_transport(), progress=scp_callback)
#scp.put('stash',remote_path='stash/',recursive=True)
if scp_mode:
print('Copying from server...')
scp.get(host_path, local_path=files[0], recursive=True)
else:
print('Copying to server...')
scp.put(files, recursive=True, remote_path=host_path)
ssh.close()
|
import asyncio
import logging
from pathlib import Path
import lavalink
from redbot.core import data_manager
from redbot.core.i18n import Translator
from ...errors import LavalinkDownloadFailed
from ...manager import ServerManager
from ..abc import MixinMeta
from ..cog_utils import CompositeMetaClass
log = logging.getLogger("red.cogs.Audio.cog.Tasks.lavalink")
_ = Translator("Audio", Path(__file__))
class LavalinkTasks(MixinMeta, metaclass=CompositeMetaClass):
def lavalink_restart_connect(self) -> None:
lavalink.unregister_event_listener(self.lavalink_event_handler)
lavalink.unregister_update_listener(self.lavalink_update_handler)
if self.lavalink_connect_task:
self.lavalink_connect_task.cancel()
if self._restore_task:
self._restore_task.cancel()
self._restore_task = None
lavalink.register_event_listener(self.lavalink_event_handler)
lavalink.register_update_listener(self.lavalink_update_handler)
self.lavalink_connect_task = self.bot.loop.create_task(self.lavalink_attempt_connect())
async def lavalink_attempt_connect(self, timeout: int = 50) -> None:
self.lavalink_connection_aborted = False
max_retries = 5
retry_count = 0
while retry_count < max_retries:
configs = await self.config.all()
external = configs["use_external_lavalink"]
java_exec = configs["java_exc_path"]
if external is False:
settings = self._default_lavalink_settings
host = settings["host"]
password = settings["password"]
ws_port = settings["ws_port"]
if self.player_manager is not None:
await self.player_manager.shutdown()
self.player_manager = ServerManager()
try:
await self.player_manager.start(java_exec)
except LavalinkDownloadFailed as exc:
await asyncio.sleep(1)
if exc.should_retry:
log.exception(
"Exception whilst starting internal Lavalink server, retrying...",
exc_info=exc,
)
retry_count += 1
continue
else:
log.exception(
"Fatal exception whilst starting internal Lavalink server, "
"aborting...",
exc_info=exc,
)
self.lavalink_connection_aborted = True
raise
except asyncio.CancelledError:
log.exception("Invalid machine architecture, cannot run Lavalink.")
raise
except Exception as exc:
log.exception(
"Unhandled exception whilst starting internal Lavalink server, "
"aborting...",
exc_info=exc,
)
self.lavalink_connection_aborted = True
raise
else:
break
else:
host = configs["host"]
password = configs["password"]
ws_port = configs["ws_port"]
break
else:
log.critical(
"Setting up the Lavalink server failed after multiple attempts. "
"See above tracebacks for details."
)
self.lavalink_connection_aborted = True
return
retry_count = 0
while retry_count < max_retries:
if lavalink.node._nodes:
await lavalink.node.disconnect()
try:
await lavalink.initialize(
bot=self.bot,
host=host,
password=password,
rest_port=ws_port,
ws_port=ws_port,
timeout=timeout,
resume_key=f"Red-Core-Audio-{self.bot.user.id}-{data_manager.instance_name}",
)
except asyncio.TimeoutError:
log.error("Connecting to Lavalink server timed out, retrying...")
if external is False and self.player_manager is not None:
await self.player_manager.shutdown()
retry_count += 1
await asyncio.sleep(1) # prevent busylooping
except Exception as exc:
log.exception(
"Unhandled exception whilst connecting to Lavalink, aborting...", exc_info=exc
)
self.lavalink_connection_aborted = True
raise
else:
break
else:
self.lavalink_connection_aborted = True
log.critical(
"Connecting to the Lavalink server failed after multiple attempts. "
"See above tracebacks for details."
)
return
self._restore_task = asyncio.create_task(self.restore_players())
|
from homeassistant.util import dt as dt_util
from homeassistant.util.decorator import Registry
from .models import Event
PARSERS = Registry()
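# PARSERS maps an ONVIF topic string to the coroutine that converts a raw
# notification into an Event. A hedged sketch of the dispatch this enables
# (the variable names below are illustrative, not from the original module):
#
#   parser = PARSERS.get("tns1:VideoSource/MotionAlarm")
#   if parser is not None:
#       event = await parser(device_uid, notification_msg)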
@PARSERS.register("tns1:VideoSource/MotionAlarm")
# pylint: disable=protected-access
async def async_parse_motion_alarm(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:VideoSource/MotionAlarm
"""
try:
source = msg.Message._value_1.Source.SimpleItem[0].Value
return Event(
f"{uid}_{msg.Topic._value_1}_{source}",
f"{source} Motion Alarm",
"binary_sensor",
"motion",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:VideoSource/ImageTooBlurry/AnalyticsService")
@PARSERS.register("tns1:VideoSource/ImageTooBlurry/ImagingService")
@PARSERS.register("tns1:VideoSource/ImageTooBlurry/RecordingService")
# pylint: disable=protected-access
async def async_parse_image_too_blurry(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:VideoSource/ImageTooBlurry/*
"""
try:
source = msg.Message._value_1.Source.SimpleItem[0].Value
return Event(
f"{uid}_{msg.Topic._value_1}_{source}",
f"{source} Image Too Blurry",
"binary_sensor",
"problem",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:VideoSource/ImageTooDark/AnalyticsService")
@PARSERS.register("tns1:VideoSource/ImageTooDark/ImagingService")
@PARSERS.register("tns1:VideoSource/ImageTooDark/RecordingService")
# pylint: disable=protected-access
async def async_parse_image_too_dark(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:VideoSource/ImageTooDark/*
"""
try:
source = msg.Message._value_1.Source.SimpleItem[0].Value
return Event(
f"{uid}_{msg.Topic._value_1}_{source}",
f"{source} Image Too Dark",
"binary_sensor",
"problem",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:VideoSource/ImageTooBright/AnalyticsService")
@PARSERS.register("tns1:VideoSource/ImageTooBright/ImagingService")
@PARSERS.register("tns1:VideoSource/ImageTooBright/RecordingService")
# pylint: disable=protected-access
async def async_parse_image_too_bright(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:VideoSource/ImageTooBright/*
"""
try:
source = msg.Message._value_1.Source.SimpleItem[0].Value
return Event(
f"{uid}_{msg.Topic._value_1}_{source}",
f"{source} Image Too Bright",
"binary_sensor",
"problem",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:VideoSource/GlobalSceneChange/AnalyticsService")
@PARSERS.register("tns1:VideoSource/GlobalSceneChange/ImagingService")
@PARSERS.register("tns1:VideoSource/GlobalSceneChange/RecordingService")
# pylint: disable=protected-access
async def async_parse_scene_change(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:VideoSource/GlobalSceneChange/*
"""
try:
source = msg.Message._value_1.Source.SimpleItem[0].Value
return Event(
f"{uid}_{msg.Topic._value_1}_{source}",
f"{source} Global Scene Change",
"binary_sensor",
"problem",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:AudioAnalytics/Audio/DetectedSound")
# pylint: disable=protected-access
async def async_parse_detected_sound(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:AudioAnalytics/Audio/DetectedSound
"""
try:
audio_source = ""
audio_analytics = ""
rule = ""
for source in msg.Message._value_1.Source.SimpleItem:
if source.Name == "AudioSourceConfigurationToken":
audio_source = source.Value
if source.Name == "AudioAnalyticsConfigurationToken":
audio_analytics = source.Value
if source.Name == "Rule":
rule = source.Value
return Event(
f"{uid}_{msg.Topic._value_1}_{audio_source}_{audio_analytics}_{rule}",
f"{rule} Detected Sound",
"binary_sensor",
"sound",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:RuleEngine/FieldDetector/ObjectsInside")
# pylint: disable=protected-access
async def async_parse_field_detector(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:RuleEngine/FieldDetector/ObjectsInside
"""
try:
video_source = ""
video_analytics = ""
rule = ""
for source in msg.Message._value_1.Source.SimpleItem:
if source.Name == "VideoSourceConfigurationToken":
video_source = source.Value
if source.Name == "VideoAnalyticsConfigurationToken":
video_analytics = source.Value
if source.Name == "Rule":
rule = source.Value
evt = Event(
f"{uid}_{msg.Topic._value_1}_{video_source}_{video_analytics}_{rule}",
f"{rule} Field Detection",
"binary_sensor",
"motion",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
return evt
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:RuleEngine/CellMotionDetector/Motion")
# pylint: disable=protected-access
async def async_parse_cell_motion_detector(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:RuleEngine/CellMotionDetector/Motion
"""
try:
video_source = ""
video_analytics = ""
rule = ""
for source in msg.Message._value_1.Source.SimpleItem:
if source.Name == "VideoSourceConfigurationToken":
video_source = source.Value
if source.Name == "VideoAnalyticsConfigurationToken":
video_analytics = source.Value
if source.Name == "Rule":
rule = source.Value
return Event(
f"{uid}_{msg.Topic._value_1}_{video_source}_{video_analytics}_{rule}",
f"{rule} Cell Motion Detection",
"binary_sensor",
"motion",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:RuleEngine/MotionRegionDetector/Motion")
# pylint: disable=protected-access
async def async_parse_motion_region_detector(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:RuleEngine/MotionRegionDetector/Motion
"""
try:
video_source = ""
video_analytics = ""
rule = ""
for source in msg.Message._value_1.Source.SimpleItem:
if source.Name == "VideoSourceConfigurationToken":
video_source = source.Value
if source.Name == "VideoAnalyticsConfigurationToken":
video_analytics = source.Value
if source.Name == "Rule":
rule = source.Value
return Event(
f"{uid}_{msg.Topic._value_1}_{video_source}_{video_analytics}_{rule}",
f"{rule} Motion Region Detection",
"binary_sensor",
"motion",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:RuleEngine/TamperDetector/Tamper")
# pylint: disable=protected-access
async def async_parse_tamper_detector(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:RuleEngine/TamperDetector/Tamper
"""
try:
video_source = ""
video_analytics = ""
rule = ""
for source in msg.Message._value_1.Source.SimpleItem:
if source.Name == "VideoSourceConfigurationToken":
video_source = source.Value
if source.Name == "VideoAnalyticsConfigurationToken":
video_analytics = source.Value
if source.Name == "Rule":
rule = source.Value
return Event(
f"{uid}_{msg.Topic._value_1}_{video_source}_{video_analytics}_{rule}",
f"{rule} Tamper Detection",
"binary_sensor",
"problem",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:Device/HardwareFailure/StorageFailure")
# pylint: disable=protected-access
async def async_parse_storage_failure(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:Device/HardwareFailure/StorageFailure
"""
try:
source = msg.Message._value_1.Source.SimpleItem[0].Value
return Event(
f"{uid}_{msg.Topic._value_1}_{source}",
"Storage Failure",
"binary_sensor",
"problem",
None,
msg.Message._value_1.Data.SimpleItem[0].Value == "true",
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:Monitoring/ProcessorUsage")
# pylint: disable=protected-access
async def async_parse_processor_usage(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:Monitoring/ProcessorUsage
"""
try:
usage = float(msg.Message._value_1.Data.SimpleItem[0].Value)
if usage <= 1:
usage *= 100
return Event(
f"{uid}_{msg.Topic._value_1}",
"Processor Usage",
"sensor",
None,
"percent",
int(usage),
)
except (AttributeError, KeyError):
return None
@PARSERS.register("tns1:Monitoring/OperatingTime/LastReboot")
# pylint: disable=protected-access
async def async_parse_last_reboot(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:Monitoring/OperatingTime/LastReboot
"""
try:
return Event(
f"{uid}_{msg.Topic._value_1}",
"Last Reboot",
"sensor",
"timestamp",
None,
dt_util.as_local(
dt_util.parse_datetime(msg.Message._value_1.Data.SimpleItem[0].Value)
),
)
except (AttributeError, KeyError, ValueError):
return None
@PARSERS.register("tns1:Monitoring/OperatingTime/LastReset")
# pylint: disable=protected-access
async def async_parse_last_reset(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:Monitoring/OperatingTime/LastReset
"""
try:
return Event(
f"{uid}_{msg.Topic._value_1}",
"Last Reset",
"sensor",
"timestamp",
None,
dt_util.as_local(
dt_util.parse_datetime(msg.Message._value_1.Data.SimpleItem[0].Value)
),
entity_enabled=False,
)
except (AttributeError, KeyError, ValueError):
return None
@PARSERS.register("tns1:Monitoring/OperatingTime/LastClockSynchronization")
# pylint: disable=protected-access
async def async_parse_last_clock_sync(uid: str, msg) -> Event:
"""Handle parsing event message.
Topic: tns1:Monitoring/OperatingTime/LastClockSynchronization
"""
try:
return Event(
f"{uid}_{msg.Topic._value_1}",
"Last Clock Synchronization",
"sensor",
"timestamp",
None,
dt_util.as_local(
dt_util.parse_datetime(msg.Message._value_1.Data.SimpleItem[0].Value)
),
entity_enabled=False,
)
except (AttributeError, KeyError, ValueError):
return None
|
from collections import namedtuple
from pathlib import Path
import json
import subprocess as sp
import shutil
import pytest
from redbot.cogs.downloader.repo_manager import RepoManager, Repo, ProcessFormatter
from redbot.cogs.downloader.installable import Installable, InstalledModule
__all__ = [
"repo_manager",
"repo",
"bot_repo",
"INFO_JSON",
"LIBRARY_INFO_JSON",
"installable",
"installed_cog",
"library_installable",
"fake_run_noprint",
"fake_current_commit",
"_session_git_repo",
"git_repo",
"cloned_git_repo",
"git_repo_with_remote",
]
async def fake_run_noprint(*args, **kwargs):
fake_result_tuple = namedtuple("fake_result", "returncode result")
res = fake_result_tuple(0, (args, kwargs))
return res
async def fake_current_commit(*args, **kwargs):
return "fake_result"
@pytest.fixture
def repo_manager(tmpdir_factory):
rm = RepoManager()
# rm.repos_folder = Path(str(tmpdir_factory.getbasetemp())) / 'repos'
return rm
@pytest.fixture
def repo(tmp_path):
repo_folder = tmp_path / "repos" / "squid"
repo_folder.mkdir(parents=True, exist_ok=True)
return Repo(
url="https://github.com/tekulvw/Squid-Plugins",
name="squid",
branch="rewrite_cogs",
commit="6acb5decbb717932e5dc0cda7fca0eff452c47dd",
folder_path=repo_folder,
)
@pytest.fixture
def bot_repo(event_loop):
cwd = Path.cwd()
return Repo(
name="Red-DiscordBot",
branch="WRONG",
commit="",
url="https://empty.com/something.git",
folder_path=cwd,
)
# Installable
INFO_JSON = {
"author": ("tekulvw",),
"min_bot_version": "3.0.0",
"max_bot_version": "3.0.2",
"description": "A long description",
"hidden": False,
"install_msg": "A post-installation message",
"required_cogs": {},
"requirements": ("tabulate",),
"short": "A short description",
"tags": ("tag1", "tag2"),
"type": "COG",
}
LIBRARY_INFO_JSON = {
"author": ("seputaes",),
"min_bot_version": "3.0.0",
"max_bot_version": "3.0.2",
"description": "A long library description",
"hidden": False, # libraries are always hidden, this tests it will be flipped
"install_msg": "A library install message",
"required_cogs": {},
"requirements": ("tabulate",),
"short": "A short library description",
"tags": ("libtag1", "libtag2"),
"type": "SHARED_LIBRARY",
}
@pytest.fixture
def installable(tmpdir):
cog_path = tmpdir.mkdir("test_repo").mkdir("test_cog")
info_path = cog_path.join("info.json")
info_path.write_text(json.dumps(INFO_JSON), "utf-8")
cog_info = Installable(Path(str(cog_path)))
return cog_info
@pytest.fixture
def installed_cog(tmpdir):
cog_path = tmpdir.mkdir("test_repo").mkdir("test_installed_cog")
info_path = cog_path.join("info.json")
info_path.write_text(json.dumps(INFO_JSON), "utf-8")
cog_info = InstalledModule(Path(str(cog_path)))
return cog_info
@pytest.fixture
def library_installable(tmpdir):
lib_path = tmpdir.mkdir("test_repo").mkdir("test_lib")
info_path = lib_path.join("info.json")
info_path.write_text(json.dumps(LIBRARY_INFO_JSON), "utf-8")
cog_info = Installable(Path(str(lib_path)))
return cog_info
# Git
TEST_REPO_EXPORT_PTH: Path = Path(__file__).parent / "downloader_testrepo.export"
def _init_test_repo(destination: Path):
# copied from tools/edit_testrepo.py
git_dirparams = ("git", "-C", str(destination))
init_commands = (
(*git_dirparams, "init"),
(*git_dirparams, "config", "--local", "user.name", "Cog-Creators"),
(*git_dirparams, "config", "--local", "user.email", "[email protected]"),
(*git_dirparams, "config", "--local", "commit.gpgSign", "false"),
)
for args in init_commands:
sp.run(args, check=True)
return git_dirparams
@pytest.fixture(scope="session")
async def _session_git_repo(tmp_path_factory, event_loop):
# we import the repo only once per session and duplicate the repo folder for each test
repo_path = tmp_path_factory.mktemp("session_git_repo")
repo = Repo(name="redbot-testrepo", url="", branch="master", commit="", folder_path=repo_path)
git_dirparams = _init_test_repo(repo_path)
fast_import = sp.Popen((*git_dirparams, "fast-import", "--quiet"), stdin=sp.PIPE)
with TEST_REPO_EXPORT_PTH.open(mode="rb") as f:
fast_import.communicate(f.read())
return_code = fast_import.wait()
if return_code:
raise Exception(f"git fast-import failed with code {return_code}")
sp.run((*git_dirparams, "reset", "--hard"), check=True)
return repo
@pytest.fixture
async def git_repo(_session_git_repo, tmp_path, event_loop):
# this fixture only copies the repo that was imported in _session_git_repo
repo_path = tmp_path / "redbot-testrepo"
shutil.copytree(_session_git_repo.folder_path, repo_path)
repo = Repo(
name="redbot-testrepo",
url=_session_git_repo.url,
branch=_session_git_repo.branch,
commit=_session_git_repo.commit,
folder_path=repo_path,
)
return repo
@pytest.fixture
async def cloned_git_repo(_session_git_repo, tmp_path, event_loop):
# don't use this if you want to edit origin repo
repo_path = tmp_path / "redbot-cloned_testrepo"
repo = Repo(
name="redbot-testrepo",
url=str(_session_git_repo.folder_path),
branch=_session_git_repo.branch,
commit=_session_git_repo.commit,
folder_path=repo_path,
)
sp.run(("git", "clone", str(_session_git_repo.folder_path), str(repo_path)), check=True)
return repo
@pytest.fixture
async def git_repo_with_remote(git_repo, tmp_path, event_loop):
# this can safely be used when you want to do changes to origin repo
repo_path = tmp_path / "redbot-testrepo_with_remote"
repo = Repo(
name="redbot-testrepo",
url=str(git_repo.folder_path),
branch=git_repo.branch,
commit=git_repo.commit,
folder_path=repo_path,
)
sp.run(("git", "clone", str(git_repo.folder_path), str(repo_path)), check=True)
return repo
|
from typing import Any
from homeassistant.components.scene import Scene
from . import FIBARO_DEVICES, FibaroDevice
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Perform the setup for Fibaro scenes."""
if discovery_info is None:
return
async_add_entities(
[FibaroScene(scene) for scene in hass.data[FIBARO_DEVICES]["scene"]], True
)
class FibaroScene(FibaroDevice, Scene):
"""Representation of a Fibaro scene entity."""
def activate(self, **kwargs: Any) -> None:
"""Activate the scene."""
self.fibaro_device.start()
|
import functools
from typing import Mapping, Callable, MutableMapping, Union, Set, cast
import attr
from PyQt5.QtCore import pyqtSlot, pyqtSignal, Qt, QObject, QEvent
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtWidgets import QApplication
from qutebrowser.commands import runners
from qutebrowser.keyinput import modeparsers, basekeyparser
from qutebrowser.config import config
from qutebrowser.api import cmdutils
from qutebrowser.utils import usertypes, log, objreg, utils
from qutebrowser.browser import hints
INPUT_MODES = [usertypes.KeyMode.insert, usertypes.KeyMode.passthrough]
PROMPT_MODES = [usertypes.KeyMode.prompt, usertypes.KeyMode.yesno]
ParserDictType = MutableMapping[usertypes.KeyMode, basekeyparser.BaseKeyParser]
@attr.s(frozen=True)
class KeyEvent:
"""A small wrapper over a QKeyEvent storing its data.
This is needed because Qt apparently mutates existing events with new data.
It doesn't store the modifiers because they can be different for a key
press/release.
Attributes:
key: A Qt.Key member (QKeyEvent::key).
text: A string (QKeyEvent::text).
"""
key: Qt.Key = attr.ib()
text: str = attr.ib()
@classmethod
def from_event(cls, event: QKeyEvent) -> 'KeyEvent':
"""Initialize a KeyEvent from a QKeyEvent."""
return cls(Qt.Key(event.key()), event.text())
class NotInModeError(Exception):
"""Exception raised when we want to leave a mode we're not in."""
class UnavailableError(Exception):
"""Exception raised when trying to access modeman before initialization.
Thrown by instance() if modeman has not been initialized yet.
"""
def init(win_id: int, parent: QObject) -> 'ModeManager':
"""Initialize the mode manager and the keyparsers for the given win_id."""
commandrunner = runners.CommandRunner(win_id)
modeman = ModeManager(win_id, parent)
objreg.register('mode-manager', modeman, scope='window', window=win_id)
hintmanager = hints.HintManager(win_id, parent=parent)
objreg.register('hintmanager', hintmanager, scope='window',
window=win_id, command_only=True)
modeman.hintmanager = hintmanager
keyparsers: ParserDictType = {
usertypes.KeyMode.normal:
modeparsers.NormalKeyParser(
win_id=win_id,
commandrunner=commandrunner,
parent=modeman),
usertypes.KeyMode.hint:
modeparsers.HintKeyParser(
win_id=win_id,
commandrunner=commandrunner,
hintmanager=hintmanager,
parent=modeman),
usertypes.KeyMode.insert:
modeparsers.CommandKeyParser(
mode=usertypes.KeyMode.insert,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman,
passthrough=True,
do_log=False,
supports_count=False),
usertypes.KeyMode.passthrough:
modeparsers.CommandKeyParser(
mode=usertypes.KeyMode.passthrough,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman,
passthrough=True,
do_log=False,
supports_count=False),
usertypes.KeyMode.command:
modeparsers.CommandKeyParser(
mode=usertypes.KeyMode.command,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman,
passthrough=True,
do_log=False,
supports_count=False),
usertypes.KeyMode.prompt:
modeparsers.CommandKeyParser(
mode=usertypes.KeyMode.prompt,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman,
passthrough=True,
do_log=False,
supports_count=False),
usertypes.KeyMode.yesno:
modeparsers.CommandKeyParser(
mode=usertypes.KeyMode.yesno,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman,
supports_count=False),
usertypes.KeyMode.caret:
modeparsers.CommandKeyParser(
mode=usertypes.KeyMode.caret,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman,
passthrough=True),
usertypes.KeyMode.set_mark:
modeparsers.RegisterKeyParser(
mode=usertypes.KeyMode.set_mark,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman),
usertypes.KeyMode.jump_mark:
modeparsers.RegisterKeyParser(
mode=usertypes.KeyMode.jump_mark,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman),
usertypes.KeyMode.record_macro:
modeparsers.RegisterKeyParser(
mode=usertypes.KeyMode.record_macro,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman),
usertypes.KeyMode.run_macro:
modeparsers.RegisterKeyParser(
mode=usertypes.KeyMode.run_macro,
win_id=win_id,
commandrunner=commandrunner,
parent=modeman),
}
for mode, parser in keyparsers.items():
modeman.register(mode, parser)
return modeman
def instance(win_id: Union[int, str]) -> 'ModeManager':
"""Get a modemanager object.
Raises UnavailableError if there is no instance available yet.
"""
mode_manager = objreg.get('mode-manager', scope='window', window=win_id,
default=None)
if mode_manager is not None:
return mode_manager
else:
raise UnavailableError("ModeManager is not initialized yet.")
def enter(win_id: int,
mode: usertypes.KeyMode,
reason: str = None,
only_if_normal: bool = False) -> None:
"""Enter the mode 'mode'."""
instance(win_id).enter(mode, reason, only_if_normal)
def leave(win_id: int,
mode: usertypes.KeyMode,
reason: str = None, *,
maybe: bool = False) -> None:
"""Leave the mode 'mode'."""
instance(win_id).leave(mode, reason, maybe=maybe)
class ModeManager(QObject):
"""Manager for keyboard modes.
Attributes:
mode: The mode we're currently in.
hintmanager: The HintManager associated with this window.
_win_id: The window ID of this ModeManager
_prev_mode: Mode before a prompt popped up
parsers: A dictionary of modes and their keyparsers.
_forward_unbound_keys: If we should forward unbound keys.
_releaseevents_to_pass: A set of KeyEvents where the keyPressEvent was
passed through, so the release event should as
well.
Signals:
entered: Emitted when a mode is entered.
arg1: The mode which has been entered.
arg2: The window ID of this mode manager.
left: Emitted when a mode is left.
arg1: The mode which has been left.
arg2: The new current mode.
arg3: The window ID of this mode manager.
keystring_updated: Emitted when the keystring was updated in any mode.
arg 1: The mode in which the keystring has been
updated.
arg 2: The new key string.
"""
entered = pyqtSignal(usertypes.KeyMode, int)
left = pyqtSignal(usertypes.KeyMode, usertypes.KeyMode, int)
keystring_updated = pyqtSignal(usertypes.KeyMode, str)
def __init__(self, win_id: int, parent: QObject = None) -> None:
super().__init__(parent)
self._win_id = win_id
self.parsers: ParserDictType = {}
self._prev_mode = usertypes.KeyMode.normal
self.mode = usertypes.KeyMode.normal
self._releaseevents_to_pass: Set[KeyEvent] = set()
# Set after __init__
self.hintmanager = cast(hints.HintManager, None)
def __repr__(self) -> str:
return utils.get_repr(self, mode=self.mode)
def _handle_keypress(self, event: QKeyEvent, *,
dry_run: bool = False) -> bool:
"""Handle filtering of KeyPress events.
Args:
event: The KeyPress to examine.
dry_run: Don't actually handle the key, only filter it.
Return:
True if event should be filtered, False otherwise.
"""
curmode = self.mode
parser = self.parsers[curmode]
if curmode != usertypes.KeyMode.insert:
log.modes.debug("got keypress in mode {} - delegating to "
"{}".format(curmode, utils.qualname(parser)))
match = parser.handle(event, dry_run=dry_run)
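# Keys pressed with modifiers other than Shift, or keys without printable
# text, count as non-alphanumeric for the 'auto' forwarding setting below.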
has_modifier = event.modifiers() not in [
Qt.NoModifier,
Qt.ShiftModifier,
] # type: ignore[comparison-overlap]
is_non_alnum = has_modifier or not event.text().strip()
forward_unbound_keys = config.cache['input.forward_unbound_keys']
if match:
filter_this = True
elif (parser.passthrough or forward_unbound_keys == 'all' or
(forward_unbound_keys == 'auto' and is_non_alnum)):
filter_this = False
else:
filter_this = True
if not filter_this and not dry_run:
self._releaseevents_to_pass.add(KeyEvent.from_event(event))
if curmode != usertypes.KeyMode.insert:
focus_widget = QApplication.instance().focusWidget()
log.modes.debug("match: {}, forward_unbound_keys: {}, "
"passthrough: {}, is_non_alnum: {}, dry_run: {} "
"--> filter: {} (focused: {!r})".format(
match, forward_unbound_keys,
parser.passthrough, is_non_alnum, dry_run,
filter_this, focus_widget))
return filter_this
def _handle_keyrelease(self, event: QKeyEvent) -> bool:
"""Handle filtering of KeyRelease events.
Args:
event: The KeyPress to examine.
Return:
True if event should be filtered, False otherwise.
"""
# handle like matching KeyPress
keyevent = KeyEvent.from_event(event)
if keyevent in self._releaseevents_to_pass:
self._releaseevents_to_pass.remove(keyevent)
filter_this = False
else:
filter_this = True
if self.mode != usertypes.KeyMode.insert:
log.modes.debug("filter: {}".format(filter_this))
return filter_this
def register(self, mode: usertypes.KeyMode,
parser: basekeyparser.BaseKeyParser) -> None:
"""Register a new mode."""
assert parser is not None
self.parsers[mode] = parser
parser.request_leave.connect(self.leave)
parser.keystring_updated.connect(
functools.partial(self.keystring_updated.emit, mode))
def enter(self, mode: usertypes.KeyMode,
reason: str = None,
only_if_normal: bool = False) -> None:
"""Enter a new mode.
Args:
mode: The mode to enter as a KeyMode member.
reason: Why the mode was entered.
only_if_normal: Only enter the new mode if we're in normal mode.
"""
if mode == usertypes.KeyMode.normal:
self.leave(self.mode, reason='enter normal: {}'.format(reason))
return
log.modes.debug("Entering mode {}{}".format(
mode, '' if reason is None else ' (reason: {})'.format(reason)))
if mode not in self.parsers:
raise ValueError("No keyparser for mode {}".format(mode))
if self.mode == mode or (self.mode in PROMPT_MODES and
mode in PROMPT_MODES):
log.modes.debug("Ignoring request as we're in mode {} "
"already.".format(self.mode))
return
if self.mode != usertypes.KeyMode.normal:
if only_if_normal:
log.modes.debug("Ignoring request as we're in mode {} "
"and only_if_normal is set..".format(
self.mode))
return
log.modes.debug("Overriding mode {}.".format(self.mode))
self.left.emit(self.mode, mode, self._win_id)
if mode in PROMPT_MODES and self.mode in INPUT_MODES:
self._prev_mode = self.mode
else:
self._prev_mode = usertypes.KeyMode.normal
self.mode = mode
self.entered.emit(mode, self._win_id)
@cmdutils.register(instance='mode-manager', scope='window')
def enter_mode(self, mode: str) -> None:
"""Enter a key mode.
Args:
mode: The mode to enter. See `:help bindings.commands` for the
available modes, but note that hint/command/yesno/prompt mode
can't be entered manually.
"""
try:
m = usertypes.KeyMode[mode]
except KeyError:
raise cmdutils.CommandError("Mode {} does not exist!".format(mode))
if m in [usertypes.KeyMode.hint, usertypes.KeyMode.command,
usertypes.KeyMode.yesno, usertypes.KeyMode.prompt,
usertypes.KeyMode.register]:
raise cmdutils.CommandError(
"Mode {} can't be entered manually!".format(mode))
self.enter(m, 'command')
@pyqtSlot(usertypes.KeyMode, str, bool)
def leave(self, mode: usertypes.KeyMode,
reason: str = None,
maybe: bool = False) -> None:
"""Leave a key mode.
Args:
mode: The mode to leave as a usertypes.KeyMode member.
reason: Why the mode was left.
maybe: If set, ignore the request if we're not in that mode.
"""
if self.mode != mode:
if maybe:
log.modes.debug("Ignoring leave request for {} (reason {}) as "
"we're in mode {}".format(
mode, reason, self.mode))
return
else:
raise NotInModeError("Not in mode {}!".format(mode))
log.modes.debug("Leaving mode {}{}".format(
mode, '' if reason is None else ' (reason: {})'.format(reason)))
# leaving a mode implies clearing keychain, see
# https://github.com/qutebrowser/qutebrowser/issues/1805
self.clear_keychain()
self.mode = usertypes.KeyMode.normal
self.left.emit(mode, self.mode, self._win_id)
if mode in PROMPT_MODES:
self.enter(self._prev_mode,
reason='restore mode before {}'.format(mode.name))
@cmdutils.register(instance='mode-manager', name='leave-mode',
not_modes=[usertypes.KeyMode.normal], scope='window')
def leave_current_mode(self) -> None:
"""Leave the mode we're currently in."""
if self.mode == usertypes.KeyMode.normal:
raise ValueError("Can't leave normal mode!")
self.leave(self.mode, 'leave current')
def handle_event(self, event: QEvent) -> bool:
"""Filter all events based on the currently set mode.
Also calls the real keypress handler.
Args:
event: The KeyPress to examine.
Return:
True if event should be filtered, False otherwise.
"""
handlers: Mapping[QEvent.Type, Callable[[QKeyEvent], bool]] = {
QEvent.KeyPress: self._handle_keypress,
QEvent.KeyRelease: self._handle_keyrelease,
QEvent.ShortcutOverride:
functools.partial(self._handle_keypress, dry_run=True),
}
handler = handlers[event.type()]
return handler(cast(QKeyEvent, event))
@cmdutils.register(instance='mode-manager', scope='window')
def clear_keychain(self) -> None:
"""Clear the currently entered key chain."""
self.parsers[self.mode].clear_keystring()
|
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
import functools
import logging
import warnings
import typing
if typing.TYPE_CHECKING:
from typing import Callable
def warn_logging(logger):
# type: (logging.Logger) -> Callable
"""Create a `showwarning` function that uses the given logger.
Arguments:
logger (~logging.Logger): the logger to use.
Returns:
function: a function that can be used as the `warnings.showwarning`
callback.
"""
def showwarning(message, category, filename, lineno, file=None, line=None):
logger.warning(message)
return showwarning
def wrap_warnings(logger):
"""Have the function patch `warnings.showwarning` with the given logger.
Arguments:
logger (~logging.Logger): the logger to wrap warnings with when
the decorated function is called.
Returns:
`function`: a decorator function.
"""
def decorator(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
showwarning = warnings.showwarning
warnings.showwarning = warn_logging(logger)
try:
return func(*args, **kwargs)
finally:
warnings.showwarning = showwarning
return new_func
return decorator
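# Illustrative sketch (not part of the original module): how wrap_warnings
# might be applied so warnings raised inside a function are routed to a
# logger instead of stderr. The logger name and function are assumptions.
#
#     _logger = logging.getLogger("fs.example")
#
#     @wrap_warnings(_logger)
#     def parse(data):
#         warnings.warn("unexpected field in payload")
#         return data
#
#     parse({})  # the UserWarning is emitted via _logger.warning(...)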
|
import builtins
import configparser
import operator
import sys
from cherrypy._cpcompat import text_or_bytes
class NamespaceSet(dict):
"""A dict of config namespace names and handlers.
Each config entry should begin with a namespace name; the corresponding
namespace handler will be called once for each config entry in that
namespace, and will be passed two arguments: the config key (with the
namespace removed) and the config value.
Namespace handlers may be any Python callable; they may also be
context managers, in which case their __enter__
method should return a callable to be used as the handler.
See cherrypy.tools (the Toolbox class) for an example.
"""
def __call__(self, config):
"""Iterate through config and pass it to each namespace handler.
config
A flat dict, where keys use dots to separate
namespaces, and values are arbitrary.
The first name in each config key is used to look up the corresponding
namespace handler. For example, a config entry of {'tools.gzip.on': v}
will call the 'tools' namespace handler with the args: ('gzip.on', v)
"""
# Separate the given config into namespaces
ns_confs = {}
for k in config:
if '.' in k:
ns, name = k.split('.', 1)
bucket = ns_confs.setdefault(ns, {})
bucket[name] = config[k]
# I chose __enter__ and __exit__ so someday this could be
# rewritten using 'with' statement:
# for ns, handler in self.items():
# with handler as callable:
# for k, v in ns_confs.get(ns, {}).items():
# callable(k, v)
for ns, handler in self.items():
exit = getattr(handler, '__exit__', None)
if exit:
callable = handler.__enter__()
no_exc = True
try:
try:
for k, v in ns_confs.get(ns, {}).items():
callable(k, v)
except Exception:
# The exceptional case is handled here
no_exc = False
if exit is None:
raise
if not exit(*sys.exc_info()):
raise
# The exception is swallowed if exit() returns true
finally:
# The normal and non-local-goto cases are handled here
if no_exc and exit:
exit(None, None, None)
else:
for k, v in ns_confs.get(ns, {}).items():
handler(k, v)
def __repr__(self):
return '%s.%s(%s)' % (self.__module__, self.__class__.__name__,
dict.__repr__(self))
def __copy__(self):
newobj = self.__class__()
newobj.update(self)
return newobj
copy = __copy__
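# Illustrative sketch (not part of the original module): how a namespace
# handler is dispatched by NamespaceSet.__call__. The handler and the config
# keys below are hypothetical.
#
#     ns = NamespaceSet()
#     ns['tools'] = lambda k, v: print('tools handler got', k, v)
#     ns({'tools.gzip.on': True, 'tools.gzip.mime_types': ['text/html']})
#     # -> tools handler got gzip.on True
#     # -> tools handler got gzip.mime_types ['text/html']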
class Config(dict):
"""A dict-like set of configuration data, with defaults and namespaces.
May take a file, filename, or dict.
"""
defaults = {}
environments = {}
namespaces = NamespaceSet()
def __init__(self, file=None, **kwargs):
self.reset()
if file is not None:
self.update(file)
if kwargs:
self.update(kwargs)
def reset(self):
"""Reset self to default values."""
self.clear()
dict.update(self, self.defaults)
def update(self, config):
"""Update self from a dict, file, or filename."""
self._apply(Parser.load(config))
def _apply(self, config):
"""Update self from a dict."""
which_env = config.get('environment')
if which_env:
env = self.environments[which_env]
for k in env:
if k not in config:
config[k] = env[k]
dict.update(self, config)
self.namespaces(config)
def __setitem__(self, k, v):
dict.__setitem__(self, k, v)
self.namespaces({k: v})
class Parser(configparser.ConfigParser):
"""Sub-class of ConfigParser that keeps the case of options and that
raises an exception if the file cannot be read.
"""
def optionxform(self, optionstr):
return optionstr
def read(self, filenames):
if isinstance(filenames, text_or_bytes):
filenames = [filenames]
for filename in filenames:
# try:
# fp = open(filename)
# except IOError:
# continue
fp = open(filename)
try:
self._read(fp, filename)
finally:
fp.close()
def as_dict(self, raw=False, vars=None):
"""Convert an INI file to a dictionary"""
# Load INI file into a dict
result = {}
for section in self.sections():
if section not in result:
result[section] = {}
for option in self.options(section):
value = self.get(section, option, raw=raw, vars=vars)
try:
value = unrepr(value)
except Exception:
x = sys.exc_info()[1]
msg = ('Config error in section: %r, option: %r, '
'value: %r. Config values must be valid Python.' %
(section, option, value))
raise ValueError(msg, x.__class__.__name__, x.args)
result[section][option] = value
return result
def dict_from_file(self, file):
if hasattr(file, 'read'):
self.read_file(file)
else:
self.read(file)
return self.as_dict()
@classmethod
def load(cls, input):
"""Resolve 'input' to dict from a dict, file, or filename."""
is_file = (
# Filename
isinstance(input, text_or_bytes)
# Open file object
or hasattr(input, 'read')
)
return Parser().dict_from_file(input) if is_file else input.copy()
# public domain "unrepr" implementation, found on the web and then improved.
class _Builder:
def build(self, o):
m = getattr(self, 'build_' + o.__class__.__name__, None)
if m is None:
raise TypeError('unrepr does not recognize %s' %
repr(o.__class__.__name__))
return m(o)
def astnode(self, s):
"""Return a Python3 ast Node compiled from a string."""
try:
import ast
except ImportError:
# Fallback to eval when ast package is not available,
# e.g. IronPython 1.0.
return eval(s)
p = ast.parse('__tempvalue__ = ' + s)
return p.body[0].value
def build_Subscript(self, o):
return self.build(o.value)[self.build(o.slice)]
def build_Index(self, o):
return self.build(o.value)
def _build_call35(self, o):
"""
Workaround for python 3.5 _ast.Call signature, docs found here
https://greentreesnakes.readthedocs.org/en/latest/nodes.html
"""
import ast
callee = self.build(o.func)
args = []
if o.args is not None:
for a in o.args:
if isinstance(a, ast.Starred):
args.append(self.build(a.value))
else:
args.append(self.build(a))
kwargs = {}
for kw in o.keywords:
if kw.arg is None: # double asterisk `**`
rst = self.build(kw.value)
if not isinstance(rst, dict):
raise TypeError('Invalid argument for call. '
'Must be a mapping object.')
# give preference to the keys set directly from arg=value
for k, v in rst.items():
if k not in kwargs:
kwargs[k] = v
else: # defined on the call as: arg=value
kwargs[kw.arg] = self.build(kw.value)
return callee(*args, **kwargs)
def build_Call(self, o):
if sys.version_info >= (3, 5):
return self._build_call35(o)
callee = self.build(o.func)
if o.args is None:
args = ()
else:
args = tuple([self.build(a) for a in o.args])
if o.starargs is None:
starargs = ()
else:
starargs = tuple(self.build(o.starargs))
if o.kwargs is None:
kwargs = {}
else:
kwargs = self.build(o.kwargs)
if o.keywords is not None: # direct a=b keywords
for kw in o.keywords:
# direct keywords take precedence over entries supplied via **kwargs
kwargs[kw.arg] = self.build(kw.value)
return callee(*(args + starargs), **kwargs)
def build_List(self, o):
return list(map(self.build, o.elts))
def build_Str(self, o):
return o.s
def build_Num(self, o):
return o.n
def build_Dict(self, o):
return dict([(self.build(k), self.build(v))
for k, v in zip(o.keys, o.values)])
def build_Tuple(self, o):
return tuple(self.build_List(o))
def build_Name(self, o):
name = o.id
if name == 'None':
return None
if name == 'True':
return True
if name == 'False':
return False
# See if the Name is a package or module. If it is, import it.
try:
return modules(name)
except ImportError:
pass
# See if the Name is in builtins.
try:
return getattr(builtins, name)
except AttributeError:
pass
raise TypeError('unrepr could not resolve the name %s' % repr(name))
def build_NameConstant(self, o):
return o.value
build_Constant = build_NameConstant # Python 3.8 change
def build_UnaryOp(self, o):
op, operand = map(self.build, [o.op, o.operand])
return op(operand)
def build_BinOp(self, o):
left, op, right = map(self.build, [o.left, o.op, o.right])
return op(left, right)
def build_Add(self, o):
return operator.add
def build_Mult(self, o):
return operator.mul
def build_USub(self, o):
return operator.neg
def build_Attribute(self, o):
parent = self.build(o.value)
return getattr(parent, o.attr)
def build_NoneType(self, o):
return None
def unrepr(s):
"""Return a Python object compiled from a string."""
if not s:
return s
b = _Builder()
obj = b.astnode(s)
return b.build(obj)
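# Examples of what unrepr accepts (illustrative only): plain Python literals,
# dotted names, and simple expressions are built without eval() on arbitrary code.
#
#     unrepr("{'a': 1, 'b': [1, 2]}")   # -> {'a': 1, 'b': [1, 2]}
#     unrepr("os.path.join")            # -> <function join> (imports the module)
#     unrepr("3 * 4 + 1")               # -> 13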
def modules(modulePath):
"""Load a module and retrieve a reference to that module."""
__import__(modulePath)
return sys.modules[modulePath]
def attributes(full_attribute_name):
"""Load a module and retrieve an attribute of that module."""
# Parse out the path, module, and attribute
last_dot = full_attribute_name.rfind('.')
attr_name = full_attribute_name[last_dot + 1:]
mod_path = full_attribute_name[:last_dot]
mod = modules(mod_path)
# Let an AttributeError propagate outward.
try:
attr = getattr(mod, attr_name)
except AttributeError:
raise AttributeError("'%s' object has no attribute '%s'"
% (mod_path, attr_name))
# Return a reference to the attribute.
return attr
|
from datapoint.exceptions import APIException
import pytest
from tests.async_mock import patch
@pytest.fixture()
def mock_simple_manager_fail():
"""Mock datapoint Manager with default values for testing in config_flow."""
with patch("datapoint.Manager") as mock_manager:
instance = mock_manager.return_value
instance.get_nearest_forecast_site.side_effect = APIException()
instance.get_forecast_for_site.side_effect = APIException()
instance.latitude = None
instance.longitude = None
instance.site = None
instance.site_id = None
instance.site_name = None
instance.now = None
yield mock_manager
|
import asyncio
import logging
from pyinsteon import async_close, async_connect, devices
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_PLATFORM, EVENT_HOMEASSISTANT_STOP
from homeassistant.exceptions import ConfigEntryNotReady
from .const import (
CONF_CAT,
CONF_DIM_STEPS,
CONF_HOUSECODE,
CONF_OVERRIDE,
CONF_SUBCAT,
CONF_UNITCODE,
CONF_X10,
DOMAIN,
INSTEON_COMPONENTS,
ON_OFF_EVENTS,
)
from .schemas import convert_yaml_to_config_flow
from .utils import (
add_on_off_event_device,
async_register_services,
get_device_platforms,
register_new_device_callback,
)
_LOGGER = logging.getLogger(__name__)
OPTIONS = "options"
async def async_get_device_config(hass, config_entry):
"""Initiate the connection and services."""
# Iterate over a copy of the addresses since the device list could change during the status update
# Cannot be done concurrently due to issues with the underlying protocol.
for address in list(devices):
try:
await devices[address].async_status()
except AttributeError:
pass
await devices.async_load(id_devices=1)
for addr in devices:
device = devices[addr]
flags = True
for name in device.operating_flags:
if not device.operating_flags[name].is_loaded:
flags = False
break
if flags:
for name in device.properties:
if not device.properties[name].is_loaded:
flags = False
break
# Cannot be done concurrently due to issues with the underlying protocol.
if not device.aldb.is_loaded or not flags:
await device.async_read_config()
await devices.async_save(workdir=hass.config.config_dir)
async def close_insteon_connection(*args):
"""Close the Insteon connection."""
await async_close()
async def async_setup(hass, config):
"""Set up the Insteon platform."""
if DOMAIN not in config:
return True
conf = config[DOMAIN]
data, options = convert_yaml_to_config_flow(conf)
if options:
hass.data[DOMAIN] = {}
hass.data[DOMAIN][OPTIONS] = options
# Create a config entry with the connection data
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=data
)
)
return True
async def async_setup_entry(hass, entry):
"""Set up an Insteon entry."""
if not devices.modem:
try:
await async_connect(**entry.data)
except ConnectionError as exception:
_LOGGER.error("Could not connect to Insteon modem")
raise ConfigEntryNotReady from exception
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, close_insteon_connection)
await devices.async_load(
workdir=hass.config.config_dir, id_devices=0, load_modem_aldb=0
)
# If options existed in YAML and have not already been saved to the config entry
# add them now
if (
not entry.options
and entry.source == SOURCE_IMPORT
and hass.data.get(DOMAIN)
and hass.data[DOMAIN].get(OPTIONS)
):
hass.config_entries.async_update_entry(
entry=entry,
options=hass.data[DOMAIN][OPTIONS],
)
for device_override in entry.options.get(CONF_OVERRIDE, []):
# Override the device default capabilities for a specific address
address = device_override.get("address")
if not devices.get(address):
cat = device_override[CONF_CAT]
subcat = device_override[CONF_SUBCAT]
devices.set_id(address, cat, subcat, 0)
for device in entry.options.get(CONF_X10, []):
housecode = device.get(CONF_HOUSECODE)
unitcode = device.get(CONF_UNITCODE)
x10_type = "on_off"
steps = device.get(CONF_DIM_STEPS, 22)
if device.get(CONF_PLATFORM) == "light":
x10_type = "dimmable"
elif device.get(CONF_PLATFORM) == "binary_sensor":
x10_type = "sensor"
_LOGGER.debug(
"Adding X10 device to Insteon: %s %d %s", housecode, unitcode, x10_type
)
device = devices.add_x10_device(housecode, unitcode, x10_type, steps)
for component in INSTEON_COMPONENTS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
for address in devices:
device = devices[address]
platforms = get_device_platforms(device)
if ON_OFF_EVENTS in platforms:
add_on_off_event_device(hass, device)
_LOGGER.debug("Insteon device count: %s", len(devices))
register_new_device_callback(hass)
async_register_services(hass)
device_registry = await hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, str(devices.modem.address))},
manufacturer="Smart Home",
name=f"{devices.modem.description} {devices.modem.address}",
model=f"{devices.modem.model} ({devices.modem.cat!r}, 0x{devices.modem.subcat:02x})",
sw_version=f"{devices.modem.firmware:02x} Engine Version: {devices.modem.engine_version}",
)
asyncio.create_task(async_get_device_config(hass, entry))
return True
|
import argparse
import logging
import os
import sys
def extract_preamble(fin):
end_preamble = False
preamble, body = [], []
for line in fin:
if end_preamble:
body.append(line)
elif line.startswith('#'):
preamble.append(line)
else:
end_preamble = True
body.append(line)
return preamble, body
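# Illustrative example (not part of the original script): for a file that
# starts with license comment lines, the split looks like this.
#
#     # Copyright ...        -> preamble
#     # Licensed under ...   -> preamble
#     import os              -> body (and every line after it)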
def main():
parser = argparse.ArgumentParser()
parser.add_argument('path', help='the path of the file to check')
parser.add_argument('--replace', help='replace the preamble with the one from this file')
parser.add_argument('--loglevel', default=logging.INFO)
args = parser.parse_args()
logging.basicConfig(level=args.loglevel)
with open(args.path) as fin:
preamble, body = extract_preamble(fin)
for line in preamble:
logging.info('%s: %s', args.path, line.rstrip())
if not args.replace:
sys.exit(0)
with open(args.replace) as fin:
preamble, _ = extract_preamble(fin)
if os.access(args.path, os.X_OK):
preamble.insert(0, '#!/usr/bin/env python\n')
with open(args.path, 'w') as fout:
for line in preamble + body:
fout.write(line)
if __name__ == '__main__':
main()
|
import abodepy.helpers.constants as CONST
from homeassistant.components.cover import CoverEntity
from . import AbodeDevice
from .const import DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Abode cover devices."""
data = hass.data[DOMAIN]
entities = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_COVER):
entities.append(AbodeCover(data, device))
async_add_entities(entities)
class AbodeCover(AbodeDevice, CoverEntity):
"""Representation of an Abode cover."""
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
return not self._device.is_open
def close_cover(self, **kwargs):
"""Issue close command to cover."""
self._device.close_cover()
def open_cover(self, **kwargs):
"""Issue open command to cover."""
self._device.open_cover()
|
import os
import re
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
def test_hostname(host):
assert re.search(r'instance-[12]', host.check_output('hostname -s'))
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
filename = '/etc/molecule/{}'.format(host.check_output('hostname -s'))
f = host.file(filename)
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
from datetime import timedelta
from django.urls import reverse
from django.utils import timezone
from weblate.trans.tests.test_views import ViewTestCase
from weblate.trans.views.reports import generate_counts, generate_credits
COUNTS_DATA = [
{
"count": 1,
"count_edit": 0,
"count_new": 1,
"name": "Weblate Test",
"words": 2,
"words_edit": 0,
"words_new": 2,
"chars": 14,
"chars_edit": 0,
"chars_new": 14,
"email": "[email protected]",
"t_chars": 14,
"t_chars_edit": 0,
"t_chars_new": 14,
"t_words": 2,
"t_words_edit": 0,
"t_words_new": 2,
"count_approve": 0,
"words_approve": 0,
"chars_approve": 0,
"t_chars_approve": 0,
"t_words_approve": 0,
"edits": 14,
"edits_approve": 0,
"edits_edit": 0,
"edits_new": 14,
}
]
class BaseReportsTest(ViewTestCase):
def setUp(self):
super().setUp()
self.user.is_superuser = True
self.user.save()
def add_change(self):
self.edit_unit("Hello, world!\n", "Nazdar svete!\n")
class ReportsTest(BaseReportsTest):
def test_credits_empty(self):
data = generate_credits(
None,
timezone.now() - timedelta(days=1),
timezone.now() + timedelta(days=1),
translation__component=self.component,
)
self.assertEqual(data, [])
def test_credits_one(self, expected_count=1):
self.add_change()
data = generate_credits(
None,
timezone.now() - timedelta(days=1),
timezone.now() + timedelta(days=1),
translation__component=self.component,
)
self.assertEqual(
data, [{"Czech": [("[email protected]", "Weblate Test", expected_count)]}]
)
def test_credits_more(self):
self.edit_unit("Hello, world!\n", "Nazdar svete2!\n")
self.test_credits_one(expected_count=2)
def test_counts_one(self):
self.add_change()
data = generate_counts(
None,
timezone.now() - timedelta(days=1),
timezone.now() + timedelta(days=1),
component=self.component,
)
self.assertEqual(data, COUNTS_DATA)
class ReportsComponentTest(BaseReportsTest):
def get_kwargs(self):
return self.kw_component
def get_credits(self, style):
self.add_change()
return self.client.post(
reverse("credits", kwargs=self.get_kwargs()),
{
"period": "",
"style": style,
"start_date": "2000-01-01",
"end_date": "2100-01-01",
},
)
def test_credits_view_json(self):
response = self.get_credits("json")
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(
response.content.decode(),
[{"Czech": [["[email protected]", "Weblate Test", 1]]}],
)
def test_credits_view_rst(self):
response = self.get_credits("rst")
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.content.decode(),
"\n\n* Czech\n\n * Weblate Test <[email protected]> (1)\n\n",
)
def test_credits_view_html(self):
response = self.get_credits("html")
self.assertEqual(response.status_code, 200)
self.assertHTMLEqual(
response.content.decode(),
"<table>\n"
"<tr>\n<th>Czech</th>\n"
'<td><ul><li><a href="mailto:[email protected]">'
"Weblate Test</a> (1)</li></ul></td>\n</tr>\n"
"</table>",
)
def get_counts(self, style, **kwargs):
self.add_change()
params = {
"style": style,
"period": "",
"start_date": "2000-01-01",
"end_date": "2100-01-01",
}
params.update(kwargs)
return self.client.post(reverse("counts", kwargs=self.get_kwargs()), params)
def test_counts_view_json(self):
response = self.get_counts("json")
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode(), COUNTS_DATA)
def test_counts_view_30days(self):
response = self.get_counts("json", period="30days")
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode(), COUNTS_DATA)
def test_counts_view_this_month(self):
response = self.get_counts("json", period="this-month")
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode(), COUNTS_DATA)
def test_counts_view_month(self):
response = self.get_counts("json", period="month")
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode(), [])
def test_counts_view_year(self):
response = self.get_counts("json", period="year")
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode(), [])
def test_counts_view_this_year(self):
response = self.get_counts("json", period="this-year")
self.assertEqual(response.status_code, 200)
self.assertJSONEqual(response.content.decode(), COUNTS_DATA)
def test_counts_view_rst(self):
response = self.get_counts("rst")
self.assertEqual(response.status_code, 200)
self.assertContains(response, "[email protected]")
def test_counts_view_html(self):
response = self.get_counts("html")
self.assertEqual(response.status_code, 200)
self.assertHTMLEqual(
response.content.decode(),
"""
<table>
<tr>
<th>Name</th>
<th>Email</th>
<th>Count total</th>
<th>Edits total</th>
<th>Source words total</th>
<th>Source chars total</th>
<th>Target words total</th>
<th>Target chars total</th>
<th>Count new</th>
<th>Edits new</th>
<th>Source words new</th>
<th>Source chars new</th>
<th>Target words new</th>
<th>Target chars new</th>
<th>Count approved</th>
<th>Edits approved</th>
<th>Source words approved</th>
<th>Source chars approved</th>
<th>Target words approved</th>
<th>Target chars approved</th>
<th>Count edited</th>
<th>Edits edited</th>
<th>Source words edited</th>
<th>Source chars edited</th>
<th>Target words edited</th>
<th>Target chars edited</th>
</tr>
<tr>
<td>Weblate Test</td>
<td>[email protected]</td>
<td>1</td>
<td>14</td>
<td>2</td>
<td>14</td>
<td>2</td>
<td>14</td>
<td>1</td>
<td>14</td>
<td>2</td>
<td>14</td>
<td>2</td>
<td>14</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
<td>0</td>
</tr>
</table>
""",
)
class ReportsProjectTest(ReportsComponentTest):
def get_kwargs(self):
return self.kw_project
class ReportsGlobalTest(ReportsComponentTest):
def get_kwargs(self):
return {}
|
class cached_property:
"""Cached property descriptor.
Caches the return value of the get method on first call.
Examples:
.. code-block:: python
@cached_property
def connection(self):
return Connection()
@connection.setter # Prepares stored value
def connection(self, value):
if value is None:
raise TypeError('Connection must be a connection')
return value
@connection.deleter
def connection(self, value):
# Additional action to do at del(self.attr)
if value is not None:
print('Connection {0!r} deleted'.format(value))
"""
def __init__(self, fget=None, fset=None, fdel=None, doc=None):
self.__get = fget
self.__set = fset
self.__del = fdel
self.__doc__ = doc or fget.__doc__
self.__name__ = fget.__name__
self.__module__ = fget.__module__
def __get__(self, obj, type=None):
if obj is None:
return self
try:
return obj.__dict__[self.__name__]
except KeyError:
value = obj.__dict__[self.__name__] = self.__get(obj)
return value
def __set__(self, obj, value):
if obj is None:
return self
if self.__set is not None:
value = self.__set(obj, value)
obj.__dict__[self.__name__] = value
def __delete__(self, obj, _sentinel=object()):
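# The default _sentinel object distinguishes "attribute was never cached"
# from a cached value of None, so the deleter only runs for real values.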
if obj is None:
return self
value = obj.__dict__.pop(self.__name__, _sentinel)
if self.__del is not None and value is not _sentinel:
self.__del(obj, value)
def setter(self, fset):
return self.__class__(self.__get, fset, self.__del)
def deleter(self, fdel):
return self.__class__(self.__get, self.__set, fdel)
|
import functools
import logging
import redis
logger = logging.getLogger(__name__)
redis_conn = None
cache_prefix = None
def init(enable=True,
host='localhost', port=6379, db=0, password=None, path='/'):
global redis_conn, cache_prefix
if not enable:
redis_conn = None
return
logger.info('Enabling storage cache on Redis')
logger.info('Redis config: {0}'.format({
'host': host,
'port': port,
'db': db,
'password': password,
'path': path
}))
redis_conn = redis.StrictRedis(host=host,
port=int(port),
db=int(db),
password=password)
cache_prefix = 'cache_path:{0}'.format(path)
def cache_key(key):
return cache_prefix + key
def set(f):
@functools.wraps(f)
def wrapper(*args):
content = args[-1]
key = args[-2]
key = cache_key(key)
try:
cached_content = get_by_key(key)
if cached_content and cached_content == content:
# If cached content is the same as what we are about to
# write, we don't need to write again.
return args[-2]
redis_conn.set(key, content)
except redis.exceptions.ConnectionError as e:
logging.warning("LRU: Redis connection error: {0}".format(e))
return f(*args)
if redis_conn is None:
return f
return wrapper
def get(f):
@functools.wraps(f)
def wrapper(*args):
key = args[-1]
key = cache_key(key)
content = get_by_key(key)
if content is not None:
return content
# Refresh cache
content = f(*args)
if content is not None:
try:
redis_conn.set(key, content)
except redis.exceptions.ConnectionError as e:
logging.warning("LRU: Redis connection error: {0}".format(e))
return content
if redis_conn is None:
return f
return wrapper
def get_by_key(key):
try:
content = redis_conn.get(key)
except redis.exceptions.ConnectionError as e:
logging.warning("LRU: Redis connection error: {0}".format(e))
return None
return content
def remove(f):
@functools.wraps(f)
def wrapper(*args):
key = args[-1]
key = cache_key(key)
try:
redis_conn.delete(key)
except redis.exceptions.ConnectionError as e:
logging.warning("LRU: Redis connection error: {0}".format(e))
return f(*args)
if redis_conn is None:
return f
return wrapper
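# Illustrative sketch (not part of the original module): how these decorators
# might wrap storage functions. The wrapped functions below are assumptions;
# the only requirement the wrappers impose is the argument order (the cache
# key is the last positional argument for get/remove, and the second to last
# for set, with the content last).
#
#     @get
#     def read_blob(storage, key):
#         return storage.fetch(key)
#
#     @set
#     def write_blob(storage, key, content):
#         storage.store(key, content)
#
#     @remove
#     def delete_blob(storage, key):
#         storage.delete(key)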
|
from homeassistant.components import logbook
from homeassistant.components.alexa.const import EVENT_ALEXA_SMART_HOME
from homeassistant.setup import async_setup_component
from tests.components.logbook.test_init import MockLazyEventPartialState
async def test_humanify_alexa_event(hass):
"""Test humanifying Alexa event."""
hass.config.components.add("recorder")
await async_setup_component(hass, "alexa", {})
await async_setup_component(hass, "logbook", {})
hass.states.async_set("light.kitchen", "on", {"friendly_name": "Kitchen Light"})
entity_attr_cache = logbook.EntityAttributeCache(hass)
results = list(
logbook.humanify(
hass,
[
MockLazyEventPartialState(
EVENT_ALEXA_SMART_HOME,
{"request": {"namespace": "Alexa.Discovery", "name": "Discover"}},
),
MockLazyEventPartialState(
EVENT_ALEXA_SMART_HOME,
{
"request": {
"namespace": "Alexa.PowerController",
"name": "TurnOn",
"entity_id": "light.kitchen",
}
},
),
MockLazyEventPartialState(
EVENT_ALEXA_SMART_HOME,
{
"request": {
"namespace": "Alexa.PowerController",
"name": "TurnOn",
"entity_id": "light.non_existing",
}
},
),
],
entity_attr_cache,
{},
)
)
event1, event2, event3 = results
assert event1["name"] == "Amazon Alexa"
assert event1["message"] == "send command Alexa.Discovery/Discover"
assert event1["entity_id"] is None
assert event2["name"] == "Amazon Alexa"
assert (
event2["message"]
== "send command Alexa.PowerController/TurnOn for Kitchen Light"
)
assert event2["entity_id"] == "light.kitchen"
assert event3["name"] == "Amazon Alexa"
assert (
event3["message"]
== "send command Alexa.PowerController/TurnOn for light.non_existing"
)
assert event3["entity_id"] == "light.non_existing"
|
import pandas as pd
import pytest
import json
import os
from numpy.testing import assert_array_equal
from yandextank.plugins.NeUploader.plugin import Plugin
try:
from yatest import common
PATH = common.source_path('load/projects/yandex-tank/yandextank/plugins/NeUploader/tests')
except ImportError:
PATH = os.path.dirname(__file__)
class TestMonitoringData(object):
@pytest.mark.parametrize('mon_data, length', [
(os.path.join(PATH, 'monitoring_data/monitoring1.json'), 54),
])
def test_df_num_and_cols(self, mon_data, length):
with open(mon_data) as f:
jsondata = json.load(f)
dfs = Plugin.monitoring_data_to_dfs(jsondata)
assert len(dfs) == length
assert all([list(df.columns) == ['ts', 'value'] for df in dfs.values()])
@pytest.mark.parametrize('mon_data, names', [
(os.path.join(PATH, 'monitoring_data/monitoring1.json'),
()),
])
def test_metrics_names(self, mon_data, names):
with open(mon_data) as f:
jsondata = json.load(f)
dfs = Plugin.monitoring_data_to_dfs(jsondata)
assert set(dfs.keys()) == {'{}:{}'.format(panelk, name) for i in jsondata for panelk, panelv in i['data'].items() for name in panelv['metrics'].keys()}
DF = pd.DataFrame({'ts': [0, 1, 2, 3, 4, 5, 6, 7, 8, 9],
'value': [43, 75, 12, 65, 24, 65, 41, 87, 15, 62],
'tag': ['foo', 'bar', 'foo', '', '', 'null', '', 'not_null', '', 'foo']})
@pytest.mark.parametrize('df, case, expected', [
(DF, '__overall__', DF[['ts', 'value']]),
(DF, 'foo', pd.DataFrame({'ts': [0, 2, 9],
'value': [43, 12, 62]})),
(DF, 'null', pd.DataFrame({'ts': [5],
'value': [65]}))
])
def test_filter_df_by_case(df, case, expected):
assert_array_equal(Plugin.filter_df_by_case(df, case), expected, )
|
import os
import sys
from PyQt5.QtCore import (QT_VERSION_STR, PYQT_VERSION_STR, qVersion,
QStandardPaths, QCoreApplication)
def print_header():
"""Show system information."""
print("Python {}".format(sys.version))
print("os.name: {}".format(os.name))
print("sys.platform: {}".format(sys.platform))
print()
print("Qt {}, compiled {}".format(qVersion(), QT_VERSION_STR))
print("PyQt {}".format(PYQT_VERSION_STR))
print()
def print_paths():
"""Print all QStandardPaths.StandardLocation members."""
for name, obj in vars(QStandardPaths).items():
if isinstance(obj, QStandardPaths.StandardLocation):
location = QStandardPaths.writableLocation(obj)
print("{:25} {}".format(name, location))
def main():
print_header()
print("No QApplication")
print("===============")
print()
print_paths()
app = QCoreApplication(sys.argv)
app.setApplicationName("qapp_name")
print()
print("With QApplication")
print("=================")
print()
print_paths()
if __name__ == '__main__':
main()
|
from datetime import datetime, timedelta
from functools import wraps
import logging
import re
from babelfish import Country
import guessit
import requests
from .. import __short_version__
from ..cache import REFINER_EXPIRATION_TIME, region
from ..utils import sanitize
from ..video import Episode
logger = logging.getLogger(__name__)
series_re = re.compile(r'^(?P<series>.*?)(?: \((?:(?P<year>\d{4})|(?P<country>[A-Z]{2}))\))?$')
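# Examples of what series_re captures (illustrative):
#   'The Office (US)' -> series='The Office', country='US'
#   'Castle (2009)'   -> series='Castle', year='2009'
#   'Breaking Bad'    -> series='Breaking Bad'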
def requires_auth(func):
"""Decorator for :class:`TVDBClient` methods that require authentication"""
@wraps(func)
def wrapper(self, *args, **kwargs):
if self.token is None or self.token_expired:
self.login()
elif self.token_needs_refresh:
self.refresh_token()
return func(self, *args, **kwargs)
return wrapper
class TVDBClient(object):
"""TVDB REST API Client
:param str apikey: API key to use.
:param str username: username to use.
:param str password: password to use.
:param str language: language of the responses.
:param session: session object to use.
:type session: :class:`requests.sessions.Session` or compatible.
:param dict headers: additional headers.
:param int timeout: timeout for the requests.
"""
#: Base URL of the API
base_url = 'https://api.thetvdb.com'
#: Token lifespan
token_lifespan = timedelta(hours=1)
#: Minimum token age before a :meth:`refresh_token` is triggered
refresh_token_every = timedelta(minutes=30)
def __init__(self, apikey=None, username=None, password=None, language='en', session=None, headers=None,
timeout=10):
#: API key
self.apikey = apikey
#: Username
self.username = username
#: Password
self.password = password
#: Last token acquisition date
self.token_date = datetime.utcnow() - self.token_lifespan
#: Session for the requests
self.session = session or requests.Session()
self.session.timeout = timeout
self.session.headers.update(headers or {})
self.session.headers['Content-Type'] = 'application/json'
self.session.headers['Accept-Language'] = language
@property
def language(self):
return self.session.headers['Accept-Language']
@language.setter
def language(self, value):
self.session.headers['Accept-Language'] = value
@property
def token(self):
if 'Authorization' not in self.session.headers:
return None
return self.session.headers['Authorization'][7:]
@property
def token_expired(self):
return datetime.utcnow() - self.token_date > self.token_lifespan
@property
def token_needs_refresh(self):
return datetime.utcnow() - self.token_date > self.refresh_token_every
def login(self):
"""Login"""
# perform the request
data = {'apikey': self.apikey, 'username': self.username, 'password': self.password}
r = self.session.post(self.base_url + '/login', json=data)
r.raise_for_status()
# set the Authorization header
self.session.headers['Authorization'] = 'Bearer ' + r.json()['token']
# update token_date
self.token_date = datetime.utcnow()
def refresh_token(self):
"""Refresh token"""
# perform the request
r = self.session.get(self.base_url + '/refresh_token')
r.raise_for_status()
# set the Authorization header
self.session.headers['Authorization'] = 'Bearer ' + r.json()['token']
# update token_date
self.token_date = datetime.utcnow()
@requires_auth
def search_series(self, name=None, imdb_id=None, zap2it_id=None):
"""Search series"""
# perform the request
params = {'name': name, 'imdbId': imdb_id, 'zap2itId': zap2it_id}
r = self.session.get(self.base_url + '/search/series', params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data']
@requires_auth
def get_series(self, id):
"""Get series"""
# perform the request
r = self.session.get(self.base_url + '/series/{}'.format(id))
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data']
@requires_auth
def get_series_actors(self, id):
"""Get series actors"""
# perform the request
r = self.session.get(self.base_url + '/series/{}/actors'.format(id))
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data']
@requires_auth
def get_series_episodes(self, id, page=1):
"""Get series episodes"""
# perform the request
params = {'page': page}
r = self.session.get(self.base_url + '/series/{}/episodes'.format(id), params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()
@requires_auth
def query_series_episodes(self, id, absolute_number=None, aired_season=None, aired_episode=None, dvd_season=None,
dvd_episode=None, imdb_id=None, page=1):
"""Query series episodes"""
# perform the request
params = {'absoluteNumber': absolute_number, 'airedSeason': aired_season, 'airedEpisode': aired_episode,
'dvdSeason': dvd_season, 'dvdEpisode': dvd_episode, 'imdbId': imdb_id, 'page': page}
r = self.session.get(self.base_url + '/series/{}/episodes/query'.format(id), params=params)
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()
@requires_auth
def get_episode(self, id):
"""Get episode"""
# perform the request
r = self.session.get(self.base_url + '/episodes/{}'.format(id))
if r.status_code == 404:
return None
r.raise_for_status()
return r.json()['data']
#: User-Agent to use
user_agent = 'Subliminal/%s' % __short_version__
#: Configured instance of :class:`TVDBClient`
tvdb_client = TVDBClient('5EC930FB90DA1ADA', headers={'User-Agent': user_agent})
#: Configure guessit in order to use GuessitCountryConverter
guessit.api.configure()
@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME)
def search_series(name):
"""Search series.
:param str name: name of the series.
:return: the search results.
:rtype: list
"""
return tvdb_client.search_series(name)
@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME)
def get_series(id):
"""Get series.
:param int id: id of the series.
:return: the series data.
:rtype: dict
"""
return tvdb_client.get_series(id)
@region.cache_on_arguments(expiration_time=REFINER_EXPIRATION_TIME)
def get_series_episode(series_id, season, episode):
"""Get an episode of a series.
:param int series_id: id of the series.
:param int season: season number of the episode.
:param int episode: episode number of the episode.
:return: the episode data.
:rtype: dict
"""
result = tvdb_client.query_series_episodes(series_id, aired_season=season, aired_episode=episode)
if result:
return tvdb_client.get_episode(result['data'][0]['id'])
def refine(video, **kwargs):
"""Refine a video by searching `TheTVDB <http://thetvdb.com/>`_.
.. note::
This refiner only works for instances of :class:`~subliminal.video.Episode`.
Several attributes can be found:
* :attr:`~subliminal.video.Episode.series`
* :attr:`~subliminal.video.Episode.year`
* :attr:`~subliminal.video.Episode.series_imdb_id`
* :attr:`~subliminal.video.Episode.series_tvdb_id`
* :attr:`~subliminal.video.Episode.title`
* :attr:`~subliminal.video.Video.imdb_id`
* :attr:`~subliminal.video.Episode.tvdb_id`
"""
# only deal with Episode videos
if not isinstance(video, Episode):
logger.error('Cannot refine non-episode videos')
return
# exit if the information is complete
if video.series_tvdb_id and video.tvdb_id:
logger.debug('No need to search')
return
# search the series
logger.info('Searching series %r', video.series)
results = search_series(video.series.lower())
if not results:
logger.warning('No results for series')
return
logger.debug('Found %d results', len(results))
# search for exact matches
matching_results = []
for result in results:
matching_result = {}
# use seriesName and aliases
series_names = [result['seriesName']]
series_names.extend(result['aliases'])
# parse the original series as series + year or country
original_match = series_re.match(result['seriesName']).groupdict()
# parse series year
series_year = None
if result['firstAired']:
series_year = datetime.strptime(result['firstAired'], '%Y-%m-%d').year
# discard mismatches on year
if video.year and series_year and video.year != series_year:
logger.debug('Discarding series %r mismatch on year %d', result['seriesName'], series_year)
continue
# iterate over series names
for series_name in series_names:
# parse as series, year and country
series, year, country = series_re.match(series_name).groups()
if year:
year = int(year)
if country:
country = Country.fromguessit(country)
# discard mismatches on year
if year and (video.original_series or video.year != year):
logger.debug('Discarding series name %r mismatch on year %d', series, year)
continue
# discard mismatches on country
if video.country and video.country != country:
logger.debug('Discarding series name %r mismatch on country %r', series, country)
continue
# match on sanitized series name
if sanitize(series) == sanitize(video.series):
logger.debug('Found exact match on series %r', series_name)
matching_result['match'] = {
'series': original_match['series'],
'year': series_year or year,
'country': country,
'original_series': original_match['year'] is None and country is None
}
break
# add the result on match
if matching_result:
matching_result['data'] = result
matching_results.append(matching_result)
# exit if we don't have exactly 1 matching result
if not matching_results:
logger.error('No matching series found')
return
if len(matching_results) > 1:
logger.error('Multiple matches found')
return
# get the series
matching_result = matching_results[0]
series = get_series(matching_result['data']['id'])
# add series information
logger.debug('Found series %r', series)
video.series = matching_result['match']['series']
video.alternative_series.extend(series['aliases'])
video.year = matching_result['match']['year']
video.country = matching_result['match']['country']
video.original_series = matching_result['match']['original_series']
video.series_tvdb_id = series['id']
video.series_imdb_id = series['imdbId'] or None
# get the episode
logger.info('Getting series episode %dx%d', video.season, video.episode)
episode = get_series_episode(video.series_tvdb_id, video.season, video.episode)
if not episode:
logger.warning('No results for episode')
return
# add episode information
logger.debug('Found episode %r', episode)
video.tvdb_id = episode['id']
video.title = episode['episodeName'] or None
video.imdb_id = episode['imdbId'] or None
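# Illustrative note on the matching above (assuming series_re follows the
# "series + year or country" convention used by this refiner): a TVDB result
# named 'Series Name (2007)' parses to series='Series Name', year=2007,
# country=None, while 'Series Name (US)' parses with a country instead; a
# candidate is kept only when the sanitized series name matches the video and
# neither the year nor the country contradicts it.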
|
import urllib2
from urlparse import urlparse, parse_qs
try:
from xml.etree import ElementTree
except ImportError:
ElementTree = None
from test import CollectorTestCase
from test import get_collector_config
from test import run_only
from test import unittest
from mock import patch
from diamond.collector import Collector
from kafkastat import KafkaCollector
##########
def run_only_if_ElementTree_is_available(func):
try:
from xml.etree import ElementTree
except ImportError:
ElementTree = None
def pred():
return ElementTree is not None
return run_only(func, pred)
class TestKafkaCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('KafkaCollector', {
'interval': 10
})
self.collector = KafkaCollector(config, None)
def _get_xml_fixture(self, name):
fixture = self.getFixture(name)
return ElementTree.fromstring(fixture.getvalue())
def test_import(self):
self.assertTrue(KafkaCollector)
@run_only_if_ElementTree_is_available
@patch('urllib2.urlopen')
def test_get(self, urlopen_mock):
urlopen_mock.return_value = self.getFixture('empty.xml')
result = self.collector._get('/path')
result_string = ElementTree.tostring(result)
self.assertEqual(result_string, '<Server />')
@run_only_if_ElementTree_is_available
@patch('urllib2.urlopen')
def test_get_httperror(self, urlopen_mock):
urlopen_mock.side_effect = urllib2.URLError('BOOM')
result = self.collector._get('/path')
self.assertFalse(result)
@run_only_if_ElementTree_is_available
@patch('urllib2.urlopen')
def test_get_bad_xml(self, urlopen_mock):
urlopen_mock.return_value = self.getFixture('bad.xml')
result = self.collector._get('/path')
self.assertFalse(result)
@run_only_if_ElementTree_is_available
@patch.object(KafkaCollector, '_get')
def test_get_mbeans(self, get_mock):
get_mock.return_value = self._get_xml_fixture('serverbydomain.xml')
expected_names = {'kafka:type=kafka.BrokerAllTopicStat',
'kafka:type=kafka.BrokerTopicStat.mytopic',
'kafka:type=kafka.LogFlushStats',
'kafka:type=kafka.SocketServerStats',
'kafka:type=kafka.logs.mytopic-0',
'kafka:type=kafka.logs.mytopic-1',
'kafka:type=kafka.Log4jController'}
found_beans = self.collector.get_mbeans('*')
self.assertEqual(found_beans, expected_names)
@run_only_if_ElementTree_is_available
@patch.object(KafkaCollector, '_get')
def test_get_mbeans_get_fail(self, get_mock):
get_mock.return_value = None
found_beans = self.collector.get_mbeans('*')
self.assertEqual(found_beans, None)
@run_only_if_ElementTree_is_available
@patch.object(KafkaCollector, '_get')
def test_query_mbean(self, get_mock):
get_mock.return_value = self._get_xml_fixture('mbean.xml')
expected_metrics = {
'kafka.logs.mytopic-1.CurrentOffset': long('213500615'),
'kafka.logs.mytopic-1.NumAppendedMessages': long('224634137'),
'kafka.logs.mytopic-1.NumberOfSegments': int('94'),
'kafka.logs.mytopic-1.Size': long('50143615339'),
}
metrics = self.collector.query_mbean('kafka:type=kafka.logs.mytopic-1')
self.assertEqual(metrics, expected_metrics)
@run_only_if_ElementTree_is_available
@patch.object(KafkaCollector, '_get')
def test_query_mbean_with_prefix(self, get_mock):
get_mock.return_value = self._get_xml_fixture('mbean.xml')
expected_metrics = {
'some.prefix.CurrentOffset': long('213500615'),
'some.prefix.NumAppendedMessages': long('224634137'),
'some.prefix.NumberOfSegments': int('94'),
'some.prefix.Size': long('50143615339'),
}
metrics = self.collector.query_mbean('kafka:type=kafka.logs.mytopic-0',
'some.prefix')
self.assertEqual(metrics, expected_metrics)
@run_only_if_ElementTree_is_available
@patch.object(KafkaCollector, '_get')
def test_activeController_value(self, get_mock):
get_mock.return_value = self._get_xml_fixture(
'activecontrollercount.xml')
expected_metrics = {
'KafkaController.ActiveControllerCount.Value': 1.0,
}
metrics = self.collector.query_mbean(
'kafka.controller:type=KafkaController,name=ActiveControllerCount')
self.assertEqual(metrics, expected_metrics)
@run_only_if_ElementTree_is_available
@patch.object(KafkaCollector, '_get')
def test_query_mbean_fail(self, get_mock):
get_mock.return_value = None
metrics = self.collector.query_mbean('kafka:type=kafka.logs.mytopic-0')
self.assertEqual(metrics, None)
def getKafkaFixture(self, url):
url_object = urlparse(url)
query_string = parse_qs(url_object.query)
querynames = query_string.get('querynames', [])
objectnames = query_string.get('objectname', [])
if url_object.path == '/serverbydomain':
if 'java.lang:type=GarbageCollector,name=*' in querynames:
return self.getFixture('serverbydomain_gc.xml')
elif '*kafka.controller:*' in querynames:
return self.getFixture('kafkacontroller.xml')
elif 'java.lang:type=Threading' in querynames:
return self.getFixture('serverbydomain_threading.xml')
else:
return self.getFixture('serverbydomain_logs_only.xml')
elif url_object.path == '/mbean':
if ('java.lang:type=GarbageCollector,name=PS MarkSweep'
in objectnames):
return self.getFixture('gc_marksweep.xml')
elif ('kafka.controller:type=KafkaController,' +
'name=ActiveControllerCount'
in objectnames):
return self.getFixture('activecontrollercount.xml')
elif ('java.lang:type=GarbageCollector,name=PS Scavenge'
in objectnames):
return self.getFixture('gc_scavenge.xml')
elif 'java.lang:type=Threading' in objectnames:
return self.getFixture('threading.xml')
else:
return self.getFixture('mbean.xml')
else:
return ''
@run_only_if_ElementTree_is_available
@patch('urllib2.urlopen')
@patch.object(Collector, 'publish')
def test(self, publish_mock, urlopen_mock):
urlopen_mock.side_effect = self.getKafkaFixture
self.collector.collect()
expected_metrics = {
'kafka.logs.mytopic-1.CurrentOffset': 213500615,
'kafka.logs.mytopic-1.NumAppendedMessages': 224634137,
'kafka.logs.mytopic-1.NumberOfSegments': 94,
'kafka.logs.mytopic-1.Size': 50143615339,
'Threading.CurrentThreadCpuTime': 0,
'Threading.CurrentThreadUserTime': 0,
'Threading.DaemonThreadCount': 58,
'Threading.PeakThreadCount': 90,
'Threading.ThreadCount': 89,
'Threading.TotalStartedThreadCount': 228,
'GarbageCollector.PSScavenge.CollectionCount': 37577,
'GarbageCollector.PSScavenge.CollectionTime': 112293,
'GarbageCollector.PSMarkSweep.CollectionCount': 2,
'GarbageCollector.PSMarkSweep.CollectionTime': 160,
}
self.assertPublishedMany(publish_mock, expected_metrics)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
import json
import logging
import posixpath
from absl import flags
from perfkitbenchmarker import errors
from perfkitbenchmarker import os_types
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
FLAGS = flags.FLAGS
flags.DEFINE_boolean('enterprise_redis_tune_on_startup', True,
'Whether to tune core config during startup.')
flags.DEFINE_integer('enterprise_redis_proxy_threads', 24,
'Number of redis proxy threads to use.')
flags.DEFINE_integer('enterprise_redis_shard_count', 6,
'Number of redis shard. Each shard is a redis thread.')
flags.DEFINE_integer('enterprise_redis_load_records', 1000000,
'Number of keys to pre-load into Redis.')
flags.DEFINE_integer('enterprise_redis_run_records', 1000000,
'Number of requests per loadgen client to send to the '
'Redis server.')
flags.DEFINE_integer('enterprise_redis_pipeline', 9,
'Number of pipelines to use.')
flags.DEFINE_integer('enterprise_redis_loadgen_clients', 24,
'Number of clients per loadgen vm.')
flags.DEFINE_integer('enterprise_redis_max_threads', 40,
'Maximum number of memtier threads to use.')
flags.DEFINE_integer('enterprise_redis_min_threads', 18,
'Minimum number of memtier threads to use.')
flags.DEFINE_integer('enterprise_redis_thread_increment', 1,
'Number of memtier threads to increment by.')
flags.DEFINE_integer('enterprise_redis_latency_threshold', 1100,
'The latency threshold in microseconds '
'until the test stops.')
flags.DEFINE_boolean('enterprise_redis_pin_workers', False,
'Whether to pin the proxy threads after startup.')
flags.DEFINE_list('enterprise_redis_disable_cpu_ids', None,
'List of cpus to disable by id.')
_PACKAGE_NAME = 'redis_enterprise'
_LICENSE = 'enterprise_redis_license'
_WORKING_DIR = '~/redislabs'
_RHEL_TAR = 'redislabs-5.4.2-24-rhel7-x86_64.tar'
_XENIAL_TAR = 'redislabs-5.4.2-24-xenial-amd64.tar'
_BIONIC_TAR = 'redislabs-5.4.2-24-bionic-amd64.tar'
_USERNAME = '[email protected]'
PREPROVISIONED_DATA = {
_RHEL_TAR:
'8db83074b3e4e6de9c249ce34b6bb899ed158a6a4801f36c530e79bdb97a4c20',
_XENIAL_TAR:
'ef2da8b5eaa02b53488570392df258c0d5d3890a9085c2495aeb5c96f336e639',
_BIONIC_TAR:
'ef0c58d6d11683aac07d3f2cae6b9544cb53064c9f7a7419d63b6d14cd858d53',
_LICENSE:
'd336e9fb8574519ab90a54155727c5c73dda122d906a617368bdfa6a32b03a42',
}
def _GetTarName():
"""Returns the Redis Enterprise package to use depending on the os.
For information about available packages, see
https://redislabs.com/redis-enterprise/software/downloads/.
"""
if FLAGS.os_type in [os_types.RHEL, os_types.AMAZONLINUX2, os_types.CENTOS7]:
return _RHEL_TAR
if FLAGS.os_type in [os_types.UBUNTU1604, os_types.DEBIAN, os_types.DEBIAN9]:
return _XENIAL_TAR
if FLAGS.os_type == os_types.UBUNTU1804:
return _BIONIC_TAR
def Install(vm):
"""Installs Redis Enterprise package on the VM."""
vm.InstallPackages('wget')
vm.InstallPreprovisionedPackageData(_PACKAGE_NAME,
[_GetTarName(), _LICENSE],
_WORKING_DIR)
vm.RemoteCommand('cd {dir} && sudo tar xvf {tar}'.format(
dir=_WORKING_DIR, tar=_GetTarName()))
if FLAGS.os_type == os_types.UBUNTU1804:
# Fix Ubuntu 18.04 DNS conflict
vm.RemoteCommand(
'echo "DNSStubListener=no" | sudo tee -a /etc/systemd/resolved.conf')
vm.RemoteCommand('sudo mv /etc/resolv.conf /etc/resolv.conf.orig')
vm.RemoteCommand(
'sudo ln -s /run/systemd/resolve/resolv.conf /etc/resolv.conf')
vm.RemoteCommand('sudo service systemd-resolved restart')
install_cmd = './install.sh -y'
if not FLAGS.enterprise_redis_tune_on_startup:
install_cmd = 'CONFIG_systune=no ./install.sh -y -n'
vm.RemoteCommand('cd {dir} && sudo {install}'.format(
dir=_WORKING_DIR, install=install_cmd))
def CreateCluster(vm):
"""Create an Redis Enterprise cluster on the vm."""
vm.RemoteCommand(
'sudo /opt/redislabs/bin/rladmin cluster create '
'license_file {license_file} '
'name redis-cluster '
'username {username} '
'password {password} '.format(
license_file=posixpath.join(_WORKING_DIR, _LICENSE),
username=_USERNAME,
password=FLAGS.run_uri))
def OfflineCores(vm):
"""Offline specific cores."""
for cpu_id in FLAGS.enterprise_redis_disable_cpu_ids or []:
vm.RemoteCommand('sudo bash -c '
'"echo 0 > /sys/devices/system/cpu/cpu%s/online"' % cpu_id)
def TuneProxy(vm):
"""Tune the number of Redis proxies on the server vm."""
vm.RemoteCommand(
'sudo /opt/redislabs/bin/rladmin tune '
'proxy all '
'max_threads {proxy_threads} '
'threads {proxy_threads} '.format(
proxy_threads=str(FLAGS.enterprise_redis_proxy_threads)))
vm.RemoteCommand('sudo /opt/redislabs/bin/dmc_ctl restart')
def PinWorkers(vm):
"""Splits the Redis worker threads across the NUMA nodes evenly.
  This function is a no-op if --enterprise_redis_pin_workers is not set.
Args:
vm: The VM with the Redis workers to pin.
"""
if not FLAGS.enterprise_redis_pin_workers:
return
numa_nodes = vm.CheckLsCpu().numa_node_count
proxies_per_node = FLAGS.enterprise_redis_proxy_threads // numa_nodes
for node in range(numa_nodes):
node_cpu_list = vm.RemoteCommand(
'cat /sys/devices/system/node/node%d/cpulist' % node)[0].strip()
# List the PIDs of the Redis worker processes and pin a sliding window of
# `proxies_per_node` workers to the NUMA nodes in increasing order.
vm.RemoteCommand(r'sudo /opt/redislabs/bin/dmc-cli -ts root list | '
r'grep worker | '
r'head -n -{proxies_already_partitioned} | '
r'tail -n {proxies_per_node} | '
r"awk '"
r'{{printf "%i\n",$3}}'
r"' | "
r'xargs -i sudo taskset -pc {node_cpu_list} {{}} '.format(
proxies_already_partitioned=proxies_per_node * node,
proxies_per_node=proxies_per_node,
node_cpu_list=node_cpu_list))
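# Worked example for PinWorkers (illustrative): with
# --enterprise_redis_proxy_threads=24 and 2 NUMA nodes, proxies_per_node is 12;
# for node 0 the pipeline keeps the last 12 listed worker PIDs, while for
# node 1 it first drops those 12 ('head -n -12') and then keeps the 12 before
# them, so each node pins a disjoint block of 12 workers to its own cpulist.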
def SetUpCluster(vm, redis_port):
"""Set up the details of the cluster."""
content = {
'name': 'redisdb',
'memory_size': 10000000000,
'type': 'redis',
'proxy_policy': 'all-master-shards',
'port': redis_port,
'sharding': False,
'authentication_redis_pass': FLAGS.run_uri,
}
if FLAGS.enterprise_redis_shard_count > 1:
content.update({
'sharding': True,
'shards_count': FLAGS.enterprise_redis_shard_count,
'shards_placement': 'sparse',
'oss_cluster': True,
'shard_key_regex':
[{'regex': '.*\\{(?<tag>.*)\\}.*'}, {'regex': '(?<tag>.*)'}]
})
vm.RemoteCommand(
"curl -v -k -u {username}:{password} https://localhost:9443/v1/bdbs "
"-H 'Content-type: application/json' -d '{content}'".format(
username=_USERNAME,
password=FLAGS.run_uri,
content=json.dumps(content)))
@vm_util.Retry()
def WaitForClusterUp(vm, redis_port):
"""Waits for the Redis Enterprise cluster to respond to commands."""
stdout, _ = vm.RemoteCommand(
'/opt/redislabs/bin/redis-cli '
'-h localhost '
'-p {port} '
'-a {password} '
'ping'.format(
password=FLAGS.run_uri,
port=redis_port))
if stdout.find('PONG') == -1:
raise errors.Resource.RetryableCreationError()
def LoadCluster(vm, redis_port):
"""Load the cluster before performing tests."""
vm.RemoteCommand(
'/opt/redislabs/bin/memtier_benchmark '
'-s localhost '
'-a {password} '
'-p {port} '
'-t 1 ' # Set -t and -c to 1 to avoid duplicated work in writing the same
'-c 1 ' # key/value pairs repeatedly.
'--ratio 1:0 '
'--pipeline 100 '
'-d 100 '
'--key-pattern S:S '
'--key-minimum 1 '
'--key-maximum {load_records} '
'-n allkeys '
'--cluster-mode '.format(
password=FLAGS.run_uri,
port=str(redis_port),
load_records=str(FLAGS.enterprise_redis_load_records)))
def BuildRunCommand(redis_vm, threads, port):
"""Spawn a memtir_benchmark on the load_vm against the redis_vm:port.
Args:
redis_vm: The target of the memtier_benchmark
threads: The number of threads to run in this memtier_benchmark process.
port: the port to target on the redis_vm.
Returns:
    Command string to run on the load_vm, or None when threads is 0.
"""
if threads == 0:
return None
return ('/opt/redislabs/bin/memtier_benchmark '
'-s {ip_address} '
'-a {password} '
'-p {port} '
'-t {threads} '
'--ratio 1:1 '
'--pipeline {pipeline} '
'-c {clients} '
'-d 100 '
'--key-minimum 1 '
'--key-maximum {key_maximum} '
'-n {run_records} '
'--cluster-mode '.format(
ip_address=redis_vm.internal_ip,
password=FLAGS.run_uri,
port=str(port),
threads=str(threads),
pipeline=str(FLAGS.enterprise_redis_pipeline),
clients=str(FLAGS.enterprise_redis_loadgen_clients),
key_maximum=str(FLAGS.enterprise_redis_load_records),
run_records=str(FLAGS.enterprise_redis_run_records)))
def Run(redis_vm, load_vms, redis_port):
"""Run memtier against enterprise redis and measure latency and throughput.
This function runs memtier against the redis server vm with increasing memtier
  threads until one of the following conditions is reached:
- FLAGS.enterprise_redis_max_threads is reached
- FLAGS.enterprise_redis_latency_threshold is reached
Args:
redis_vm: Redis server vm.
load_vms: Memtier load vms.
redis_port: Port for the redis server.
Returns:
A list of sample.Sample objects.
"""
results = []
cur_max_latency = 0.0
latency_threshold = FLAGS.enterprise_redis_latency_threshold
threads = FLAGS.enterprise_redis_min_threads
max_throughput_for_completion_latency_under_1ms = 0.0
while (cur_max_latency < latency_threshold
and threads <= FLAGS.enterprise_redis_max_threads):
load_command = BuildRunCommand(redis_vm, threads, redis_port)
# 1min for throughput to stabilize and 10sec of data.
measurement_command = (
'sleep 60 && curl -v -k -u {user}:{password} '
'https://localhost:9443/v1/bdbs/stats?interval=1sec > ~/output'.format(
user=_USERNAME,
password=FLAGS.run_uri,))
args = [((load_vm, load_command), {}) for load_vm in load_vms]
args += [((redis_vm, measurement_command), {})]
vm_util.RunThreaded(lambda vm, command: vm.RemoteCommand(command), args)
stdout, _ = redis_vm.RemoteCommand('cat ~/output')
output = json.loads(stdout)[0]
intervals = output.get('intervals')
for interval in intervals:
throughput = interval.get('total_req')
latency = interval.get('avg_latency')
cur_max_latency = max(cur_max_latency, latency)
sample_metadata = interval
sample_metadata['redis_tune_on_startup'] = (
FLAGS.enterprise_redis_tune_on_startup)
sample_metadata['redis_pipeline'] = (
FLAGS.enterprise_redis_pipeline)
sample_metadata['threads'] = threads
sample_metadata['shard_count'] = FLAGS.enterprise_redis_shard_count
sample_metadata['redis_proxy_threads'] = (
FLAGS.enterprise_redis_proxy_threads)
sample_metadata['redis_loadgen_clients'] = (
FLAGS.enterprise_redis_loadgen_clients)
sample_metadata['pin_workers'] = FLAGS.enterprise_redis_pin_workers
sample_metadata['disable_cpus'] = FLAGS.enterprise_redis_disable_cpu_ids
results.append(sample.Sample('throughput', throughput, 'ops/s',
sample_metadata))
if latency < 1000:
max_throughput_for_completion_latency_under_1ms = max(
max_throughput_for_completion_latency_under_1ms, throughput)
logging.info('Threads : %d (%f, %f) < %f', threads, throughput, latency,
latency_threshold)
threads += FLAGS.enterprise_redis_thread_increment
if cur_max_latency >= 1000:
results.append(sample.Sample(
'max_throughput_for_completion_latency_under_1ms',
max_throughput_for_completion_latency_under_1ms, 'ops/s',
sample_metadata))
return results
|
from base64 import b64decode
from homeassistant import config_entries
from homeassistant.const import CONF_HOST
import homeassistant.helpers.config_validation as cv
from .const import DOMAIN
def data_packet(value):
"""Decode a data packet given for a Broadlink remote."""
value = cv.string(value)
extra = len(value) % 4
if extra > 0:
value = value + ("=" * (4 - extra))
return b64decode(value)
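# Illustrative example: a value such as "JgBQAA" has len % 4 == 2, so two "="
# characters are appended before decoding, i.e. data_packet("JgBQAA") returns
# the same bytes as b64decode("JgBQAA==").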
def mac_address(mac):
"""Validate and convert a MAC address to bytes."""
mac = cv.string(mac)
if len(mac) == 17:
mac = "".join(mac[i : i + 2] for i in range(0, 17, 3))
elif len(mac) == 14:
mac = "".join(mac[i : i + 4] for i in range(0, 14, 5))
elif len(mac) != 12:
raise ValueError("Invalid MAC address")
return bytes.fromhex(mac)
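# Illustrative examples of the formats mac_address() accepts, all of which
# normalise to bytes.fromhex("34ea34b43b5a"):
#     "34:ea:34:b4:3b:5a"   17 chars, colon-separated pairs
#     "34ea.34b4.3b5a"      14 chars, dot-separated groups of four
#     "34ea34b43b5a"        12 chars, bare hex digits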
def format_mac(mac):
"""Format a MAC address."""
return ":".join([format(octet, "02x") for octet in mac])
def import_device(hass, host):
"""Create a config flow for a device."""
configured_hosts = {
entry.data.get(CONF_HOST) for entry in hass.config_entries.async_entries(DOMAIN)
}
if host not in configured_hosts:
task = hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_IMPORT},
data={CONF_HOST: host},
)
hass.async_create_task(task)
|
from threading import Lock
from itertools import islice
from operator import itemgetter
__all__ = ('LamportClock', 'timetuple')
R_CLOCK = '_lamport(clock={0}, timestamp={1}, id={2} {3!r})'
class timetuple(tuple):
"""Tuple of event clock information.
Can be used as part of a heap to keep events ordered.
Arguments:
clock (int): Event clock value.
timestamp (float): Event UNIX timestamp value.
id (str): Event host id (e.g. ``hostname:pid``).
obj (Any): Optional obj to associate with this event.
"""
__slots__ = ()
def __new__(cls, clock, timestamp, id, obj=None):
return tuple.__new__(cls, (clock, timestamp, id, obj))
def __repr__(self):
return R_CLOCK.format(*self)
def __getnewargs__(self):
return tuple(self)
def __lt__(self, other):
        # 0: clock, 1: timestamp, 2: process id
try:
A, B = self[0], other[0]
# uses logical clock value first
if A and B: # use logical clock if available
if A == B: # equal clocks use lower process id
return self[2] < other[2]
return A < B
return self[1] < other[1] # ... or use timestamp
except IndexError:
return NotImplemented
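    # Illustrative ordering: timetuple(1, 30.3, 'b') < timetuple(2, 10.1, 'a')
    # because clock 1 < 2; with equal clocks the lower process id sorts first;
    # a clock value of 0 is treated as unavailable and the comparison falls
    # back to the timestamps.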
def __gt__(self, other):
return other < self
def __le__(self, other):
return not other < self
def __ge__(self, other):
return not self < other
clock = property(itemgetter(0))
timestamp = property(itemgetter(1))
id = property(itemgetter(2))
obj = property(itemgetter(3))
class LamportClock:
"""Lamport's logical clock.
From Wikipedia:
A Lamport logical clock is a monotonically incrementing software counter
maintained in each process. It follows some simple rules:
* A process increments its counter before each event in that process;
* When a process sends a message, it includes its counter value with
the message;
* On receiving a message, the receiver process sets its counter to be
greater than the maximum of its own value and the received value
before it considers the message received.
Conceptually, this logical clock can be thought of as a clock that only
has meaning in relation to messages moving between processes. When a
process receives a message, it resynchronizes its logical clock with
the sender.
See Also:
* `Lamport timestamps`_
        * `Lamport's distributed mutex`_
.. _`Lamport Timestamps`: https://en.wikipedia.org/wiki/Lamport_timestamps
    .. _`Lamport's distributed mutex`: https://bit.ly/p99ybE
*Usage*
When sending a message use :meth:`forward` to increment the clock,
when receiving a message use :meth:`adjust` to sync with
the time stamp of the incoming message.
"""
    #: The clock's current value.
value = 0
def __init__(self, initial_value=0, Lock=Lock):
self.value = initial_value
self.mutex = Lock()
def adjust(self, other):
with self.mutex:
value = self.value = max(self.value, other) + 1
return value
def forward(self):
with self.mutex:
self.value += 1
return self.value
def sort_heap(self, h):
"""Sort heap of events.
List of tuples containing at least two elements, representing
an event, where the first element is the event's scalar clock value,
and the second element is the id of the process (usually
``"hostname:pid"``): ``sh([(clock, processid, ...?), (...)])``
The list must already be sorted, which is why we refer to it as a
heap.
The tuple will not be unpacked, so more than two elements can be
present.
        Will return the first event (lowest clock value, ties broken by process id).
"""
if h[0][0] == h[1][0]:
same = []
for PN in zip(h, islice(h, 1, None)):
if PN[0][0] != PN[1][0]:
break # Prev and Next's clocks differ
same.append(PN[0])
# return first item sorted by process id
return sorted(same, key=lambda event: event[1])[0]
# clock values unique, return first item
return h[0]
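    # Illustrative example for sort_heap(): given an already-sorted heap of
    # (clock, process_id) tuples such as
    # [(1, 'b:2'), (1, 'a:1'), (1, 'c:3'), (2, 'd:4')], the lowest clock value
    # is shared, so the tied event with the lowest process id, (1, 'a:1'),
    # is returned.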
def __str__(self):
return str(self.value)
def __repr__(self):
return f'<LamportClock: {self.value}>'
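# Minimal usage sketch (illustrative only, not part of the library API): the
# sender calls forward() before emitting a message and ships the returned
# value with it; the receiver folds that value in with adjust().
if __name__ == '__main__':  # pragma: no cover
    sender, receiver = LamportClock(), LamportClock()
    stamp = sender.forward()   # sender ticks before sending: value becomes 1
    receiver.adjust(stamp)     # receiver syncs: max(0, 1) + 1 == 2
    assert receiver.value == 2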
|
import numpy as np
import unittest
from chainer.dataset import DatasetMixin
from chainer import testing
from chainercv.utils import assert_is_label_dataset
class LabelDataset(DatasetMixin):
def __init__(self, color, *options):
self.color = color
self.options = options
def __len__(self):
return 10
def get_example(self, i):
if self.color:
img = np.random.randint(0, 256, size=(3, 48, 64))
else:
img = np.random.randint(0, 256, size=(1, 48, 64))
label = np.random.randint(0, 20, dtype='i')
return (img, label) + self.options
class InvalidSampleSizeDataset(LabelDataset):
def get_example(self, i):
img, label = super(
InvalidSampleSizeDataset, self).get_example(i)[:2]
return img
class InvalidImageDataset(LabelDataset):
def get_example(self, i):
img, label = super(InvalidImageDataset, self).get_example(i)[:2]
return img[0], label
class InvalidLabelDataset(LabelDataset):
def get_example(self, i):
img, label = super(InvalidLabelDataset, self).get_example(i)[:2]
label += 1000
return img, label
@testing.parameterize(*(
testing.product_dict(
[
{'dataset': LabelDataset, 'valid': True},
{'dataset': LabelDataset, 'valid': True,
'option': 'option'},
{'dataset': InvalidSampleSizeDataset, 'valid': False},
{'dataset': InvalidImageDataset, 'valid': False},
{'dataset': InvalidLabelDataset, 'valid': False}
],
[
{'color': False},
{'color': True}
]
)
))
class TestAssertIsLabelDataset(unittest.TestCase):
def test_assert_is_label_dataset(self):
if hasattr(self, 'option'):
dataset = self.dataset(self.color, self.option)
else:
dataset = self.dataset(self.color)
if self.valid:
assert_is_label_dataset(dataset, 20, color=self.color)
else:
with self.assertRaises(AssertionError):
assert_is_label_dataset(dataset, 20, color=self.color)
testing.run_module(__name__, __file__)
|
import logging
import os
import sys
import colorama
from ansible.module_utils.parsing.convert_bool import boolean as to_bool
def should_do_markup():
py_colors = os.environ.get('PY_COLORS', None)
if py_colors is not None:
return to_bool(py_colors, strict=False)
return sys.stdout.isatty() and os.environ.get('TERM') != 'dumb'
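# Illustrative behaviour of should_do_markup(): PY_COLORS=1 forces colour even
# when output is piped, PY_COLORS=0 disables it, and when PY_COLORS is unset
# colour is only used for an interactive terminal whose TERM is not 'dumb'.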
colorama.init(autoreset=True, strip=not should_do_markup())
SUCCESS = 100
OUT = 101
class LogFilter(object):
"""
    A custom log filter which excludes log messages above the configured
    level.
"""
def __init__(self, level):
self.__level = level
def filter(self, logRecord): # pragma: no cover
# https://docs.python.org/3/library/logging.html#logrecord-attributes
return logRecord.levelno <= self.__level
class CustomLogger(logging.getLoggerClass()):
"""
A custom logging class which adds additional methods to the logger. These
methods serve as syntactic sugar for formatting log messages.
"""
def __init__(self, name, level=logging.NOTSET):
        super(CustomLogger, self).__init__(name, level)
logging.addLevelName(SUCCESS, 'SUCCESS')
logging.addLevelName(OUT, 'OUT')
def success(self, msg, *args, **kwargs):
if self.isEnabledFor(SUCCESS):
self._log(SUCCESS, msg, args, **kwargs)
def out(self, msg, *args, **kwargs):
if self.isEnabledFor(OUT):
self._log(OUT, msg, args, **kwargs)
class TrailingNewlineFormatter(logging.Formatter):
"""
    A custom logging formatter which strips trailing newlines from messages.
"""
def format(self, record):
if record.msg:
record.msg = record.msg.rstrip()
return super(TrailingNewlineFormatter, self).format(record)
def get_logger(name=None):
"""
    Build a logger with the given name and return it.
:param name: The name for the logger. This is usually the module
name, ``__name__``.
:return: logger object
"""
logging.setLoggerClass(CustomLogger)
logger = logging.getLogger(name)
logger.setLevel(logging.DEBUG)
logger.addHandler(_get_info_handler())
logger.addHandler(_get_out_handler())
logger.addHandler(_get_warn_handler())
logger.addHandler(_get_error_handler())
logger.addHandler(_get_critical_handler())
logger.addHandler(_get_success_handler())
logger.propagate = False
return logger
def _get_info_handler():
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.INFO)
handler.addFilter(LogFilter(logging.INFO))
handler.setFormatter(
TrailingNewlineFormatter('--> {}'.format(cyan_text('%(message)s'))))
return handler
def _get_out_handler():
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(OUT)
handler.addFilter(LogFilter(OUT))
handler.setFormatter(TrailingNewlineFormatter(' %(message)s'))
return handler
def _get_warn_handler():
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(logging.WARN)
handler.addFilter(LogFilter(logging.WARN))
handler.setFormatter(TrailingNewlineFormatter(yellow_text('%(message)s')))
return handler
def _get_error_handler():
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.ERROR)
handler.addFilter(LogFilter(logging.ERROR))
handler.setFormatter(TrailingNewlineFormatter(red_text('%(message)s')))
return handler
def _get_critical_handler():
handler = logging.StreamHandler(sys.stderr)
handler.setLevel(logging.CRITICAL)
handler.addFilter(LogFilter(logging.CRITICAL))
handler.setFormatter(
TrailingNewlineFormatter(red_text('ERROR: %(message)s')))
return handler
def _get_success_handler():
handler = logging.StreamHandler(sys.stdout)
handler.setLevel(SUCCESS)
handler.addFilter(LogFilter(SUCCESS))
handler.setFormatter(TrailingNewlineFormatter(green_text('%(message)s')))
return handler
def red_text(msg):
return color_text(colorama.Fore.RED, msg)
def yellow_text(msg):
return color_text(colorama.Fore.YELLOW, msg)
def green_text(msg):
return color_text(colorama.Fore.GREEN, msg)
def cyan_text(msg):
return color_text(colorama.Fore.CYAN, msg)
def color_text(color, msg):
return '{}{}{}'.format(color, msg, colorama.Style.RESET_ALL)
|
import json
import sqlite3
from hypothesis import example, given, settings
from hypothesis.strategies import sets, integers
from coverage import env
from coverage.backward import byte_to_int
from coverage.numbits import (
nums_to_numbits, numbits_to_nums, numbits_union, numbits_intersection,
numbits_any_intersection, num_in_numbits, register_sqlite_functions,
)
from tests.coveragetest import CoverageTest
# Hypothesis-generated line number data
line_numbers = integers(min_value=1, max_value=9999)
line_number_sets = sets(line_numbers)
# When coverage-testing ourselves, hypothesis complains about a test being
# flaky because the first run exceeds the deadline (and fails), and the second
# run succeeds. Disable the deadline if we are coverage-testing.
default_settings = settings()
if env.METACOV:
default_settings = settings(default_settings, deadline=None)
def good_numbits(numbits):
"""Assert that numbits is good."""
# It shouldn't end with a zero byte, that should have been trimmed off.
assert (not numbits) or (byte_to_int(numbits[-1]) != 0)
class NumbitsOpTest(CoverageTest):
"""Tests of the numbits operations in numbits.py."""
run_in_temp_dir = False
@given(line_number_sets)
@settings(default_settings)
def test_conversion(self, nums):
numbits = nums_to_numbits(nums)
good_numbits(numbits)
nums2 = numbits_to_nums(numbits)
self.assertEqual(nums, set(nums2))
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_union(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
nbu = numbits_union(nb1, nb2)
good_numbits(nbu)
union = numbits_to_nums(nbu)
self.assertEqual(nums1 | nums2, set(union))
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_intersection(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
nbi = numbits_intersection(nb1, nb2)
good_numbits(nbi)
intersection = numbits_to_nums(nbi)
self.assertEqual(nums1 & nums2, set(intersection))
@given(line_number_sets, line_number_sets)
@settings(default_settings)
def test_any_intersection(self, nums1, nums2):
nb1 = nums_to_numbits(nums1)
good_numbits(nb1)
nb2 = nums_to_numbits(nums2)
good_numbits(nb2)
inter = numbits_any_intersection(nb1, nb2)
expect = bool(nums1 & nums2)
self.assertEqual(expect, bool(inter))
@given(line_numbers, line_number_sets)
@settings(default_settings)
@example(152, {144})
def test_num_in_numbits(self, num, nums):
numbits = nums_to_numbits(nums)
good_numbits(numbits)
is_in = num_in_numbits(num, numbits)
self.assertEqual(num in nums, is_in)
class NumbitsSqliteFunctionTest(CoverageTest):
"""Tests of the SQLite integration for numbits functions."""
run_in_temp_dir = False
def setUp(self):
super(NumbitsSqliteFunctionTest, self).setUp()
conn = sqlite3.connect(":memory:")
register_sqlite_functions(conn)
self.cursor = conn.cursor()
self.cursor.execute("create table data (id int, numbits blob)")
self.cursor.executemany(
"insert into data (id, numbits) values (?, ?)",
[
(i, nums_to_numbits(range(i, 100, i)))
for i in range(1, 11)
]
)
self.addCleanup(self.cursor.close)
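    # A note on the fixture above (derived from setUp): row id=i stores the
    # numbits of range(i, 100, i), i.e. the multiples of i below 100. For
    # example, id=7 holds {7, 14, ..., 98} and id=9 holds {9, 18, ..., 99};
    # their only common member is 63, which the intersection test relies on.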
def test_numbits_union(self):
res = self.cursor.execute(
"select numbits_union("
"(select numbits from data where id = 7),"
"(select numbits from data where id = 9)"
")"
)
answer = numbits_to_nums(list(res)[0][0])
self.assertEqual(
[7, 9, 14, 18, 21, 27, 28, 35, 36, 42, 45, 49,
54, 56, 63, 70, 72, 77, 81, 84, 90, 91, 98, 99],
answer
)
def test_numbits_intersection(self):
res = self.cursor.execute(
"select numbits_intersection("
"(select numbits from data where id = 7),"
"(select numbits from data where id = 9)"
")"
)
answer = numbits_to_nums(list(res)[0][0])
self.assertEqual([63], answer)
def test_numbits_any_intersection(self):
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([3, 4, 5]))
)
answer = [any_inter for (any_inter,) in res]
self.assertEqual([1], answer)
res = self.cursor.execute(
"select numbits_any_intersection(?, ?)",
(nums_to_numbits([1, 2, 3]), nums_to_numbits([7, 8, 9]))
)
answer = [any_inter for (any_inter,) in res]
self.assertEqual([0], answer)
def test_num_in_numbits(self):
res = self.cursor.execute("select id, num_in_numbits(12, numbits) from data order by id")
answer = [is_in for (id, is_in) in res]
self.assertEqual([1, 1, 1, 1, 0, 1, 0, 0, 0, 0], answer)
def test_numbits_to_nums(self):
res = self.cursor.execute("select numbits_to_nums(?)", [nums_to_numbits([1, 2, 3])])
self.assertEqual([1, 2, 3], json.loads(res.fetchone()[0]))
|
import pytest
from matchzoo import tasks
@pytest.mark.parametrize("task_type", [
tasks.Ranking, tasks.Classification
])
def test_task_listings(task_type):
assert task_type.list_available_losses()
assert task_type.list_available_metrics()
@pytest.mark.parametrize("arg", [None, -1, 0, 1])
def test_classification_instantiation_failure(arg):
with pytest.raises(Exception):
tasks.Classification(num_classes=arg)
@pytest.mark.parametrize("arg", [2, 10, 2048])
def test_classification_num_classes(arg):
task = tasks.Classification(num_classes=arg)
assert task.num_classes == arg
|
import os
import re
import subprocess
import diamond.collector
from diamond.collector import str_to_bool
class DiskTemperatureCollector(diamond.collector.Collector):
def process_config(self):
super(DiskTemperatureCollector, self).process_config()
self.devices = re.compile(self.config['devices'])
def get_default_config_help(self):
config_help = super(DiskTemperatureCollector,
self).get_default_config_help()
config_help.update({
'devices': "device regex to collect stats on",
'bin': 'The path to the hddtemp binary',
'use_sudo': 'Use sudo?',
'sudo_cmd': 'Path to sudo',
})
return config_help
def get_default_config(self):
"""
Returns default configuration options.
"""
config = super(DiskTemperatureCollector, self).get_default_config()
config.update({
'path': 'disktemp',
'bin': 'hddtemp',
'use_sudo': False,
'sudo_cmd': '/usr/bin/sudo',
'devices': '^disk[0-9]$|^sd[a-z]$|^hd[a-z]$'
})
return config
def get_temp(self, device):
command = [self.config['bin'], '-n', device]
if str_to_bool(self.config['use_sudo']):
command.insert(0, self.config['sudo_cmd'])
return subprocess.Popen(command, stdout=subprocess.PIPE)
def match_device(self, device, path):
m = self.devices.match(device)
if m:
key = device
# If the regex has a capture group for pretty printing, pick
# the last matched capture group
if self.devices.groups > 0:
key = '.'.join(filter(None, [g for g in m.groups()]))
            return {key: self.get_temp(os.path.join(path, device))}
return {}
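    # Illustrative example: with devices = '^(sd[a-z])[0-9]*$' the capture
    # group supplies the metric key, so a partition such as 'sda1' is reported
    # under 'sda.Temperature' rather than 'sda1.Temperature'; with the default
    # pattern (no capture group) the full device name is used as the key.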
def collect(self):
"""
Collect and publish disk temperatures
"""
instances = {}
# Support disks such as /dev/(sd.*)
for device in os.listdir('/dev/'):
instances.update(self.match_device(device, '/dev/'))
# Support disk by id such as /dev/disk/by-id/wwn-(.*)
for device_id in os.listdir('/dev/disk/by-id/'):
            instances.update(self.match_device(device_id, '/dev/disk/by-id/'))
metrics = {}
for device, p in instances.items():
output = p.communicate()[0].strip()
try:
metrics[device + ".Temperature"] = float(output)
except:
self.log.warn('Disk temperature retrieval failed on ' + device)
for metric in metrics.keys():
self.publish(metric, metrics[metric])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import json
import logging
import time
from absl import flags
from perfkitbenchmarker import data
from perfkitbenchmarker import relational_db
from perfkitbenchmarker.providers import gcp
from perfkitbenchmarker.providers.gcp import gce_network
from perfkitbenchmarker.providers.gcp import util
from six.moves import range
FLAGS = flags.FLAGS
GCP_DATABASE_VERSION_MAPPING = {
relational_db.MYSQL: {
'5.5': 'MYSQL_5_5',
'5.6': 'MYSQL_5_6',
'5.7': 'MYSQL_5_7'
},
relational_db.POSTGRES: {
'9.6': 'POSTGRES_9_6',
'10': 'POSTGRES_10',
'11': 'POSTGRES_11',
'12': 'POSTGRES_12'
},
relational_db.SQLSERVER: {
'2017_Standard': 'SQLSERVER_2017_Standard',
'2017_Enterprise': 'SQLSERVER_2017_ENTERPRISE',
'2017_Express': 'SQLSERVER_2017_EXPRESS',
'2017_Web': 'SQLSERVER_2017_WEB'
}
}
DEFAULT_MYSQL_VERSION = '5.7'
DEFAULT_POSTGRES_VERSION = '9.6'
DEFAULT_SQL_SERVER_VERSION = '2017_Standard'
DEFAULT_MYSQL_PORT = 3306
DEFAULT_POSTGRES_PORT = 5432
DEFAULT_SQLSERVER_PORT = 1433
DEFAULT_PORTS = {
relational_db.MYSQL: DEFAULT_MYSQL_PORT,
relational_db.POSTGRES: DEFAULT_POSTGRES_PORT,
relational_db.SQLSERVER: DEFAULT_SQLSERVER_PORT,
}
DEFAULT_ENGINE_VERSIONS = {
relational_db.MYSQL: DEFAULT_MYSQL_VERSION,
relational_db.POSTGRES: DEFAULT_POSTGRES_VERSION,
relational_db.SQLSERVER: DEFAULT_SQL_SERVER_VERSION,
}
# PostgreSQL restrictions on memory.
# Source: https://cloud.google.com/sql/docs/postgres/instance-settings.
CUSTOM_MACHINE_CPU_MEM_RATIO_LOWER_BOUND = 0.9
CUSTOM_MACHINE_CPU_MEM_RATIO_UPPER_BOUND = 6.5
MIN_CUSTOM_MACHINE_MEM_MB = 3840
IS_READY_TIMEOUT = 600 # 10 minutes
DELETE_INSTANCE_TIMEOUT = 600 # 10 minutes
CREATION_TIMEOUT = 1200 # 20 minutes
class UnsupportedDatabaseEngineException(Exception):
pass
class GCPRelationalDb(relational_db.BaseRelationalDb):
"""A GCP CloudSQL database resource.
This class contains logic required to provision and teardown the database.
Currently, the database will be open to the world (0.0.0.0/0) which is not
ideal; however, a password is still required to connect. Currently only
MySQL 5.7 and Postgres 9.6 are supported.
"""
CLOUD = gcp.CLOUD
def __init__(self, relational_db_spec):
super(GCPRelationalDb, self).__init__(relational_db_spec)
self.project = FLAGS.project or util.GetDefaultProject()
self.instance_id = 'pkb-db-instance-' + FLAGS.run_uri
self.unmanaged_db_exists = None if self.is_managed_db else False
def _GetAuthorizedNetworks(self, vms):
"""Get CIDR connections for list of VM specs that need to access the db."""
for vm in vms:
if not vm.HasIpAddress:
raise Exception('Client vm needs to be initialized before database can '
'discover authorized network.')
# create the CIDR of the client VM that is configured to access
# the database
return ','.join('{0}/32'.format(vm.ip_address) for vm in vms)
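  # Illustrative example: two client VMs with ip_address values 10.0.0.2 and
  # 10.0.0.3 produce '10.0.0.2/32,10.0.0.3/32', which is passed to the
  # --authorized-networks flag below.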
def _CreateGcloudSqlInstance(self):
storage_size = self.spec.db_disk_spec.disk_size
instance_zone = self.spec.db_spec.zone
authorized_network = self._GetAuthorizedNetworks([self.client_vm])
database_version_string = self._GetEngineVersionString(
self.spec.engine, self.spec.engine_version)
cmd_string = [
self,
'beta',
'sql',
'instances',
'create',
self.instance_id,
'--quiet',
'--format=json',
'--activation-policy=ALWAYS',
'--assign-ip',
'--authorized-networks=%s' % authorized_network,
'--zone=%s' % instance_zone,
'--database-version=%s' % database_version_string,
'--storage-size=%d' % storage_size,
'--labels=%s' % util.MakeFormattedDefaultTags(),
]
if self.spec.engine == relational_db.MYSQL:
cmd_string.append('--enable-bin-log')
if self.spec.engine == relational_db.SQLSERVER:
# `--root-password` is required when creating SQL Server instances.
cmd_string.append('--root-password={0}'.format(
self.spec.database_password))
if (self.spec.db_spec.cpus and self.spec.db_spec.memory):
self._ValidateSpec()
memory = self.spec.db_spec.memory
cpus = self.spec.db_spec.cpus
self._ValidateMachineType(memory, cpus)
cmd_string.append('--cpu={}'.format(cpus))
cmd_string.append('--memory={}MiB'.format(memory))
elif hasattr(self.spec.db_spec, 'machine_type'):
machine_type_flag = '--tier=%s' % self.spec.db_spec.machine_type
cmd_string.append(machine_type_flag)
else:
raise Exception('Unspecified machine type')
if self.spec.high_availability:
cmd_string.append(self._GetHighAvailabilityFlag())
if self.spec.backup_enabled:
cmd_string.append('--backup')
cmd_string.append('--backup-start-time={}'.format(
self.spec.backup_start_time))
else:
cmd_string.append('--no-backup')
cmd = util.GcloudCommand(*cmd_string)
cmd.flags['project'] = self.project
_, stderr, retcode = cmd.Issue(timeout=CREATION_TIMEOUT)
util.CheckGcloudResponseKnownFailures(stderr, retcode)
if FLAGS.mysql_flags:
cmd_string = [
self, 'sql', 'instances', 'patch', self.instance_id,
'--database-flags=%s' % ','.join(FLAGS.mysql_flags)
]
cmd = util.GcloudCommand(*cmd_string)
_, stderr, _ = cmd.Issue()
if stderr:
raise Exception('Invalid MySQL flags: %s' % stderr)
def _Create(self):
"""Creates the Cloud SQL instance and authorizes traffic from anywhere.
Raises:
UnsupportedDatabaseEngineException:
if the database is unmanaged and the engine isn't MYSQL.
Exception: if an invalid MySQL flag was used.
"""
if self.spec.engine == relational_db.MYSQL:
self._InstallMySQLClient()
if self.is_managed_db:
self._CreateGcloudSqlInstance()
else:
self.endpoint = self.server_vm.ip_address
if self.spec.engine == relational_db.MYSQL:
self._InstallMySQLServer()
else:
raise UnsupportedDatabaseEngineException(
'Engine {0} not supported for unmanaged databases.'.format(
self.spec.engine))
self.firewall = gce_network.GceFirewall()
self.firewall.AllowPort(
self.server_vm, 3306, source_range=[self.client_vm.ip_address])
self.unmanaged_db_exists = True
self._ApplyMySqlFlags()
def _GetHighAvailabilityFlag(self):
"""Returns a flag that enables high-availability for the specified engine.
Returns:
Flag (as string) to be appended to the gcloud sql create command.
Raises:
UnsupportedDatabaseEngineException:
if engine does not support high availability.
"""
if self.spec.engine == relational_db.MYSQL:
self.replica_instance_id = 'replica-' + self.instance_id
return '--failover-replica-name=' + self.replica_instance_id
elif (self.spec.engine == relational_db.POSTGRES or
self.spec.engine == relational_db.SQLSERVER):
return '--availability-type=REGIONAL'
else:
raise UnsupportedDatabaseEngineException(
'High availability not supported on engine {0}'.format(
self.spec.engine))
def _ValidateSpec(self):
"""Validates PostgreSQL spec for CPU and memory.
Raises:
data.ResourceNotFound: On missing memory or cpus in postgres benchmark
config.
"""
if not hasattr(self.spec.db_spec, 'cpus') or not self.spec.db_spec.cpus:
raise data.ResourceNotFound(
'Must specify cpu count in benchmark config. See https://'
'cloud.google.com/sql/docs/postgres/instance-settings for more '
'details about size restrictions.')
if not hasattr(self.spec.db_spec, 'memory') or not self.spec.db_spec.memory:
raise data.ResourceNotFound(
'Must specify a memory amount in benchmark config. See https://'
'cloud.google.com/sql/docs/postgres/instance-settings for more '
'details about size restrictions.')
def _ValidateMachineType(self, memory, cpus):
"""Validates the custom machine type configuration.
Memory and CPU must be within the parameters described here:
https://cloud.google.com/sql/docs/postgres/instance-settings
Args:
memory: (int) in MiB
cpus: (int)
Raises:
ValueError on invalid configuration.
"""
if cpus not in [1] + list(range(2, 97, 2)):
raise ValueError(
          'CPUs (%i) must be 1 or an even number between 2 and 96, '
'inclusive.' % cpus)
if memory % 256 != 0:
raise ValueError(
          'Total memory (%dMiB) for a custom machine must be a multiple '
'of 256MiB.' % memory)
ratio = memory / 1024.0 / cpus
if (ratio < CUSTOM_MACHINE_CPU_MEM_RATIO_LOWER_BOUND or
ratio > CUSTOM_MACHINE_CPU_MEM_RATIO_UPPER_BOUND):
raise ValueError(
'The memory (%.2fGiB) per vCPU (%d) of a custom machine '
'type must be between %.2f GiB and %.2f GiB per vCPU, '
'inclusive.' %
(memory / 1024.0, cpus, CUSTOM_MACHINE_CPU_MEM_RATIO_LOWER_BOUND,
CUSTOM_MACHINE_CPU_MEM_RATIO_UPPER_BOUND))
if memory < MIN_CUSTOM_MACHINE_MEM_MB:
      raise ValueError('The total memory (%dMiB) for a custom machine type '
'must be at least %dMiB.' %
(memory,
MIN_CUSTOM_MACHINE_MEM_MB))
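  # Worked example (illustrative): a custom machine with 4 vCPUs and 16384 MiB
  # passes every check above: 4 is an allowed CPU count, 16384 is a multiple
  # of 256, the ratio 16384 / 1024 / 4 = 4.0 GiB per vCPU lies inside the
  # 0.9-6.5 window, and 16384 MiB exceeds the 3840 MiB minimum.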
def _Delete(self):
"""Deletes the underlying resource.
Implementations of this method should be idempotent since it may
be called multiple times, even if the resource has already been
deleted.
"""
if not self.is_managed_db:
if hasattr(self, 'firewall'):
self.firewall.DisallowAllPorts()
self.unmanaged_db_exists = False
self.server_vm.RemoteCommand('sudo cat /var/log/mysql/error.log')
self.server_vm.RemoteCommand(
'mysql %s -e "SHOW GLOBAL STATUS LIKE \'Aborted_connects\';"' %
self.MakeMysqlConnectionString(use_localhost=True))
self.server_vm.RemoteCommand(
'mysql %s -e "SHOW GLOBAL STATUS LIKE \'Aborted_clients\';"' %
self.MakeMysqlConnectionString(use_localhost=True))
return
if hasattr(self, 'replica_instance_id'):
cmd = util.GcloudCommand(self, 'sql', 'instances', 'delete',
self.replica_instance_id, '--quiet')
cmd.Issue(raise_on_failure=False, timeout=DELETE_INSTANCE_TIMEOUT)
cmd = util.GcloudCommand(self, 'sql', 'instances', 'delete',
self.instance_id, '--quiet', '--async')
cmd.Issue(raise_on_failure=False, timeout=DELETE_INSTANCE_TIMEOUT)
def _Exists(self):
"""Returns true if the underlying resource exists.
Supplying this method is optional. If it is not implemented then the
default is to assume success when _Create and _Delete do not raise
exceptions.
"""
if not self.is_managed_db:
return self.unmanaged_db_exists
cmd = util.GcloudCommand(self, 'sql', 'instances', 'describe',
self.instance_id)
stdout, _, _ = cmd.Issue(raise_on_failure=False)
try:
json_output = json.loads(stdout)
return json_output['kind'] == 'sql#instance'
except:
return False
def _IsDBInstanceReady(self, instance_id, timeout=IS_READY_TIMEOUT):
cmd = util.GcloudCommand(self, 'sql', 'instances', 'describe',
instance_id)
start_time = datetime.datetime.now()
while True:
if (datetime.datetime.now() - start_time).seconds > timeout:
logging.exception('Timeout waiting for sql instance to be ready')
return False
stdout, _, _ = cmd.Issue(suppress_warning=True, raise_on_failure=False)
try:
json_output = json.loads(stdout)
state = json_output['state']
logging.info('Instance %s state: %s', instance_id, state)
if state == 'RUNNABLE':
break
except:
logging.exception('Error attempting to read stdout. Creation failure.')
return False
time.sleep(5)
return True
def _IsReady(self, timeout=IS_READY_TIMEOUT):
"""Return true if the underlying resource is ready.
Supplying this method is optional. Use it when a resource can exist
without being ready. If the subclass does not implement
it then it just returns true.
Args:
timeout: how long to wait when checking if the DB is ready.
Returns:
True if the resource was ready in time, False if the wait timed out.
"""
if not self.is_managed_db:
return self._IsReadyUnmanaged()
if not self._IsDBInstanceReady(self.instance_id, timeout):
return False
if self.spec.high_availability and hasattr(self, 'replica_instance_id'):
if not self._IsDBInstanceReady(self.replica_instance_id, timeout):
return False
cmd = util.GcloudCommand(
self, 'sql', 'instances', 'describe', self.instance_id)
stdout, _, _ = cmd.Issue()
json_output = json.loads(stdout)
self.endpoint = self._ParseEndpoint(json_output)
self.port = self._GetDefaultPort(self.spec.engine)
return True
def _ParseEndpoint(self, describe_instance_json):
"""Returns the IP of the resource given the metadata as JSON.
Args:
describe_instance_json: JSON output.
Returns:
public IP address (string)
"""
if describe_instance_json is None:
return ''
try:
selflink = describe_instance_json['ipAddresses'][0]['ipAddress']
except:
selflink = ''
logging.exception('Error attempting to read stdout. Creation failure.')
return selflink
def _PostCreate(self):
"""Creates the PKB user and sets the password.
"""
if not self.is_managed_db:
return
# The hostname '%' means unrestricted access from any host.
cmd = util.GcloudCommand(
self, 'sql', 'users', 'create', self.spec.database_username,
'--host=%', '--instance={0}'.format(self.instance_id),
'--password={0}'.format(self.spec.database_password))
_, _, _ = cmd.Issue()
    # This is a fix for b/71594701: by default, the empty password on the
    # 'postgres' user is a security violation, so change it to a
    # non-default value.
if self.spec.engine == relational_db.POSTGRES:
cmd = util.GcloudCommand(
self, 'sql', 'users', 'set-password', 'postgres',
'--host=dummy_host', '--instance={0}'.format(self.instance_id),
'--password={0}'.format(self.spec.database_password))
_, _, _ = cmd.Issue()
@staticmethod
def GetDefaultEngineVersion(engine):
"""Returns the default version of a given database engine.
Args:
engine (string): type of database (my_sql or postgres).
Returns:
(string): Default version for the given database engine.
"""
if engine not in DEFAULT_ENGINE_VERSIONS:
raise NotImplementedError('Default engine not specified for '
'engine {0}'.format(engine))
return DEFAULT_ENGINE_VERSIONS[engine]
@staticmethod
def _GetEngineVersionString(engine, version):
"""Returns CloudSQL-specific version string for givin database engine.
Args:
engine: database engine
version: engine version
Returns:
(string): CloudSQL-specific name for requested engine and version.
Raises:
NotImplementedError on invalid engine / version combination.
"""
if engine not in GCP_DATABASE_VERSION_MAPPING:
valid_databases = ', '.join(GCP_DATABASE_VERSION_MAPPING.keys())
raise NotImplementedError(
          'Database {0} is not supported, supported '
'databases include {1}'.format(engine, valid_databases))
version_mapping = GCP_DATABASE_VERSION_MAPPING[engine]
if version not in version_mapping:
valid_versions = ', '.join(version_mapping.keys())
raise NotImplementedError(
          'Version {0} is not supported, supported '
'versions include {1}'.format(version, valid_versions))
return version_mapping[version]
@staticmethod
def _GetDefaultPort(engine):
"""Returns default port for the db engine from the spec."""
if engine not in DEFAULT_PORTS:
raise NotImplementedError('Default port not specified for '
'engine {0}'.format(engine))
return DEFAULT_PORTS[engine]
def _FailoverHA(self):
"""Fail over from master to replica."""
cmd_string = [
self,
'sql',
'instances',
'failover',
self.instance_id,
]
cmd = util.GcloudCommand(*cmd_string)
cmd.flags['project'] = self.project
    # This command doesn't support the 'format' specifier.
del cmd.flags['format']
cmd.IssueRetryable()
|
from typing import Callable, List, Optional
from pyisy.constants import (
CMD_CLIMATE_FAN_SETTING,
CMD_CLIMATE_MODE,
PROP_HEAT_COOL_STATE,
PROP_HUMIDITY,
PROP_SETPOINT_COOL,
PROP_SETPOINT_HEAT,
PROP_UOM,
PROTO_INSTEON,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
DOMAIN as CLIMATE,
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
ATTR_TEMPERATURE,
PRECISION_TENTHS,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
_LOGGER,
DOMAIN as ISY994_DOMAIN,
HA_FAN_TO_ISY,
HA_HVAC_TO_ISY,
ISY994_NODES,
ISY_HVAC_MODES,
UOM_FAN_MODES,
UOM_HVAC_ACTIONS,
UOM_HVAC_MODE_GENERIC,
UOM_HVAC_MODE_INSTEON,
UOM_ISY_CELSIUS,
UOM_ISY_FAHRENHEIT,
UOM_ISYV4_NONE,
UOM_TO_STATES,
)
from .entity import ISYNodeEntity
from .helpers import convert_isy_value_to_hass, migrate_old_unique_ids
ISY_SUPPORTED_FEATURES = (
SUPPORT_FAN_MODE | SUPPORT_TARGET_TEMPERATURE | SUPPORT_TARGET_TEMPERATURE_RANGE
)
async def async_setup_entry(
hass: HomeAssistantType,
entry: ConfigEntry,
async_add_entities: Callable[[list], None],
) -> bool:
"""Set up the ISY994 thermostat platform."""
entities = []
hass_isy_data = hass.data[ISY994_DOMAIN][entry.entry_id]
for node in hass_isy_data[ISY994_NODES][CLIMATE]:
entities.append(ISYThermostatEntity(node))
await migrate_old_unique_ids(hass, CLIMATE, entities)
async_add_entities(entities)
class ISYThermostatEntity(ISYNodeEntity, ClimateEntity):
"""Representation of an ISY994 thermostat entity."""
def __init__(self, node) -> None:
"""Initialize the ISY Thermostat entity."""
super().__init__(node)
self._node = node
self._uom = self._node.uom
if isinstance(self._uom, list):
self._uom = self._node.uom[0]
self._hvac_action = None
self._hvac_mode = None
self._fan_mode = None
self._temp_unit = None
self._current_humidity = 0
self._target_temp_low = 0
self._target_temp_high = 0
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return ISY_SUPPORTED_FEATURES
@property
def precision(self) -> str:
"""Return the precision of the system."""
return PRECISION_TENTHS
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
uom = self._node.aux_properties.get(PROP_UOM)
if not uom:
return self.hass.config.units.temperature_unit
if uom.value == UOM_ISY_CELSIUS:
return TEMP_CELSIUS
if uom.value == UOM_ISY_FAHRENHEIT:
return TEMP_FAHRENHEIT
@property
def current_humidity(self) -> Optional[int]:
"""Return the current humidity."""
humidity = self._node.aux_properties.get(PROP_HUMIDITY)
if not humidity:
return None
return int(humidity.value)
@property
def hvac_mode(self) -> Optional[str]:
"""Return hvac operation ie. heat, cool mode."""
hvac_mode = self._node.aux_properties.get(CMD_CLIMATE_MODE)
if not hvac_mode:
return None
# Which state values used depends on the mode property's UOM:
uom = hvac_mode.uom
# Handle special case for ISYv4 Firmware:
if uom in (UOM_ISYV4_NONE, ""):
uom = (
UOM_HVAC_MODE_INSTEON
if self._node.protocol == PROTO_INSTEON
else UOM_HVAC_MODE_GENERIC
)
return UOM_TO_STATES[uom].get(hvac_mode.value)
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
return ISY_HVAC_MODES
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation if supported."""
hvac_action = self._node.aux_properties.get(PROP_HEAT_COOL_STATE)
if not hvac_action:
return None
return UOM_TO_STATES[UOM_HVAC_ACTIONS].get(hvac_action.value)
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return convert_isy_value_to_hass(
self._node.status, self._uom, self._node.prec, 1
)
@property
def target_temperature_step(self) -> Optional[float]:
"""Return the supported step of target temperature."""
return 1.0
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_COOL:
return self.target_temperature_high
if self.hvac_mode == HVAC_MODE_HEAT:
return self.target_temperature_low
return None
@property
def target_temperature_high(self) -> Optional[float]:
"""Return the highbound target temperature we try to reach."""
target = self._node.aux_properties.get(PROP_SETPOINT_COOL)
if not target:
return None
return convert_isy_value_to_hass(target.value, target.uom, target.prec, 1)
@property
def target_temperature_low(self) -> Optional[float]:
"""Return the lowbound target temperature we try to reach."""
target = self._node.aux_properties.get(PROP_SETPOINT_HEAT)
if not target:
return None
return convert_isy_value_to_hass(target.value, target.uom, target.prec, 1)
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return [FAN_AUTO, FAN_ON]
@property
def fan_mode(self) -> str:
"""Return the current fan mode ie. auto, on."""
fan_mode = self._node.aux_properties.get(CMD_CLIMATE_FAN_SETTING)
if not fan_mode:
return None
return UOM_TO_STATES[UOM_FAN_MODES].get(fan_mode.value)
def set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
target_temp = kwargs.get(ATTR_TEMPERATURE)
target_temp_low = kwargs.get(ATTR_TARGET_TEMP_LOW)
target_temp_high = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if target_temp is not None:
if self.hvac_mode == HVAC_MODE_COOL:
target_temp_high = target_temp
if self.hvac_mode == HVAC_MODE_HEAT:
target_temp_low = target_temp
if target_temp_low is not None:
self._node.set_climate_setpoint_heat(int(target_temp_low))
# Presumptive setting--event stream will correct if cmd fails:
self._target_temp_low = target_temp_low
if target_temp_high is not None:
self._node.set_climate_setpoint_cool(int(target_temp_high))
# Presumptive setting--event stream will correct if cmd fails:
self._target_temp_high = target_temp_high
self.schedule_update_ha_state()
def set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
_LOGGER.debug("Requested fan mode %s", fan_mode)
self._node.set_fan_mode(HA_FAN_TO_ISY.get(fan_mode))
# Presumptive setting--event stream will correct if cmd fails:
self._fan_mode = fan_mode
self.schedule_update_ha_state()
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
_LOGGER.debug("Requested operation mode %s", hvac_mode)
self._node.set_climate_mode(HA_HVAC_TO_ISY.get(hvac_mode))
# Presumptive setting--event stream will correct if cmd fails:
self._hvac_mode = hvac_mode
self.schedule_update_ha_state()
|
import subprocess
import tempfile
from base64 import b64encode
from django.http.request import HttpRequest
from django.urls import reverse
from weblate.gitexport.models import get_export_url
from weblate.gitexport.views import authenticate
from weblate.trans.models import Project
from weblate.trans.tests.test_models import BaseLiveServerTestCase
from weblate.trans.tests.test_views import ViewTestCase
from weblate.trans.tests.utils import RepoTestMixin, create_test_user
from weblate.utils.files import remove_tree
class GitExportTest(ViewTestCase):
def setUp(self):
super().setUp()
# We don't want standard Django authentication
self.client.logout()
def get_auth_string(self, code):
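        # Build an HTTP basic auth header; e.g. user "demo" with token "secret"
        # encodes to "basic ZGVtbzpzZWNyZXQ=".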
encoded = b64encode(f"{self.user.username}:{code}".encode())
return "basic " + encoded.decode("ascii")
def test_authenticate_invalid(self):
request = HttpRequest()
self.assertFalse(authenticate(request, "foo"))
def test_authenticate_missing(self):
request = HttpRequest()
self.assertFalse(authenticate(request, "basic "))
def test_authenticate_basic_invalid(self):
request = HttpRequest()
self.assertFalse(authenticate(request, "basic fdsafds"))
def test_authenticate_digest(self):
request = HttpRequest()
self.assertFalse(authenticate(request, "digest fdsafds"))
def test_authenticate_wrong(self):
request = HttpRequest()
self.assertFalse(authenticate(request, self.get_auth_string("invalid")))
def test_authenticate_basic(self):
request = HttpRequest()
self.assertTrue(
authenticate(request, self.get_auth_string(self.user.auth_token.key))
)
def test_authenticate_inactive(self):
self.user.is_active = False
self.user.save()
request = HttpRequest()
self.assertFalse(
authenticate(request, self.get_auth_string(self.user.auth_token.key))
)
def get_git_url(self, path, component=None):
kwargs = {"path": ""}
if component is None:
component = self.kw_component
kwargs.update(component)
return reverse("git-export", kwargs=kwargs) + path
def test_git_root(self):
response = self.client.get(self.get_git_url(""))
self.assertEqual(302, response.status_code)
def test_git_info(self):
response = self.client.get(self.get_git_url("info"), follow=True)
self.assertEqual(404, response.status_code)
def git_receive(self, **kwargs):
return self.client.get(
self.get_git_url("info/refs"),
QUERY_STRING="?service=git-upload-pack",
CONTENT_TYPE="application/x-git-upload-pack-advertisement",
**kwargs,
)
def test_redirect_link(self):
linked = self.create_link_existing()
response = self.client.get(
self.get_git_url("info/refs", component=linked.get_reverse_url_kwargs()),
QUERY_STRING="?service=git-upload-pack",
CONTENT_TYPE="application/x-git-upload-pack-advertisement",
)
self.assertRedirects(
response,
"/git/test/test/info/refs??service=git-upload-pack",
status_code=301,
)
def test_reject_push(self):
response = self.client.get(
self.get_git_url("info/refs"), {"service": "git-receive-pack"}
)
self.assertEqual(403, response.status_code)
def test_wrong_auth(self):
response = self.git_receive(HTTP_AUTHORIZATION="foo")
self.assertEqual(401, response.status_code)
def test_git_receive(self):
response = self.git_receive()
self.assertContains(response, "refs/heads/master")
def enable_acl(self):
self.project.access_control = Project.ACCESS_PRIVATE
self.project.save()
def test_git_receive_acl_denied(self):
self.enable_acl()
response = self.git_receive()
self.assertEqual(401, response.status_code)
def test_git_receive_acl_auth(self):
self.enable_acl()
self.project.add_user(self.user, "@VCS")
response = self.git_receive(
HTTP_AUTHORIZATION=self.get_auth_string(self.user.auth_token.key)
)
self.assertContains(response, "refs/heads/master")
def test_git_receive_acl_auth_denied(self):
self.enable_acl()
response = self.git_receive(
HTTP_AUTHORIZATION=self.get_auth_string(self.user.auth_token.key)
)
self.assertEqual(404, response.status_code)
def test_get_export_url(self):
self.assertEqual(
"http://example.com/git/test/test/", get_export_url(self.component)
)
class GitCloneTest(BaseLiveServerTestCase, RepoTestMixin):
"""Integration tests using git to clone the repo."""
acl = True
def setUp(self):
super().setUp()
self.clone_test_repos()
self.component = self.create_component()
self.component.project.access_control = Project.ACCESS_PRIVATE
self.component.project.save()
self.user = create_test_user()
def test_clone(self):
testdir = tempfile.mkdtemp()
if self.acl:
self.component.project.add_user(self.user, "@VCS")
try:
url = (
get_export_url(self.component)
.replace("http://example.com", self.live_server_url)
.replace(
"http://",
f"http://{self.user.username}:{self.user.auth_token.key}@",
)
)
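            # e.g. http://<username>:<token>@localhost:<port>/git/test/test/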
process = subprocess.Popen(
["git", "clone", url],
cwd=testdir,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
stdin=subprocess.PIPE,
)
output = process.communicate()[0]
retcode = process.poll()
finally:
remove_tree(testdir)
check = self.assertEqual if self.acl else self.assertNotEqual
check(retcode, 0, f"Failed: {output}")
class GitCloneFailTest(GitCloneTest):
acl = False
|
from statistics import mean
from homeassistant.components.weather import (
ATTR_FORECAST_CONDITION,
ATTR_FORECAST_PRECIPITATION,
ATTR_FORECAST_PRECIPITATION_PROBABILITY,
ATTR_FORECAST_TEMP,
ATTR_FORECAST_TEMP_LOW,
ATTR_FORECAST_TIME,
ATTR_FORECAST_WIND_BEARING,
ATTR_FORECAST_WIND_SPEED,
WeatherEntity,
)
from homeassistant.const import CONF_NAME, TEMP_CELSIUS, TEMP_FAHRENHEIT
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from homeassistant.util.dt import utc_from_timestamp
from .const import (
ATTR_FORECAST,
ATTRIBUTION,
CONDITION_CLASSES,
COORDINATOR,
DOMAIN,
MANUFACTURER,
NAME,
)
PARALLEL_UPDATES = 1
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Add a AccuWeather weather entity from a config_entry."""
name = config_entry.data[CONF_NAME]
coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
async_add_entities([AccuWeatherEntity(name, coordinator)], False)
class AccuWeatherEntity(CoordinatorEntity, WeatherEntity):
"""Define an AccuWeather entity."""
def __init__(self, name, coordinator):
"""Initialize."""
super().__init__(coordinator)
self._name = name
self._attrs = {}
self._unit_system = "Metric" if self.coordinator.is_metric else "Imperial"
@property
def name(self):
"""Return the name."""
return self._name
@property
def attribution(self):
"""Return the attribution."""
return ATTRIBUTION
@property
def unique_id(self):
"""Return a unique_id for this entity."""
return self.coordinator.location_key
@property
def device_info(self):
"""Return the device info."""
return {
"identifiers": {(DOMAIN, self.coordinator.location_key)},
"name": NAME,
"manufacturer": MANUFACTURER,
"entry_type": "service",
}
@property
def condition(self):
"""Return the current condition."""
try:
return [
k
for k, v in CONDITION_CLASSES.items()
if self.coordinator.data["WeatherIcon"] in v
][0]
except IndexError:
return None
@property
def temperature(self):
"""Return the temperature."""
return self.coordinator.data["Temperature"][self._unit_system]["Value"]
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS if self.coordinator.is_metric else TEMP_FAHRENHEIT
@property
def pressure(self):
"""Return the pressure."""
return self.coordinator.data["Pressure"][self._unit_system]["Value"]
@property
def humidity(self):
"""Return the humidity."""
return self.coordinator.data["RelativeHumidity"]
@property
def wind_speed(self):
"""Return the wind speed."""
return self.coordinator.data["Wind"]["Speed"][self._unit_system]["Value"]
@property
def wind_bearing(self):
"""Return the wind bearing."""
return self.coordinator.data["Wind"]["Direction"]["Degrees"]
@property
def visibility(self):
"""Return the visibility."""
return self.coordinator.data["Visibility"][self._unit_system]["Value"]
@property
def ozone(self):
"""Return the ozone level."""
# We only have ozone data for certain locations and only in the forecast data.
if self.coordinator.forecast and self.coordinator.data[ATTR_FORECAST][0].get(
"Ozone"
):
return self.coordinator.data[ATTR_FORECAST][0]["Ozone"]["Value"]
return None
@property
def forecast(self):
"""Return the forecast array."""
if not self.coordinator.forecast:
return None
# remap keys from library to keys understood by the weather component
forecast = [
{
ATTR_FORECAST_TIME: utc_from_timestamp(item["EpochDate"]).isoformat(),
ATTR_FORECAST_TEMP: item["TemperatureMax"]["Value"],
ATTR_FORECAST_TEMP_LOW: item["TemperatureMin"]["Value"],
ATTR_FORECAST_PRECIPITATION: self._calc_precipitation(item),
ATTR_FORECAST_PRECIPITATION_PROBABILITY: round(
mean(
[
item["PrecipitationProbabilityDay"],
item["PrecipitationProbabilityNight"],
]
)
),
ATTR_FORECAST_WIND_SPEED: item["WindDay"]["Speed"]["Value"],
ATTR_FORECAST_WIND_BEARING: item["WindDay"]["Direction"]["Degrees"],
ATTR_FORECAST_CONDITION: [
k for k, v in CONDITION_CLASSES.items() if item["IconDay"] in v
][0],
}
for item in self.coordinator.data[ATTR_FORECAST]
]
return forecast
@staticmethod
def _calc_precipitation(day: dict) -> float:
"""Return sum of the precipitation."""
precip_sum = 0
precip_types = ["Rain", "Snow", "Ice"]
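        # e.g. Rain 1.2/0.3 mm, Snow 0/0, Ice 0/0 (day/night) -> round(1.5, 1) = 1.5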
for precip in precip_types:
precip_sum = sum(
[
precip_sum,
day[f"{precip}Day"]["Value"],
day[f"{precip}Night"]["Value"],
]
)
return round(precip_sum, 1)
|
from itertools import count
from .common import maybe_declare
from .compression import compress
from .connection import maybe_channel, is_connection
from .entity import Exchange, Queue, maybe_delivery_mode
from .exceptions import ContentDisallowed
from .serialization import dumps, prepare_accept_content
from .utils.functional import ChannelPromise, maybe_list
__all__ = ('Exchange', 'Queue', 'Producer', 'Consumer')
class Producer:
"""Message Producer.
Arguments:
channel (kombu.Connection, ChannelT): Connection or channel.
exchange (kombu.entity.Exchange, str): Optional default exchange.
routing_key (str): Optional default routing key.
serializer (str): Default serializer. Default is `"json"`.
compression (str): Default compression method.
Default is no compression.
auto_declare (bool): Automatically declare the default exchange
at instantiation. Default is :const:`True`.
on_return (Callable): Callback to call for undeliverable messages,
when the `mandatory` or `immediate` arguments to
:meth:`publish` is used. This callback needs the following
signature: `(exception, exchange, routing_key, message)`.
Note that the producer needs to drain events to use this feature.
"""
#: Default exchange
exchange = None
#: Default routing key.
routing_key = ''
#: Default serializer to use. Default is JSON.
serializer = None
#: Default compression method. Disabled by default.
compression = None
    #: By default, if a default exchange is set,
    #: that exchange will be declared when publishing a message.
auto_declare = True
#: Basic return callback.
on_return = None
#: Set if channel argument was a Connection instance (using
#: default_channel).
__connection__ = None
def __init__(self, channel, exchange=None, routing_key=None,
serializer=None, auto_declare=None, compression=None,
on_return=None):
self._channel = channel
self.exchange = exchange
self.routing_key = routing_key or self.routing_key
self.serializer = serializer or self.serializer
self.compression = compression or self.compression
self.on_return = on_return or self.on_return
self._channel_promise = None
if self.exchange is None:
self.exchange = Exchange('')
if auto_declare is not None:
self.auto_declare = auto_declare
if self._channel:
self.revive(self._channel)
def __repr__(self):
return f'<Producer: {self._channel}>'
def __reduce__(self):
return self.__class__, self.__reduce_args__()
def __reduce_args__(self):
return (None, self.exchange, self.routing_key, self.serializer,
self.auto_declare, self.compression)
def declare(self):
"""Declare the exchange.
Note:
This happens automatically at instantiation when
the :attr:`auto_declare` flag is enabled.
"""
if self.exchange.name:
self.exchange.declare()
def maybe_declare(self, entity, retry=False, **retry_policy):
"""Declare exchange if not already declared during this session."""
if entity:
return maybe_declare(entity, self.channel, retry, **retry_policy)
def _delivery_details(self, exchange, delivery_mode=None,
maybe_delivery_mode=maybe_delivery_mode,
Exchange=Exchange):
if isinstance(exchange, Exchange):
return exchange.name, maybe_delivery_mode(
delivery_mode or exchange.delivery_mode,
)
# exchange is string, so inherit the delivery
# mode of our default exchange.
return exchange, maybe_delivery_mode(
delivery_mode or self.exchange.delivery_mode,
)
def publish(self, body, routing_key=None, delivery_mode=None,
mandatory=False, immediate=False, priority=0,
content_type=None, content_encoding=None, serializer=None,
headers=None, compression=None, exchange=None, retry=False,
retry_policy=None, declare=None, expiration=None, timeout=None,
**properties):
"""Publish message to the specified exchange.
Arguments:
body (Any): Message body.
routing_key (str): Message routing key.
delivery_mode (enum): See :attr:`delivery_mode`.
mandatory (bool): Currently not supported.
immediate (bool): Currently not supported.
priority (int): Message priority. A number between 0 and 9.
content_type (str): Content type. Default is auto-detect.
content_encoding (str): Content encoding. Default is auto-detect.
serializer (str): Serializer to use. Default is auto-detect.
compression (str): Compression method to use. Default is none.
headers (Dict): Mapping of arbitrary headers to pass along
with the message body.
exchange (kombu.entity.Exchange, str): Override the exchange.
Note that this exchange must have been declared.
declare (Sequence[EntityT]): Optional list of required entities
that must have been declared before publishing the message.
The entities will be declared using
:func:`~kombu.common.maybe_declare`.
retry (bool): Retry publishing, or declaring entities if the
connection is lost.
retry_policy (Dict): Retry configuration, this is the keywords
supported by :meth:`~kombu.Connection.ensure`.
expiration (float): A TTL in seconds can be specified per message.
Default is no expiration.
            timeout (float): Maximum time in seconds to wait for the message
                to be published. Default is no timeout.
**properties (Any): Additional message properties, see AMQP spec.
"""
_publish = self._publish
declare = [] if declare is None else declare
headers = {} if headers is None else headers
retry_policy = {} if retry_policy is None else retry_policy
routing_key = self.routing_key if routing_key is None else routing_key
compression = self.compression if compression is None else compression
exchange_name, properties['delivery_mode'] = self._delivery_details(
exchange or self.exchange, delivery_mode,
)
if expiration is not None:
properties['expiration'] = str(int(expiration * 1000))
body, content_type, content_encoding = self._prepare(
body, serializer, content_type, content_encoding,
compression, headers)
if self.auto_declare and self.exchange.name:
if self.exchange not in declare:
# XXX declare should be a Set.
declare.append(self.exchange)
if retry:
_publish = self.connection.ensure(self, _publish, **retry_policy)
return _publish(
body, priority, content_type, content_encoding,
headers, properties, routing_key, mandatory, immediate,
exchange_name, declare, timeout
)
def _publish(self, body, priority, content_type, content_encoding,
headers, properties, routing_key, mandatory,
immediate, exchange, declare, timeout=None):
channel = self.channel
message = channel.prepare_message(
body, priority, content_type,
content_encoding, headers, properties,
)
if declare:
maybe_declare = self.maybe_declare
[maybe_declare(entity) for entity in declare]
# handle autogenerated queue names for reply_to
reply_to = properties.get('reply_to')
if isinstance(reply_to, Queue):
properties['reply_to'] = reply_to.name
return channel.basic_publish(
message,
exchange=exchange, routing_key=routing_key,
mandatory=mandatory, immediate=immediate,
timeout=timeout
)
def _get_channel(self):
channel = self._channel
if isinstance(channel, ChannelPromise):
channel = self._channel = channel()
self.exchange.revive(channel)
if self.on_return:
channel.events['basic_return'].add(self.on_return)
return channel
def _set_channel(self, channel):
self._channel = channel
channel = property(_get_channel, _set_channel)
def revive(self, channel):
"""Revive the producer after connection loss."""
if is_connection(channel):
connection = channel
self.__connection__ = connection
channel = ChannelPromise(lambda: connection.default_channel)
if isinstance(channel, ChannelPromise):
self._channel = channel
self.exchange = self.exchange(channel)
else:
# Channel already concrete
self._channel = channel
if self.on_return:
self._channel.events['basic_return'].add(self.on_return)
self.exchange = self.exchange(channel)
def __enter__(self):
return self
def __exit__(self, *exc_info):
self.release()
def release(self):
pass
close = release
def _prepare(self, body, serializer=None, content_type=None,
content_encoding=None, compression=None, headers=None):
# No content_type? Then we're serializing the data internally.
if not content_type:
serializer = serializer or self.serializer
(content_type, content_encoding,
body) = dumps(body, serializer=serializer)
else:
# If the programmer doesn't want us to serialize,
# make sure content_encoding is set.
if isinstance(body, str):
if not content_encoding:
content_encoding = 'utf-8'
body = body.encode(content_encoding)
            # If they passed in something other than a string, we can't know
            # anything about it, so assume it's binary data.
elif not content_encoding:
content_encoding = 'binary'
if compression:
body, headers['compression'] = compress(body, compression)
return body, content_type, content_encoding
@property
def connection(self):
try:
return self.__connection__ or self.channel.connection.client
except AttributeError:
pass
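# Illustrative usage sketch (not part of this module): publishing a message with
# retry enabled, as described in the Producer and publish() docstrings above.
# The exchange and routing key names here are hypothetical.
def _example_publish(connection):
    task_exchange = Exchange('tasks', type='direct')
    with Producer(connection, serializer='json') as producer:
        producer.publish(
            {'hello': 'world'},
            exchange=task_exchange,
            routing_key='tasks',
            declare=[task_exchange],
            retry=True,
            retry_policy={'max_retries': 3},
        )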
class Consumer:
"""Message consumer.
Arguments:
channel (kombu.Connection, ChannelT): see :attr:`channel`.
queues (Sequence[kombu.Queue]): see :attr:`queues`.
no_ack (bool): see :attr:`no_ack`.
auto_declare (bool): see :attr:`auto_declare`
callbacks (Sequence[Callable]): see :attr:`callbacks`.
on_message (Callable): See :attr:`on_message`
on_decode_error (Callable): see :attr:`on_decode_error`.
prefetch_count (int): see :attr:`prefetch_count`.
"""
ContentDisallowed = ContentDisallowed
#: The connection/channel to use for this consumer.
channel = None
#: A single :class:`~kombu.Queue`, or a list of queues to
#: consume from.
queues = None
#: Flag for automatic message acknowledgment.
#: If enabled the messages are automatically acknowledged by the
#: broker. This can increase performance but means that you
#: have no control of when the message is removed.
#:
#: Disabled by default.
no_ack = None
#: By default all entities will be declared at instantiation, if you
#: want to handle this manually you can set this to :const:`False`.
auto_declare = True
#: List of callbacks called in order when a message is received.
#:
#: The signature of the callbacks must take two arguments:
#: `(body, message)`, which is the decoded message body and
#: the :class:`~kombu.Message` instance.
callbacks = None
#: Optional function called whenever a message is received.
#:
#: When defined this function will be called instead of the
#: :meth:`receive` method, and :attr:`callbacks` will be disabled.
#:
#: So this can be used as an alternative to :attr:`callbacks` when
#: you don't want the body to be automatically decoded.
#: Note that the message will still be decompressed if the message
#: has the ``compression`` header set.
#:
#: The signature of the callback must take a single argument,
#: which is the :class:`~kombu.Message` object.
#:
#: Also note that the ``message.body`` attribute, which is the raw
#: contents of the message body, may in some cases be a read-only
#: :class:`buffer` object.
on_message = None
#: Callback called when a message can't be decoded.
#:
#: The signature of the callback must take two arguments: `(message,
#: exc)`, which is the message that can't be decoded and the exception
#: that occurred while trying to decode it.
on_decode_error = None
#: List of accepted content-types.
#:
#: An exception will be raised if the consumer receives
#: a message with an untrusted content type.
#: By default all content-types are accepted, but not if
#: :func:`kombu.disable_untrusted_serializers` was called,
#: in which case only json is allowed.
accept = None
#: Initial prefetch count
#:
#: If set, the consumer will set the prefetch_count QoS value at startup.
#: Can also be changed using :meth:`qos`.
prefetch_count = None
#: Mapping of queues we consume from.
_queues = None
_tags = count(1) # global
def __init__(self, channel, queues=None, no_ack=None, auto_declare=None,
callbacks=None, on_decode_error=None, on_message=None,
accept=None, prefetch_count=None, tag_prefix=None):
self.channel = channel
self.queues = maybe_list(queues or [])
self.no_ack = self.no_ack if no_ack is None else no_ack
self.callbacks = (self.callbacks or [] if callbacks is None
else callbacks)
self.on_message = on_message
self.tag_prefix = tag_prefix
self._active_tags = {}
if auto_declare is not None:
self.auto_declare = auto_declare
if on_decode_error is not None:
self.on_decode_error = on_decode_error
self.accept = prepare_accept_content(accept)
self.prefetch_count = prefetch_count
if self.channel:
self.revive(self.channel)
@property
def queues(self):
return list(self._queues.values())
@queues.setter
def queues(self, queues):
self._queues = {q.name: q for q in queues}
def revive(self, channel):
"""Revive consumer after connection loss."""
self._active_tags.clear()
channel = self.channel = maybe_channel(channel)
        # modifying the dict while iterating over it is not allowed
for qname, queue in list(self._queues.items()):
# name may have changed after declare
self._queues.pop(qname, None)
queue = self._queues[queue.name] = queue(self.channel)
queue.revive(channel)
if self.auto_declare:
self.declare()
if self.prefetch_count is not None:
self.qos(prefetch_count=self.prefetch_count)
def declare(self):
"""Declare queues, exchanges and bindings.
Note:
This is done automatically at instantiation
when :attr:`auto_declare` is set.
"""
for queue in self._queues.values():
queue.declare()
def register_callback(self, callback):
"""Register a new callback to be called when a message is received.
Note:
The signature of the callback needs to accept two arguments:
`(body, message)`, which is the decoded message body
and the :class:`~kombu.Message` instance.
"""
self.callbacks.append(callback)
def __enter__(self):
self.consume()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.channel and self.channel.connection:
conn_errors = self.channel.connection.client.connection_errors
if not isinstance(exc_val, conn_errors):
try:
self.cancel()
except Exception:
pass
def add_queue(self, queue):
"""Add a queue to the list of queues to consume from.
Note:
This will not start consuming from the queue,
for that you will have to call :meth:`consume` after.
"""
queue = queue(self.channel)
if self.auto_declare:
queue.declare()
self._queues[queue.name] = queue
return queue
def consume(self, no_ack=None):
"""Start consuming messages.
Can be called multiple times, but note that while it
will consume from new queues added since the last call,
        it will not cancel consuming from removed queues
        (use :meth:`cancel_by_queue`).
Arguments:
no_ack (bool): See :attr:`no_ack`.
"""
queues = list(self._queues.values())
if queues:
no_ack = self.no_ack if no_ack is None else no_ack
H, T = queues[:-1], queues[-1]
for queue in H:
self._basic_consume(queue, no_ack=no_ack, nowait=True)
self._basic_consume(T, no_ack=no_ack, nowait=False)
def cancel(self):
"""End all active queue consumers.
Note:
This does not affect already delivered messages, but it does
mean the server will not send any more messages for this consumer.
"""
cancel = self.channel.basic_cancel
for tag in self._active_tags.values():
cancel(tag)
self._active_tags.clear()
close = cancel
def cancel_by_queue(self, queue):
"""Cancel consumer by queue name."""
qname = queue.name if isinstance(queue, Queue) else queue
try:
tag = self._active_tags.pop(qname)
except KeyError:
pass
else:
self.channel.basic_cancel(tag)
finally:
self._queues.pop(qname, None)
def consuming_from(self, queue):
"""Return :const:`True` if currently consuming from queue'."""
name = queue
if isinstance(queue, Queue):
name = queue.name
return name in self._active_tags
def purge(self):
"""Purge messages from all queues.
Warning:
This will *delete all ready messages*, there is no undo operation.
"""
return sum(queue.purge() for queue in self._queues.values())
def flow(self, active):
"""Enable/disable flow from peer.
This is a simple flow-control mechanism that a peer can use
to avoid overflowing its queues or otherwise finding itself
receiving more messages than it can process.
The peer that receives a request to stop sending content
will finish sending the current content (if any), and then wait
until flow is reactivated.
"""
self.channel.flow(active)
def qos(self, prefetch_size=0, prefetch_count=0, apply_global=False):
"""Specify quality of service.
The client can request that messages should be sent in
advance so that when the client finishes processing a message,
the following message is already held locally, rather than needing
to be sent down the channel. Prefetching gives a performance
improvement.
        The prefetch window is ignored if the :attr:`no_ack` option is set.
Arguments:
prefetch_size (int): Specify the prefetch window in octets.
The server will send a message in advance if it is equal to
or smaller in size than the available prefetch size (and
also falls within other prefetch limits). May be set to zero,
meaning "no specific limit", although other prefetch limits
may still apply.
prefetch_count (int): Specify the prefetch window in terms of
whole messages.
apply_global (bool): Apply new settings globally on all channels.
"""
return self.channel.basic_qos(prefetch_size,
prefetch_count,
apply_global)
def recover(self, requeue=False):
"""Redeliver unacknowledged messages.
Asks the broker to redeliver all unacknowledged messages
on the specified channel.
Arguments:
requeue (bool): By default the messages will be redelivered
to the original recipient. With `requeue` set to true, the
server will attempt to requeue the message, potentially then
delivering it to an alternative subscriber.
"""
return self.channel.basic_recover(requeue=requeue)
def receive(self, body, message):
"""Method called when a message is received.
This dispatches to the registered :attr:`callbacks`.
Arguments:
body (Any): The decoded message body.
message (~kombu.Message): The message instance.
Raises:
NotImplementedError: If no consumer callbacks have been
registered.
"""
callbacks = self.callbacks
if not callbacks:
raise NotImplementedError('Consumer does not have any callbacks')
[callback(body, message) for callback in callbacks]
def _basic_consume(self, queue, consumer_tag=None,
no_ack=no_ack, nowait=True):
tag = self._active_tags.get(queue.name)
if tag is None:
tag = self._add_tag(queue, consumer_tag)
queue.consume(tag, self._receive_callback,
no_ack=no_ack, nowait=nowait)
return tag
def _add_tag(self, queue, consumer_tag=None):
tag = consumer_tag or '{}{}'.format(
self.tag_prefix, next(self._tags))
self._active_tags[queue.name] = tag
return tag
def _receive_callback(self, message):
accept = self.accept
on_m, channel, decoded = self.on_message, self.channel, None
try:
m2p = getattr(channel, 'message_to_python', None)
if m2p:
message = m2p(message)
if accept is not None:
message.accept = accept
if message.errors:
return message._reraise_error(self.on_decode_error)
decoded = None if on_m else message.decode()
except Exception as exc:
if not self.on_decode_error:
raise
self.on_decode_error(message, exc)
else:
return on_m(message) if on_m else self.receive(decoded, message)
def __repr__(self):
return '<{name}: {0.queues}>'.format(self, name=type(self).__name__)
@property
def connection(self):
try:
return self.channel.connection.client
except AttributeError:
pass
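# Illustrative usage sketch (not part of this module): consuming messages with a
# callback, as described in the Consumer docstring above. The queue name is
# hypothetical.
def _example_consume(connection):
    def handle_message(body, message):
        print('Received:', body)
        message.ack()

    task_queue = Queue('tasks')
    with Consumer(connection, queues=[task_queue],
                  callbacks=[handle_message], accept=['json']):
        connection.drain_events(timeout=1)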
|
import logging
from aioasuswrt.asuswrt import AsusWrt
from homeassistant.const import DATA_GIGABYTES, DATA_RATE_MEGABITS_PER_SECOND
from homeassistant.helpers.entity import Entity
from . import DATA_ASUSWRT
_LOGGER = logging.getLogger(__name__)
UPLOAD_ICON = "mdi:upload-network"
DOWNLOAD_ICON = "mdi:download-network"
async def async_setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the asuswrt sensors."""
if discovery_info is None:
return
api = hass.data[DATA_ASUSWRT]
devices = []
if "devices" in discovery_info:
devices.append(AsuswrtDevicesSensor(api))
if "download" in discovery_info:
devices.append(AsuswrtTotalRXSensor(api))
if "upload" in discovery_info:
devices.append(AsuswrtTotalTXSensor(api))
if "download_speed" in discovery_info:
devices.append(AsuswrtRXSensor(api))
if "upload_speed" in discovery_info:
devices.append(AsuswrtTXSensor(api))
add_entities(devices)
class AsuswrtSensor(Entity):
"""Representation of a asuswrt sensor."""
_name = "generic"
def __init__(self, api: AsusWrt):
"""Initialize the sensor."""
self._api = api
self._state = None
self._devices = None
self._rates = None
self._speed = None
self._connect_error = False
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
async def async_update(self):
"""Fetch status from asuswrt."""
try:
self._devices = await self._api.async_get_connected_devices()
self._rates = await self._api.async_get_bytes_total()
self._speed = await self._api.async_get_current_transfer_rates()
if self._connect_error:
self._connect_error = False
_LOGGER.info("Reconnected to ASUS router for %s update", self.entity_id)
except OSError as err:
if not self._connect_error:
self._connect_error = True
_LOGGER.error(
"Error connecting to ASUS router for %s update: %s",
self.entity_id,
err,
)
class AsuswrtDevicesSensor(AsuswrtSensor):
"""Representation of a asuswrt download speed sensor."""
_name = "Asuswrt Devices Connected"
async def async_update(self):
"""Fetch new state data for the sensor."""
await super().async_update()
if self._devices:
self._state = len(self._devices)
class AsuswrtRXSensor(AsuswrtSensor):
"""Representation of a asuswrt download speed sensor."""
_name = "Asuswrt Download Speed"
_unit = DATA_RATE_MEGABITS_PER_SECOND
@property
def icon(self):
"""Return the icon."""
return DOWNLOAD_ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
async def async_update(self):
"""Fetch new state data for the sensor."""
await super().async_update()
if self._speed:
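            # 1 Mbit/s = 125000 bytes/s, so convert the byte rate to Mbit/s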
self._state = round(self._speed[0] / 125000, 2)
class AsuswrtTXSensor(AsuswrtSensor):
"""Representation of a asuswrt upload speed sensor."""
_name = "Asuswrt Upload Speed"
_unit = DATA_RATE_MEGABITS_PER_SECOND
@property
def icon(self):
"""Return the icon."""
return UPLOAD_ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
async def async_update(self):
"""Fetch new state data for the sensor."""
await super().async_update()
if self._speed:
self._state = round(self._speed[1] / 125000, 2)
class AsuswrtTotalRXSensor(AsuswrtSensor):
"""Representation of a asuswrt total download sensor."""
_name = "Asuswrt Download"
_unit = DATA_GIGABYTES
@property
def icon(self):
"""Return the icon."""
return DOWNLOAD_ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
async def async_update(self):
"""Fetch new state data for the sensor."""
await super().async_update()
if self._rates:
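            # totals are reported in bytes; divide by 1e9 to get gigabytes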
self._state = round(self._rates[0] / 1000000000, 1)
class AsuswrtTotalTXSensor(AsuswrtSensor):
"""Representation of a asuswrt total upload sensor."""
_name = "Asuswrt Upload"
_unit = DATA_GIGABYTES
@property
def icon(self):
"""Return the icon."""
return UPLOAD_ICON
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit
async def async_update(self):
"""Fetch new state data for the sensor."""
await super().async_update()
if self._rates:
self._state = round(self._rates[1] / 1000000000, 1)
|
from io import StringIO
from unittest import mock
from contextlib import contextmanager
from paramiko import SSHClient
import app.utils.SshUtil as ssh
from . import RSA_PRIVATE_KEY
@contextmanager
def patch_ssh(stdout='', stderr=''):
def mock_exec_command(*args, **kwargs):
stdin = StringIO()
stdin.close()
return stdin, StringIO(stdout), StringIO(stderr)
with mock.patch.object(SSHClient, 'exec_command', new=mock_exec_command):
with mock.patch.object(SSHClient, 'connect'):
yield
def test_ssh():
kwargs = dict(
ip='127.0.0.1',
port='22',
account='root',
pkey=RSA_PRIVATE_KEY,
shell='echo hello\n echo hello'
)
with patch_ssh('', ''):
success, log = ssh.do_ssh_cmd(**kwargs)
assert success
with patch_ssh('', 'message'):
success, log = ssh.do_ssh_cmd(**kwargs)
assert not success
assert 'message' in log
msg = ('fatal: Not a git repository '
'(or any parent up to mount point /tmp)\n'
'Stopping at filesystem boundary '
'(GIT_DISCOVERY_ACROSS_FILESYSTEM not set).')
with patch_ssh(msg, ''):
success, log = ssh.do_ssh_cmd(**kwargs)
assert not success
assert msg in log
|
from pathlib import Path
import aiohttp
from redbot.core.i18n import Translator
_ = Translator("Audio", Path(__file__))
class AudioError(Exception):
"""Base exception for errors in the Audio cog."""
class LavalinkDownloadFailed(AudioError, RuntimeError):
"""Downloading the Lavalink jar failed.
Attributes
----------
response : aiohttp.ClientResponse
The response from the server to the failed GET request.
should_retry : bool
Whether or not the Audio cog should retry downloading the jar.
"""
def __init__(self, *args, response: aiohttp.ClientResponse, should_retry: bool = False):
super().__init__(*args)
self.response = response
self.should_retry = should_retry
def __repr__(self) -> str:
str_args = [*map(str, self.args), self._response_repr()]
return f"LavalinkDownloadFailed({', '.join(str_args)}"
def __str__(self) -> str:
return f"{super().__str__()} {self._response_repr()}"
def _response_repr(self) -> str:
return f"[{self.response.status} {self.response.reason}]"
class QueryUnauthorized(AudioError):
"""Provided an unauthorized query to audio."""
def __init__(self, message, *args):
self.message = message
super().__init__(*args)
class TrackEnqueueError(AudioError):
"""Unable to play track."""
class PlayListError(AudioError):
"""Base exception for errors related to playlists."""
class InvalidPlaylistScope(PlayListError):
"""Provided playlist scope is not valid."""
class MissingGuild(PlayListError):
"""Trying to access the Guild scope without a guild."""
class MissingAuthor(PlayListError):
"""Trying to access the User scope without an user id."""
class TooManyMatches(PlayListError):
"""Too many playlist match user input."""
class NoMatchesFound(PlayListError):
"""No entries found for this input."""
class NotAllowed(PlayListError):
"""Too many playlist match user input."""
class ApiError(AudioError):
"""Base exception for API errors in the Audio cog."""
class SpotifyApiError(ApiError):
"""Base exception for Spotify API errors."""
class SpotifyFetchError(SpotifyApiError):
"""Fetching Spotify data failed."""
def __init__(self, message, *args):
self.message = message
super().__init__(*args)
class YouTubeApiError(ApiError):
"""Base exception for YouTube Data API errors."""
def __init__(self, message, *args):
self.message = message
super().__init__(*args)
class DatabaseError(AudioError):
"""Base exception for database errors in the Audio cog."""
class InvalidTableError(DatabaseError):
"""Provided table to query is not a valid table."""
class LocalTrackError(AudioError):
"""Base exception for local track errors."""
class InvalidLocalTrack(LocalTrackError):
"""Base exception for local track errors."""
class InvalidLocalTrackFolder(LocalTrackError):
"""Base exception for local track errors."""
|
import ast
import asyncio
import aiohttp
import inspect
import io
import textwrap
import traceback
import types
import re
from contextlib import redirect_stdout
from copy import copy
import discord
from . import checks, commands
from .commands import NoParseOptional as Optional
from .i18n import Translator
from .utils.chat_formatting import box, pagify
from .utils.predicates import MessagePredicate
"""
Notice:
95% of the below code came from R.Danny which can be found here:
https://github.com/Rapptz/RoboDanny/blob/master/cogs/repl.py
"""
_ = Translator("Dev", __file__)
START_CODE_BLOCK_RE = re.compile(r"^((```py)(?=\s)|(```))")
class Dev(commands.Cog):
"""Various development focused utilities."""
async def red_delete_data_for_user(self, **kwargs):
"""
Because despite my best efforts to advise otherwise,
people use ``--dev`` in production
"""
return
def __init__(self):
super().__init__()
self._last_result = None
self.sessions = {}
@staticmethod
def async_compile(source, filename, mode):
return compile(source, filename, mode, flags=ast.PyCF_ALLOW_TOP_LEVEL_AWAIT, optimize=0)
@staticmethod
async def maybe_await(coro):
for i in range(2):
if inspect.isawaitable(coro):
coro = await coro
else:
return coro
return coro
@staticmethod
def cleanup_code(content):
"""Automatically removes code blocks from the code."""
# remove ```py\n```
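        # e.g. "```py\nprint(1)\n```" becomes "\nprint(1)\n"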
if content.startswith("```") and content.endswith("```"):
return START_CODE_BLOCK_RE.sub("", content)[:-3]
# remove `foo`
return content.strip("` \n")
@staticmethod
def get_syntax_error(e):
"""Format a syntax error to send to the user.
Returns a string representation of the error formatted as a codeblock.
"""
if e.text is None:
return box("{0.__class__.__name__}: {0}".format(e), lang="py")
return box(
"{0.text}\n{1:>{0.offset}}\n{2}: {0}".format(e, "^", type(e).__name__), lang="py"
)
@staticmethod
def get_pages(msg: str):
"""Pagify the given message for output to the user."""
return pagify(msg, delims=["\n", " "], priority=True, shorten_by=10)
@staticmethod
def sanitize_output(ctx: commands.Context, input_: str) -> str:
"""Hides the bot's token from a string."""
token = ctx.bot.http.token
        return re.sub(re.escape(token), "[EXPUNGED]", input_, flags=re.I)
@commands.command()
@checks.is_owner()
async def debug(self, ctx, *, code):
"""Evaluate a statement of python code.
The bot will always respond with the return value of the code.
If the return value of the code is a coroutine, it will be awaited,
and the result of that will be the bot's response.
Note: Only one statement may be evaluated. Using certain restricted
keywords, e.g. yield, will result in a syntax error. For multiple
lines or asynchronous code, see [p]repl or [p]eval.
Environment Variables:
ctx - command invocation context
bot - bot object
channel - the current channel object
author - command author's member object
message - the command's message object
discord - discord.py library
commands - redbot.core.commands
_ - The result of the last dev command.
"""
env = {
"bot": ctx.bot,
"ctx": ctx,
"channel": ctx.channel,
"author": ctx.author,
"guild": ctx.guild,
"message": ctx.message,
"asyncio": asyncio,
"aiohttp": aiohttp,
"discord": discord,
"commands": commands,
"_": self._last_result,
"__name__": "__main__",
}
code = self.cleanup_code(code)
try:
compiled = self.async_compile(code, "<string>", "eval")
result = await self.maybe_await(eval(compiled, env))
except SyntaxError as e:
await ctx.send(self.get_syntax_error(e))
return
except Exception as e:
await ctx.send(box("{}: {!s}".format(type(e).__name__, e), lang="py"))
return
self._last_result = result
result = self.sanitize_output(ctx, str(result))
await ctx.send_interactive(self.get_pages(result), box_lang="py")
@commands.command(name="eval")
@checks.is_owner()
async def _eval(self, ctx, *, body: str):
"""Execute asynchronous code.
This command wraps code into the body of an async function and then
calls and awaits it. The bot will respond with anything printed to
stdout, as well as the return value of the function.
The code can be within a codeblock, inline code or neither, as long
as they are not mixed and they are formatted correctly.
Environment Variables:
ctx - command invocation context
bot - bot object
channel - the current channel object
author - command author's member object
message - the command's message object
discord - discord.py library
commands - redbot.core.commands
_ - The result of the last dev command.
"""
env = {
"bot": ctx.bot,
"ctx": ctx,
"channel": ctx.channel,
"author": ctx.author,
"guild": ctx.guild,
"message": ctx.message,
"asyncio": asyncio,
"aiohttp": aiohttp,
"discord": discord,
"commands": commands,
"_": self._last_result,
"__name__": "__main__",
}
body = self.cleanup_code(body)
stdout = io.StringIO()
to_compile = "async def func():\n%s" % textwrap.indent(body, " ")
try:
compiled = self.async_compile(to_compile, "<string>", "exec")
exec(compiled, env)
except SyntaxError as e:
return await ctx.send(self.get_syntax_error(e))
func = env["func"]
result = None
try:
with redirect_stdout(stdout):
result = await func()
except:
printed = "{}{}".format(stdout.getvalue(), traceback.format_exc())
else:
printed = stdout.getvalue()
await ctx.tick()
if result is not None:
self._last_result = result
msg = "{}{}".format(printed, result)
else:
msg = printed
msg = self.sanitize_output(ctx, msg)
await ctx.send_interactive(self.get_pages(msg), box_lang="py")
@commands.group(invoke_without_command=True)
@checks.is_owner()
async def repl(self, ctx):
"""Open an interactive REPL.
The REPL will only recognise code as messages which start with a
backtick. This includes codeblocks, and as such multiple lines can be
evaluated.
"""
variables = {
"ctx": ctx,
"bot": ctx.bot,
"message": ctx.message,
"guild": ctx.guild,
"channel": ctx.channel,
"author": ctx.author,
"asyncio": asyncio,
"_": None,
"__builtins__": __builtins__,
"__name__": "__main__",
}
if ctx.channel.id in self.sessions:
if self.sessions[ctx.channel.id]:
await ctx.send(
_("Already running a REPL session in this channel. Exit it with `quit`.")
)
else:
await ctx.send(
_(
"Already running a REPL session in this channel. Resume the REPL with `{}repl resume`."
).format(ctx.prefix)
)
return
self.sessions[ctx.channel.id] = True
await ctx.send(
_(
"Enter code to execute or evaluate. `exit()` or `quit` to exit. `{}repl pause` to pause."
).format(ctx.prefix)
)
while True:
response = await ctx.bot.wait_for("message", check=MessagePredicate.regex(r"^`", ctx))
if not self.sessions[ctx.channel.id]:
continue
cleaned = self.cleanup_code(response.content)
if cleaned in ("quit", "exit", "exit()"):
await ctx.send(_("Exiting."))
del self.sessions[ctx.channel.id]
return
executor = None
if cleaned.count("\n") == 0:
# single statement, potentially 'eval'
try:
code = self.async_compile(cleaned, "<repl session>", "eval")
except SyntaxError:
pass
else:
executor = eval
if executor is None:
try:
code = self.async_compile(cleaned, "<repl session>", "exec")
except SyntaxError as e:
await ctx.send(self.get_syntax_error(e))
continue
variables["message"] = response
stdout = io.StringIO()
msg = ""
try:
with redirect_stdout(stdout):
if executor is None:
result = types.FunctionType(code, variables)()
else:
result = executor(code, variables)
result = await self.maybe_await(result)
except:
value = stdout.getvalue()
msg = "{}{}".format(value, traceback.format_exc())
else:
value = stdout.getvalue()
if result is not None:
msg = "{}{}".format(value, result)
variables["_"] = result
elif value:
msg = "{}".format(value)
msg = self.sanitize_output(ctx, msg)
try:
await ctx.send_interactive(self.get_pages(msg), box_lang="py")
except discord.Forbidden:
pass
except discord.HTTPException as e:
await ctx.send(_("Unexpected error: `{}`").format(e))
@repl.command(aliases=["resume"])
async def pause(self, ctx, toggle: Optional[bool] = None):
"""Pauses/resumes the REPL running in the current channel"""
if ctx.channel.id not in self.sessions:
await ctx.send(_("There is no currently running REPL session in this channel."))
return
if toggle is None:
toggle = not self.sessions[ctx.channel.id]
self.sessions[ctx.channel.id] = toggle
if toggle:
await ctx.send(_("The REPL session in this channel has been resumed."))
else:
await ctx.send(_("The REPL session in this channel is now paused."))
@commands.command()
@checks.is_owner()
async def mock(self, ctx, user: discord.Member, *, command):
"""Mock another user invoking a command.
The prefix must not be entered.
"""
msg = copy(ctx.message)
msg.author = user
msg.content = ctx.prefix + command
ctx.bot.dispatch("message", msg)
@commands.command(name="mockmsg")
@checks.is_owner()
async def mock_msg(self, ctx, user: discord.Member, *, content: str):
"""Dispatch a message event as if it were sent by a different user.
Only reads the raw content of the message. Attachments, embeds etc. are
ignored.
"""
old_author = ctx.author
old_content = ctx.message.content
ctx.message.author = user
ctx.message.content = content
ctx.bot.dispatch("message", ctx.message)
# If we change the author and content back too quickly,
# the bot won't process the mocked message in time.
await asyncio.sleep(2)
ctx.message.author = old_author
ctx.message.content = old_content
@commands.command()
@checks.is_owner()
async def bypasscooldowns(self, ctx, toggle: Optional[bool] = None):
"""Give bot owners the ability to bypass cooldowns.
Does not persist through restarts."""
if toggle is None:
toggle = not ctx.bot._bypass_cooldowns
ctx.bot._bypass_cooldowns = toggle
if toggle:
await ctx.send(_("Bot owners will now bypass all commands with cooldowns."))
else:
await ctx.send(_("Bot owners will no longer bypass all commands with cooldowns."))
|
import os
import unittest
from integration_tests.files import require_empty_dir
from trashcli.put import TrashDirectoryForPut, RealFs
from mock import Mock
join = os.path.join
class TestTrashDirectory_persist_trash_info(unittest.TestCase):
def setUp(self):
self.trashdirectory_base_dir = os.path.realpath(
"./sandbox/testTrashDirectory")
require_empty_dir(self.trashdirectory_base_dir)
self.instance = TrashDirectoryForPut(
self.trashdirectory_base_dir,
"/",
RealFs())
self.logger = Mock()
def persist_trash_info(self, basename, content):
return self.instance.persist_trash_info(basename,
content,
self.logger)
def test_persist_trash_info_first_time(self):
trash_info_file = self.persist_trash_info('dummy-path', b'content')
assert join(self.trashdirectory_base_dir,'info', 'dummy-path.trashinfo') == trash_info_file
assert 'content' == read(trash_info_file)
def test_persist_trash_info_first_100_times(self):
self.test_persist_trash_info_first_time()
        for i in range(1, 100):
            content = b'trashinfo content'
trash_info_file = self.persist_trash_info('dummy-path', content)
assert ("dummy-path_%s.trashinfo" % i ==
os.path.basename(trash_info_file))
assert 'trashinfo content' == read(trash_info_file)
def test_persist_trash_info_other_times(self):
self.test_persist_trash_info_first_100_times()
        for i in range(101, 200):
trash_info_file = self.persist_trash_info('dummy-path',b'content')
trash_info_id = os.path.basename(trash_info_file)
assert trash_info_id.startswith("dummy-path_")
assert 'content' == read(trash_info_file)
test_persist_trash_info_first_100_times.stress_test = True
test_persist_trash_info_other_times.stress_test = True
def read(path):
return open(path).read()
|
import logging
from Plugwise_Smile.Smile import Smile
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from .const import COORDINATOR, DOMAIN
from .gateway import SmileGateway
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Smile switches from a config entry."""
api = hass.data[DOMAIN][config_entry.entry_id]["api"]
coordinator = hass.data[DOMAIN][config_entry.entry_id][COORDINATOR]
entities = []
all_devices = api.get_all_devices()
for dev_id, device_properties in all_devices.items():
if "plug" in device_properties["types"]:
model = "Metered Switch"
entities.append(
PwSwitch(api, coordinator, device_properties["name"], dev_id, model)
)
async_add_entities(entities, True)
class PwSwitch(SmileGateway, SwitchEntity):
"""Representation of a Plugwise plug."""
def __init__(self, api, coordinator, name, dev_id, model):
"""Set up the Plugwise API."""
super().__init__(api, coordinator, name, dev_id)
self._model = model
self._is_on = False
self._unique_id = f"{dev_id}-plug"
@property
def is_on(self):
"""Return true if device is on."""
return self._is_on
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
try:
if await self._api.set_relay_state(self._dev_id, "on"):
self._is_on = True
self.async_write_ha_state()
except Smile.PlugwiseError:
_LOGGER.error("Error while communicating to device")
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
try:
if await self._api.set_relay_state(self._dev_id, "off"):
self._is_on = False
self.async_write_ha_state()
except Smile.PlugwiseError:
_LOGGER.error("Error while communicating to device")
@callback
def _async_process_data(self):
"""Update the data from the Plugs."""
_LOGGER.debug("Update switch called")
data = self._api.get_device_data(self._dev_id)
if not data:
_LOGGER.error("Received no data for device %s", self._name)
self.async_write_ha_state()
return
if "relay" in data:
self._is_on = data["relay"]
self.async_write_ha_state()
|
from math import ceil
import abodepy.helpers.constants as CONST
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.util.color import (
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
from . import AbodeDevice
from .const import DOMAIN
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Abode light devices."""
data = hass.data[DOMAIN]
entities = []
for device in data.abode.get_devices(generic_type=CONST.TYPE_LIGHT):
entities.append(AbodeLight(data, device))
async_add_entities(entities)
class AbodeLight(AbodeDevice, LightEntity):
"""Representation of an Abode light."""
def turn_on(self, **kwargs):
"""Turn on the light."""
if ATTR_COLOR_TEMP in kwargs and self._device.is_color_capable:
self._device.set_color_temp(
int(color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP]))
)
return
if ATTR_HS_COLOR in kwargs and self._device.is_color_capable:
self._device.set_color(kwargs[ATTR_HS_COLOR])
return
if ATTR_BRIGHTNESS in kwargs and self._device.is_dimmable:
# Convert Home Assistant brightness (0-255) to Abode brightness (0-99)
# If 100 is sent to Abode, response is 99 causing an error
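            # e.g. HA brightness 128 -> ceil(128 * 99 / 255) = 50 on the Abode scale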
self._device.set_level(ceil(kwargs[ATTR_BRIGHTNESS] * 99 / 255.0))
return
self._device.switch_on()
def turn_off(self, **kwargs):
"""Turn off the light."""
self._device.switch_off()
@property
def is_on(self):
"""Return true if device is on."""
return self._device.is_on
@property
def brightness(self):
"""Return the brightness of the light."""
if self._device.is_dimmable and self._device.has_brightness:
brightness = int(self._device.brightness)
# Abode returns 100 during device initialization and device refresh
if brightness == 100:
return 255
# Convert Abode brightness (0-99) to Home Assistant brightness (0-255)
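            # e.g. Abode level 50 -> ceil(50 * 255 / 99) = 129 in Home Assistant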
return ceil(brightness * 255 / 99.0)
@property
def color_temp(self):
"""Return the color temp of the light."""
if self._device.has_color:
return color_temperature_kelvin_to_mired(self._device.color_temp)
@property
def hs_color(self):
"""Return the color of the light."""
if self._device.has_color:
return self._device.color
@property
def supported_features(self):
"""Flag supported features."""
if self._device.is_dimmable and self._device.is_color_capable:
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR | SUPPORT_COLOR_TEMP
if self._device.is_dimmable:
return SUPPORT_BRIGHTNESS
return 0
|
from paasta_tools.autoscaling.pause_service_autoscaler import (
delete_service_autoscale_pause_time,
)
from paasta_tools.autoscaling.pause_service_autoscaler import (
get_service_autoscale_pause_time,
)
from paasta_tools.autoscaling.pause_service_autoscaler import (
update_service_autoscale_pause_time,
)
from paasta_tools.utils import _log_audit
MAX_PAUSE_DURATION = 320
def add_subparser(subparsers):
status_parser = subparsers.add_parser(
"pause_service_autoscaler",
help="Pause the service autoscaler for an entire cluster",
description=(
"'paasta pause_service_autoscaler is used to pause the paasta service autoscaler "
"for an entire paasta cluster. "
),
)
status_parser.add_argument(
"-c",
"--cluster",
dest="cluster",
help="which cluster to pause autoscaling in. ie. norcal-prod",
)
status_parser.add_argument(
"-d",
"--pause-duration",
default=120,
dest="duration",
type=int,
help="How long to pause the autoscaler for, defaults to %(default)s minutes",
)
status_parser.add_argument(
"-f",
"--force",
help="Force pause for longer than max duration",
action="store_true",
dest="force",
default=False,
)
status_parser.add_argument(
"-i",
"--info",
help="Print when the autoscaler is paused until",
action="store_true",
dest="info",
default=False,
)
status_parser.add_argument(
"-r",
"--resume",
help="Resume autoscaling (unpause) in a cluster",
action="store_true",
dest="resume",
default=False,
)
status_parser.set_defaults(command=paasta_pause_service_autoscaler)
def paasta_pause_service_autoscaler(args):
"""With a given cluster and duration, pauses the paasta service autoscaler
in that cluster for duration minutes"""
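    # Example invocation: paasta pause_service_autoscaler -c norcal-prod -d 30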
if args.duration > MAX_PAUSE_DURATION:
if not args.force:
print(
"Specified duration: {d} longer than max: {m}".format(
d=args.duration, m=MAX_PAUSE_DURATION
)
)
print("If you are really sure, run again with --force")
return 3
if args.info:
return_code = get_service_autoscale_pause_time(args.cluster)
elif args.resume:
return_code = delete_service_autoscale_pause_time(args.cluster)
_log_audit(action="resume-service-autoscaler", cluster=args.cluster)
else:
minutes = args.duration
return_code = update_service_autoscale_pause_time(args.cluster, minutes)
_log_audit(
action="pause-service-autoscaler",
action_details={"duration": minutes},
cluster=args.cluster,
)
return return_code
|
import asyncio
from contextlib import suppress
import logging
from typing import Optional
from aiohttp import WSMsgType, web
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from homeassistant.core import callback
from homeassistant.helpers.event import async_call_later
from .auth import AuthPhase, auth_required_message
from .const import (
CANCELLATION_ERRORS,
DATA_CONNECTIONS,
MAX_PENDING_MSG,
PENDING_MSG_PEAK,
PENDING_MSG_PEAK_TIME,
SIGNAL_WEBSOCKET_CONNECTED,
SIGNAL_WEBSOCKET_DISCONNECTED,
URL,
)
from .error import Disconnect
from .messages import message_to_json
# mypy: allow-untyped-calls, allow-untyped-defs, no-check-untyped-defs
class WebsocketAPIView(HomeAssistantView):
"""View to serve a websockets endpoint."""
name = "websocketapi"
url = URL
requires_auth = False
async def get(self, request: web.Request) -> web.WebSocketResponse:
"""Handle an incoming websocket connection."""
return await WebSocketHandler(request.app["hass"], request).async_handle()
class WebSocketHandler:
"""Handle an active websocket client connection."""
def __init__(self, hass, request):
"""Initialize an active connection."""
self.hass = hass
self.request = request
self.wsock: Optional[web.WebSocketResponse] = None
self._to_write: asyncio.Queue = asyncio.Queue(maxsize=MAX_PENDING_MSG)
self._handle_task = None
self._writer_task = None
self._logger = logging.getLogger("{}.connection.{}".format(__name__, id(self)))
self._peak_checker_unsub = None
async def _writer(self):
"""Write outgoing messages."""
        # Exceptions raised if the socket disconnects or the connection handler cancels us
with suppress(RuntimeError, ConnectionResetError, *CANCELLATION_ERRORS):
while not self.wsock.closed:
message = await self._to_write.get()
if message is None:
break
self._logger.debug("Sending %s", message)
if not isinstance(message, str):
message = message_to_json(message)
await self.wsock.send_str(message)
        # Clean up the peak checker when we shut down the writer
if self._peak_checker_unsub:
self._peak_checker_unsub()
self._peak_checker_unsub = None
@callback
def _send_message(self, message):
"""Send a message to the client.
Closes connection if the client is not reading the messages.
Async friendly.
"""
try:
self._to_write.put_nowait(message)
except asyncio.QueueFull:
self._logger.error(
"Client exceeded max pending messages [2]: %s", MAX_PENDING_MSG
)
self._cancel()
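        # Backpressure handling: if the queue has drained back below PENDING_MSG_PEAK,
        # any scheduled peak check can be dropped; otherwise schedule one so a client
        # that stays above the peak for PENDING_MSG_PEAK_TIME gets disconnected.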
if self._to_write.qsize() < PENDING_MSG_PEAK:
if self._peak_checker_unsub:
self._peak_checker_unsub()
self._peak_checker_unsub = None
return
if self._peak_checker_unsub is None:
self._peak_checker_unsub = async_call_later(
self.hass, PENDING_MSG_PEAK_TIME, self._check_write_peak
)
@callback
def _check_write_peak(self, _):
"""Check that we are no longer above the write peak."""
self._peak_checker_unsub = None
if self._to_write.qsize() < PENDING_MSG_PEAK:
return
self._logger.error(
"Client unable to keep up with pending messages. Stayed over %s for %s seconds",
PENDING_MSG_PEAK,
PENDING_MSG_PEAK_TIME,
)
self._cancel()
@callback
def _cancel(self):
"""Cancel the connection."""
self._handle_task.cancel()
self._writer_task.cancel()
async def async_handle(self) -> web.WebSocketResponse:
"""Handle a websocket response."""
request = self.request
wsock = self.wsock = web.WebSocketResponse(heartbeat=55)
await wsock.prepare(request)
self._logger.debug("Connected from %s", request.remote)
self._handle_task = asyncio.current_task()
@callback
def handle_hass_stop(event):
"""Cancel this connection."""
self._cancel()
unsub_stop = self.hass.bus.async_listen(
EVENT_HOMEASSISTANT_STOP, handle_hass_stop
)
        # As the webserver is now started before the start event,
        # we do not want to block for websocket responses
self._writer_task = asyncio.create_task(self._writer())
auth = AuthPhase(self._logger, self.hass, self._send_message, request)
connection = None
disconnect_warn = None
try:
self._send_message(auth_required_message())
# Auth Phase
try:
with async_timeout.timeout(10):
msg = await wsock.receive()
except asyncio.TimeoutError as err:
disconnect_warn = "Did not receive auth message within 10 seconds"
raise Disconnect from err
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
raise Disconnect
if msg.type != WSMsgType.TEXT:
disconnect_warn = "Received non-Text message."
raise Disconnect
try:
msg_data = msg.json()
except ValueError as err:
disconnect_warn = "Received invalid JSON."
raise Disconnect from err
self._logger.debug("Received %s", msg_data)
connection = await auth.async_handle(msg_data)
self.hass.data[DATA_CONNECTIONS] = (
self.hass.data.get(DATA_CONNECTIONS, 0) + 1
)
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_CONNECTED
)
# Command phase
while not wsock.closed:
msg = await wsock.receive()
if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSING):
break
if msg.type != WSMsgType.TEXT:
disconnect_warn = "Received non-Text message."
break
try:
msg_data = msg.json()
except ValueError:
disconnect_warn = "Received invalid JSON."
break
self._logger.debug("Received %s", msg_data)
connection.async_handle(msg_data)
except asyncio.CancelledError:
self._logger.info("Connection closed by client")
except Disconnect:
pass
except Exception: # pylint: disable=broad-except
self._logger.exception("Unexpected error inside websocket API")
finally:
unsub_stop()
if connection is not None:
connection.async_close()
try:
self._to_write.put_nowait(None)
# Make sure all error messages are written before closing
await self._writer_task
except asyncio.QueueFull:
self._writer_task.cancel()
await wsock.close()
if disconnect_warn is None:
self._logger.debug("Disconnected")
else:
self._logger.warning("Disconnected: %s", disconnect_warn)
if connection is not None:
self.hass.data[DATA_CONNECTIONS] -= 1
self.hass.helpers.dispatcher.async_dispatcher_send(
SIGNAL_WEBSOCKET_DISCONNECTED
)
return wsock
|
from time import time
import pytest
from homeassistant import config_entries, core
from homeassistant.components.almond import const
from homeassistant.config import async_process_ha_core_config
from homeassistant.const import EVENT_HOMEASSISTANT_START
from homeassistant.setup import async_setup_component
from homeassistant.util.dt import utcnow
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
@pytest.fixture(autouse=True)
def patch_hass_state(hass):
"""Mock the hass.state to be not_running."""
hass.state = core.CoreState.not_running
async def test_set_up_oauth_remote_url(hass, aioclient_mock):
"""Test we set up Almond to connect to HA if we have external url."""
entry = MockConfigEntry(
domain="almond",
data={
"type": const.TYPE_OAUTH2,
"auth_implementation": "local",
"host": "http://localhost:9999",
"token": {"expires_at": time() + 1000, "access_token": "abcd"},
},
)
entry.add_to_hass(hass)
with patch(
"homeassistant.helpers.config_entry_oauth2_flow.async_get_config_entry_implementation",
):
assert await async_setup_component(hass, "almond", {})
assert entry.state == config_entries.ENTRY_STATE_LOADED
hass.config.components.add("cloud")
with patch("homeassistant.components.almond.ALMOND_SETUP_DELAY", 0), patch(
"homeassistant.helpers.network.get_url",
return_value="https://example.nabu.casa",
), patch("pyalmond.WebAlmondAPI.async_create_device") as mock_create_device:
hass.bus.async_fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
async_fire_time_changed(hass, utcnow())
await hass.async_block_till_done()
assert len(mock_create_device.mock_calls) == 1
async def test_set_up_oauth_no_external_url(hass, aioclient_mock):
"""Test we do not set up Almond to connect to HA if we have no external url."""
entry = MockConfigEntry(
domain="almond",
data={
"type": const.TYPE_OAUTH2,
"auth_implementation": "local",
"host": "http://localhost:9999",
"token": {"expires_at": time() + 1000, "access_token": "abcd"},
},
)
entry.add_to_hass(hass)
with patch(
"homeassistant.helpers.config_entry_oauth2_flow.async_get_config_entry_implementation",
), patch("pyalmond.WebAlmondAPI.async_create_device") as mock_create_device:
assert await async_setup_component(hass, "almond", {})
assert entry.state == config_entries.ENTRY_STATE_LOADED
assert len(mock_create_device.mock_calls) == 0
async def test_set_up_hassio(hass, aioclient_mock):
"""Test we do not set up Almond to connect to HA if we use Hass.io."""
entry = MockConfigEntry(
domain="almond",
data={
"is_hassio": True,
"type": const.TYPE_LOCAL,
"host": "http://localhost:9999",
},
)
entry.add_to_hass(hass)
with patch("pyalmond.WebAlmondAPI.async_create_device") as mock_create_device:
assert await async_setup_component(hass, "almond", {})
assert entry.state == config_entries.ENTRY_STATE_LOADED
assert len(mock_create_device.mock_calls) == 0
async def test_set_up_local(hass, aioclient_mock):
"""Test we do not set up Almond to connect to HA if we use local."""
# Set up an internal URL, as Almond won't be set up if there is no URL available
await async_process_ha_core_config(
hass,
{"internal_url": "https://192.168.0.1"},
)
entry = MockConfigEntry(
domain="almond",
data={"type": const.TYPE_LOCAL, "host": "http://localhost:9999"},
)
entry.add_to_hass(hass)
with patch("pyalmond.WebAlmondAPI.async_create_device") as mock_create_device:
assert await async_setup_component(hass, "almond", {})
assert entry.state == config_entries.ENTRY_STATE_LOADED
assert len(mock_create_device.mock_calls) == 1
|
from collections import namedtuple
from datetime import timedelta
import logging
from getmac import get_mac_address
from nmap import PortScanner, PortScannerError
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOSTS
import homeassistant.helpers.config_validation as cv
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
CONF_EXCLUDE = "exclude"
# Interval in minutes to exclude devices from a scan while they are home
CONF_HOME_INTERVAL = "home_interval"
CONF_OPTIONS = "scan_options"
DEFAULT_OPTIONS = "-F --host-timeout 5s"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOSTS): cv.ensure_list,
vol.Required(CONF_HOME_INTERVAL, default=0): cv.positive_int,
vol.Optional(CONF_EXCLUDE, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_OPTIONS, default=DEFAULT_OPTIONS): cv.string,
}
)
def get_scanner(hass, config):
"""Validate the configuration and return a Nmap scanner."""
return NmapDeviceScanner(config[DOMAIN])
Device = namedtuple("Device", ["mac", "name", "ip", "last_update"])
class NmapDeviceScanner(DeviceScanner):
"""This class scans for devices using nmap."""
exclude = []
def __init__(self, config):
"""Initialize the scanner."""
self.last_results = []
self.hosts = config[CONF_HOSTS]
self.exclude = config[CONF_EXCLUDE]
minutes = config[CONF_HOME_INTERVAL]
self._options = config[CONF_OPTIONS]
self.home_interval = timedelta(minutes=minutes)
_LOGGER.debug("Scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
_LOGGER.debug("Nmap last results %s", self.last_results)
return [device.mac for device in self.last_results]
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [
result.name for result in self.last_results if result.mac == device
]
if filter_named:
return filter_named[0]
return None
def get_extra_attributes(self, device):
"""Return the IP of the given device."""
filter_ip = next(
(result.ip for result in self.last_results if result.mac == device), None
)
return {"ip": filter_ip}
def _update_info(self):
"""Scan the network for devices.
        Returns a boolean indicating whether the scan was successful.
"""
_LOGGER.debug("Scanning...")
scanner = PortScanner()
options = self._options
if self.home_interval:
boundary = dt_util.now() - self.home_interval
last_results = [
device for device in self.last_results if device.last_update > boundary
]
if last_results:
exclude_hosts = self.exclude + [device.ip for device in last_results]
else:
exclude_hosts = self.exclude
else:
last_results = []
exclude_hosts = self.exclude
if exclude_hosts:
options += f" --exclude {','.join(exclude_hosts)}"
try:
result = scanner.scan(hosts=" ".join(self.hosts), arguments=options)
except PortScannerError:
return False
now = dt_util.now()
for ipv4, info in result["scan"].items():
if info["status"]["state"] != "up":
continue
name = info["hostnames"][0]["name"] if info["hostnames"] else ipv4
            # The MAC address is only returned if nmap is run as root
mac = info["addresses"].get("mac") or get_mac_address(ip=ipv4)
if mac is None:
_LOGGER.info("No MAC address found for %s", ipv4)
continue
last_results.append(Device(mac.upper(), name, ipv4, now))
self.last_results = last_results
_LOGGER.debug("nmap scan successful")
return True
|
import sys
import pygogo as gogo
from difflib import unified_diff
from os import path as p
from io import StringIO, open
from timeit import default_timer as timer
from scripttest import TestFileEnvironment
sys.path.append('../riko')
try:
from riko.bado import _isasync
except ImportError:
_isasync = False
PARENT_DIR = p.abspath(p.dirname(p.dirname(__file__)))
def main(script, tests, verbose=False, stop=True):
""" Main method
    Exits with the number of failed tests (0 on success)
"""
failures = 0
logger = gogo.Gogo(__name__, verbose=verbose).logger
short_script = p.basename(script)
env = TestFileEnvironment('.scripttest')
start = timer()
for pos, test in enumerate(tests):
num = pos + 1
opts, arguments, expected = test
joined_opts = ' '.join(opts) if opts else ''
joined_args = '"%s"' % '" "'.join(arguments) if arguments else ''
command = "%s %s %s" % (script, joined_opts, joined_args)
short_command = "%s %s %s" % (short_script, joined_opts, joined_args)
result = env.run(command, cwd=PARENT_DIR, expect_stderr=True)
output = result.stdout
if isinstance(expected, bool):
text = StringIO(output).read()
outlines = [str(bool(text))]
checklines = StringIO(str(expected)).readlines()
elif p.isfile(expected):
outlines = StringIO(output).readlines()
with open(expected, encoding='utf-8') as f:
checklines = f.readlines()
else:
outlines = StringIO(output).readlines()
checklines = StringIO(expected).readlines()
args = [checklines, outlines]
kwargs = {'fromfile': 'expected', 'tofile': 'got'}
diffs = ''.join(unified_diff(*args, **kwargs))
if diffs:
failures += 1
msg = "ERROR! Output from test #%i:\n %s\n" % (num, short_command)
msg += "doesn't match:\n %s\n" % expected
            msg += diffs
else:
logger.debug(output)
msg = 'Scripttest #%i: %s ... ok' % (num, short_command)
logger.info(msg)
if stop and failures:
break
time = timer() - start
logger.info('%s' % '-' * 70)
end = 'FAILED (failures=%i)' % failures if failures else 'OK'
logger.info('Ran %i scripttests in %0.3fs\n\n%s' % (num, time, end))
sys.exit(failures)
if __name__ == '__main__':
demo = p.join(PARENT_DIR, 'bin', 'runpipe')
benchmark = p.join(PARENT_DIR, 'bin', 'benchmark')
text = 'Deadline to clear up health law eligibility near 682\n'
runpipe_tests = [
([], ['demo'], text),
([], ['simple1'], "'farechart'\n")]
if _isasync:
runpipe_tests += [
(['-a'], ['demo'], text),
(['-a'], ['simple1'], "'farechart'\n")]
main(demo, runpipe_tests)
main(benchmark, [([], [], '')])
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import functools
import logging
import math
import posixpath
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import cassandra
from six.moves import range
NUM_KEYS_PER_CORE = 2000000
PROPAGATION_WAIT_TIME = 30
# cassandra-stress command
WRITE_COMMAND = 'write'
COUNTER_WRITE_COMMAND = 'counter_write'
USER_COMMAND = 'user'
READ_COMMAND = 'read'
COUNTER_READ_COMMAND = 'counter_read'
MIXED_COMMAND = 'mixed'
PRELOAD_REQUIRED = (READ_COMMAND, COUNTER_READ_COMMAND, MIXED_COMMAND)
# cassandra-stress command [options]
flags.DEFINE_enum('cassandra_stress_command', WRITE_COMMAND,
[WRITE_COMMAND,
COUNTER_WRITE_COMMAND,
USER_COMMAND,
READ_COMMAND,
COUNTER_READ_COMMAND,
MIXED_COMMAND],
'cassandra-stress command to use.')
flags.DEFINE_integer('cassandra_stress_preload_num_keys', None,
'Number of keys to preload into cassandra database. '
'Read/counter_read/mixed modes require preloading '
'cassandra database. If not set, the number of the keys '
'preloaded will be the same as --num_keys for '
'read/counter_read/mixed mode, the same as the number of '
'loaders for write/counter_write/user mode.')
# Options for cassandra-stress
flags.DEFINE_integer('num_keys', 0,
'Number of keys used in cassandra-stress tool across '
'all loader vms. If unset, this benchmark will use '
'%s * NumCpusForBenchmark() on data nodes as the value.'
% NUM_KEYS_PER_CORE)
flags.DEFINE_integer('num_cassandra_stress_threads', 150,
'Number of threads used in cassandra-stress tool '
'on each loader node.')
flags.DEFINE_integer('cassandra_stress_replication_factor', 3,
'Number of replicas.')
flags.DEFINE_enum('cassandra_stress_consistency_level', 'QUORUM',
['ONE', 'QUORUM', 'LOCAL_ONE', 'LOCAL_QUORUM',
'EACH_QUORUM', 'ALL', 'ANY'],
'Set the consistency level to use during cassandra-stress.')
flags.DEFINE_integer('cassandra_stress_retries', 1000,
'Number of retries when error encountered during stress.')
# Use "./cassandra-stress help -pop" to get more details.
# [dist=DIST(?)]: Seeds are selected from this distribution
# EXP(min..max):
# An exponential distribution over the range [min..max]
# EXTREME(min..max,shape):
# An extreme value (Weibull) distribution over the range [min..max]
# QEXTREME(min..max,shape,quantas):
# An extreme value, split into quantas, within which the chance of
# selection is uniform
# GAUSSIAN(min..max,stdvrng):
# A gaussian/normal distribution, where mean=(min+max)/2, and stdev
# is (mean-min)/stdvrng
# GAUSSIAN(min..max,mean,stdev):
# A gaussian/normal distribution, with explicitly defined mean and stdev
# UNIFORM(min..max):
# A uniform distribution over the range [min, max]
# Preceding the name with ~ will invert the distribution,
# e.g. ~EXP(1..10) will yield 10 most, instead of least, often.
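# Illustrative example of the option string built in RunTestOnLoader below: with
# --cassandra_stress_population_distribution=UNIFORM and a per-vm key range of
# 1..1000000, the loader receives '-pop dist=UNIFORM(1..1000000)' (parentheses are
# shell-escaped in the actual command); with no distribution set it falls back to
# the sequential form '-pop seq=1..1000000'.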
flags.DEFINE_enum('cassandra_stress_population_distribution', None,
['EXP', 'EXTREME', 'QEXTREME', 'GAUSSIAN', 'UNIFORM',
'~EXP', '~EXTREME', '~QEXTREME', '~GAUSSIAN', '~UNIFORM'],
'The population distribution cassandra-stress uses. '
'By default, each loader vm is given a range of keys '
'[min, max], and loaders will read/insert keys sequentially '
'from min to max.')
flags.DEFINE_integer('cassandra_stress_population_size', None,
'The size of the population across all clients. '
                     'By default, the size of the population equals '
'max(num_keys,cassandra_stress_preload_num_keys).')
flags.DEFINE_list('cassandra_stress_population_parameters', [],
'Additional parameters to use with distribution. '
'This benchmark will calculate min, max for each '
'distribution. Some distributions need more parameters. '
'See: "./cassandra-stress help -pop" for more details. '
'Comma-separated list.')
# Options to use with cassandra-stress mixed mode, below flags only matter if
# --cassandra_stress_command=mixed.
flags.DEFINE_string('cassandra_stress_mixed_ratio', 'write=1,read=1',
'Read/write ratio of cassandra-stress. Only valid if '
'--cassandra_stress_command=mixed. By default, '
'50% read and 50% write.')
# Options to use with cassandra-stress user mode, below flags only matter if
# --cassandra_stress_command=user.
# http://www.datastax.com/dev/blog/improved-cassandra-2-1-stress-tool-benchmark-any-schema
flags.DEFINE_string('cassandra_stress_profile', '',
'Path to cassandra-stress profile file. '
'Only valid if --cassandra_stress_command=user.')
flags.DEFINE_string('cassandra_stress_operations', 'insert=1',
'Specify what operations (inserts and/or queries) to '
'run and the ratio of each operation. '
'Only valid if --cassandra_stress_command=user.')
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'cassandra_stress'
BENCHMARK_CONFIG = """
cassandra_stress:
description: Benchmark Cassandra using cassandra-stress
vm_groups:
workers:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: 3
client:
vm_spec: *default_single_core
"""
CASSANDRA_GROUP = 'workers'
CLIENT_GROUP = 'client'
SLEEP_BETWEEN_CHECK_IN_SECONDS = 5
TEMP_PROFILE_PATH = posixpath.join(vm_util.VM_TMP_DIR, 'profile.yaml')
# Results documentation:
# http://docs.datastax.com/en/cassandra/2.1/cassandra/tools/toolsCStressOutput_c.html
RESULTS_METRICS = (
'op rate', # Number of operations per second performed during the run.
'partition rate', # Number of partition operations per second performed
# during the run.
'row rate', # Number of row operations per second performed during the run.
'latency mean', # Average latency in milliseconds for each operation during
# that run.
'latency median', # Median latency in milliseconds for each operation
# during that run.
'latency 95th percentile', # 95% of the time the latency was less than
# the number displayed in the column.
'latency 99th percentile', # 99% of the time the latency was less than
# the number displayed in the column.
'latency 99.9th percentile', # 99.9% of the time the latency was less than
# the number displayed in the column.
'latency max', # Maximum latency in milliseconds.
'Total partitions', # Number of partitions.
'Total errors', # Number of errors.
'Total operation time') # Total operation time.
# Metrics are aggregated across client vms.
AGGREGATED_METRICS = {'op rate', 'partition rate', 'row rate',
'Total partitions', 'Total errors'}
# Maximum value will be chosen across client vms.
MAXIMUM_METRICS = {'latency max'}
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
cassandra.CheckPrerequisites()
if FLAGS.cassandra_stress_command == USER_COMMAND:
data.ResourcePath(FLAGS.cassandra_stress_profile)
def CheckMetadata(metadata):
"""Verify that metadata is valid.
Args:
metadata: dict. Contains metadata for this benchmark.
"""
if metadata['command'] in PRELOAD_REQUIRED:
if metadata['population_size'] > metadata['num_preload_keys']:
      raise errors.Benchmarks.PrepareException(
          'For %s modes, number of preloaded keys must be larger than or '
          'equal to population size.' % (PRELOAD_REQUIRED,))
def GenerateMetadataFromFlags(benchmark_spec):
"""Generate metadata from command-line flags.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
dict. Contains metadata for this benchmark.
"""
vm_dict = benchmark_spec.vm_groups
metadata = {}
if not FLAGS.num_keys:
metadata['num_keys'] = (
NUM_KEYS_PER_CORE * vm_dict[CASSANDRA_GROUP][0].NumCpusForBenchmark())
else:
metadata['num_keys'] = FLAGS.num_keys
if FLAGS['cassandra_stress_preload_num_keys'].present:
metadata['num_preload_keys'] = FLAGS.cassandra_stress_preload_num_keys
elif FLAGS.cassandra_stress_command in PRELOAD_REQUIRED:
metadata['num_preload_keys'] = metadata['num_keys']
else:
metadata['num_preload_keys'] = len(vm_dict[CLIENT_GROUP])
metadata.update({
'concurrent_reads': FLAGS.cassandra_concurrent_reads,
'num_data_nodes': len(vm_dict[CASSANDRA_GROUP]),
'num_loader_nodes': len(vm_dict[CLIENT_GROUP]),
'num_cassandra_stress_threads': FLAGS.num_cassandra_stress_threads,
'command': FLAGS.cassandra_stress_command,
'consistency_level': FLAGS.cassandra_stress_consistency_level,
'retries': FLAGS.cassandra_stress_retries,
'population_size': (FLAGS.cassandra_stress_population_size or
max(metadata['num_keys'],
metadata['num_preload_keys'])),
'population_dist': FLAGS.cassandra_stress_population_distribution,
'population_parameters': ','.join(
FLAGS.cassandra_stress_population_parameters)})
if FLAGS.cassandra_stress_command == USER_COMMAND:
metadata.update({
'profile': FLAGS.cassandra_stress_profile,
'operations': FLAGS.cassandra_stress_operations})
else:
if FLAGS.cassandra_stress_command == MIXED_COMMAND:
metadata['mixed_ratio'] = FLAGS.cassandra_stress_mixed_ratio
metadata['replication_factor'] = FLAGS.cassandra_stress_replication_factor
logging.info('Metadata: %s', metadata)
return metadata
def PreloadCassandraServer(benchmark_spec, metadata):
"""Preload cassandra cluster if necessary.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
metadata: dict. Contains metadata for this benchmark.
"""
if (FLAGS.cassandra_stress_command == 'read' or
FLAGS.cassandra_stress_command == 'mixed'):
cassandra_stress_command = 'write'
elif FLAGS.cassandra_stress_command == 'counter_read':
cassandra_stress_command = 'counter_write'
else:
cassandra_stress_command = FLAGS.cassandra_stress_command
logging.info('Preloading cassandra database with %s %s operations.',
metadata['num_preload_keys'], cassandra_stress_command)
RunCassandraStressTest(
benchmark_spec.vm_groups[CASSANDRA_GROUP],
benchmark_spec.vm_groups[CLIENT_GROUP],
metadata['num_preload_keys'], cassandra_stress_command)
  logging.info('Waiting %s seconds for keyspace to propagate.',
               PROPAGATION_WAIT_TIME)
time.sleep(PROPAGATION_WAIT_TIME)
def Prepare(benchmark_spec):
"""Install Cassandra and Java on target vms.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vm_dict = benchmark_spec.vm_groups
cassandra_vms = vm_dict[CASSANDRA_GROUP]
client_vms = vm_dict[CLIENT_GROUP]
logging.info('VM dictionary %s', vm_dict)
logging.info('Authorizing loader[0] permission to access all other vms.')
client_vms[0].AuthenticateVm()
logging.info('Preparing data files and Java on all vms.')
vm_util.RunThreaded(lambda vm: vm.Install('cassandra'), cassandra_vms)
vm_util.RunThreaded(lambda vm: vm.Install('cassandra_stress'), client_vms)
seed_vm = cassandra_vms[0]
configure = functools.partial(cassandra.Configure, seed_vms=[seed_vm])
vm_util.RunThreaded(configure, cassandra_vms)
cassandra.StartCluster(seed_vm, cassandra_vms[1:])
if FLAGS.cassandra_stress_command == USER_COMMAND:
for vm in client_vms:
vm.PushFile(FLAGS.cassandra_stress_profile,
TEMP_PROFILE_PATH)
metadata = GenerateMetadataFromFlags(benchmark_spec)
if metadata['num_preload_keys']:
CheckMetadata(metadata)
PreloadCassandraServer(benchmark_spec, metadata)
def _ResultFilePath(vm):
return posixpath.join(vm_util.VM_TMP_DIR,
vm.hostname + '.stress_results.txt')
def RunTestOnLoader(vm, loader_index, operations_per_vm, data_node_ips,
command, user_operations, population_per_vm,
population_dist, population_params):
"""Run Cassandra-stress test on loader node.
Args:
vm: The target vm.
loader_index: integer. The index of target vm in loader vms.
operations_per_vm: integer. The number of operations each loader vm
requests.
data_node_ips: list. List of IP addresses for all data nodes.
command: string. The cassandra-stress command to use.
user_operations: string. The operations to use with user mode.
population_per_vm: integer. Population per loader vm.
population_dist: string. The population distribution.
population_params: string. Representing additional population parameters.
"""
if command == USER_COMMAND:
command += ' profile={profile} ops\({ops}\)'.format(
profile=TEMP_PROFILE_PATH, ops=user_operations)
schema_option = ''
else:
if command == MIXED_COMMAND:
command += ' ratio\({ratio}\)'.format(
ratio=FLAGS.cassandra_stress_mixed_ratio)
# TODO: Support more complex replication strategy.
schema_option = '-schema replication\(factor={replication_factor}\)'.format(
replication_factor=FLAGS.cassandra_stress_replication_factor)
population_range = '%s..%s' % (loader_index * population_per_vm + 1,
(loader_index + 1) * population_per_vm)
if population_params:
population_params = '%s,%s' % (population_range, population_params)
else:
population_params = population_range
if population_dist:
population_dist = '-pop dist=%s\(%s\)' % (population_dist,
population_params)
else:
population_dist = '-pop seq=%s' % population_params
vm.RobustRemoteCommand(
'{cassandra} {command} cl={consistency_level} n={num_keys} '
'-node {nodes} {schema} {population_dist} '
'-log file={result_file} -rate threads={threads} '
'-errors retries={retries}'.format(
cassandra=cassandra.GetCassandraStressPath(vm),
command=command,
consistency_level=FLAGS.cassandra_stress_consistency_level,
num_keys=operations_per_vm,
nodes=','.join(data_node_ips),
schema=schema_option,
population_dist=population_dist,
result_file=_ResultFilePath(vm),
retries=FLAGS.cassandra_stress_retries,
threads=FLAGS.num_cassandra_stress_threads))
def RunCassandraStressTest(cassandra_vms, loader_vms, num_operations,
command, profile_operations='insert=1',
population_size=None, population_dist=None,
population_params=None):
"""Start all loader nodes as Cassandra clients and run stress test.
Args:
cassandra_vms: list. A list of vm objects. Cassandra servers.
    loader_vms: list. A list of vm objects. Cassandra clients.
    num_operations: integer. The number of operations cassandra-stress clients
      should issue.
command: string. The cassandra-stress command to use.
profile_operations: string. The operations to use with user mode.
population_size: integer. The population size.
population_dist: string. The population distribution.
population_params: string. Representing additional population parameters.
"""
num_loaders = len(loader_vms)
data_node_ips = [vm.internal_ip for vm in cassandra_vms]
population_size = population_size or num_operations
operations_per_vm = int(math.ceil(float(num_operations) / num_loaders))
population_per_vm = population_size / num_loaders
if num_operations % num_loaders:
    logging.warning(
'Total number of operations rounded to %s '
'(%s operations per loader vm).',
operations_per_vm * num_loaders, operations_per_vm)
logging.info('Executing the benchmark.')
args = [((loader_vms[i], i, operations_per_vm, data_node_ips,
command, profile_operations, population_per_vm,
population_dist, population_params), {})
for i in range(0, num_loaders)]
vm_util.RunThreaded(RunTestOnLoader, args)
def CollectResultFile(vm, results):
"""Collect result file on vm.
Args:
vm: The target vm.
results: A dictionary of lists. Each list contains results of a field
defined in RESULTS_METRICS collected from each loader machines.
"""
result_path = _ResultFilePath(vm)
vm.PullFile(vm_util.GetTempDir(), result_path)
resp, _ = vm.RemoteCommand('tail -n 20 ' + result_path)
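  # The tail of the result file contains lines such as (illustrative values):
  #   op rate                   : 14567
  #   Total operation time      : 00:05:32
  # Rates and latencies are parsed as floats; 'Total operation time' is converted
  # from HH:MM:SS into seconds below.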
for metric in RESULTS_METRICS:
value = regex_util.ExtractGroup(r'%s[\t ]+: ([\d\.:]+)' % metric, resp)
if metric == RESULTS_METRICS[-1]: # Total operation time
value = value.split(':')
results[metric].append(
int(value[0]) * 3600 + int(value[1]) * 60 + int(value[2]))
else:
results[metric].append(float(value))
def CollectResults(benchmark_spec, metadata):
"""Collect and parse test results.
Args:
benchmark_spec: The benchmark specification. Contains all data
that is required to run the benchmark.
metadata: dict. Contains metadata for this benchmark.
Returns:
A list of sample.Sample objects.
"""
logging.info('Gathering results.')
vm_dict = benchmark_spec.vm_groups
loader_vms = vm_dict[CLIENT_GROUP]
raw_results = collections.defaultdict(list)
args = [((vm, raw_results), {}) for vm in loader_vms]
vm_util.RunThreaded(CollectResultFile, args)
results = []
for metric in RESULTS_METRICS:
if metric in MAXIMUM_METRICS:
value = max(raw_results[metric])
else:
value = math.fsum(raw_results[metric])
if metric not in AGGREGATED_METRICS:
value = value / len(loader_vms)
if metric.startswith('latency'):
unit = 'ms'
elif metric.endswith('rate'):
unit = 'operations per second'
elif metric == 'Total operation time':
unit = 'seconds'
results.append(sample.Sample(metric, value, unit, metadata))
logging.info('Cassandra results:\n%s', results)
return results
def Run(benchmark_spec):
"""Run Cassandra on target vms.
Args:
benchmark_spec: The benchmark specification. Contains all data
that is required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
metadata = GenerateMetadataFromFlags(benchmark_spec)
RunCassandraStressTest(
benchmark_spec.vm_groups[CASSANDRA_GROUP],
benchmark_spec.vm_groups[CLIENT_GROUP],
metadata['num_keys'],
metadata['command'],
metadata.get('operations'),
metadata['population_size'],
metadata['population_dist'],
metadata['population_parameters'])
return CollectResults(benchmark_spec, metadata)
def Cleanup(benchmark_spec):
"""Cleanup function.
Args:
benchmark_spec: The benchmark specification. Contains all data
that is required to run the benchmark.
"""
vm_dict = benchmark_spec.vm_groups
cassandra_vms = vm_dict[CASSANDRA_GROUP]
vm_util.RunThreaded(cassandra.Stop, cassandra_vms)
vm_util.RunThreaded(cassandra.CleanNode, cassandra_vms)
|
import os
import shlex
import shutil
import sqlite3
import subprocess
import sys
import unittest
def run_cmd(cmd):
"""Run a command and return a tuple with (stdout, stderr, exit_code)"""
process = subprocess.Popen(shlex.split(cmd), stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
(stdout, stderr) = process.communicate()
return stdout, stderr, process.wait()
class TestMigrate(unittest.TestCase):
def setUp(self):
os.chdir(os.path.split(os.path.abspath(__file__))[0])
try:
os.remove('app1.db')
os.remove('app2.db')
except OSError:
pass
try:
shutil.rmtree('migrations')
except OSError:
pass
def tearDown(self):
try:
os.remove('app1.db')
os.remove('app2.db')
except OSError:
pass
try:
shutil.rmtree('migrations')
except OSError:
pass
def test_multidb_migrate_upgrade(self):
(o, e, s) = run_cmd(
sys.executable + ' app_multidb.py db init --multidb')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(sys.executable + ' app_multidb.py db migrate')
self.assertTrue(s == 0)
(o, e, s) = run_cmd(sys.executable + ' app_multidb.py db upgrade')
self.assertTrue(s == 0)
# ensure the tables are in the correct databases
conn1 = sqlite3.connect('app1.db')
c = conn1.cursor()
c.execute('select name from sqlite_master')
tables = c.fetchall()
conn1.close()
self.assertIn(('alembic_version',), tables)
self.assertIn(('user',), tables)
conn2 = sqlite3.connect('app2.db')
c = conn2.cursor()
c.execute('select name from sqlite_master')
tables = c.fetchall()
conn2.close()
self.assertIn(('alembic_version',), tables)
self.assertIn(('group',), tables)
# ensure the databases can be written to
(o, e, s) = run_cmd(sys.executable + ' app_multidb.py add')
self.assertTrue(s == 0)
# ensure the downgrade works
(o, e, s) = run_cmd(sys.executable + ' app_multidb.py db downgrade')
self.assertTrue(s == 0)
conn1 = sqlite3.connect('app1.db')
c = conn1.cursor()
c.execute('select name from sqlite_master')
tables = c.fetchall()
conn1.close()
self.assertIn(('alembic_version',), tables)
self.assertNotIn(('user',), tables)
conn2 = sqlite3.connect('app2.db')
c = conn2.cursor()
c.execute('select name from sqlite_master')
tables = c.fetchall()
conn2.close()
self.assertIn(('alembic_version',), tables)
self.assertNotIn(('group',), tables)
|
class BaseInstanceLoader:
"""
Base abstract implementation of instance loader.
"""
def __init__(self, resource, dataset=None):
self.resource = resource
self.dataset = dataset
def get_instance(self, row):
raise NotImplementedError
class ModelInstanceLoader(BaseInstanceLoader):
"""
Instance loader for Django model.
Lookup for model instance by ``import_id_fields``.
"""
def get_queryset(self):
return self.resource.get_queryset()
def get_instance(self, row):
try:
params = {}
for key in self.resource.get_import_id_fields():
field = self.resource.fields[key]
params[field.attribute] = field.clean(row)
if params:
return self.get_queryset().get(**params)
else:
return None
except self.resource._meta.model.DoesNotExist:
return None
class CachedInstanceLoader(ModelInstanceLoader):
"""
    Loads all possible model instances in the dataset to avoid hitting the
    database for every ``get_instance`` call.
    This instance loader works only when there is one ``import_id_fields``
    field.
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
pk_field_name = self.resource.get_import_id_fields()[0]
self.pk_field = self.resource.fields[pk_field_name]
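        # Collect the identifier of every row up front, fetch all matching instances
        # in a single query, and index them by that identifier so get_instance()
        # becomes a dictionary lookup instead of a per-row database query.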
ids = [self.pk_field.clean(row) for row in self.dataset.dict]
qs = self.get_queryset().filter(**{
"%s__in" % self.pk_field.attribute: ids
})
self.all_instances = {
self.pk_field.get_value(instance): instance
for instance in qs
}
def get_instance(self, row):
return self.all_instances.get(self.pk_field.clean(row))
|
import os
from nikola.plugin_categories import Task
from nikola import utils
class Sources(Task):
"""Copy page sources into the output."""
name = "render_sources"
def gen_tasks(self):
"""Publish the page sources into the output."""
kw = {
"translations": self.site.config["TRANSLATIONS"],
"output_folder": self.site.config["OUTPUT_FOLDER"],
"default_lang": self.site.config["DEFAULT_LANG"],
"show_untranslated_posts": self.site.config['SHOW_UNTRANSLATED_POSTS'],
}
self.site.scan_posts()
yield self.group_task()
if self.site.config['COPY_SOURCES']:
for lang in kw["translations"]:
for post in self.site.timeline:
if not kw["show_untranslated_posts"] and lang not in post.translated_to:
continue
if post.meta('password'):
continue
output_name = os.path.join(
kw['output_folder'], post.destination_path(
lang, post.source_ext(True)))
# do not publish PHP sources
if post.source_ext(True) == post.compiler.extension():
continue
source = post.translated_source_path(lang)
if source is not None and os.path.isfile(source):
yield {
'basename': 'render_sources',
'name': os.path.normpath(output_name),
'file_dep': [source],
'targets': [output_name],
'actions': [(utils.copy_file, (source, output_name))],
'clean': True,
'uptodate': [utils.config_changed(kw, 'nikola.plugins.task.sources')],
}
|
import io
import lxml.html
from pkg_resources import resource_filename
from mako.template import Template
from nikola.plugin_categories import Command
class CommandRst2Html(Command):
"""Compile reStructuredText to HTML, using Nikola architecture."""
name = "rst2html"
doc_usage = "infile"
doc_purpose = "compile reStructuredText to HTML files"
needs_config = False
def _execute(self, options, args):
"""Compile reStructuredText to standalone HTML files."""
compiler = self.site.plugin_manager.getPluginByName('rest', 'PageCompiler').plugin_object
if len(args) != 1:
print("This command takes only one argument (input file name).")
return 2
source = args[0]
with io.open(source, "r", encoding="utf-8-sig") as in_file:
data = in_file.read()
output, error_level, deps, shortcode_deps = compiler.compile_string(data, source, True)
rstcss_path = resource_filename('nikola', 'data/themes/base/assets/css/rst_base.css')
with io.open(rstcss_path, "r", encoding="utf-8-sig") as fh:
rstcss = fh.read()
template_path = resource_filename('nikola', 'plugins/command/rst2html/rst2html.tmpl')
template = Template(filename=template_path)
template_output = template.render(rstcss=rstcss, output=output)
parser = lxml.html.HTMLParser(remove_blank_text=True)
doc = lxml.html.document_fromstring(template_output, parser)
html = b'<!DOCTYPE html>\n' + lxml.html.tostring(doc, encoding='utf8', method='html', pretty_print=True)
print(html.decode('utf-8'))
if error_level < 3:
return 0
else:
return 1
|
import io
from typing import Generator
def find_box(
    segment: io.BytesIO, target_type: bytes, box_start: int = 0
) -> Generator[int, None, None]:
    """Generate locations of boxes (or sub-boxes if box_start provided) of given type."""
if box_start == 0:
box_end = segment.seek(0, io.SEEK_END)
segment.seek(0)
index = 0
else:
segment.seek(box_start)
box_end = box_start + int.from_bytes(segment.read(4), byteorder="big")
index = box_start + 8
    while True:
if index > box_end - 8: # End of box, not found
break
segment.seek(index)
box_header = segment.read(8)
if box_header[4:8] == target_type:
yield index
segment.seek(index)
index += int.from_bytes(box_header[0:4], byteorder="big")
def get_init(segment: io.BytesIO) -> bytes:
"""Get init section from fragmented mp4."""
moof_location = next(find_box(segment, b"moof"))
segment.seek(0)
return segment.read(moof_location)
def get_m4s(segment: io.BytesIO, sequence: int) -> bytes:
"""Get m4s section from fragmented mp4."""
moof_location = next(find_box(segment, b"moof"))
mfra_location = next(find_box(segment, b"mfra"))
segment.seek(moof_location)
return segment.read(mfra_location - moof_location)
def get_codec_string(segment: io.BytesIO) -> str:
"""Get RFC 6381 codec string."""
codecs = []
# Find moov
moov_location = next(find_box(segment, b"moov"))
# Find tracks
for trak_location in find_box(segment, b"trak", moov_location):
# Drill down to media info
mdia_location = next(find_box(segment, b"mdia", trak_location))
minf_location = next(find_box(segment, b"minf", mdia_location))
stbl_location = next(find_box(segment, b"stbl", minf_location))
stsd_location = next(find_box(segment, b"stsd", stbl_location))
# Get stsd box
segment.seek(stsd_location)
stsd_length = int.from_bytes(segment.read(4), byteorder="big")
segment.seek(stsd_location)
stsd_box = segment.read(stsd_length)
# Base Codec
codec = stsd_box[20:24].decode("utf-8")
# Handle H264
if (
codec in ("avc1", "avc2", "avc3", "avc4")
and stsd_length > 110
and stsd_box[106:110] == b"avcC"
):
profile = stsd_box[111:112].hex()
compatibility = stsd_box[112:113].hex()
# Cap level at 4.1 for compatibility with some Google Cast devices
level = hex(min(stsd_box[113], 41))[2:]
codec += "." + profile + compatibility + level
# Handle H265
elif (
codec in ("hev1", "hvc1")
and stsd_length > 110
and stsd_box[106:110] == b"hvcC"
):
tmp_byte = int.from_bytes(stsd_box[111:112], byteorder="big")
# Profile Space
codec += "."
profile_space_map = {0: "", 1: "A", 2: "B", 3: "C"}
profile_space = tmp_byte >> 6
codec += profile_space_map[profile_space]
general_profile_idc = tmp_byte & 31
codec += str(general_profile_idc)
# Compatibility
codec += "."
general_profile_compatibility = int.from_bytes(
stsd_box[112:116], byteorder="big"
)
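            # The codec string expects these 32 compatibility flags in reverse bit
            # order (ISO/IEC 14496-15 convention), hence the manual bit reversal below.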
reverse = 0
for i in range(0, 32):
reverse |= general_profile_compatibility & 1
if i == 31:
break
reverse <<= 1
general_profile_compatibility >>= 1
codec += hex(reverse)[2:]
# Tier Flag
if (tmp_byte & 32) >> 5 == 0:
codec += ".L"
else:
codec += ".H"
codec += str(int.from_bytes(stsd_box[122:123], byteorder="big"))
# Constraint String
has_byte = False
constraint_string = ""
for i in range(121, 115, -1):
gci = int.from_bytes(stsd_box[i : i + 1], byteorder="big")
if gci or has_byte:
constraint_string = "." + hex(gci)[2:] + constraint_string
has_byte = True
codec += constraint_string
# Handle Audio
elif codec == "mp4a":
oti = None
dsi = None
# Parse ES Descriptors
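            # Tag 0x04 typically marks the DecoderConfigDescriptor (the object type
            # indication follows it) and tag 0x05 the DecoderSpecificInfo (audio
            # object type); the 0x80 0x80 0x80 bytes are the long-form length encoding.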
oti_loc = stsd_box.find(b"\x04\x80\x80\x80")
if oti_loc > 0:
oti = stsd_box[oti_loc + 5 : oti_loc + 6].hex()
codec += f".{oti}"
dsi_loc = stsd_box.find(b"\x05\x80\x80\x80")
if dsi_loc > 0:
dsi_length = int.from_bytes(
stsd_box[dsi_loc + 4 : dsi_loc + 5], byteorder="big"
)
dsi_data = stsd_box[dsi_loc + 5 : dsi_loc + 5 + dsi_length]
dsi0 = int.from_bytes(dsi_data[0:1], byteorder="big")
dsi = (dsi0 & 248) >> 3
if dsi == 31 and len(dsi_data) >= 2:
dsi1 = int.from_bytes(dsi_data[1:2], byteorder="big")
dsi = 32 + ((dsi0 & 7) << 3) + ((dsi1 & 224) >> 5)
codec += f".{dsi}"
codecs.append(codec)
return ",".join(codecs)
|
import logging
import pypca
from serial import SerialException
from homeassistant.components.switch import ATTR_CURRENT_POWER_W, SwitchEntity
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
_LOGGER = logging.getLogger(__name__)
ATTR_TOTAL_ENERGY_KWH = "total_energy_kwh"
DEFAULT_NAME = "PCA 301"
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the PCA switch platform."""
if discovery_info is None:
return
serial_device = discovery_info["device"]
try:
pca = pypca.PCA(serial_device)
pca.open()
entities = [SmartPlugSwitch(pca, device) for device in pca.get_devices()]
add_entities(entities, True)
except SerialException as exc:
_LOGGER.warning("Unable to open serial port: %s", exc)
return
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, pca.close)
pca.start_scan()
class SmartPlugSwitch(SwitchEntity):
"""Representation of a PCA Smart Plug switch."""
def __init__(self, pca, device_id):
"""Initialize the switch."""
self._device_id = device_id
self._name = "PCA 301"
self._state = None
self._available = True
self._emeter_params = {}
self._pca = pca
@property
def name(self):
"""Return the name of the Smart Plug, if any."""
return self._name
@property
def available(self) -> bool:
"""Return if switch is available."""
return self._available
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._pca.turn_on(self._device_id)
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._pca.turn_off(self._device_id)
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
return self._emeter_params
def update(self):
"""Update the PCA switch's state."""
try:
self._emeter_params[
ATTR_CURRENT_POWER_W
] = f"{self._pca.get_current_power(self._device_id):.1f}"
self._emeter_params[
ATTR_TOTAL_ENERGY_KWH
] = f"{self._pca.get_total_consumption(self._device_id):.2f}"
self._available = True
self._state = self._pca.get_state(self._device_id)
        except OSError as ex:
if self._available:
_LOGGER.warning("Could not read state for %s: %s", self.name, ex)
self._available = False
|
import logging
from pycoolmasternet_async import CoolMasterNet
from homeassistant.components.climate import SCAN_INTERVAL
from homeassistant.const import CONF_HOST, CONF_PORT
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator, UpdateFailed
from .const import DATA_COORDINATOR, DATA_INFO, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass, config):
"""Set up Coolmaster components."""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass, entry):
"""Set up Coolmaster from a config entry."""
host = entry.data[CONF_HOST]
port = entry.data[CONF_PORT]
coolmaster = CoolMasterNet(host, port)
try:
info = await coolmaster.info()
if not info:
raise ConfigEntryNotReady
except (OSError, ConnectionRefusedError, TimeoutError) as error:
raise ConfigEntryNotReady() from error
coordinator = CoolmasterDataUpdateCoordinator(hass, coolmaster)
await coordinator.async_refresh()
hass.data[DOMAIN][entry.entry_id] = {
DATA_INFO: info,
DATA_COORDINATOR: coordinator,
}
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, "climate")
)
return True
async def async_unload_entry(hass, entry):
"""Unload a Coolmaster config entry."""
unload_ok = await hass.config_entries.async_forward_entry_unload(entry, "climate")
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
return unload_ok
class CoolmasterDataUpdateCoordinator(DataUpdateCoordinator):
"""Class to manage fetching Coolmaster data."""
def __init__(self, hass, coolmaster):
"""Initialize global Coolmaster data updater."""
self._coolmaster = coolmaster
super().__init__(
hass,
_LOGGER,
name=DOMAIN,
update_interval=SCAN_INTERVAL,
)
async def _async_update_data(self):
"""Fetch data from Coolmaster."""
try:
return await self._coolmaster.status()
except (OSError, ConnectionRefusedError, TimeoutError) as error:
raise UpdateFailed from error
|
import logging
import threading
import aprslib
from aprslib import ConnectionError as AprsConnectionError, LoginError
import geopy.distance
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
CONF_HOST,
CONF_PASSWORD,
CONF_TIMEOUT,
CONF_USERNAME,
EVENT_HOMEASSISTANT_STOP,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import slugify
DOMAIN = "aprs"
_LOGGER = logging.getLogger(__name__)
ATTR_ALTITUDE = "altitude"
ATTR_COURSE = "course"
ATTR_COMMENT = "comment"
ATTR_FROM = "from"
ATTR_FORMAT = "format"
ATTR_POS_AMBIGUITY = "posambiguity"
ATTR_SPEED = "speed"
CONF_CALLSIGNS = "callsigns"
DEFAULT_HOST = "rotate.aprs2.net"
DEFAULT_PASSWORD = "-1"
DEFAULT_TIMEOUT = 30.0
FILTER_PORT = 14580
MSG_FORMATS = ["compressed", "uncompressed", "mic-e"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_CALLSIGNS): cv.ensure_list,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PASSWORD, default=DEFAULT_PASSWORD): cv.string,
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): vol.Coerce(float),
}
)
def make_filter(callsigns: list) -> str:
"""Make a server-side filter from a list of callsigns."""
return " ".join(f"b/{sign.upper()}" for sign in callsigns)
def gps_accuracy(gps, posambiguity: int) -> int:
"""Calculate the GPS accuracy based on APRS posambiguity."""
pos_a_map = {0: 0, 1: 1 / 600, 2: 1 / 60, 3: 1 / 6, 4: 1}
if posambiguity in pos_a_map:
degrees = pos_a_map[posambiguity]
gps2 = (gps[0], gps[1] + degrees)
dist_m = geopy.distance.distance(gps, gps2).m
accuracy = round(dist_m)
else:
message = f"APRS position ambiguity must be 0-4, not '{posambiguity}'."
raise ValueError(message)
return accuracy
def setup_scanner(hass, config, see, discovery_info=None):
"""Set up the APRS tracker."""
callsigns = config.get(CONF_CALLSIGNS)
server_filter = make_filter(callsigns)
callsign = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
host = config.get(CONF_HOST)
timeout = config.get(CONF_TIMEOUT)
aprs_listener = AprsListenerThread(callsign, password, host, server_filter, see)
def aprs_disconnect(event):
"""Stop the APRS connection."""
aprs_listener.stop()
aprs_listener.start()
hass.bus.listen_once(EVENT_HOMEASSISTANT_STOP, aprs_disconnect)
if not aprs_listener.start_event.wait(timeout):
_LOGGER.error("Timeout waiting for APRS to connect")
return
if not aprs_listener.start_success:
_LOGGER.error(aprs_listener.start_message)
return
_LOGGER.debug(aprs_listener.start_message)
return True
class AprsListenerThread(threading.Thread):
"""APRS message listener."""
def __init__(
self, callsign: str, password: str, host: str, server_filter: str, see
):
"""Initialize the class."""
super().__init__()
self.callsign = callsign
self.host = host
self.start_event = threading.Event()
self.see = see
self.server_filter = server_filter
self.start_message = ""
self.start_success = False
self.ais = aprslib.IS(
self.callsign, passwd=password, host=self.host, port=FILTER_PORT
)
def start_complete(self, success: bool, message: str):
"""Complete startup process."""
self.start_message = message
self.start_success = success
self.start_event.set()
def run(self):
"""Connect to APRS and listen for data."""
self.ais.set_filter(self.server_filter)
try:
_LOGGER.info(
"Opening connection to %s with callsign %s", self.host, self.callsign
)
self.ais.connect()
self.start_complete(
True, f"Connected to {self.host} with callsign {self.callsign}."
)
self.ais.consumer(callback=self.rx_msg, immortal=True)
except (AprsConnectionError, LoginError) as err:
self.start_complete(False, str(err))
except OSError:
_LOGGER.info(
"Closing connection to %s with callsign %s", self.host, self.callsign
)
def stop(self):
"""Close the connection to the APRS network."""
self.ais.close()
def rx_msg(self, msg: dict):
"""Receive message and process if position."""
_LOGGER.debug("APRS message received: %s", str(msg))
if msg[ATTR_FORMAT] in MSG_FORMATS:
dev_id = slugify(msg[ATTR_FROM])
lat = msg[ATTR_LATITUDE]
lon = msg[ATTR_LONGITUDE]
attrs = {}
if ATTR_POS_AMBIGUITY in msg:
pos_amb = msg[ATTR_POS_AMBIGUITY]
try:
attrs[ATTR_GPS_ACCURACY] = gps_accuracy((lat, lon), pos_amb)
except ValueError:
_LOGGER.warning(
"APRS message contained invalid posambiguity: %s", str(pos_amb)
)
for attr in [ATTR_ALTITUDE, ATTR_COMMENT, ATTR_COURSE, ATTR_SPEED]:
if attr in msg:
attrs[attr] = msg[attr]
self.see(dev_id=dev_id, gps=(lat, lon), attributes=attrs)
|
import sys
from .compression import decompress
from .exceptions import reraise, MessageStateError
from .serialization import loads
from .utils.functional import dictfilter
__all__ = ('Message',)
ACK_STATES = {'ACK', 'REJECTED', 'REQUEUED'}
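# A message in any of these states has already been finally handled and must not be
# acked, requeued or rejected again (see the `acknowledged` property below).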
IS_PYPY = hasattr(sys, 'pypy_version_info')
class Message:
"""Base class for received messages.
Keyword Arguments:
channel (ChannelT): If message was received, this should be the
channel that the message was received on.
body (str): Message body.
delivery_mode (bool): Set custom delivery mode.
Defaults to :attr:`delivery_mode`.
priority (int): Message priority, 0 to broker configured
max priority, where higher is better.
content_type (str): The messages content_type. If content_type
is set, no serialization occurs as it is assumed this is either
a binary object, or you've done your own serialization.
Leave blank if using built-in serialization as our library
properly sets content_type.
content_encoding (str): The character set in which this object
is encoded. Use "binary" if sending in raw binary objects.
Leave blank if using built-in serialization as our library
properly sets content_encoding.
properties (Dict): Message properties.
headers (Dict): Message headers.
"""
MessageStateError = MessageStateError
errors = None
if not IS_PYPY: # pragma: no cover
__slots__ = (
'_state', 'channel', 'delivery_tag',
'content_type', 'content_encoding',
'delivery_info', 'headers', 'properties',
'body', '_decoded_cache', 'accept', '__dict__',
)
def __init__(self, body=None, delivery_tag=None,
content_type=None, content_encoding=None, delivery_info=None,
properties=None, headers=None, postencode=None,
accept=None, channel=None, **kwargs):
delivery_info = {} if not delivery_info else delivery_info
self.errors = [] if self.errors is None else self.errors
self.channel = channel
self.delivery_tag = delivery_tag
self.content_type = content_type
self.content_encoding = content_encoding
self.delivery_info = delivery_info
self.headers = headers or {}
self.properties = properties or {}
self._decoded_cache = None
self._state = 'RECEIVED'
self.accept = accept
compression = self.headers.get('compression')
if not self.errors and compression:
try:
body = decompress(body, compression)
except Exception:
self.errors.append(sys.exc_info())
if not self.errors and postencode and isinstance(body, str):
try:
body = body.encode(postencode)
except Exception:
self.errors.append(sys.exc_info())
self.body = body
def _reraise_error(self, callback=None):
try:
reraise(*self.errors[0])
except Exception as exc:
if not callback:
raise
callback(self, exc)
def ack(self, multiple=False):
"""Acknowledge this message as being processed.
This will remove the message from the queue.
Raises:
MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.channel is None:
raise self.MessageStateError(
'This message does not have a receiving channel')
if self.channel.no_ack_consumers is not None:
try:
consumer_tag = self.delivery_info['consumer_tag']
except KeyError:
pass
else:
if consumer_tag in self.channel.no_ack_consumers:
return
if self.acknowledged:
raise self.MessageStateError(
'Message already acknowledged with state: {0._state}'.format(
self))
self.channel.basic_ack(self.delivery_tag, multiple=multiple)
self._state = 'ACK'
def ack_log_error(self, logger, errors, multiple=False):
try:
self.ack(multiple=multiple)
except errors as exc:
logger.critical("Couldn't ack %r, reason:%r",
self.delivery_tag, exc, exc_info=True)
def reject_log_error(self, logger, errors, requeue=False):
try:
self.reject(requeue=requeue)
except errors as exc:
logger.critical("Couldn't reject %r, reason: %r",
self.delivery_tag, exc, exc_info=True)
def reject(self, requeue=False):
"""Reject this message.
The message will be discarded by the server.
Raises:
MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.channel is None:
raise self.MessageStateError(
'This message does not have a receiving channel')
if self.acknowledged:
raise self.MessageStateError(
'Message already acknowledged with state: {0._state}'.format(
self))
self.channel.basic_reject(self.delivery_tag, requeue=requeue)
self._state = 'REJECTED'
def requeue(self):
"""Reject this message and put it back on the queue.
Warning:
You must not use this method as a means of selecting messages
to process.
Raises:
MessageStateError: If the message has already been
acknowledged/requeued/rejected.
"""
if self.channel is None:
raise self.MessageStateError(
'This message does not have a receiving channel')
if self.acknowledged:
raise self.MessageStateError(
'Message already acknowledged with state: {0._state}'.format(
self))
self.channel.basic_reject(self.delivery_tag, requeue=True)
self._state = 'REQUEUED'
def decode(self):
"""Deserialize the message body.
        Returns the original Python structure sent by the publisher.
Note:
The return value is memoized, use `_decode` to force
re-evaluation.
"""
if not self._decoded_cache:
self._decoded_cache = self._decode()
return self._decoded_cache
def _decode(self):
return loads(self.body, self.content_type,
self.content_encoding, accept=self.accept)
@property
def acknowledged(self):
"""Set to true if the message has been acknowledged."""
return self._state in ACK_STATES
@property
def payload(self):
"""The decoded message body."""
return self._decoded_cache if self._decoded_cache else self.decode()
def __repr__(self):
return '<{} object at {:#x} with details {!r}>'.format(
type(self).__name__, id(self), dictfilter(
state=self._state,
content_type=self.content_type,
delivery_tag=self.delivery_tag,
body_length=len(self.body) if self.body is not None else None,
properties=dictfilter(
correlation_id=self.properties.get('correlation_id'),
type=self.properties.get('type'),
),
delivery_info=dictfilter(
exchange=self.delivery_info.get('exchange'),
routing_key=self.delivery_info.get('routing_key'),
),
),
)
|
from homeassistant.components import switch
from homeassistant.components.switch import SwitchEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .const import DATA_REMOVE_DISCOVER_COMPONENT, DOMAIN as TASMOTA_DOMAIN
from .discovery import TASMOTA_DISCOVERY_ENTITY_NEW
from .mixins import TasmotaAvailability, TasmotaDiscoveryUpdate
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up Tasmota switch dynamically through discovery."""
@callback
def async_discover(tasmota_entity, discovery_hash):
"""Discover and add a Tasmota switch."""
async_add_entities(
[
TasmotaSwitch(
tasmota_entity=tasmota_entity, discovery_hash=discovery_hash
)
]
)
hass.data[
DATA_REMOVE_DISCOVER_COMPONENT.format(switch.DOMAIN)
] = async_dispatcher_connect(
hass,
TASMOTA_DISCOVERY_ENTITY_NEW.format(switch.DOMAIN, TASMOTA_DOMAIN),
async_discover,
)
class TasmotaSwitch(
TasmotaAvailability,
TasmotaDiscoveryUpdate,
SwitchEntity,
):
"""Representation of a Tasmota switch."""
def __init__(self, **kwds):
"""Initialize the Tasmota switch."""
self._state = False
super().__init__(
discovery_update=self.discovery_update,
**kwds,
)
@property
def is_on(self):
"""Return true if device is on."""
return self._state
async def async_turn_on(self, **kwargs):
"""Turn the device on."""
self._tasmota_entity.set_state(True)
async def async_turn_off(self, **kwargs):
"""Turn the device off."""
self._tasmota_entity.set_state(False)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import os
from absl import flags
from absl.testing import parameterized
from compare_gan import datasets
from compare_gan import test_utils
from compare_gan.gans import consts as c
from compare_gan.gans import loss_lib
from compare_gan.gans import penalty_lib
from compare_gan.gans.modular_gan import ModularGAN
import gin
import numpy as np
from six.moves import range
import tensorflow as tf
FLAGS = flags.FLAGS
TEST_ARCHITECTURES = [c.INFOGAN_ARCH, c.DCGAN_ARCH, c.RESNET_CIFAR_ARCH,
c.SNDCGAN_ARCH, c.RESNET5_ARCH]
TEST_LOSSES = [loss_lib.non_saturating, loss_lib.wasserstein,
loss_lib.least_squares, loss_lib.hinge]
TEST_PENALTIES = [penalty_lib.no_penalty, penalty_lib.dragan_penalty,
penalty_lib.wgangp_penalty, penalty_lib.l2_penalty]
GENERATOR_TRAINED_IN_STEPS = [
# disc_iters=1.
[True, True, True],
# disc_iters=2.
[True, False, True],
# disc_iters=3.
[True, False, False],
]
class ModularGanTest(parameterized.TestCase, test_utils.CompareGanTestCase):
def setUp(self):
super(ModularGanTest, self).setUp()
self.model_dir = self._get_empty_model_dir()
self.run_config = tf.contrib.tpu.RunConfig(
model_dir=self.model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
def _runSingleTrainingStep(self, architecture, loss_fn, penalty_fn):
parameters = {
"architecture": architecture,
"lambda": 1,
"z_dim": 128,
}
with gin.unlock_config():
gin.bind_parameter("penalty.fn", penalty_fn)
gin.bind_parameter("loss.fn", loss_fn)
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
model_dir=self.model_dir,
conditional="biggan" in architecture)
estimator = gan.as_estimator(self.run_config, batch_size=2, use_tpu=False)
estimator.train(gan.input_fn, steps=1)
@parameterized.parameters(TEST_ARCHITECTURES)
def testSingleTrainingStepArchitectures(self, architecture):
self._runSingleTrainingStep(architecture, loss_lib.hinge,
penalty_lib.no_penalty)
@parameterized.parameters(TEST_LOSSES)
def testSingleTrainingStepLosses(self, loss_fn):
self._runSingleTrainingStep(c.RESNET_CIFAR_ARCH, loss_fn,
penalty_lib.no_penalty)
@parameterized.parameters(TEST_PENALTIES)
def testSingleTrainingStepPenalties(self, penalty_fn):
self._runSingleTrainingStep(c.RESNET_CIFAR_ARCH, loss_lib.hinge, penalty_fn)
def testSingleTrainingStepWithJointGenForDisc(self):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 120,
"disc_iters": 2,
}
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
model_dir=self.model_dir,
experimental_joint_gen_for_disc=True,
experimental_force_graph_unroll=True,
conditional=True)
estimator = gan.as_estimator(self.run_config, batch_size=2, use_tpu=False)
estimator.train(gan.input_fn, steps=1)
@parameterized.parameters([1, 2, 3])
def testSingleTrainingStepDiscItersWithEma(self, disc_iters):
parameters = {
"architecture": c.DUMMY_ARCH,
"lambda": 1,
"z_dim": 128,
"dics_iters": disc_iters,
}
gin.bind_parameter("ModularGAN.g_use_ema", True)
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
model_dir=self.model_dir)
estimator = gan.as_estimator(self.run_config, batch_size=2, use_tpu=False)
estimator.train(gan.input_fn, steps=1)
# Check for moving average variables in checkpoint.
checkpoint_path = tf.train.latest_checkpoint(self.model_dir)
ema_vars = sorted([v[0] for v in tf.train.list_variables(checkpoint_path)
if v[0].endswith("ExponentialMovingAverage")])
tf.logging.info("ema_vars=%s", ema_vars)
expected_ema_vars = sorted([
"generator/fc_noise/kernel/ExponentialMovingAverage",
"generator/fc_noise/bias/ExponentialMovingAverage",
])
self.assertAllEqual(ema_vars, expected_ema_vars)
@parameterized.parameters(
itertools.product([1, 2, 3], [False, True])
)
def testDiscItersIsUsedCorrectly(self, disc_iters, use_tpu):
parameters = {
"architecture": c.DUMMY_ARCH,
"disc_iters": disc_iters,
"lambda": 1,
"z_dim": 128,
}
run_config = tf.contrib.tpu.RunConfig(
model_dir=self.model_dir,
save_checkpoints_steps=1,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
dataset = datasets.get_dataset("cifar10")
gan = ModularGAN(
dataset=dataset,
parameters=parameters,
model_dir=self.model_dir)
estimator = gan.as_estimator(run_config, batch_size=2, use_tpu=use_tpu)
estimator.train(gan.input_fn, steps=3)
disc_step_values = []
gen_step_values = []
for step in range(4):
basename = os.path.join(self.model_dir, "model.ckpt-{}".format(step))
self.assertTrue(tf.gfile.Exists(basename + ".index"))
ckpt = tf.train.load_checkpoint(basename)
disc_step_values.append(ckpt.get_tensor("global_step_disc"))
gen_step_values.append(ckpt.get_tensor("global_step"))
expected_disc_steps = np.arange(4) * disc_iters
self.assertAllEqual(disc_step_values, expected_disc_steps)
self.assertAllEqual(gen_step_values, [0, 1, 2, 3])
if __name__ == "__main__":
tf.test.main()
|
from collections import OrderedDict
import logging
from typing import Any, Mapping, MutableMapping, Optional
import voluptuous as vol
from homeassistant.components import websocket_api
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.entity import async_generate_entity_id
from homeassistant.loader import bind_hass
from homeassistant.util import slugify
import homeassistant.util.dt as dt_util
# mypy: allow-untyped-calls, allow-untyped-defs
ATTR_CREATED_AT = "created_at"
ATTR_MESSAGE = "message"
ATTR_NOTIFICATION_ID = "notification_id"
ATTR_TITLE = "title"
ATTR_STATUS = "status"
DOMAIN = "persistent_notification"
ENTITY_ID_FORMAT = DOMAIN + ".{}"
EVENT_PERSISTENT_NOTIFICATIONS_UPDATED = "persistent_notifications_updated"
SERVICE_CREATE = "create"
SERVICE_DISMISS = "dismiss"
SERVICE_MARK_READ = "mark_read"
SCHEMA_SERVICE_CREATE = vol.Schema(
{
vol.Required(ATTR_MESSAGE): cv.template,
vol.Optional(ATTR_TITLE): cv.template,
vol.Optional(ATTR_NOTIFICATION_ID): cv.string,
}
)
SCHEMA_SERVICE_DISMISS = vol.Schema({vol.Required(ATTR_NOTIFICATION_ID): cv.string})
SCHEMA_SERVICE_MARK_READ = vol.Schema({vol.Required(ATTR_NOTIFICATION_ID): cv.string})
DEFAULT_OBJECT_ID = "notification"
_LOGGER = logging.getLogger(__name__)
STATE = "notifying"
STATUS_UNREAD = "unread"
STATUS_READ = "read"
@bind_hass
def create(hass, message, title=None, notification_id=None):
"""Generate a notification."""
hass.add_job(async_create, hass, message, title, notification_id)
@bind_hass
def dismiss(hass, notification_id):
"""Remove a notification."""
hass.add_job(async_dismiss, hass, notification_id)
@callback
@bind_hass
def async_create(
hass: HomeAssistant,
message: str,
title: Optional[str] = None,
notification_id: Optional[str] = None,
) -> None:
"""Generate a notification."""
data = {
key: value
for key, value in [
(ATTR_TITLE, title),
(ATTR_MESSAGE, message),
(ATTR_NOTIFICATION_ID, notification_id),
]
if value is not None
}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_CREATE, data))
@callback
@bind_hass
def async_dismiss(hass: HomeAssistant, notification_id: str) -> None:
"""Remove a notification."""
data = {ATTR_NOTIFICATION_ID: notification_id}
hass.async_create_task(hass.services.async_call(DOMAIN, SERVICE_DISMISS, data))
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Set up the persistent notification component."""
persistent_notifications: MutableMapping[str, MutableMapping] = OrderedDict()
hass.data[DOMAIN] = {"notifications": persistent_notifications}
@callback
def create_service(call):
"""Handle a create notification service call."""
title = call.data.get(ATTR_TITLE)
message = call.data.get(ATTR_MESSAGE)
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
if notification_id is not None:
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
else:
entity_id = async_generate_entity_id(
ENTITY_ID_FORMAT, DEFAULT_OBJECT_ID, hass=hass
)
notification_id = entity_id.split(".")[1]
attr = {}
if title is not None:
try:
title.hass = hass
title = title.async_render(parse_result=False)
except TemplateError as ex:
_LOGGER.error("Error rendering title %s: %s", title, ex)
title = title.template
attr[ATTR_TITLE] = title
try:
message.hass = hass
message = message.async_render(parse_result=False)
except TemplateError as ex:
_LOGGER.error("Error rendering message %s: %s", message, ex)
message = message.template
attr[ATTR_MESSAGE] = message
hass.states.async_set(entity_id, STATE, attr)
# Store notification and fire event
# This will eventually replace state machine storage
persistent_notifications[entity_id] = {
ATTR_MESSAGE: message,
ATTR_NOTIFICATION_ID: notification_id,
ATTR_STATUS: STATUS_UNREAD,
ATTR_TITLE: title,
ATTR_CREATED_AT: dt_util.utcnow(),
}
hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
@callback
def dismiss_service(call):
"""Handle the dismiss notification service call."""
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
if entity_id not in persistent_notifications:
return
hass.states.async_remove(entity_id, call.context)
del persistent_notifications[entity_id]
hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
@callback
def mark_read_service(call):
"""Handle the mark_read notification service call."""
notification_id = call.data.get(ATTR_NOTIFICATION_ID)
entity_id = ENTITY_ID_FORMAT.format(slugify(notification_id))
if entity_id not in persistent_notifications:
_LOGGER.error(
"Marking persistent_notification read failed: "
"Notification ID %s not found",
notification_id,
)
return
persistent_notifications[entity_id][ATTR_STATUS] = STATUS_READ
hass.bus.async_fire(EVENT_PERSISTENT_NOTIFICATIONS_UPDATED)
hass.services.async_register(
DOMAIN, SERVICE_CREATE, create_service, SCHEMA_SERVICE_CREATE
)
hass.services.async_register(
DOMAIN, SERVICE_DISMISS, dismiss_service, SCHEMA_SERVICE_DISMISS
)
hass.services.async_register(
DOMAIN, SERVICE_MARK_READ, mark_read_service, SCHEMA_SERVICE_MARK_READ
)
hass.components.websocket_api.async_register_command(websocket_get_notifications)
return True
@callback
@websocket_api.websocket_command({vol.Required("type"): "persistent_notification/get"})
def websocket_get_notifications(
hass: HomeAssistant,
connection: websocket_api.ActiveConnection,
msg: Mapping[str, Any],
) -> None:
"""Return a list of persistent_notifications."""
connection.send_message(
websocket_api.result_message(
msg["id"],
[
{
key: data[key]
for key in (
ATTR_NOTIFICATION_ID,
ATTR_MESSAGE,
ATTR_STATUS,
ATTR_TITLE,
ATTR_CREATED_AT,
)
}
for data in hass.data[DOMAIN]["notifications"].values()
],
)
)
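# A minimal usage sketch of the helpers above: assuming a running Home
# Assistant instance, create a notification and later dismiss it again.
# The message, title and notification_id values are illustrative only.
def _example_usage(hass: HomeAssistant) -> None:
    """Create and later dismiss an example notification (call from the event loop)."""
    # Schedules a persistent_notification.create service call.
    async_create(hass, "Backup finished", title="Backup", notification_id="backup_done")
    # Schedules the matching persistent_notification.dismiss call.
    async_dismiss(hass, "backup_done")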
|
import logging
from nessclient import ArmingState
import homeassistant.components.alarm_control_panel as alarm
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_TRIGGER,
)
from homeassistant.const import (
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_PENDING,
STATE_ALARM_TRIGGERED,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from . import DATA_NESS, SIGNAL_ARMING_STATE_CHANGED
_LOGGER = logging.getLogger(__name__)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Ness Alarm alarm control panel devices."""
if discovery_info is None:
return
device = NessAlarmPanel(hass.data[DATA_NESS], "Alarm Panel")
async_add_entities([device])
class NessAlarmPanel(alarm.AlarmControlPanelEntity):
"""Representation of a Ness alarm panel."""
def __init__(self, client, name):
"""Initialize the alarm panel."""
self._client = client
self._name = name
self._state = None
async def async_added_to_hass(self):
"""Register callbacks."""
self.async_on_remove(
async_dispatcher_connect(
self.hass, SIGNAL_ARMING_STATE_CHANGED, self._handle_arming_state_change
)
)
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def code_format(self):
"""Return the regex for code format or None if no code is required."""
return alarm.FORMAT_NUMBER
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_TRIGGER
async def async_alarm_disarm(self, code=None):
"""Send disarm command."""
await self._client.disarm(code)
async def async_alarm_arm_away(self, code=None):
"""Send arm away command."""
await self._client.arm_away(code)
async def async_alarm_arm_home(self, code=None):
"""Send arm home command."""
await self._client.arm_home(code)
async def async_alarm_trigger(self, code=None):
"""Send trigger/panic command."""
await self._client.panic(code)
@callback
def _handle_arming_state_change(self, arming_state):
"""Handle arming state update."""
if arming_state == ArmingState.UNKNOWN:
self._state = None
elif arming_state == ArmingState.DISARMED:
self._state = STATE_ALARM_DISARMED
elif arming_state == ArmingState.ARMING:
self._state = STATE_ALARM_ARMING
elif arming_state == ArmingState.EXIT_DELAY:
self._state = STATE_ALARM_ARMING
elif arming_state == ArmingState.ARMED:
self._state = STATE_ALARM_ARMED_AWAY
elif arming_state == ArmingState.ENTRY_DELAY:
self._state = STATE_ALARM_PENDING
elif arming_state == ArmingState.TRIGGERED:
self._state = STATE_ALARM_TRIGGERED
else:
_LOGGER.warning("Unhandled arming state: %s", arming_state)
self.async_write_ha_state()
|
from django.urls import reverse
from weblate.trans.tests.test_views import FixtureTestCase
class ChartsTest(FixtureTestCase):
"""Testing of charts."""
def test_activity_monthly(self):
"""Test of monthly activity charts."""
response = self.client.get(reverse("monthly_activity"))
self.assert_svg(response)
response = self.client.get(reverse("monthly_activity_json"))
self.assertEqual(len(response.json()), 52)
response = self.client.get(reverse("monthly_activity", kwargs=self.kw_project))
self.assert_svg(response)
response = self.client.get(
reverse("monthly_activity", kwargs=self.kw_component)
)
self.assert_svg(response)
response = self.client.get(
reverse("monthly_activity", kwargs=self.kw_translation)
)
self.assert_svg(response)
response = self.client.get(reverse("monthly_activity", kwargs={"lang": "cs"}))
self.assert_svg(response)
response = self.client.get(
reverse("monthly_activity", kwargs={"user": self.user.username})
)
self.assert_svg(response)
def test_activity_yearly(self):
"""Test of yearly activity charts."""
response = self.client.get(reverse("yearly_activity"))
self.assert_svg(response)
response = self.client.get(reverse("yearly_activity", kwargs=self.kw_project))
self.assert_svg(response)
response = self.client.get(reverse("yearly_activity", kwargs=self.kw_component))
self.assert_svg(response)
response = self.client.get(
reverse("yearly_activity", kwargs=self.kw_translation)
)
self.assert_svg(response)
response = self.client.get(reverse("yearly_activity", kwargs={"lang": "cs"}))
self.assert_svg(response)
response = self.client.get(
reverse("yearly_activity", kwargs={"user": self.user.username})
)
self.assert_svg(response)
|
import json
from flask import current_app
from lemur import database
from lemur.dns_providers.models import DnsProvider
def render(args):
"""
    Helper that renders the REST API responses.
:param args:
:return:
"""
query = database.session_query(DnsProvider)
return database.sort_and_page(query, DnsProvider, args)
def get(dns_provider_id):
provider = database.get(DnsProvider, dns_provider_id)
return provider
def get_all_dns_providers():
"""
Retrieves all dns providers within Lemur.
:return:
"""
return DnsProvider.query.all()
def get_friendly(dns_provider_id):
"""
Retrieves a dns provider by its lemur assigned ID.
:param dns_provider_id: Lemur assigned ID
:rtype : DnsProvider
:return:
"""
dns_provider = get(dns_provider_id)
dns_provider_friendly = {
"name": dns_provider.name,
"description": dns_provider.description,
"providerType": dns_provider.provider_type,
"options": dns_provider.options,
"credentials": dns_provider.credentials,
}
if dns_provider.provider_type == "route53":
dns_provider_friendly["account_id"] = json.loads(dns_provider.credentials).get(
"account_id"
)
return dns_provider_friendly
def delete(dns_provider_id):
"""
Deletes a DNS provider.
:param dns_provider_id: Lemur assigned ID
"""
database.delete(get(dns_provider_id))
def get_types():
provider_config = current_app.config.get(
"ACME_DNS_PROVIDER_TYPES",
{
"items": [
{
"name": "route53",
"requirements": [
{
"name": "account_id",
"type": "int",
"required": True,
"helpMessage": "AWS Account number",
}
],
},
{
"name": "cloudflare",
"requirements": [
{
"name": "email",
"type": "str",
"required": True,
"helpMessage": "Cloudflare Email",
},
{
"name": "key",
"type": "str",
"required": True,
"helpMessage": "Cloudflare Key",
},
],
},
{"name": "dyn"},
{"name": "ultradns"},
{"name": "powerdns"},
]
},
)
if not provider_config:
raise Exception("No DNS Provider configuration specified.")
provider_config["total"] = len(provider_config.get("items"))
return provider_config
def set_domains(dns_provider, domains):
"""
    Sets the domains for a DNS provider and updates it in the database.
"""
dns_provider.domains = domains
database.update(dns_provider)
return dns_provider
def create(data):
provider_name = data.get("name")
credentials = {}
for item in data.get("provider_type", {}).get("requirements", []):
credentials[item["name"]] = item["value"]
dns_provider = DnsProvider(
name=provider_name,
description=data.get("description"),
provider_type=data.get("provider_type").get("name"),
credentials=json.dumps(credentials),
)
created = database.create(dns_provider)
return created.id
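# A sketch of the payload shape that create() expects, inferred from the
# accessors above; the concrete values are illustrative assumptions, and
# actually calling create() requires an initialized Lemur app and database
# session.
_EXAMPLE_CREATE_PAYLOAD = {
    "name": "my-route53",
    "description": "Route53 account used for ACME validation",
    "provider_type": {
        "name": "route53",
        "requirements": [
            {"name": "account_id", "value": 123456789012},
        ],
    },
}
# create(_EXAMPLE_CREATE_PAYLOAD) would persist the provider and return its id.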
|
import logging
import re
import voluptuous as vol
from homeassistant.components.alarm_control_panel import (
FORMAT_NUMBER,
FORMAT_TEXT,
PLATFORM_SCHEMA,
AlarmControlPanelEntity,
)
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_STATE,
CONF_CODE,
CONF_NAME,
CONF_OPTIMISTIC,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_DISARMED,
)
import homeassistant.helpers.config_validation as cv
from . import ATTR_EVENT, DOMAIN, SERVICE_PUSH_ALARM_STATE, SERVICE_TRIGGER
_LOGGER = logging.getLogger(__name__)
ALLOWED_STATES = [
STATE_ALARM_DISARMED,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_HOME,
]
DATA_IFTTT_ALARM = "ifttt_alarm"
DEFAULT_NAME = "Home"
CONF_EVENT_AWAY = "event_arm_away"
CONF_EVENT_HOME = "event_arm_home"
CONF_EVENT_NIGHT = "event_arm_night"
CONF_EVENT_DISARM = "event_disarm"
DEFAULT_EVENT_AWAY = "alarm_arm_away"
DEFAULT_EVENT_HOME = "alarm_arm_home"
DEFAULT_EVENT_NIGHT = "alarm_arm_night"
DEFAULT_EVENT_DISARM = "alarm_disarm"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CODE): cv.string,
vol.Optional(CONF_EVENT_AWAY, default=DEFAULT_EVENT_AWAY): cv.string,
vol.Optional(CONF_EVENT_HOME, default=DEFAULT_EVENT_HOME): cv.string,
vol.Optional(CONF_EVENT_NIGHT, default=DEFAULT_EVENT_NIGHT): cv.string,
vol.Optional(CONF_EVENT_DISARM, default=DEFAULT_EVENT_DISARM): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=False): cv.boolean,
}
)
PUSH_ALARM_STATE_SERVICE_SCHEMA = vol.Schema(
{vol.Required(ATTR_ENTITY_ID): cv.entity_ids, vol.Required(ATTR_STATE): cv.string}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a control panel managed through IFTTT."""
if DATA_IFTTT_ALARM not in hass.data:
hass.data[DATA_IFTTT_ALARM] = []
name = config.get(CONF_NAME)
code = config.get(CONF_CODE)
event_away = config.get(CONF_EVENT_AWAY)
event_home = config.get(CONF_EVENT_HOME)
event_night = config.get(CONF_EVENT_NIGHT)
event_disarm = config.get(CONF_EVENT_DISARM)
optimistic = config.get(CONF_OPTIMISTIC)
alarmpanel = IFTTTAlarmPanel(
name, code, event_away, event_home, event_night, event_disarm, optimistic
)
hass.data[DATA_IFTTT_ALARM].append(alarmpanel)
add_entities([alarmpanel])
async def push_state_update(service):
"""Set the service state as device state attribute."""
entity_ids = service.data.get(ATTR_ENTITY_ID)
state = service.data.get(ATTR_STATE)
devices = hass.data[DATA_IFTTT_ALARM]
if entity_ids:
devices = [d for d in devices if d.entity_id in entity_ids]
for device in devices:
device.push_alarm_state(state)
device.async_schedule_update_ha_state()
hass.services.register(
DOMAIN,
SERVICE_PUSH_ALARM_STATE,
push_state_update,
schema=PUSH_ALARM_STATE_SERVICE_SCHEMA,
)
class IFTTTAlarmPanel(AlarmControlPanelEntity):
"""Representation of an alarm control panel controlled through IFTTT."""
def __init__(
self, name, code, event_away, event_home, event_night, event_disarm, optimistic
):
"""Initialize the alarm control panel."""
self._name = name
self._code = code
self._event_away = event_away
self._event_home = event_home
self._event_night = event_night
self._event_disarm = event_disarm
self._optimistic = optimistic
self._state = None
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_NIGHT
@property
def assumed_state(self):
"""Notify that this platform return an assumed state."""
return True
@property
def code_format(self):
"""Return one or more digits/characters."""
if self._code is None:
return None
if isinstance(self._code, str) and re.search("^\\d+$", self._code):
return FORMAT_NUMBER
return FORMAT_TEXT
def alarm_disarm(self, code=None):
"""Send disarm command."""
if not self._check_code(code):
return
self.set_alarm_state(self._event_disarm, STATE_ALARM_DISARMED)
def alarm_arm_away(self, code=None):
"""Send arm away command."""
if not self._check_code(code):
return
self.set_alarm_state(self._event_away, STATE_ALARM_ARMED_AWAY)
def alarm_arm_home(self, code=None):
"""Send arm home command."""
if not self._check_code(code):
return
self.set_alarm_state(self._event_home, STATE_ALARM_ARMED_HOME)
def alarm_arm_night(self, code=None):
"""Send arm night command."""
if not self._check_code(code):
return
self.set_alarm_state(self._event_night, STATE_ALARM_ARMED_NIGHT)
def set_alarm_state(self, event, state):
"""Call the IFTTT trigger service to change the alarm state."""
data = {ATTR_EVENT: event}
self.hass.services.call(DOMAIN, SERVICE_TRIGGER, data)
_LOGGER.debug("Called IFTTT integration to trigger event %s", event)
if self._optimistic:
self._state = state
def push_alarm_state(self, value):
"""Push the alarm state to the given value."""
if value in ALLOWED_STATES:
_LOGGER.debug("Pushed the alarm state to %s", value)
self._state = value
def _check_code(self, code):
return self._code is None or self._code == code
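# A small sketch of how code handling behaves, constructing the panel
# directly with the default event names; the code value is an assumption.
def _example_code_handling():
    panel = IFTTTAlarmPanel(
        DEFAULT_NAME, "1234", DEFAULT_EVENT_AWAY, DEFAULT_EVENT_HOME,
        DEFAULT_EVENT_NIGHT, DEFAULT_EVENT_DISARM, optimistic=True,
    )
    assert panel.code_format == FORMAT_NUMBER  # all-digit code -> numeric keypad
    assert panel._check_code("1234")           # matching code is accepted
    assert not panel._check_code("0000")       # wrong code is rejected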
|
from .common import setup_ozw
async def test_switch(hass, generic_data, sent_messages, switch_msg):
"""Test setting up config entry."""
receive_message = await setup_ozw(hass, fixture=generic_data)
# Test loaded
state = hass.states.get("switch.smart_plug_switch")
assert state is not None
assert state.state == "off"
# Test turning on
await hass.services.async_call(
"switch", "turn_on", {"entity_id": "switch.smart_plug_switch"}, blocking=True
)
assert len(sent_messages) == 1
msg = sent_messages[0]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": True, "ValueIDKey": 541671440}
# Feedback on state
switch_msg.decode()
switch_msg.payload["Value"] = True
switch_msg.encode()
receive_message(switch_msg)
await hass.async_block_till_done()
state = hass.states.get("switch.smart_plug_switch")
assert state is not None
assert state.state == "on"
# Test turning off
await hass.services.async_call(
"switch", "turn_off", {"entity_id": "switch.smart_plug_switch"}, blocking=True
)
assert len(sent_messages) == 2
msg = sent_messages[1]
assert msg["topic"] == "OpenZWave/1/command/setvalue/"
assert msg["payload"] == {"Value": False, "ValueIDKey": 541671440}
|
from __future__ import unicode_literals
from django.contrib.admin.sites import AdminSite
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from django.test import RequestFactory
from django.test import TestCase
from django.test.utils import override_settings
from django.urls import reverse
from django.utils import timezone
from django.utils.translation import activate
from django.utils.translation import deactivate
from zinnia import settings
from zinnia.admin import entry as entry_admin
from zinnia.admin.category import CategoryAdmin
from zinnia.admin.entry import EntryAdmin
from zinnia.managers import PUBLISHED
from zinnia.models.author import Author
from zinnia.models.category import Category
from zinnia.models.entry import Entry
from zinnia.signals import disconnect_entry_signals
from zinnia.tests.utils import datetime
from zinnia.tests.utils import skip_if_custom_user
from zinnia.url_shortener.backends.default import base36
class BaseAdminTestCase(TestCase):
rich_urls = 'zinnia.tests.implementations.urls.default'
poor_urls = 'zinnia.tests.implementations.urls.poor'
model_class = None
admin_class = None
def setUp(self):
disconnect_entry_signals()
activate('en')
self.site = AdminSite()
self.admin = self.admin_class(
self.model_class, self.site)
def tearDown(self):
"""
Deactivate the translation system.
"""
deactivate()
def check_with_rich_and_poor_urls(self, func, args,
result_rich, result_poor):
with self.settings(ROOT_URLCONF=self.rich_urls):
self.assertEqual(func(*args), result_rich)
with self.settings(ROOT_URLCONF=self.poor_urls):
self.assertEqual(func(*args), result_poor)
class TestMessageBackend(object):
"""Message backend for testing"""
def __init__(self, *ka, **kw):
self.messages = []
def add(self, *ka, **kw):
self.messages.append(ka)
@skip_if_custom_user
class EntryAdminTestCase(BaseAdminTestCase):
"""Test case for Entry Admin"""
model_class = Entry
admin_class = EntryAdmin
def setUp(self):
super(EntryAdminTestCase, self).setUp()
params = {'title': 'My title',
'content': 'My content',
'slug': 'my-title'}
self.entry = Entry.objects.create(**params)
self.request_factory = RequestFactory()
self.request = self.request_factory.get('/')
def test_get_title(self):
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words)')
self.entry.comment_count = 1
self.entry.save()
self.entry = Entry.objects.get(pk=self.entry.pk)
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words) (1 reaction)')
self.entry.pingback_count = 1
self.entry.save()
self.entry = Entry.objects.get(pk=self.entry.pk)
self.assertEqual(self.admin.get_title(self.entry),
'My title (2 words) (2 reactions)')
def test_get_authors(self):
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'', '')
author_1 = Author.objects.create_user(
'author-1', '[email protected]')
author_2 = Author.objects.create_user(
'author<2>', '[email protected]')
self.entry.authors.add(author_1)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/author-1/" target="blank">author-1</a>',
'author-1')
self.entry.authors.add(author_2)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/author-1/" target="blank">author-1</a>, '
'<a href="/authors/author%3C2%3E/" target="blank">'
'author<2></a>',
'author-1, author<2>')
def test_get_authors_non_ascii(self):
author = Author.objects.create_user(
'тест', '[email protected]')
self.entry.authors.add(author)
self.check_with_rich_and_poor_urls(
self.admin.get_authors, (self.entry,),
'<a href="/authors/%D1%82%D0%B5%D1%81%D1%82/" '
'target="blank">тест</a>',
'тест')
def test_get_categories(self):
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'', '')
category_1 = Category.objects.create(title='Category <b>1</b>',
slug='category-1')
category_2 = Category.objects.create(title='Category <b>2</b>',
slug='category-2')
self.entry.categories.add(category_1)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category-1/" target="blank">'
'Category <b>1</b></a>',
'Category <b>1</b>')
self.entry.categories.add(category_2)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category-1/" target="blank">'
'Category <b>1</b></a>, '
'<a href="/categories/category-2/" target="blank">Category '
'<b>2</b></a>',
'Category <b>1</b>, Category <b>2</b>')
def test_get_categories_non_ascii(self):
category = Category.objects.create(title='Category тест',
slug='category')
self.entry.categories.add(category)
self.check_with_rich_and_poor_urls(
self.admin.get_categories, (self.entry,),
'<a href="/categories/category/" target="blank">'
'Category тест</a>',
'Category тест')
def test_get_tags(self):
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'', '')
self.entry.tags = 'zinnia'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/zinnia/" target="blank">zinnia</a>',
'zinnia')
self.entry.tags = 'zinnia, t<e>st'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/t%3Ce%3Est/" target="blank">t<e>st</a>, '
'<a href="/tags/zinnia/" target="blank">zinnia</a>',
'zinnia, t<e>st') # Yes, this is not the same order...
def test_get_tags_non_ascii(self):
self.entry.tags = 'тест'
self.check_with_rich_and_poor_urls(
self.admin.get_tags, (self.entry,),
'<a href="/tags/%D1%82%D0%B5%D1%81%D1%82/" '
'target="blank">тест</a>',
'тест')
def test_get_sites(self):
self.assertEqual(self.admin.get_sites(self.entry), '')
self.entry.sites.add(Site.objects.get_current())
self.check_with_rich_and_poor_urls(
self.admin.get_sites, (self.entry,),
'<a href="http://example.com/" target="blank">example.com</a>',
'<a href="http://example.com" target="blank">example.com</a>')
def test_get_short_url(self):
with self.settings(ROOT_URLCONF=self.poor_urls):
entry_url = self.entry.get_absolute_url()
self.check_with_rich_and_poor_urls(
self.admin.get_short_url, (self.entry,),
'<a href="http://example.com/%(hash)s/" target="blank">'
'http://example.com/%(hash)s/</a>' % {
'hash': base36(self.entry.pk)},
'<a href="%(url)s" target="blank">%(url)s</a>' % {
'url': entry_url})
def test_get_is_visible(self):
self.assertEqual(self.admin.get_is_visible(self.entry),
self.entry.is_visible)
def test_queryset(self):
user = Author.objects.create_user(
'user', '[email protected]')
self.entry.authors.add(user)
root = Author.objects.create_superuser(
'root', '[email protected]', 'toor')
params = {'title': 'My root title',
'content': 'My root content',
'slug': 'my-root-titile'}
root_entry = Entry.objects.create(**params)
root_entry.authors.add(root)
self.request.user = User.objects.get(pk=user.pk)
self.assertEqual(len(self.admin.get_queryset(self.request)), 1)
self.request.user = User.objects.get(pk=root.pk)
self.assertEqual(len(self.admin.get_queryset(self.request)), 2)
def test_get_changeform_initial_data(self):
user = User.objects.create_user(
'user', '[email protected]')
site = Site.objects.get_current()
self.request.user = user
data = self.admin.get_changeform_initial_data(self.request)
self.assertEqual(data, {'authors': [user.pk],
'sites': [site.pk]})
request = self.request_factory.get('/?title=data')
request.user = user
data = self.admin.get_changeform_initial_data(request)
self.assertEqual(data, {'title': 'data'})
def test_formfield_for_manytomany(self):
staff = User.objects.create_user(
'staff', '[email protected]')
author = User.objects.create_user(
'author', '[email protected]')
root = User.objects.create_superuser(
'root', '[email protected]', 'toor')
self.request.user = root
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 1)
staff.is_staff = True
staff.save()
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 2)
self.entry.authors.add(Author.objects.get(pk=author.pk))
field = self.admin.formfield_for_manytomany(
Entry.authors.field, self.request)
self.assertEqual(field.queryset.count(), 3)
def test_get_readonly_fields(self):
user = User.objects.create_user(
'user', '[email protected]')
root = User.objects.create_superuser(
'root', '[email protected]', 'toor')
self.request.user = user
self.assertEqual(self.admin.get_readonly_fields(self.request),
['status', 'authors'])
self.request.user = root
self.assertEqual(self.admin.get_readonly_fields(self.request),
[])
def test_get_actions(self):
original_ping_directories = settings.PING_DIRECTORIES
user = User.objects.create_user(
'user', '[email protected]')
root = User.objects.create_superuser(
'root', '[email protected]', 'toor')
self.request.user = user
settings.PING_DIRECTORIES = True
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['close_comments',
'close_pingbacks',
'close_trackbacks',
'ping_directories',
'put_on_top',
'mark_featured',
'unmark_featured'])
settings.PING_DIRECTORIES = False
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['close_comments',
'close_pingbacks',
'close_trackbacks',
'put_on_top',
'mark_featured',
'unmark_featured'])
self.request.user = root
self.assertEqual(
list(self.admin.get_actions(self.request).keys()),
['delete_selected',
'make_mine',
'make_published',
'make_hidden',
'close_comments',
'close_pingbacks',
'close_trackbacks',
'put_on_top',
'mark_featured',
'unmark_featured'])
settings.PING_DIRECTORIES = original_ping_directories
def test_get_actions_in_popup_mode_issue_291(self):
user = User.objects.create_user(
'user', '[email protected]')
request = self.request_factory.get('/?_popup=1')
request.user = user
self.assertEqual(
list(self.admin.get_actions(request).keys()),
[])
def test_make_mine(self):
user = Author.objects.create_user(
'user', '[email protected]')
self.request.user = User.objects.get(pk=user.pk)
self.request._messages = TestMessageBackend()
self.assertEqual(user.entries.count(), 0)
self.admin.make_mine(self.request, Entry.objects.all())
self.assertEqual(user.entries.count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
def test_make_published(self):
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = []
self.request._messages = TestMessageBackend()
self.entry.sites.add(Site.objects.get_current())
self.assertEqual(Entry.published.count(), 0)
self.admin.make_published(self.request, Entry.objects.all())
self.assertEqual(Entry.published.count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
settings.PING_DIRECTORIES = original_ping_directories
def test_make_hidden(self):
self.request._messages = TestMessageBackend()
self.entry.status = PUBLISHED
self.entry.save()
self.entry.sites.add(Site.objects.get_current())
self.assertEqual(Entry.published.count(), 1)
self.admin.make_hidden(self.request, Entry.objects.all())
self.assertEqual(Entry.published.count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_comments(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
comment_enabled=True).count(), 1)
self.admin.close_comments(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
comment_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_pingbacks(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
pingback_enabled=True).count(), 1)
self.admin.close_pingbacks(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
pingback_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_close_trackbacks(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
trackback_enabled=True).count(), 1)
self.admin.close_trackbacks(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(
trackback_enabled=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 1)
def test_put_on_top(self):
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = []
self.request._messages = TestMessageBackend()
self.entry.publication_date = datetime(2011, 1, 1, 12, 0)
self.admin.put_on_top(self.request, Entry.objects.all())
self.assertEqual(
Entry.objects.get(pk=self.entry.pk).creation_date.date(),
timezone.now().date())
self.assertEqual(len(self.request._messages.messages), 1)
settings.PING_DIRECTORIES = original_ping_directories
def test_mark_unmark_featured(self):
self.request._messages = TestMessageBackend()
self.assertEqual(Entry.objects.filter(
featured=True).count(), 0)
self.admin.mark_featured(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(featured=True).count(), 1)
self.assertEqual(len(self.request._messages.messages), 1)
self.admin.unmark_featured(self.request, Entry.objects.all())
self.assertEqual(Entry.objects.filter(featured=True).count(), 0)
self.assertEqual(len(self.request._messages.messages), 2)
def test_ping_directories(self):
class FakePinger(object):
def __init__(self, *ka, **kw):
self.results = [{'flerror': False, 'message': 'OK'},
{'flerror': True, 'message': 'KO'}]
def join(self):
pass
original_pinger = entry_admin.DirectoryPinger
entry_admin.DirectoryPinger = FakePinger
original_ping_directories = settings.PING_DIRECTORIES
settings.PING_DIRECTORIES = ['http://ping.com/ping']
self.request._messages = TestMessageBackend()
self.admin.ping_directories(self.request, Entry.objects.all(), False)
self.assertEqual(len(self.request._messages.messages), 0)
self.admin.ping_directories(self.request, Entry.objects.all())
self.assertEqual(len(self.request._messages.messages), 2)
self.assertEqual(self.request._messages.messages,
[(20, 'http://ping.com/ping : KO', ''),
(20, 'http://ping.com/ping directory succesfully '
'pinged 1 entries.', '')])
entry_admin.DirectoryPinger = original_pinger
settings.PING_DIRECTORIES = original_ping_directories
class CategoryAdminTestCase(BaseAdminTestCase):
"""Test cases for Category Admin"""
model_class = Category
admin_class = CategoryAdmin
def test_get_tree_path(self):
category = Category.objects.create(title='Category', slug='cat')
self.check_with_rich_and_poor_urls(
self.admin.get_tree_path, (category,),
'<a href="/categories/cat/" target="blank">/cat/</a>',
'/cat/')
@skip_if_custom_user
@override_settings(
DEBUG=True
)
class FunctionnalAdminTestCase(TestCase):
"""
    Functional testing of the admin integration.
    We just execute the views to see if the integration works.
"""
def setUp(self):
disconnect_entry_signals()
self.author = Author.objects.create_superuser(
username='admin',
email='[email protected]',
password='password'
)
self.category = Category.objects.create(
title='Category', slug='cat'
)
params = {
'title': 'My title',
'content': 'My content',
'slug': 'my-title'
}
self.entry = Entry.objects.create(**params)
self.client.force_login(self.author)
def assert_admin(self, url):
response = self.client.get(url)
self.assertEqual(response.status_code, 200)
def test_admin_entry_list(self):
self.assert_admin(
reverse('admin:zinnia_entry_changelist')
)
def test_admin_category_list(self):
self.assert_admin(
reverse('admin:zinnia_category_changelist')
)
def test_admin_entry_add(self):
self.assert_admin(
reverse('admin:zinnia_entry_add')
)
def test_admin_category_add(self):
self.assert_admin(
reverse('admin:zinnia_category_add')
)
def test_admin_entry_update(self):
self.assert_admin(
reverse('admin:zinnia_entry_change', args=[self.entry.pk])
)
def test_admin_category_update(self):
self.assert_admin(
reverse('admin:zinnia_category_change', args=[self.category.pk])
)
|
from .nodes import Node
class NodeVisitor:
"""Walks the abstract syntax tree and call visitor functions for every
node found. The visitor functions may return values which will be
forwarded by the `visit` method.
Per default the visitor functions for the nodes are ``'visit_'`` +
class name of the node. So a `TryFinally` node visit function would
be `visit_TryFinally`. This behavior can be changed by overriding
the `get_visitor` function. If no visitor function exists for a node
(return value `None`) the `generic_visit` visitor is used instead.
"""
def get_visitor(self, node):
"""Return the visitor function for this node or `None` if no visitor
exists for this node. In that case the generic visit function is
used instead.
"""
return getattr(self, f"visit_{node.__class__.__name__}", None)
def visit(self, node, *args, **kwargs):
"""Visit a node."""
f = self.get_visitor(node)
if f is not None:
return f(node, *args, **kwargs)
return self.generic_visit(node, *args, **kwargs)
def generic_visit(self, node, *args, **kwargs):
"""Called if no explicit visitor function exists for a node."""
for node in node.iter_child_nodes():
self.visit(node, *args, **kwargs)
class NodeTransformer(NodeVisitor):
"""Walks the abstract syntax tree and allows modifications of nodes.
The `NodeTransformer` will walk the AST and use the return value of the
    visitor functions to replace or remove the old node. If the return
    value of the visitor function is `None`, the node will be removed
    from the previous location; otherwise it is replaced with the return
    value. The return value may be the original node, in which case no
    replacement takes place.
"""
def generic_visit(self, node, *args, **kwargs):
for field, old_value in node.iter_fields():
if isinstance(old_value, list):
new_values = []
for value in old_value:
if isinstance(value, Node):
value = self.visit(value, *args, **kwargs)
if value is None:
continue
elif not isinstance(value, Node):
new_values.extend(value)
continue
new_values.append(value)
old_value[:] = new_values
elif isinstance(old_value, Node):
new_node = self.visit(old_value, *args, **kwargs)
if new_node is None:
delattr(node, field)
else:
setattr(node, field, new_node)
return node
def visit_list(self, node, *args, **kwargs):
"""As transformers may return lists in some places this method
can be used to enforce a list as return value.
"""
rv = self.visit(node, *args, **kwargs)
if not isinstance(rv, list):
rv = [rv]
return rv
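# A minimal usage sketch, assuming this module is Jinja's visitor module so
# that jinja2.Environment is available to build an AST; the template source
# below is an example value.
def _example_collect_names():
    """Collect every top-level variable name referenced in a template."""
    from jinja2 import Environment  # local import to avoid import cycles

    class NameCollector(NodeVisitor):
        def __init__(self):
            self.names = []

        def visit_Name(self, node):
            # Dispatched via get_visitor() because the node class is `Name`.
            self.names.append(node.name)
            self.generic_visit(node)

    tree = Environment().parse("{{ user.name }} has {{ count }} items")
    collector = NameCollector()
    collector.visit(tree)
    return collector.names  # ["user", "count"]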
|
from datetime import timedelta
from django.test import TestCase
from django.utils import timezone
from weblate.checks.source import (
EllipsisCheck,
LongUntranslatedCheck,
OptionalPluralCheck,
)
from weblate.checks.tests.test_checks import MockUnit
from weblate.trans.tests.test_views import FixtureTestCase
class OptionalPluralCheckTest(TestCase):
def setUp(self):
self.check = OptionalPluralCheck()
def test_none(self):
self.assertFalse(self.check.check_source(["text"], MockUnit()))
def test_plural(self):
self.assertFalse(self.check.check_source(["text", "texts"], MockUnit()))
def test_failing(self):
self.assertTrue(self.check.check_source(["text(s)"], MockUnit()))
class EllipsisCheckTest(TestCase):
def setUp(self):
self.check = EllipsisCheck()
def test_none(self):
self.assertFalse(self.check.check_source(["text"], MockUnit()))
def test_good(self):
self.assertFalse(self.check.check_source(["text…"], MockUnit()))
def test_failing(self):
self.assertTrue(self.check.check_source(["text..."], MockUnit()))
class LongUntranslatedCheckTestCase(FixtureTestCase):
check = LongUntranslatedCheck()
def test_recent(self):
unit = self.get_unit(language="en")
unit.timestamp = timezone.now()
unit.run_checks()
self.assertNotIn("long_untranslated", unit.all_checks_names)
def test_old(self):
unit = self.get_unit(language="en")
unit.timestamp = timezone.now() - timedelta(days=100)
unit.run_checks()
self.assertNotIn("long_untranslated", unit.all_checks_names)
def test_old_untranslated(self):
unit = self.get_unit(language="en")
unit.timestamp = timezone.now() - timedelta(days=100)
unit.translation.component.stats.lazy_translated_percent = 100
unit.run_checks()
self.assertIn("long_untranslated", unit.all_checks_names)
|
from typing import List, Optional
import voluptuous as vol
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_DEVICE_ID,
CONF_DOMAIN,
CONF_ENTITY_ID,
CONF_TYPE,
)
from homeassistant.core import Context, HomeAssistant
from homeassistant.helpers import entity_registry
import homeassistant.helpers.config_validation as cv
from . import DOMAIN, SERVICE_RETURN_TO_BASE, SERVICE_START
ACTION_TYPES = {"clean", "dock"}
ACTION_SCHEMA = cv.DEVICE_ACTION_BASE_SCHEMA.extend(
{
vol.Required(CONF_TYPE): vol.In(ACTION_TYPES),
vol.Required(CONF_ENTITY_ID): cv.entity_domain(DOMAIN),
}
)
async def async_get_actions(hass: HomeAssistant, device_id: str) -> List[dict]:
"""List device actions for Vacuum devices."""
registry = await entity_registry.async_get_registry(hass)
actions = []
# Get all the integrations entities for this device
for entry in entity_registry.async_entries_for_device(registry, device_id):
if entry.domain != DOMAIN:
continue
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "clean",
}
)
actions.append(
{
CONF_DEVICE_ID: device_id,
CONF_DOMAIN: DOMAIN,
CONF_ENTITY_ID: entry.entity_id,
CONF_TYPE: "dock",
}
)
return actions
async def async_call_action_from_config(
hass: HomeAssistant, config: dict, variables: dict, context: Optional[Context]
) -> None:
"""Execute a device action."""
config = ACTION_SCHEMA(config)
service_data = {ATTR_ENTITY_ID: config[CONF_ENTITY_ID]}
if config[CONF_TYPE] == "clean":
service = SERVICE_START
elif config[CONF_TYPE] == "dock":
service = SERVICE_RETURN_TO_BASE
await hass.services.async_call(
DOMAIN, service, service_data, blocking=True, context=context
)
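# A sketch of the action config accepted by ACTION_SCHEMA above; the device
# and entity ids are illustrative values. Passing such a dict (together with
# a running hass instance) to async_call_action_from_config would call
# vacuum.start on the given entity.
_EXAMPLE_ACTION_CONFIG = {
    CONF_DEVICE_ID: "abc123",
    CONF_DOMAIN: DOMAIN,
    CONF_ENTITY_ID: "vacuum.downstairs",
    CONF_TYPE: "clean",
}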
|
import os
import sys
from pathlib import Path
from Foundation import NSBundle
__package__ = "meld"
__version__ = "3.21.0.osx3"
APPLICATION_NAME = 'Meld'
APPLICATION_ID = 'org.gnome.Meld'
SETTINGS_SCHEMA_ID = 'org.gnome.meld'
RESOURCE_BASE = '/org/gnome/meld'
# START; these paths are clobbered on install by meld.build_helpers
DATADIR = Path(sys.prefix) / "share" / "meld"
LOCALEDIR = Path(sys.prefix) / "share" / "locale"
# END
CONFIGURED = '@configured@'
PROFILE = 'macOS'
if CONFIGURED == 'True':
APPLICATION_ID = '@application_id@'
DATADIR = '@pkgdatadir@'
LOCALEDIR = '@localedir@'
PROFILE = '@profile@'
# Flag enabling some workarounds if data dir isn't installed in standard prefix
DATADIR_IS_UNINSTALLED = False
PYTHON_REQUIREMENT_TUPLE = (3, 6)
# Installed from main script
def no_translation(gettext_string: str) -> str:
return gettext_string
_ = no_translation
ngettext = no_translation
def frozen():
global DATADIR, LOCALEDIR, DATADIR_IS_UNINSTALLED
bundle = NSBundle.mainBundle()
resource_path = bundle.resourcePath().fileSystemRepresentation().decode("utf-8")
#bundle_path = bundle.bundlePath().fileSystemRepresentation().decode("utf-8")
#frameworks_path = bundle.privateFrameworksPath().fileSystemRepresentation().decode("utf-8")
#executable_path = bundle.executablePath().fileSystemRepresentation().decode("utf-8")
etc_path = os.path.join(resource_path, "etc")
lib_path = os.path.join(resource_path, "lib")
share_path = os.path.join(resource_path , "share")
# Glib and GI environment variables
os.environ['GSETTINGS_SCHEMA_DIR'] = os.path.join(
share_path, "glib-2.0")
os.environ['GI_TYPELIB_PATH'] = os.path.join(
lib_path, "girepository-1.0")
# Avoid GTK warnings unless user specifies otherwise
debug_gtk = os.environ.get('G_ENABLE_DIAGNOSTIC', "0")
os.environ['G_ENABLE_DIAGNOSTIC'] = debug_gtk
# GTK environment variables
os.environ['GTK_DATA_PREFIX'] = resource_path
os.environ['GTK_EXE_PREFIX'] = resource_path
os.environ['GTK_PATH'] = resource_path
# XDG environment variables
os.environ['XDG_CONFIG_DIRS'] = os.path.join(etc_path, "xdg")
os.environ['XDG_DATA_DIRS'] = ":".join((share_path, os.path.join(share_path, "meld")))
os.environ['XDG_CONFIG_HOME'] = etc_path
home_dir = os.path.expanduser('~')
if home_dir is not None:
cache_dir = os.path.join(home_dir, 'Library', 'Caches', 'org.gnome.meld')
try:
os.makedirs(cache_dir, mode=0o755, exist_ok=True)
os.environ['XDG_CACHE_HOME'] = cache_dir
except EnvironmentError:
pass
if os.path.isdir(cache_dir):
os.environ['XDG_CACHE_HOME'] = cache_dir
# Pango environment variables
os.environ['PANGO_RC_FILE'] = os.path.join(etc_path, "pango", "pangorc")
os.environ['PANGO_SYSCONFDIR'] = etc_path
os.environ['PANGO_LIBDIR'] = lib_path
# Gdk environment variables
os.environ['GDK_PIXBUF_MODULEDIR'] = os.path.join(lib_path, "gdk-pixbuf-2.0", "2.10.0", "loaders")
#os.environ['GDK_RENDERING'] = "image"
# Python environment variables
os.environ['PYTHONHOME'] = resource_path
original_python_path = os.environ.get('PYTHONPATH', "")
python_path = ":".join((lib_path,
os.path.join(lib_path, "python", "lib-dynload"),
os.path.join(lib_path, "python"),
original_python_path))
os.environ['PYTHONPATH'] = python_path
# meld specific
DATADIR = os.path.join(share_path, "meld")
LOCALEDIR = os.path.join(share_path, "mo")
DATADIR_IS_UNINSTALLED = True
def uninstalled():
global DATADIR, LOCALEDIR, DATADIR_IS_UNINSTALLED
melddir = Path(__file__).resolve().parent.parent
DATADIR = melddir / "data"
LOCALEDIR = melddir / "build" / "mo"
DATADIR_IS_UNINSTALLED = True
resource_path = melddir / "meld" / "resources"
os.environ['G_RESOURCE_OVERLAYS'] = f'{RESOURCE_BASE}={resource_path}'
def ui_file(filename):
return os.path.join(DATADIR, "ui", filename)
|
from datetime import datetime
import pytest
from homeassistant.components import (
device_sun_light_trigger,
device_tracker,
group,
light,
)
from homeassistant.components.device_tracker.const import DOMAIN
from homeassistant.const import (
ATTR_ENTITY_ID,
CONF_PLATFORM,
EVENT_HOMEASSISTANT_START,
STATE_HOME,
STATE_NOT_HOME,
STATE_OFF,
STATE_ON,
)
from homeassistant.core import CoreState
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed
@pytest.fixture
def scanner(hass):
"""Initialize components."""
scanner = getattr(hass.components, "test.device_tracker").get_scanner(None, None)
scanner.reset()
scanner.come_home("DEV1")
getattr(hass.components, "test.light").init()
with patch(
"homeassistant.components.device_tracker.legacy.load_yaml_config_file",
return_value={
"device_1": {
"mac": "DEV1",
"name": "Unnamed Device",
"picture": "http://example.com/dev1.jpg",
"track": True,
"vendor": None,
},
"device_2": {
"mac": "DEV2",
"name": "Unnamed Device",
"picture": "http://example.com/dev2.jpg",
"track": True,
"vendor": None,
},
},
):
assert hass.loop.run_until_complete(
async_setup_component(
hass,
device_tracker.DOMAIN,
{device_tracker.DOMAIN: {CONF_PLATFORM: "test"}},
)
)
assert hass.loop.run_until_complete(
async_setup_component(
hass, light.DOMAIN, {light.DOMAIN: {CONF_PLATFORM: "test"}}
)
)
return scanner
async def test_lights_on_when_sun_sets(hass, scanner):
"""Test lights go on when there is someone home and the sun sets."""
test_time = datetime(2017, 4, 5, 1, 2, 3, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=test_time):
assert await async_setup_component(
hass, device_sun_light_trigger.DOMAIN, {device_sun_light_trigger.DOMAIN: {}}
)
await hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "test.light"},
blocking=True,
)
test_time = test_time.replace(hour=3)
with patch("homeassistant.util.dt.utcnow", return_value=test_time):
async_fire_time_changed(hass, test_time)
await hass.async_block_till_done()
assert all(
hass.states.get(ent_id).state == STATE_ON
for ent_id in hass.states.async_entity_ids("light")
)
async def test_lights_turn_off_when_everyone_leaves(hass):
"""Test lights turn off when everyone leaves the house."""
assert await async_setup_component(
hass, "light", {light.DOMAIN: {CONF_PLATFORM: "test"}}
)
await hass.services.async_call(
light.DOMAIN,
light.SERVICE_TURN_ON,
{ATTR_ENTITY_ID: "test.light"},
blocking=True,
)
hass.states.async_set("device_tracker.bla", STATE_HOME)
assert await async_setup_component(
hass, device_sun_light_trigger.DOMAIN, {device_sun_light_trigger.DOMAIN: {}}
)
hass.states.async_set("device_tracker.bla", STATE_NOT_HOME)
await hass.async_block_till_done()
assert all(
hass.states.get(ent_id).state == STATE_OFF
for ent_id in hass.states.async_entity_ids("light")
)
async def test_lights_turn_on_when_coming_home_after_sun_set(hass, scanner):
"""Test lights turn on when coming home after sun set."""
test_time = datetime(2017, 4, 5, 3, 2, 3, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=test_time):
await hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "all"}, blocking=True
)
assert await async_setup_component(
hass, device_sun_light_trigger.DOMAIN, {device_sun_light_trigger.DOMAIN: {}}
)
hass.states.async_set(f"{DOMAIN}.device_2", STATE_HOME)
await hass.async_block_till_done()
assert all(
hass.states.get(ent_id).state == light.STATE_ON
for ent_id in hass.states.async_entity_ids("light")
)
async def test_lights_turn_on_when_coming_home_after_sun_set_person(hass, scanner):
"""Test lights turn on when coming home after sun set."""
device_1 = f"{DOMAIN}.device_1"
device_2 = f"{DOMAIN}.device_2"
test_time = datetime(2017, 4, 5, 3, 2, 3, tzinfo=dt_util.UTC)
with patch("homeassistant.util.dt.utcnow", return_value=test_time):
await hass.services.async_call(
light.DOMAIN, light.SERVICE_TURN_OFF, {ATTR_ENTITY_ID: "all"}, blocking=True
)
hass.states.async_set(device_1, STATE_NOT_HOME)
hass.states.async_set(device_2, STATE_NOT_HOME)
await hass.async_block_till_done()
assert all(
not light.is_on(hass, ent_id)
for ent_id in hass.states.async_entity_ids("light")
)
assert hass.states.get(device_1).state == "not_home"
assert hass.states.get(device_2).state == "not_home"
assert await async_setup_component(
hass,
"person",
{"person": [{"id": "me", "name": "Me", "device_trackers": [device_1]}]},
)
assert await async_setup_component(hass, "group", {})
await hass.async_block_till_done()
await group.Group.async_create_group(hass, "person_me", ["person.me"])
assert await async_setup_component(
hass,
device_sun_light_trigger.DOMAIN,
{device_sun_light_trigger.DOMAIN: {"device_group": "group.person_me"}},
)
assert all(
hass.states.get(ent_id).state == STATE_OFF
for ent_id in hass.states.async_entity_ids("light")
)
assert hass.states.get(device_1).state == "not_home"
assert hass.states.get(device_2).state == "not_home"
assert hass.states.get("person.me").state == "not_home"
# Unrelated device has no impact
hass.states.async_set(device_2, STATE_HOME)
await hass.async_block_till_done()
assert all(
hass.states.get(ent_id).state == STATE_OFF
for ent_id in hass.states.async_entity_ids("light")
)
assert hass.states.get(device_1).state == "not_home"
assert hass.states.get(device_2).state == "home"
assert hass.states.get("person.me").state == "not_home"
# person home switches on
hass.states.async_set(device_1, STATE_HOME)
await hass.async_block_till_done()
await hass.async_block_till_done()
assert all(
hass.states.get(ent_id).state == light.STATE_ON
for ent_id in hass.states.async_entity_ids("light")
)
assert hass.states.get(device_1).state == "home"
assert hass.states.get(device_2).state == "home"
assert hass.states.get("person.me").state == "home"
async def test_initialize_start(hass):
"""Test we initialize when HA starts."""
hass.state = CoreState.not_running
assert await async_setup_component(
hass,
device_sun_light_trigger.DOMAIN,
{device_sun_light_trigger.DOMAIN: {}},
)
with patch(
"homeassistant.components.device_sun_light_trigger.activate_automation"
) as mock_activate:
hass.bus.fire(EVENT_HOMEASSISTANT_START)
await hass.async_block_till_done()
assert len(mock_activate.mock_calls) == 1
|
import logging
from random import randrange
from pyintesishome import IHAuthenticationError, IHConnectionError, IntesisHome
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_BOOST,
PRESET_COMFORT,
PRESET_ECO,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_SWING_MODE,
SUPPORT_TARGET_TEMPERATURE,
SWING_BOTH,
SWING_HORIZONTAL,
SWING_OFF,
SWING_VERTICAL,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_DEVICE,
CONF_PASSWORD,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
IH_DEVICE_INTESISHOME = "IntesisHome"
IH_DEVICE_AIRCONWITHME = "airconwithme"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_DEVICE, default=IH_DEVICE_INTESISHOME): vol.In(
[IH_DEVICE_AIRCONWITHME, IH_DEVICE_INTESISHOME]
),
}
)
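# Illustrative configuration.yaml entry for this platform (values are
# placeholders; the keys mirror PLATFORM_SCHEMA above):
#
#   climate:
#     - platform: intesishome
#       username: YOUR_INTESISHOME_EMAIL
#       password: YOUR_INTESISHOME_PASSWORD
#       device: IntesisHome  # or airconwithme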
MAP_IH_TO_HVAC_MODE = {
"auto": HVAC_MODE_HEAT_COOL,
"cool": HVAC_MODE_COOL,
"dry": HVAC_MODE_DRY,
"fan": HVAC_MODE_FAN_ONLY,
"heat": HVAC_MODE_HEAT,
"off": HVAC_MODE_OFF,
}
MAP_HVAC_MODE_TO_IH = {v: k for k, v in MAP_IH_TO_HVAC_MODE.items()}
MAP_IH_TO_PRESET_MODE = {
"eco": PRESET_ECO,
"comfort": PRESET_COMFORT,
"powerful": PRESET_BOOST,
}
MAP_PRESET_MODE_TO_IH = {v: k for k, v in MAP_IH_TO_PRESET_MODE.items()}
IH_SWING_STOP = "auto/stop"
IH_SWING_SWING = "swing"
MAP_SWING_TO_IH = {
SWING_OFF: {"vvane": IH_SWING_STOP, "hvane": IH_SWING_STOP},
SWING_BOTH: {"vvane": IH_SWING_SWING, "hvane": IH_SWING_SWING},
SWING_HORIZONTAL: {"vvane": IH_SWING_STOP, "hvane": IH_SWING_SWING},
SWING_VERTICAL: {"vvane": IH_SWING_SWING, "hvane": IH_SWING_STOP},
}
MAP_STATE_ICONS = {
HVAC_MODE_COOL: "mdi:snowflake",
HVAC_MODE_DRY: "mdi:water-off",
HVAC_MODE_FAN_ONLY: "mdi:fan",
HVAC_MODE_HEAT: "mdi:white-balance-sunny",
HVAC_MODE_HEAT_COOL: "mdi:cached",
}
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Create the IntesisHome climate devices."""
ih_user = config[CONF_USERNAME]
ih_pass = config[CONF_PASSWORD]
device_type = config[CONF_DEVICE]
controller = IntesisHome(
ih_user,
ih_pass,
hass.loop,
websession=async_get_clientsession(hass),
device_type=device_type,
)
try:
await controller.poll_status()
except IHAuthenticationError:
_LOGGER.error("Invalid username or password")
return
except IHConnectionError as ex:
_LOGGER.error("Error connecting to the %s server", device_type)
raise PlatformNotReady from ex
ih_devices = controller.get_devices()
if ih_devices:
async_add_entities(
[
IntesisAC(ih_device_id, device, controller)
for ih_device_id, device in ih_devices.items()
],
True,
)
else:
_LOGGER.error(
"Error getting device list from %s API: %s",
device_type,
controller.error_message,
)
await controller.stop()
class IntesisAC(ClimateEntity):
"""Represents an Intesishome air conditioning device."""
def __init__(self, ih_device_id, ih_device, controller):
"""Initialize the thermostat."""
self._controller = controller
self._device_id = ih_device_id
self._ih_device = ih_device
self._device_name = ih_device.get("name")
self._device_type = controller.device_type
self._connected = None
self._setpoint_step = 1
self._current_temp = None
self._max_temp = None
self._hvac_mode_list = []
self._min_temp = None
self._target_temp = None
self._outdoor_temp = None
self._hvac_mode = None
self._preset = None
self._preset_list = [PRESET_ECO, PRESET_COMFORT, PRESET_BOOST]
self._run_hours = None
self._rssi = None
self._swing_list = [SWING_OFF]
self._vvane = None
self._hvane = None
self._power = False
self._fan_speed = None
self._support = 0
self._power_consumption_heat = None
self._power_consumption_cool = None
# Setpoint support
if controller.has_setpoint_control(ih_device_id):
self._support |= SUPPORT_TARGET_TEMPERATURE
# Setup swing list
if controller.has_vertical_swing(ih_device_id):
self._swing_list.append(SWING_VERTICAL)
if controller.has_horizontal_swing(ih_device_id):
self._swing_list.append(SWING_HORIZONTAL)
if SWING_HORIZONTAL in self._swing_list and SWING_VERTICAL in self._swing_list:
self._swing_list.append(SWING_BOTH)
if len(self._swing_list) > 1:
self._support |= SUPPORT_SWING_MODE
# Setup fan speeds
self._fan_modes = controller.get_fan_speed_list(ih_device_id)
if self._fan_modes:
self._support |= SUPPORT_FAN_MODE
# Preset support
if ih_device.get("climate_working_mode"):
self._support |= SUPPORT_PRESET_MODE
# Setup HVAC modes
modes = controller.get_mode_list(ih_device_id)
if modes:
mode_list = [MAP_IH_TO_HVAC_MODE[mode] for mode in modes]
self._hvac_mode_list.extend(mode_list)
self._hvac_mode_list.append(HVAC_MODE_OFF)
async def async_added_to_hass(self):
"""Subscribe to event updates."""
_LOGGER.debug("Added climate device with state: %s", repr(self._ih_device))
await self._controller.add_update_callback(self.async_update_callback)
try:
await self._controller.connect()
except IHConnectionError as ex:
_LOGGER.error("Exception connecting to IntesisHome: %s", ex)
raise PlatformNotReady from ex
@property
def name(self):
"""Return the name of the AC device."""
return self._device_name
@property
def temperature_unit(self):
"""Intesishome API uses celsius on the backend."""
return TEMP_CELSIUS
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attrs = {}
if self._outdoor_temp:
attrs["outdoor_temp"] = self._outdoor_temp
if self._power_consumption_heat:
attrs["power_consumption_heat_kw"] = round(
self._power_consumption_heat / 1000, 1
)
if self._power_consumption_cool:
attrs["power_consumption_cool_kw"] = round(
self._power_consumption_cool / 1000, 1
)
return attrs
@property
def unique_id(self):
"""Return unique ID for this device."""
return self._device_id
@property
def target_temperature_step(self) -> float:
"""Return whether setpoint should be whole or half degree precision."""
return self._setpoint_step
@property
def preset_modes(self):
"""Return a list of HVAC preset modes."""
return self._preset_list
@property
def preset_mode(self):
"""Return the current preset mode."""
return self._preset
async def async_set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
hvac_mode = kwargs.get(ATTR_HVAC_MODE)
if hvac_mode:
await self.async_set_hvac_mode(hvac_mode)
if temperature:
_LOGGER.debug("Setting %s to %s degrees", self._device_type, temperature)
await self._controller.set_temperature(self._device_id, temperature)
self._target_temp = temperature
# Write updated temperature to HA state to avoid flapping (API confirmation is slow)
self.async_write_ha_state()
async def async_set_hvac_mode(self, hvac_mode):
"""Set operation mode."""
_LOGGER.debug("Setting %s to %s mode", self._device_type, hvac_mode)
if hvac_mode == HVAC_MODE_OFF:
self._power = False
await self._controller.set_power_off(self._device_id)
# Write changes to HA, API can be slow to push changes
self.async_write_ha_state()
return
# First check device is turned on
if not self._controller.is_on(self._device_id):
self._power = True
await self._controller.set_power_on(self._device_id)
# Set the mode
await self._controller.set_mode(self._device_id, MAP_HVAC_MODE_TO_IH[hvac_mode])
# Send the temperature again in case changing modes has changed it
if self._target_temp:
await self._controller.set_temperature(self._device_id, self._target_temp)
# Updates can take longer than 2 seconds, so update locally
self._hvac_mode = hvac_mode
self.async_write_ha_state()
async def async_set_fan_mode(self, fan_mode):
"""Set fan mode (from quiet, low, medium, high, auto)."""
await self._controller.set_fan_speed(self._device_id, fan_mode)
# Updates can take longer than 2 seconds, so update locally
self._fan_speed = fan_mode
self.async_write_ha_state()
async def async_set_preset_mode(self, preset_mode):
"""Set preset mode."""
ih_preset_mode = MAP_PRESET_MODE_TO_IH.get(preset_mode)
await self._controller.set_preset_mode(self._device_id, ih_preset_mode)
async def async_set_swing_mode(self, swing_mode):
"""Set the vertical vane."""
swing_settings = MAP_SWING_TO_IH.get(swing_mode)
if swing_settings:
await self._controller.set_vertical_vane(
self._device_id, swing_settings.get("vvane")
)
await self._controller.set_horizontal_vane(
self._device_id, swing_settings.get("hvane")
)
async def async_update(self):
"""Copy values from controller dictionary to climate device."""
# Update values from controller's device dictionary
self._connected = self._controller.is_connected
self._current_temp = self._controller.get_temperature(self._device_id)
self._fan_speed = self._controller.get_fan_speed(self._device_id)
self._power = self._controller.is_on(self._device_id)
self._min_temp = self._controller.get_min_setpoint(self._device_id)
self._max_temp = self._controller.get_max_setpoint(self._device_id)
self._rssi = self._controller.get_rssi(self._device_id)
self._run_hours = self._controller.get_run_hours(self._device_id)
self._target_temp = self._controller.get_setpoint(self._device_id)
self._outdoor_temp = self._controller.get_outdoor_temperature(self._device_id)
# Operation mode
mode = self._controller.get_mode(self._device_id)
self._hvac_mode = MAP_IH_TO_HVAC_MODE.get(mode)
# Preset mode
preset = self._controller.get_preset_mode(self._device_id)
self._preset = MAP_IH_TO_PRESET_MODE.get(preset)
# Swing mode
# Climate module only supports one swing setting.
self._vvane = self._controller.get_vertical_swing(self._device_id)
self._hvane = self._controller.get_horizontal_swing(self._device_id)
# Power usage
self._power_consumption_heat = self._controller.get_heat_power_consumption(
self._device_id
)
self._power_consumption_cool = self._controller.get_cool_power_consumption(
self._device_id
)
async def async_will_remove_from_hass(self):
"""Shutdown the controller when the device is being removed."""
await self._controller.stop()
@property
def icon(self):
"""Return the icon for the current state."""
icon = None
if self._power:
icon = MAP_STATE_ICONS.get(self._hvac_mode)
return icon
async def async_update_callback(self, device_id=None):
"""Let HA know there has been an update from the controller."""
# Track changes in connection state
if not self._controller.is_connected and self._connected:
# Connection has dropped
self._connected = False
reconnect_minutes = 1 + randrange(10)
_LOGGER.error(
"Connection to %s API was lost. Reconnecting in %i minutes",
self._device_type,
reconnect_minutes,
)
# Schedule reconnection
async_call_later(
self.hass, reconnect_minutes * 60, self._controller.connect()
)
if self._controller.is_connected and not self._connected:
# Connection has been restored
self._connected = True
_LOGGER.debug("Connection to %s API was restored", self._device_type)
if not device_id or self._device_id == device_id:
# Update all devices if no device_id was specified
_LOGGER.debug(
"%s API sent a status update for device %s",
self._device_type,
device_id,
)
self.async_schedule_update_ha_state(True)
@property
def min_temp(self):
"""Return the minimum temperature for the current mode of operation."""
return self._min_temp
@property
def max_temp(self):
"""Return the maximum temperature for the current mode of operation."""
return self._max_temp
@property
def should_poll(self):
"""Poll for updates if pyIntesisHome doesn't have a socket open."""
return False
@property
def hvac_modes(self):
"""List of available operation modes."""
return self._hvac_mode_list
@property
def fan_mode(self):
"""Return whether the fan is on."""
return self._fan_speed
@property
def swing_mode(self):
"""Return current swing mode."""
if self._vvane == IH_SWING_SWING and self._hvane == IH_SWING_SWING:
swing = SWING_BOTH
elif self._vvane == IH_SWING_SWING:
swing = SWING_VERTICAL
elif self._hvane == IH_SWING_SWING:
swing = SWING_HORIZONTAL
else:
swing = SWING_OFF
return swing
@property
def fan_modes(self):
"""List of available fan modes."""
return self._fan_modes
@property
def swing_modes(self):
"""List of available swing positions."""
return self._swing_list
@property
def available(self) -> bool:
"""If the device hasn't been able to connect, mark as unavailable."""
return self._connected or self._connected is None
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temp
@property
def hvac_mode(self):
"""Return the current mode of operation if unit is on."""
if self._power:
return self._hvac_mode
return HVAC_MODE_OFF
@property
def target_temperature(self):
"""Return the current setpoint temperature if unit is on."""
return self._target_temp
@property
def supported_features(self):
"""Return the list of supported features."""
return self._support
|
import copy
from datetime import timedelta
import logging
from httplib2 import ServerNotFoundError # pylint: disable=import-error
from homeassistant.components.calendar import (
ENTITY_ID_FORMAT,
CalendarEventDevice,
calculate_offset,
is_offset_reached,
)
from homeassistant.helpers.entity import generate_entity_id
from homeassistant.util import Throttle, dt
from . import (
CONF_CAL_ID,
CONF_DEVICE_ID,
CONF_ENTITIES,
CONF_IGNORE_AVAILABILITY,
CONF_MAX_RESULTS,
CONF_NAME,
CONF_OFFSET,
CONF_SEARCH,
CONF_TRACK,
DEFAULT_CONF_OFFSET,
TOKEN_FILE,
GoogleCalendarService,
)
_LOGGER = logging.getLogger(__name__)
DEFAULT_GOOGLE_SEARCH_PARAMS = {
"orderBy": "startTime",
"maxResults": 5,
"singleEvents": True,
}
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
def setup_platform(hass, config, add_entities, disc_info=None):
"""Set up the calendar platform for event devices."""
if disc_info is None:
return
if not any(data[CONF_TRACK] for data in disc_info[CONF_ENTITIES]):
return
calendar_service = GoogleCalendarService(hass.config.path(TOKEN_FILE))
entities = []
for data in disc_info[CONF_ENTITIES]:
if not data[CONF_TRACK]:
continue
entity_id = generate_entity_id(
ENTITY_ID_FORMAT, data[CONF_DEVICE_ID], hass=hass
)
entity = GoogleCalendarEventDevice(
calendar_service, disc_info[CONF_CAL_ID], data, entity_id
)
entities.append(entity)
add_entities(entities, True)
class GoogleCalendarEventDevice(CalendarEventDevice):
"""A calendar event device."""
def __init__(self, calendar_service, calendar, data, entity_id):
"""Create the Calendar event device."""
self.data = GoogleCalendarData(
calendar_service,
calendar,
data.get(CONF_SEARCH),
data.get(CONF_IGNORE_AVAILABILITY),
data.get(CONF_MAX_RESULTS),
)
self._event = None
self._name = data[CONF_NAME]
self._offset = data.get(CONF_OFFSET, DEFAULT_CONF_OFFSET)
self._offset_reached = False
self.entity_id = entity_id
@property
def device_state_attributes(self):
"""Return the device state attributes."""
return {"offset_reached": self._offset_reached}
@property
def event(self):
"""Return the next upcoming event."""
return self._event
@property
def name(self):
"""Return the name of the entity."""
return self._name
async def async_get_events(self, hass, start_date, end_date):
"""Get all events in a specific time frame."""
return await self.data.async_get_events(hass, start_date, end_date)
def update(self):
"""Update event data."""
self.data.update()
event = copy.deepcopy(self.data.event)
if event is None:
self._event = event
return
event = calculate_offset(event, self._offset)
self._offset_reached = is_offset_reached(event)
self._event = event
class GoogleCalendarData:
"""Class to utilize calendar service object to get next event."""
def __init__(
self, calendar_service, calendar_id, search, ignore_availability, max_results
):
"""Set up how we are going to search the google calendar."""
self.calendar_service = calendar_service
self.calendar_id = calendar_id
self.search = search
self.ignore_availability = ignore_availability
self.max_results = max_results
self.event = None
def _prepare_query(self):
try:
service = self.calendar_service.get()
except ServerNotFoundError:
_LOGGER.error("Unable to connect to Google")
return None, None
params = dict(DEFAULT_GOOGLE_SEARCH_PARAMS)
params["calendarId"] = self.calendar_id
if self.max_results:
params["maxResults"] = self.max_results
if self.search:
params["q"] = self.search
return service, params
async def async_get_events(self, hass, start_date, end_date):
"""Get all events in a specific time frame."""
service, params = await hass.async_add_executor_job(self._prepare_query)
if service is None:
return []
params["timeMin"] = start_date.isoformat("T")
params["timeMax"] = end_date.isoformat("T")
events = await hass.async_add_executor_job(service.events)
result = await hass.async_add_executor_job(events.list(**params).execute)
items = result.get("items", [])
event_list = []
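        # Google marks events shown as "free" with transparency == "transparent";
        # unless ignore_availability is set, only "opaque" (busy) events are kept.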
for item in items:
if not self.ignore_availability and "transparency" in item:
if item["transparency"] == "opaque":
event_list.append(item)
else:
event_list.append(item)
return event_list
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data."""
service, params = self._prepare_query()
if service is None:
return
params["timeMin"] = dt.now().isoformat("T")
events = service.events()
result = events.list(**params).execute()
items = result.get("items", [])
new_event = None
for item in items:
if not self.ignore_availability and "transparency" in item:
if item["transparency"] == "opaque":
new_event = item
break
else:
new_event = item
break
self.event = new_event
|
import time
from typing import List
from paasta_tools.deployd.common import DelayDeadlineQueueProtocol
from paasta_tools.deployd.common import PaastaThread
from paasta_tools.deployd.workers import PaastaDeployWorker
from paasta_tools.metrics.metrics_lib import BaseMetrics
class MetricsThread(PaastaThread):
def __init__(self, metrics_provider: BaseMetrics) -> None:
super().__init__()
self.metrics = metrics_provider
def run_once(self) -> None:
raise NotImplementedError()
def run(self) -> None:
while True:
last_run_time = time.time()
self.run_once()
            time.sleep(max(0, last_run_time + 20 - time.time()))
class QueueAndWorkerMetrics(MetricsThread):
def __init__(
self,
queue: DelayDeadlineQueueProtocol,
workers: List[PaastaDeployWorker],
cluster: str,
metrics_provider: BaseMetrics,
) -> None:
super().__init__(metrics_provider)
self.daemon = True
self.queue = queue
self.instances_to_bounce_later_gauge = self.metrics.create_gauge(
"instances_to_bounce_later", paasta_cluster=cluster
)
self.instances_to_bounce_now_gauge = self.metrics.create_gauge(
"instances_to_bounce_now", paasta_cluster=cluster
)
self.instances_with_past_deadline_gauge = self.metrics.create_gauge(
"instances_with_past_deadline", paasta_cluster=cluster
)
self.instances_with_deadline_in_next_n_seconds_gauges = {
(available, n): self.metrics.create_gauge(
f"{available}_instances_with_deadline_in_next_{n}s",
paasta_cluster=cluster,
)
for n in [60, 300, 3600]
for available in ["available", "unavailable"]
}
self.max_time_past_deadline_gauge = self.metrics.create_gauge(
"max_time_past_deadline", paasta_cluster=cluster
)
self.sum_time_past_deadline_gauge = self.metrics.create_gauge(
"sum_time_past_deadline", paasta_cluster=cluster
)
self.workers = workers
self.workers_busy_gauge = self.metrics.create_gauge(
"workers_busy", paasta_cluster=cluster
)
self.workers_idle_gauge = self.metrics.create_gauge(
"workers_idle", paasta_cluster=cluster
)
self.workers_dead_gauge = self.metrics.create_gauge(
"workers_dead", paasta_cluster=cluster
)
def run_once(self) -> None:
currently_available_instances = tuple(
self.queue.get_available_service_instances(fetch_service_instances=False)
)
currently_unavailable_instances = tuple(
self.queue.get_unavailable_service_instances(fetch_service_instances=False)
)
self.instances_to_bounce_later_gauge.set(len(currently_available_instances))
self.instances_to_bounce_now_gauge.set(len(currently_unavailable_instances))
available_deadlines = [
deadline for deadline, _ in currently_available_instances
]
unavailable_deadlines = [
deadline for _, deadline, _ in currently_unavailable_instances
]
now = time.time()
self.instances_with_past_deadline_gauge.set(
len([1 for deadline in available_deadlines if deadline < now])
)
for (
(available, n),
gauge,
) in self.instances_with_deadline_in_next_n_seconds_gauges.items():
if available == "available":
deadlines = available_deadlines
else:
deadlines = unavailable_deadlines
gauge.set(len([1 for deadline in deadlines if now < deadline < now + n]))
self.max_time_past_deadline_gauge.set(
max(
[now - deadline for deadline in available_deadlines if deadline < now],
default=0,
)
)
self.sum_time_past_deadline_gauge.set(
sum([max(0, now - deadline) for deadline in available_deadlines])
)
self.workers_busy_gauge.set(
len([worker for worker in self.workers if worker.busy])
)
self.workers_idle_gauge.set(
len([worker for worker in self.workers if not worker.busy])
)
self.workers_dead_gauge.set(
len([worker for worker in self.workers if not worker.is_alive()])
)
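# Illustrative wiring (an assumption, not taken from this module): deployd would
# construct the thread with its queue, worker list, cluster name and metrics
# provider and then start it, after which run_once() publishes the gauges above
# roughly every 20 seconds, e.g.
#     QueueAndWorkerMetrics(queue, workers, cluster, metrics_provider).start()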
|
import logging
from bimmer_connected.state import ChargingState
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_UNIT_SYSTEM_IMPERIAL,
LENGTH_KILOMETERS,
LENGTH_MILES,
PERCENTAGE,
TIME_HOURS,
VOLUME_GALLONS,
VOLUME_LITERS,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.icon import icon_for_battery_level
from . import DOMAIN as BMW_DOMAIN
from .const import ATTRIBUTION
_LOGGER = logging.getLogger(__name__)
ATTR_TO_HA_METRIC = {
"mileage": ["mdi:speedometer", LENGTH_KILOMETERS],
"remaining_range_total": ["mdi:map-marker-distance", LENGTH_KILOMETERS],
"remaining_range_electric": ["mdi:map-marker-distance", LENGTH_KILOMETERS],
"remaining_range_fuel": ["mdi:map-marker-distance", LENGTH_KILOMETERS],
"max_range_electric": ["mdi:map-marker-distance", LENGTH_KILOMETERS],
"remaining_fuel": ["mdi:gas-station", VOLUME_LITERS],
"charging_time_remaining": ["mdi:update", TIME_HOURS],
"charging_status": ["mdi:battery-charging", None],
# No icon as this is dealt with directly as a special case in icon()
"charging_level_hv": [None, PERCENTAGE],
}
ATTR_TO_HA_IMPERIAL = {
"mileage": ["mdi:speedometer", LENGTH_MILES],
"remaining_range_total": ["mdi:map-marker-distance", LENGTH_MILES],
"remaining_range_electric": ["mdi:map-marker-distance", LENGTH_MILES],
"remaining_range_fuel": ["mdi:map-marker-distance", LENGTH_MILES],
"max_range_electric": ["mdi:map-marker-distance", LENGTH_MILES],
"remaining_fuel": ["mdi:gas-station", VOLUME_GALLONS],
"charging_time_remaining": ["mdi:update", TIME_HOURS],
"charging_status": ["mdi:battery-charging", None],
# No icon as this is dealt with directly as a special case in icon()
"charging_level_hv": [None, PERCENTAGE],
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the BMW sensors."""
if hass.config.units.name == CONF_UNIT_SYSTEM_IMPERIAL:
attribute_info = ATTR_TO_HA_IMPERIAL
else:
attribute_info = ATTR_TO_HA_METRIC
accounts = hass.data[BMW_DOMAIN]
_LOGGER.debug("Found BMW accounts: %s", ", ".join([a.name for a in accounts]))
devices = []
for account in accounts:
for vehicle in account.account.vehicles:
for attribute_name in vehicle.drive_train_attributes:
if attribute_name in vehicle.available_attributes:
device = BMWConnectedDriveSensor(
account, vehicle, attribute_name, attribute_info
)
devices.append(device)
add_entities(devices, True)
class BMWConnectedDriveSensor(Entity):
"""Representation of a BMW vehicle sensor."""
def __init__(self, account, vehicle, attribute: str, attribute_info):
"""Initialize BMW vehicle sensor."""
self._vehicle = vehicle
self._account = account
self._attribute = attribute
self._state = None
self._name = f"{self._vehicle.name} {self._attribute}"
self._unique_id = f"{self._vehicle.vin}-{self._attribute}"
self._attribute_info = attribute_info
@property
def should_poll(self) -> bool:
"""Return False.
Data update is triggered from BMWConnectedDriveEntity.
"""
return False
@property
def unique_id(self):
"""Return the unique ID of the sensor."""
return self._unique_id
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
def icon(self):
"""Icon to use in the frontend, if any."""
vehicle_state = self._vehicle.state
charging_state = vehicle_state.charging_status in [ChargingState.CHARGING]
if self._attribute == "charging_level_hv":
return icon_for_battery_level(
battery_level=vehicle_state.charging_level_hv, charging=charging_state
)
icon, _ = self._attribute_info.get(self._attribute, [None, None])
return icon
@property
def state(self):
"""Return the state of the sensor.
The return type of this call depends on the attribute that
is configured.
"""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Get the unit of measurement."""
_, unit = self._attribute_info.get(self._attribute, [None, None])
return unit
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {
"car": self._vehicle.name,
ATTR_ATTRIBUTION: ATTRIBUTION,
}
def update(self) -> None:
"""Read new state data from the library."""
_LOGGER.debug("Updating %s", self._vehicle.name)
vehicle_state = self._vehicle.state
if self._attribute == "charging_status":
self._state = getattr(vehicle_state, self._attribute).value
elif self.unit_of_measurement == VOLUME_GALLONS:
value = getattr(vehicle_state, self._attribute)
value_converted = self.hass.config.units.volume(value, VOLUME_LITERS)
self._state = round(value_converted)
elif self.unit_of_measurement == LENGTH_MILES:
value = getattr(vehicle_state, self._attribute)
value_converted = self.hass.config.units.length(value, LENGTH_KILOMETERS)
self._state = round(value_converted)
else:
self._state = getattr(vehicle_state, self._attribute)
def update_callback(self):
"""Schedule a state update."""
self.schedule_update_ha_state(True)
async def async_added_to_hass(self):
"""Add callback after being added to hass.
Show latest data after startup.
"""
self._account.add_update_listener(self.update_callback)
|
import numpy as np
from ..annotations import _annotations_starts_stops, Annotations
from ..io import BaseRaw
from ..io.pick import _picks_to_idx
from ..utils import (_validate_type, verbose, logger, _pl,
_mask_to_onsets_offsets, ProgressBar)
@verbose
def annotate_flat(raw, bad_percent=5., min_duration=0.005, picks=None,
verbose=None):
"""Annotate flat segments of raw data (or add to a bad channel list).
Parameters
----------
raw : instance of Raw
The raw data.
bad_percent : float
The percentage of the time a channel can be bad.
Below this percentage, temporal bad marking (:class:`~mne.Annotations`)
will be used. Above this percentage, spatial bad marking
(:class:`info['bads'] <mne.Info>`) will be used.
Defaults to 5 (5%%).
min_duration : float
The minimum duration (sec) to consider as actually flat.
For some systems with low bit data representations, adjacent
time samples with exactly the same value are not totally uncommon.
Defaults to 0.005 (5 ms).
%(picks_good_data)s
%(verbose)s
Returns
-------
annot : instance of Annotations
The annotated bad segments.
bads : list
The channels detected as bad.
Notes
-----
This function is useful both for removing short segments of data where
the acquisition system clipped (i.e., hit the ADC limit of the hardware)
and for automatically identifying channels that were flat for a large
proportion of a given recording.
This function may perform much faster if data are loaded
in memory, as it loads data one channel at a time (across all
time points), which is typically not an efficient way to read
raw data from disk.
.. versionadded:: 0.18
"""
_validate_type(raw, BaseRaw, 'raw')
bad_percent = float(bad_percent)
min_duration = float(min_duration)
picks = _picks_to_idx(raw.info, picks, 'data_or_ica', exclude='bads')
# This will not be so efficient for most readers, but we can optimize
# it later
any_flat = np.zeros(len(raw.times), bool)
bads = list()
time_thresh = int(np.round(min_duration * raw.info['sfreq']))
onsets, ends = _annotations_starts_stops(raw, 'bad_acq_skip', invert=True)
idx = np.concatenate([np.arange(onset, end)
for onset, end in zip(onsets, ends)])
logger.info('Finding flat segments')
for pick in ProgressBar(picks, mesg='Channels'):
data = np.concatenate([raw[pick, onset:end][0][0]
for onset, end in zip(onsets, ends)])
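        # A sample is flat if it repeats either neighbour: diff == 0 flags equal
        # adjacent pairs, and the concatenation below widens that pairwise mask
        # back to one entry per sample.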
flat = np.diff(data) == 0
flat = np.concatenate(
[flat[[0]], flat[1:] | flat[:-1], flat[[-1]]])
starts, stops = _mask_to_onsets_offsets(flat)
for start, stop in zip(starts, stops):
if stop - start < time_thresh:
flat[start:stop] = False
flat_mean = flat.mean()
if flat_mean: # only do something if there are actually flat parts
flat_mean *= 100
if flat_mean > bad_percent:
kind, comp = 'bads', '>'
bads.append(raw.ch_names[pick])
else:
kind, comp = 'BAD_', '≤'
any_flat[idx] |= flat
logger.debug('%s: %s (%s %s %s)'
% (kind, raw.ch_names[pick],
flat_mean, comp, bad_percent))
starts, stops = _mask_to_onsets_offsets(any_flat)
logger.info('Marking %0.2f%% of time points (%d segment%s) and '
'%d/%d channel%s bad%s'
% (100 * any_flat[idx].mean(), len(starts), _pl(starts),
len(bads), len(picks), _pl(bads),
(': %s' % (bads,)) if bads else ''))
bads = [bad for bad in bads if bad not in raw.info['bads']]
starts, stops = np.array(starts), np.array(stops)
onsets = (starts + raw.first_samp) / raw.info['sfreq']
durations = (stops - starts) / raw.info['sfreq']
annot = Annotations(onsets, durations, ['BAD_flat'] * len(onsets),
orig_time=raw.annotations.orig_time)
return annot, bads
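# Illustrative usage (not part of this module), assuming ``raw`` is an
# instance of mne.io.Raw:
#     annot, bads = annotate_flat(raw, bad_percent=5., min_duration=0.005)
#     raw.info['bads'] += [ch for ch in bads if ch not in raw.info['bads']]
#     raw.set_annotations(raw.annotations + annot)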
|
from __future__ import unicode_literals
import platform
# Detect the running platform
def is_Windows():
return platform.system() == "Windows"
def is_Linux():
return platform.system() == "Linux"
def is_Mac():
return platform.system() == "Darwin"
# Windows 10 (v1511) added support for ANSI escape sequences
def is_higher_win10_v1511():
    if not is_Windows():
        return False
    try:
        parts = platform.version().split('.')
        return int(parts[0]) >= 10 and int(parts[-1]) >= 1511
    except (ValueError, IndexError):
        return False
# Python version >= 3
def py_ver_egt_3():
    return int(platform.python_version()[0]) >= 3
|
from typing import Optional
from homeassistant.components import zone
from homeassistant.const import (
ATTR_BATTERY_LEVEL,
ATTR_GPS_ACCURACY,
ATTR_LATITUDE,
ATTR_LONGITUDE,
STATE_HOME,
STATE_NOT_HOME,
)
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from .const import ATTR_SOURCE_TYPE, DOMAIN, LOGGER
async def async_setup_entry(hass, entry):
"""Set up an entry."""
component: Optional[EntityComponent] = hass.data.get(DOMAIN)
if component is None:
component = hass.data[DOMAIN] = EntityComponent(LOGGER, DOMAIN, hass)
return await component.async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload an entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class BaseTrackerEntity(Entity):
"""Represent a tracked device."""
@property
def battery_level(self):
"""Return the battery level of the device.
Percentage from 0-100.
"""
return None
@property
def source_type(self):
"""Return the source type, eg gps or router, of the device."""
raise NotImplementedError
@property
def state_attributes(self):
"""Return the device state attributes."""
attr = {ATTR_SOURCE_TYPE: self.source_type}
if self.battery_level:
attr[ATTR_BATTERY_LEVEL] = self.battery_level
return attr
class TrackerEntity(BaseTrackerEntity):
"""Represent a tracked device."""
@property
def should_poll(self):
"""No polling for entities that have location pushed."""
return False
@property
def force_update(self):
"""All updates need to be written to the state machine if we're not polling."""
return not self.should_poll
@property
def location_accuracy(self):
"""Return the location accuracy of the device.
Value in meters.
"""
return 0
@property
def location_name(self) -> str:
"""Return a location name for the current location of the device."""
return None
@property
def latitude(self) -> float:
"""Return latitude value of the device."""
return NotImplementedError
@property
def longitude(self) -> float:
"""Return longitude value of the device."""
return NotImplementedError
@property
def state(self):
"""Return the state of the device."""
if self.location_name:
return self.location_name
if self.latitude is not None:
zone_state = zone.async_active_zone(
self.hass, self.latitude, self.longitude, self.location_accuracy
)
if zone_state is None:
state = STATE_NOT_HOME
elif zone_state.entity_id == zone.ENTITY_ID_HOME:
state = STATE_HOME
else:
state = zone_state.name
return state
return None
@property
def state_attributes(self):
"""Return the device state attributes."""
attr = {}
attr.update(super().state_attributes)
if self.latitude is not None:
attr[ATTR_LATITUDE] = self.latitude
attr[ATTR_LONGITUDE] = self.longitude
attr[ATTR_GPS_ACCURACY] = self.location_accuracy
return attr
class ScannerEntity(BaseTrackerEntity):
"""Represent a tracked device that is on a scanned network."""
@property
def state(self):
"""Return the state of the device."""
if self.is_connected:
return STATE_HOME
return STATE_NOT_HOME
@property
def is_connected(self):
"""Return true if the device is connected to the network."""
raise NotImplementedError
|
import logging
from pyrainbird import RainbirdController
from homeassistant.helpers.entity import Entity
from . import (
DATA_RAINBIRD,
RAINBIRD_CONTROLLER,
SENSOR_TYPE_RAINDELAY,
SENSOR_TYPE_RAINSENSOR,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a Rain Bird sensor."""
if discovery_info is None:
return
controller = hass.data[DATA_RAINBIRD][discovery_info[RAINBIRD_CONTROLLER]]
add_entities(
[RainBirdSensor(controller, sensor_type) for sensor_type in SENSOR_TYPES], True
)
class RainBirdSensor(Entity):
"""A sensor implementation for Rain Bird device."""
def __init__(self, controller: RainbirdController, sensor_type):
"""Initialize the Rain Bird sensor."""
self._sensor_type = sensor_type
self._controller = controller
self._name = SENSOR_TYPES[self._sensor_type][0]
self._icon = SENSOR_TYPES[self._sensor_type][2]
self._unit_of_measurement = SENSOR_TYPES[self._sensor_type][1]
self._state = None
@property
def state(self):
"""Return the state of the sensor."""
return self._state
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Updating sensor: %s", self._name)
if self._sensor_type == SENSOR_TYPE_RAINSENSOR:
self._state = self._controller.get_rain_sensor_state()
elif self._sensor_type == SENSOR_TYPE_RAINDELAY:
self._state = self._controller.get_rain_delay()
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def unit_of_measurement(self):
"""Return the units of measurement."""
return self._unit_of_measurement
@property
def icon(self):
"""Return icon."""
return self._icon
|
from __future__ import division
import numpy as np
import six
import unittest
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import testing
from chainermn import create_communicator
from chainercv.links.model.ssd import multibox_loss
from chainercv.utils.testing import attr
@testing.parameterize(*testing.product({
'k': [3, 10000],
'batchsize': [1, 5],
'n_bbox': [10, 500],
'n_class': [3, 20],
'variable': [True, False],
}))
class TestMultiboxLoss(unittest.TestCase):
def setUp(self):
self.mb_locs = np.random.uniform(
-10, 10, size=(self.batchsize, self.n_bbox, 4)) \
.astype(np.float32)
self.mb_confs = np.random.uniform(
-50, 50, size=(self.batchsize, self.n_bbox, self.n_class)) \
.astype(np.float32)
self.gt_mb_locs = np.random.uniform(
-10, 10, size=(self.batchsize, self.n_bbox, 4)) \
.astype(np.float32)
self.gt_mb_labels = np.random.randint(
self.n_class, size=(self.batchsize, self.n_bbox)) \
.astype(np.int32)
# increase negative samples
self.gt_mb_labels[np.random.uniform(
size=self.gt_mb_labels.shape) > 0.1] = 0
def _check_forward(self, mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k):
if self.variable:
mb_locs = chainer.Variable(mb_locs)
mb_confs = chainer.Variable(mb_confs)
gt_mb_locs = chainer.Variable(gt_mb_locs)
gt_mb_labels = chainer.Variable(gt_mb_labels)
loc_loss, conf_loss = multibox_loss(
mb_locs, mb_confs, gt_mb_locs, gt_mb_labels, k)
self.assertIsInstance(loc_loss, chainer.Variable)
self.assertEqual(loc_loss.shape, ())
self.assertEqual(loc_loss.dtype, mb_locs.dtype)
self.assertIsInstance(conf_loss, chainer.Variable)
self.assertEqual(conf_loss.shape, ())
self.assertEqual(conf_loss.dtype, mb_confs.dtype)
if self.variable:
mb_locs = mb_locs.array
mb_confs = mb_confs.array
gt_mb_locs = gt_mb_locs.array
gt_mb_labels = gt_mb_labels.array
mb_locs = cuda.to_cpu(mb_locs)
mb_confs = cuda.to_cpu(mb_confs)
gt_mb_locs = cuda.to_cpu(gt_mb_locs)
gt_mb_labels = cuda.to_cpu(gt_mb_labels)
loc_loss = cuda.to_cpu(loc_loss.array)
conf_loss = cuda.to_cpu(conf_loss.array)
n_positive_total = 0
expect_loc_loss = 0
expect_conf_loss = 0
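        # Expected losses computed naively: the localization (Huber) loss covers
        # positive anchors only, while the confidence loss adds hard negative
        # mining, keeping the k * n_positive highest-loss negatives per image;
        # both are normalized by the total number of positives.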
for i in six.moves.xrange(gt_mb_labels.shape[0]):
n_positive = 0
negatives = []
for j in six.moves.xrange(gt_mb_labels.shape[1]):
loc = F.huber_loss(
mb_locs[np.newaxis, i, j],
gt_mb_locs[np.newaxis, i, j], 1).array
conf = F.softmax_cross_entropy(
mb_confs[np.newaxis, i, j],
gt_mb_labels[np.newaxis, i, j]).array
if gt_mb_labels[i, j] > 0:
n_positive += 1
expect_loc_loss += loc
expect_conf_loss += conf
else:
negatives.append(conf)
n_positive_total += n_positive
if n_positive > 0:
expect_conf_loss += sum(sorted(negatives)[-n_positive * k:])
if n_positive_total == 0:
expect_loc_loss = 0
expect_conf_loss = 0
else:
expect_loc_loss /= n_positive_total
expect_conf_loss /= n_positive_total
np.testing.assert_almost_equal(
loc_loss, expect_loc_loss, decimal=2)
np.testing.assert_almost_equal(
conf_loss, expect_conf_loss, decimal=2)
def test_forward_cpu(self):
self._check_forward(
self.mb_locs, self.mb_confs,
self.gt_mb_locs, self.gt_mb_labels,
self.k)
@attr.gpu
def test_forward_gpu(self):
self._check_forward(
cuda.to_gpu(self.mb_locs), cuda.to_gpu(self.mb_confs),
cuda.to_gpu(self.gt_mb_locs), cuda.to_gpu(self.gt_mb_labels),
self.k)
@attr.mpi
class TestMultiNodeMultiboxLoss(unittest.TestCase):
k = 3
batchsize = 5
n_bbox = 10
n_class = 3
def setUp(self):
self.comm = create_communicator('naive')
batchsize = self.comm.size * self.batchsize
np.random.seed(0)
self.mb_locs = np.random.uniform(
-10, 10, size=(batchsize, self.n_bbox, 4)) \
.astype(np.float32)
self.mb_confs = np.random.uniform(
-50, 50, size=(batchsize, self.n_bbox, self.n_class)) \
.astype(np.float32)
self.gt_mb_locs = np.random.uniform(
-10, 10, size=(batchsize, self.n_bbox, 4)) \
.astype(np.float32)
self.gt_mb_labels = np.random.randint(
self.n_class, size=(batchsize, self.n_bbox)) \
.astype(np.int32)
self.mb_locs_local = self.comm.mpi_comm.scatter(
self.mb_locs.reshape(
(self.comm.size, self.batchsize, self.n_bbox, 4)))
self.mb_confs_local = self.comm.mpi_comm.scatter(
self.mb_confs.reshape(
(self.comm.size, self.batchsize, self.n_bbox, self.n_class)))
self.gt_mb_locs_local = self.comm.mpi_comm.scatter(
self.gt_mb_locs.reshape(
(self.comm.size, self.batchsize, self.n_bbox, 4)))
self.gt_mb_labels_local = self.comm.mpi_comm.scatter(
self.gt_mb_labels.reshape(
(self.comm.size, self.batchsize, self.n_bbox)))
def _check_forward(
self, mb_locs_local, mb_confs_local,
gt_mb_locs_local, gt_mb_labels_local, k):
loc_loss_local, conf_loss_local = multibox_loss(
mb_locs_local, mb_confs_local,
gt_mb_locs_local, gt_mb_labels_local, k, self.comm)
loc_loss_local = cuda.to_cpu(loc_loss_local.array)
conf_loss_local = cuda.to_cpu(conf_loss_local.array)
loc_loss = self.comm.allreduce_obj(loc_loss_local) / self.comm.size
conf_loss = self.comm.allreduce_obj(conf_loss_local) / self.comm.size
expect_loc_loss, expect_conf_loss = multibox_loss(
self.mb_locs, self.mb_confs, self.gt_mb_locs, self.gt_mb_labels, k)
np.testing.assert_almost_equal(
loc_loss, expect_loc_loss.array, decimal=2)
np.testing.assert_almost_equal(
conf_loss, expect_conf_loss.array, decimal=2)
def test_multi_node_forward_cpu(self):
self._check_forward(
self.mb_locs, self.mb_confs,
self.gt_mb_locs, self.gt_mb_labels,
self.k)
@attr.gpu
def test_multi_node_forward_gpu(self):
self._check_forward(
cuda.to_gpu(self.mb_locs), cuda.to_gpu(self.mb_confs),
cuda.to_gpu(self.gt_mb_locs), cuda.to_gpu(self.gt_mb_labels),
self.k)
testing.run_module(__name__, __file__)
|
import os, shutil
class FileSystemListing:
def entries_if_dir_exists(self, path):
if os.path.exists(path):
for entry in os.listdir(path):
yield entry
def exists(self, path):
return os.path.exists(path)
class FileSystemReader(FileSystemListing):
    def is_sticky_dir(self, path):
        return os.path.isdir(path) and has_sticky_bit(path)
def is_symlink(self, path):
return os.path.islink(path)
    def contents_of(self, path):
        with open(path) as f:
            return f.read()
class FileRemover:
def remove_file(self, path):
try:
return os.remove(path)
except OSError:
shutil.rmtree(path)
def remove_file_if_exists(self,path):
if os.path.exists(path): self.remove_file(path)
def contents_of(path): # TODO remove
return FileSystemReader().contents_of(path)
def has_sticky_bit(path): # TODO move to FileSystemReader
    import stat
    return (os.stat(path).st_mode & stat.S_ISVTX) == stat.S_ISVTX
def remove_file(path):
    if os.path.lexists(path):
        try:
            os.remove(path)
        except OSError:
            return shutil.rmtree(path)
def move(path, dest):
return shutil.move(path, str(dest))
def list_files_in_dir(path):
for entry in os.listdir(path):
result = os.path.join(path, entry)
yield result
def mkdirs(path):
if os.path.isdir(path):
return
os.makedirs(path)
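# Note: os.O_CREAT | os.O_EXCL below makes atomic_write fail with FileExistsError
# if the target already exists, so it never overwrites; `content` must be a
# bytes-like object because os.write() writes raw bytes.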
def atomic_write(filename, content):
file_handle = os.open(filename, os.O_RDWR | os.O_CREAT | os.O_EXCL,
0o600)
os.write(file_handle, content)
os.close(file_handle)
def ensure_dir(path, mode):
if os.path.isdir(path):
os.chmod(path, mode)
return
os.makedirs(path, mode)
|
import json
import unittest
from absl import flags
from perfkitbenchmarker.providers.gcp import bigquery
from tests import pkb_common_test_case
PACKAGE_NAME = 'PACKAGE_NAME'
DATASET_ID = 'DATASET_ID'
PROJECT_ID = 'PROJECT_ID'
QUERY_NAME = 'QUERY_NAME'
_TEST_RUN_URI = 'fakeru'
_GCP_ZONE_US_CENTRAL_1_C = 'us-central1-c'
_BASE_BIGQUERY_SPEC = {
'type': 'bigquery',
'cluster_identifier': 'bigquerypkb.tpcds_100G'
}
FLAGS = flags.FLAGS
class FakeRemoteVM(object):
def Install(self, package_name):
if package_name != 'google_cloud_sdk':
raise RuntimeError
class FakeRemoteVMForCliClientInterfacePrepare(object):
"""Class to setup a Fake VM that prepares a Client VM (CLI Client)."""
def __init__(self):
self.valid_install_package_list = ['pip', 'google_cloud_sdk']
self.valid_remote_command_list = [
'sudo pip install absl-py',
'/tmp/pkb/google-cloud-sdk/bin/gcloud auth activate-service-account '
'SERVICE_ACCOUNT --key-file=SERVICE_ACCOUNT_KEY_FILE',
'chmod 755 script_runner.sh',
'echo "\nMaxSessions 100" | sudo tee -a /etc/ssh/sshd_config'
]
def Install(self, package_name):
if package_name not in self.valid_install_package_list:
raise RuntimeError
def RemoteCommand(self, command):
if command not in self.valid_remote_command_list:
raise RuntimeError
def InstallPreprovisionedPackageData(self, package_name, filenames,
install_path):
if package_name != 'PACKAGE_NAME':
raise RuntimeError
def PushFile(self, source_path):
pass
class FakeRemoteVMForCliClientInterfaceExecuteQuery(object):
"""Class to setup a Fake VM that executes script on Client VM (CLI Client)."""
def RemoteCommand(self, command):
if command == 'echo "\nMaxSessions 100" | sudo tee -a /etc/ssh/sshd_config':
return None, None
expected_command = ('python script_driver.py --script={} --bq_project_id={}'
' --bq_dataset_id={}').format(QUERY_NAME, PROJECT_ID,
DATASET_ID)
if command != expected_command:
raise RuntimeError
response_object = {QUERY_NAME: {'job_id': 'JOB_ID', 'execution_time': 1.0}}
response = json.dumps(response_object)
return response, None
class FakeRemoteVMForJavaClientInterfacePrepare(object):
"""Class to setup a Fake VM that prepares a Client VM (JAVA Client)."""
def __init__(self):
self.valid_install_package_list = ['openjdk']
def Install(self, package_name):
if package_name != 'openjdk':
raise RuntimeError
def RemoteCommand(self, command):
if command == 'echo "\nMaxSessions 100" | sudo tee -a /etc/ssh/sshd_config':
return None, None
else:
raise RuntimeError
def InstallPreprovisionedPackageData(self, package_name, filenames,
install_path):
if package_name != 'PACKAGE_NAME':
raise RuntimeError
class FakeRemoteVMForJavaClientInterfaceExecuteQuery(object):
"""Class to setup a Fake VM that executes script on Client VM (JAVA Client)."""
def RemoteCommand(self, command):
if command == 'echo "\nMaxSessions 100" | sudo tee -a /etc/ssh/sshd_config':
return None, None
expected_command = ('java -cp bq-java-client-2.3.jar '
'com.google.cloud.performance.edw.Single --project {} '
'--credentials_file {} --dataset {} --query_file '
'{}').format(PROJECT_ID, 'SERVICE_ACCOUNT_KEY_FILE',
DATASET_ID, QUERY_NAME)
if command != expected_command:
raise RuntimeError
response_object = {'query_wall_time_in_secs': 1.0,
'details': {'job_id': 'JOB_ID'}}
response = json.dumps(response_object)
return response, None
class FakeBenchmarkSpec(object):
"""Fake BenchmarkSpec to use for setting client interface attributes."""
def __init__(self, client_vm):
self.name = PACKAGE_NAME
self.vms = [client_vm]
class BigqueryTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(BigqueryTestCase, self).setUp()
FLAGS.cloud = 'GCP'
FLAGS.run_uri = _TEST_RUN_URI
FLAGS.zones = [_GCP_ZONE_US_CENTRAL_1_C]
def testGetBigQueryClientInterfaceGeneric(self):
interface = bigquery.GetBigQueryClientInterface(PROJECT_ID, DATASET_ID)
self.assertEqual(interface.project_id, PROJECT_ID)
self.assertEqual(interface.dataset_id, DATASET_ID)
def testGetBigQueryClientInterfaceCli(self):
FLAGS.bq_client_interface = 'CLI'
interface = bigquery.GetBigQueryClientInterface(PROJECT_ID, DATASET_ID)
self.assertIsInstance(interface, bigquery.CliClientInterface)
def testGetBigQueryClientInterfaceJava(self):
FLAGS.bq_client_interface = 'JAVA'
interface = bigquery.GetBigQueryClientInterface(PROJECT_ID, DATASET_ID)
self.assertIsInstance(interface, bigquery.JavaClientInterface)
  def testGenericClientInterfaceGetMetadata(self):
FLAGS.bq_client_interface = 'CLI'
interface = bigquery.GetBigQueryClientInterface(PROJECT_ID, DATASET_ID)
self.assertDictEqual(interface.GetMetadata(), {'client': 'CLI'})
FLAGS.bq_client_interface = 'JAVA'
interface = bigquery.GetBigQueryClientInterface(PROJECT_ID, DATASET_ID)
self.assertDictEqual(interface.GetMetadata(), {'client': 'JAVA'})
def testCliClientInterfacePrepare(self):
FLAGS.bq_client_interface = 'CLI'
FLAGS.gcp_service_account_key_file = 'SERVICE_ACCOUNT_KEY_FILE'
FLAGS.gcp_service_account = 'SERVICE_ACCOUNT'
interface = bigquery.GetBigQueryClientInterface(PROJECT_ID, DATASET_ID)
self.assertIsInstance(interface, bigquery.CliClientInterface)
bm_spec = FakeBenchmarkSpec(FakeRemoteVMForCliClientInterfacePrepare())
interface.SetProvisionedAttributes(bm_spec)
interface.Prepare(PACKAGE_NAME)
def testCliClientInterfaceExecuteQuery(self):
FLAGS.bq_client_interface = 'CLI'
interface = bigquery.GetBigQueryClientInterface(PROJECT_ID, DATASET_ID)
self.assertIsInstance(interface, bigquery.CliClientInterface)
bm_spec = FakeBenchmarkSpec(FakeRemoteVMForCliClientInterfaceExecuteQuery())
interface.SetProvisionedAttributes(bm_spec)
performance, details = interface.ExecuteQuery(QUERY_NAME)
self.assertEqual(performance, 1.0)
self.assertDictEqual(details, {'client': 'CLI', 'job_id': 'JOB_ID'})
def testJavaClientInterfacePrepare(self):
FLAGS.bq_client_interface = 'JAVA'
FLAGS.gcp_service_account_key_file = 'SERVICE_ACCOUNT_KEY_FILE'
interface = bigquery.GetBigQueryClientInterface(PROJECT_ID, DATASET_ID)
self.assertIsInstance(interface, bigquery.JavaClientInterface)
bm_spec = FakeBenchmarkSpec(FakeRemoteVMForJavaClientInterfacePrepare())
interface.SetProvisionedAttributes(bm_spec)
interface.Prepare(PACKAGE_NAME)
def testJavaClientInterfaceExecuteQuery(self):
FLAGS.bq_client_interface = 'JAVA'
FLAGS.gcp_service_account_key_file = 'SERVICE_ACCOUNT_KEY_FILE'
interface = bigquery.GetBigQueryClientInterface(PROJECT_ID, DATASET_ID)
self.assertIsInstance(interface, bigquery.JavaClientInterface)
bm_spec = FakeBenchmarkSpec(
FakeRemoteVMForJavaClientInterfaceExecuteQuery())
interface.SetProvisionedAttributes(bm_spec)
performance, details = interface.ExecuteQuery(QUERY_NAME)
self.assertEqual(performance, 1.0)
self.assertDictEqual(details, {'client': 'JAVA', 'job_id': 'JOB_ID'})
if __name__ == '__main__':
unittest.main()
|
import os
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
# EC2 provides unique random hostnames.
def test_hostname(host):
pass
def test_etc_molecule_directory(host):
f = host.file('/etc/molecule')
assert f.is_directory
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o755
def test_etc_molecule_ansible_hostname_file(host):
filename = '/etc/molecule/{}'.format(host.check_output('hostname -s'))
f = host.file(filename)
assert f.is_file
assert f.user == 'root'
assert f.group == 'root'
assert f.mode == 0o644
|
from flask import abort, Blueprint, jsonify
from httpobs import SOURCE_URL, VERSION
from httpobs.database import get_cursor
monitoring_api = Blueprint('monitoring-api', __name__)
@monitoring_api.route('/__heartbeat__')
def heartbeat():
# TODO: check celery status
try:
# Check the database
with get_cursor() as _: # noqa
pass
    except Exception:
abort(500)
return jsonify({'database': 'OK'})
@monitoring_api.route('/__lbheartbeat__')
def lbheartbeat():
return ''
@monitoring_api.route('/__version__')
def version():
return jsonify({'source': SOURCE_URL,
'version': VERSION})
|
from __future__ import print_function
import collections
import time
from datetime import datetime
import pandas as pd
import ystockquote
from arctic import Arctic
################################################
# Getting started
################################################
# Install Arctic
# pip install git+https://github.com/manahl/arctic.git
# That's it(!)
# Run MongoDB - https://www.mongodb.org/downloads
# $ mkdir /tmp/pydata-demo
# $ mongod --dbpath /tmp/pydata-demo
################################################
# Loading data
################################################
def get_stock_history(ticker, start_date, end_date):
data = ystockquote.get_historical_prices(ticker, start_date, end_date)
df = pd.DataFrame(collections.OrderedDict(sorted(data.items()))).T
df = df.convert_objects(convert_numeric=True)
return df
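# Note: DataFrame.convert_objects is deprecated and removed in recent pandas;
# on modern versions, df.apply(pd.to_numeric, errors='ignore') would give an
# equivalent numeric conversion.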
################################################
# VersionStore: Storing and updating stock data
################################################
arctic = Arctic('localhost')
# Create a VersionStore library
arctic.delete_library('jblackburn.stocks')
arctic.initialize_library('jblackburn.stocks')
arctic.list_libraries()
stocks = arctic['jblackburn.stocks']
# get some prices
aapl = get_stock_history('aapl', '2015-01-01', '2015-02-01')
aapl
# store them in the library
stocks.write('aapl', aapl, metadata={'source': 'YAHOO'})
stocks.read('aapl').data['Adj Close'].plot()
stocks.read('aapl').metadata
stocks.read('aapl').version
# Append some more prices - imagine doing this once per period
aapl = get_stock_history('aapl', '2015-02-01', '2015-03-01')
stocks.append('aapl', aapl)
stocks.read('aapl').data
# Reading different versions of the symbol
stocks.list_symbols()
stocks.list_versions('aapl')
# Read the different versions separately
stocks.read('aapl', as_of=1).data.ix[-1]
stocks.read('aapl', as_of=2).data.ix[-1]
# And we can snapshot all items in the library
stocks.snapshot('snap')
stocks.read('aapl', as_of='snap').data.ix[-1]
#################################
# Dealing with lots of data
#################################
# NYSE library
lib = arctic['nyse']
def load_all_stock_history_NYSE():
# Data downloaded from BBG Open Symbology:
#
nyse = pd.read_csv('/users/is/jblackburn/git/arctic/howtos/nyse.csv')
stocks = [x.split('/')[0] for x in nyse['Ticker']]
print(len(stocks), " symbols")
for i, stock in enumerate(stocks):
try:
now = datetime.now()
data = get_stock_history('aapl', '1980-01-01', '2015-07-07')
lib.write(stock, data)
print("loaded data for: ", stock, datetime.now() - now)
except Exception as e:
print("Failed for ", stock, str(e))
# load_all_stock_history_NYSE()
print(len(lib.list_symbols()), " NYSE symbols loaded")
def read_all_data_from_lib(lib):
start = time.time()
rows_read = 0
for s in lib.list_symbols():
rows_read += len(lib.read(s).data)
print("Symbols: %s Rows: %s Time: %s Rows/s: %s" % (len(lib.list_symbols()),
rows_read,
(time.time() - start),
rows_read / (time.time() - start)))
read_all_data_from_lib(lib)
# Symbols: 1315 Rows: 11460225 Rows/s: 2,209,909
|