from __future__ import absolute_import
from __future__ import unicode_literals
import datetime
import unittest
import json
import os
import time
import contexter
import fs
import parameterized
import requests
import six
from six.moves.queue import Queue
from instalooter.cli import main
from instalooter.cli import time as timeutils
from instalooter.cli import threadutils
from instalooter.cli.constants import USAGE
from instalooter.cli.login import login
from instalooter.worker import InstaDownloader
from .utils import mock
from .utils.method_names import firstparam
from .utils.ig_mock import MockPages
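# Probe Instagram once at import time; the network-dependent tests below are skipped when it cannot be reached.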
try:
CONNECTION_FAILURE = not requests.get("https://instagr.am/instagram").ok
except requests.exceptions.ConnectionError:
CONNECTION_FAILURE = True
class TestCLI(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.session = requests.Session()
@classmethod
def tearDownClass(cls):
cls.session.close()
def setUp(self):
self.destfs = fs.open_fs("temp://")
self.tmpdir = self.destfs.getsyspath("/")
def tearDown(self):
self.destfs.close()
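# Pause briefly between tests on CI, likely to avoid hammering Instagram.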
if os.getenv("CI") == "true":
time.sleep(1)
@unittest.skipIf(CONNECTION_FAILURE, "cannot connect to Instagram")
def test_user(self):
with contexter.Contexter() as ctx:
ctx << mock.patch('instalooter.cli.ProfileLooter.pages', MockPages('nintendo'))
r = main(["user", "nintendo", self.tmpdir, "-q", '-n', '10'])
self.assertEqual(r, 0)
self.assertEqual(len(self.destfs.listdir('/')), 10)
@unittest.skipIf(CONNECTION_FAILURE, "cannot connect to Instagram")
def test_single_post(self):
r = main(["post", "BFB6znLg5s1", self.tmpdir, "-q"])
self.assertEqual(r, 0)
self.assertTrue(self.destfs.exists("1243533605591030581.jpg"))
@unittest.skipIf(CONNECTION_FAILURE, "cannot connect to Instagram")
def test_dump_json(self):
r = main(["post", "BIqZ8L8AHmH", self.tmpdir, '-q', '-d'])
self.assertEqual(r, 0)
self.assertTrue(self.destfs.exists("1308972728853756295.json"))
self.assertTrue(self.destfs.exists("1308972728853756295.jpg"))
with self.destfs.open("1308972728853756295.json") as fp:
json_metadata = json.load(fp)
self.assertEqual("1308972728853756295", json_metadata["id"])
self.assertEqual("BIqZ8L8AHmH", json_metadata["shortcode"])
@unittest.skipIf(CONNECTION_FAILURE, "cannot connect to Instagram")
def test_dump_only(self):
r = main(["post", "BIqZ8L8AHmH", self.tmpdir, '-q', '-D'])
self.assertEqual(r, 0)
self.assertTrue(self.destfs.exists("1308972728853756295.json"))
self.assertFalse(self.destfs.exists("1308972728853756295.jpg"))
with self.destfs.open("1308972728853756295.json") as fp:
json_metadata = json.load(fp)
self.assertEqual("1308972728853756295", json_metadata["id"])
self.assertEqual("BIqZ8L8AHmH", json_metadata["shortcode"])
@unittest.skipIf(CONNECTION_FAILURE, "cannot connect to Instagram")
def test_usage(self):
handle = six.moves.StringIO()
main(["--usage"], stream=handle)
self.assertEqual(handle.getvalue().strip(), USAGE.strip())
@unittest.skipIf(CONNECTION_FAILURE, "cannot connect to Instagram")
def test_single_post_from_url(self):
url = "https://www.instagram.com/p/BFB6znLg5s1/"
main(["post", url, self.tmpdir, "-q"])
self.assertIn("1243533605591030581.jpg", os.listdir(self.tmpdir))
class TestTimeUtils(unittest.TestCase):
@parameterized.parameterized.expand([
(":", (None, None)),
("2017-03-12:", (None, datetime.date(2017, 3, 12))),
(":2016-08-04", (datetime.date(2016, 8, 4), None)),
("2017-03-01:2017-02-01", (datetime.date(2017, 3, 1), datetime.date(2017, 2, 1))),
], testcase_func_name=firstparam)
def test_get_times_from_cli(self, token, expected):
self.assertEqual(timeutils.get_times_from_cli(token), expected)
@parameterized.parameterized.expand([
("thisday", 0, 0),
("thisweek", 7, 7),
("thismonth", 28, 31),
("thisyear", 365, 366),
], testcase_func_name=firstparam)
def test_get_times_from_cli_keywords(self, token, inf, sup):
start, stop = timeutils.get_times_from_cli(token)
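# Each keyword yields a date range ending today whose length is bounded, e.g. 'thismonth' spans 28-31 days.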
self.assertGreaterEqual(start - stop, datetime.timedelta(inf))
self.assertLessEqual(start - stop, datetime.timedelta(sup))
self.assertEqual(start, datetime.date.today())
@parameterized.parameterized.expand([
["x"],
["x:y"],
["x:y:z"],
], testcase_func_name=firstparam)
def test_get_times_from_cli_bad_format(self, token):
self.assertRaises(ValueError, timeutils.get_times_from_cli, token)
@mock.patch('instalooter.looters.InstaLooter._login')
@mock.patch('getpass.getpass')
class TestLoginUtils(unittest.TestCase):
def test_cli_login_no_username(self, getpass_, login_):
args = {'--username': None, "--password": None}
login(args)
login_.assert_not_called()
@mock.patch('instalooter.looters.InstaLooter._logged_in')
def test_cli_login_no_password(self, logged_in_, getpass_, login_):
args = {'--username': "user", "--password": None, "--quiet": False}
logged_in_.return_value = False
getpass_.return_value = "pasw"
login(args)
login_.assert_called_once_with("user", "pasw")
@mock.patch('instalooter.looters.InstaLooter._logged_in')
def test_cli_login(self, logged_in_, getpass_, login_):
args = {'--username': "user", "--password": "pasw", "--quiet": False}
logged_in_.return_value = False
login(args)
login_.assert_called_once_with("user", "pasw")
@mock.patch('instalooter.looters.InstaLooter._logged_in')
def test_cli_already_logged_in(self, logged_in_, getpass_, login_):
args = {'--username': "user", "--password": "pasw", "--quiet": False}
logged_in_.return_value = True
login(args)
login_.assert_not_called()
class TestThreadUtils(unittest.TestCase):
def test_threads_count(self):
q = Queue()
t1 = InstaDownloader(q, None, None)
t2 = InstaDownloader(q, None, None)
try:
self.assertEqual(threadutils.threads_count(), 0)
t1.start()
self.assertEqual(threadutils.threads_count(), 1)
t2.start()
self.assertEqual(threadutils.threads_count(), 2)
finally:
t1.terminate()
t2.terminate()
def test_threads_force_join(self):
q = Queue()
t1 = InstaDownloader(q, None, None)
t2 = InstaDownloader(q, None, None)
t1.start()
t2.start()
self.assertTrue(t1.is_alive())
self.assertTrue(t2.is_alive())
threadutils.threads_force_join()
self.assertFalse(t1.is_alive())
self.assertFalse(t2.is_alive())
|
import voluptuous as vol
from homeassistant.config_entries import _UNDEF
from homeassistant.const import EVENT_HOMEASSISTANT_STOP
from .config_flow import get_master_gateway
from .const import CONF_BRIDGE_ID, CONF_GROUP_ID_BASE, CONF_MASTER_GATEWAY, DOMAIN
from .gateway import DeconzGateway
from .services import async_setup_services, async_unload_services
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: vol.Schema({}, extra=vol.ALLOW_EXTRA)}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass, config):
"""Old way of setting up deCONZ integrations."""
return True
async def async_setup_entry(hass, config_entry):
"""Set up a deCONZ bridge for a config entry.
Load config, group, light and sensor data for server information.
Start websocket for push notification of state changes from deCONZ.
"""
if DOMAIN not in hass.data:
hass.data[DOMAIN] = {}
if not config_entry.options:
await async_update_master_gateway(hass, config_entry)
gateway = DeconzGateway(hass, config_entry)
if not await gateway.async_setup():
return False
# 0.104 introduced config entry unique IDs; this makes upgrading possible
if config_entry.unique_id is None:
new_data = _UNDEF
if CONF_BRIDGE_ID in config_entry.data:
new_data = dict(config_entry.data)
new_data[CONF_GROUP_ID_BASE] = config_entry.data[CONF_BRIDGE_ID]
hass.config_entries.async_update_entry(
config_entry, unique_id=gateway.api.config.bridgeid, data=new_data
)
hass.data[DOMAIN][config_entry.unique_id] = gateway
await gateway.async_update_device_registry()
await async_setup_services(hass)
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, gateway.shutdown)
return True
async def async_unload_entry(hass, config_entry):
"""Unload deCONZ config entry."""
gateway = hass.data[DOMAIN].pop(config_entry.unique_id)
if not hass.data[DOMAIN]:
await async_unload_services(hass)
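# If the removed gateway was the master, update its options and promote one of the remaining gateways.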
elif gateway.master:
await async_update_master_gateway(hass, config_entry)
new_master_gateway = next(iter(hass.data[DOMAIN].values()))
await async_update_master_gateway(hass, new_master_gateway.config_entry)
return await gateway.async_reset()
async def async_update_master_gateway(hass, config_entry):
"""Update master gateway boolean.
Called by setup_entry and unload_entry.
Makes sure there is always one master available.
"""
master = not get_master_gateway(hass)
options = {**config_entry.options, CONF_MASTER_GATEWAY: master}
hass.config_entries.async_update_entry(config_entry, options=options)
|
from __future__ import absolute_import, unicode_literals
import pytest
from libtmux.test import get_test_session_name, temp_session
def test_kills_session(server):
server = server
session_name = get_test_session_name(server=server)
with temp_session(server=server, session_name=session_name):
result = server.has_session(session_name)
assert result
assert not server.has_session(session_name)
@pytest.mark.flaky(reruns=5)
def test_if_session_killed_before(server):
"""Handles situation where session already closed within context"""
server = server
session_name = get_test_session_name(server=server)
with temp_session(server=server, session_name=session_name):
# an error or an exception within a temp_session kills the session
server.kill_session(session_name)
result = server.has_session(session_name)
assert not result
# really dead?
assert not server.has_session(session_name)
|
import sys
from gi.repository import Gtk, GtkSource
from meld.conf import _
FILE_ACTIONS = {
Gtk.FileChooserAction.OPEN,
Gtk.FileChooserAction.SAVE,
}
class MeldFileChooserDialog(Gtk.FileChooserDialog):
"""A simple GTK+ file chooser dialog with a text encoding combo box."""
__gtype_name__ = 'MeldFileChooserDialog'
def __init__(
self, title=None, transient_for=None,
action=Gtk.FileChooserAction.OPEN):
super().__init__(
title=title, transient_for=transient_for, action=action)
self.add_button(Gtk.STOCK_CANCEL, Gtk.ResponseType.CANCEL)
if action == Gtk.FileChooserAction.SAVE:
self.add_button(Gtk.STOCK_SAVE, Gtk.ResponseType.ACCEPT)
else:
self.add_button(Gtk.STOCK_OPEN, Gtk.ResponseType.ACCEPT)
self.encoding_store = Gtk.ListStore(str, str)
self.action_changed_cb()
self.connect("notify::action", self.action_changed_cb)
# We only have sufficient Gio support for remote operations in
# file comparisons, not in folder or version-control.
self.props.local_only = action not in FILE_ACTIONS
def make_encoding_combo(self):
"""Create the combo box for text encoding selection"""
# On Windows, the "current" encoding is the "system default
# ANSI code-page", which is probably not what the user wants,
# so we default to UTF-8.
if sys.platform == 'win32':
current = GtkSource.encoding_get_utf8()
else:
current = GtkSource.encoding_get_current()
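# (None, None) entries become row separators in the combo box (see set_row_separator_func below).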
codecs = [
(_('Autodetect Encoding'), None),
(None, None),
(
_('Current Locale ({})').format(current.get_charset()),
current.get_charset()
),
(None, None),
]
for encoding in GtkSource.encoding_get_all():
codecs.append((encoding.to_string(), encoding.get_charset()))
self.encoding_store.clear()
for entry in codecs:
self.encoding_store.append(entry)
combo = Gtk.ComboBox()
combo.set_model(self.encoding_store)
cell = Gtk.CellRendererText()
combo.pack_start(cell, True)
combo.add_attribute(cell, 'text', 0)
combo.set_row_separator_func(
lambda model, it, data: not model.get_value(it, 0), None)
combo.props.active = 0
return combo
def get_encoding(self):
"""Return the currently-selected text file encoding"""
combo = self.props.extra_widget
if not combo:
return None
charset = self.encoding_store.get_value(combo.get_active_iter(), 1)
if not charset:
return None
return GtkSource.Encoding.get_from_charset(charset)
def action_changed_cb(self, *args):
if self.props.action in (Gtk.FileChooserAction.OPEN,
Gtk.FileChooserAction.SAVE):
self.props.extra_widget = self.make_encoding_combo()
else:
self.props.extra_widget = None
|
import logging
import sys
from typing import Dict, Optional
import sentry_sdk
from django.conf import settings
from sentry_sdk.integrations.celery import CeleryIntegration
from sentry_sdk.integrations.django import DjangoIntegration
from sentry_sdk.integrations.logging import ignore_logger
from sentry_sdk.integrations.redis import RedisIntegration
import weblate
ERROR_LOGGER = "weblate.errors"
LOGGER = logging.getLogger(ERROR_LOGGER)
try:
import rollbar
HAS_ROLLBAR = True
except ImportError:
HAS_ROLLBAR = False
def report_error(
extra_data: Optional[Dict] = None,
level: str = "warning",
cause: str = "Handled exception",
skip_sentry: bool = False,
print_tb: bool = False,
):
"""Wrapper for error reporting.
This can be used to store exceptions in error-reporting solutions such as Rollbar
while handling the error gracefully and giving the user a cleaner message.
"""
if HAS_ROLLBAR and hasattr(settings, "ROLLBAR"):
rollbar.report_exc_info(extra_data=extra_data, level=level)
if not skip_sentry and settings.SENTRY_DSN:
with sentry_sdk.push_scope() as scope:
if extra_data:
for key, value in extra_data.items():
scope.set_extra(key, value)
scope.set_extra("error_cause", cause)
scope.level = level
sentry_sdk.capture_exception()
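# Also log locally, using the logger method that matches the requested level.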
log = getattr(LOGGER, level)
error = sys.exc_info()[1]
log("%s: %s: %s", cause, error.__class__.__name__, str(error))
if extra_data:
log("%s: %s: %s", cause, error.__class__.__name__, str(extra_data))
if print_tb:
LOGGER.exception(cause)
def celery_base_data_hook(request, data):
data["framework"] = "celery"
def init_error_collection(celery=False):
if settings.SENTRY_DSN:
sentry_sdk.init(
dsn=settings.SENTRY_DSN,
integrations=[CeleryIntegration(), DjangoIntegration(), RedisIntegration()],
send_default_pii=True,
release=weblate.GIT_REVISION or weblate.TAG_NAME,
environment=settings.SENTRY_ENVIRONMENT,
**settings.SENTRY_EXTRA_ARGS,
)
# Ignore Weblate logging, those are reported using capture_exception
ignore_logger(ERROR_LOGGER)
if celery and HAS_ROLLBAR and hasattr(settings, "ROLLBAR"):
rollbar.init(**settings.ROLLBAR)
rollbar.BASE_DATA_HOOK = celery_base_data_hook
|
import os
import shutil
from django.conf import settings
from django.test import TestCase
from weblate.trans.tests.utils import get_test_file
from weblate.utils.checks import check_data_writable
from weblate.utils.unittest import tempdir_setting
from weblate.vcs.ssh import SSHWrapper, get_host_keys, ssh_file
TEST_HOSTS = get_test_file("known_hosts")
class SSHTest(TestCase):
"""Test for customized admin interface."""
@tempdir_setting("DATA_DIR")
def test_parse(self):
self.assertEqual(check_data_writable(), [])
shutil.copy(TEST_HOSTS, os.path.join(settings.DATA_DIR, "ssh"))
hosts = get_host_keys()
self.assertEqual(len(hosts), 50)
@tempdir_setting("DATA_DIR")
def test_create_ssh_wrapper(self):
self.assertEqual(check_data_writable(), [])
wrapper = SSHWrapper()
filename = wrapper.filename
wrapper.create()
with open(filename) as handle:
data = handle.read()
self.assertTrue(ssh_file("known_hosts") in data)
self.assertTrue(ssh_file("id_rsa") in data)
self.assertTrue(settings.DATA_DIR in data)
self.assertTrue(os.access(filename, os.X_OK))
# Second run should not touch the file
timestamp = os.stat(filename).st_mtime
wrapper.create()
self.assertEqual(timestamp, os.stat(filename).st_mtime)
|
import pytest
import sh
from molecule import config
from molecule.provisioner.lint import ansible_lint
@pytest.fixture
def _provisioner_lint_section_data():
return {
'provisioner': {
'name': 'ansible',
'lint': {
'name': 'ansible-lint',
'options': {
'foo': 'bar',
'v': True,
'exclude': [
'foo',
'bar',
],
'x': [
'foo',
'bar',
],
},
'env': {
'FOO': 'bar',
},
}
}
}
# NOTE(retr0h): The `patched_config_validate` fixture prevents config.Config._validate
# from executing, avoiding odd side effects throughout the patched.assert_called unit tests.
@pytest.fixture
def _instance(patched_config_validate, config_instance):
return ansible_lint.AnsibleLint(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance):
x = {
'default_exclude': [_instance._config.scenario.ephemeral_directory],
'exclude': [],
'x': [],
}
assert x == _instance.default_options
def test_name_property(_instance):
assert 'ansible-lint' == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
@pytest.mark.parametrize(
'config_instance', ['_provisioner_lint_section_data'], indirect=True)
def test_options_property(_instance):
x = {
'default_exclude': [_instance._config.scenario.ephemeral_directory],
'exclude': [
'foo',
'bar',
],
'x': [
'foo',
'bar',
],
'foo': 'bar',
'v': True,
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_provisioner_lint_section_data'], indirect=True)
def test_options_property_handles_cli_args(_instance):
_instance._config.args = {'debug': True}
x = {
'default_exclude': [_instance._config.scenario.ephemeral_directory],
'exclude': [
'foo',
'bar',
],
'x': [
'foo',
'bar',
],
'foo': 'bar',
'v': True,
}
assert x == _instance.options
def test_default_env_property(_instance):
assert 'MOLECULE_FILE' in _instance.default_env
assert 'MOLECULE_INVENTORY_FILE' in _instance.default_env
assert 'MOLECULE_SCENARIO_DIRECTORY' in _instance.default_env
assert 'MOLECULE_INSTANCE_CONFIG' in _instance.default_env
@pytest.mark.parametrize(
'config_instance', ['_provisioner_lint_section_data'], indirect=True)
def test_env_property(_instance):
assert 'bar' == _instance.env['FOO']
assert 'ANSIBLE_CONFIG' in _instance.env
assert 'ANSIBLE_ROLES_PATH' in _instance.env
assert 'ANSIBLE_LIBRARY' in _instance.env
assert 'ANSIBLE_FILTER_PLUGINS' in _instance.env
@pytest.mark.parametrize(
'config_instance', ['_provisioner_lint_section_data'], indirect=True)
def test_bake(_instance):
_instance.bake()
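# Options are flattened into CLI flags (lists expand to repeated flags); order is not guaranteed, hence the sorted() comparison below.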
x = [
str(sh.ansible_lint),
'--foo=bar',
'-v',
'-x',
'-x',
'--exclude={}'.format(_instance._config.scenario.ephemeral_directory),
'--exclude=foo',
'--exclude=bar',
_instance._config.provisioner.playbooks.converge,
'bar',
'foo',
]
result = str(_instance._ansible_lint_command).split()
assert sorted(x) == sorted(result)
def test_execute(mocker, patched_run_command, patched_logger_info,
patched_logger_success, _instance):
_instance._ansible_lint_command = 'patched-ansiblelint-command'
_instance.execute()
patched_run_command.assert_called_once_with(
'patched-ansiblelint-command', debug=False)
msg = 'Executing Ansible Lint on {}...'.format(
_instance._config.provisioner.playbooks.converge)
patched_logger_info.assert_called_once_with(msg)
msg = 'Lint completed successfully.'
patched_logger_success.assert_called_once_with(msg)
def test_execute_does_not_execute(patched_run_command, patched_logger_warn,
_instance):
c = _instance._config.config
c['provisioner']['lint']['enabled'] = False
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, lint is disabled.'
patched_logger_warn.assert_called_once_with(msg)
def test_execute_bakes(patched_run_command, _instance):
_instance.execute()
assert _instance._ansible_lint_command is not None
assert 1 == patched_run_command.call_count
def test_executes_catches_and_exits_return_code(patched_run_command,
patched_yamllint, _instance):
patched_run_command.side_effect = sh.ErrorReturnCode_1(
sh.ansible_lint, b'', b'')
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
|
import argparse
from google.protobuf import text_format
import numpy as np
import re
from chainer import serializers
from chainercv.experimental.links import PSPNetResNet101
import caffe_pb2
def copy_conv(layer, config, conv, has_bias=False, inverse_ch=False):
data = np.array(layer.blobs[0].data)
conv.W.data.ravel()[:] = data
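# Optionally flip the input-channel axis, presumably to convert BGR-ordered Caffe weights for RGB input.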
if inverse_ch:
conv.W.data[:] = conv.W.data[:, ::-1, ...]
if has_bias:
conv.b.data[:] = np.array(layer.blobs[1].data)
return conv
def copy_conv2d_bn_activ(layer, config, cba, inverse_ch=False):
if 'Convolution' in layer.type:
cba.conv = copy_conv(layer, config, cba.conv, inverse_ch=inverse_ch)
elif 'BN' in layer.type:
cba.bn.eps = config.bn_param.eps
cba.bn.decay = config.bn_param.momentum
cba.bn.gamma.data.ravel()[:] = np.array(layer.blobs[0].data)
cba.bn.beta.data.ravel()[:] = np.array(layer.blobs[1].data)
cba.bn.avg_mean.ravel()[:] = np.array(layer.blobs[2].data)
cba.bn.avg_var.ravel()[:] = np.array(layer.blobs[3].data)
else:
print('Ignored: {} ({})'.format(layer.name, layer.type))
return cba
def copy_res1(layer, config, block):
if layer.name.startswith('conv1_1'):
block.conv1_1 = copy_conv2d_bn_activ(
layer, config, block.conv1_1, inverse_ch=True)
elif layer.name.startswith('conv1_2'):
block.conv1_2 = copy_conv2d_bn_activ(layer, config, block.conv1_2)
elif layer.name.startswith('conv1_3'):
block.conv1_3 = copy_conv2d_bn_activ(layer, config, block.conv1_3)
else:
print('Ignored: {} ({})'.format(layer.name, layer.type))
return block
def copy_bottleneck(layer, config, block):
if 'reduce' in layer.name:
block.conv1 = copy_conv2d_bn_activ(layer, config, block.conv1)
elif '3x3' in layer.name:
block.conv2 = copy_conv2d_bn_activ(layer, config, block.conv2)
elif 'increase' in layer.name:
block.conv3 = copy_conv2d_bn_activ(layer, config, block.conv3)
elif 'proj' in layer.name:
block.residual_conv = copy_conv2d_bn_activ(
layer, config, block.residual_conv)
else:
print('Ignored: {} ({})'.format(layer.name, layer.type))
return block
def copy_resblock(layer, config, block):
if '/' in layer.name:
layer.name = layer.name.split('/')[0]
i = int(layer.name.split('_')[1]) - 1
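# ResBlock children are named 'a', 'b1', 'b2', ...; Caffe layer indices are 1-based.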
if i == 0:
name = 'a'
else:
name = 'b{}'.format(i)
setattr(block, name,
copy_bottleneck(layer, config, getattr(block, name)))
return block
def copy_ppm_module(layer, config, block):
ret = re.search('pool([0-9]+)', layer.name)
pool_id = int(ret.groups()[0])
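# Pyramid pooling branches are stored in reverse order of pool size: pool 6 -> child 0, ..., pool 1 -> child 3.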
linear_id = {1: 3,
2: 2,
3: 1,
6: 0}[pool_id]
block._children[linear_id] =\
copy_conv2d_bn_activ(layer, config, block[linear_id])
return block
def transfer(model, param, net):
name_config = dict([(l.name, l) for l in net.layer])
for layer in param.layer:
if layer.name not in name_config:
continue
config = name_config[layer.name]
if layer.name.startswith('conv1'):
model.extractor = copy_res1(layer, config, model.extractor)
elif layer.name.startswith('conv2'):
model.extractor.res2 = copy_resblock(
layer, config, model.extractor.res2)
elif layer.name.startswith('conv3'):
model.extractor.res3 = copy_resblock(
layer, config, model.extractor.res3)
elif layer.name.startswith('conv4'):
model.extractor.res4 = copy_resblock(
layer, config, model.extractor.res4)
elif layer.name.startswith('conv5') \
and 'pool' not in layer.name \
and 'conv5_4' not in layer.name:
model.extractor.res5 = copy_resblock(
layer, config, model.extractor.res5)
elif layer.name.startswith('conv5_3') and 'pool' in layer.name:
model.ppm = copy_ppm_module(layer, config, model.ppm)
elif layer.name.startswith('conv5_4'):
model.head_conv1 = copy_conv2d_bn_activ(
layer, config, model.head_conv1)
elif layer.name.startswith('conv6'):
model.head_conv2 = copy_conv(
layer, config, model.head_conv2, has_bias=True)
# NOTE: The auxiliary branch is not copied
else:
print('Ignored: {} ({})'.format(layer.name, layer.type))
return model
def main():
parser = argparse.ArgumentParser()
parser.add_argument('caffemodel')
parser.add_argument('output')
args = parser.parse_args()
proto_path = 'weights/pspnet101_cityscapes_713.prototxt'
n_class = 19
input_size = (713, 713)
model = PSPNetResNet101(
n_class, None, input_size)
model(np.random.uniform(size=(1, 3) + input_size).astype(np.float32))
caffe_param = caffe_pb2.NetParameter()
caffe_param.MergeFromString(open(args.caffemodel, 'rb').read())
caffe_net = text_format.Merge(
open(proto_path).read(), caffe_pb2.NetParameter())
transfer(model, caffe_param, caffe_net)
serializers.save_npz(args.output, model)
if __name__ == '__main__':
main()
|
import os
import sys
from types import ModuleType
from pscript import commonast
# Hack
sys.modules['commonast'] = commonast
THIS_DIR = os.path.dirname(os.path.abspath(__file__))
DOC_DIR = os.path.abspath(os.path.join(THIS_DIR, '..'))
OUTPUT_DIR = os.path.join(DOC_DIR, 'pscript')
created_files = []
def main():
# Create overview doc page
docs = 'Common AST'
docs += '\n' + '=' * len(docs) + '\n\n'
docs += '.. automodule:: commonast\n\n'
docs += '.. autofunction:: commonast.parse\n\n'
docs += '----\n\n'
docs += 'The nodes\n---------\n\n'
docs += '.. autoclass:: commonast.Node\n :members:\n\n'
code = open(commonast.__file__, 'rb').read().decode()
status = 0
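# Two-state scan of the module source: skip lines until the first '## --' marker, then emit a section heading
# per '## ' line and an autoclass entry per class, stopping at the next '## --'.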
for line in code.splitlines():
if status == 0:
if line.startswith('## --'):
status = 1
elif status == 1:
if line.startswith('## --'):
break
elif line.startswith('## '):
title = line[3:].strip()
docs += '%s\n%s\n\n' % (title, '-' * len(title))
elif line.startswith('class '):
clsname = line[6:].split('(')[0]
docs += '.. autoclass:: %s\n\n' % ('commonast.' + clsname)
cls = getattr(commonast, clsname)
#cls.__doc__ = '%s(%s)\n%s' % (clsname, ', '.join(cls.__slots__), cls.__doc__)
cls.__doc__ = '%s()\n%s' % (clsname, cls.__doc__)
# Write overview doc page
filename = os.path.join(OUTPUT_DIR, 'commonast.rst')
created_files.append(filename)
open(filename, 'wt', encoding='utf-8').write(docs)
print(' generated commonast page')
def clean():
while created_files:
filename = created_files.pop()
if os.path.isfile(filename):
os.remove(filename)
|
import speech_recognition as sr
from kalliope.core import Utils
from kalliope.stt.Utils import SpeechRecognition
class Google(SpeechRecognition):
def __init__(self, callback=None, **kwargs):
"""
Start recording the microphone and analyse audio with google api
:param callback: The callback function to call to send the text
:param kwargs:
"""
# pass the audio file path directly to the parent class, if one was provided
SpeechRecognition.__init__(self, kwargs.get('audio_file_path', None))
# callback function to call after the speech-to-text translation
self.main_controller_callback = callback
self.key = kwargs.get('key', None)
self.language = kwargs.get('language', "en-US")
self.show_all = kwargs.get('show_all', False)
# set the callback that will process the audio stream
self.set_callback(self.google_callback)
# start processing: record a sample from the microphone if no audio file path was provided, otherwise read the file
self.start_processing()
def google_callback(self, recognizer, audio):
"""
called from the background thread
"""
try:
captured_audio = recognizer.recognize_google(audio,
key=self.key,
language=self.language,
show_all=self.show_all)
Utils.print_success("Google Speech Recognition thinks you said %s" % captured_audio)
self._analyse_audio(audio_to_text=captured_audio)
except sr.UnknownValueError:
Utils.print_warning("Google Speech Recognition could not understand audio")
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except sr.RequestError as e:
Utils.print_danger("Could not request results from Google Speech Recognition service; {0}".format(e))
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except AssertionError:
Utils.print_warning("No audio caught from microphone")
self._analyse_audio(audio_to_text=None)
def _analyse_audio(self, audio_to_text):
"""
Pass the recognised text to the main controller callback, if one was set.
:param audio_to_text: the text recognised from the captured audio (or None)
"""
if self.main_controller_callback is not None:
self.main_controller_callback(audio_to_text)
|
from django.utils import timezone
from django.views.decorators.cache import never_cache
from django.utils.translation import gettext_lazy as _
from rest_framework import generics, mixins
from rest_framework.exceptions import NotFound, MethodNotAllowed
from rest_framework.pagination import LimitOffsetPagination
from rest_framework.renderers import BrowsableAPIRenderer
from rest_framework.exceptions import PermissionDenied
from rest_framework.permissions import BasePermission
from shop.rest.money import JSONRenderer
from shop.rest.renderers import CMSPageRenderer
from shop.serializers.order import OrderListSerializer, OrderDetailSerializer
from shop.models.order import OrderModel
class OrderPagination(LimitOffsetPagination):
default_limit = 15
template = 'shop/templatetags/paginator.html'
class OrderPermission(BasePermission):
"""
Allow access to a given Order only if the user is entitled to it.
"""
def has_permission(self, request, view):
if view.many and request.customer.is_visitor:
detail = _("Only signed in customers can view their list of orders.")
raise PermissionDenied(detail=detail)
return True
def has_object_permission(self, request, view, order):
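# Authenticated users may access only their own orders; otherwise access requires the order's secret in the URL.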
if request.user.is_authenticated:
return order.customer.pk == request.user.pk
if order.secret and order.secret == view.kwargs.get('secret'):
return True
detail = _("This order does not belong to you.")
raise PermissionDenied(detail=detail)
class OrderView(mixins.ListModelMixin, mixins.RetrieveModelMixin, mixins.UpdateModelMixin,
generics.GenericAPIView):
"""
Base View class to render the fulfilled orders for the current user.
"""
renderer_classes = [CMSPageRenderer, JSONRenderer, BrowsableAPIRenderer]
list_serializer_class = OrderListSerializer
detail_serializer_class = OrderDetailSerializer
pagination_class = OrderPagination
permission_classes = [OrderPermission]
lookup_field = lookup_url_kwarg = 'slug'
many = True
last_order_lapse = timezone.timedelta(minutes=15)
def get_queryset(self):
queryset = OrderModel.objects.all()
if not self.request.customer.is_visitor:
queryset = queryset.filter(customer=self.request.customer).order_by('-updated_at')
return queryset
def get_serializer_class(self):
if self.many:
return self.list_serializer_class
return self.detail_serializer_class
def get_renderer_context(self):
renderer_context = super().get_renderer_context()
if self.request.accepted_renderer.format == 'html':
renderer_context.update(many=self.many)
if not self.many:
# add an extra entry (extra_ance) to the breadcrumb to show the order number
renderer_context.update(
is_last_order = self.is_last(),
extra_ance=self.get_object().get_number(),
)
return renderer_context
def is_last(self):
"""
Returns ``True`` if the given order is considered the last order for its customer.
This information may be used to distinguish between a "thank you" and a normal detail view.
"""
assert self.many is False, "This method can be called for detail views only"
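# The order counts as the last one if it is the most recent for this customer and was created within the last 15 minutes.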
lapse = timezone.now() - self.last_order_lapse
current_order = self.get_object()
last_order = self.get_queryset().first()
return current_order.id == last_order.id and current_order.created_at > lapse
@property
def allowed_methods(self):
"""Restrict method "POST" only on the detail view"""
allowed_methods = self._allowed_methods()
if self.many:
allowed_methods.remove('POST')
return allowed_methods
@never_cache
def get(self, request, *args, **kwargs):
if self.many:
return self.list(request, *args, **kwargs)
return self.retrieve(request, *args, **kwargs)
def post(self, request, *args, **kwargs):
if self.many:
raise MethodNotAllowed("Method POST is not allowed on Order List View")
self.update(request, *args, **kwargs)
return self.retrieve(request, *args, **kwargs)
def list(self, request, *args, **kwargs):
try:
return super().list(request, *args, **kwargs)
except OrderModel.DoesNotExist:
raise NotFound("No orders have been found for the current user.")
def retrieve(self, request, *args, **kwargs):
try:
return super().retrieve(request, *args, **kwargs)
except OrderModel.DoesNotExist:
raise NotFound("No order has been found for the current user.")
|
import cherrypy
from cherrypy.test import helper
script_names = ['', '/path/to/myapp']
class ProxyTest(helper.CPWebCase):
@staticmethod
def setup_server():
# Set up site
cherrypy.config.update({
'tools.proxy.on': True,
'tools.proxy.base': 'www.mydomain.test',
})
# Set up application
class Root:
def __init__(self, sn):
# Calculate a URL outside of any requests.
self.thisnewpage = cherrypy.url(
'/this/new/page', script_name=sn)
@cherrypy.expose
def pageurl(self):
return self.thisnewpage
@cherrypy.expose
def index(self):
raise cherrypy.HTTPRedirect('dummy')
@cherrypy.expose
def remoteip(self):
return cherrypy.request.remote.ip
@cherrypy.expose
@cherrypy.config(**{
'tools.proxy.local': 'X-Host',
'tools.trailing_slash.extra': True,
})
def xhost(self):
raise cherrypy.HTTPRedirect('blah')
@cherrypy.expose
def base(self):
return cherrypy.request.base
@cherrypy.expose
@cherrypy.config(**{'tools.proxy.scheme': 'X-Forwarded-Ssl'})
def ssl(self):
return cherrypy.request.base
@cherrypy.expose
def newurl(self):
return ("Browse to <a href='%s'>this page</a>."
% cherrypy.url('/this/new/page'))
@cherrypy.expose
@cherrypy.config(**{
'tools.proxy.base': None,
})
def base_no_base(self):
return cherrypy.request.base
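# Mount the same app at the site root and under a nested script name to exercise cherrypy.url() in both cases.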
for sn in script_names:
cherrypy.tree.mount(Root(sn), sn)
def testProxy(self):
self.getPage('/')
self.assertHeader('Location',
'%s://www.mydomain.test%s/dummy' %
(self.scheme, self.prefix()))
# Test X-Forwarded-Host (Apache 1.3.33+ and Apache 2)
self.getPage(
'/', headers=[('X-Forwarded-Host', 'http://www.example.test')])
self.assertHeader('Location', 'http://www.example.test/dummy')
self.getPage('/', headers=[('X-Forwarded-Host', 'www.example.test')])
self.assertHeader('Location', '%s://www.example.test/dummy' %
self.scheme)
# Test multiple X-Forwarded-Host headers
self.getPage('/', headers=[
('X-Forwarded-Host', 'http://www.example.test, www.cherrypy.test'),
])
self.assertHeader('Location', 'http://www.example.test/dummy')
# Test X-Forwarded-For (Apache2)
self.getPage('/remoteip',
headers=[('X-Forwarded-For', '192.168.0.20')])
self.assertBody('192.168.0.20')
# Fix bug #1268
self.getPage('/remoteip',
headers=[
('X-Forwarded-For', '67.15.36.43, 192.168.0.20')
])
self.assertBody('67.15.36.43')
# Test X-Host (lighttpd; see https://trac.lighttpd.net/trac/ticket/418)
self.getPage('/xhost', headers=[('X-Host', 'www.example.test')])
self.assertHeader('Location', '%s://www.example.test/blah' %
self.scheme)
# Test X-Forwarded-Proto (lighttpd)
self.getPage('/base', headers=[('X-Forwarded-Proto', 'https')])
self.assertBody('https://www.mydomain.test')
# Test X-Forwarded-Ssl (webfaction?)
self.getPage('/ssl', headers=[('X-Forwarded-Ssl', 'on')])
self.assertBody('https://www.mydomain.test')
# Test cherrypy.url()
for sn in script_names:
# Test the value inside requests
self.getPage(sn + '/newurl')
self.assertBody(
"Browse to <a href='%s://www.mydomain.test" % self.scheme +
sn + "/this/new/page'>this page</a>.")
self.getPage(sn + '/newurl', headers=[('X-Forwarded-Host',
'http://www.example.test')])
self.assertBody("Browse to <a href='http://www.example.test" +
sn + "/this/new/page'>this page</a>.")
# Test the value outside requests
port = ''
if self.scheme == 'http' and self.PORT != 80:
port = ':%s' % self.PORT
elif self.scheme == 'https' and self.PORT != 443:
port = ':%s' % self.PORT
host = self.HOST
if host in ('0.0.0.0', '::'):
import socket
host = socket.gethostname()
expected = ('%s://%s%s%s/this/new/page'
% (self.scheme, host, port, sn))
self.getPage(sn + '/pageurl')
self.assertBody(expected)
# Test trailing slash (see
# https://github.com/cherrypy/cherrypy/issues/562).
self.getPage('/xhost/', headers=[('X-Host', 'www.example.test')])
self.assertHeader('Location', '%s://www.example.test/xhost'
% self.scheme)
def test_no_base_port_in_host(self):
"""
If no base is indicated, and the host header is used to resolve
the base, it should rely on the host header for the port also.
"""
headers = {'Host': 'localhost:8080'}.items()
self.getPage('/base_no_base', headers=headers)
self.assertBody('http://localhost:8080')
|
import social_django.utils
from django.conf import settings
from django.core import mail
from django.core.signing import TimestampSigner
from django.test.utils import modify_settings, override_settings
from django.urls import reverse
from jsonschema import validate
from social_core.backends.utils import load_backends
from weblate_schemas import load_schema
from weblate.accounts.models import Profile, Subscription
from weblate.accounts.notifications import FREQ_DAILY, FREQ_NONE, SCOPE_WATCHED
from weblate.auth.models import User
from weblate.lang.models import Language
from weblate.trans.tests.test_models import RepoTestCase
from weblate.trans.tests.test_views import FixtureTestCase
from weblate.utils.ratelimit import reset_rate_limit
CONTACT_DATA = {
"name": "Test",
"email": "[email protected]",
"subject": "Message from dark side",
"message": "Hi\n\nThis app looks really cool!",
}
class ViewTest(RepoTestCase):
"""Test for views."""
def setUp(self):
super().setUp()
reset_rate_limit("login", address="127.0.0.1")
reset_rate_limit("message", address="127.0.0.1")
def get_user(self):
user = User.objects.create_user(
username="testuser", password="testpassword", full_name="Test User"
)
user.full_name = "First Second"
user.email = "[email protected]"
user.save()
return user
@override_settings(
REGISTRATION_CAPTCHA=False, ADMINS=(("Weblate test", "[email protected]"),)
)
def test_contact(self):
"""Test for contact form."""
# Basic get
response = self.client.get(reverse("contact"))
self.assertContains(response, 'id="id_message"')
# Sending message
response = self.client.post(reverse("contact"), CONTACT_DATA)
self.assertRedirects(response, reverse("home"))
# Verify message
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "[Weblate] Message from dark side")
self.assertEqual(mail.outbox[0].to, ["[email protected]"])
@override_settings(
REGISTRATION_CAPTCHA=False, ADMINS_CONTACT=["[email protected]"]
)
def test_contact_separate(self):
"""Test for contact form."""
# Sending message
response = self.client.post(reverse("contact"), CONTACT_DATA)
self.assertRedirects(response, reverse("home"))
# Verify message
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].subject, "[Weblate] Message from dark side")
self.assertEqual(mail.outbox[0].to, ["[email protected]"])
@override_settings(REGISTRATION_CAPTCHA=False)
def test_contact_invalid(self):
"""Test for contact form."""
# Sending message
data = CONTACT_DATA.copy()
data["email"] = "rejected&[email protected]"
response = self.client.post(reverse("contact"), data)
self.assertContains(response, "Enter a valid e-mail address.")
@override_settings(RATELIMIT_ATTEMPTS=0)
def test_contact_rate(self):
"""Test for contact form rate limiting."""
response = self.client.post(reverse("contact"), CONTACT_DATA)
self.assertContains(response, "Too many messages sent, please try again later.")
@override_settings(RATELIMIT_ATTEMPTS=1, RATELIMIT_WINDOW=0)
def test_contact_rate_window(self):
"""Test for contact form rate limiting."""
message = "Too many messages sent, please try again later."
response = self.client.post(reverse("contact"), CONTACT_DATA)
self.assertNotContains(response, message)
response = self.client.post(reverse("contact"), CONTACT_DATA)
self.assertNotContains(response, message)
@override_settings(OFFER_HOSTING=False)
def test_hosting_disabled(self):
"""Test for hosting form with disabled hosting."""
self.get_user()
self.client.login(username="testuser", password="testpassword")
response = self.client.get(reverse("hosting"))
self.assertRedirects(response, reverse("home"))
@override_settings(OFFER_HOSTING=True)
def test_libre(self):
"""Test for hosting form with enabled hosting."""
from weblate.billing.models import Plan
self.get_user()
self.client.login(username="testuser", password="testpassword")
Plan.objects.create(price=0, slug="libre", name="Libre")
self.client.login(username="testuser", password="testpassword")
response = self.client.get(reverse("hosting"))
self.assertContains(response, "trial")
# Creating a trial
response = self.client.post(reverse("trial"), {"plan": "libre"}, follow=True)
self.assertContains(response, "Create project")
@override_settings(OFFER_HOSTING=False)
def test_trial_disabled(self):
"""Test for trial form with disabled hosting."""
self.get_user()
self.client.login(username="testuser", password="testpassword")
response = self.client.get(reverse("trial"))
self.assertRedirects(response, reverse("home"))
@override_settings(OFFER_HOSTING=True)
@modify_settings(INSTALLED_APPS={"append": "weblate.billing"})
def test_trial(self):
"""Test for trial form with disabled hosting."""
from weblate.billing.models import Plan
Plan.objects.create(price=1, slug="enterprise")
user = self.get_user()
self.client.login(username="testuser", password="testpassword")
response = self.client.get(reverse("trial"))
self.assertContains(response, "Enterprise")
response = self.client.post(reverse("trial"), follow=True)
self.assertContains(response, "Create project")
billing = user.billing_set.get()
self.assertTrue(billing.is_trial)
# Repeated attempt should fail
response = self.client.get(reverse("trial"))
self.assertRedirects(response, reverse("contact") + "?t=trial")
def test_contact_subject(self):
# With set subject
response = self.client.get(reverse("contact"), {"t": "reg"})
self.assertContains(response, "Registration problems")
def test_contact_user(self):
user = self.get_user()
# Login
self.client.login(username=user.username, password="testpassword")
response = self.client.get(reverse("contact"))
self.assertContains(response, 'value="First Second"')
self.assertContains(response, user.email)
def test_user_list(self):
"""Test user pages."""
user = self.get_user()
user_url = user.get_absolute_url()
response = self.client.get(reverse("user_list"), {"q": user.username})
self.assertContains(response, user_url)
response = self.client.get(reverse("user_list"), {"q": user.full_name})
self.assertContains(response, user_url)
response = self.client.get(reverse("user_list"), {"sort_by": "invalid"})
self.assertContains(response, user_url)
def test_user(self):
"""Test user pages."""
# Setup user
user = self.get_user()
# Login as user
self.client.login(username=user.username, password="testpassword")
# Get public profile
response = self.client.get(user.get_absolute_url())
self.assertContains(response, '="/activity/')
def test_suggestions(self):
"""Test user pages."""
# Setup user
user = self.get_user()
# Get public profile
response = self.client.get(
reverse("user_suggestions", kwargs={"user": user.username})
)
self.assertContains(response, "Suggestions")
response = self.client.get(reverse("user_suggestions", kwargs={"user": "-"}))
self.assertContains(response, "Suggestions")
def test_login(self):
user = self.get_user()
# Login
response = self.client.post(
reverse("login"), {"username": user.username, "password": "testpassword"}
)
self.assertRedirects(response, reverse("home"))
# Login redirect
response = self.client.get(reverse("login"))
self.assertRedirects(response, reverse("profile"))
# Logout with GET should fail
response = self.client.get(reverse("logout"))
self.assertEqual(response.status_code, 405)
# Logout
response = self.client.post(reverse("logout"))
self.assertRedirects(response, reverse("home"))
def test_login_redirect(self):
try:
# psa creates copy of settings...
orig_backends = social_django.utils.BACKENDS
social_django.utils.BACKENDS = (
"social_core.backends.github.GithubOAuth2",
"weblate.accounts.auth.WeblateUserBackend",
)
load_backends(social_django.utils.BACKENDS, force_load=True)
response = self.client.get(reverse("login"))
self.assertContains(
response, "Redirecting you to the authentication provider."
)
finally:
social_django.utils.BACKENDS = orig_backends
def test_login_email(self):
user = self.get_user()
# Login
response = self.client.post(
reverse("login"), {"username": user.email, "password": "testpassword"}
)
self.assertRedirects(response, reverse("home"))
def test_login_anonymous(self):
# Login
response = self.client.post(
reverse("login"),
{"username": settings.ANONYMOUS_USER_NAME, "password": "testpassword"},
)
self.assertContains(
response, "This username/password combination was not found."
)
@override_settings(RATELIMIT_ATTEMPTS=20, AUTH_LOCK_ATTEMPTS=5)
def test_login_ratelimit(self, login=False):
if login:
self.test_login()
else:
self.get_user()
# Use auth attempts
for _unused in range(5):
response = self.client.post(
reverse("login"), {"username": "testuser", "password": "invalid"}
)
self.assertContains(response, "Please try again.")
# Try login with valid password
response = self.client.post(
reverse("login"), {"username": "testuser", "password": "testpassword"}
)
self.assertContains(response, "Please try again.")
@override_settings(RATELIMIT_ATTEMPTS=10, AUTH_LOCK_ATTEMPTS=5)
def test_login_ratelimit_login(self):
self.test_login_ratelimit(True)
def test_password(self):
# Create user
self.get_user()
# Login
self.client.login(username="testuser", password="testpassword")
# Change without data
response = self.client.post(reverse("password"))
self.assertContains(response, "This field is required.")
response = self.client.get(reverse("password"))
self.assertContains(response, "Current password")
# Change with wrong password
response = self.client.post(
reverse("password"),
{
"password": "123456",
"new_password1": "123456",
"new_password2": "123456",
},
)
self.assertContains(response, "You have entered an invalid password.")
# Change
response = self.client.post(
reverse("password"),
{
"password": "testpassword",
"new_password1": "1pa$$word!",
"new_password2": "1pa$$word!",
},
)
self.assertRedirects(response, reverse("profile") + "#account")
self.assertTrue(
User.objects.get(username="testuser").check_password("1pa$$word!")
)
def test_api_key(self):
# Create user
user = self.get_user()
# Login
self.client.login(username="testuser", password="testpassword")
# API key reset with GET should fail
response = self.client.get(reverse("reset-api-key"))
self.assertEqual(response.status_code, 405)
# API key reset
response = self.client.post(reverse("reset-api-key"))
self.assertRedirects(response, reverse("profile") + "#api")
# API key reset without token
user.auth_token.delete()
response = self.client.post(reverse("reset-api-key"))
self.assertRedirects(response, reverse("profile") + "#api")
class ProfileTest(FixtureTestCase):
def test_profile(self):
# Get profile page
response = self.client.get(reverse("profile"))
self.assertContains(response, 'action="/accounts/profile/"')
self.assertContains(response, 'name="secondary_languages"')
self.assertContains(response, reverse("userdata"))
# Save profile
response = self.client.post(
reverse("profile"),
{
"language": "en",
"languages": Language.objects.get(code="cs").id,
"secondary_languages": Language.objects.get(code="cs").id,
"full_name": "First Last",
"email": "[email protected]",
"username": "testik",
"dashboard_view": Profile.DASHBOARD_WATCHED,
"translate_mode": Profile.TRANSLATE_FULL,
"zen_mode": Profile.ZEN_VERTICAL,
"nearby_strings": 10,
},
)
self.assertRedirects(response, reverse("profile"))
def test_profile_dashboard(self):
# Save profile with invalid settings
response = self.client.post(
reverse("profile"),
{
"language": "en",
"languages": Language.objects.get(code="cs").id,
"secondary_languages": Language.objects.get(code="cs").id,
"full_name": "First Last",
"email": "[email protected]",
"username": "testik",
"dashboard_view": Profile.DASHBOARD_COMPONENT_LIST,
"translate_mode": Profile.TRANSLATE_FULL,
"zen_mode": Profile.ZEN_VERTICAL,
"nearby_strings": 10,
},
)
self.assertRedirects(response, reverse("profile"))
def test_userdata(self):
response = self.client.post(reverse("userdata"))
self.assertContains(response, "basic")
# Add more languages
self.user.profile.languages.add(Language.objects.get(code="pl"))
self.user.profile.secondary_languages.add(Language.objects.get(code="de"))
self.user.profile.secondary_languages.add(Language.objects.get(code="uk"))
response = self.client.post(reverse("userdata"))
self.assertContains(response, '"pl"')
self.assertContains(response, '"de"')
validate(response.json(), load_schema("weblate-userdata.schema.json"))
def test_subscription(self):
# Get profile page
response = self.client.get(reverse("profile"))
self.assertEqual(self.user.subscription_set.count(), 8)
# Extract current form data
data = {}
for form in response.context["all_forms"]:
for field in form:
value = field.value()
name = field.html_name
if value is None:
data[name] = ""
elif isinstance(value, list):
data[name] = value
else:
data[name] = str(value)
# Save unchanged data
response = self.client.post(reverse("profile"), data, follow=True)
self.assertContains(response, "Your profile has been updated.")
self.assertEqual(self.user.subscription_set.count(), 8)
# Remove some subscriptions
data["notifications__1-notify-LastAuthorCommentNotificaton"] = "0"
data["notifications__1-notify-MentionCommentNotificaton"] = "0"
response = self.client.post(reverse("profile"), data, follow=True)
self.assertContains(response, "Your profile has been updated.")
self.assertEqual(self.user.subscription_set.count(), 6)
# Add some subscriptions
data["notifications__2-notify-ChangedStringNotificaton"] = "1"
response = self.client.post(reverse("profile"), data, follow=True)
self.assertContains(response, "Your profile has been updated.")
self.assertEqual(self.user.subscription_set.count(), 7)
def test_subscription_customize(self):
# Initial view
response = self.client.get(reverse("profile"))
self.assertNotContains(response, "Project: Test")
self.assertNotContains(response, "Component: Test/Test")
# Configure project
response = self.client.get(
reverse("profile"), {"notify_project": self.project.pk}
)
self.assertContains(response, "Project: Test")
self.assertNotContains(response, "Component: Test/Test")
# Configure component
response = self.client.get(
reverse("profile"), {"notify_component": self.component.pk}
)
self.assertNotContains(response, "Project: Test")
self.assertContains(response, "Component: Test/Test")
# Configure invalid
response = self.client.get(reverse("profile"), {"notify_component": "a"})
self.assertNotContains(response, "Project: Test")
self.assertNotContains(response, "Component: Test/Test")
# Configure invalid
response = self.client.get(reverse("profile"), {"notify_project": "a"})
self.assertNotContains(response, "Project: Test")
self.assertNotContains(response, "Component: Test/Test")
def test_watch(self):
self.assertEqual(self.user.profile.watched.count(), 0)
self.assertEqual(self.user.subscription_set.count(), 8)
# Watch project
self.client.post(reverse("watch", kwargs=self.kw_project))
self.assertEqual(self.user.profile.watched.count(), 1)
self.assertEqual(
self.user.subscription_set.filter(project=self.project).count(), 0
)
# Mute notifications for component
self.client.post(reverse("mute", kwargs=self.kw_component))
self.assertEqual(
self.user.subscription_set.filter(component=self.component).count(), 18
)
# Mute notifications for project
self.client.post(reverse("mute", kwargs=self.kw_project))
self.assertEqual(
self.user.subscription_set.filter(project=self.project).count(), 18
)
# Unwatch project
self.client.post(reverse("unwatch", kwargs=self.kw_project))
self.assertEqual(self.user.profile.watched.count(), 0)
self.assertEqual(
self.user.subscription_set.filter(project=self.project).count(), 0
)
self.assertEqual(
self.user.subscription_set.filter(component=self.component).count(), 0
)
self.assertEqual(self.user.subscription_set.count(), 8)
def test_watch_component(self):
self.assertEqual(self.user.profile.watched.count(), 0)
self.assertEqual(self.user.subscription_set.count(), 8)
# Watch component
self.client.post(reverse("watch", kwargs=self.kw_component))
self.assertEqual(self.user.profile.watched.count(), 1)
# All project notifications should be muted
self.assertEqual(
self.user.subscription_set.filter(project=self.project).count(), 18
)
# Only default notifications should be enabled
self.assertEqual(
self.user.subscription_set.filter(component=self.component).count(), 3
)
def test_unsubscribe(self):
response = self.client.get(reverse("unsubscribe"), follow=True)
self.assertRedirects(response, reverse("profile") + "#notifications")
response = self.client.get(reverse("unsubscribe"), {"i": "x"}, follow=True)
self.assertRedirects(response, reverse("profile") + "#notifications")
self.assertContains(response, "notification change link is no longer valid")
response = self.client.get(
reverse("unsubscribe"), {"i": TimestampSigner().sign(-1)}, follow=True
)
self.assertRedirects(response, reverse("profile") + "#notifications")
self.assertContains(response, "notification change link is no longer valid")
subscription = Subscription.objects.create(
user=self.user, notification="x", frequency=FREQ_DAILY, scope=SCOPE_WATCHED
)
response = self.client.get(
reverse("unsubscribe"),
{"i": TimestampSigner().sign(subscription.pk)},
follow=True,
)
self.assertRedirects(response, reverse("profile") + "#notifications")
self.assertContains(response, "Notification settings adjusted")
subscription.refresh_from_db()
self.assertEqual(subscription.frequency, FREQ_NONE)
|
from typing import Any, Dict
from aiohttp import ClientConnectionError, ClientResponseError
from homeassistant import config_entries, core, setup
from homeassistant.components.bond.const import DOMAIN
from homeassistant.const import CONF_ACCESS_TOKEN, CONF_HOST
from .common import patch_bond_device_ids, patch_bond_version
from tests.async_mock import Mock, patch
from tests.common import MockConfigEntry
async def test_user_form(hass: core.HomeAssistant):
"""Test we get the user initiated form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch_bond_version(
return_value={"bondid": "test-bond-id"}
), patch_bond_device_ids(), _patch_async_setup() as mock_setup, _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "test-bond-id"
assert result2["data"] == {
CONF_HOST: "some host",
CONF_ACCESS_TOKEN: "test-token",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_user_form_invalid_auth(hass: core.HomeAssistant):
"""Test we handle invalid auth."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch_bond_version(
return_value={"bond_id": "test-bond-id"}
), patch_bond_device_ids(
side_effect=ClientResponseError(Mock(), Mock(), status=401),
):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "invalid_auth"}
async def test_user_form_cannot_connect(hass: core.HomeAssistant):
"""Test we handle cannot connect error."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch_bond_version(
side_effect=ClientConnectionError()
), patch_bond_device_ids():
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "cannot_connect"}
async def test_user_form_old_firmware(hass: core.HomeAssistant):
"""Test we handle unsupported old firmware."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch_bond_version(
return_value={"no_bond_id": "present"}
), patch_bond_device_ids():
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "old_firmware"}
async def test_user_form_unexpected_client_error(hass: core.HomeAssistant):
"""Test we handle unexpected client error gracefully."""
await _help_test_form_unexpected_error(
hass,
source=config_entries.SOURCE_USER,
user_input={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
error=ClientResponseError(Mock(), Mock(), status=500),
)
async def test_user_form_unexpected_error(hass: core.HomeAssistant):
"""Test we handle unexpected error gracefully."""
await _help_test_form_unexpected_error(
hass,
source=config_entries.SOURCE_USER,
user_input={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
error=Exception(),
)
async def test_user_form_one_entry_per_device_allowed(hass: core.HomeAssistant):
"""Test that only one entry allowed per unique ID reported by Bond hub device."""
MockConfigEntry(
domain=DOMAIN,
unique_id="already-registered-bond-id",
data={CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
).add_to_hass(hass)
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}
)
with patch_bond_version(
return_value={"bondid": "already-registered-bond-id"}
), patch_bond_device_ids(), _patch_async_setup() as mock_setup, _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_HOST: "some host", CONF_ACCESS_TOKEN: "test-token"},
)
assert result2["type"] == "abort"
assert result2["reason"] == "already_configured"
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
async def test_zeroconf_form(hass: core.HomeAssistant):
"""Test we get the discovery form."""
await setup.async_setup_component(hass, "persistent_notification", {})
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={"name": "test-bond-id.some-other-tail-info", "host": "test-host"},
)
assert result["type"] == "form"
assert result["errors"] == {}
with patch_bond_version(
return_value={"bondid": "test-bond-id"}
), patch_bond_device_ids(), _patch_async_setup() as mock_setup, _patch_async_setup_entry() as mock_setup_entry:
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"],
{CONF_ACCESS_TOKEN: "test-token"},
)
await hass.async_block_till_done()
assert result2["type"] == "create_entry"
assert result2["title"] == "test-bond-id"
assert result2["data"] == {
CONF_HOST: "test-host",
CONF_ACCESS_TOKEN: "test-token",
}
assert len(mock_setup.mock_calls) == 1
assert len(mock_setup_entry.mock_calls) == 1
async def test_zeroconf_already_configured(hass: core.HomeAssistant):
"""Test starting a flow from discovery when already configured."""
await setup.async_setup_component(hass, "persistent_notification", {})
entry = MockConfigEntry(
domain=DOMAIN,
unique_id="already-registered-bond-id",
data={CONF_HOST: "stored-host", CONF_ACCESS_TOKEN: "test-token"},
)
entry.add_to_hass(hass)
with _patch_async_setup() as mock_setup, _patch_async_setup_entry() as mock_setup_entry:
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_ZEROCONF},
data={
"name": "already-registered-bond-id.some-other-tail-info",
"host": "updated-host",
},
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
assert entry.data["host"] == "updated-host"
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 0
assert len(mock_setup_entry.mock_calls) == 0
async def test_zeroconf_form_unexpected_error(hass: core.HomeAssistant):
"""Test we handle unexpected error gracefully."""
await _help_test_form_unexpected_error(
hass,
source=config_entries.SOURCE_ZEROCONF,
initial_input={
"name": "test-bond-id.some-other-tail-info",
"host": "test-host",
},
user_input={CONF_ACCESS_TOKEN: "test-token"},
error=Exception(),
)
async def _help_test_form_unexpected_error(
hass: core.HomeAssistant,
*,
source: str,
initial_input: Dict[str, Any] = None,
user_input: Dict[str, Any],
error: Exception,
):
"""Test we handle unexpected error gracefully."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": source}, data=initial_input
)
with patch_bond_version(
return_value={"bond_id": "test-bond-id"}
), patch_bond_device_ids(side_effect=error):
result2 = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input
)
assert result2["type"] == "form"
assert result2["errors"] == {"base": "unknown"}
def _patch_async_setup():
return patch("homeassistant.components.bond.async_setup", return_value=True)
def _patch_async_setup_entry():
return patch(
"homeassistant.components.bond.async_setup_entry",
return_value=True,
)
|
import logging
from os import path
import pytest
from homeassistant import config
from homeassistant.const import SERVICE_RELOAD
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.helpers.entity_platform import async_get_platforms
from homeassistant.helpers.reload import (
async_get_platform_without_config_entry,
async_integration_yaml_config,
async_reload_integration_platforms,
async_setup_reload_service,
)
from homeassistant.loader import async_get_integration
from tests.async_mock import AsyncMock, Mock, patch
from tests.common import (
MockModule,
MockPlatform,
mock_entity_platform,
mock_integration,
)
_LOGGER = logging.getLogger(__name__)
DOMAIN = "test_domain"
PLATFORM = "test_platform"
async def test_reload_platform(hass):
"""Test the polling of only updated entities."""
component_setup = Mock(return_value=True)
setup_called = []
async def setup_platform(*args):
setup_called.append(args)
mock_integration(hass, MockModule(DOMAIN, setup=component_setup))
mock_integration(hass, MockModule(PLATFORM, dependencies=[DOMAIN]))
mock_platform = MockPlatform(async_setup_platform=setup_platform)
mock_entity_platform(hass, f"{DOMAIN}.{PLATFORM}", mock_platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({DOMAIN: {"platform": PLATFORM, "sensors": None}})
await hass.async_block_till_done()
assert component_setup.called
assert f"{DOMAIN}.{PLATFORM}" in hass.config.components
assert len(setup_called) == 1
platform = async_get_platform_without_config_entry(hass, PLATFORM, DOMAIN)
assert platform.platform_name == PLATFORM
assert platform.domain == DOMAIN
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"helpers/reload_configuration.yaml",
)
with patch.object(config, "YAML_CONFIG_FILE", yaml_path):
await async_reload_integration_platforms(hass, PLATFORM, [DOMAIN])
assert len(setup_called) == 2
existing_platforms = async_get_platforms(hass, PLATFORM)
for existing_platform in existing_platforms:
existing_platform.config_entry = "abc"
assert not async_get_platform_without_config_entry(hass, PLATFORM, DOMAIN)
async def test_setup_reload_service(hass):
"""Test setting up a reload service."""
component_setup = Mock(return_value=True)
setup_called = []
async def setup_platform(*args):
setup_called.append(args)
mock_integration(hass, MockModule(DOMAIN, setup=component_setup))
mock_integration(hass, MockModule(PLATFORM, dependencies=[DOMAIN]))
mock_platform = MockPlatform(async_setup_platform=setup_platform)
mock_entity_platform(hass, f"{DOMAIN}.{PLATFORM}", mock_platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({DOMAIN: {"platform": PLATFORM, "sensors": None}})
await hass.async_block_till_done()
assert component_setup.called
assert f"{DOMAIN}.{PLATFORM}" in hass.config.components
assert len(setup_called) == 1
await async_setup_reload_service(hass, PLATFORM, [DOMAIN])
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"helpers/reload_configuration.yaml",
)
with patch.object(config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
PLATFORM,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(setup_called) == 2
async def test_setup_reload_service_when_async_process_component_config_fails(hass):
"""Test setting up a reload service with the config processing failing."""
component_setup = Mock(return_value=True)
setup_called = []
async def setup_platform(*args):
setup_called.append(args)
mock_integration(hass, MockModule(DOMAIN, setup=component_setup))
mock_integration(hass, MockModule(PLATFORM, dependencies=[DOMAIN]))
mock_platform = MockPlatform(async_setup_platform=setup_platform)
mock_entity_platform(hass, f"{DOMAIN}.{PLATFORM}", mock_platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({DOMAIN: {"platform": PLATFORM, "sensors": None}})
await hass.async_block_till_done()
assert component_setup.called
assert f"{DOMAIN}.{PLATFORM}" in hass.config.components
assert len(setup_called) == 1
await async_setup_reload_service(hass, PLATFORM, [DOMAIN])
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"helpers/reload_configuration.yaml",
)
with patch.object(config, "YAML_CONFIG_FILE", yaml_path), patch.object(
config, "async_process_component_config", return_value=None
):
await hass.services.async_call(
PLATFORM,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(setup_called) == 1
async def test_setup_reload_service_with_platform_that_provides_async_reset_platform(
hass,
):
"""Test setting up a reload service using a platform that has its own async_reset_platform."""
component_setup = AsyncMock(return_value=True)
setup_called = []
async_reset_platform_called = []
async def setup_platform(*args):
setup_called.append(args)
async def async_reset_platform(*args):
async_reset_platform_called.append(args)
mock_integration(hass, MockModule(DOMAIN, async_setup=component_setup))
integration = await async_get_integration(hass, DOMAIN)
integration.get_component().async_reset_platform = async_reset_platform
mock_integration(hass, MockModule(PLATFORM, dependencies=[DOMAIN]))
mock_platform = MockPlatform(async_setup_platform=setup_platform)
mock_entity_platform(hass, f"{DOMAIN}.{PLATFORM}", mock_platform)
component = EntityComponent(_LOGGER, DOMAIN, hass)
await component.async_setup({DOMAIN: {"platform": PLATFORM, "name": "xyz"}})
await hass.async_block_till_done()
assert component_setup.called
assert f"{DOMAIN}.{PLATFORM}" in hass.config.components
assert len(setup_called) == 1
await async_setup_reload_service(hass, PLATFORM, [DOMAIN])
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"helpers/reload_configuration.yaml",
)
with patch.object(config, "YAML_CONFIG_FILE", yaml_path):
await hass.services.async_call(
PLATFORM,
SERVICE_RELOAD,
{},
blocking=True,
)
await hass.async_block_till_done()
assert len(setup_called) == 1
assert len(async_reset_platform_called) == 1
async def test_async_integration_yaml_config(hass):
"""Test loading yaml config for an integration."""
mock_integration(hass, MockModule(DOMAIN))
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
f"helpers/{DOMAIN}_configuration.yaml",
)
with patch.object(config, "YAML_CONFIG_FILE", yaml_path):
processed_config = await async_integration_yaml_config(hass, DOMAIN)
assert processed_config == {DOMAIN: [{"name": "one"}, {"name": "two"}]}
async def test_async_integration_missing_yaml_config(hass):
"""Test loading missing yaml config for an integration."""
mock_integration(hass, MockModule(DOMAIN))
yaml_path = path.join(
_get_fixtures_base_path(),
"fixtures",
"helpers/does_not_exist_configuration.yaml",
)
with pytest.raises(FileNotFoundError), patch.object(
config, "YAML_CONFIG_FILE", yaml_path
):
await async_integration_yaml_config(hass, DOMAIN)
def _get_fixtures_base_path():
return path.dirname(path.dirname(__file__))
|
from datetime import datetime, timedelta
import logging
import noaa_coops as coops # pylint: disable=import-error
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_NAME,
CONF_TIME_ZONE,
CONF_UNIT_SYSTEM,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
CONF_STATION_ID = "station_id"
DEFAULT_ATTRIBUTION = "Data provided by NOAA"
DEFAULT_NAME = "NOAA Tides"
DEFAULT_TIMEZONE = "lst_ldt"
SCAN_INTERVAL = timedelta(minutes=60)
TIMEZONES = ["gmt", "lst", "lst_ldt"]
UNIT_SYSTEMS = ["english", "metric"]
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_STATION_ID): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TIME_ZONE, default=DEFAULT_TIMEZONE): vol.In(TIMEZONES),
vol.Optional(CONF_UNIT_SYSTEM): vol.In(UNIT_SYSTEMS),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the NOAA Tides and Currents sensor."""
station_id = config[CONF_STATION_ID]
name = config.get(CONF_NAME)
timezone = config.get(CONF_TIME_ZONE)
if CONF_UNIT_SYSTEM in config:
unit_system = config[CONF_UNIT_SYSTEM]
elif hass.config.units.is_metric:
unit_system = UNIT_SYSTEMS[1]
else:
unit_system = UNIT_SYSTEMS[0]
try:
station = coops.Station(station_id, unit_system)
except KeyError:
_LOGGER.error("NOAA Tides Sensor station_id %s does not exist", station_id)
return
except requests.exceptions.ConnectionError as exception:
_LOGGER.error(
"Connection error during setup in NOAA Tides Sensor for station_id: %s",
station_id,
)
raise PlatformNotReady from exception
noaa_sensor = NOAATidesAndCurrentsSensor(
name, station_id, timezone, unit_system, station
)
add_entities([noaa_sensor], True)
class NOAATidesAndCurrentsSensor(Entity):
"""Representation of a NOAA Tides and Currents sensor."""
def __init__(self, name, station_id, timezone, unit_system, station):
"""Initialize the sensor."""
self._name = name
self._station_id = station_id
self._timezone = timezone
self._unit_system = unit_system
self._station = station
self.data = None
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def device_state_attributes(self):
"""Return the state attributes of this device."""
attr = {ATTR_ATTRIBUTION: DEFAULT_ATTRIBUTION}
if self.data is None:
return attr
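# self.data holds the upcoming high/low tide predictions, one row per event;
# row 0 is the next event, so rows 1 and 2 form the following high/low pair.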
if self.data["hi_lo"][1] == "H":
attr["high_tide_time"] = self.data.index[1].strftime("%Y-%m-%dT%H:%M")
attr["high_tide_height"] = self.data["predicted_wl"][1]
attr["low_tide_time"] = self.data.index[2].strftime("%Y-%m-%dT%H:%M")
attr["low_tide_height"] = self.data["predicted_wl"][2]
elif self.data["hi_lo"][1] == "L":
attr["low_tide_time"] = self.data.index[1].strftime("%Y-%m-%dT%H:%M")
attr["low_tide_height"] = self.data["predicted_wl"][1]
attr["high_tide_time"] = self.data.index[2].strftime("%Y-%m-%dT%H:%M")
attr["high_tide_height"] = self.data["predicted_wl"][2]
return attr
@property
def state(self):
"""Return the state of the device."""
if self.data is None:
return None
api_time = self.data.index[0]
if self.data["hi_lo"][0] == "H":
tidetime = api_time.strftime("%-I:%M %p")
return f"High tide at {tidetime}"
if self.data["hi_lo"][0] == "L":
tidetime = api_time.strftime("%-I:%M %p")
return f"Low tide at {tidetime}"
return None
def update(self):
"""Get the latest data from NOAA Tides and Currents API."""
begin = datetime.now()
delta = timedelta(days=2)
end = begin + delta
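# Query roughly two days of high/low tide predictions (MLLW datum) starting now.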
try:
df_predictions = self._station.get_data(
begin_date=begin.strftime("%Y%m%d %H:%M"),
end_date=end.strftime("%Y%m%d %H:%M"),
product="predictions",
datum="MLLW",
interval="hilo",
units=self._unit_system,
time_zone=self._timezone,
)
self.data = df_predictions.head()
_LOGGER.debug("Data = %s", self.data)
_LOGGER.debug(
"Recent Tide data queried with start time set to %s",
begin.strftime("%m-%d-%Y %H:%M"),
)
except ValueError as err:
_LOGGER.error("Check NOAA Tides and Currents: %s", err.args)
self.data = None
|
from __future__ import unicode_literals
import os
import re
import itertools
from core.CONF import confcore
from lib.fun.decorator import magic
from lib.data.data import paths, pyoptions
from lib.fun.leetmode import leet_mode_magic
from lib.fun.fun import cool, walks_all_files, unique, charanger
try:
import ConfigParser
except ImportError:
import configparser as ConfigParser
def extend_magic(rawlist):
if not rawlist:
exit(pyoptions.CRLF + cool.red("[-] raw extend resource cannot be empty"))
leet = pyoptions.extend_leet
@magic
def extend():
if pyoptions.more:
for _ in walks_all_files(paths.weblist_path):
yield "".join(_)
for _ in walks_all_files(paths.syslist_path):
yield "".join(_)
for _ in extend_enter(rawlist, leet=leet):
yield "".join(_)
def wordsharker(raw, leet=True):
# The raw word may use mixed case (neither all lowercase nor all uppercase), e.g. 'myName'.
init_word_res = []
raw = str(raw).strip()
if not raw:
return []
# level {format}
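# The configured level acts as a cutoff: a mutation tagged N below is generated
# only when pyoptions.level <= N, so lower levels produce more variants.
# For example, wordsharker('myName') may yield 'myname', 'Myname', 'MYNAME',
# 'myNamemyName', 'emaNym', ... depending on the configured level.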
if pyoptions.level <= 5:
# 5 {raw}
init_word_res.append(raw)
if pyoptions.level <= 4:
# 4 {raw:lowercase}
init_word_res.append(raw.lower())
# 4 {Raw:capitalize}
init_word_res.append(raw.capitalize())
if pyoptions.level <= 3:
# 3 {RAW:uppercase}
init_word_res.append(raw.upper())
if pyoptions.level <= 2:
# 2 {raw}{raw}
init_word_res.append(raw + raw)
# 2 {raw:lowercase}{raw:lowercase}
init_word_res.append(raw.lower() + raw.lower())
# 2 {raw}{RAW:uppercase}
init_word_res.append(raw + raw.upper())
# 2 {raw:lowercase}{RAW:uppercase}
init_word_res.append(raw.lower() + raw.upper())
if pyoptions.level <= 1:
# 1 {RAW:uppercase}{raw}
init_word_res.append(raw.upper() + raw)
# 1 {RAW:uppercase}{raw:lowercase}
init_word_res.append(raw.upper() + raw.lower())
# 1 {r:initials:lowercase}
init_word_res.append(raw[0].lower())
# 1 {R:initials:uppercase}
init_word_res.append(raw[0].upper())
# 1 {war:reverse}
init_word_res.append(raw[::-1])
# 1 {war:reverse:lowercase}
init_word_res.append(raw[::-1].lower())
# 1 {war:reverse:uppercase}
init_word_res.append(raw[::-1].upper())
# 1 {Raw:capitalize}{raw}
init_word_res.append(raw.capitalize() + raw)
# 1 {Raw:capitalize}{raw:lowercase}
init_word_res.append(raw.capitalize() + raw.lower())
# 1 {Raw:capitalize}{RAW:uppercase}
init_word_res.append(raw.capitalize() + raw.upper())
# 1 {Raw:capitalize}{Raw:capitalize}
init_word_res.append(raw.capitalize() + raw.capitalize())
# 1 {waR:capitalize:reverse}
init_word_res.append(raw.capitalize()[::-1])
# 1 {raW:reverse:capitalize:reverse}
init_word_res.append(raw[::-1].capitalize()[::-1])
# 1 {raw}{war:reverse}
init_word_res.append(raw + raw[::-1])
# 1 {raw}{war:reverse:lowercase}
init_word_res.append(raw + raw[::-1].lower())
# 1 {raw}{war:reverse:uppercase}
init_word_res.append(raw + raw[::-1].upper())
# 1337 mode
if leet:
for code in pyoptions.leetmode_code:
init_word_res.append(leet_mode_magic(raw, code))
return unique(init_word_res)
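# extend_enter() combines the word variants produced by wordsharker() with the
# rules read from the extend configuration file, which is expected to provide
# 'prefix', 'suffix', 'prefix_suffix' and 'middle' sections; each option encodes
# a level plus a pattern (a character range, a conf pattern, or a literal string).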
def extend_enter(rawlist, leet=True):
extend_conf_dict = {'prefix': [], 'suffix': [], 'prefix_suffix': [], 'middle': []}
try:
config = ConfigParser.SafeConfigParser(allow_no_value=True)
config.optionxform = str
config.read(paths.extendconf_path)
for s in config.sections():
for o in config.options(s):
extend_conf_dict[s].append(o)
except Exception as e:
exit(cool.red('[-] Parse extend cfg file error' + pyoptions.CRLF + cool.fuchsia('[!] ' + str(e))))
res = []
prefix_content = extend_conf_dict['prefix']
suffix_content = extend_conf_dict['suffix']
prefix_suffix_content = extend_conf_dict['prefix_suffix']
middle_content = extend_conf_dict['middle']
for raw in rawlist:
shapers = wordsharker(raw, leet=leet)
for middle in middle_content:
matches = re.findall(pyoptions.level_str_pattern, middle)
if matches:
middles = []
level = matches[0][0]
middle = matches[0][1].strip()
for key, value in pyoptions.charmap.items():
middle = middle.replace(key, value)
if re.findall(pyoptions.rangepattern, middle):
middles.extend(charanger(middle[1:-1]))
elif re.findall(pyoptions.confpattern, middle):
for m in confcore(middle):
middles.append(m)
else:
middles.append(middle)
middle_length = pyoptions.middle_switcher
for m in middles:
if int(level) >= pyoptions.level:
for item in itertools.product(rawlist, repeat=2):
if len(item[0]) <= middle_length and len(item[1]) <= middle_length:
res.append(item[0] + m + item[1])
for item in itertools.product(shapers, repeat=2):
if len(item[0]) <= middle_length and len(item[1]) <= middle_length:
res.append(item[0] + m + item[1])
for w in shapers:
res.append(w)
for suffix in suffix_content:
matches = re.findall(pyoptions.level_str_pattern, suffix)
if matches:
tails = []
level = matches[0][0]
tail = matches[0][1].strip()
for key, value in pyoptions.charmap.items():
tail = tail.replace(key, value)
if re.findall(pyoptions.rangepattern, tail):
tails.extend(charanger(tail[1:-1]))
elif re.findall(pyoptions.confpattern, tail):
for t in confcore(tail):
tails.append(t)
else:
tails.append(tail)
for t in tails:
if int(level) >= pyoptions.level:
res.append(w + t)
for prefix_suffix in prefix_suffix_content:
matches = re.findall(pyoptions.level_str_str_pattern, prefix_suffix)
if matches:
heads = []
tails = []
level = matches[0][0]
head = matches[0][1].strip()
tail = matches[0][2].strip()
for key, value in pyoptions.charmap.items():
head = head.replace(key, value)
tail = tail.replace(key, value)
if re.findall(pyoptions.rangepattern, head):
heads.extend(charanger(head[1:-1]))
elif re.findall(pyoptions.confpattern, head):
for h in confcore(head):
heads.append(h)
else:
heads.append(head)
if re.findall(pyoptions.rangepattern, tail):
tails.extend(charanger(tail[1:-1]))
elif re.findall(pyoptions.confpattern, tail):
for t in confcore(tail):
tails.append(t)
else:
tails.append(tail)
for h in heads:
for t in tails:
if int(level) >= pyoptions.level:
res.append(h + w + t)
for prefix in prefix_content:
matches = re.findall(pyoptions.level_str_pattern, prefix)
if matches:
heads = []
level = matches[0][0]
head = matches[0][1].strip()
for key, value in pyoptions.charmap.items():
head = head.replace(key, value)
if re.findall(pyoptions.rangepattern, head):
heads.extend(charanger(head[1:-1]))
elif re.findall(pyoptions.confpattern, head):
for h in confcore(head):
heads.append(h)
else:
heads.append(head)
for h in heads:
if int(level) >= pyoptions.level:
res.append(h + w)
return unique(res)
def get_extend_dic(target):
rawlist = []
for t in target:
if os.path.isfile(t):
with open(t) as f:
for line in f.readlines():
rawlist.append(line.strip())
else:
rawlist.append(t)
extend_magic(rawlist)
|
from collections import OrderedDict
import os.path as op
import numpy as np
from functools import partial
import xml.etree.ElementTree as ElementTree
from .montage import make_dig_montage
from ..transforms import _sph_to_cart
from ..utils import warn, _pl
from . import __file__ as _CHANNELS_INIT_FILE
MONTAGE_PATH = op.join(op.dirname(_CHANNELS_INIT_FILE), 'data', 'montages')
_str = 'U100'
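# numpy dtype used for reading channel names as (up to 100-character) unicode strings.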
# In standard_1020, T9=LPA, T10=RPA, Nasion is the same as Iz with a
# sign-flipped Y value
def _egi_256(head_size):
fname = op.join(MONTAGE_PATH, 'EGI_256.csd')
montage = _read_csd(fname, head_size)
ch_pos = montage._get_ch_pos()
# For this cap, the Nasion is the frontmost electrode,
# LPA/RPA we approximate by putting 75% of the way (toward the front)
# between the two electrodes that are halfway down the ear holes
nasion = ch_pos['E31']
lpa = 0.75 * ch_pos['E67'] + 0.25 * ch_pos['E94']
rpa = 0.75 * ch_pos['E219'] + 0.25 * ch_pos['E190']
fids_montage = make_dig_montage(
coord_frame='unknown', nasion=nasion, lpa=lpa, rpa=rpa,
)
montage += fids_montage # add fiducials to montage
return montage
def _easycap(basename, head_size):
fname = op.join(MONTAGE_PATH, basename)
montage = _read_theta_phi_in_degrees(fname, head_size, add_fiducials=True)
return montage
def _hydrocel(basename, head_size):
fname = op.join(MONTAGE_PATH, basename)
return _read_sfp(fname, head_size)
def _str_names(ch_names):
return [str(ch_name) for ch_name in ch_names]
def _safe_np_loadtxt(fname, **kwargs):
out = np.genfromtxt(fname, **kwargs)
ch_names = _str_names(out['f0'])
others = tuple(out['f%d' % ii] for ii in range(1, len(out.dtype.fields)))
return (ch_names,) + others
def _biosemi(basename, head_size):
fname = op.join(MONTAGE_PATH, basename)
fid_names = ('Nz', 'LPA', 'RPA')
return _read_theta_phi_in_degrees(fname, head_size, fid_names)
def _mgh_or_standard(basename, head_size):
fid_names = ('Nz', 'LPA', 'RPA')
fname = op.join(MONTAGE_PATH, basename)
ch_names_, pos = [], []
with open(fname) as fid:
# Ignore units as we will scale later using the norms anyway
for line in fid:
if 'Positions\n' in line:
break
pos = []
for line in fid:
if 'Labels\n' in line:
break
pos.append(list(map(float, line.split())))
for line in fid:
if not line or not set(line) - {' '}:
break
ch_names_.append(line.strip(' ').strip('\n'))
pos = np.array(pos)
ch_pos = _check_dupes_odict(ch_names_, pos)
nasion, lpa, rpa = [ch_pos.pop(n) for n in fid_names]
scale = head_size / np.median(np.linalg.norm(pos, axis=1))
for value in ch_pos.values():
value *= scale
nasion *= scale
lpa *= scale
rpa *= scale
return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown',
nasion=nasion, lpa=lpa, rpa=rpa)
standard_montage_look_up_table = {
'EGI_256': _egi_256,
'easycap-M1': partial(_easycap, basename='easycap-M1.txt'),
'easycap-M10': partial(_easycap, basename='easycap-M10.txt'),
'GSN-HydroCel-128': partial(_hydrocel, basename='GSN-HydroCel-128.sfp'),
'GSN-HydroCel-129': partial(_hydrocel, basename='GSN-HydroCel-129.sfp'),
'GSN-HydroCel-256': partial(_hydrocel, basename='GSN-HydroCel-256.sfp'),
'GSN-HydroCel-257': partial(_hydrocel, basename='GSN-HydroCel-257.sfp'),
'GSN-HydroCel-32': partial(_hydrocel, basename='GSN-HydroCel-32.sfp'),
'GSN-HydroCel-64_1.0': partial(_hydrocel,
basename='GSN-HydroCel-64_1.0.sfp'),
'GSN-HydroCel-65_1.0': partial(_hydrocel,
basename='GSN-HydroCel-65_1.0.sfp'),
'biosemi128': partial(_biosemi, basename='biosemi128.txt'),
'biosemi16': partial(_biosemi, basename='biosemi16.txt'),
'biosemi160': partial(_biosemi, basename='biosemi160.txt'),
'biosemi256': partial(_biosemi, basename='biosemi256.txt'),
'biosemi32': partial(_biosemi, basename='biosemi32.txt'),
'biosemi64': partial(_biosemi, basename='biosemi64.txt'),
'mgh60': partial(_mgh_or_standard, basename='mgh60.elc'),
'mgh70': partial(_mgh_or_standard, basename='mgh70.elc'),
'standard_1005': partial(_mgh_or_standard,
basename='standard_1005.elc'),
'standard_1020': partial(_mgh_or_standard,
basename='standard_1020.elc'),
'standard_alphabetic': partial(_mgh_or_standard,
basename='standard_alphabetic.elc'),
'standard_postfixed': partial(_mgh_or_standard,
basename='standard_postfixed.elc'),
'standard_prefixed': partial(_mgh_or_standard,
basename='standard_prefixed.elc'),
'standard_primed': partial(_mgh_or_standard,
basename='standard_primed.elc'),
}
def _read_sfp(fname, head_size):
"""Read .sfp BESA/EGI files."""
# fname has been already checked
fid_names = ('FidNz', 'FidT9', 'FidT10')
options = dict(dtype=(_str, 'f4', 'f4', 'f4'))
ch_names, xs, ys, zs = _safe_np_loadtxt(fname, **options)
# deal with "headshape"
mask = np.array([ch_name == 'headshape' for ch_name in ch_names], bool)
hsp = np.stack([xs[mask], ys[mask], zs[mask]], axis=-1)
mask = ~mask
pos = np.stack([xs[mask], ys[mask], zs[mask]], axis=-1)
ch_names = [ch_name for ch_name, m in zip(ch_names, mask) if m]
ch_pos = _check_dupes_odict(ch_names, pos)
del xs, ys, zs, ch_names
# The fiducial names are not guaranteed to be present.
nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names]
if head_size is not None:
scale = head_size / np.median(np.linalg.norm(pos, axis=-1))
for value in ch_pos.values():
value *= scale
nasion = nasion * scale if nasion is not None else None
lpa = lpa * scale if lpa is not None else None
rpa = rpa * scale if rpa is not None else None
return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown',
nasion=nasion, rpa=rpa, lpa=lpa, hsp=hsp)
def _read_csd(fname, head_size):
# Label, Theta, Phi, Radius, X, Y, Z, off sphere surface
options = dict(comments='//',
dtype=(_str, 'f4', 'f4', 'f4', 'f4', 'f4', 'f4', 'f4'))
ch_names, _, _, _, xs, ys, zs, _ = _safe_np_loadtxt(fname, **options)
pos = np.stack([xs, ys, zs], axis=-1)
if head_size is not None:
pos *= head_size / np.median(np.linalg.norm(pos, axis=1))
return make_dig_montage(ch_pos=_check_dupes_odict(ch_names, pos))
def _check_dupes_odict(ch_names, pos):
"""Warn if there are duplicates, then turn to ordered dict."""
ch_names = list(ch_names)
dups = OrderedDict((ch_name, ch_names.count(ch_name))
for ch_name in ch_names)
dups = OrderedDict((ch_name, count) for ch_name, count in dups.items()
if count > 1)
n = len(dups)
if n:
dups = ', '.join(
f'{ch_name} ({count})' for ch_name, count in dups.items())
warn(f'Duplicate channel position{_pl(n)} found, the last will be '
f'used for {dups}')
return OrderedDict(zip(ch_names, pos))
def _read_elc(fname, head_size):
"""Read .elc files.
Parameters
----------
fname : str
File extension is expected to be '.elc'.
head_size : float | None
The size of the head in [m]. If none, returns the values read from the
file with no modification.
Returns
-------
montage : instance of DigMontage
The montage in [m].
"""
fid_names = ('Nz', 'LPA', 'RPA')
ch_names_, pos = [], []
with open(fname) as fid:
# Unlike _mgh_or_standard, _read_elc needs to detect the units used in the file.
for line in fid:
if 'UnitPosition' in line:
units = line.split()[1]
scale = dict(m=1., mm=1e-3)[units]
break
else:
raise RuntimeError('Could not detect units in file %s' % fname)
for line in fid:
if 'Positions\n' in line:
break
pos = []
for line in fid:
if 'Labels\n' in line:
break
pos.append(list(map(float, line.split())))
for line in fid:
if not line or not set(line) - {' '}:
break
ch_names_.append(line.strip(' ').strip('\n'))
pos = np.array(pos) * scale
if head_size is not None:
pos *= head_size / np.median(np.linalg.norm(pos, axis=1))
ch_pos = _check_dupes_odict(ch_names_, pos)
nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names]
return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown',
nasion=nasion, lpa=lpa, rpa=rpa)
def _read_theta_phi_in_degrees(fname, head_size, fid_names=None,
add_fiducials=False):
ch_names, theta, phi = _safe_np_loadtxt(fname, skip_header=1,
dtype=(_str, 'i4', 'i4'))
if add_fiducials:
# Add fiducials based on 10/20 spherical coordinate definitions
# http://chgd.umich.edu/wp-content/uploads/2014/06/
# 10-20_system_positioning.pdf
# extrapolated from other sensor coordinates in the Easycap layouts
# https://www.easycap.de/wp-content/uploads/2018/02/
# Easycap-Equidistant-Layouts.pdf
assert fid_names is None
fid_names = ['Nasion', 'LPA', 'RPA']
ch_names.extend(fid_names)
theta = np.append(theta, [115, -115, 115])
phi = np.append(phi, [90, 0, 0])
radii = np.full(len(phi), head_size)
pos = _sph_to_cart(np.array([radii, np.deg2rad(phi), np.deg2rad(theta)]).T)
ch_pos = _check_dupes_odict(ch_names, pos)
nasion, lpa, rpa = None, None, None
if fid_names is not None:
nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names]
return make_dig_montage(ch_pos=ch_pos, coord_frame='unknown',
nasion=nasion, lpa=lpa, rpa=rpa)
def _read_elp_besa(fname, head_size):
# This .elp format is not the same as the Polhemus .elp; see _read_isotrak_elp_points.
dtype = np.dtype('S8, S8, f8, f8, f8')
try:
data = np.loadtxt(fname, dtype=dtype, skip_header=1)
except TypeError:
data = np.loadtxt(fname, dtype=dtype, skiprows=1)
ch_names = data['f1'].astype(str).tolist()
az = data['f2']
horiz = data['f3']
radius = np.abs(az / 180.)
az = np.deg2rad(np.array([h if a >= 0. else 180 + h
for h, a in zip(horiz, az)]))
pol = radius * np.pi
rad = data['f4'] / 100
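# The radius column appears to be given in centimetres; dividing by 100 yields metres.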
pos = _sph_to_cart(np.array([rad, az, pol]).T)
if head_size is not None:
pos *= head_size / np.median(np.linalg.norm(pos, axis=1))
ch_pos = _check_dupes_odict(ch_names, pos)
fid_names = ('Nz', 'LPA', 'RPA')
# The fiducial names are not guaranteed to exist in the file.
nasion, lpa, rpa = [ch_pos.pop(n, None) for n in fid_names]
return make_dig_montage(ch_pos=ch_pos, nasion=nasion, lpa=lpa, rpa=rpa)
def _read_brainvision(fname, head_size):
# 'BrainVision Electrodes File' format
# Based on BrainVision Analyzer coordinate system: Defined between
# standard electrode positions: X-axis from T7 to T8, Y-axis from Oz to
# Fpz, Z-axis orthogonal from XY-plane through Cz, fit to a sphere if
# idealized (when radius=1), specified in millimeters
root = ElementTree.parse(fname).getroot()
ch_names = [s.text for s in root.findall("./Electrode/Name")]
theta = [float(s.text) for s in root.findall("./Electrode/Theta")]
pol = np.deg2rad(np.array(theta))
phi = [float(s.text) for s in root.findall("./Electrode/Phi")]
az = np.deg2rad(np.array(phi))
rad = [float(s.text) for s in root.findall("./Electrode/Radius")]
rad = np.array(rad) # specified in mm
pos = _sph_to_cart(np.array([rad, az, pol]).T)
if head_size is not None:
pos *= head_size / np.median(np.linalg.norm(pos, axis=1))
return make_dig_montage(ch_pos=_check_dupes_odict(ch_names, pos))
|
import asyncio
import contextlib
from datetime import datetime
import logging
import logging.handlers
import os
import sys
import threading
from time import monotonic
from typing import TYPE_CHECKING, Any, Dict, Optional, Set
import voluptuous as vol
import yarl
from homeassistant import config as conf_util, config_entries, core, loader
from homeassistant.components import http
from homeassistant.const import (
EVENT_HOMEASSISTANT_STOP,
REQUIRED_NEXT_PYTHON_DATE,
REQUIRED_NEXT_PYTHON_VER,
)
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType
from homeassistant.setup import (
DATA_SETUP,
DATA_SETUP_STARTED,
async_set_domains_to_be_loaded,
async_setup_component,
)
from homeassistant.util.logging import async_activate_log_queue_handler
from homeassistant.util.package import async_get_user_site, is_virtual_env
from homeassistant.util.yaml import clear_secret_cache
if TYPE_CHECKING:
from .runner import RuntimeConfig
_LOGGER = logging.getLogger(__name__)
ERROR_LOG_FILENAME = "home-assistant.log"
# hass.data key for logging information.
DATA_LOGGING = "logging"
LOG_SLOW_STARTUP_INTERVAL = 60
STAGE_1_TIMEOUT = 120
STAGE_2_TIMEOUT = 300
WRAP_UP_TIMEOUT = 300
COOLDOWN_TIME = 60
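# Timeouts (in seconds) applied to the staged startup in _async_set_up_integrations below.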
DEBUGGER_INTEGRATIONS = {"debugpy", "ptvsd"}
CORE_INTEGRATIONS = ("homeassistant", "persistent_notification")
LOGGING_INTEGRATIONS = {
# Set log levels
"logger",
# Error logging
"system_log",
"sentry",
# To record data
"recorder",
}
STAGE_1_INTEGRATIONS = {
# To make sure we forward data to other instances
"mqtt_eventstream",
# To provide account link implementations
"cloud",
# Ensure supervisor is available
"hassio",
# Get the frontend up and running as soon
# as possible so problem integrations can
# be removed
"frontend",
}
async def async_setup_hass(
runtime_config: "RuntimeConfig",
) -> Optional[core.HomeAssistant]:
"""Set up Home Assistant."""
hass = core.HomeAssistant()
hass.config.config_dir = runtime_config.config_dir
async_enable_logging(
hass,
runtime_config.verbose,
runtime_config.log_rotate_days,
runtime_config.log_file,
runtime_config.log_no_color,
)
hass.config.skip_pip = runtime_config.skip_pip
if runtime_config.skip_pip:
_LOGGER.warning(
"Skipping pip installation of required modules. This may cause issues"
)
if not await conf_util.async_ensure_config_exists(hass):
_LOGGER.error("Error getting configuration path")
return None
_LOGGER.info("Config directory: %s", runtime_config.config_dir)
config_dict = None
basic_setup_success = False
safe_mode = runtime_config.safe_mode
if not safe_mode:
await hass.async_add_executor_job(conf_util.process_ha_config_upgrade, hass)
try:
config_dict = await conf_util.async_hass_config_yaml(hass)
except HomeAssistantError as err:
_LOGGER.error(
"Failed to parse configuration.yaml: %s. Activating safe mode",
err,
)
else:
if not is_virtual_env():
await async_mount_local_lib_path(runtime_config.config_dir)
basic_setup_success = (
await async_from_config_dict(config_dict, hass) is not None
)
finally:
clear_secret_cache()
if config_dict is None:
safe_mode = True
elif not basic_setup_success:
_LOGGER.warning("Unable to set up core integrations. Activating safe mode")
safe_mode = True
elif (
"frontend" in hass.data.get(DATA_SETUP, {})
and "frontend" not in hass.config.components
):
_LOGGER.warning("Detected that frontend did not load. Activating safe mode")
# Ask integrations to shut down. It's messy but we can't
# do a clean stop without knowing what is broken
hass.async_track_tasks()
hass.bus.async_fire(EVENT_HOMEASSISTANT_STOP, {})
with contextlib.suppress(asyncio.TimeoutError):
async with hass.timeout.async_timeout(10):
await hass.async_block_till_done()
safe_mode = True
old_config = hass.config
hass = core.HomeAssistant()
hass.config.skip_pip = old_config.skip_pip
hass.config.internal_url = old_config.internal_url
hass.config.external_url = old_config.external_url
hass.config.config_dir = old_config.config_dir
if safe_mode:
_LOGGER.info("Starting in safe mode")
hass.config.safe_mode = True
http_conf = (await http.async_get_last_config(hass)) or {}
await async_from_config_dict(
{"safe_mode": {}, "http": http_conf},
hass,
)
if runtime_config.open_ui:
hass.add_job(open_hass_ui, hass)
return hass
def open_hass_ui(hass: core.HomeAssistant) -> None:
"""Open the UI."""
import webbrowser # pylint: disable=import-outside-toplevel
if hass.config.api is None or "frontend" not in hass.config.components:
_LOGGER.warning("Cannot launch the UI because frontend not loaded")
return
scheme = "https" if hass.config.api.use_ssl else "http"
url = str(
yarl.URL.build(scheme=scheme, host="127.0.0.1", port=hass.config.api.port)
)
if not webbrowser.open(url):
_LOGGER.warning(
"Unable to open the Home Assistant UI in a browser. Open it yourself at %s",
url,
)
async def async_from_config_dict(
config: ConfigType, hass: core.HomeAssistant
) -> Optional[core.HomeAssistant]:
"""Try to configure Home Assistant from a configuration dictionary.
Dynamically loads required components and its dependencies.
This method is a coroutine.
"""
start = monotonic()
hass.config_entries = config_entries.ConfigEntries(hass, config)
await hass.config_entries.async_initialize()
# Set up core.
_LOGGER.debug("Setting up %s", CORE_INTEGRATIONS)
if not all(
await asyncio.gather(
*(
async_setup_component(hass, domain, config)
for domain in CORE_INTEGRATIONS
)
)
):
_LOGGER.error("Home Assistant core failed to initialize. ")
return None
_LOGGER.debug("Home Assistant core initialized")
core_config = config.get(core.DOMAIN, {})
try:
await conf_util.async_process_ha_core_config(hass, core_config)
except vol.Invalid as config_err:
conf_util.async_log_exception(config_err, "homeassistant", core_config, hass)
return None
except HomeAssistantError:
_LOGGER.error(
"Home Assistant core failed to initialize. "
"Further initialization aborted"
)
return None
await _async_set_up_integrations(hass, config)
stop = monotonic()
_LOGGER.info("Home Assistant initialized in %.2fs", stop - start)
if REQUIRED_NEXT_PYTHON_DATE and sys.version_info[:3] < REQUIRED_NEXT_PYTHON_VER:
msg = (
"Support for the running Python version "
f"{'.'.join(str(x) for x in sys.version_info[:3])} is deprecated and will "
f"be removed in the first release after {REQUIRED_NEXT_PYTHON_DATE}. "
"Please upgrade Python to "
f"{'.'.join(str(x) for x in REQUIRED_NEXT_PYTHON_VER)} or "
"higher."
)
_LOGGER.warning(msg)
hass.components.persistent_notification.async_create(
msg, "Python version", "python_version"
)
return hass
@core.callback
def async_enable_logging(
hass: core.HomeAssistant,
verbose: bool = False,
log_rotate_days: Optional[int] = None,
log_file: Optional[str] = None,
log_no_color: bool = False,
) -> None:
"""Set up the logging.
This method must be run in the event loop.
"""
fmt = "%(asctime)s %(levelname)s (%(threadName)s) [%(name)s] %(message)s"
datefmt = "%Y-%m-%d %H:%M:%S"
if not log_no_color:
try:
# pylint: disable=import-outside-toplevel
from colorlog import ColoredFormatter
# basicConfig must be called after importing colorlog in order to
# ensure that the handlers it sets up wrap the correct streams.
logging.basicConfig(level=logging.INFO)
colorfmt = f"%(log_color)s{fmt}%(reset)s"
logging.getLogger().handlers[0].setFormatter(
ColoredFormatter(
colorfmt,
datefmt=datefmt,
reset=True,
log_colors={
"DEBUG": "cyan",
"INFO": "green",
"WARNING": "yellow",
"ERROR": "red",
"CRITICAL": "red",
},
)
)
except ImportError:
pass
# If the above initialization failed for any reason, set up the default
# formatting. If the above succeeds, this will result in a no-op.
logging.basicConfig(format=fmt, datefmt=datefmt, level=logging.INFO)
# Suppress overly verbose logs from libraries that aren't helpful
logging.getLogger("requests").setLevel(logging.WARNING)
logging.getLogger("urllib3").setLevel(logging.WARNING)
logging.getLogger("aiohttp.access").setLevel(logging.WARNING)
sys.excepthook = lambda *args: logging.getLogger(None).exception(
"Uncaught exception", exc_info=args # type: ignore
)
if sys.version_info[:2] >= (3, 8):
threading.excepthook = lambda args: logging.getLogger(None).exception(
"Uncaught thread exception",
exc_info=(args.exc_type, args.exc_value, args.exc_traceback),
)
# Log errors to a file if we have write access to file or config dir
if log_file is None:
err_log_path = hass.config.path(ERROR_LOG_FILENAME)
else:
err_log_path = os.path.abspath(log_file)
err_path_exists = os.path.isfile(err_log_path)
err_dir = os.path.dirname(err_log_path)
# Check if we can write to the error log if it exists or that
# we can create files in the containing directory if not.
if (err_path_exists and os.access(err_log_path, os.W_OK)) or (
not err_path_exists and os.access(err_dir, os.W_OK)
):
if log_rotate_days:
err_handler: logging.FileHandler = (
logging.handlers.TimedRotatingFileHandler(
err_log_path, when="midnight", backupCount=log_rotate_days
)
)
else:
err_handler = logging.FileHandler(err_log_path, mode="w", delay=True)
err_handler.setLevel(logging.INFO if verbose else logging.WARNING)
err_handler.setFormatter(logging.Formatter(fmt, datefmt=datefmt))
logger = logging.getLogger("")
logger.addHandler(err_handler)
logger.setLevel(logging.INFO if verbose else logging.WARNING)
# Save the log file location for access by other components.
hass.data[DATA_LOGGING] = err_log_path
else:
_LOGGER.error("Unable to set up error log %s (access denied)", err_log_path)
async_activate_log_queue_handler(hass)
async def async_mount_local_lib_path(config_dir: str) -> str:
"""Add local library to Python Path.
This function is a coroutine.
"""
deps_dir = os.path.join(config_dir, "deps")
lib_dir = await async_get_user_site(deps_dir)
if lib_dir not in sys.path:
sys.path.insert(0, lib_dir)
return deps_dir
@core.callback
def _get_domains(hass: core.HomeAssistant, config: Dict[str, Any]) -> Set[str]:
"""Get domains of components to set up."""
# Filter out the repeating and common config section [homeassistant]
domains = {key.split(" ")[0] for key in config if key != core.DOMAIN}
# Add config entry domains
if not hass.config.safe_mode:
domains.update(hass.config_entries.async_domains())
# Make sure the Hass.io component is loaded
if "HASSIO" in os.environ:
domains.add("hassio")
return domains
async def _async_log_pending_setups(
domains: Set[str], setup_started: Dict[str, datetime]
) -> None:
"""Periodic log of setups that are pending for longer than LOG_SLOW_STARTUP_INTERVAL."""
while True:
await asyncio.sleep(LOG_SLOW_STARTUP_INTERVAL)
remaining = [domain for domain in domains if domain in setup_started]
if remaining:
_LOGGER.warning(
"Waiting on integrations to complete setup: %s",
", ".join(remaining),
)
async def async_setup_multi_components(
hass: core.HomeAssistant,
domains: Set[str],
config: Dict[str, Any],
setup_started: Dict[str, datetime],
) -> None:
"""Set up multiple domains. Log on failure."""
futures = {
domain: hass.async_create_task(async_setup_component(hass, domain, config))
for domain in domains
}
log_task = asyncio.create_task(_async_log_pending_setups(domains, setup_started))
await asyncio.wait(futures.values())
log_task.cancel()
errors = [domain for domain in domains if futures[domain].exception()]
for domain in errors:
exception = futures[domain].exception()
assert exception is not None
_LOGGER.error(
"Error setting up integration %s - received exception",
domain,
exc_info=(type(exception), exception, exception.__traceback__),
)
async def _async_set_up_integrations(
hass: core.HomeAssistant, config: Dict[str, Any]
) -> None:
"""Set up all the integrations."""
setup_started = hass.data[DATA_SETUP_STARTED] = {}
domains_to_setup = _get_domains(hass, config)
# Resolve all dependencies so we know all integrations
# that will have to be loaded and start right away
integration_cache: Dict[str, loader.Integration] = {}
to_resolve = domains_to_setup
while to_resolve:
old_to_resolve = to_resolve
to_resolve = set()
integrations_to_process = [
int_or_exc
for int_or_exc in await asyncio.gather(
*(
loader.async_get_integration(hass, domain)
for domain in old_to_resolve
),
return_exceptions=True,
)
if isinstance(int_or_exc, loader.Integration)
]
resolve_dependencies_tasks = [
itg.resolve_dependencies()
for itg in integrations_to_process
if not itg.all_dependencies_resolved
]
if resolve_dependencies_tasks:
await asyncio.gather(*resolve_dependencies_tasks)
for itg in integrations_to_process:
integration_cache[itg.domain] = itg
for dep in itg.all_dependencies:
if dep in domains_to_setup:
continue
domains_to_setup.add(dep)
to_resolve.add(dep)
_LOGGER.info("Domains to be set up: %s", domains_to_setup)
logging_domains = domains_to_setup & LOGGING_INTEGRATIONS
# Load logging as soon as possible
if logging_domains:
_LOGGER.info("Setting up logging: %s", logging_domains)
await async_setup_multi_components(hass, logging_domains, config, setup_started)
# Start up debuggers. Start these first in case they want to wait.
debuggers = domains_to_setup & DEBUGGER_INTEGRATIONS
if debuggers:
_LOGGER.debug("Setting up debuggers: %s", debuggers)
await async_setup_multi_components(hass, debuggers, config, setup_started)
# Calculate which components to set up in which stage
stage_1_domains = set()
# Find all dependencies of any dependency of any stage 1 integration that
# we plan on loading and promote them to stage 1
deps_promotion = STAGE_1_INTEGRATIONS
while deps_promotion:
old_deps_promotion = deps_promotion
deps_promotion = set()
for domain in old_deps_promotion:
if domain not in domains_to_setup or domain in stage_1_domains:
continue
stage_1_domains.add(domain)
dep_itg = integration_cache.get(domain)
if dep_itg is None:
continue
deps_promotion.update(dep_itg.all_dependencies)
stage_2_domains = domains_to_setup - logging_domains - debuggers - stage_1_domains
# Kick off loading the registries. They don't need to be awaited.
asyncio.create_task(hass.helpers.device_registry.async_get_registry())
asyncio.create_task(hass.helpers.entity_registry.async_get_registry())
asyncio.create_task(hass.helpers.area_registry.async_get_registry())
# Start setup
if stage_1_domains:
_LOGGER.info("Setting up stage 1: %s", stage_1_domains)
try:
async with hass.timeout.async_timeout(
STAGE_1_TIMEOUT, cool_down=COOLDOWN_TIME
):
await async_setup_multi_components(
hass, stage_1_domains, config, setup_started
)
except asyncio.TimeoutError:
_LOGGER.warning("Setup timed out for stage 1 - moving forward")
# Enables after dependencies
async_set_domains_to_be_loaded(hass, stage_1_domains | stage_2_domains)
if stage_2_domains:
_LOGGER.info("Setting up stage 2: %s", stage_2_domains)
try:
async with hass.timeout.async_timeout(
STAGE_2_TIMEOUT, cool_down=COOLDOWN_TIME
):
await async_setup_multi_components(
hass, stage_2_domains, config, setup_started
)
except asyncio.TimeoutError:
_LOGGER.warning("Setup timed out for stage 2 - moving forward")
# Wrap up startup
_LOGGER.debug("Waiting for startup to wrap up")
try:
async with hass.timeout.async_timeout(WRAP_UP_TIMEOUT, cool_down=COOLDOWN_TIME):
await hass.async_block_till_done()
except asyncio.TimeoutError:
_LOGGER.warning("Setup timed out for bootstrap - moving forward")
|
import errno
import os
import sys
import time
from collections import defaultdict
from itertools import chain
from celery.exceptions import TimeoutError
from django.conf import settings
from django.core.cache import cache
from django.core.checks import Critical, Error, Info
from django.core.mail import get_connection
from weblate.utils.celery import get_queue_stats
from weblate.utils.data import data_dir
from weblate.utils.docs import get_doc_url
from weblate.utils.site import check_domain, get_site_domain
GOOD_CACHE = {"MemcachedCache", "PyLibMCCache", "DatabaseCache", "RedisCache"}
DEFAULT_MAILS = {
"root@localhost",
"webmaster@localhost",
"[email protected]",
"[email protected]",
}
DEFAULT_SECRET_KEYS = {
"jm8fqjlg+5!#xu%e-oh#7!$aa7!6avf7ud*_v=chdrb9qdco6(",
"secret key used for tests only",
}
DOC_LINKS = {
"security.W001": ("admin/upgdade", "up-3-1"),
"security.W002": ("admin/upgdade", "up-3-1"),
"security.W003": ("admin/upgdade", "up-3-1"),
"security.W004": ("admin/install", "production-ssl"),
"security.W005": ("admin/install", "production-ssl"),
"security.W006": ("admin/upgdade", "up-3-1"),
"security.W007": ("admin/upgdade", "up-3-1"),
"security.W008": ("admin/install", "production-ssl"),
"security.W009": ("admin/install", "production-secret"),
"security.W010": ("admin/install", "production-ssl"),
"security.W011": ("admin/install", "production-ssl"),
"security.W012": ("admin/install", "production-ssl"),
"security.W018": ("admin/install", "production-debug"),
"security.W019": ("admin/upgdade", "up-3-1"),
"security.W020": ("admin/install", "production-hosts"),
"security.W021": ("admin/install", "production-ssl"),
"weblate.E002": ("admin/install", "file-permissions"),
"weblate.E003": ("admin/install", "out-mail"),
"weblate.E005": ("admin/install", "celery"),
"weblate.E006": ("admin/install", "production-database"),
"weblate.E007": ("admin/install", "production-cache"),
"weblate.E008": ("admin/install", "production-cache-avatar"),
"weblate.E009": ("admin/install", "celery"),
"weblate.E011": ("admin/install", "production-admins"),
"weblate.E012": ("admin/install", "production-email"),
"weblate.E013": ("admin/install", "production-email"),
"weblate.E014": ("admin/install", "production-secret"),
"weblate.E015": ("admin/install", "production-hosts"),
"weblate.E016": ("admin/install", "production-templates"),
"weblate.E017": ("admin/install", "production-site"),
"weblate.E018": ("admin/optionals", "avatars"),
"weblate.E019": ("admin/install", "celery"),
"weblate.E020": ("admin/install", "celery"),
"weblate.I021": ("admin/install", "collecting-errors"),
"weblate.E022": ("admin/optionals", "git-exporter"),
"weblate.C023": ("admin/install", "production-encoding"),
"weblate.C024": ("admin/install", "pangocairo"),
"weblate.W025": ("admin/install", "optional-deps"),
"weblate.E026": ("admin/install", "celery"),
"weblate.E027": ("admin/install", "file-permissions"),
"weblate.I028": ("admin/backup",),
"weblate.C029": ("admin/backup",),
"weblate.C030": ("admin/install", "celery"),
"weblate.I031": ("admin/upgrade",),
"weblate.C031": ("admin/upgrade",),
"weblate.C032": ("admin/install",),
"weblate.W033": ("vcs",),
"weblate.E034": ("admin/install", "celery"),
"weblate.C035": ("vcs",),
"weblate.C036": ("admin/optionals", "gpg-sign"),
}
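# check_doc_link() strips trailing components from a check ID (for example
# 'weblate.E026.extra' -> 'weblate.E026') until it matches an entry in this table.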
def check_doc_link(docid, strict=False):
while docid.count(".") > 1:
docid = docid.rsplit(".", 1)[0]
try:
return get_doc_url(*DOC_LINKS[docid])
except KeyError:
if strict:
raise
return None
def weblate_check(id, message, cls=Critical):
"""Returns Django check instance."""
return cls(message, hint=check_doc_link(id), id=id)
def check_mail_connection(app_configs, **kwargs):
errors = []
try:
connection = get_connection()
connection.open()
connection.close()
except Exception as error:
message = "Cannot send e-mail ({}), please check EMAIL_* settings."
errors.append(weblate_check("weblate.E003", message.format(error)))
return errors
def is_celery_queue_long():
"""
Check whether the Celery queue is too long.
It only triggers if the queue has been too long for at least one hour; this
filters out short peaks and avoids warnings for large one-off operations
(for example site-wide automatic translation).
"""
cache_key = "celery_queue_stats"
queues_data = cache.get(cache_key, {})
# Hours since epoch
current_hour = int(time.time() / 3600)
test_hour = current_hour - 1
# Fetch current stats
stats = get_queue_stats()
# Update counters
if current_hour not in queues_data:
# Delete stale items
for key in list(queues_data.keys()):
if key < test_hour:
del queues_data[key]
# Add current one
queues_data[current_hour] = stats
# Store to cache
cache.set(cache_key, queues_data, 7200)
# Do not fire if we do not have stats from the previous hour
if test_hour not in queues_data:
return False
# Check if any queue got bigger
base = queues_data[test_hour]
thresholds = defaultdict(lambda: 50)
thresholds["translate"] = 1000
return any(
stat > thresholds[key] and base.get(key, 0) > thresholds[key]
for key, stat in stats.items()
)
def check_celery(app_configs, **kwargs):
# Import this lazily to avoid evaluating settings too early
from weblate.utils.tasks import ping
errors = []
if settings.CELERY_TASK_ALWAYS_EAGER:
errors.append(
weblate_check(
"weblate.E005", "Celery is configured in the eager mode", Error
)
)
elif settings.CELERY_BROKER_URL == "memory://":
errors.append(
weblate_check(
"weblate.E026", "Celery is configured to store queue in local memory"
)
)
else:
if is_celery_queue_long():
errors.append(
weblate_check(
"weblate.E009",
"The Celery tasks queue is too long, either the worker "
"is not running, or is too slow.",
)
)
result = ping.delay()
try:
pong = result.get(timeout=10, disable_sync_subtasks=False)
current = ping()
# Check for outdated Celery running different version of configuration
if current != pong:
if pong is None:
# Celery runs Weblate 4.0 or older
differing = ["version"]
else:
differing = [
key
for key, value in current.items()
if key not in pong or value != pong[key]
]
errors.append(
weblate_check(
"weblate.E034",
"The Celery process is outdated or misconfigured."
" Following items differ: {}".format(", ".join(differing)),
)
)
except TimeoutError:
errors.append(
weblate_check(
"weblate.E019",
"The Celery does not process tasks, or is too slow "
"in processing them.",
)
)
except NotImplementedError:
errors.append(
weblate_check(
"weblate.E020",
"The Celery is not configured to store results, "
"CELERY_RESULT_BACKEND is probably not set.",
)
)
heartbeat = cache.get("celery_heartbeat")
loaded = cache.get("celery_loaded")
now = time.time()
if loaded and now - loaded > 60 and (not heartbeat or now - heartbeat > 600):
errors.append(
weblate_check(
"weblate.C030",
"The Celery beat scheduler is not executing periodic tasks "
"in a timely manner.",
)
)
return errors
def check_database(app_configs, **kwargs):
if settings.DATABASES["default"]["ENGINE"] == "django.db.backends.postgresql":
return []
return [
weblate_check(
"weblate.E006",
"Weblate performs best with PostgreSQL, consider migrating to it.",
Info,
)
]
def check_cache(app_configs, **kwargs):
"""Check for sane caching."""
errors = []
cache_backend = settings.CACHES["default"]["BACKEND"].split(".")[-1]
if cache_backend not in GOOD_CACHE:
errors.append(
weblate_check(
"weblate.E007",
"The configured cache back-end will lead to serious "
"performance or consistency issues.",
)
)
if settings.ENABLE_AVATARS and "avatar" not in settings.CACHES:
errors.append(
weblate_check(
"weblate.E008",
"Please set up separate avatar caching to reduce pressure "
"on the default cache.",
Error,
)
)
return errors
def check_settings(app_configs, **kwargs):
"""Check for sane settings."""
errors = []
if not settings.ADMINS or "[email protected]" in (x[1] for x in settings.ADMINS):
errors.append(
weblate_check(
"weblate.E011",
"E-mail addresses for site admins is misconfigured",
Error,
)
)
if settings.SERVER_EMAIL in DEFAULT_MAILS:
errors.append(
weblate_check(
"weblate.E012",
"The server e-mail address should be changed from its default value",
)
)
if settings.DEFAULT_FROM_EMAIL in DEFAULT_MAILS:
errors.append(
weblate_check(
"weblate.E013",
'The "From" e-mail address should be changed from its default value',
)
)
if settings.SECRET_KEY in DEFAULT_SECRET_KEYS:
errors.append(
weblate_check(
"weblate.E014",
"The cookie secret key should be changed from its default value",
)
)
if not settings.ALLOWED_HOSTS:
errors.append(weblate_check("weblate.E015", "No allowed hosts are set up"))
return errors
def check_templates(app_configs, **kwargs):
"""Check for cached DjangoTemplates Loader."""
if settings.DEBUG:
return []
from django.template import engines
from django.template.backends.django import DjangoTemplates
from django.template.loaders import cached
is_cached = True
for engine in engines.all():
if not isinstance(engine, DjangoTemplates):
continue
for loader in engine.engine.template_loaders:
if not isinstance(loader, cached.Loader):
is_cached = False
if is_cached:
return []
return [
weblate_check(
"weblate.E016",
"Set up a cached template loader for better performance",
Error,
)
]
def check_data_writable(app_configs=None, **kwargs):
"""Check we can write to data dir."""
errors = []
dirs = [
settings.DATA_DIR,
data_dir("home"),
data_dir("ssh"),
data_dir("vcs"),
data_dir("celery"),
data_dir("backups"),
data_dir("fonts"),
data_dir("cache", "fonts"),
]
message = "Path {} is not writable, check your DATA_DIR settings."
for path in dirs:
if not os.path.exists(path):
os.makedirs(path)
elif not os.access(path, os.W_OK):
errors.append(weblate_check("weblate.E002", message.format(path)))
return errors
def check_site(app_configs, **kwargs):
errors = []
if not check_domain(get_site_domain()):
errors.append(weblate_check("weblate.E017", "Correct the site domain"))
return errors
def check_perms(app_configs=None, **kwargs):
"""Check that the data dir can be written to."""
errors = []
uid = os.getuid()
message = "The path {} is owned by different user, check your DATA_DIR settings."
for dirpath, dirnames, filenames in os.walk(settings.DATA_DIR):
for name in chain(dirnames, filenames):
# Skip toplevel lost+found dir, that one is typically owned by root
# on filesystem toplevel directory
if dirpath == settings.DATA_DIR and name == "lost+found":
continue
path = os.path.join(dirpath, name)
try:
stat = os.lstat(path)
except OSError as error:
# File was removed meanwhile
if error.errno == errno.ENOENT:
continue
raise
if stat.st_uid != uid:
errors.append(weblate_check("weblate.E027", message.format(path)))
return errors
def check_errors(app_configs=None, **kwargs):
"""Check that error collection is configured."""
if (
hasattr(settings, "ROLLBAR")
or hasattr(settings, "RAVEN_CONFIG")
or settings.SENTRY_DSN
):
return []
return [
weblate_check(
"weblate.I021",
"Error collection is not set up, "
"it is highly recommended for production use",
Info,
)
]
def check_encoding(app_configs=None, **kwargs):
"""Check that the encoding is UTF-8."""
if sys.getfilesystemencoding() == "utf-8" and sys.getdefaultencoding() == "utf-8":
return []
return [
weblate_check(
"weblate.C023",
"System encoding is not UTF-8, processing non-ASCII strings will break",
)
]
def check_diskspace(app_configs=None, **kwargs):
"""Check free disk space."""
stat = os.statvfs(settings.DATA_DIR)
if stat.f_bavail * stat.f_bsize < 10000000:
return [weblate_check("weblate.C032", "The disk is nearly full")]
return []
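# --- Illustrative sketch, not part of the original module ----------------------
# The functions above follow Django's system-check contract: a callable taking
# (app_configs, **kwargs) and returning a list of messages. One assumed way to
# wire them into the framework is shown below; the real project may register
# its checks elsewhere (for example from an AppConfig.ready() hook), and
# deploy-only checks can be registered with register(..., deploy=True).
from django.core.checks import register as register_check

register_check(check_cache)
register_check(check_settings)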
|
import io
import json
import os
from nikola.plugin_categories import TemplateSystem
from nikola.utils import makedirs, req_missing, sort_posts, _smartjoin_filter
try:
import jinja2
from jinja2 import meta
except ImportError:
jinja2 = None
class JinjaTemplates(TemplateSystem):
"""Support for Jinja2 templates."""
name = "jinja"
lookup = None
dependency_cache = {}
per_file_cache = {}
def __init__(self):
"""Initialize Jinja2 environment with extended set of filters."""
if jinja2 is None:
return
def set_directories(self, directories, cache_folder):
"""Create a new template lookup with set directories."""
if jinja2 is None:
req_missing(['jinja2'], 'use this theme')
cache_folder = os.path.join(cache_folder, 'jinja')
makedirs(cache_folder)
cache = jinja2.FileSystemBytecodeCache(cache_folder)
self.lookup = jinja2.Environment(bytecode_cache=cache)
self.lookup.trim_blocks = True
self.lookup.lstrip_blocks = True
self.lookup.filters['tojson'] = json.dumps
self.lookup.filters['sort_posts'] = sort_posts
self.lookup.filters['smartjoin'] = _smartjoin_filter
self.lookup.globals['enumerate'] = enumerate
self.lookup.globals['isinstance'] = isinstance
self.lookup.globals['tuple'] = tuple
self.directories = directories
self.create_lookup()
def inject_directory(self, directory):
"""Add a directory to the lookup and recreate it if it's not there yet."""
if directory not in self.directories:
self.directories.append(directory)
self.create_lookup()
def create_lookup(self):
"""Create a template lookup."""
self.lookup.loader = jinja2.FileSystemLoader(self.directories,
encoding='utf-8')
def set_site(self, site):
"""Set the Nikola site."""
self.site = site
self.lookup.filters.update(self.site.config['TEMPLATE_FILTERS'])
def render_template(self, template_name, output_name, context):
"""Render the template into output_name using context."""
if jinja2 is None:
req_missing(['jinja2'], 'use this theme')
template = self.lookup.get_template(template_name)
data = template.render(**context)
if output_name is not None:
makedirs(os.path.dirname(output_name))
with io.open(output_name, 'w', encoding='utf-8') as output:
output.write(data)
return data
def render_template_to_string(self, template, context):
"""Render template to a string using context."""
return self.lookup.from_string(template).render(**context)
def get_string_deps(self, text):
"""Find dependencies for a template string."""
deps = set([])
ast = self.lookup.parse(text)
dep_names = [d for d in meta.find_referenced_templates(ast) if d]
for dep_name in dep_names:
filename = self.lookup.loader.get_source(self.lookup, dep_name)[1]
sub_deps = [filename] + self.get_deps(filename)
self.dependency_cache[dep_name] = sub_deps
deps |= set(sub_deps)
return list(deps)
def get_deps(self, filename):
"""Return paths to dependencies for the template loaded from filename."""
with io.open(filename, 'r', encoding='utf-8-sig') as fd:
text = fd.read()
return self.get_string_deps(text)
def template_deps(self, template_name):
"""Generate list of dependencies for a template."""
if self.dependency_cache.get(template_name) is None:
filename = self.lookup.loader.get_source(self.lookup, template_name)[1]
self.dependency_cache[template_name] = [filename] + self.get_deps(filename)
return self.dependency_cache[template_name]
def get_template_path(self, template_name):
"""Get the path to a template or return None."""
try:
t = self.lookup.get_template(template_name)
return t.filename
except jinja2.TemplateNotFound:
return None
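# --- Illustrative usage sketch, not part of the original plugin ----------------
# Assuming Nikola and jinja2 are importable, the lookup built by
# set_directories() can render ad-hoc template strings as well as file-based
# templates. The directory names below are placeholders.
if __name__ == '__main__':
    ts = JinjaTemplates()
    ts.set_directories(['templates'], '/tmp/nikola-demo')
    print(ts.render_template_to_string('Hello {{ name|upper }}!', {'name': 'world'}))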
|
import diamond.collector
from subprocess import Popen, PIPE
try:
import json
except ImportError:
import simplejson as json
class OpenstackSwiftCollector(diamond.collector.Collector):
def get_default_config_help(self):
config_help = super(OpenstackSwiftCollector,
self).get_default_config_help()
config_help.update({
'enable_dispersion_report': 'gather swift-dispersion-report ' +
'metrics (default False)',
'enable_container_metrics': 'gather containers metrics ' +
'(# objects, bytes used, ' +
'x_timestamp. default True)',
'auth_url': 'authentication url (for enable_container_metrics)',
'account': 'swift auth account (for enable_container_metrics)',
'user': 'swift auth user (for enable_container_metrics)',
'password': 'swift auth password (for enable_container_metrics)',
            'containers': 'containers on which to count number of objects, ' +
                          'comma separated list (for enable_container_metrics)'
})
return config_help
def get_default_config(self):
"""
Returns the default collector settings
"""
config = super(OpenstackSwiftCollector, self).get_default_config()
config.update({
'path': 'openstackswift',
'enable_dispersion_report': False,
'enable_container_metrics': True,
# don't use the threaded model with this one.
# for some reason it crashes.
'interval': 1200, # by default, every 20 minutes
})
return config
def collect(self):
# dispersion report. this can take easily >60s. beware!
if (self.config['enable_dispersion_report']):
p = Popen(
['swift-dispersion-report', '-j'],
stdout=PIPE,
stderr=PIPE)
stdout, stderr = p.communicate()
self.publish('dispersion.errors', len(stderr.split('\n')) - 1)
data = json.loads(stdout)
for t in ('object', 'container'):
for (k, v) in data[t].items():
self.publish('dispersion.%s.%s' % (t, k), v)
# container metrics returned by stat <container>
if(self.config['enable_container_metrics']):
account = '%s:%s' % (self.config['account'], self.config['user'])
for container in self.config['containers'].split(','):
cmd = ['swift', '-A', self.config['auth_url'],
'-U', account,
'-K', self.config['password'],
'stat', container]
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()
stats = {}
# stdout is some lines in 'key : val' format
for line in stdout.split('\n'):
if line:
line = line.split(':', 2)
stats[line[0].strip()] = line[1].strip()
key = 'container_metrics.%s.%s' % (self.config['account'],
container)
self.publish('%s.objects' % key, stats['Objects'])
self.publish('%s.bytes' % key, stats['Bytes'])
self.publish('%s.x_timestamp' % key, stats['X-Timestamp'])
|
import unittest
import mock
from kalliope.core.NeuronModule import NeuronModule
from kalliope.neurons.systemdate import Systemdate
class TestSystemdate(unittest.TestCase):
def setUp(self):
pass
def test_date_is_returned(self):
"""
        Check that the neuron returns consistent values
:return:
"""
with mock.patch.object(NeuronModule, 'say', return_value=None) as mock_method:
systemdate = Systemdate()
# check returned value
self.assertTrue(0 <= int(systemdate.message["hours"]) <= 24)
self.assertTrue(0 <= int(systemdate.message["minutes"]) <= 60)
self.assertTrue(0 <= int(systemdate.message["weekday"]) <= 6)
self.assertTrue(1 <= int(systemdate.message["day_month"]) <= 31)
self.assertTrue(1 <= int(systemdate.message["month"]) <= 12)
self.assertTrue(2016 <= int(systemdate.message["year"]) <= 3000)
if __name__ == '__main__':
unittest.main()
|
import numpy as np
from scipy.special import ndtr
from scattertext.termsignificance import LogOddsRatioUninformativeDirichletPrior
def z_to_p_val(z_scores):
# return norm.sf(-z_scores) - 0.5 + 0.5
return ndtr(z_scores)
class LogOddsRatioInformativeDirichletPrior(LogOddsRatioUninformativeDirichletPrior):
'''
    Implements the log-odds-ratio with an informative Dirichlet prior from
Monroe, B. L., Colaresi, M. P., & Quinn, K. M. (2008). Fightin' words: Lexical feature selection and evaluation for identifying the content of political conflict. Political Analysis, 16(4), 372–403.
'''
def __init__(self,
priors,
sigma=10,
scale_type='none',
prior_power=1):
'''
Parameters
----------
priors : pd.Series
term -> prior count
sigma : np.float
prior scale
scale_type : str
'none': Don't scale prior. Jurafsky approach.
            'class-size': Scale prior such that the sum of the priors is the same as the word count
in the document-class being scaled
'corpus-size': Scale prior to the size of the corpus
'word': Original formulation from MCQ. Sum of priors will be sigma.
'background-corpus-size': Scale corpus size to multiple of background-corpus.
prior_power : numeric
Exponent to apply to prior
> 1 will shrink frequent words
'''
assert scale_type in ['none', 'class-size', 'corpus-size',
'background-corpus-size', 'word']
self._priors = priors
self._scale_type = scale_type
self._prior_power = prior_power
self._scale = sigma
LogOddsRatioUninformativeDirichletPrior.__init__(self, sigma)
def get_priors(self):
return self._priors
def get_name(self):
return "Log-Odds-Ratio w/ Informative Prior"
def get_zeta_i_j_given_separate_counts(self, y_i, y_j):
'''
Parameters
----------
y_i, np.array(int)
Arrays of word counts of words occurring in positive class
        y_j, np.array(int)
            Arrays of word counts of words occurring in negative class
Returns
-------
np.array of z-scores
'''
n_i, n_j = y_i.sum(), y_j.sum()
prior_scale_j = prior_scale_i = 1
if self._scale_type == 'class-size':
prior_scale_i = ((n_i) * self._scale * 1. / np.sum(self._priors))
prior_scale_j = ((n_j) * self._scale * 1. / np.sum(self._priors))
elif self._scale_type == 'corpus-size':
prior_scale_j = prior_scale_i = ((n_i + n_j) * self._scale * 1. / np.sum(self._priors))
elif self._scale_type == 'word':
prior_scale_j = prior_scale_i = self._scale / np.sum(self._priors)
elif self._scale_type == 'background-corpus-size':
prior_scale_j = prior_scale_i = self._scale
a_wj = (self._priors * prior_scale_j) ** self._prior_power
a_0j = np.sum(a_wj)
a_wi = (self._priors * prior_scale_i) ** self._prior_power
a_0i = np.sum(a_wi)
delta_i_j = (np.log((y_i + a_wi) / (n_i + a_0i - y_i - a_wi))
- np.log((y_j + a_wj) / (n_j + a_0j - y_j - a_wj)))
var_delta_i_j = (1. / (y_i + a_wi)
+ 1. / (n_i + a_0i - y_i - a_wi)
+ 1. / (y_j + a_wj)
+ 1. / (n_j + a_0j - y_j - a_wj))
zeta_i_j = delta_i_j / np.sqrt(var_delta_i_j)
return zeta_i_j
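# --- Illustrative usage sketch, not part of the original module ----------------
# A tiny worked example of the z-score computation above. The vocabulary,
# counts and prior values are made up for illustration; the class expects the
# priors as a pandas Series indexed by term.
if __name__ == '__main__':
    import pandas as pd
    priors = pd.Series([10., 5., 1.], index=['food', 'politics', 'sports'])
    scorer = LogOddsRatioInformativeDirichletPrior(priors, sigma=10, scale_type='word')
    y_i = np.array([8, 2, 1])   # term counts in the positive class
    y_j = np.array([3, 9, 4])   # term counts in the other class
    print(scorer.get_zeta_i_j_given_separate_counts(y_i, y_j))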
|
from __future__ import unicode_literals
from os.path import isfile
from lib.fun.decorator import magic
from lib.data.data import pyoptions
from lib.fun.fun import cool, finishcounter
def uniqifer_magic(*args):
"""[file]"""
args = list(args[0])
if len(args) == 2:
original_file_path = args[1]
if not isfile(original_file_path):
exit(pyoptions.CRLF + cool.red("[-] File: {} don't exists".format(original_file_path)))
else:
exit(pyoptions.CRLF + cool.fuchsia("[!] Usage: {} {}".format(args[0], pyoptions.tools_info.get(args[0]))))
@magic
def uniqifer():
with open(original_file_path) as o_f:
for item in o_f.readlines():
yield item.strip()
print("[+] Source of :{0} lines".format(cool.orange(finishcounter(original_file_path))))
|
import argparse
import glob
import os
import struct
import sys
def clamp_to_u8(value):
if value > 255:
value = 255
elif value < 0:
value = 0
return value
def parse_args():
parser = argparse.ArgumentParser(description="Set the charging colour")
parser.add_argument('-d', '--device', type=str, help="Device string like \"0003:1532:0045.000C\"")
parser.add_argument('--colour', required=True, nargs=3, metavar=("R", "G", "B"), type=int, help="Charging colour")
args = parser.parse_args()
return args
def run():
args = parse_args()
if args.device is None:
mouse_dirs = glob.glob(os.path.join('/sys/bus/hid/drivers/razermouse/', "*:*:*.*"))
if len(mouse_dirs) > 1:
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
if len(mouse_dirs) < 1:
print("No mouse directories found. Make sure the driver is binded", file=sys.stderr)
sys.exit(1)
mouse_dir = mouse_dirs[0]
else:
mouse_dir = os.path.join('/sys/bus/hid/drivers/razermouse/', args.device)
if not os.path.isdir(mouse_dir):
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
values = map(clamp_to_u8, args.colour)
byte_string = struct.pack(">BBB", *values)
set_charging_colour_filepath = os.path.join(mouse_dir, "set_charging_colour")
with open(set_charging_colour_filepath, 'wb') as set_charging_colour_file:
set_charging_colour_file.write(byte_string)
print("Done")
if __name__ == '__main__':
run()
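# --- Illustrative note, not part of the original script -------------------------
# The sysfs attribute expects exactly three raw bytes (R, G, B). After clamping,
# struct.pack(">BBB", ...) produces that byte string, e.g.:
#
#   >>> import struct
#   >>> struct.pack(">BBB", *map(clamp_to_u8, (300, -5, 128)))
#   b'\xff\x00\x80'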
|
from homeassistant.components.climate.const import HVAC_MODE_HEAT_COOL
from .util import async_init_integration
async def test_climate_zones(hass):
"""Test creation climate zones."""
await async_init_integration(hass)
state = hass.states.get("climate.nick_office")
assert state.state == HVAC_MODE_HEAT_COOL
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"current_humidity": 52.0,
"current_temperature": 22.8,
"dehumidify_setpoint": 45.0,
"dehumidify_supported": True,
"fan_mode": "Auto",
"fan_modes": ["Auto", "On", "Circulate"],
"friendly_name": "Nick Office",
"humidify_supported": False,
"humidity": 45.0,
"hvac_action": "cooling",
"hvac_modes": ["off", "auto", "heat_cool", "heat", "cool"],
"max_humidity": 65.0,
"max_temp": 37.2,
"min_humidity": 35.0,
"min_temp": 12.8,
"preset_mode": "None",
"preset_modes": ["None", "Home", "Away", "Sleep"],
"supported_features": 31,
"target_temp_high": 26.1,
"target_temp_low": 17.2,
"target_temp_step": 1.0,
"temperature": None,
"zone_status": "Relieving Air",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("climate.kitchen")
assert state.state == HVAC_MODE_HEAT_COOL
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"current_humidity": 36.0,
"current_temperature": 25.0,
"dehumidify_setpoint": 50.0,
"dehumidify_supported": True,
"fan_mode": "Auto",
"fan_modes": ["Auto", "On", "Circulate"],
"friendly_name": "Kitchen",
"humidify_supported": False,
"humidity": 50.0,
"hvac_action": "idle",
"hvac_modes": ["off", "auto", "heat_cool", "heat", "cool"],
"max_humidity": 65.0,
"max_temp": 37.2,
"min_humidity": 35.0,
"min_temp": 12.8,
"preset_mode": "None",
"preset_modes": ["None", "Home", "Away", "Sleep"],
"supported_features": 31,
"target_temp_high": 26.1,
"target_temp_low": 17.2,
"target_temp_step": 1.0,
"temperature": None,
"zone_status": "Idle",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
|
import pytest
import homeassistant.components.automation as automation
from homeassistant.components.fan import DOMAIN
from homeassistant.const import STATE_OFF, STATE_ON
from homeassistant.helpers import device_registry
from homeassistant.setup import async_setup_component
from tests.common import (
MockConfigEntry,
assert_lists_same,
async_get_device_automations,
async_mock_service,
mock_device_registry,
mock_registry,
)
@pytest.fixture
def device_reg(hass):
"""Return an empty, loaded, registry."""
return mock_device_registry(hass)
@pytest.fixture
def entity_reg(hass):
"""Return an empty, loaded, registry."""
return mock_registry(hass)
@pytest.fixture
def calls(hass):
"""Track calls to a mock service."""
return async_mock_service(hass, "test", "automation")
async def test_get_triggers(hass, device_reg, entity_reg):
"""Test we get the expected triggers from a fan."""
config_entry = MockConfigEntry(domain="test", data={})
config_entry.add_to_hass(hass)
device_entry = device_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
connections={(device_registry.CONNECTION_NETWORK_MAC, "12:34:56:AB:CD:EF")},
)
entity_reg.async_get_or_create(DOMAIN, "test", "5678", device_id=device_entry.id)
expected_triggers = [
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_off",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
{
"platform": "device",
"domain": DOMAIN,
"type": "turned_on",
"device_id": device_entry.id,
"entity_id": f"{DOMAIN}.test_5678",
},
]
triggers = await async_get_device_automations(hass, "trigger", device_entry.id)
assert_lists_same(triggers, expected_triggers)
async def test_if_fires_on_state_change(hass, calls):
"""Test for turn_on and turn_off triggers firing."""
hass.states.async_set("fan.entity", STATE_OFF)
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "fan.entity",
"type": "turned_on",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"turn_on - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
{
"trigger": {
"platform": "device",
"domain": DOMAIN,
"device_id": "",
"entity_id": "fan.entity",
"type": "turned_off",
},
"action": {
"service": "test.automation",
"data_template": {
"some": (
"turn_off - {{ trigger.platform}} - "
"{{ trigger.entity_id}} - {{ trigger.from_state.state}} - "
"{{ trigger.to_state.state}} - {{ trigger.for }}"
)
},
},
},
]
},
)
# Fake that the entity is turning on.
hass.states.async_set("fan.entity", STATE_ON)
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].data["some"] == "turn_on - device - fan.entity - off - on - None"
# Fake that the entity is turning off.
hass.states.async_set("fan.entity", STATE_OFF)
await hass.async_block_till_done()
assert len(calls) == 2
assert calls[1].data["some"] == "turn_off - device - fan.entity - on - off - None"
|
import os
import pytest
from qutebrowser.misc import crashdialog
VALID_CRASH_TEXT = """
Fatal Python error: Segmentation fault
_
Current thread 0x00007f09b538d700 (most recent call first):
File "", line 1 in testfunc
File "filename", line 88 in func
"""
VALID_CRASH_TEXT_EMPTY = """
Fatal Python error: Aborted
_
Current thread 0x00007f09b538d700 (most recent call first):
File "", line 1 in_
File "filename", line 88 in func
"""
VALID_CRASH_TEXT_THREAD = """
Fatal Python error: Segmentation fault
_
Thread 0x00007fa135ac7700 (most recent call first):
File "", line 1 in testfunc
"""
WINDOWS_CRASH_TEXT = r"""
Windows fatal exception: access violation
_
Current thread 0x000014bc (most recent call first):
File "qutebrowser\mainwindow\tabbedbrowser.py", line 468 in tabopen
File "qutebrowser\browser\shared.py", line 247 in get_tab
"""
INVALID_CRASH_TEXT = """
Hello world!
"""
@pytest.mark.parametrize('text, typ, func', [
(VALID_CRASH_TEXT, 'Segmentation fault', 'testfunc'),
(VALID_CRASH_TEXT_THREAD, 'Segmentation fault', 'testfunc'),
(VALID_CRASH_TEXT_EMPTY, 'Aborted', ''),
(WINDOWS_CRASH_TEXT, 'Windows access violation', 'tabopen'),
(INVALID_CRASH_TEXT, '', ''),
])
def test_parse_fatal_stacktrace(text, typ, func):
text = text.strip().replace('_', ' ')
assert crashdialog.parse_fatal_stacktrace(text) == (typ, func)
@pytest.mark.parametrize('env, expected', [
({'FOO': 'bar'}, ""),
({'FOO': 'bar', 'LC_ALL': 'baz'}, "LC_ALL = baz"),
({'LC_ALL': 'baz', 'PYTHONFOO': 'fish'}, "LC_ALL = baz\nPYTHONFOO = fish"),
(
{'DE': 'KDE', 'DESKTOP_SESSION': 'plasma'},
"DE = KDE\nDESKTOP_SESSION = plasma"
),
(
{'QT5_IM_MODULE': 'fcitx', 'QT_IM_MODULE': 'fcitx'},
"QT_IM_MODULE = fcitx"
),
({'LANGUAGE': 'foo', 'LANG': 'en_US.UTF-8'}, "LANG = en_US.UTF-8"),
({'FOO': 'bar', 'QUTE_BLAH': '1'}, "QUTE_BLAH = 1"),
])
def test_get_environment_vars(monkeypatch, env, expected):
"""Test for crashdialog._get_environment_vars."""
for key in os.environ.copy():
monkeypatch.delenv(key)
for k, v in env.items():
monkeypatch.setenv(k, v)
assert crashdialog._get_environment_vars() == expected
|
import json
import logging
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import providers
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.configs import option_decoders
from perfkitbenchmarker.providers.rackspace import util
FLAGS = flags.FLAGS
DURABILITY = 'durability'
EPHEMERAL = 'ephemeral'
PERSISTENT = 'persistent'
RAID10 = 'RAID10'
BOOT = 'boot'
LOCAL = 'local'
CBS_SATA = 'cbs-sata'
CBS_SSD = 'cbs-ssd'
REMOTE_TYPES = (CBS_SSD, CBS_SATA,)
REMOTE_TYPES_TRANSLATION = {
CBS_SATA: 'SATA',
CBS_SSD: 'SSD'
}
DISK_TYPE = {
disk.STANDARD: BOOT,
disk.REMOTE_SSD: CBS_SSD,
disk.LOCAL: LOCAL
}
DISK_METADATA = {
BOOT: {
disk.REPLICATION: RAID10,
DURABILITY: EPHEMERAL
},
LOCAL: {
disk.REPLICATION: RAID10,
DURABILITY: EPHEMERAL
},
CBS_SSD: {
disk.REPLICATION: disk.REGION,
DURABILITY: PERSISTENT,
disk.MEDIA: disk.SSD
},
CBS_SATA: {
disk.REPLICATION: disk.REGION,
DURABILITY: PERSISTENT,
disk.MEDIA: disk.HDD
}
}
disk.RegisterDiskTypeMap(providers.RACKSPACE, DISK_TYPE)
class RackspaceDiskSpec(disk.BaseDiskSpec):
"""Object containing the information needed to create a
RackspaceDisk.
Attributes:
rackspace_region: None or string. Rackspace region to build VM resources.
rack_profile: None or string. Rack CLI profile configuration.
"""
CLOUD = providers.RACKSPACE
@classmethod
def _ApplyFlags(cls, config_values, flag_values):
super(RackspaceDiskSpec, cls)._ApplyFlags(config_values, flag_values)
if flag_values['rackspace_region'].present:
config_values['rackspace_region'] = flag_values.rackspace_region
if flag_values['rack_profile'].present:
config_values['rack_profile'] = flag_values.rack_profile
@classmethod
def _GetOptionDecoderConstructions(cls):
result = super(RackspaceDiskSpec, cls)._GetOptionDecoderConstructions()
result.update({
'rackspace_region': (option_decoders.StringDecoder, {'default': 'IAD'}),
'rack_profile': (option_decoders.StringDecoder, {'default': None})})
return result
class RackspaceDisk(disk.BaseDisk):
"""Base class for Rackspace disks."""
def __init__(self, disk_spec, name, region, project, image=None):
super(RackspaceDisk, self).__init__(disk_spec)
self.name = name
self.zone = region
self.region = disk_spec.rackspace_region
self.profile = disk_spec.rack_profile
self.project = project
self.image = image
self.attached_vm_id = None
self.metadata.update(DISK_METADATA[disk_spec.disk_type])
def _Create(self):
"""Creates the disk."""
raise NotImplementedError()
def _Delete(self):
"""Deletes the disk."""
raise NotImplementedError()
def Attach(self, vm):
"""Attaches disk, if needed, to the VM."""
self.attached_vm_id = vm.id
def Detach(self):
"""Detaches disk, if needed, from the VM."""
self.attached_vm_id = None
class RackspaceLocalDisk(RackspaceDisk):
"""RackspaceLocalDisk is a disk object to represent an ephemeral storage disk
locally attached to an instance.
"""
def __init__(self, disk_spec, name, region, project, device_path, image=None):
super(RackspaceLocalDisk, self).__init__(disk_spec, name, region, project,
image)
self.exists = False
self.device_path = device_path
self.name = name
def _Create(self):
self.exists = True
def _Delete(self):
self.exists = False
def _Exists(self):
return self.exists
class RackspaceBootDisk(RackspaceLocalDisk):
"""RackspaceBootDisk is a disk object to represent the root (boot) disk of an
instance. Boot disk provides a directory path as a scratch disk space for a
benchmark, but does not allow its backing block device to be formatted, or
its mount point to be changed.
"""
def __init__(self, disk_spec, zone, project, device_path, image):
super(RackspaceBootDisk, self).__init__(disk_spec, 'boot-disk', zone,
project, device_path, image)
self.mount_point = disk_spec.mount_point
class RackspaceRemoteDisk(RackspaceDisk):
"""RackspaceRemoteDisk is a RackspaceDisk object to represent a remotely
attached Cloud Block Storage Volume.
"""
def __init__(self, disk_spec, name, region, project, image=None, media=None):
super(RackspaceRemoteDisk, self).__init__(disk_spec, name, region, project,
image)
self.media = media
self.id = None
def _Create(self):
cmd = util.RackCLICommand(self, 'block-storage', 'volume', 'create')
cmd.flags['size'] = self.disk_size
cmd.flags['name'] = self.name
cmd.flags['volume-type'] = REMOTE_TYPES_TRANSLATION[self.media]
stdout, stderr, _ = cmd.Issue()
resp = json.loads(stdout)
self.id = resp['ID']
def _Delete(self):
if self.id is None:
return
cmd = util.RackCLICommand(self, 'block-storage', 'volume', 'delete')
cmd.flags['id'] = self.id
cmd.Issue()
self._WaitForRemoteDiskDeletion()
def _Exists(self):
if self.id is None:
return False
cmd = util.RackCLICommand(self, 'block-storage', 'volume', 'get')
cmd.flags['id'] = self.id
stdout, stderr, _ = cmd.Issue(suppress_warning=True)
if stdout and stdout.strip():
return stdout
return not stderr
def Attach(self, vm):
self._AttachRemoteDisk(vm)
self._WaitForRemoteDiskAttached(vm)
self.attached_vm_id = vm.id
def Detach(self):
self._DetachRemoteDisk()
self.attached_vm_id = None
def _AttachRemoteDisk(self, vm):
if self.id is None:
raise errors.Error('Cannot attach remote disk %s' % self.name)
if vm.id is None:
raise errors.VirtualMachine.VmStateError(
'Cannot attach remote disk %s to non-existing %s VM' % (self.name,
vm.name))
cmd = util.RackCLICommand(self, 'servers', 'volume-attachment', 'create')
cmd.flags['volume-id'] = self.id
cmd.flags['server-id'] = vm.id
stdout, stderr, _ = cmd.Issue()
if stderr:
raise errors.Error(
'Failed to attach remote disk %s to %s' % (self.name, vm.name))
resp = json.loads(stdout)
self.device_path = resp['Device']
@vm_util.Retry(poll_interval=1, max_retries=-1, timeout=300, log_errors=False,
retryable_exceptions=(errors.Resource.RetryableCreationError,))
def _WaitForRemoteDiskAttached(self, vm):
cmd = util.RackCLICommand(self, 'block-storage', 'volume', 'get')
cmd.flags['id'] = self.id
stdout, stderr, _ = cmd.Issue()
if stdout:
resp = json.loads(stdout)
attachments = resp['Attachments']
if attachments:
logging.info('Disk: %s has been attached to %s.' % (self.name, vm.id))
return
raise errors.Resource.RetryableCreationError(
'Disk: %s is not yet attached. Retrying to check status.' % self.name)
def _DetachRemoteDisk(self):
if self.id is None:
raise errors.Error('Cannot detach remote disk %s' % self.name)
if self.attached_vm_id is None:
raise errors.VirtualMachine.VmStateError(
'Cannot detach remote disk %s from a non-existing VM' % self.name)
cmd = util.RackCLICommand(self, 'servers', 'volume-attachment', 'delete')
cmd.flags['id'] = self.id
cmd.flags['server-id'] = self.attached_vm_id
stdout, stderr, _ = cmd.Issue()
if stdout:
resp = json.loads(stdout)
if 'Successfully deleted' in resp['result']:
return
raise errors.Resource.RetryableDeletionError(stderr)
@vm_util.Retry(poll_interval=1, max_retries=-1, timeout=300, log_errors=False,
retryable_exceptions=(errors.Resource.RetryableDeletionError,))
def _WaitForRemoteDiskDeletion(self):
cmd = util.RackCLICommand(self, 'block-storage', 'volume', 'get')
cmd.flags['id'] = self.id
stdout, stderr, _ = cmd.Issue()
if stderr:
logging.info('Disk: %s has been successfully deleted.' % self.name)
return
raise errors.Resource.RetryableDeletionError(
'Disk: %s has not been deleted. Retrying to check status.' % self.name)
|
import warnings
from importlib import import_module
from django.core.exceptions import ImproperlyConfigured
from zinnia.settings import SPAM_CHECKER_BACKENDS
def get_spam_checker(backend_path):
"""
Return the selected spam checker backend.
"""
try:
backend_module = import_module(backend_path)
backend = getattr(backend_module, 'backend')
except (ImportError, AttributeError):
warnings.warn('%s backend cannot be imported' % backend_path,
RuntimeWarning)
backend = None
except ImproperlyConfigured as e:
warnings.warn(str(e), RuntimeWarning)
backend = None
return backend
def check_is_spam(content, content_object, request,
backends=None):
"""
    Return True if the content is spam, else False.
"""
if backends is None:
backends = SPAM_CHECKER_BACKENDS
for backend_path in backends:
spam_checker = get_spam_checker(backend_path)
if spam_checker is not None:
is_spam = spam_checker(content, content_object, request)
if is_spam:
return True
return False
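# --- Illustrative sketch, not part of the original module ----------------------
# get_spam_checker() imports a module by dotted path and looks up a callable
# named `backend` in it. A minimal hypothetical backend module therefore only
# needs to expose such a callable, e.g.:
#
#   # myproject/spam/always_ham.py (hypothetical module)
#   def backend(content, content_object, request):
#       """Return True if the comment is spam; this stub never flags anything."""
#       return False
#
# and would be enabled with:
#   SPAM_CHECKER_BACKENDS = ['myproject.spam.always_ham']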
|
from datetime import timedelta
import logging
import requests
from homeassistant.components.light import LightEntity
from homeassistant.core import callback
import homeassistant.util.dt as dt_util
from . import DOMAIN
from .entity import RingEntityMixin
_LOGGER = logging.getLogger(__name__)
# It takes a few seconds for the API to correctly return an update indicating
# that the changes have been made. Once we request a change (i.e. a light
# being turned on) we simply wait for this time delta before we allow
# updates to take place.
SKIP_UPDATES_DELAY = timedelta(seconds=5)
ON_STATE = "on"
OFF_STATE = "off"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Create the lights for the Ring devices."""
devices = hass.data[DOMAIN][config_entry.entry_id]["devices"]
lights = []
for device in devices["stickup_cams"]:
if device.has_capability("light"):
lights.append(RingLight(config_entry.entry_id, device))
async_add_entities(lights)
class RingLight(RingEntityMixin, LightEntity):
"""Creates a switch to turn the ring cameras light on and off."""
def __init__(self, config_entry_id, device):
"""Initialize the light."""
super().__init__(config_entry_id, device)
self._unique_id = device.id
self._light_on = device.lights == ON_STATE
self._no_updates_until = dt_util.utcnow()
@callback
def _update_callback(self):
"""Call update method."""
if self._no_updates_until > dt_util.utcnow():
return
self._light_on = self._device.lights == ON_STATE
self.async_write_ha_state()
@property
def name(self):
"""Name of the light."""
return f"{self._device.name} light"
@property
def unique_id(self):
"""Return a unique ID."""
return self._unique_id
@property
def is_on(self):
"""If the switch is currently on or off."""
return self._light_on
def _set_light(self, new_state):
"""Update light state, and causes Home Assistant to correctly update."""
try:
self._device.lights = new_state
except requests.Timeout:
_LOGGER.error("Time out setting %s light to %s", self.entity_id, new_state)
return
self._light_on = new_state == ON_STATE
self._no_updates_until = dt_util.utcnow() + SKIP_UPDATES_DELAY
self.async_write_ha_state()
def turn_on(self, **kwargs):
"""Turn the light on for 30 seconds."""
self._set_light(ON_STATE)
def turn_off(self, **kwargs):
"""Turn the light off."""
self._set_light(OFF_STATE)
|
from homeassistant.components.gree.const import DOMAIN as GREE_DOMAIN
from homeassistant.config_entries import ENTRY_STATE_LOADED, ENTRY_STATE_NOT_LOADED
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_setup_simple(hass):
"""Test gree integration is setup."""
await async_setup_component(hass, GREE_DOMAIN, {})
await hass.async_block_till_done()
# No flows started
assert len(hass.config_entries.flow.async_progress()) == 0
async def test_unload_config_entry(hass):
"""Test that the async_unload_entry works."""
    # As we currently have no configuration, we just need to pass the domain here.
entry = MockConfigEntry(domain=GREE_DOMAIN)
entry.add_to_hass(hass)
with patch(
"homeassistant.components.gree.climate.async_setup_entry",
return_value=True,
) as climate_setup:
assert await async_setup_component(hass, GREE_DOMAIN, {})
await hass.async_block_till_done()
assert len(climate_setup.mock_calls) == 1
assert entry.state == ENTRY_STATE_LOADED
await hass.config_entries.async_unload(entry.entry_id)
assert entry.state == ENTRY_STATE_NOT_LOADED
|
from math import pi, sqrt
from pygal.graph.graph import Graph
from pygal.util import alter, decorate
class SolidGauge(Graph):
def gaugify(self, serie, squares, sq_dimensions, current_square):
serie_node = self.svg.serie(serie)
if self.half_pie:
start_angle = 3 * pi / 2
center = ((current_square[1] * sq_dimensions[0]) -
(sq_dimensions[0] / 2.),
(current_square[0] * sq_dimensions[1]) -
(sq_dimensions[1] / 4))
end_angle = pi / 2
else:
start_angle = 0
center = ((current_square[1] * sq_dimensions[0]) -
(sq_dimensions[0] / 2.),
(current_square[0] * sq_dimensions[1]) -
(sq_dimensions[1] / 2.))
end_angle = 2 * pi
max_value = serie.metadata.get(0, {}).get('max_value', 100)
radius = min([sq_dimensions[0] / 2, sq_dimensions[1] / 2]) * .9
small_radius = radius * serie.inner_radius
self.svg.gauge_background(
serie_node, start_angle, center, radius, small_radius, end_angle,
self.half_pie, self._serie_format(serie, max_value)
)
sum_ = 0
for i, value in enumerate(serie.values):
if value is None:
continue
ratio = min(value, max_value) / max_value
if self.half_pie:
angle = 2 * pi * ratio / 2
else:
angle = 2 * pi * ratio
val = self._format(serie, i)
metadata = serie.metadata.get(i)
gauge_ = decorate(
self.svg, self.svg.node(serie_node['plot'], class_="gauge"),
metadata
)
alter(
self.svg.solid_gauge(
serie_node, gauge_, radius, small_radius, angle,
start_angle, center, val, i, metadata, self.half_pie,
end_angle, self._serie_format(serie, max_value)
), metadata
)
start_angle += angle
sum_ += value
x, y = center
self.svg.node(
serie_node['text_overlay'],
'text',
class_='value gauge-sum',
x=x,
y=y + self.style.value_font_size / 3,
attrib={
'text-anchor': 'middle'
}
).text = self._serie_format(serie, sum_)
def _compute_x_labels(self):
pass
def _compute_y_labels(self):
pass
def _plot(self):
"""Draw all the serie slices"""
squares = self._squares()
sq_dimensions = self.add_squares(squares)
for index, serie in enumerate(self.series):
current_square = self._current_square(squares, index)
self.gaugify(serie, squares, sq_dimensions, current_square)
def _squares(self):
n_series_ = len(self.series)
i = 2
if sqrt(n_series_).is_integer():
_x = int(sqrt(n_series_))
_y = int(sqrt(n_series_))
else:
while i * i < n_series_:
while n_series_ % i == 0:
n_series_ = n_series_ / i
i = i + 1
_y = int(n_series_)
_x = int(len(self.series) / _y)
if len(self.series) == 5:
_x, _y = 2, 3
if abs(_x - _y) > 2:
_sq = 3
while (_x * _y) - 1 < len(self.series):
_x, _y = _sq, _sq
_sq += 1
return (_x, _y)
def _current_square(self, squares, index):
current_square = [1, 1]
steps = index + 1
steps_taken = 0
for i in range(squares[0] * squares[1]):
steps_taken += 1
if steps_taken != steps and steps_taken % squares[0] != 0:
current_square[1] += 1
elif steps_taken != steps and steps_taken % squares[0] == 0:
current_square[1] = 1
current_square[0] += 1
else:
return tuple(current_square)
raise Exception(
'Something went wrong with the current square assignment.'
)
|
import importlib
import logging
import types
from typing import Any, Dict, Optional
import voluptuous as vol
from voluptuous.humanize import humanize_error
from homeassistant import data_entry_flow, requirements
from homeassistant.const import CONF_ID, CONF_NAME, CONF_TYPE
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import HomeAssistantError
from homeassistant.util.decorator import Registry
MULTI_FACTOR_AUTH_MODULES = Registry()
MULTI_FACTOR_AUTH_MODULE_SCHEMA = vol.Schema(
{
vol.Required(CONF_TYPE): str,
vol.Optional(CONF_NAME): str,
# Specify ID if you have two mfa auth module for same type.
vol.Optional(CONF_ID): str,
},
extra=vol.ALLOW_EXTRA,
)
DATA_REQS = "mfa_auth_module_reqs_processed"
_LOGGER = logging.getLogger(__name__)
class MultiFactorAuthModule:
"""Multi-factor Auth Module of validation function."""
DEFAULT_TITLE = "Unnamed auth module"
MAX_RETRY_TIME = 3
def __init__(self, hass: HomeAssistant, config: Dict[str, Any]) -> None:
"""Initialize an auth module."""
self.hass = hass
self.config = config
@property
def id(self) -> str:
"""Return id of the auth module.
Default is same as type
"""
return self.config.get(CONF_ID, self.type)
@property
def type(self) -> str:
"""Return type of the module."""
return self.config[CONF_TYPE] # type: ignore
@property
def name(self) -> str:
"""Return the name of the auth module."""
return self.config.get(CONF_NAME, self.DEFAULT_TITLE)
# Implement by extending class
@property
def input_schema(self) -> vol.Schema:
"""Return a voluptuous schema to define mfa auth module's input."""
raise NotImplementedError
async def async_setup_flow(self, user_id: str) -> "SetupFlow":
"""Return a data entry flow handler for setup module.
Mfa module should extend SetupFlow
"""
raise NotImplementedError
async def async_setup_user(self, user_id: str, setup_data: Any) -> Any:
"""Set up user for mfa auth module."""
raise NotImplementedError
async def async_depose_user(self, user_id: str) -> None:
"""Remove user from mfa module."""
raise NotImplementedError
async def async_is_user_setup(self, user_id: str) -> bool:
"""Return whether user is setup."""
raise NotImplementedError
async def async_validate(self, user_id: str, user_input: Dict[str, Any]) -> bool:
"""Return True if validation passed."""
raise NotImplementedError
class SetupFlow(data_entry_flow.FlowHandler):
"""Handler for the setup flow."""
def __init__(
self, auth_module: MultiFactorAuthModule, setup_schema: vol.Schema, user_id: str
) -> None:
"""Initialize the setup flow."""
self._auth_module = auth_module
self._setup_schema = setup_schema
self._user_id = user_id
async def async_step_init(
self, user_input: Optional[Dict[str, str]] = None
) -> Dict[str, Any]:
"""Handle the first step of setup flow.
Return self.async_show_form(step_id='init') if user_input is None.
Return self.async_create_entry(data={'result': result}) if finish.
"""
errors: Dict[str, str] = {}
if user_input:
result = await self._auth_module.async_setup_user(self._user_id, user_input)
return self.async_create_entry(
title=self._auth_module.name, data={"result": result}
)
return self.async_show_form(
step_id="init", data_schema=self._setup_schema, errors=errors
)
async def auth_mfa_module_from_config(
hass: HomeAssistant, config: Dict[str, Any]
) -> MultiFactorAuthModule:
"""Initialize an auth module from a config."""
module_name = config[CONF_TYPE]
module = await _load_mfa_module(hass, module_name)
try:
config = module.CONFIG_SCHEMA(config) # type: ignore
except vol.Invalid as err:
_LOGGER.error(
"Invalid configuration for multi-factor module %s: %s",
module_name,
humanize_error(config, err),
)
raise
return MULTI_FACTOR_AUTH_MODULES[module_name](hass, config) # type: ignore
async def _load_mfa_module(hass: HomeAssistant, module_name: str) -> types.ModuleType:
"""Load an mfa auth module."""
module_path = f"homeassistant.auth.mfa_modules.{module_name}"
try:
module = importlib.import_module(module_path)
except ImportError as err:
_LOGGER.error("Unable to load mfa module %s: %s", module_name, err)
raise HomeAssistantError(
f"Unable to load mfa module {module_name}: {err}"
) from err
if hass.config.skip_pip or not hasattr(module, "REQUIREMENTS"):
return module
processed = hass.data.get(DATA_REQS)
if processed and module_name in processed:
return module
processed = hass.data[DATA_REQS] = set()
# https://github.com/python/mypy/issues/1424
await requirements.async_process_requirements(
hass, module_path, module.REQUIREMENTS # type: ignore
)
processed.add(module_name)
return module
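# --- Illustrative sketch, not part of the original module ----------------------
# MultiFactorAuthModule is meant to be subclassed and registered in
# MULTI_FACTOR_AUTH_MODULES. The hypothetical module below validates a single
# static code and exists purely to show the extension contract; the real
# modules (totp, notify, ...) live in homeassistant/auth/mfa_modules/.
@MULTI_FACTOR_AUTH_MODULES.register("example_static_code")
class ExampleStaticCodeAuthModule(MultiFactorAuthModule):
    """Hypothetical MFA module that accepts one hard-coded code."""

    DEFAULT_TITLE = "Example static code"

    @property
    def input_schema(self) -> vol.Schema:
        """Schema presented to the user during validation."""
        return vol.Schema({vol.Required("code"): str})

    async def async_setup_flow(self, user_id: str) -> "SetupFlow":
        """Reuse the generic SetupFlow with the same schema."""
        return SetupFlow(self, self.input_schema, user_id)

    async def async_setup_user(self, user_id: str, setup_data: Any) -> Any:
        """Nothing to store for this example."""
        return None

    async def async_depose_user(self, user_id: str) -> None:
        """Nothing to remove for this example."""

    async def async_is_user_setup(self, user_id: str) -> bool:
        """Every user counts as set up in this example."""
        return True

    async def async_validate(self, user_id: str, user_input: Dict[str, Any]) -> bool:
        """Accept exactly one static code."""
        return user_input.get("code") == "1234"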
|
import argparse
import uuid
from IPython.display import display, HTML, Javascript
def _tensorboard_magic(line):
"""Line magic function.
Makes an AJAX call to the Jupyter TensorBoard server extension and outputs
an IFrame displaying the TensorBoard instance.
"""
parser = argparse.ArgumentParser()
parser.add_argument('--logdir', default='/kaggle/working')
args = parser.parse_args(line.split())
iframe_id = 'tensorboard-' + str(uuid.uuid4())
html = """
<!-- JUPYTER_TENSORBOARD_TEST_MARKER -->
<script>
const req = {
method: 'POST',
contentType: 'application/json',
body: JSON.stringify({ 'logdir': '%s' }),
headers: { 'Content-Type': 'application/json' }
};
const baseUrl = Jupyter.notebook.base_url;
fetch(baseUrl + 'api/tensorboard', req)
.then(res => res.json())
.then(res => {
const iframe = document.getElementById('%s');
iframe.src = baseUrl + 'tensorboard/' + res.name;
iframe.style.display = 'block';
});
</script>
<iframe
id="%s"
style="width: 100%%; height: 620px; display: none;"
frameBorder="0">
</iframe>
""" % (args.logdir, iframe_id, iframe_id)
display(HTML(html))
def load_ipython_extension(ipython):
"""IPython extension entry point."""
ipython.register_magic_function(
_tensorboard_magic,
magic_kind='line',
magic_name='tensorboard',
)
|
import logging
import jinja2
import six
import traceback
from kalliope.core.ConfigurationManager.SettingLoader import SettingLoader
from kalliope.core.Cortex import Cortex
from kalliope.core.Utils.Utils import Utils
logging.basicConfig()
logger = logging.getLogger("kalliope")
class NeuronParameterNotAvailable(Exception):
pass
class NeuronLauncher:
"""
Static Class to launch Neurons
"""
def __init__(self):
pass
@classmethod
def launch_neuron(cls, neuron):
"""
Start a neuron plugin
:param neuron: neuron object
:type neuron: Neuron
:return:
"""
logger.debug("Run neuron: \"%s\"" % (neuron.__str__()))
settings = cls.load_settings()
neuron_folder = None
if settings.resources:
neuron_folder = settings.resources.neuron_folder
return Utils.get_dynamic_class_instantiation(package_name="neurons",
module_name=neuron.name,
parameters=neuron.parameters,
resources_dir=neuron_folder)
@classmethod
def start_neuron(cls, neuron, parameters_dict=dict()):
"""
        Start the received neuron.
        Replace parameters with those from parameters_dict when the neuron expects them.
        :param neuron: Neuron object to run
        :param parameters_dict: dict of parameters to load into the neuron if it expects them
        :return: the instantiated neuron, or None if an error was detected
"""
if neuron.parameters is not None:
try:
neuron.parameters = cls._replace_brackets_by_loaded_parameter(neuron.parameters, parameters_dict)
except NeuronParameterNotAvailable:
Utils.print_danger("Missing parameter in neuron %s. Execution skipped" % neuron.name)
return None
try:
instantiated_neuron = NeuronLauncher.launch_neuron(neuron)
except Exception as e:
Utils.print_danger("ERROR: Fail to execute neuron '%s'. "
'%s' ". -> Execution skipped, run with debug flag for more information" % (neuron.name, e.message))
logger.debug(traceback.format_exc())
return None
return instantiated_neuron
@classmethod
def _replace_brackets_by_loaded_parameter(cls, neuron_parameters, loaded_parameters=dict()):
"""
        Receive a value (which can be a str, dict or list) and replace every double-brace bracket
        with the corresponding value from the loaded_parameters dict.
        This method calls itself recursively until all values have been replaced.
:param neuron_parameters: value to instantiate. Str or dict or list
:param loaded_parameters: dict of parameters
"""
logger.debug("[NeuronLauncher] replacing brackets from %s, using %s" % (neuron_parameters, loaded_parameters))
if isinstance(neuron_parameters, str) or isinstance(neuron_parameters, six.text_type):
# replace bracket parameter only if the str contains brackets
if Utils.is_containing_bracket(neuron_parameters):
settings = cls.load_settings()
# Priority to memory over the variables
loaded_parameters.update(settings.variables)
memory_dict = dict()
# add variables from the short term memory to the list of loaded parameters that can be used in a template
                # the final dict is added under the key "kalliope_memory" so it does not override existing keys loaded from the order
memory_dict["kalliope_memory"] = Cortex.get_memory()
loaded_parameters.update(memory_dict)
# check that the parameter to replace is available in the loaded_parameters dict
if cls._neuron_parameters_are_available_in_loaded_parameters(neuron_parameters, loaded_parameters):
# add parameters from global variable into the final loaded parameter dict
neuron_parameters = jinja2.Template(neuron_parameters).render(loaded_parameters)
neuron_parameters = Utils.encode_text_utf8(neuron_parameters)
return str(neuron_parameters)
else:
raise NeuronParameterNotAvailable
return neuron_parameters
if isinstance(neuron_parameters, dict):
returned_dict = dict()
for key, value in neuron_parameters.items():
# following keys are reserved by kalliope core
if key in "say_template" or key in "file_template" or key in "kalliope_memory" \
or key in "from_answer_link":
returned_dict[key] = value
else:
returned_dict[key] = cls._replace_brackets_by_loaded_parameter(value, loaded_parameters)
return returned_dict
if isinstance(neuron_parameters, list):
returned_list = list()
for el in neuron_parameters:
templated_value = cls._replace_brackets_by_loaded_parameter(el, loaded_parameters)
returned_list.append(templated_value)
return returned_list
# in all other case (boolean or int for example) we return the value as it
return neuron_parameters
@staticmethod
def _neuron_parameters_are_available_in_loaded_parameters(string_parameters, loaded_parameters):
"""
Check that all parameters in brackets are available in the loaded_parameters dict
E.g:
string_parameters = "this is a {{ parameter1 }}"
Will return true if the loaded_parameters looks like the following
loaded_parameters { "parameter1": "a value"}
:param string_parameters: The string that contains one or more parameters in brace brackets
:param loaded_parameters: Dict of parameter
:return: True if all parameters in brackets have an existing key in loaded_parameters dict
"""
list_parameters_with_brackets = Utils.find_all_matching_brackets(string_parameters)
# remove brackets to keep only the parameter name
for parameter_with_brackets in list_parameters_with_brackets:
parameter = Utils.remove_spaces_in_brackets(parameter_with_brackets)
parameter = parameter.replace("{{", "").replace("}}", "")
if loaded_parameters is None or parameter not in loaded_parameters:
Utils.print_danger("The parameter %s is not available in the order" % str(parameter))
return False
return True
@staticmethod
def load_settings():
"""
Return loaded kalliope settings
:return: setting object
"""
sl = SettingLoader()
return sl.settings
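# --- Illustrative sketch, not part of the original module ----------------------
# The bracket replacement above ultimately feeds the parameter string to
# jinja2.Template(...).render(loaded_parameters). The standalone snippet below
# shows that underlying mechanism with made-up values; it does not go through
# kalliope's settings or memory handling.
if __name__ == '__main__':
    demo_parameters = {"parameter1": "a value"}
    print(jinja2.Template("this is a {{ parameter1 }}").render(demo_parameters))
    # -> "this is a a value"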
|
import typing
from pathlib import Path
import keras
import pandas as pd
import matchzoo
_url = "https://firebasestorage.googleapis.com/v0/b/mtl-sentence" \
"-representations.appspot.com/o/data%2FQQP.zip?alt=media&" \
"token=700c6acf-160d-4d89-81d1-de4191d02cb5"
def load_data(
stage: str = 'train',
task: str = 'classification',
return_classes: bool = False,
) -> typing.Union[matchzoo.DataPack, tuple]:
"""
Load QuoraQP data.
:param stage: One of `train`, `dev`, and `test`.
:param task: Could be one of `ranking`, `classification` or a
:class:`matchzoo.engine.BaseTask` instance.
:param return_classes: Whether return classes for classification task.
:return: A DataPack if `ranking`, a tuple of (DataPack, classes) if
`classification`.
"""
if stage not in ('train', 'dev', 'test'):
raise ValueError(f"{stage} is not a valid stage."
f"Must be one of `train`, `dev`, and `test`.")
data_root = _download_data()
file_path = data_root.joinpath(f"{stage}.tsv")
data_pack = _read_data(file_path, stage)
if task == 'ranking':
task = matchzoo.tasks.Ranking()
elif task == 'classification':
task = matchzoo.tasks.Classification()
if isinstance(task, matchzoo.tasks.Ranking):
return data_pack
elif isinstance(task, matchzoo.tasks.Classification):
if stage != 'test':
data_pack.one_hot_encode_label(num_classes=2, inplace=True)
if return_classes:
return data_pack, [False, True]
else:
return data_pack
else:
raise ValueError(f"{task} is not a valid task.")
def _download_data():
ref_path = keras.utils.data_utils.get_file(
'quora_qp', _url, extract=True,
cache_dir=matchzoo.USER_DATA_DIR,
cache_subdir='quora_qp'
)
return Path(ref_path).parent.joinpath('QQP')
def _read_data(path, stage):
data = pd.read_csv(path, sep='\t', error_bad_lines=False)
data = data.dropna(axis=0, how='any').reset_index(drop=True)
if stage in ['train', 'dev']:
df = pd.DataFrame({
'id_left': data['qid1'],
'id_right': data['qid2'],
'text_left': data['question1'],
'text_right': data['question2'],
'label': data['is_duplicate'].astype(int)
})
else:
df = pd.DataFrame({
'text_left': data['question1'],
'text_right': data['question2']
})
return matchzoo.pack(df)
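# --- Illustrative usage sketch, not part of the original module ----------------
# Calling load_data() downloads and unpacks the QQP archive into
# matchzoo.USER_DATA_DIR on first use, so the example below needs network
# access.
if __name__ == '__main__':
    train_pack, classes = load_data(stage='train', task='classification',
                                    return_classes=True)
    print(type(train_pack).__name__, classes)   # classes is [False, True]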
|
import numpy as np
import pandas as pd
from scipy.stats import norm
class CohensDCalculator(object):
def get_cohens_d_df(self, cat_X, ncat_X, orig_cat_X, orig_ncat_X, correction_method=None):
empty_cat_X_smoothing_doc = np.zeros((1, cat_X.shape[1]))
empty_ncat_X_smoothing_doc = np.zeros((1, ncat_X.shape[1]))
smoothed_cat_X = np.vstack([empty_cat_X_smoothing_doc, cat_X])
smoothed_ncat_X = np.vstack([empty_ncat_X_smoothing_doc, ncat_X])
n1, n2 = float(smoothed_cat_X.shape[0]), float(smoothed_ncat_X.shape[0])
n = n1 + n2
#print(cat_X.shape, type(cat_X))
m1 = cat_X.mean(axis=0).A1 if type(cat_X) == np.matrix else cat_X.mean(axis=0)
m2 = ncat_X.mean(axis=0).A1 if type(ncat_X) == np.matrix else ncat_X.mean(axis=0)
        v1 = smoothed_cat_X.var(axis=0).A1 if type(smoothed_cat_X) == np.matrix else smoothed_cat_X.var(axis=0)
        v2 = smoothed_ncat_X.var(axis=0).A1 if type(smoothed_ncat_X) == np.matrix else smoothed_ncat_X.var(axis=0)
s_pooled = np.sqrt(((n2 - 1) * v2 + (n1 - 1) * v1) / (n - 2.))
cohens_d = (m1 - m2) / s_pooled
cohens_d_se = np.sqrt(((n - 1.) / (n - 3)) * (4. / n) * (1 + np.square(cohens_d) / 8.))
cohens_d_z = cohens_d / cohens_d_se
cohens_d_p = norm.sf(cohens_d_z)
hedges_r = cohens_d * (1 - 3. / ((4. * (n - 2)) - 1))
hedges_r_se = np.sqrt(n / (n1 * n2) + np.square(hedges_r) / (n - 2.))
hedges_r_z = hedges_r / hedges_r_se
hedges_r_p = norm.sf(hedges_r_z)
score_df = pd.DataFrame({
'cohens_d': cohens_d,
'cohens_d_se': cohens_d_se,
'cohens_d_z': cohens_d_z,
'cohens_d_p': cohens_d_p,
'hedges_r': hedges_r,
'hedges_r_se': hedges_r_se,
'hedges_r_z': hedges_r_z,
'hedges_r_p': hedges_r_p,
'm1': m1,
'm2': m2,
'count1': orig_cat_X.sum(axis=0).A1,
'count2': orig_ncat_X.sum(axis=0).A1,
'docs1': (orig_cat_X > 0).sum(axis=0).A1,
'docs2': (orig_ncat_X > 0).sum(axis=0).A1,
}).fillna(0)
if correction_method is not None:
from statsmodels.stats.multitest import multipletests
score_df['hedges_r_p_corr'] = 0.5
for method in ['cohens_d', 'hedges_r']:
score_df[method + '_p_corr'] = 0.5
pvals = score_df.loc[(score_df['m1'] != 0) | (score_df['m2'] != 0), method + '_p']
                pvals = np.min(np.array([pvals, 1. - pvals]), axis=0) * 2.
score_df.loc[(score_df['m1'] != 0) | (score_df['m2'] != 0), method + '_p_corr'] = (
multipletests(pvals, method=correction_method)[1]
)
return score_df
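# --- Illustrative usage sketch, not part of the original module ----------------
# A tiny made-up term-document count example. np.matrix inputs are used so the
# .A1 accesses above work; rows are documents, columns are terms.
if __name__ == '__main__':
    cat_X = np.matrix([[2, 0, 1],
                       [1, 1, 0]])       # documents in the category
    ncat_X = np.matrix([[0, 3, 1],
                        [0, 2, 2],
                        [1, 1, 0]])      # documents outside the category
    calc = CohensDCalculator()
    print(calc.get_cohens_d_df(cat_X, ncat_X, cat_X, ncat_X)[['cohens_d', 'cohens_d_p']])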
|
from datetime import timedelta
import logging
import metno
import voluptuous as vol
from homeassistant.components.air_quality import PLATFORM_SCHEMA, AirQualityEntity
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE, CONF_NAME
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = (
"Air quality from "
"https://luftkvalitet.miljostatus.no/, "
"delivered by the Norwegian Meteorological Institute."
)
# https://api.met.no/license_data.html
CONF_FORECAST = "forecast"
DEFAULT_FORECAST = 0
DEFAULT_NAME = "Air quality Norway"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_FORECAST, default=DEFAULT_FORECAST): vol.Coerce(int),
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
SCAN_INTERVAL = timedelta(minutes=5)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the air_quality norway sensor."""
forecast = config.get(CONF_FORECAST)
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
name = config.get(CONF_NAME)
if None in (latitude, longitude):
_LOGGER.error("Latitude or longitude not set in Home Assistant config")
return
coordinates = {"lat": str(latitude), "lon": str(longitude)}
async_add_entities(
[AirSensor(name, coordinates, forecast, async_get_clientsession(hass))], True
)
def round_state(func):
"""Round state."""
def _decorator(self):
res = func(self)
if isinstance(res, float):
return round(res, 2)
return res
return _decorator
class AirSensor(AirQualityEntity):
"""Representation of an Yr.no sensor."""
def __init__(self, name, coordinates, forecast, session):
"""Initialize the sensor."""
self._name = name
self._api = metno.AirQualityData(coordinates, forecast, session)
@property
def attribution(self) -> str:
"""Return the attribution."""
return ATTRIBUTION
@property
def device_state_attributes(self) -> dict:
"""Return other details about the sensor state."""
return {
"level": self._api.data.get("level"),
"location": self._api.data.get("location"),
}
@property
def name(self) -> str:
"""Return the name of the sensor."""
return self._name
@property
@round_state
def air_quality_index(self):
"""Return the Air Quality Index (AQI)."""
return self._api.data.get("aqi")
@property
@round_state
def nitrogen_dioxide(self):
"""Return the NO2 (nitrogen dioxide) level."""
return self._api.data.get("no2_concentration")
@property
@round_state
def ozone(self):
"""Return the O3 (ozone) level."""
return self._api.data.get("o3_concentration")
@property
@round_state
def particulate_matter_2_5(self):
"""Return the particulate matter 2.5 level."""
return self._api.data.get("pm25_concentration")
@property
@round_state
def particulate_matter_10(self):
"""Return the particulate matter 10 level."""
return self._api.data.get("pm10_concentration")
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._api.units.get("pm25_concentration")
async def async_update(self) -> None:
"""Update the sensor."""
await self._api.update()
|
import asyncio
import json
import logging
from aiohttp import client_exceptions
from pyControl4.account import C4Account
from pyControl4.director import C4Director
from pyControl4.error_handling import BadCredentials
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_SCAN_INTERVAL,
CONF_TOKEN,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers import aiohttp_client, device_registry as dr
from homeassistant.helpers.update_coordinator import (
CoordinatorEntity,
DataUpdateCoordinator,
)
from .const import (
CONF_ACCOUNT,
CONF_CONFIG_LISTENER,
CONF_CONTROLLER_UNIQUE_ID,
CONF_DIRECTOR,
CONF_DIRECTOR_ALL_ITEMS,
CONF_DIRECTOR_MODEL,
CONF_DIRECTOR_SW_VERSION,
CONF_DIRECTOR_TOKEN_EXPIRATION,
DEFAULT_SCAN_INTERVAL,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
PLATFORMS = ["light"]
async def async_setup(hass: HomeAssistant, config: dict) -> bool:
"""Stub to allow setting up this component.
Configuration through YAML is not supported at this time.
"""
hass.data.setdefault(DOMAIN, {})
return True
async def async_setup_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Set up Control4 from a config entry."""
entry_data = hass.data[DOMAIN].setdefault(entry.entry_id, {})
account_session = aiohttp_client.async_get_clientsession(hass)
config = entry.data
account = C4Account(config[CONF_USERNAME], config[CONF_PASSWORD], account_session)
try:
await account.getAccountBearerToken()
except client_exceptions.ClientError as exception:
_LOGGER.error("Error connecting to Control4 account API: %s", exception)
raise ConfigEntryNotReady from exception
except BadCredentials as exception:
_LOGGER.error(
"Error authenticating with Control4 account API, incorrect username or password: %s",
exception,
)
return False
entry_data[CONF_ACCOUNT] = account
controller_unique_id = config[CONF_CONTROLLER_UNIQUE_ID]
entry_data[CONF_CONTROLLER_UNIQUE_ID] = controller_unique_id
director_token_dict = await account.getDirectorBearerToken(controller_unique_id)
director_session = aiohttp_client.async_get_clientsession(hass, verify_ssl=False)
director = C4Director(
config[CONF_HOST], director_token_dict[CONF_TOKEN], director_session
)
entry_data[CONF_DIRECTOR] = director
entry_data[CONF_DIRECTOR_TOKEN_EXPIRATION] = director_token_dict["token_expiration"]
# Add Control4 controller to device registry
controller_href = (await account.getAccountControllers())["href"]
entry_data[CONF_DIRECTOR_SW_VERSION] = await account.getControllerOSVersion(
controller_href
)
_, model, mac_address = controller_unique_id.split("_", 3)
entry_data[CONF_DIRECTOR_MODEL] = model.upper()
device_registry = await dr.async_get_registry(hass)
device_registry.async_get_or_create(
config_entry_id=entry.entry_id,
identifiers={(DOMAIN, controller_unique_id)},
connections={(dr.CONNECTION_NETWORK_MAC, mac_address)},
manufacturer="Control4",
name=controller_unique_id,
model=entry_data[CONF_DIRECTOR_MODEL],
sw_version=entry_data[CONF_DIRECTOR_SW_VERSION],
)
# Store all items found on controller for platforms to use
director_all_items = await director.getAllItemInfo()
director_all_items = json.loads(director_all_items)
entry_data[CONF_DIRECTOR_ALL_ITEMS] = director_all_items
# Load options from config entry
entry_data[CONF_SCAN_INTERVAL] = entry.options.get(
CONF_SCAN_INTERVAL, DEFAULT_SCAN_INTERVAL
)
entry_data[CONF_CONFIG_LISTENER] = entry.add_update_listener(update_listener)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(entry, component)
)
return True
async def update_listener(hass, config_entry):
"""Update when config_entry options update."""
_LOGGER.debug("Config entry was updated, rerunning setup")
await hass.config_entries.async_reload(config_entry.entry_id)
async def async_unload_entry(hass: HomeAssistant, entry: ConfigEntry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(entry, component)
for component in PLATFORMS
]
)
)
hass.data[DOMAIN][entry.entry_id][CONF_CONFIG_LISTENER]()
if unload_ok:
hass.data[DOMAIN].pop(entry.entry_id)
_LOGGER.debug("Unloaded entry for %s", entry.entry_id)
return unload_ok
async def get_items_of_category(hass: HomeAssistant, entry: ConfigEntry, category: str):
"""Return a list of all Control4 items with the specified category."""
director_all_items = hass.data[DOMAIN][entry.entry_id][CONF_DIRECTOR_ALL_ITEMS]
return_list = []
for item in director_all_items:
if "categories" in item and category in item["categories"]:
return_list.append(item)
return return_list
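# A minimal sketch of how a platform module could consume this helper; the
# category string "lights" is an illustrative assumption, not defined in this
# file:
#   items = await get_items_of_category(hass, entry, "lights")
#   for item in items:
#       _LOGGER.debug("Found Control4 item: %s", item)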
class Control4Entity(CoordinatorEntity):
"""Base entity for Control4."""
def __init__(
self,
entry_data: dict,
entry: ConfigEntry,
coordinator: DataUpdateCoordinator,
name: str,
idx: int,
device_name: str,
device_manufacturer: str,
device_model: str,
device_id: int,
):
"""Initialize a Control4 entity."""
super().__init__(coordinator)
self.entry = entry
self.entry_data = entry_data
self._name = name
self._idx = idx
self._controller_unique_id = entry_data[CONF_CONTROLLER_UNIQUE_ID]
self._device_name = device_name
self._device_manufacturer = device_manufacturer
self._device_model = device_model
self._device_id = device_id
@property
def name(self):
"""Return name of entity."""
return self._name
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._idx
@property
def device_info(self):
"""Return info of parent Control4 device of entity."""
return {
"config_entry_id": self.entry.entry_id,
"identifiers": {(DOMAIN, self._device_id)},
"name": self._device_name,
"manufacturer": self._device_manufacturer,
"model": self._device_model,
"via_device": (DOMAIN, self._controller_unique_id),
}
|
import logging
import random
from flux_led import BulbScanner, WifiLedBulb
import voluptuous as vol
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_HS_COLOR,
ATTR_WHITE_VALUE,
EFFECT_COLORLOOP,
EFFECT_RANDOM,
PLATFORM_SCHEMA,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
SUPPORT_COLOR_TEMP,
SUPPORT_EFFECT,
SUPPORT_WHITE_VALUE,
LightEntity,
)
from homeassistant.const import ATTR_MODE, CONF_DEVICES, CONF_NAME, CONF_PROTOCOL
import homeassistant.helpers.config_validation as cv
import homeassistant.util.color as color_util
_LOGGER = logging.getLogger(__name__)
CONF_AUTOMATIC_ADD = "automatic_add"
CONF_CUSTOM_EFFECT = "custom_effect"
CONF_COLORS = "colors"
CONF_SPEED_PCT = "speed_pct"
CONF_TRANSITION = "transition"
DOMAIN = "flux_led"
SUPPORT_FLUX_LED = SUPPORT_BRIGHTNESS | SUPPORT_EFFECT | SUPPORT_COLOR
MODE_RGB = "rgb"
MODE_RGBW = "rgbw"
# This mode enables white value to be controlled by brightness.
# RGB value is ignored when this mode is specified.
MODE_WHITE = "w"
# Color temperature cut-off (in mireds) between the two flux_led special
# white modes: values above it select warm white, below it cool white.
COLOR_TEMP_WARM_VS_COLD_WHITE_CUT_OFF = 285
# List of supported effects which aren't already declared in LIGHT
EFFECT_RED_FADE = "red_fade"
EFFECT_GREEN_FADE = "green_fade"
EFFECT_BLUE_FADE = "blue_fade"
EFFECT_YELLOW_FADE = "yellow_fade"
EFFECT_CYAN_FADE = "cyan_fade"
EFFECT_PURPLE_FADE = "purple_fade"
EFFECT_WHITE_FADE = "white_fade"
EFFECT_RED_GREEN_CROSS_FADE = "rg_cross_fade"
EFFECT_RED_BLUE_CROSS_FADE = "rb_cross_fade"
EFFECT_GREEN_BLUE_CROSS_FADE = "gb_cross_fade"
EFFECT_COLORSTROBE = "colorstrobe"
EFFECT_RED_STROBE = "red_strobe"
EFFECT_GREEN_STROBE = "green_strobe"
EFFECT_BLUE_STROBE = "blue_strobe"
EFFECT_YELLOW_STROBE = "yellow_strobe"
EFFECT_CYAN_STROBE = "cyan_strobe"
EFFECT_PURPLE_STROBE = "purple_strobe"
EFFECT_WHITE_STROBE = "white_strobe"
EFFECT_COLORJUMP = "colorjump"
EFFECT_CUSTOM = "custom"
EFFECT_MAP = {
EFFECT_COLORLOOP: 0x25,
EFFECT_RED_FADE: 0x26,
EFFECT_GREEN_FADE: 0x27,
EFFECT_BLUE_FADE: 0x28,
EFFECT_YELLOW_FADE: 0x29,
EFFECT_CYAN_FADE: 0x2A,
EFFECT_PURPLE_FADE: 0x2B,
EFFECT_WHITE_FADE: 0x2C,
EFFECT_RED_GREEN_CROSS_FADE: 0x2D,
EFFECT_RED_BLUE_CROSS_FADE: 0x2E,
EFFECT_GREEN_BLUE_CROSS_FADE: 0x2F,
EFFECT_COLORSTROBE: 0x30,
EFFECT_RED_STROBE: 0x31,
EFFECT_GREEN_STROBE: 0x32,
EFFECT_BLUE_STROBE: 0x33,
EFFECT_YELLOW_STROBE: 0x34,
EFFECT_CYAN_STROBE: 0x35,
EFFECT_PURPLE_STROBE: 0x36,
EFFECT_WHITE_STROBE: 0x37,
EFFECT_COLORJUMP: 0x38,
}
EFFECT_CUSTOM_CODE = 0x60
TRANSITION_GRADUAL = "gradual"
TRANSITION_JUMP = "jump"
TRANSITION_STROBE = "strobe"
FLUX_EFFECT_LIST = sorted(list(EFFECT_MAP)) + [EFFECT_RANDOM]
CUSTOM_EFFECT_SCHEMA = vol.Schema(
{
vol.Required(CONF_COLORS): vol.All(
cv.ensure_list,
vol.Length(min=1, max=16),
[
vol.All(
vol.ExactSequence((cv.byte, cv.byte, cv.byte)), vol.Coerce(tuple)
)
],
),
vol.Optional(CONF_SPEED_PCT, default=50): vol.All(
vol.Range(min=0, max=100), vol.Coerce(int)
),
vol.Optional(CONF_TRANSITION, default=TRANSITION_GRADUAL): vol.All(
cv.string, vol.In([TRANSITION_GRADUAL, TRANSITION_JUMP, TRANSITION_STROBE])
),
}
)
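# A minimal sketch (illustrative values) of a custom effect mapping that
# CUSTOM_EFFECT_SCHEMA accepts: up to 16 RGB tuples, a speed percentage and a
# transition name:
#   CUSTOM_EFFECT_SCHEMA(
#       {
#           "colors": [(255, 0, 0), (0, 0, 255)],
#           "speed_pct": 30,
#           "transition": "jump",
#       }
#   )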
DEVICE_SCHEMA = vol.Schema(
{
vol.Optional(CONF_NAME): cv.string,
vol.Optional(ATTR_MODE, default=MODE_RGBW): vol.All(
cv.string, vol.In([MODE_RGBW, MODE_RGB, MODE_WHITE])
),
vol.Optional(CONF_PROTOCOL): vol.All(cv.string, vol.In(["ledenet"])),
vol.Optional(CONF_CUSTOM_EFFECT): CUSTOM_EFFECT_SCHEMA,
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_DEVICES, default={}): {cv.string: DEVICE_SCHEMA},
vol.Optional(CONF_AUTOMATIC_ADD, default=False): cv.boolean,
}
)
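# A minimal sketch (illustrative values) of the platform configuration this
# schema validates once Home Assistant has turned the YAML into a dict:
#   PLATFORM_SCHEMA(
#       {
#           "platform": DOMAIN,
#           "automatic_add": True,
#           "devices": {
#               "192.168.1.10": {"name": "Desk strip", "mode": "rgbw"},
#           },
#       }
#   )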
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Flux lights."""
lights = []
light_ips = []
for ipaddr, device_config in config.get(CONF_DEVICES, {}).items():
device = {}
device["name"] = device_config[CONF_NAME]
device["ipaddr"] = ipaddr
device[CONF_PROTOCOL] = device_config.get(CONF_PROTOCOL)
device[ATTR_MODE] = device_config[ATTR_MODE]
device[CONF_CUSTOM_EFFECT] = device_config.get(CONF_CUSTOM_EFFECT)
light = FluxLight(device)
lights.append(light)
light_ips.append(ipaddr)
if not config.get(CONF_AUTOMATIC_ADD, False):
add_entities(lights, True)
return
# Find the bulbs on the LAN
scanner = BulbScanner()
scanner.scan(timeout=10)
for device in scanner.getBulbInfo():
ipaddr = device["ipaddr"]
if ipaddr in light_ips:
continue
device["name"] = f"{device['id']} {ipaddr}"
device[ATTR_MODE] = None
device[CONF_PROTOCOL] = None
device[CONF_CUSTOM_EFFECT] = None
light = FluxLight(device)
lights.append(light)
add_entities(lights, True)
class FluxLight(LightEntity):
"""Representation of a Flux light."""
def __init__(self, device):
"""Initialize the light."""
self._name = device["name"]
self._ipaddr = device["ipaddr"]
self._protocol = device[CONF_PROTOCOL]
self._mode = device[ATTR_MODE]
self._custom_effect = device[CONF_CUSTOM_EFFECT]
self._bulb = None
self._error_reported = False
def _connect(self):
"""Connect to Flux light."""
self._bulb = WifiLedBulb(self._ipaddr, timeout=5)
if self._protocol:
self._bulb.setProtocol(self._protocol)
# After the bulb object is created, its status is updated. We can
# now set the correct mode if it was not explicitly defined.
if not self._mode:
if self._bulb.rgbwcapable:
self._mode = MODE_RGBW
else:
self._mode = MODE_RGB
def _disconnect(self):
"""Disconnect from Flux light."""
self._bulb = None
@property
def available(self) -> bool:
"""Return True if entity is available."""
return self._bulb is not None
@property
def name(self):
"""Return the name of the device if any."""
return self._name
@property
def is_on(self):
"""Return true if device is on."""
return self._bulb.isOn()
@property
def brightness(self):
"""Return the brightness of this light between 0..255."""
if self._mode == MODE_WHITE:
return self.white_value
return self._bulb.brightness
@property
def hs_color(self):
"""Return the color property."""
return color_util.color_RGB_to_hs(*self._bulb.getRgb())
@property
def supported_features(self):
"""Flag supported features."""
if self._mode == MODE_RGBW:
return SUPPORT_FLUX_LED | SUPPORT_WHITE_VALUE | SUPPORT_COLOR_TEMP
if self._mode == MODE_WHITE:
return SUPPORT_BRIGHTNESS
return SUPPORT_FLUX_LED
@property
def white_value(self):
"""Return the white value of this light between 0..255."""
return self._bulb.getRgbw()[3]
@property
def effect_list(self):
"""Return the list of supported effects."""
if self._custom_effect:
return FLUX_EFFECT_LIST + [EFFECT_CUSTOM]
return FLUX_EFFECT_LIST
@property
def effect(self):
"""Return the current effect."""
current_mode = self._bulb.raw_state[3]
if current_mode == EFFECT_CUSTOM_CODE:
return EFFECT_CUSTOM
for effect, code in EFFECT_MAP.items():
if current_mode == code:
return effect
return None
def turn_on(self, **kwargs):
"""Turn the specified or all lights on."""
if not self.is_on:
self._bulb.turnOn()
hs_color = kwargs.get(ATTR_HS_COLOR)
if hs_color:
rgb = color_util.color_hs_to_RGB(*hs_color)
else:
rgb = None
brightness = kwargs.get(ATTR_BRIGHTNESS)
effect = kwargs.get(ATTR_EFFECT)
white = kwargs.get(ATTR_WHITE_VALUE)
color_temp = kwargs.get(ATTR_COLOR_TEMP)
# handle special modes
if color_temp is not None:
if brightness is None:
brightness = self.brightness
if color_temp > COLOR_TEMP_WARM_VS_COLD_WHITE_CUT_OFF:
self._bulb.setRgbw(w=brightness)
else:
self._bulb.setRgbw(w2=brightness)
return
# Show warning if effect set with rgb, brightness, or white level
if effect and (brightness or white or rgb):
_LOGGER.warning(
"RGB, brightness and white level are ignored when"
" an effect is specified for a flux bulb"
)
# Random color effect
if effect == EFFECT_RANDOM:
self._bulb.setRgb(
random.randint(0, 255), random.randint(0, 255), random.randint(0, 255)
)
return
if effect == EFFECT_CUSTOM:
if self._custom_effect:
self._bulb.setCustomPattern(
self._custom_effect[CONF_COLORS],
self._custom_effect[CONF_SPEED_PCT],
self._custom_effect[CONF_TRANSITION],
)
return
# Effect selection
if effect in EFFECT_MAP:
self._bulb.setPresetPattern(EFFECT_MAP[effect], 50)
return
# Preserve current brightness on color/white level change
if brightness is None:
brightness = self.brightness
# Preserve color on brightness/white level change
if rgb is None:
rgb = self._bulb.getRgb()
if white is None and self._mode == MODE_RGBW:
white = self.white_value
# handle W only mode (use brightness instead of white value)
if self._mode == MODE_WHITE:
self._bulb.setRgbw(0, 0, 0, w=brightness)
# handle RGBW mode
elif self._mode == MODE_RGBW:
self._bulb.setRgbw(*tuple(rgb), w=white, brightness=brightness)
# handle RGB mode
else:
self._bulb.setRgb(*tuple(rgb), brightness=brightness)
def turn_off(self, **kwargs):
"""Turn the specified or all lights off."""
self._bulb.turnOff()
def update(self):
"""Synchronize state with bulb."""
if not self.available:
try:
self._connect()
self._error_reported = False
except OSError:
self._disconnect()
if not self._error_reported:
_LOGGER.warning(
"Failed to connect to bulb %s, %s", self._ipaddr, self._name
)
self._error_reported = True
return
self._bulb.update_state(retry=2)
|
import itertools
import logging
import threading
from collections import deque, namedtuple
from contextlib import contextmanager
from six.moves import xrange
# noinspection PyPep8Naming
from .shcommon import Graphics as graphics
class ShScreenNotLocked(Exception):
pass
#: A container for a single character, field names are *hopefully*
#: self-explanatory.
_Char = namedtuple("_Char",
[
"data",
"fg",
"bg",
"bold",
"italics",
"underscore",
"strikethrough",
"reverse",
])
class ShChar(_Char):
"""
Class of attributed character.
:param str data: The actual character
:param str fg: The foreground color
:param str bg: The background color
:param bool bold: Bold font
:param bool italics: Italics font
:param bool underscore: Underline the character
:param bool reverse: NOT Implemented
:param bool strikethrough: Strike through the character
"""
__slots__ = ()
# noinspection PyInitNewSignature
def __new__(
cls,
data,
fg="default",
bg="default",
bold=False,
italics=False,
underscore=False,
reverse=False,
strikethrough=False
):
return _Char.__new__(cls, data, fg, bg, bold, italics, underscore, strikethrough, reverse)
@staticmethod
def same_style(char1, char2):
"""
Check if both chars have the same style
:param char1: first char to compare
:type char1: ShChar
:param char2: second char to compare
:type char2: ShChar
:return: whether both chars have the same style or not
:rtype: bool
"""
return char1.fg == char2.fg \
and char1.bg == char2.bg \
and char1.bold is char2.bold \
and char1.italics is char2.italics \
and char1.underscore is char2.underscore \
and char1.strikethrough is char2.strikethrough
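# A small illustrative check of the style comparison above (the character data
# itself is ignored; only the style attributes are compared):
#   >>> ShChar.same_style(ShChar('a', fg='red'), ShChar('b', fg='red'))
#   True
#   >>> ShChar.same_style(ShChar('a', fg='red'), ShChar('a', fg='blue'))
#   False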
DEFAULT_CHAR = ShChar(data=' ', fg='default', bg='default')
DEFAULT_LINE = itertools.repeat(DEFAULT_CHAR)
def take(n, iterable):
return list(itertools.islice(iterable, n))
# noinspection PyAttributeOutsideInit
class ShSequentialScreen(object):
"""
The sequential type in-memory screen. Running scripts can only
add characters at the end of the screen buffer, no backspace or
cursor movement is possible. Hence it is sequential.
:param int nlines_max: The maximum number of lines to be stored.
"""
def __init__(self, stash, nlines_max=100, debug=False):
self.stash = stash
self.nlines_max = nlines_max
self.debug = debug
self.logger = logging.getLogger('StaSh.Screen')
self._buffer = deque() # buffer to hold chars
self.lock = threading.Lock()
self.attrs = ShChar(' ')
self.reset()
def reset(self, *args): # *args is a necessary placeholder
"""
Clear the screen and reset its state.
*args is needed because dispatch from the stream always calls handlers
with at least one parameter (even if it is a dummy 0).
"""
# Empty the buffer
self._buffer.clear()
# The cursor position
self.cursor_xs = self.cursor_xe = 0
# This is the location where modifiable chars start. It is immediately
# after where latest program write ends. This property is used to calculate
# modifiable range and is really only useful for User side actions.
self.x_drawend = 0
# The left and right bounds are helpers to renderer for performance.
# Only texts outside of the bounds get rebuilt and re-rendered.
# All chars before this location must be removed from terminal text.
# Note this value is relative to start of Terminal text.
self.intact_left_bound = 0
# All chars after this location must be re-rendered. Note this value is
# relative to start of the Screen's buffer.
self.intact_right_bound = 0
self.nlines = 0
@property
def cursor_x(self):
"""
Note this method returns both bounds of cursor as a tuple.
:rtype: (int, int)
"""
return self.cursor_xs, self.cursor_xe
@cursor_x.setter
def cursor_x(self, value):
"""
This method sets both bounds of the cursor to the same value.
:param int value: New value for both bounds of the cursor
:return:
"""
self.cursor_xs = self.cursor_xe = value
@property
def text(self):
"""
:rtype: str
"""
return ''.join(c.data for c in self._buffer)
@property
def text_length(self):
"""
:rtype: int
"""
return len(self._buffer)
@property
def renderable_chars(self):
"""
Trailing characters that need to be re-rendered (this is not the same
as modifiable chars).
Note this returns a list of ShChar, not a string.
:rtype: [ShChar]
"""
_, rbound = self.get_bounds()
return [self._buffer[x] for x in xrange(rbound, len(self._buffer))]
@property
def x_modifiable(self):
"""
The location where characters start to be modifiable by users. The value
is relative to the beginning of screen buffer.
:rtype: int
"""
# The position is either the x_drawend or last LF location plus one,
# whichever is larger.
for idx in xrange(self.text_length - 1, self.x_drawend - 1, -1):
if self._buffer[idx].data == '\n':
return idx + 1
else:
return self.x_drawend
@property
def modifiable_range(self):
"""
The range of modifiable characters. Values are relative to the
beginning of screen buffer.
:rtype: (int, int)
"""
return self.x_modifiable, self.text_length
@property
def modifiable_string(self):
"""
A string representing the characters in the modifiable range.
:rtype: str
"""
return ''.join(self._buffer[idx].data for idx in xrange(*self.modifiable_range))
@modifiable_string.setter
def modifiable_string(self, s):
"""
Set the modifiable_string to the given string using default Char properties.
This method is only called by UI delegate side, i.e. NOT running scripts.
:param str s: A new value for modifiable_string.
"""
self.replace_in_range(self.modifiable_range, s)
@contextmanager
def acquire_lock(self, blocking=True):
"""
Lock the screen for modification so that it will not be corrupted.
:param blocking: By default the method blocks until a lock is acquired.
"""
locked = self.lock.acquire(blocking)
try:
yield locked
finally:
if locked:
self.lock.release()
@contextmanager
def buffer_rotate(self, n):
"""
Context manager used when operations such as replacement, insertion or
deletion are needed in the middle of the character buffer.
:param int n: number of positions to rotate the buffer to the right
:return:
"""
self._buffer.rotate(n)
try:
yield
finally:
self._buffer.rotate(-n)
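# An illustrative sketch of the rotation trick: to drop the character at
# index i, rotate so that it becomes the right end, pop it, and let the
# context manager rotate the buffer back:
#   with self.buffer_rotate(self.text_length - i - 1):
#       self._buffer.pop()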
def get_bounds(self):
"""
Get the left and right intact bounds of the screen buffer.
The bounds could become negative if entire screen is flushed out before
any rendering. In this case, the bounds need to be adjusted accordingly.
:rtype (int, int):
"""
rbound = self.intact_right_bound if self.intact_right_bound >= 0 else 0
lbound = self.intact_left_bound if rbound > 0 else 0
return lbound, rbound
def clean(self):
"""
Mark everything as rendered.
"""
self.intact_left_bound = 0
self.intact_right_bound = len(self._buffer)
# noinspection PyProtectedMember
def replace_in_range(self, rng, s, relative_to_x_modifiable=False, set_drawend=False):
"""
Replace the buffer content in the given range. This method should
ONLY be called from the UI delegation side, i.e. NOT running
scripts.
:param (int, int) rng: Range of buffer to be replaced
:param str s: String to be inserted (to be converted to Char with default properties).
:param bool relative_to_x_modifiable: If True, the range is relative to the x_modifiable
:param bool set_drawend: If True, the x_drawend will be set to the end of this replacement.
:return:
"""
if rng is None:
rng = (len(self._buffer), len(self._buffer))
elif relative_to_x_modifiable: # Convert to absolute location if necessary
rng = rng[0] + self.x_modifiable, rng[1] + self.x_modifiable
# Update the right bound if necessary
if rng[0] < self.intact_right_bound:
self.intact_right_bound = rng[0]
rotate_n = max(len(self._buffer) - rng[1], 0)
self._buffer.rotate(rotate_n) # rotate buffer first so deletion is possible
try:
if rng[0] != rng[1]: # delete chars if necessary
self._pop_chars(rng[1] - rng[0])
# The newly inserted chars are always of default properties
self._buffer.extend(DEFAULT_CHAR._replace(data=c) for c in s)
finally:
self._buffer.rotate(-rotate_n)
# Update cursor to the end of this replacement
self.cursor_x = rng[0] + len(s)
# Normally the draw end is not set
if set_drawend:
self.x_drawend = self.cursor_xs
nlf = s.count('\n')
if nlf > 0: # ensure max number of lines is kept
self.nlines += nlf
self._ensure_nlines_max()
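# A short illustrative example: with a buffer holding "hello\n",
# replace_in_range((0, 5), 'bye') leaves the buffer reading "bye\n", moves the
# cursor to position 3 and, since rng[0] < intact_right_bound, forces that
# prefix to be re-rendered.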
def _pop_chars(self, n=1):
"""
Remove the given number of characters from the right end of the buffer.
:param n:
:return:
"""
for _ in xrange(n):
self._buffer.pop()
if self.text_length < self.intact_right_bound:
self.intact_right_bound = self.text_length
def _ensure_nlines_max(self):
"""
Keep number of lines under control
"""
char_count = line_count = 0
for _ in xrange(self.nlines_max, self.nlines):
# Remove the top line
for idx in xrange(self.text_length):
char_count += 1
if self._buffer.popleft().data == '\n':
line_count += 1
break
if char_count > 0:
self.intact_left_bound += char_count
self.intact_right_bound -= char_count
self.cursor_xs -= char_count
self.cursor_xe -= char_count
self.x_drawend -= char_count
if line_count > 0:
self.nlines -= line_count
def _rfind_nth_nl(self, from_x=None, n=1, default=None):
if from_x is None:
from_x = self.cursor_xs
for idx in xrange(from_x, -1, -1):
try: # try for when from_x is equal to buffer length (i.e. at the end of the buffer)
if self._buffer[idx].data == '\n':
n -= 1
if n == 0:
return idx
except IndexError:
pass
else:
return default
def _find_nth_nl(self, from_x=None, n=1, default=None):
if from_x is None:
from_x = self.cursor_xs
for idx in xrange(from_x, self.text_length):
try:
if self._buffer[idx].data == '\n':
n -= 1
if n == 0:
return idx
except IndexError:
pass
else:
return default
# noinspection PyProtectedMember
def draw(self, c):
"""
Add given char to the right end of the buffer and update the last draw
location. This method should ONLY be called by ShStream.
:param str c: A new character to draw
"""
if self.cursor_xs == self.text_length: # cursor is at the end
if self.text_length < self.intact_right_bound:
self.intact_right_bound = self.text_length
self._buffer.append(self.attrs._replace(data=c))
self.cursor_x = self.x_drawend = self.text_length
else: # cursor is in the middle
# First rotate the text that is to the right of the cursor
with self.buffer_rotate(self.text_length - self.cursor_xs - 1):
# Remove the character at the cursor and append new character
# This is effectively character REPLACING operation
c_poped = self._buffer.pop()
self._buffer.append(self.attrs._replace(data=c))
# The replacing must be within a single line, so the newline
# character cannot be replaced and instead a new char is inserted
# right before the newline.
# Also when the new character is a newline, it is effectively an
# insertion NOT replacement (i.e. it pushes everything following
# it to the next line).
if c == '\n' or c_poped.data == '\n':
self._buffer.append(c_poped)
# Update the cursor and drawing end
self.cursor_x = self.x_drawend = self.cursor_xs + 1
# Update the intact right bound
if self.x_drawend < self.intact_right_bound:
self.intact_right_bound = self.x_drawend
# Count the number of lines
if c == '\n':
self.nlines += 1
self._ensure_nlines_max()
def backspace(self):
"""
Move cursor back one character. Do not cross lines.
"""
cursor_xs = self.cursor_xs - 1
try:
if self._buffer[cursor_xs].data != '\n':
self.cursor_x = cursor_xs
except IndexError:
self.cursor_x = 0
def carriage_return(self):
"""
Process \r to move cursor to the beginning of the current line.
"""
self.cursor_x = self._rfind_nth_nl(default=-1) + 1
def delete_characters(self, count=0):
"""
Delete ``count`` characters starting at (and including) the cursor, within the current line.
:param count: If count is 0, delete till the next newline.
"""
if self.cursor_xs == self.text_length or self._buffer[self.cursor_xs].data == '\n':
return
if count == 0: # delete till the next newline
count = self.text_length
with self.buffer_rotate(-self.cursor_xs):
for _ in xrange(min(count, self.text_length - self.cursor_xs)):
c = self._buffer.popleft()
if c.data == '\n': # do not delete newline
self._buffer.appendleft(c)
break
self.x_drawend = self.cursor_xs
if self.x_drawend < self.intact_right_bound:
self.intact_right_bound = self.x_drawend
def erase_in_line(self, mode=0):
"""
Erase part or all of the current line depending on the mode. Note the newline character is NOT deleted.
:param mode:
:return:
"""
# Calculate the range for erase
if mode == 0: # erase from cursor to end of line, including cursor
rng = [self.cursor_xs, self._find_nth_nl(default=self.text_length)]
try: # do not include the newline character
if self._buffer[rng[0]].data == '\n':
rng[0] += 1
except IndexError:
pass
elif mode == 1: # erase from beginning of line to cursor, including cursor
rng = [self._rfind_nth_nl(default=-1) + 1, min(self.cursor_xs + 1, self.text_length)]
try:
if self._buffer[rng[1] - 1].data == '\n':
rng[1] -= 1
except IndexError:
pass
else: # mode == 2: # erase the complete line
rng = [self._rfind_nth_nl(default=-1) + 1, self._find_nth_nl(default=self.text_length)]
# fast fail when there is nothing to erase
if rng[0] >= rng[1]:
return
# Erase characters in the range
with self.buffer_rotate(self.text_length - rng[1]):
for _ in xrange(*rng):
self._buffer.pop()
self._buffer.extend(take(rng[1] - rng[0], DEFAULT_LINE))
self.x_drawend = rng[0]
# update the intact right bound
if self.x_drawend < self.intact_right_bound:
self.intact_right_bound = self.x_drawend
# noinspection PyProtectedMember
def select_graphic_rendition(self, *attrs):
"""
Act on SGR (text style) escape attributes.
:param attrs: integer SGR attribute codes to apply
"""
replace = {}
for attr in attrs or [0]:
if attr in graphics.FG:
replace["fg"] = graphics.FG[attr]
elif attr in graphics.BG:
replace["bg"] = graphics.BG[attr]
elif attr in graphics.TEXT:
attr = graphics.TEXT[attr]
replace[attr[1:]] = attr.startswith("+")
elif not attr:
replace = DEFAULT_CHAR._asdict()
self.attrs = self.attrs._replace(**replace)
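# Illustrative only, and assuming graphics.FG/TEXT are keyed by ANSI SGR
# numbers (as in pyte): select_graphic_rendition(31, 1) would make subsequent
# draw() calls use red, bold characters, while select_graphic_rendition(0)
# (or no arguments) resets self.attrs to the default style.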
def load_pyte_screen(self, pyte_screen):
"""
This method is for command script only, e.g. ssh.
"""
with self.acquire_lock():
self.intact_left_bound = 0
nlines, ncolumns = pyte_screen.lines, pyte_screen.columns
line_count = 0
column_count = 0
for line in reversed(pyte_screen.display):
line = line.rstrip()
if line != '':
column_count = len(line)
break
line_count += 1
nchars_pyte_screen = (nlines - line_count - 1) * (ncolumns + 1) + column_count
idx_cursor_pyte_screen = pyte_screen.cursor.x + pyte_screen.cursor.y * (ncolumns + 1)
if nchars_pyte_screen < idx_cursor_pyte_screen:
nchars_pyte_screen = idx_cursor_pyte_screen
try:
min_idx_dirty_line = min(pyte_screen.dirty)
except ValueError:
min_idx_dirty_line = 0
idx_dirty_char = (ncolumns + 1) * min_idx_dirty_line
# self.logger.info(
# 'min_idx_dirty_line={}, idx_dirty_char={}, nchars_pyte_screen={}, self.text_length={}'.format(
# min_idx_dirty_line, idx_dirty_char, nchars_pyte_screen, self.text_length
# )
# )
if idx_dirty_char > self.text_length - 1:
self.intact_right_bound = self.text_length
else:
self.intact_right_bound = min(self.text_length, nchars_pyte_screen)
for idx in xrange(idx_dirty_char, nchars_pyte_screen):
# self.logger.info('idx = %s' % idx)
if idx >= self.text_length:
break
idx_line, idx_column = idx // (ncolumns + 1), idx % (ncolumns + 1)
if idx_column == ncolumns:
continue
pyte_char = pyte_screen.buffer[idx_line][idx_column]
# self.logger.info('HERE = %s' % idx)
if self._buffer[idx].data != pyte_char.data \
or not ShChar.same_style(self._buffer[idx], pyte_char):
# self.logger.info('breaking %s' % idx)
self.intact_right_bound = idx
break
for _ in xrange(self.intact_right_bound, self.text_length):
self._buffer.pop()
for idx in xrange(self.intact_right_bound, nchars_pyte_screen):
idx_line, idx_column = idx // (ncolumns + 1), idx % (ncolumns + 1)
if idx_column != ncolumns:
c = pyte_screen.buffer[idx_line][idx_column]
self._buffer.append(ShChar(**c._asdict()))
else:
self._buffer.append(ShChar('\n'))
self.cursor_x = idx_cursor_pyte_screen
# self.logger.info('intact_right_bound={}, cursor={}'.format(self.intact_right_bound, self.cursor_xs))
# self.logger.info('|%s|' % pyte_screen.display)
# self.logger.info('text=|%s|' % self.text)
# self.logger.info('text_length=%s' % self.text_length)
|
from __future__ import print_function
from collections import OrderedDict
import itertools
def test_config(python, chainer, optional, target):
key = 'chainercv.py{}.{}'.format(python, chainer)
if not optional:
key += '.mini'
value = OrderedDict((
('requirement', OrderedDict((
('cpu', 4),
('memory', 24),
('disk', 10),
))),
('time_limit', None),
('command', None),
('environment_variables', [
('PYTHON', str(python)),
('CHAINER', chainer),
('OPTIONAL_MODULES', '1' if optional else '0'),
]),
))
if target == 'cpu':
value['requirement']['cpu'] = 6
value['requirement']['memory'] = 36
value['time_limit'] = {'seconds': 3600}
value['command'] = 'sh .pfnci/tests.sh'
elif target == 'gpu':
key += '.gpu'
value['requirement']['gpu'] = 1
value['command'] = 'sh .pfnci/tests_gpu.sh'
elif target == 'examples':
key += '.examples'
value['requirement']['cpu'] = 6
value['requirement']['memory'] = 36
value['requirement']['gpu'] = 2
value['time_limit'] = {'seconds': 1800}
value['command'] = 'sh .pfnci/examples_tests.sh'
return key, value
def main():
configs = []
configs.append((
'chainercv.cache',
OrderedDict((
('requirement', OrderedDict((
('cpu', 8),
('memory', 48),
))),
('time_limit', OrderedDict((
('seconds', 1800),
))),
('command', 'sh .pfnci/cache.sh'),
))
))
for python, chainer in itertools.product(
(2, 3), ('stable', 'latest', 'master')):
if python == 2 and chainer in {'latest', 'master'}:
continue
for optional in (True, False):
configs.append(test_config(python, chainer, optional, 'cpu'))
configs.append(test_config(python, chainer, optional, 'gpu'))
configs.append(test_config(python, chainer, True, 'examples'))
print('# DO NOT MODIFY THIS FILE MANUALLY.')
print('# USE gen_config.py INSTEAD.')
print()
dump_pbtxt('configs', configs)
def dump_pbtxt(key, value, level=0):
indent = ' ' * level
if isinstance(value, int):
print('{}{}: {}'.format(indent, key, value))
elif isinstance(value, str):
print('{}{}: "{}"'.format(indent, key, value))
elif isinstance(value, list):
for k, v in value:
print('{}{} {{'.format(indent, key))
dump_pbtxt('key', k, level + 1)
dump_pbtxt('value', v, level + 1)
print('{}}}'.format(indent))
elif isinstance(value, dict):
print('{}{} {{'.format(indent, key))
for k, v in value.items():
dump_pbtxt(k, v, level + 1)
print('{}}}'.format(indent))
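# For example (illustrative), dump_pbtxt('requirement', OrderedDict([('cpu', 4),
# ('memory', 24)])) prints the nested pbtxt block:
#   requirement {
#     cpu: 4
#     memory: 24
#   }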
if __name__ == '__main__':
main()
|
from git import Git
from .base import BaseOperation
class Inspector(BaseOperation):
"""
Used to introspect a Git repository.
"""
def merged_refs(self, skip=[]):
"""
Returns a list of remote refs that have been merged into the master
branch.
The "master" branch may have a different name than master. The value of
``self.master_name`` is used to determine what this name is.
"""
origin = self._origin
master = self._master_ref(origin)
refs = self._filtered_remotes(
origin, skip=['HEAD', self.master_branch] + skip)
merged = []
for ref in refs:
upstream = '{origin}/{master}'.format(
origin=origin.name, master=master.remote_head)
head = '{origin}/{branch}'.format(
origin=origin.name, branch=ref.remote_head)
cmd = Git(self.repo.working_dir)
# Drop to the git binary to do this, it's just easier to work with
# at this level.
(retcode, stdout, stderr) = cmd.execute(
['git', 'cherry', upstream, head],
with_extended_output=True, with_exceptions=False)
if retcode == 0 and not stdout:
# This means there are no commits in the branch that are not
# also in the master branch. This is ready to be deleted.
merged.append(ref)
return merged
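# A minimal usage sketch (assumes BaseOperation is constructed with a
# git.Repo instance, which is not shown in this file):
#   inspector = Inspector(repo)
#   for ref in inspector.merged_refs(skip=['develop']):
#       print('safe to delete: {0}'.format(ref.remote_head))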
|
import voluptuous as vol
from homeassistant.components.binary_sensor import (
DEVICE_CLASSES_SCHEMA,
PLATFORM_SCHEMA,
BinarySensorEntity,
)
from homeassistant.const import CONF_DEVICE_CLASS, CONF_ID, CONF_NAME
import homeassistant.helpers.config_validation as cv
from .device import EnOceanEntity
DEFAULT_NAME = "EnOcean binary sensor"
DEPENDENCIES = ["enocean"]
EVENT_BUTTON_PRESSED = "button_pressed"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ID): vol.All(cv.ensure_list, [vol.Coerce(int)]),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_DEVICE_CLASS): DEVICE_CLASSES_SCHEMA,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Binary Sensor platform for EnOcean."""
dev_id = config.get(CONF_ID)
dev_name = config.get(CONF_NAME)
device_class = config.get(CONF_DEVICE_CLASS)
add_entities([EnOceanBinarySensor(dev_id, dev_name, device_class)])
class EnOceanBinarySensor(EnOceanEntity, BinarySensorEntity):
"""Representation of EnOcean binary sensors such as wall switches.
Supported EEPs (EnOcean Equipment Profiles):
- F6-02-01 (Light and Blind Control - Application Style 2)
- F6-02-02 (Light and Blind Control - Application Style 1)
"""
def __init__(self, dev_id, dev_name, device_class):
"""Initialize the EnOcean binary sensor."""
super().__init__(dev_id, dev_name)
self._device_class = device_class
self.which = -1
self.onoff = -1
@property
def name(self):
"""Return the default name for the binary sensor."""
return self.dev_name
@property
def device_class(self):
"""Return the class of this sensor."""
return self._device_class
def value_changed(self, packet):
"""Fire an event with the data that have changed.
This method is called when there is an incoming packet associated
with this platform.
Example packet data:
- 2nd button pressed
['0xf6', '0x10', '0x00', '0x2d', '0xcf', '0x45', '0x30']
- button released
['0xf6', '0x00', '0x00', '0x2d', '0xcf', '0x45', '0x20']
"""
# Energy Bow
pushed = None
if packet.data[6] == 0x30:
pushed = 1
elif packet.data[6] == 0x20:
pushed = 0
self.schedule_update_ha_state()
action = packet.data[1]
if action == 0x70:
self.which = 0
self.onoff = 0
elif action == 0x50:
self.which = 0
self.onoff = 1
elif action == 0x30:
self.which = 1
self.onoff = 0
elif action == 0x10:
self.which = 1
self.onoff = 1
elif action == 0x37:
self.which = 10
self.onoff = 0
elif action == 0x15:
self.which = 10
self.onoff = 1
self.hass.bus.fire(
EVENT_BUTTON_PRESSED,
{
"id": self.dev_id,
"pushed": pushed,
"which": self.which,
"onoff": self.onoff,
},
)
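# A minimal sketch (illustrative) of consuming the event fired above from
# other Home Assistant code, e.g. a custom component:
#   hass.bus.async_listen(
#       EVENT_BUTTON_PRESSED,
#       lambda event: print(event.data["which"], event.data["onoff"]),
#   )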
|
from stashutils.fsi.errors import OperationFailure
import random
import os
import time
import stat
import pwd
class BaseFSI(object):
"""
Base class for all FSIs.
Other FSIs should subclass this.
This class currently serves only as documentation, but this may change.
"""
def __init__(self, logger=None):
"""
Initialize the FSI.
"logger" should be a callable that will be called with log messages,
or None.
"""
self.logger = logger
def connect(self, *args):
"""
Called to 'connect' to a filesystem.
'args' are the additional args passed by the user.
This should be a no-op if no connection or setup is required.
This should return True on success, otherwise a string describing the error.
"""
return "Not Implemented"
def repr(self):
"""
this should return a string identifying the instance of this interface.
"""
return "Unknown Interface"
def listdir(self, path="."):
"""
called for listing a dir.
The FSI is responsible for keeping track of the cwd.
This should return a list of strings.
'..' doesn't need to be added.
"""
return []
def cd(self, name):
"""this should change the cwd to name."""
raise OperationFailure("NotImplemented")
def get_path(self):
"""this should return the current path as a string."""
return "/"
def remove(self, name):
"""this should remove name. name may refer either to a dir or a file."""
raise OperationFailure("NotImplemented")
def open(self, name, mode="r", buffering=0):
"""
this should return a file-like object opened in mode mode.
"""
raise OperationFailure("NotImplemented")
def mkdir(self, name):
"""this should create a dir."""
raise OperationFailure("NotImplemented")
def close(self):
"""this should close the interface.
There is a chance that this may not be called."""
pass
def isdir(self, name):
"""this should return True if name is an existing directory and
False if not."""
raise OperationFailure("NotImplemented")
def isfile(self, name):
"""this should return wether name is an existing file."""
# default: not isdir(). problem: no exist check
return not self.isdir(name)
def stat(self, name):
"""
this should stat the file name and return a os.stat_result or
FakeStatResult().
"""
if self.isfile(name):
return make_stat(type=stat.S_IFREG)
else:
return make_stat(type=stat.S_IFDIR)
def log(self, msg):
"""logs/prints a message to self.logger."""
if self.logger is not None:
self.logger(msg)
def calc_mode(
sticky=False,
isuid=True,
isgid=True,
type=stat.S_IFREG,
owner_read=True,
owner_write=True,
owner_exec=True,
group_read=True,
group_write=True,
group_exec=True,
other_read=True,
other_write=True,
other_exec=True,
):
"""helper function to calculate the mode bits of a file."""
mode = 0
if owner_read:
mode |= stat.S_IRUSR
if owner_write:
mode |= stat.S_IWUSR
if owner_exec:
mode |= stat.S_IXUSR
if group_read:
mode |= stat.S_IRGRP
if group_write:
mode |= stat.S_IWGRP
if group_exec:
mode |= stat.S_IXGRP
if other_read:
mode |= stat.S_IROTH
if other_write:
mode |= stat.S_IWOTH
if other_exec:
mode |= stat.S_IXOTH
if sticky:
mode |= stat.S_ISVTX
if isuid:
mode |= stat.S_ISUID
if isgid:
mode |= stat.S_ISGID
mode |= type
return mode
DEFAULT_MODE = calc_mode()
def make_stat(
mode=DEFAULT_MODE,
inode=None,
dev=None,
nlinks=1,
gid=None,
uid=None,
size=0,
atime=None,
mtime=None,
ctime=None,
blocks=1,
blksize=None,
rdev=stat.S_IFREG,
flags=0,
):
"""helper function to generate os.stat results."""
if inode is None:
inode = random.randint(1000, 9999999)
if dev is None:
dev = os.makedev(64, random.randint(1, 100))
if uid is None:
uid = os.getuid()
if gid is None:
gid = pwd.getpwuid(os.getuid()).pw_gid
if atime is None:
atime = time.time()
if mtime is None:
mtime = time.time()
if ctime is None:
ctime = time.time()
if os.stat_float_times():
ctime = float(ctime)
mtime = float(mtime)
atime = float(atime)
else:
ctime = int(ctime)
atime = int(atime)
mtime = int(mtime)
if blksize is None:
blksize = max(size, 2048)
s = os.stat_result(
(
mode,
inode,
dev,
nlinks,
uid,
gid,
size,
atime,
mtime,
ctime,
),
{
"st_blocks": blocks,
"st_blksize": blksize,
"st_rdev": rdev,
"st_flags": flags,
}
)
return s
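# An illustrative sketch: an FSI subclass could report a read-only 4 KiB
# regular file like this:
#   mode = calc_mode(type=stat.S_IFREG, owner_write=False, group_write=False,
#                    other_write=False)
#   st = make_stat(mode=mode, size=4096)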
|
import logging
from twtxt.parser import parse_tweets
logger = logging.getLogger(__name__)
def get_local_tweets(source, limit):
try:
with open(source.file, "r") as fh:
input_lines = fh.readlines()
except (FileNotFoundError, PermissionError) as e:
logger.debug(e)
return []
local_tweets = parse_tweets(input_lines, source)
return sorted(local_tweets, reverse=True)[:limit]
def add_local_tweet(tweet, file):
try:
with open(file, "a") as fh:
fh.write("{0}\n".format(str(tweet)))
except (FileNotFoundError, PermissionError) as e:
logger.debug(e)
return False
return True
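# A minimal round-trip sketch (``source`` and ``tweet`` are twtxt model objects
# whose exact shape is assumed here, not defined in this file):
#   if add_local_tweet(tweet, source.file):
#       latest = get_local_tweets(source, limit=20)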
|
from homeassistant.core import State
from tests.common import async_mock_service
async def test_reproducing_states(hass, caplog):
"""Test reproducing Counter states."""
hass.states.async_set("counter.entity", "5", {})
hass.states.async_set(
"counter.entity_attr",
"8",
{"initial": 12, "minimum": 5, "maximum": 15, "step": 3},
)
configure_calls = async_mock_service(hass, "counter", "configure")
# These calls should do nothing as entities already in desired state
await hass.helpers.state.async_reproduce_state(
[
State("counter.entity", "5"),
State(
"counter.entity_attr",
"8",
{"initial": 12, "minimum": 5, "maximum": 15, "step": 3},
),
]
)
assert len(configure_calls) == 0
# Test invalid state is handled
await hass.helpers.state.async_reproduce_state(
[State("counter.entity", "not_supported")]
)
assert "not_supported" in caplog.text
assert len(configure_calls) == 0
# Make sure correct services are called
await hass.helpers.state.async_reproduce_state(
[
State("counter.entity", "2"),
State(
"counter.entity_attr",
"7",
{"initial": 10, "minimum": 3, "maximum": 21, "step": 5},
),
# Should not raise
State("counter.non_existing", "6"),
]
)
valid_calls = [
{"entity_id": "counter.entity", "value": "2"},
{
"entity_id": "counter.entity_attr",
"value": "7",
"initial": 10,
"minimum": 3,
"maximum": 21,
"step": 5,
},
]
assert len(configure_calls) == 2
for call in configure_calls:
assert call.domain == "counter"
assert call.data in valid_calls
valid_calls.remove(call.data)
|
from lxml import etree
import sys
import re
import doctest
try:
from html import escape as html_escape
except ImportError:
from cgi import escape as html_escape
__all__ = ['PARSE_HTML', 'PARSE_XML', 'NOPARSE_MARKUP', 'LXMLOutputChecker',
'LHTMLOutputChecker', 'install', 'temp_install']
try:
_basestring = basestring
except NameError:
_basestring = (str, bytes)
_IS_PYTHON_3 = sys.version_info[0] >= 3
PARSE_HTML = doctest.register_optionflag('PARSE_HTML')
PARSE_XML = doctest.register_optionflag('PARSE_XML')
NOPARSE_MARKUP = doctest.register_optionflag('NOPARSE_MARKUP')
OutputChecker = doctest.OutputChecker
def strip(v):
if v is None:
return None
else:
return v.strip()
def norm_whitespace(v):
return _norm_whitespace_re.sub(' ', v)
_html_parser = etree.HTMLParser(recover=False, remove_blank_text=True)
def html_fromstring(html):
return etree.fromstring(html, _html_parser)
# We use this to distinguish repr()s from elements:
_repr_re = re.compile(r'^<[^>]+ (at|object) ')
_norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
class LXMLOutputChecker(OutputChecker):
empty_tags = (
'param', 'img', 'area', 'br', 'basefont', 'input',
'base', 'meta', 'link', 'col')
def get_default_parser(self):
return etree.XML
def check_output(self, want, got, optionflags):
alt_self = getattr(self, '_temp_override_self', None)
if alt_self is not None:
super_method = self._temp_call_super_check_output
self = alt_self
else:
super_method = OutputChecker.check_output
parser = self.get_parser(want, got, optionflags)
if not parser:
return super_method(
self, want, got, optionflags)
try:
want_doc = parser(want)
except etree.XMLSyntaxError:
return False
try:
got_doc = parser(got)
except etree.XMLSyntaxError:
return False
return self.compare_docs(want_doc, got_doc)
def get_parser(self, want, got, optionflags):
parser = None
if NOPARSE_MARKUP & optionflags:
return None
if PARSE_HTML & optionflags:
parser = html_fromstring
elif PARSE_XML & optionflags:
parser = etree.XML
elif (want.strip().lower().startswith('<html')
and got.strip().startswith('<html')):
parser = html_fromstring
elif (self._looks_like_markup(want)
and self._looks_like_markup(got)):
parser = self.get_default_parser()
return parser
def _looks_like_markup(self, s):
s = s.strip()
return (s.startswith('<')
and not _repr_re.search(s))
def compare_docs(self, want, got):
if not self.tag_compare(want.tag, got.tag):
return False
if not self.text_compare(want.text, got.text, True):
return False
if not self.text_compare(want.tail, got.tail, True):
return False
if 'any' not in want.attrib:
want_keys = sorted(want.attrib.keys())
got_keys = sorted(got.attrib.keys())
if want_keys != got_keys:
return False
for key in want_keys:
if not self.text_compare(want.attrib[key], got.attrib[key], False):
return False
if want.text != '...' or len(want):
want_children = list(want)
got_children = list(got)
while want_children or got_children:
if not want_children or not got_children:
return False
want_first = want_children.pop(0)
got_first = got_children.pop(0)
if not self.compare_docs(want_first, got_first):
return False
if not got_children and want_first.tail == '...':
break
return True
def text_compare(self, want, got, strip):
want = want or ''
got = got or ''
if strip:
want = norm_whitespace(want).strip()
got = norm_whitespace(got).strip()
want = '^%s$' % re.escape(want)
want = want.replace(r'\.\.\.', '.*')
if re.search(want, got):
return True
else:
return False
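# For example (illustrative): text_compare('a...z', 'a middle z', True)
# returns True, because the escaped '...' in the wanted text is rewritten
# into the '.*' wildcard before matching.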
def tag_compare(self, want, got):
if want == 'any':
return True
if (not isinstance(want, _basestring)
or not isinstance(got, _basestring)):
return want == got
want = want or ''
got = got or ''
if want.startswith('{...}'):
# Ellipsis on the namespace
return want.split('}')[-1] == got.split('}')[-1]
else:
return want == got
def output_difference(self, example, got, optionflags):
want = example.want
parser = self.get_parser(want, got, optionflags)
errors = []
if parser is not None:
try:
want_doc = parser(want)
except etree.XMLSyntaxError:
e = sys.exc_info()[1]
errors.append('In example: %s' % e)
try:
got_doc = parser(got)
except etree.XMLSyntaxError:
e = sys.exc_info()[1]
errors.append('In actual output: %s' % e)
if parser is None or errors:
value = OutputChecker.output_difference(
self, example, got, optionflags)
if errors:
errors.append(value)
return '\n'.join(errors)
else:
return value
html = parser is html_fromstring
diff_parts = ['Expected:',
self.format_doc(want_doc, html, 2),
'Got:',
self.format_doc(got_doc, html, 2),
'Diff:',
self.collect_diff(want_doc, got_doc, html, 2)]
return '\n'.join(diff_parts)
def html_empty_tag(self, el, html=True):
if not html:
return False
if el.tag not in self.empty_tags:
return False
if el.text or len(el):
# This shouldn't happen (contents in an empty tag)
return False
return True
def format_doc(self, doc, html, indent, prefix=''):
parts = []
if not len(doc):
# No children...
parts.append(' '*indent)
parts.append(prefix)
parts.append(self.format_tag(doc))
if not self.html_empty_tag(doc, html):
if strip(doc.text):
parts.append(self.format_text(doc.text))
parts.append(self.format_end_tag(doc))
if strip(doc.tail):
parts.append(self.format_text(doc.tail))
parts.append('\n')
return ''.join(parts)
parts.append(' '*indent)
parts.append(prefix)
parts.append(self.format_tag(doc))
if not self.html_empty_tag(doc, html):
parts.append('\n')
if strip(doc.text):
parts.append(' '*indent)
parts.append(self.format_text(doc.text))
parts.append('\n')
for el in doc:
parts.append(self.format_doc(el, html, indent+2))
parts.append(' '*indent)
parts.append(self.format_end_tag(doc))
parts.append('\n')
if strip(doc.tail):
parts.append(' '*indent)
parts.append(self.format_text(doc.tail))
parts.append('\n')
return ''.join(parts)
def format_text(self, text, strip=True):
if text is None:
return ''
if strip:
text = text.strip()
return html_escape(text, 1)
def format_tag(self, el):
attrs = []
if isinstance(el, etree.CommentBase):
# FIXME: probably PIs should be handled specially too?
return '<!--'
for name, value in sorted(el.attrib.items()):
attrs.append('%s="%s"' % (name, self.format_text(value, False)))
if not attrs:
return '<%s>' % el.tag
return '<%s %s>' % (el.tag, ' '.join(attrs))
def format_end_tag(self, el):
if isinstance(el, etree.CommentBase):
# FIXME: probably PIs should be handled specially too?
return '-->'
return '</%s>' % el.tag
def collect_diff(self, want, got, html, indent):
parts = []
if not len(want) and not len(got):
parts.append(' '*indent)
parts.append(self.collect_diff_tag(want, got))
if not self.html_empty_tag(got, html):
parts.append(self.collect_diff_text(want.text, got.text))
parts.append(self.collect_diff_end_tag(want, got))
parts.append(self.collect_diff_text(want.tail, got.tail))
parts.append('\n')
return ''.join(parts)
parts.append(' '*indent)
parts.append(self.collect_diff_tag(want, got))
parts.append('\n')
if strip(want.text) or strip(got.text):
parts.append(' '*indent)
parts.append(self.collect_diff_text(want.text, got.text))
parts.append('\n')
want_children = list(want)
got_children = list(got)
while want_children or got_children:
if not want_children:
parts.append(self.format_doc(got_children.pop(0), html, indent+2, '+'))
continue
if not got_children:
parts.append(self.format_doc(want_children.pop(0), html, indent+2, '-'))
continue
parts.append(self.collect_diff(
want_children.pop(0), got_children.pop(0), html, indent+2))
parts.append(' '*indent)
parts.append(self.collect_diff_end_tag(want, got))
parts.append('\n')
if strip(want.tail) or strip(got.tail):
parts.append(' '*indent)
parts.append(self.collect_diff_text(want.tail, got.tail))
parts.append('\n')
return ''.join(parts)
def collect_diff_tag(self, want, got):
if not self.tag_compare(want.tag, got.tag):
tag = '%s (got: %s)' % (want.tag, got.tag)
else:
tag = got.tag
attrs = []
any = want.tag == 'any' or 'any' in want.attrib
for name, value in sorted(got.attrib.items()):
if name not in want.attrib and not any:
attrs.append('+%s="%s"' % (name, self.format_text(value, False)))
else:
if name in want.attrib:
text = self.collect_diff_text(want.attrib[name], value, False)
else:
text = self.format_text(value, False)
attrs.append('%s="%s"' % (name, text))
if not any:
for name, value in sorted(want.attrib.items()):
if name in got.attrib:
continue
attrs.append('-%s="%s"' % (name, self.format_text(value, False)))
if attrs:
tag = '<%s %s>' % (tag, ' '.join(attrs))
else:
tag = '<%s>' % tag
return tag
def collect_diff_end_tag(self, want, got):
if want.tag != got.tag:
tag = '%s (got: %s)' % (want.tag, got.tag)
else:
tag = got.tag
return '</%s>' % tag
def collect_diff_text(self, want, got, strip=True):
if self.text_compare(want, got, strip):
if not got:
return ''
return self.format_text(got, strip)
text = '%s (got: %s)' % (want, got)
return self.format_text(text, strip)
class LHTMLOutputChecker(LXMLOutputChecker):
def get_default_parser(self):
return html_fromstring
def install(html=False):
"""
Install doctestcompare for all future doctests.
If html is true, then by default the HTML parser will be used;
otherwise the XML parser is used.
"""
if html:
doctest.OutputChecker = LHTMLOutputChecker
else:
doctest.OutputChecker = LXMLOutputChecker
def temp_install(html=False, del_module=None):
"""
Use this *inside* a doctest to enable this checker for this
doctest only.
If html is true, then by default the HTML parser will be used;
otherwise the XML parser is used.
"""
if html:
Checker = LHTMLOutputChecker
else:
Checker = LXMLOutputChecker
frame = _find_doctest_frame()
dt_self = frame.f_locals['self']
checker = Checker()
old_checker = dt_self._checker
dt_self._checker = checker
# The unfortunate thing is that there is a local variable 'check'
# in the function that runs the doctests, that is a bound method
# into the output checker. We have to update that. We can't
# modify the frame, so we have to modify the object in place. The
# only way to do this is to actually change the func_code
# attribute of the method. We change it, and then wait for
# __record_outcome to be run, which signals the end of the __run
# method, at which point we restore the previous check_output
# implementation.
if _IS_PYTHON_3:
check_func = frame.f_locals['check'].__func__
checker_check_func = checker.check_output.__func__
else:
check_func = frame.f_locals['check'].im_func
checker_check_func = checker.check_output.im_func
# Because we can't patch up func_globals, this is the only global
# in check_output that we care about:
doctest.etree = etree
_RestoreChecker(dt_self, old_checker, checker,
check_func, checker_check_func,
del_module)
class _RestoreChecker(object):
def __init__(self, dt_self, old_checker, new_checker, check_func, clone_func,
del_module):
self.dt_self = dt_self
self.checker = old_checker
self.checker._temp_call_super_check_output = self.call_super
self.checker._temp_override_self = new_checker
self.check_func = check_func
self.clone_func = clone_func
self.del_module = del_module
self.install_clone()
self.install_dt_self()
def install_clone(self):
if _IS_PYTHON_3:
self.func_code = self.check_func.__code__
self.func_globals = self.check_func.__globals__
self.check_func.__code__ = self.clone_func.__code__
else:
self.func_code = self.check_func.func_code
self.func_globals = self.check_func.func_globals
self.check_func.func_code = self.clone_func.func_code
def uninstall_clone(self):
if _IS_PYTHON_3:
self.check_func.__code__ = self.func_code
else:
self.check_func.func_code = self.func_code
def install_dt_self(self):
self.prev_func = self.dt_self._DocTestRunner__record_outcome
self.dt_self._DocTestRunner__record_outcome = self
def uninstall_dt_self(self):
self.dt_self._DocTestRunner__record_outcome = self.prev_func
def uninstall_module(self):
if self.del_module:
import sys
del sys.modules[self.del_module]
if '.' in self.del_module:
package, module = self.del_module.rsplit('.', 1)
package_mod = sys.modules[package]
delattr(package_mod, module)
def __call__(self, *args, **kw):
self.uninstall_clone()
self.uninstall_dt_self()
del self.checker._temp_override_self
del self.checker._temp_call_super_check_output
result = self.prev_func(*args, **kw)
self.uninstall_module()
return result
def call_super(self, *args, **kw):
self.uninstall_clone()
try:
return self.check_func(*args, **kw)
finally:
self.install_clone()
def _find_doctest_frame():
import sys
frame = sys._getframe(1)
while frame:
l = frame.f_locals
if 'BOOM' in l:
# Sign of doctest
return frame
frame = frame.f_back
raise LookupError(
"Could not find doctest (only use this function *inside* a doctest)")
__test__ = {
'basic': '''
>>> temp_install()
>>> print """<xml a="1" b="2">stuff</xml>"""
<xml b="2" a="1">...</xml>
>>> print """<xml xmlns="http://example.com"><tag attr="bar" /></xml>"""
<xml xmlns="...">
<tag attr="..." />
</xml>
>>> print """<xml>blahblahblah<foo /></xml>""" # doctest: +NOPARSE_MARKUP, +ELLIPSIS
<xml>...foo /></xml>
'''}
if __name__ == '__main__':
import doctest
doctest.testmod()
|
import os
import os.path as op
import re
import shutil
import numpy as np
from numpy.testing import assert_allclose, assert_array_almost_equal
import pytest
import mne
from mne.datasets import testing
from mne.io.kit.tests import data_dir as kit_data_dir
from mne.surface import dig_mri_distances
from mne.transforms import invert_transform
from mne.utils import (run_tests_if_main, requires_mayavi, traits_test,
modified_env)
data_path = testing.data_path(download=False)
raw_path = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc_raw.fif')
fname_trans = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-trans.fif')
kit_raw_path = op.join(kit_data_dir, 'test_bin_raw.fif')
subjects_dir = op.join(data_path, 'subjects')
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_decimation(subjects_dir_tmp):
"""Test CoregModel decimation of high-res to low-res head."""
from mne.gui._coreg_gui import CoregModel
# This makes the test much faster
subject_dir = op.join(subjects_dir_tmp, 'sample')
shutil.move(op.join(subject_dir, 'bem', 'outer_skin.surf'),
op.join(subject_dir, 'surf', 'lh.seghead'))
for fname in ('sample-head.fif', 'sample-head-dense.fif'):
os.remove(op.join(subject_dir, 'bem', fname))
model = CoregModel(guess_mri_subject=False)
with pytest.warns(RuntimeWarning, match='No low-resolution'):
model.mri.subjects_dir = op.dirname(subject_dir)
assert model.mri.subject == 'sample' # already set by setting subjects_dir
assert model.mri.bem_low_res.file == ''
assert len(model.mri.bem_low_res.surf.rr) == 2562
assert len(model.mri.bem_high_res.surf.rr) == 2562 # because we moved it
@requires_mayavi
@traits_test
def test_coreg_model(subjects_dir_tmp):
"""Test CoregModel."""
from mne.gui._coreg_gui import CoregModel
trans_dst = op.join(subjects_dir_tmp, 'test-trans.fif')
# make it use MNI fiducials
os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
'sample-fiducials.fif'))
model = CoregModel()
with pytest.raises(RuntimeError, match='Not enough information for savin'):
model.save_trans('blah.fif')
model.mri.subjects_dir = subjects_dir_tmp
model.mri.subject = 'sample'
assert model.mri.fid_ok # automated using MNI fiducials
model.hsp.file = raw_path
assert_allclose(model.hsp.lpa, [[-7.137e-2, 0, 5.122e-9]], 1e-4)
assert_allclose(model.hsp.rpa, [[+7.527e-2, 0, 5.588e-9]], 1e-4)
assert_allclose(model.hsp.nasion, [[+3.725e-9, 1.026e-1, 4.191e-9]], 1e-4)
assert model.has_lpa_data
assert model.has_nasion_data
assert model.has_rpa_data
assert len(model.hsp.eeg_points) > 1
assert len(model.mri.bem_low_res.surf.rr) == 2562
assert len(model.mri.bem_high_res.surf.rr) == 267122
lpa_distance = model.lpa_distance
nasion_distance = model.nasion_distance
rpa_distance = model.rpa_distance
avg_point_distance = np.mean(model.point_distance)
model.nasion_weight = 1.
model.fit_fiducials(0)
old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
model.nasion_distance ** 2)
assert new_x < old_x
model.fit_icp(0)
new_dist = np.mean(model.point_distance)
assert new_dist < avg_point_distance
model.save_trans(trans_dst)
trans = mne.read_trans(trans_dst)
assert_allclose(trans['trans'], model.head_mri_t)
# test restoring trans
x, y, z = 100, 200, 50
rot_x, rot_y, rot_z = np.rad2deg([1.5, 0.1, -1.2])
model.trans_x = x
model.trans_y = y
model.trans_z = z
model.rot_x = rot_x
model.rot_y = rot_y
model.rot_z = rot_z
trans = model.mri_head_t
model.reset_traits(["trans_x", "trans_y", "trans_z", "rot_x", "rot_y",
"rot_z"])
assert model.trans_x == 0
model.set_trans(trans)
assert_array_almost_equal(model.trans_x, x)
assert_array_almost_equal(model.trans_y, y)
assert_array_almost_equal(model.trans_z, z)
assert_array_almost_equal(model.rot_x, rot_x)
assert_array_almost_equal(model.rot_y, rot_y)
assert_array_almost_equal(model.rot_z, rot_z)
# info
assert isinstance(model.fid_eval_str, str)
assert isinstance(model.points_eval_str, str)
# scaling job
assert not model.can_prepare_bem_model
model.n_scale_params = 1
assert model.can_prepare_bem_model
model.prepare_bem_model = True
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('sample2', False)
assert sdir == subjects_dir_tmp
assert sfrom == 'sample'
assert sto == 'sample2'
assert_allclose(scale, model.parameters[6:9])
assert skip_fiducials is False
# find BEM files
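    # e.g. 'sample-inner_skull-bem.fif' matches and contributes 'inner_skull-bem'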
bems = set()
for fname in os.listdir(op.join(subjects_dir, 'sample', 'bem')):
match = re.match(r'sample-(.+-bem)\.fif', fname)
if match:
bems.add(match.group(1))
assert set(bemsol) == bems
model.prepare_bem_model = False
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('sample2', True)
assert bemsol == []
    assert skip_fiducials
model.load_trans(fname_trans)
model.save_trans(trans_dst)
trans = mne.read_trans(trans_dst)
assert_allclose(trans['trans'], model.head_mri_t)
assert_allclose(invert_transform(trans)['trans'][:3, 3] * 1000.,
[model.trans_x, model.trans_y, model.trans_z])
@requires_mayavi
@traits_test
def test_coreg_gui_display(subjects_dir_tmp, check_gui_ci):
"""Test CoregFrame."""
from mayavi import mlab
from tvtk.api import tvtk
home_dir = subjects_dir_tmp
# Remove the two files that will make the fiducials okay via MNI estimation
os.remove(op.join(subjects_dir_tmp, 'sample', 'bem',
'sample-fiducials.fif'))
os.remove(op.join(subjects_dir_tmp, 'sample', 'mri', 'transforms',
'talairach.xfm'))
with modified_env(_MNE_GUI_TESTING_MODE='true',
_MNE_FAKE_HOME_DIR=home_dir):
with pytest.raises(ValueError, match='not a valid subject'):
mne.gui.coregistration(
subject='Elvis', subjects_dir=subjects_dir_tmp)
# avoid modal dialog if SUBJECTS_DIR is set to a directory that
# does not contain valid subjects
ui, frame = mne.gui.coregistration(subjects_dir='')
mlab.process_ui_events()
ui.dispose()
mlab.process_ui_events()
ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir_tmp,
subject='sample')
mlab.process_ui_events()
assert not frame.model.mri.fid_ok
frame.model.mri.lpa = [[-0.06, 0, 0]]
frame.model.mri.nasion = [[0, 0.05, 0]]
frame.model.mri.rpa = [[0.08, 0, 0]]
assert frame.model.mri.fid_ok
frame.data_panel.raw_src.file = raw_path
assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
tvtk.SphereSource)
frame.data_panel.view_options_panel.eeg_obj.project_to_surface = True
assert isinstance(frame.eeg_obj.glyph.glyph.glyph_source.glyph_source,
tvtk.CylinderSource)
mlab.process_ui_events()
# grow hair (faster for low-res)
assert frame.data_panel.view_options_panel.head_high_res
frame.data_panel.view_options_panel.head_high_res = False
frame.model.grow_hair = 40.
# scale
frame.coreg_panel.n_scale_params = 3
frame.coreg_panel.scale_x_inc = True
assert frame.model.scale_x == 101.
frame.coreg_panel.scale_y_dec = True
assert frame.model.scale_y == 99.
# reset parameters
frame.coreg_panel.reset_params = True
assert frame.model.grow_hair == 0
assert not frame.data_panel.view_options_panel.head_high_res
# configuration persistence
assert (frame.model.prepare_bem_model)
frame.model.prepare_bem_model = False
frame.save_config(home_dir)
ui.dispose()
mlab.process_ui_events()
ui, frame = mne.gui.coregistration(subjects_dir=subjects_dir_tmp)
assert not frame.model.prepare_bem_model
assert not frame.data_panel.view_options_panel.head_high_res
ui.dispose()
mlab.process_ui_events()
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_model_with_fsaverage(tmpdir):
"""Test CoregModel with the fsaverage brain data."""
tempdir = str(tmpdir)
from mne.gui._coreg_gui import CoregModel
mne.create_default_subject(subjects_dir=tempdir,
fs_home=op.join(subjects_dir, '..'))
model = CoregModel()
model.mri.subjects_dir = tempdir
model.mri.subject = 'fsaverage'
assert model.mri.fid_ok
model.hsp.file = raw_path
lpa_distance = model.lpa_distance
nasion_distance = model.nasion_distance
rpa_distance = model.rpa_distance
avg_point_distance = np.mean(model.point_distance)
# test hsp point omission
model.nasion_weight = 1.
model.trans_y = -0.008
model.fit_fiducials(0)
model.omit_hsp_points(0.02)
assert model.hsp.n_omitted == 1
model.omit_hsp_points(np.inf)
assert model.hsp.n_omitted == 0
model.omit_hsp_points(0.02)
assert model.hsp.n_omitted == 1
model.omit_hsp_points(0.01)
assert model.hsp.n_omitted == 4
model.omit_hsp_points(0.005)
assert model.hsp.n_omitted == 40
model.omit_hsp_points(0.01)
assert model.hsp.n_omitted == 4
model.omit_hsp_points(0.02)
assert model.hsp.n_omitted == 1
# scale with 1 parameter
model.n_scale_params = 1
model.fit_fiducials(1)
old_x = lpa_distance ** 2 + rpa_distance ** 2 + nasion_distance ** 2
new_x = (model.lpa_distance ** 2 + model.rpa_distance ** 2 +
model.nasion_distance ** 2)
assert (new_x < old_x)
model.fit_icp(1)
avg_point_distance_1param = np.mean(model.point_distance)
assert (avg_point_distance_1param < avg_point_distance)
# scaling job
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('scaled', False)
assert sdir == tempdir
assert sfrom == 'fsaverage'
assert sto == 'scaled'
assert_allclose(scale, model.parameters[6:9])
assert set(bemsol) == {'inner_skull-bem'}
model.prepare_bem_model = False
sdir, sfrom, sto, scale, skip_fiducials, labels, annot, bemsol = \
model.get_scaling_job('scaled', False)
assert bemsol == []
# scale with 3 parameters
model.n_scale_params = 3
model.fit_icp(3)
assert (np.mean(model.point_distance) < avg_point_distance_1param)
# test switching raw disables point omission
assert model.hsp.n_omitted == 1
model.hsp.file = kit_raw_path
assert model.hsp.n_omitted == 0
@testing.requires_testing_data
@requires_mayavi
@traits_test
def test_coreg_gui_automation():
"""Test that properties get properly updated."""
from mne.gui._file_traits import DigSource
from mne.gui._fiducials_gui import MRIHeadWithFiducialsModel
from mne.gui._coreg_gui import CoregModel
subject = 'sample'
hsp = DigSource()
hsp.file = raw_path
mri = MRIHeadWithFiducialsModel(subjects_dir=subjects_dir, subject=subject)
model = CoregModel(mri=mri, hsp=hsp)
# gh-7254
assert not (model.nearest_transformed_high_res_mri_idx_hsp == 0).all()
model.fit_fiducials()
model.icp_iterations = 2
model.nasion_weight = 2.
model.fit_icp()
model.omit_hsp_points(distance=5e-3)
model.icp_iterations = 2
model.fit_icp()
errs_icp = np.median(
model._get_point_distance())
assert 2e-3 < errs_icp < 3e-3
info = mne.io.read_info(raw_path)
errs_nearest = np.median(
dig_mri_distances(info, fname_trans, subject, subjects_dir))
assert 1e-3 < errs_nearest < 2e-3
run_tests_if_main()
|
from pkg_resources import get_distribution, DistributionNotFound
from typing import Dict, Optional, Tuple, Union
from cerberus.base import (
rules_set_registry,
schema_registry,
DocumentError,
TypeDefinition,
UnconcernedValidator,
)
from cerberus.schema import SchemaError
from cerberus.validator import Validator
try:
__version__ = get_distribution("Cerberus").version
except DistributionNotFound:
__version__ = "unknown"
def validator_factory(
name: str,
bases: Union[type, Tuple[type], None] = None,
namespace: Optional[Dict] = None,
validated_schema: bool = True,
) -> type:
"""
Dynamically create a :class:`~cerberus.Validator` subclass. Docstrings of
    mixin classes are appended to the resulting class's docstring if ``__doc__``
    is not in :obj:`namespace`.
:param name: The name of the new class.
:param bases: Class(es) with additional and overriding attributes.
:param namespace: Attributes for the new class.
    :param validated_schema: Indicates whether schemas provided to the validator
                             are to be validated.
:return: The created class.
"""
validator_class = Validator if validated_schema else UnconcernedValidator
if namespace is None:
namespace = {}
if bases is None:
computed_bases = (validator_class,)
elif isinstance(bases, tuple) and validator_class not in bases:
computed_bases = bases + (validator_class,) # type: ignore
else:
computed_bases = (bases, validator_class) # type: ignore
docstrings = [x.__doc__ for x in computed_bases if x.__doc__]
if len(docstrings) > 1 and '__doc__' not in namespace:
namespace.update({'__doc__': '\n'.join(docstrings)})
return type(name, computed_bases, namespace)
__all__ = [
DocumentError.__name__,
SchemaError.__name__,
TypeDefinition.__name__,
UnconcernedValidator.__name__,
Validator.__name__,
"__version__",
'schema_registry',
'rules_set_registry',
validator_factory.__name__,
]
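# Illustrative usage sketch (an added assumption, not part of the original module):
# it shows how validator_factory composes optional mixin bases with Validator and
# merges their docstrings. The mixin below is hypothetical and exists only for the demo.
if __name__ == "__main__":  # pragma: no cover - demo only
    class _DemoMixin:
        """Hypothetical mixin whose docstring is merged into the generated class."""

    DemoValidator = validator_factory("DemoValidator", bases=_DemoMixin)
    assert issubclass(DemoValidator, Validator)
    assert "Hypothetical mixin" in (DemoValidator.__doc__ or "")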
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from ups import UPSCollector
##########################################################################
class TestUPSCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('UPSCollector', {
'interval': 10,
'bin': 'true',
'use_sudo': False,
})
self.collector = UPSCollector(config, None)
def test_import(self):
self.assertTrue(UPSCollector)
@patch('os.access', Mock(return_value=True))
@patch.object(Collector, 'publish')
def test_should_work_with_real_data_cp550slg(self, publish_mock):
patch_listdir = patch('os.listdir', Mock(return_value=['sda']))
patch_communicate = patch(
'subprocess.Popen.communicate',
Mock(return_value=(
self.getFixture('cp550slg').getvalue(),
'')))
patch_listdir.start()
patch_communicate.start()
self.collector.collect()
patch_listdir.stop()
patch_communicate.stop()
metrics = {
'battery.charge.charge': 100.0,
'battery.charge.low': 10.0,
'battery.charge.warning': 20.0,
'battery.runtime.runtime': 960.0,
'battery.runtime.low': 300.0,
'battery.voltage.voltage': 4.9,
'battery.voltage.nominal': 12.0,
'driver.parameter.pollfreq': 30.0,
'driver.parameter.pollinterval': 2.0,
'driver.version.internal': 0.34,
'input.transfer.high': 0.0,
'input.transfer.low': 0.0,
'input.voltage.voltage': 121.0,
'input.voltage.nominal': 120.0,
'output.voltage.voltage': 120.0,
'ups.delay.shutdown': 20.0,
'ups.delay.start': 30.0,
'ups.load.load': 46.0,
'ups.productid.productid': 501.0,
'ups.realpower.nominal': 330.0,
'ups.timer.shutdown': -60.0,
'ups.timer.start': 0.0,
'ups.vendorid.vendorid': 764.0,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import itertools
from datetime import datetime as dt, timedelta as dtd
import bson
import pytest
from mock import patch, sentinel
from arctic._util import mongo_count, FwPointersCfg
from arctic.scripts.arctic_fsck import main
from tests.integration.store.test_version_store import FwPointersCtx
from ...util import run_as_main, read_str_as_pandas
@pytest.fixture(scope='function')
def library_name():
return 'user.library'
ts = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0""")
some_object = {'thing': sentinel.val}
@pytest.mark.parametrize(
['dry_run', 'data', 'fw_pointers_config'],
[(x, y, z) for (x, y, z) in itertools.product(
[True, False], [some_object, ts], [FwPointersCfg.DISABLED, FwPointersCfg.HYBRID, FwPointersCfg.ENABLED])])
def test_cleanup_orphaned_chunks(mongo_host, library, data, dry_run, fw_pointers_config):
"""
Check that we do / don't cleanup chunks based on the dry-run
"""
with FwPointersCtx(fw_pointers_config):
yesterday = dt.utcnow() - dtd(days=1, seconds=1)
_id = bson.ObjectId.from_datetime(yesterday)
with patch("bson.ObjectId", return_value=_id):
library.write('symbol', data, prune_previous_version=False)
# Number of chunks
chunk_count = mongo_count(library._collection)
# Remove the version document ; should cleanup
library._collection.versions.delete_one({'_id': _id})
# No cleanup on dry-run
if dry_run:
run_as_main(main, '--library', 'user.library', '--host', mongo_host)
assert mongo_count(library._collection) == chunk_count
else:
run_as_main(main, '--library', 'user.library', '--host', mongo_host, '-f')
assert mongo_count(library._collection) == 0
@pytest.mark.parametrize(
['dry_run', 'data', 'fw_pointers_config'],
[(x, y, z) for (x, y, z) in itertools.product(
[True, False], [some_object, ts], [FwPointersCfg.DISABLED, FwPointersCfg.HYBRID, FwPointersCfg.ENABLED])])
def test_cleanup_noop(mongo_host, library, data, dry_run, fw_pointers_config):
"""
    Check that nothing is cleaned up when no chunks are orphaned
"""
with FwPointersCtx(fw_pointers_config):
yesterday = dt.utcnow() - dtd(days=1, seconds=1)
_id = bson.ObjectId.from_datetime(yesterday)
with patch("bson.ObjectId", return_value=_id):
library.write('symbol', data, prune_previous_version=False)
# Number of chunks
chunk_count = mongo_count(library._collection)
# No cleanup on dry-run
if dry_run:
run_as_main(main, '--library', 'user.library', '--host', mongo_host)
assert mongo_count(library._collection) == chunk_count
assert repr(library.read('symbol').data) == repr(data)
else:
run_as_main(main, '--library', 'user.library', '--host', mongo_host, '-f')
assert mongo_count(library._collection) == chunk_count
assert repr(library.read('symbol').data) == repr(data)
@pytest.mark.parametrize(
['dry_run', 'data', 'fw_pointers_config'],
[(x, y, z) for (x, y, z) in itertools.product(
[True, False], [some_object, ts], [FwPointersCfg.DISABLED, FwPointersCfg.HYBRID, FwPointersCfg.ENABLED])])
def test_cleanup_orphaned_chunks_ignores_recent(mongo_host, library, data, dry_run, fw_pointers_config):
"""
We don't cleanup any chunks in the range of today. That's just asking for trouble
"""
with FwPointersCtx(fw_pointers_config):
yesterday = dt.utcnow() - dtd(hours=12)
_id = bson.ObjectId.from_datetime(yesterday)
with patch("bson.ObjectId", return_value=_id):
library.write('symbol', data, prune_previous_version=False)
chunk_count = mongo_count(library._collection)
library._collection.versions.delete_one({'_id': _id})
if dry_run:
run_as_main(main, '--library', 'user.library', '--host', mongo_host)
assert mongo_count(library._collection) == chunk_count
else:
run_as_main(main, '--library', 'user.library', '--host', mongo_host, '-f')
assert mongo_count(library._collection) == chunk_count
@pytest.mark.parametrize('data, fw_pointers_config',
[(x, y) for (x, y) in itertools.product(
[some_object, ts],
[FwPointersCfg.DISABLED, FwPointersCfg.HYBRID, FwPointersCfg.ENABLED])])
def test_cleanup_orphaned_chunk_doesnt_break_versions(mongo_host, library, data, fw_pointers_config):
"""
    Check that chunks pointed to by more than one version aren't inadvertently cleared
"""
with FwPointersCtx(fw_pointers_config):
yesterday = dt.utcnow() - dtd(days=1, seconds=1)
_id = bson.ObjectId.from_datetime(yesterday)
with patch("bson.ObjectId", return_value=_id):
library.write('symbol', data, prune_previous_version=False)
# Re-Write the data again
# Write a whole new version rather than going down the append path...
# - we want two self-standing versions, the removal of one shouldn't break the other...
with patch('arctic.store._ndarray_store._APPEND_COUNT', 0):
library.write('symbol', data, prune_previous_version=False)
library._delete_version('symbol', 1)
library._collection.versions.delete_one({'_id': _id})
assert repr(library.read('symbol').data) == repr(data)
run_as_main(main, '--library', 'user.library', '--host', mongo_host, '-f')
assert repr(library.read('symbol').data) == repr(data)
library.delete('symbol')
assert mongo_count(library._collection.versions) == 0
@pytest.mark.parametrize(
['dry_run', 'data', 'fw_pointers_config'],
[(x, y, z) for (x, y, z) in itertools.product(
[True, False], [some_object, ts], [FwPointersCfg.DISABLED, FwPointersCfg.HYBRID, FwPointersCfg.ENABLED])])
def test_cleanup_orphaned_snapshots(mongo_host, library, data, dry_run, fw_pointers_config):
"""
    Check that we do / don't clean up orphaned snapshot references based on the dry-run
"""
with FwPointersCtx(fw_pointers_config):
yesterday = dt.utcnow() - dtd(days=1, seconds=1)
_id = bson.ObjectId.from_datetime(yesterday)
library.write('symbol', data, prune_previous_version=False)
with patch("bson.ObjectId", return_value=_id):
library.snapshot('snap_name')
            # Remove the snapshot document; the orphaned snapshot reference should be cleaned up
assert library._collection.snapshots.delete_one({})
# No cleanup on dry-run
if dry_run:
run_as_main(main, '--library', 'user.library', '--host', mongo_host)
assert mongo_count(library._collection) > 0
assert mongo_count(library._collection.versions)
assert repr(library.read('symbol').data) == repr(data)
                # Nothing done
assert len(library._collection.versions.find_one({})['parent'])
else:
run_as_main(main, '--library', 'user.library', '--host', mongo_host, '-f')
assert mongo_count(library._collection) > 0
assert mongo_count(library._collection.versions)
# Data still available (write with prune_previous_version will do the cleanup)
assert repr(library.read('symbol').data) == repr(data)
# Snapshot cleaned up
assert not len(library._collection.versions.find_one({})['parent'])
@pytest.mark.parametrize(
['dry_run', 'data', 'fw_pointers_config'],
[(x, y, z) for (x, y, z) in itertools.product(
[True, False], [some_object, ts], [FwPointersCfg.DISABLED, FwPointersCfg.HYBRID, FwPointersCfg.ENABLED])])
def test_cleanup_orphaned_snapshots_nop(mongo_host, library, data, dry_run, fw_pointers_config):
"""
    Check that nothing is cleaned up when snapshots are still referenced (dry-run or not)
"""
with FwPointersCtx(fw_pointers_config):
yesterday = dt.utcnow() - dtd(days=1, seconds=1)
_id = bson.ObjectId.from_datetime(yesterday)
library.write('symbol', data, prune_previous_version=False)
with patch("bson.ObjectId", return_value=_id):
library.snapshot('snap_name')
# No cleanup on dry-run
if dry_run:
run_as_main(main, '--library', 'user.library', '--host', mongo_host)
assert mongo_count(library._collection) > 0
assert mongo_count(library._collection.versions)
assert repr(library.read('symbol').data) == repr(data)
# Nothing done
assert len(library._collection.versions.find_one({})['parent'])
else:
run_as_main(main, '--library', 'user.library', '--host', mongo_host, '-f')
assert mongo_count(library._collection) > 0
assert mongo_count(library._collection.versions)
# Data still available (write with prune_previous_version will do the cleanup)
assert repr(library.read('symbol').data) == repr(data)
# Nothing done
assert len(library._collection.versions.find_one({})['parent'])
@pytest.mark.parametrize(
['dry_run', 'data', 'fw_pointers_config'],
[(x, y, z) for (x, y, z) in itertools.product(
[True, False], [some_object, ts], [FwPointersCfg.DISABLED, FwPointersCfg.HYBRID, FwPointersCfg.ENABLED])])
def test_dont_cleanup_recent_orphaned_snapshots(mongo_host, library, data, dry_run, fw_pointers_config):
"""
    Check that orphaned snapshot references that are too recent are never cleaned up
"""
with FwPointersCtx(fw_pointers_config):
today = dt.utcnow() - dtd(hours=12, seconds=1)
_id = bson.ObjectId.from_datetime(today)
library.write('symbol', data, prune_previous_version=False)
with patch("bson.ObjectId", return_value=_id):
library.snapshot('snap_name')
            # Remove the snapshot documents; the references are too recent to be cleaned up
assert library._collection.snapshots.delete_many({})
# No cleanup on dry-run
if dry_run:
run_as_main(main, '--library', 'user.library', '--host', mongo_host)
assert mongo_count(library._collection) > 0
assert mongo_count(library._collection.versions)
assert repr(library.read('symbol').data) == repr(data)
# Nothing done
assert len(library._collection.versions.find_one({})['parent'])
else:
run_as_main(main, '--library', 'user.library', '--host', mongo_host, '-f')
assert mongo_count(library._collection) > 0
assert mongo_count(library._collection.versions)
# Data still available (write with prune_previous_version will do the cleanup)
assert repr(library.read('symbol').data) == repr(data)
                # Snapshot reference not cleaned up (too recent)
assert len(library._collection.versions.find_one({})['parent'])
|
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.components.xbox.const import DOMAIN, OAUTH2_AUTHORIZE, OAUTH2_TOKEN
from homeassistant.helpers import config_entry_oauth2_flow
from tests.async_mock import patch
from tests.common import MockConfigEntry
CLIENT_ID = "1234"
CLIENT_SECRET = "5678"
async def test_abort_if_existing_entry(hass):
"""Check flow abort when an entry already exist."""
MockConfigEntry(domain=DOMAIN).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
"xbox", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_full_flow(hass, aiohttp_client, aioclient_mock, current_request):
"""Check full flow."""
assert await setup.async_setup_component(
hass,
"xbox",
{
"xbox": {"client_id": CLIENT_ID, "client_secret": CLIENT_SECRET},
"http": {"base_url": "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
"xbox", context={"source": config_entries.SOURCE_USER}
)
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
scope = "+".join(["Xboxlive.signin", "Xboxlive.offline_access"])
assert result["url"] == (
f"{OAUTH2_AUTHORIZE}?response_type=code&client_id={CLIENT_ID}"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}&scope={scope}"
)
client = await aiohttp_client(hass.http.app)
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
OAUTH2_TOKEN,
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.xbox.async_setup_entry", return_value=True
) as mock_setup:
await hass.config_entries.flow.async_configure(result["flow_id"])
assert len(hass.config_entries.async_entries(DOMAIN)) == 1
assert len(mock_setup.mock_calls) == 1
|
import os
from urllib.request import urlopen
# Internal imports
import vcr
from vcr.persisters.filesystem import FilesystemPersister
class CustomFilesystemPersister(object):
"""Behaves just like default FilesystemPersister but adds .test extension
to the cassette file"""
@staticmethod
def load_cassette(cassette_path, serializer):
cassette_path += ".test"
return FilesystemPersister.load_cassette(cassette_path, serializer)
@staticmethod
def save_cassette(cassette_path, cassette_dict, serializer):
cassette_path += ".test"
FilesystemPersister.save_cassette(cassette_path, cassette_dict, serializer)
def test_save_cassette_with_custom_persister(tmpdir, httpbin):
"""Ensure you can save a cassette using custom persister"""
my_vcr = vcr.VCR()
my_vcr.register_persister(CustomFilesystemPersister)
    # Check to make sure the directory doesn't exist
assert not os.path.exists(str(tmpdir.join("nonexistent")))
# Run VCR to create dir and cassette file using new save_cassette callback
with my_vcr.use_cassette(str(tmpdir.join("nonexistent", "cassette.yml"))):
urlopen(httpbin.url).read()
# Callback should have made the file and the directory
assert os.path.exists(str(tmpdir.join("nonexistent", "cassette.yml.test")))
def test_load_cassette_with_custom_persister(tmpdir, httpbin):
"""
Ensure you can load a cassette using custom persister
"""
my_vcr = vcr.VCR()
my_vcr.register_persister(CustomFilesystemPersister)
test_fixture = str(tmpdir.join("synopsis.json.test"))
with my_vcr.use_cassette(test_fixture, serializer="json"):
response = urlopen(httpbin.url).read()
assert b"difficult sometimes" in response
|
from __future__ import unicode_literals
import os
import re
import ssl
from lib.fun.decorator import magic
from lib.fun.osjudger import py_ver_egt_3
from lib.data.data import paths, pyoptions
from lib.fun.fun import unique, cool, walk_pure_file
try:
# ssl._create_unverified_context is present in Python 2.7.9 and later
ssl._create_default_https_context = ssl._create_unverified_context
except:
try:
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
except:
pass
# in python3: urllib + urllib2 -> urllib, and
# urllib2.urlopen() -> urllib.request.urlopen(), urllib2.Request() -> urllib.request.Request()
try:
if py_ver_egt_3():
from urllib.request import urlopen
else:
from urllib2 import urlopen
except ImportError as e:
print(e.message)
exit(cool.red('[-] can not import urllib or urllib2 module:') + pyoptions.CRLF)
passcratch_black_list = walk_pure_file(paths.scratch_blacklist)
def stripHTMLTags(html):
text = html
rules = [
{r'>\s+': '>'}, # Remove spaces after a tag opens or closes.
{r'\s+': ' '}, # Replace consecutive spaces.
{r'\s*<br\s*/?>\s*': '\n'}, # Newline after a <br>.
        {r'</(div)\s*>\s*': '\n'}, # Newline after a closing </div>.
        {r'</(p|h\d)\s*>\s*': '\n\n'}, # Blank line after </p> and heading closers.
{r'<head>.*<\s*(/head|body)[^>]*>': ''}, # Remove <head> to </head>.
{r'<a\s+href="([^"]+)"[^>]*>.*</a>': r'\1'}, # Show links instead of texts.
{r'[ \t]*<[^<]*?/?>': ''}, # Remove remaining tags.
{r'^\s+': ''} # Remove spaces at the beginning.
]
for rule in rules:
for (k, v) in rule.items():
try:
regex = re.compile(k)
text = str(regex.sub(v, text))
except:
pass
htmlspecial = {
        '&nbsp;': ' ', '&amp;': '&', '&quot;': '"',
        '&lt;': '<', '&gt;': '>'
}
for (k, v) in htmlspecial.items():
text = text.replace(k, v)
return text
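# Illustrative example (an added note, values assumed): stripHTMLTags('<p>Hello&nbsp;<b>world</b></p>')
# yields roughly 'Hello world\n\n' - tags are dropped, block-level closers become newlines,
# and the HTML entities above are decoded back to plain characters.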
def scratchword(siteList):
    results = []
    # Collect candidate words scraped from each page before filtering.
    y_arr = []
for site in siteList:
try:
site = site.strip()
response = urlopen(site)
response.addheaders = \
[('User-agent', 'Mozilla/5.0 (Windows NT 6.1; WOW64; rv:35.0) Gecko/20100101 Firefox/35.0')]
            # decode('utf-8') is required, otherwise this fails on both python2 and python3
try:
x = stripHTMLTags(response.read().decode('utf-8') + site)
except:
try:
x = stripHTMLTags(response.read().decode('GBK') + site)
except:
exit(cool.red("[-] Page coding parse error, please use 'extend' plug instead") + pyoptions.CRLF)
# Replace junk found in our response
x = x.replace('\n', ' ')
x = x.replace(',', ' ')
x = x.replace('.', ' ')
x = x.replace('/', ' ')
x = re.sub('[^A-Za-z0-9]+', ' ', x)
x_arr = x.split(' ')
for y in x_arr:
y = y.strip()
if y and (len(y) >= 5):
if ((y[0] == '2') and (y[1] == 'F')) \
or ((y[0] == '2') and (y[1] == '3')) \
or ((y[0] == '3') and (y[1] == 'F')) or ((y[0] == '3') and (y[1] == 'D')):
y = y[2:]
                    if 5 <= len(y) <= 8 and y.lower() not in passcratch_black_list:
                        y_arr.append(y)
                    elif 9 <= len(y) <= 25 and y.lower() not in passcratch_black_list:
                        y_arr.append(y)
except Exception:
exit(cool.red("[-] Process abort, please check url and try use 'extend' function instead") + pyoptions.CRLF)
for yy in unique(y_arr):
if yy.strip().isdigit():
pass
else:
if not re.findall(pyoptions.scratch_filter, yy.strip(), flags=re.I):
                results.append(yy.strip())
    return unique(results)
def checkurl(urlike):
try:
if not str(urlike).startswith('http'):
return 'http://' + urlike.strip()
else:
return urlike
except:
exit(cool.red("[-] Incorrect url/uri: {0}".format(cool.red(urlike.strip()))))
def scratch_magic(*args):
"""[url_or_file]"""
args = list(args[0])
if len(args) == 1:
target = paths.scrapersites_path
elif len(args) == 2:
target = args[1]
else:
exit(pyoptions.CRLF + cool.fuchsia("[!] Usage: {} {}".format(args[0], pyoptions.plugins_info.get(args[0]))))
sites = []
if os.path.isfile(target):
with open(target, 'r') as f:
for _ in f.readlines():
if _.startswith(pyoptions.annotator):
pass
else:
sites.append(checkurl(_))
else:
sites.append(checkurl(target))
rawlist = scratchword(sites)
@magic
def scratch():
return rawlist
|
from functools import partial
import logging
from urllib.parse import urlparse
import denonavr
from getmac import get_mac_address
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.components import ssdp
from homeassistant.const import CONF_HOST, CONF_MAC
from homeassistant.core import callback
from homeassistant.helpers.device_registry import format_mac
from .receiver import ConnectDenonAVR
_LOGGER = logging.getLogger(__name__)
DOMAIN = "denonavr"
SUPPORTED_MANUFACTURERS = ["Denon", "DENON", "DENON PROFESSIONAL", "Marantz"]
IGNORED_MODELS = ["HEOS 1", "HEOS 3", "HEOS 5", "HEOS 7"]
CONF_SHOW_ALL_SOURCES = "show_all_sources"
CONF_ZONE2 = "zone2"
CONF_ZONE3 = "zone3"
CONF_TYPE = "type"
CONF_MODEL = "model"
CONF_MANUFACTURER = "manufacturer"
CONF_SERIAL_NUMBER = "serial_number"
DEFAULT_SHOW_SOURCES = False
DEFAULT_TIMEOUT = 5
DEFAULT_ZONE2 = False
DEFAULT_ZONE3 = False
CONFIG_SCHEMA = vol.Schema({vol.Optional(CONF_HOST): str})
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options for the component."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Init object."""
self.config_entry = config_entry
async def async_step_init(self, user_input=None):
"""Manage the options."""
if user_input is not None:
return self.async_create_entry(title="", data=user_input)
settings_schema = vol.Schema(
{
vol.Optional(
CONF_SHOW_ALL_SOURCES,
default=self.config_entry.options.get(
CONF_SHOW_ALL_SOURCES, DEFAULT_SHOW_SOURCES
),
): bool,
vol.Optional(
CONF_ZONE2,
default=self.config_entry.options.get(CONF_ZONE2, DEFAULT_ZONE2),
): bool,
vol.Optional(
CONF_ZONE3,
default=self.config_entry.options.get(CONF_ZONE3, DEFAULT_ZONE3),
): bool,
}
)
return self.async_show_form(step_id="init", data_schema=settings_schema)
class DenonAvrFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a Denon AVR config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize the Denon AVR flow."""
self.host = None
self.serial_number = None
self.model_name = None
self.timeout = DEFAULT_TIMEOUT
self.show_all_sources = DEFAULT_SHOW_SOURCES
self.zone2 = DEFAULT_ZONE2
self.zone3 = DEFAULT_ZONE3
self.d_receivers = []
@staticmethod
@callback
def async_get_options_flow(config_entry) -> OptionsFlowHandler:
"""Get the options flow."""
return OptionsFlowHandler(config_entry)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
errors = {}
if user_input is not None:
# check if IP address is set manually
host = user_input.get(CONF_HOST)
if host:
self.host = host
return await self.async_step_connect()
# discovery using denonavr library
self.d_receivers = await self.hass.async_add_executor_job(denonavr.discover)
# More than one receiver could be discovered by that method
if len(self.d_receivers) == 1:
self.host = self.d_receivers[0]["host"]
return await self.async_step_connect()
if len(self.d_receivers) > 1:
# show selection form
return await self.async_step_select()
errors["base"] = "discovery_error"
return self.async_show_form(
step_id="user", data_schema=CONFIG_SCHEMA, errors=errors
)
async def async_step_select(self, user_input=None):
"""Handle multiple receivers found."""
errors = {}
if user_input is not None:
self.host = user_input["select_host"]
return await self.async_step_connect()
select_scheme = vol.Schema(
{
vol.Required("select_host"): vol.In(
[d_receiver["host"] for d_receiver in self.d_receivers]
)
}
)
return self.async_show_form(
step_id="select", data_schema=select_scheme, errors=errors
)
async def async_step_confirm(self, user_input=None):
"""Allow the user to confirm adding the device."""
if user_input is not None:
return await self.async_step_connect()
return self.async_show_form(step_id="confirm")
async def async_step_connect(self, user_input=None):
"""Connect to the receiver."""
connect_denonavr = ConnectDenonAVR(
self.hass,
self.host,
self.timeout,
self.show_all_sources,
self.zone2,
self.zone3,
)
if not await connect_denonavr.async_connect_receiver():
return self.async_abort(reason="cannot_connect")
receiver = connect_denonavr.receiver
mac_address = await self.async_get_mac(self.host)
if not self.serial_number:
self.serial_number = receiver.serial_number
if not self.model_name:
self.model_name = (receiver.model_name).replace("*", "")
if self.serial_number is not None:
unique_id = self.construct_unique_id(self.model_name, self.serial_number)
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
else:
_LOGGER.error(
"Could not get serial number of host %s, "
"unique_id's will not be available",
self.host,
)
for entry in self._async_current_entries():
if entry.data[CONF_HOST] == self.host:
return self.async_abort(reason="already_configured")
return self.async_create_entry(
title=receiver.name,
data={
CONF_HOST: self.host,
CONF_MAC: mac_address,
CONF_TYPE: receiver.receiver_type,
CONF_MODEL: self.model_name,
CONF_MANUFACTURER: receiver.manufacturer,
CONF_SERIAL_NUMBER: self.serial_number,
},
)
async def async_step_ssdp(self, discovery_info):
"""Handle a discovered Denon AVR.
This flow is triggered by the SSDP component. It will check if the
host is already configured and delegate to the import step if not.
"""
        # Filter out non-Denon AVRs
if (
discovery_info.get(ssdp.ATTR_UPNP_MANUFACTURER)
not in SUPPORTED_MANUFACTURERS
):
return self.async_abort(reason="not_denonavr_manufacturer")
# Check if required information is present to set the unique_id
if (
ssdp.ATTR_UPNP_MODEL_NAME not in discovery_info
or ssdp.ATTR_UPNP_SERIAL not in discovery_info
):
return self.async_abort(reason="not_denonavr_missing")
self.model_name = discovery_info[ssdp.ATTR_UPNP_MODEL_NAME].replace("*", "")
self.serial_number = discovery_info[ssdp.ATTR_UPNP_SERIAL]
self.host = urlparse(discovery_info[ssdp.ATTR_SSDP_LOCATION]).hostname
if self.model_name in IGNORED_MODELS:
return self.async_abort(reason="not_denonavr_manufacturer")
unique_id = self.construct_unique_id(self.model_name, self.serial_number)
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured({CONF_HOST: self.host})
# pylint: disable=no-member # https://github.com/PyCQA/pylint/issues/3167
self.context.update(
{
"title_placeholders": {
"name": discovery_info.get(ssdp.ATTR_UPNP_FRIENDLY_NAME, self.host)
}
}
)
return await self.async_step_confirm()
@staticmethod
def construct_unique_id(model_name, serial_number):
"""Construct the unique id from the ssdp discovery or user_step."""
return f"{model_name}-{serial_number}"
async def async_get_mac(self, host):
"""Get the mac address of the DenonAVR receiver."""
try:
mac_address = await self.hass.async_add_executor_job(
partial(get_mac_address, **{"ip": host})
)
if not mac_address:
mac_address = await self.hass.async_add_executor_job(
partial(get_mac_address, **{"hostname": host})
)
except Exception as err: # pylint: disable=broad-except
_LOGGER.error("Unable to get mac address: %s", err)
mac_address = None
if mac_address is not None:
mac_address = format_mac(mac_address)
return mac_address
|
import os
import sh
from molecule import logger
from molecule import util
from molecule.verifier.lint import base
LOG = logger.get_logger(__name__)
class Yamllint(base.Base):
"""
`Yamllint`_ is not the default verifier linter.
`Yamllint`_ is a linter for yaml files.
Additional options can be passed to `yamllint` through the options
dict. Any option set in this section will override the defaults.
.. code-block:: yaml
verifier:
name: goss
lint:
name: yamllint
options:
config-file: foo/bar
Test file linting can be disabled by setting `enabled` to False.
.. code-block:: yaml
verifier:
name: goss
lint:
name: yamllint
enabled: False
Environment variables can be passed to lint.
.. code-block:: yaml
verifier:
name: goss
lint:
name: yamllint
env:
FOO: bar
.. _`Yamllint`: https://github.com/adrienverge/yamllint
"""
def __init__(self, config):
"""
Sets up the requirements to execute `yamllint` and returns None.
:param config: An instance of a Molecule config.
:return: None
"""
super(Yamllint, self).__init__(config)
self._yamllint_command = None
if config:
self._tests = self._get_tests()
@property
def default_options(self):
return {
's': True,
}
@property
def default_env(self):
return util.merge_dicts(os.environ.copy(), self._config.env)
def bake(self):
"""
Bake a `yamllint` command so it's ready to execute and returns None.
:return: None
"""
self._yamllint_command = sh.yamllint.bake(
self.options,
self._tests,
_env=self.env,
_out=LOG.out,
_err=LOG.error)
def execute(self):
if not self.enabled:
msg = 'Skipping, verifier_lint is disabled.'
LOG.warn(msg)
return
if not len(self._tests) > 0:
msg = 'Skipping, no tests found.'
LOG.warn(msg)
return
if self._yamllint_command is None:
self.bake()
msg = 'Executing Yamllint on files found in {}/...'.format(
self._config.verifier.directory)
LOG.info(msg)
try:
util.run_command(self._yamllint_command, debug=self._config.debug)
msg = 'Lint completed successfully.'
LOG.success(msg)
except sh.ErrorReturnCode as e:
util.sysexit(e.exit_code)
def _get_tests(self):
"""
Walk the verifier's directory for tests and returns a list.
:return: list
"""
return [
filename for filename in util.os_walk(
self._config.verifier.directory, 'test_*.yml')
]
|
import numpy as np
import unittest
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.utils import testing
class SampleDataset(GetterDataset):
def __init__(self, iterable=tuple):
super(SampleDataset, self).__init__()
self.add_getter('item0', self.get_item0)
self.add_getter(iterable(('item1', 'item2')), self.get_item1_item2)
self.add_getter(('item3',), self.get_item3)
self.count = 0
def __len__(self):
return 10
def get_item0(self, i):
self.count += 1
return 'item0({:d})'.format(i)
def get_item1_item2(self, i):
self.count += 1
return 'item1({:d})'.format(i), 'item2({:d})'.format(i)
def get_item3(self, i):
self.count += 1
return ('item3({:d})'.format(i),)
@testing.parameterize(
{'iterable': tuple},
{'iterable': list},
{'iterable': np.array},
)
class TestGetterDataset(unittest.TestCase):
def setUp(self):
self.dataset = SampleDataset(self.iterable)
def test_keys(self):
self.assertEqual(
self.dataset.keys, ('item0', 'item1', 'item2', 'item3'))
def test_get_example_by_keys(self):
example = self.dataset.get_example_by_keys(1, (1, 2, 3))
self.assertEqual(example, ('item1(1)', 'item2(1)', 'item3(1)'))
self.assertEqual(self.dataset.count, 2)
def test_set_keys_single_name(self):
self.dataset.keys = 'item0'
self.assertEqual(self.dataset.keys, 'item0')
self.assertEqual(self.dataset[1], 'item0(1)')
def test_set_keys_single_index(self):
self.dataset.keys = 0
self.assertEqual(self.dataset.keys, 'item0')
self.assertEqual(self.dataset[1], 'item0(1)')
def test_set_keys_single_tuple_name(self):
if self.iterable is np.array:
self.skipTest('ndarray of strings is not supported')
self.dataset.keys = self.iterable(('item1',))
self.assertEqual(self.dataset.keys, ('item1',))
self.assertEqual(self.dataset[2], ('item1(2)',))
def test_set_keys_single_tuple_index(self):
self.dataset.keys = self.iterable((1,))
self.assertEqual(self.dataset.keys, ('item1',))
self.assertEqual(self.dataset[2], ('item1(2)',))
def test_set_keys_multiple_name(self):
if self.iterable is np.array:
self.skipTest('ndarray of strings is not supported')
self.dataset.keys = self.iterable(('item0', 'item2'))
self.assertEqual(self.dataset.keys, ('item0', 'item2'))
self.assertEqual(self.dataset[3], ('item0(3)', 'item2(3)'))
def test_set_keys_multiple_index(self):
self.dataset.keys = self.iterable((0, 2))
self.assertEqual(self.dataset.keys, ('item0', 'item2'))
self.assertEqual(self.dataset[3], ('item0(3)', 'item2(3)'))
def test_set_keys_multiple_bool(self):
self.dataset.keys = self.iterable((True, False, True, False))
self.assertEqual(self.dataset.keys, ('item0', 'item2'))
self.assertEqual(self.dataset[3], ('item0(3)', 'item2(3)'))
def test_set_keys_multiple_mixed(self):
if self.iterable is np.array:
self.skipTest('ndarray of strings is not supported')
self.dataset.keys = self.iterable(('item0', 2))
self.assertEqual(self.dataset.keys, ('item0', 'item2'))
self.assertEqual(self.dataset[3], ('item0(3)', 'item2(3)'))
def test_set_keys_invalid_name(self):
with self.assertRaises(KeyError):
self.dataset.keys = 'invalid'
def test_set_keys_invalid_index(self):
with self.assertRaises(IndexError):
self.dataset.keys = 4
def test_set_keys_invalid_bool(self):
with self.assertRaises(ValueError):
self.dataset.keys = (True, True)
testing.run_module(__name__, __file__)
|
from pyaehw4a1 import exceptions
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import hisense_aehw4a1
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
async def test_creating_entry_sets_up_climate_discovery(hass):
"""Test setting up Hisense AEH-W4A1 loads the climate component."""
with patch(
"homeassistant.components.hisense_aehw4a1.config_flow.AehW4a1.discovery",
return_value=["1.2.3.4"],
):
with patch(
"homeassistant.components.hisense_aehw4a1.climate.async_setup_entry",
return_value=True,
) as mock_setup:
result = await hass.config_entries.flow.async_init(
hisense_aehw4a1.DOMAIN, context={"source": config_entries.SOURCE_USER}
)
# Confirmation form
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result = await hass.config_entries.flow.async_configure(
result["flow_id"], {}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
async def test_configuring_hisense_w4a1_create_entry(hass):
"""Test that specifying config will create an entry."""
with patch(
"homeassistant.components.hisense_aehw4a1.config_flow.AehW4a1.check",
return_value=True,
):
with patch(
"homeassistant.components.hisense_aehw4a1.async_setup_entry",
return_value=True,
) as mock_setup:
await async_setup_component(
hass,
hisense_aehw4a1.DOMAIN,
{"hisense_aehw4a1": {"ip_address": ["1.2.3.4"]}},
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 1
async def test_configuring_hisense_w4a1_not_creates_entry_for_device_not_found(hass):
"""Test that specifying config will not create an entry."""
with patch(
"homeassistant.components.hisense_aehw4a1.config_flow.AehW4a1.check",
side_effect=exceptions.ConnectionError,
):
with patch(
"homeassistant.components.hisense_aehw4a1.async_setup_entry",
return_value=True,
) as mock_setup:
await async_setup_component(
hass,
hisense_aehw4a1.DOMAIN,
{"hisense_aehw4a1": {"ip_address": ["1.2.3.4"]}},
)
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 0
async def test_configuring_hisense_w4a1_not_creates_entry_for_empty_import(hass):
"""Test that specifying config will not create an entry."""
with patch(
"homeassistant.components.hisense_aehw4a1.async_setup_entry",
return_value=True,
) as mock_setup:
await async_setup_component(hass, hisense_aehw4a1.DOMAIN, {})
await hass.async_block_till_done()
assert len(mock_setup.mock_calls) == 0
|
import binascii
from os import urandom
from urllib.parse import urljoin
from django.db import models
from django.utils import timezone
from django.utils.translation import gettext_lazy as _, pgettext_lazy
from shop.models import order
class Order(order.BaseOrder):
"""Default materialized model for Order"""
number = models.PositiveIntegerField(
_("Order Number"),
null=True,
default=None,
unique=True,
)
shipping_address_text = models.TextField(
_("Shipping Address"),
blank=True,
null=True,
help_text=_("Shipping address at the moment of purchase."),
)
billing_address_text = models.TextField(
_("Billing Address"),
blank=True,
null=True,
help_text=_("Billing address at the moment of purchase."),
)
token = models.CharField(
_("Token"),
max_length=40,
editable=False,
null=True,
help_text=_("Secret key to verify ownership on detail view without requiring authentication."),
)
class Meta:
verbose_name = pgettext_lazy('order_models', "Order")
verbose_name_plural = pgettext_lazy('order_models', "Orders")
def get_or_assign_number(self):
"""
Set a unique number to identify this Order object. The first 4 digits represent the
current year. The last five digits represent a zero-padded incremental counter.
"""
if self.number is None:
epoch = timezone.now()
epoch = epoch.replace(epoch.year, 1, 1, 0, 0, 0, 0)
aggr = Order.objects.filter(number__isnull=False, created_at__gt=epoch).aggregate(models.Max('number'))
try:
epoc_number = int(str(aggr['number__max'])[4:]) + 1
self.number = int('{0}{1:05d}'.format(epoch.year, epoc_number))
except (KeyError, ValueError):
# the first order this year
self.number = int('{0}00001'.format(epoch.year))
return self.get_number()
def get_number(self):
number = str(self.number)
return '{}-{}'.format(number[:4], number[4:])
@classmethod
def resolve_number(cls, number):
bits = number.split('-')
return dict(number=''.join(bits))
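    # Worked example of the scheme described in get_or_assign_number (values assumed for
    # illustration): the first order of 2023 gets number 202300001, get_number() renders
    # it as "2023-00001", and resolve_number("2023-00001") returns {'number': '202300001'}.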
def assign_secret(self):
self.token = binascii.hexlify(urandom(20)).decode()
return self.token
@property
def secret(self):
return self.token
def get_absolute_url(self):
url = super().get_absolute_url()
if self.token:
if not url.endswith('/'):
url += '/'
url = urljoin(url, self.token)
return url
def populate_from_cart(self, cart, request):
self.shipping_address_text = cart.shipping_address.as_text() if cart.shipping_address else ''
self.billing_address_text = cart.billing_address.as_text() if cart.billing_address else ''
# in case one of the addresses was None, the customer presumably intended the other one.
if not self.shipping_address_text:
self.shipping_address_text = self.billing_address_text
if not self.billing_address_text:
self.billing_address_text = self.shipping_address_text
super().populate_from_cart(cart, request)
|
from django.apps import AppConfig
from django.core.checks import Info, register
from weblate.utils.checks import weblate_check
class WLAdminConfig(AppConfig):
name = "weblate.wladmin"
label = "wladmin"
verbose_name = "Weblate Admin Extensions"
def ready(self):
super().ready()
register(check_backups, deploy=True)
def check_backups(app_configs, **kwargs):
from weblate.wladmin.models import BackupService
errors = []
if not BackupService.objects.filter(enabled=True).exists():
errors.append(
weblate_check(
"weblate.I028",
"Backups are not configured, "
"it is highly recommended for production use",
Info,
)
)
for service in BackupService.objects.filter(enabled=True):
try:
last_obj = service.last_logs()[0]
last_event = last_obj.event
last_log = last_obj.log
except IndexError:
last_event = "error"
last_log = "missing"
if last_event == "error":
errors.append(
weblate_check(
"weblate.C029",
f"There was error while performing backups: {last_log}",
)
)
break
return errors
|
from __future__ import print_function, unicode_literals
import argparse
import json
import logging
import pprint
import sys
import time
from scrapy_redis import get_redis
logger = logging.getLogger('process_items')
def process_items(r, keys, timeout, limit=0, log_every=1000, wait=.1):
"""Process items from a redis queue.
Parameters
----------
r : Redis
Redis connection instance.
keys : list
List of keys to read the items from.
    timeout : int
        Read timeout, in seconds.
    limit : int
        Maximum number of items to process (0 means no limit).
    log_every : int
        Log a progress message every this many processed items.
    wait : float
        Seconds to sleep when nothing was read before the timeout.
    """
limit = limit or float('inf')
processed = 0
while processed < limit:
# Change ``blpop`` to ``brpop`` to process as LIFO.
ret = r.blpop(keys, timeout)
        # ``blpop`` returns None when the timeout expires with no data; wait and retry.
if ret is None:
time.sleep(wait)
continue
source, data = ret
try:
item = json.loads(data)
except Exception:
logger.exception("Failed to load item:\n%r", pprint.pformat(data))
continue
try:
name = item.get('name') or item.get('title')
url = item.get('url') or item.get('link')
logger.debug("[%s] Processing item: %s <%s>", source, name, url)
except KeyError:
logger.exception("[%s] Failed to process item:\n%r",
source, pprint.pformat(item))
continue
processed += 1
if processed % log_every == 0:
logger.info("Processed %s items", processed)
def main():
parser = argparse.ArgumentParser(description=__doc__)
parser.add_argument('key', help="Redis key where items are stored")
parser.add_argument('--host')
parser.add_argument('--port')
parser.add_argument('--timeout', type=int, default=5)
parser.add_argument('--limit', type=int, default=0)
parser.add_argument('--progress-every', type=int, default=100)
parser.add_argument('-v', '--verbose', action='store_true')
args = parser.parse_args()
params = {}
if args.host:
params['host'] = args.host
if args.port:
params['port'] = args.port
logging.basicConfig(level=logging.DEBUG if args.verbose else logging.INFO)
r = get_redis(**params)
host = r.connection_pool.get_connection('info').host
logger.info("Waiting for items in '%s' (server: %s)", args.key, host)
kwargs = {
'keys': [args.key],
'timeout': args.timeout,
'limit': args.limit,
'log_every': args.progress_every,
}
try:
process_items(r, **kwargs)
retcode = 0 # ok
except KeyboardInterrupt:
retcode = 0 # ok
except Exception:
logger.exception("Unhandled exception")
retcode = 2
return retcode
if __name__ == '__main__':
sys.exit(main())
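# Example invocation (assuming this script is saved as process_items.py and Redis is
# running locally; the key name is just an example):
#     python process_items.py demo:items --host localhost --timeout 5 -v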
|
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _get_version, _version_doc,
_data_path_doc_accept)
has_brainstorm_data = partial(has_dataset, name='brainstorm.bst_phantom_ctf')
_description = u"""
URL: http://neuroimage.usc.edu/brainstorm/Tutorials/PhantomCtf
"""
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
*, accept=False, verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name='brainstorm',
download=download, archive_name='bst_phantom_ctf.tar.gz',
accept=accept)
_data_path_doc = _data_path_doc_accept.format(
name='brainstorm', conf='MNE_DATASETS_BRAINSTORM_DATA_PATH')
_data_path_doc = _data_path_doc.replace('brainstorm dataset',
'brainstorm (bst_phantom_ctf) dataset')
data_path.__doc__ = _data_path_doc
def get_version(): # noqa: D103
return _get_version('brainstorm.bst_phantom_ctf')
get_version.__doc__ = _version_doc.format(name='brainstorm')
def description():
"""Get description of brainstorm (bst_phantom_ctf) dataset."""
for desc in _description.splitlines():
print(desc)
|
from typing import Any, Dict, List, Optional
from toonapi import (
ACTIVE_STATE_AWAY,
ACTIVE_STATE_COMFORT,
ACTIVE_STATE_HOME,
ACTIVE_STATE_SLEEP,
)
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
HVAC_MODE_HEAT,
PRESET_AWAY,
PRESET_COMFORT,
PRESET_HOME,
PRESET_SLEEP,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import ATTR_TEMPERATURE, TEMP_CELSIUS
from homeassistant.helpers.typing import HomeAssistantType
from .const import DEFAULT_MAX_TEMP, DEFAULT_MIN_TEMP, DOMAIN
from .helpers import toon_exception_handler
from .models import ToonDisplayDeviceEntity
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up a Toon binary sensors based on a config entry."""
coordinator = hass.data[DOMAIN][entry.entry_id]
async_add_entities(
[ToonThermostatDevice(coordinator, name="Thermostat", icon="mdi:thermostat")]
)
class ToonThermostatDevice(ToonDisplayDeviceEntity, ClimateEntity):
"""Representation of a Toon climate device."""
@property
def unique_id(self) -> str:
"""Return the unique ID for this thermostat."""
agreement_id = self.coordinator.data.agreement.agreement_id
# This unique ID is a bit ugly and contains unneeded information.
        # It is kept for legacy / backward-compatibility reasons.
return f"{DOMAIN}_{agreement_id}_climate"
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
return HVAC_MODE_HEAT
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
return [HVAC_MODE_HEAT]
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation."""
if self.coordinator.data.thermostat.heating:
return CURRENT_HVAC_HEAT
return CURRENT_HVAC_IDLE
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp."""
mapping = {
ACTIVE_STATE_AWAY: PRESET_AWAY,
ACTIVE_STATE_COMFORT: PRESET_COMFORT,
ACTIVE_STATE_HOME: PRESET_HOME,
ACTIVE_STATE_SLEEP: PRESET_SLEEP,
}
return mapping.get(self.coordinator.data.thermostat.active_state)
@property
def preset_modes(self) -> List[str]:
"""Return a list of available preset modes."""
return [PRESET_AWAY, PRESET_COMFORT, PRESET_HOME, PRESET_SLEEP]
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self.coordinator.data.thermostat.current_display_temperature
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
return self.coordinator.data.thermostat.current_setpoint
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
return DEFAULT_MIN_TEMP
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
return DEFAULT_MAX_TEMP
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the current state of the burner."""
return {"heating_type": self.coordinator.data.agreement.heating_type}
@toon_exception_handler
async def async_set_temperature(self, **kwargs) -> None:
"""Change the setpoint of the thermostat."""
temperature = kwargs.get(ATTR_TEMPERATURE)
await self.coordinator.toon.set_current_setpoint(temperature)
@toon_exception_handler
async def async_set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
mapping = {
PRESET_AWAY: ACTIVE_STATE_AWAY,
PRESET_COMFORT: ACTIVE_STATE_COMFORT,
PRESET_HOME: ACTIVE_STATE_HOME,
PRESET_SLEEP: ACTIVE_STATE_SLEEP,
}
if preset_mode in mapping:
await self.coordinator.toon.set_active_state(mapping[preset_mode])
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
# Intentionally left empty
        # The HVAC mode is always HEAT
|
from unittest import mock
import os
import pytest
import http.client as httplib
from vcr import VCR, mode, use_cassette
from vcr.request import Request
from vcr.stubs import VCRHTTPSConnection
from vcr.patch import _HTTPConnection, force_reset
def test_vcr_use_cassette():
record_mode = mock.Mock()
test_vcr = VCR(record_mode=record_mode)
with mock.patch(
"vcr.cassette.Cassette.load", return_value=mock.MagicMock(inject=False)
) as mock_cassette_load:
@test_vcr.use_cassette("test")
def function():
pass
assert mock_cassette_load.call_count == 0
function()
assert mock_cassette_load.call_args[1]["record_mode"] is record_mode
# Make sure that calls to function now use cassettes with the
# new filter_header_settings
test_vcr.record_mode = mock.Mock()
function()
assert mock_cassette_load.call_args[1]["record_mode"] == test_vcr.record_mode
    # Ensure that explicitly provided arguments still supersede
# those on the vcr.
new_record_mode = mock.Mock()
with test_vcr.use_cassette("test", record_mode=new_record_mode) as cassette:
assert cassette.record_mode == new_record_mode
def test_vcr_before_record_request_params():
base_path = "http://httpbin.org/"
def before_record_cb(request):
if request.path != "/get":
return request
test_vcr = VCR(
filter_headers=("cookie", ("bert", "ernie")),
before_record_request=before_record_cb,
ignore_hosts=("www.test.com",),
ignore_localhost=True,
filter_query_parameters=("foo", ("tom", "jerry")),
filter_post_data_parameters=("posted", ("no", "trespassing")),
)
with test_vcr.use_cassette("test") as cassette:
# Test explicit before_record_cb
request_get = Request("GET", base_path + "get", "", {})
assert cassette.filter_request(request_get) is None
request = Request("GET", base_path + "get2", "", {})
assert cassette.filter_request(request) is not None
# Test filter_query_parameters
request = Request("GET", base_path + "?foo=bar", "", {})
assert cassette.filter_request(request).query == []
request = Request("GET", base_path + "?tom=nobody", "", {})
assert cassette.filter_request(request).query == [("tom", "jerry")]
# Test filter_headers
request = Request(
"GET", base_path + "?foo=bar", "", {"cookie": "test", "other": "fun", "bert": "nobody"}
)
assert cassette.filter_request(request).headers == {"other": "fun", "bert": "ernie"}
# Test ignore_hosts
request = Request("GET", "http://www.test.com" + "?foo=bar", "", {"cookie": "test", "other": "fun"})
assert cassette.filter_request(request) is None
# Test ignore_localhost
request = Request("GET", "http://localhost:8000" + "?foo=bar", "", {"cookie": "test", "other": "fun"})
assert cassette.filter_request(request) is None
with test_vcr.use_cassette("test", before_record_request=None) as cassette:
# Test that before_record can be overwritten in context manager.
assert cassette.filter_request(request_get) is not None
def test_vcr_before_record_response_iterable():
# Regression test for #191
request = Request("GET", "/", "", {})
response = object() # just can't be None
# Prevent actually saving the cassette
with mock.patch("vcr.cassette.FilesystemPersister.save_cassette"):
# Baseline: non-iterable before_record_response should work
mock_filter = mock.Mock()
vcr = VCR(before_record_response=mock_filter)
with vcr.use_cassette("test") as cassette:
assert mock_filter.call_count == 0
cassette.append(request, response)
assert mock_filter.call_count == 1
# Regression test: iterable before_record_response should work too
mock_filter = mock.Mock()
vcr = VCR(before_record_response=(mock_filter,))
with vcr.use_cassette("test") as cassette:
assert mock_filter.call_count == 0
cassette.append(request, response)
assert mock_filter.call_count == 1
def test_before_record_response_as_filter():
request = Request("GET", "/", "", {})
response = object() # just can't be None
# Prevent actually saving the cassette
with mock.patch("vcr.cassette.FilesystemPersister.save_cassette"):
filter_all = mock.Mock(return_value=None)
vcr = VCR(before_record_response=filter_all)
with vcr.use_cassette("test") as cassette:
cassette.append(request, response)
assert cassette.data == []
assert not cassette.dirty
def test_vcr_path_transformer():
# Regression test for #199
# Prevent actually saving the cassette
with mock.patch("vcr.cassette.FilesystemPersister.save_cassette"):
# Baseline: path should be unchanged
vcr = VCR()
with vcr.use_cassette("test") as cassette:
assert cassette._path == "test"
# Regression test: path_transformer=None should do the same.
vcr = VCR(path_transformer=None)
with vcr.use_cassette("test") as cassette:
assert cassette._path == "test"
# and it should still work with cassette_library_dir
vcr = VCR(cassette_library_dir="/foo")
with vcr.use_cassette("test") as cassette:
assert os.path.abspath(cassette._path) == os.path.abspath("/foo/test")
@pytest.fixture
def random_fixture():
return 1
@use_cassette("test")
def test_fixtures_with_use_cassette(random_fixture):
    # Applying a decorator to a test function that requests fixtures can cause
# problems if the decorator does not preserve the signature of the original
# test function.
# This test ensures that use_cassette preserves the signature of
# the original test function, and thus that use_cassette is
# compatible with py.test fixtures. It is admittedly a bit strange
# because the test would never even run if the relevant feature
# were broken.
pass
def test_custom_patchers():
class Test:
attribute = None
attribute2 = None
test_vcr = VCR(custom_patches=((Test, "attribute", VCRHTTPSConnection),))
with test_vcr.use_cassette("custom_patches"):
assert issubclass(Test.attribute, VCRHTTPSConnection)
assert VCRHTTPSConnection is not Test.attribute
with test_vcr.use_cassette("custom_patches", custom_patches=((Test, "attribute2", VCRHTTPSConnection),)):
assert issubclass(Test.attribute, VCRHTTPSConnection)
assert VCRHTTPSConnection is not Test.attribute
assert Test.attribute is Test.attribute2
def test_inject_cassette():
vcr = VCR(inject_cassette=True)
@vcr.use_cassette("test", record_mode=mode.ONCE)
def with_cassette_injected(cassette):
assert cassette.record_mode == mode.ONCE
@vcr.use_cassette("test", record_mode=mode.ONCE, inject_cassette=False)
def without_cassette_injected():
pass
with_cassette_injected()
without_cassette_injected()
def test_with_current_defaults():
vcr = VCR(inject_cassette=True, record_mode=mode.ONCE)
@vcr.use_cassette("test", with_current_defaults=False)
def changing_defaults(cassette, checks):
checks(cassette)
@vcr.use_cassette("test", with_current_defaults=True)
def current_defaults(cassette, checks):
checks(cassette)
def assert_record_mode_once(cassette):
assert cassette.record_mode == mode.ONCE
def assert_record_mode_all(cassette):
assert cassette.record_mode == mode.ALL
changing_defaults(assert_record_mode_once)
current_defaults(assert_record_mode_once)
vcr.record_mode = "all"
changing_defaults(assert_record_mode_all)
current_defaults(assert_record_mode_once)
def test_cassette_library_dir_with_decoration_and_no_explicit_path():
library_dir = "/libary_dir"
vcr = VCR(inject_cassette=True, cassette_library_dir=library_dir)
@vcr.use_cassette()
def function_name(cassette):
assert cassette._path == os.path.join(library_dir, "function_name")
function_name()
def test_cassette_library_dir_with_decoration_and_explicit_path():
library_dir = "/libary_dir"
vcr = VCR(inject_cassette=True, cassette_library_dir=library_dir)
@vcr.use_cassette(path="custom_name")
def function_name(cassette):
assert cassette._path == os.path.join(library_dir, "custom_name")
function_name()
def test_cassette_library_dir_with_decoration_and_super_explicit_path():
library_dir = "/libary_dir"
vcr = VCR(inject_cassette=True, cassette_library_dir=library_dir)
@vcr.use_cassette(path=os.path.join(library_dir, "custom_name"))
def function_name(cassette):
assert cassette._path == os.path.join(library_dir, "custom_name")
function_name()
def test_cassette_library_dir_with_path_transformer():
library_dir = "/libary_dir"
vcr = VCR(
inject_cassette=True, cassette_library_dir=library_dir, path_transformer=lambda path: path + ".json"
)
@vcr.use_cassette()
def function_name(cassette):
assert cassette._path == os.path.join(library_dir, "function_name.json")
function_name()
def test_use_cassette_with_no_extra_invocation():
vcr = VCR(inject_cassette=True, cassette_library_dir="/")
@vcr.use_cassette
def function_name(cassette):
assert cassette._path == os.path.join("/", "function_name")
function_name()
def test_path_transformer():
vcr = VCR(inject_cassette=True, cassette_library_dir="/", path_transformer=lambda x: x + "_test")
@vcr.use_cassette
def function_name(cassette):
assert cassette._path == os.path.join("/", "function_name_test")
function_name()
def test_cassette_name_generator_defaults_to_using_module_function_defined_in():
vcr = VCR(inject_cassette=True)
@vcr.use_cassette
def function_name(cassette):
assert cassette._path == os.path.join(os.path.dirname(__file__), "function_name")
function_name()
def test_ensure_suffix():
vcr = VCR(inject_cassette=True, path_transformer=VCR.ensure_suffix(".yaml"))
@vcr.use_cassette
def function_name(cassette):
assert cassette._path == os.path.join(os.path.dirname(__file__), "function_name.yaml")
function_name()
def test_additional_matchers():
vcr = VCR(match_on=("uri",), inject_cassette=True)
@vcr.use_cassette
def function_defaults(cassette):
assert set(cassette._match_on) == {vcr.matchers["uri"]}
@vcr.use_cassette(additional_matchers=("body",))
def function_additional(cassette):
assert set(cassette._match_on) == {vcr.matchers["uri"], vcr.matchers["body"]}
function_defaults()
function_additional()
def test_decoration_should_respect_function_return_value():
vcr = VCR()
ret = "a-return-value"
@vcr.use_cassette
def function_with_return():
return ret
assert ret == function_with_return()
class TestVCRClass(VCR().test_case()):
def no_decoration(self):
assert httplib.HTTPConnection == _HTTPConnection
self.test_dynamically_added()
assert httplib.HTTPConnection == _HTTPConnection
def test_one(self):
with force_reset():
self.no_decoration()
with force_reset():
self.test_two()
assert httplib.HTTPConnection != _HTTPConnection
def test_two(self):
assert httplib.HTTPConnection != _HTTPConnection
def test_dynamically_added(self):
assert httplib.HTTPConnection != _HTTPConnection
TestVCRClass.test_dynamically_added = test_dynamically_added
del test_dynamically_added
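# Sketch (illustrative, not part of the test suite above): a VCR instance
# configured with the options exercised by these tests. The cassette
# directory, suffix, matchers and host below are assumptions chosen only to
# demonstrate how the pieces fit together.
example_vcr = VCR(
    cassette_library_dir="cassettes",
    path_transformer=VCR.ensure_suffix(".yaml"),
    record_mode=mode.ONCE,
    match_on=("uri", "body"),
)
@example_vcr.use_cassette
def fetch_example_homepage():
    # Recorded to "cassettes/fetch_example_homepage.yaml" on the first run,
    # replayed from that cassette afterwards (record_mode=ONCE).
    conn = httplib.HTTPSConnection("example.com")
    conn.request("GET", "/")
    return conn.getresponse().read()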
|
from django.utils.translation import activate, gettext
from weblate.lang.models import Language
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
help = "List language definitions"
def add_arguments(self, parser):
parser.add_argument(
"--lower", action="store_true", help="Lowercase translated name"
)
parser.add_argument("locale", help="Locale for printing")
def handle(self, *args, **options):
"""Create default set of languages.
Optionally updating them to match current shipped definitions.
"""
activate(options["locale"])
for language in Language.objects.order():
name = gettext(language.name)
if options["lower"]:
name = name[0].lower() + name[1:]
self.stdout.write(f"| {language.code} || {language.name} || {name}")
self.stdout.write("|-")
|
from django.utils.translation import gettext_lazy as _
from shop.modifiers.base import BaseCartModifier
from shop.payment.providers import PaymentProvider, ForwardFundPayment
class PaymentModifier(BaseCartModifier):
"""
Base class for all payment modifiers. The purpose of a payment modifier is to calculate the payment surcharge and/or
    prevent its usage, in case the chosen payment method is not available for the given customer. The merchant may
    either append a single payment modifier to the list of ``SHOP_CART_MODIFIERS``, or create a sublist of payment
    modifiers and append this sublist to ``SHOP_CART_MODIFIERS``. The latter is useful to instantiate the same payment
modifier multiple times for different payment service providers using the same interface.
The merchant must specify at least one payment modifier. If there is more than one, the merchant shall offer a
select option during checkout. In django-SHOP, one can use the plugin **Payment Method Form** to render such a
select option.
Each payment modifier can add a surcharge on the current cart.
"""
def __init__(self):
assert isinstance(getattr(self, 'payment_provider', None), PaymentProvider), \
"Each Payment modifier class requires a Payment Provider"
super().__init__()
@property
def identifier(self):
"""
Default identifier for payment providers.
"""
return self.payment_provider.namespace
def get_choice(self):
"""
:returns: A tuple consisting of 'value, label' used by the payment form dialog to render
the available payment choices.
"""
        raise NotImplementedError("{} must implement method `get_choice()`.".format(self.__class__))
def is_active(self, payment_modifier):
"""
:returns: ``True`` if this payment modifier is active.
"""
assert hasattr(self, 'payment_provider'), "A Payment Modifier requires a Payment Provider"
return payment_modifier == self.identifier
def is_disabled(self, cart):
"""
Hook method to be overridden by the concrete payment modifier. Shall be used to
temporarily disable a payment method, in case the cart does not fulfill certain criteria,
        for instance if the total is too small.
:returns: ``True`` if this payment modifier is disabled for the current cart.
"""
return False
def update_render_context(self, context):
"""
Hook to update the rendering context with payment specific data.
"""
from shop.models.cart import CartModel
if 'payment_modifiers' not in context:
context['payment_modifiers'] = {}
try:
cart = CartModel.objects.get_from_request(context['request'])
if self.is_active(cart.extra.get('payment_modifier')):
cart.update(context['request'])
data = cart.extra_rows[self.identifier].data
data.update(modifier=self.identifier)
context['payment_modifiers']['initial_row'] = data
except (KeyError, CartModel.DoesNotExist):
pass
class PayInAdvanceModifier(PaymentModifier):
"""
    This modifier has no influence on the cart's final total. It can be used
    to enable the customer to pay for the products on delivery.
"""
payment_provider = ForwardFundPayment()
def get_choice(self):
return (self.payment_provider.namespace, _("Pay in advance"))
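# Sketch (illustrative, not part of django-SHOP): a minimal custom payment
# modifier built only from the hooks documented above. It reuses
# ForwardFundPayment; the threshold of 10 and the assumption that
# ``cart.total`` compares against a plain number are illustrative only.
class CashOnDeliveryModifier(PaymentModifier):
    """Offer payment on delivery, disabled for carts below a minimal total."""
    payment_provider = ForwardFundPayment()
    def get_choice(self):
        return (self.payment_provider.namespace, _("Pay on delivery"))
    def is_disabled(self, cart):
        return cart.total < 10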
|
from aiohomekit.model.characteristics import (
ActivationStateValues,
CharacteristicsTypes,
CurrentHeaterCoolerStateValues,
SwingModeValues,
TargetHeaterCoolerStateValues,
)
from aiohomekit.model.services import ServicesTypes
from homeassistant.components.climate.const import (
DOMAIN,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
SERVICE_SET_HUMIDITY,
SERVICE_SET_HVAC_MODE,
SERVICE_SET_SWING_MODE,
SERVICE_SET_TEMPERATURE,
)
from tests.components.homekit_controller.common import setup_test_component
HEATING_COOLING_TARGET = ("thermostat", "heating-cooling.target")
HEATING_COOLING_CURRENT = ("thermostat", "heating-cooling.current")
TEMPERATURE_TARGET = ("thermostat", "temperature.target")
TEMPERATURE_CURRENT = ("thermostat", "temperature.current")
HUMIDITY_TARGET = ("thermostat", "relative-humidity.target")
HUMIDITY_CURRENT = ("thermostat", "relative-humidity.current")
# Test thermostat devices
def create_thermostat_service(accessory):
"""Define thermostat characteristics."""
service = accessory.add_service(ServicesTypes.THERMOSTAT)
char = service.add_char(CharacteristicsTypes.HEATING_COOLING_TARGET)
char.value = 0
char = service.add_char(CharacteristicsTypes.HEATING_COOLING_CURRENT)
char.value = 0
char = service.add_char(CharacteristicsTypes.TEMPERATURE_TARGET)
char.minValue = 7
char.maxValue = 35
char.value = 0
char = service.add_char(CharacteristicsTypes.TEMPERATURE_CURRENT)
char.value = 0
char = service.add_char(CharacteristicsTypes.RELATIVE_HUMIDITY_TARGET)
char.value = 0
char = service.add_char(CharacteristicsTypes.RELATIVE_HUMIDITY_CURRENT)
char.value = 0
def create_thermostat_service_min_max(accessory):
"""Define thermostat characteristics."""
service = accessory.add_service(ServicesTypes.THERMOSTAT)
char = service.add_char(CharacteristicsTypes.HEATING_COOLING_TARGET)
char.value = 0
char.minValue = 0
char.maxValue = 1
async def test_climate_respect_supported_op_modes_1(hass, utcnow):
"""Test that climate respects minValue/maxValue hints."""
helper = await setup_test_component(hass, create_thermostat_service_min_max)
state = await helper.poll_and_get_state()
assert state.attributes["hvac_modes"] == ["off", "heat"]
def create_thermostat_service_valid_vals(accessory):
"""Define thermostat characteristics."""
service = accessory.add_service(ServicesTypes.THERMOSTAT)
char = service.add_char(CharacteristicsTypes.HEATING_COOLING_TARGET)
char.value = 0
char.valid_values = [0, 1, 2]
async def test_climate_respect_supported_op_modes_2(hass, utcnow):
"""Test that climate respects validValue hints."""
helper = await setup_test_component(hass, create_thermostat_service_valid_vals)
state = await helper.poll_and_get_state()
assert state.attributes["hvac_modes"] == ["off", "heat", "cool"]
async def test_climate_change_thermostat_state(hass, utcnow):
"""Test that we can turn a HomeKit thermostat on and off again."""
helper = await setup_test_component(hass, create_thermostat_service)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
blocking=True,
)
assert helper.characteristics[HEATING_COOLING_TARGET].value == 1
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_COOL},
blocking=True,
)
assert helper.characteristics[HEATING_COOLING_TARGET].value == 2
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT_COOL},
blocking=True,
)
assert helper.characteristics[HEATING_COOLING_TARGET].value == 3
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_OFF},
blocking=True,
)
assert helper.characteristics[HEATING_COOLING_TARGET].value == 0
async def test_climate_change_thermostat_temperature(hass, utcnow):
"""Test that we can turn a HomeKit thermostat on and off again."""
helper = await setup_test_component(hass, create_thermostat_service)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{"entity_id": "climate.testdevice", "temperature": 21},
blocking=True,
)
assert helper.characteristics[TEMPERATURE_TARGET].value == 21
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{"entity_id": "climate.testdevice", "temperature": 25},
blocking=True,
)
assert helper.characteristics[TEMPERATURE_TARGET].value == 25
async def test_climate_change_thermostat_humidity(hass, utcnow):
"""Test that we can turn a HomeKit thermostat on and off again."""
helper = await setup_test_component(hass, create_thermostat_service)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HUMIDITY,
{"entity_id": "climate.testdevice", "humidity": 50},
blocking=True,
)
assert helper.characteristics[HUMIDITY_TARGET].value == 50
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HUMIDITY,
{"entity_id": "climate.testdevice", "humidity": 45},
blocking=True,
)
assert helper.characteristics[HUMIDITY_TARGET].value == 45
async def test_climate_read_thermostat_state(hass, utcnow):
"""Test that we can read the state of a HomeKit thermostat accessory."""
helper = await setup_test_component(hass, create_thermostat_service)
# Simulate that heating is on
helper.characteristics[TEMPERATURE_CURRENT].value = 19
helper.characteristics[TEMPERATURE_TARGET].value = 21
helper.characteristics[HEATING_COOLING_CURRENT].value = 1
helper.characteristics[HEATING_COOLING_TARGET].value = 1
helper.characteristics[HUMIDITY_CURRENT].value = 50
helper.characteristics[HUMIDITY_TARGET].value = 45
state = await helper.poll_and_get_state()
assert state.state == HVAC_MODE_HEAT
assert state.attributes["current_temperature"] == 19
assert state.attributes["current_humidity"] == 50
assert state.attributes["min_temp"] == 7
assert state.attributes["max_temp"] == 35
# Simulate that cooling is on
helper.characteristics[TEMPERATURE_CURRENT].value = 21
helper.characteristics[TEMPERATURE_TARGET].value = 19
helper.characteristics[HEATING_COOLING_CURRENT].value = 2
helper.characteristics[HEATING_COOLING_TARGET].value = 2
helper.characteristics[HUMIDITY_CURRENT].value = 45
helper.characteristics[HUMIDITY_TARGET].value = 45
state = await helper.poll_and_get_state()
assert state.state == HVAC_MODE_COOL
assert state.attributes["current_temperature"] == 21
assert state.attributes["current_humidity"] == 45
# Simulate that we are in heat/cool mode
helper.characteristics[TEMPERATURE_CURRENT].value = 21
helper.characteristics[TEMPERATURE_TARGET].value = 21
helper.characteristics[HEATING_COOLING_CURRENT].value = 0
helper.characteristics[HEATING_COOLING_TARGET].value = 3
state = await helper.poll_and_get_state()
assert state.state == HVAC_MODE_HEAT_COOL
async def test_hvac_mode_vs_hvac_action(hass, utcnow):
"""Check that we haven't conflated hvac_mode and hvac_action."""
helper = await setup_test_component(hass, create_thermostat_service)
# Simulate that current temperature is above target temp
    # Heating might be on, but hvac_action is currently 'idle'
helper.characteristics[TEMPERATURE_CURRENT].value = 22
helper.characteristics[TEMPERATURE_TARGET].value = 21
helper.characteristics[HEATING_COOLING_CURRENT].value = 0
helper.characteristics[HEATING_COOLING_TARGET].value = 1
helper.characteristics[HUMIDITY_CURRENT].value = 50
helper.characteristics[HUMIDITY_TARGET].value = 45
state = await helper.poll_and_get_state()
assert state.state == "heat"
assert state.attributes["hvac_action"] == "idle"
# Simulate that current temperature is below target temp
# Heating might be on and hvac_action currently 'heat'
helper.characteristics[TEMPERATURE_CURRENT].value = 19
helper.characteristics[HEATING_COOLING_CURRENT].value = 1
state = await helper.poll_and_get_state()
assert state.state == "heat"
assert state.attributes["hvac_action"] == "heating"
TARGET_HEATER_COOLER_STATE = ("heater-cooler", "heater-cooler.state.target")
CURRENT_HEATER_COOLER_STATE = ("heater-cooler", "heater-cooler.state.current")
HEATER_COOLER_ACTIVE = ("heater-cooler", "active")
HEATER_COOLER_TEMPERATURE_CURRENT = ("heater-cooler", "temperature.current")
TEMPERATURE_COOLING_THRESHOLD = ("heater-cooler", "temperature.cooling-threshold")
TEMPERATURE_HEATING_THRESHOLD = ("heater-cooler", "temperature.heating-threshold")
SWING_MODE = ("heater-cooler", "swing-mode")
def create_heater_cooler_service(accessory):
"""Define thermostat characteristics."""
service = accessory.add_service(ServicesTypes.HEATER_COOLER)
char = service.add_char(CharacteristicsTypes.TARGET_HEATER_COOLER_STATE)
char.value = 0
char = service.add_char(CharacteristicsTypes.CURRENT_HEATER_COOLER_STATE)
char.value = 0
char = service.add_char(CharacteristicsTypes.ACTIVE)
char.value = 1
char = service.add_char(CharacteristicsTypes.TEMPERATURE_COOLING_THRESHOLD)
char.minValue = 7
char.maxValue = 35
char.value = 0
char = service.add_char(CharacteristicsTypes.TEMPERATURE_HEATING_THRESHOLD)
char.minValue = 7
char.maxValue = 35
char.value = 0
char = service.add_char(CharacteristicsTypes.TEMPERATURE_CURRENT)
char.value = 0
char = service.add_char(CharacteristicsTypes.SWING_MODE)
char.value = 0
# Test heater-cooler devices
def create_heater_cooler_service_min_max(accessory):
"""Define thermostat characteristics."""
service = accessory.add_service(ServicesTypes.HEATER_COOLER)
char = service.add_char(CharacteristicsTypes.TARGET_HEATER_COOLER_STATE)
char.value = 1
char.minValue = 1
char.maxValue = 2
async def test_heater_cooler_respect_supported_op_modes_1(hass, utcnow):
"""Test that climate respects minValue/maxValue hints."""
helper = await setup_test_component(hass, create_heater_cooler_service_min_max)
state = await helper.poll_and_get_state()
assert state.attributes["hvac_modes"] == ["heat", "cool", "off"]
def create_heater_cooler_service_valid_vals(accessory):
"""Define heater-cooler characteristics."""
service = accessory.add_service(ServicesTypes.HEATER_COOLER)
char = service.add_char(CharacteristicsTypes.TARGET_HEATER_COOLER_STATE)
char.value = 1
char.valid_values = [1, 2]
async def test_heater_cooler_respect_supported_op_modes_2(hass, utcnow):
"""Test that climate respects validValue hints."""
    helper = await setup_test_component(hass, create_heater_cooler_service_valid_vals)
state = await helper.poll_and_get_state()
assert state.attributes["hvac_modes"] == ["heat", "cool", "off"]
async def test_heater_cooler_change_thermostat_state(hass, utcnow):
"""Test that we can change the operational mode."""
helper = await setup_test_component(hass, create_heater_cooler_service)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
blocking=True,
)
assert (
helper.characteristics[TARGET_HEATER_COOLER_STATE].value
== TargetHeaterCoolerStateValues.HEAT
)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_COOL},
blocking=True,
)
assert (
helper.characteristics[TARGET_HEATER_COOLER_STATE].value
== TargetHeaterCoolerStateValues.COOL
)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT_COOL},
blocking=True,
)
assert (
helper.characteristics[TARGET_HEATER_COOLER_STATE].value
== TargetHeaterCoolerStateValues.AUTOMATIC
)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_OFF},
blocking=True,
)
assert (
helper.characteristics[HEATER_COOLER_ACTIVE].value
== ActivationStateValues.INACTIVE
)
async def test_heater_cooler_change_thermostat_temperature(hass, utcnow):
"""Test that we can change the target temperature."""
helper = await setup_test_component(hass, create_heater_cooler_service)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_HEAT},
blocking=True,
)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{"entity_id": "climate.testdevice", "temperature": 20},
blocking=True,
)
assert helper.characteristics[TEMPERATURE_HEATING_THRESHOLD].value == 20
await hass.services.async_call(
DOMAIN,
SERVICE_SET_HVAC_MODE,
{"entity_id": "climate.testdevice", "hvac_mode": HVAC_MODE_COOL},
blocking=True,
)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_TEMPERATURE,
{"entity_id": "climate.testdevice", "temperature": 26},
blocking=True,
)
assert helper.characteristics[TEMPERATURE_COOLING_THRESHOLD].value == 26
async def test_heater_cooler_read_thermostat_state(hass, utcnow):
"""Test that we can read the state of a HomeKit thermostat accessory."""
helper = await setup_test_component(hass, create_heater_cooler_service)
# Simulate that heating is on
helper.characteristics[HEATER_COOLER_TEMPERATURE_CURRENT].value = 19
helper.characteristics[TEMPERATURE_HEATING_THRESHOLD].value = 20
helper.characteristics[
CURRENT_HEATER_COOLER_STATE
].value = CurrentHeaterCoolerStateValues.HEATING
helper.characteristics[
TARGET_HEATER_COOLER_STATE
].value = TargetHeaterCoolerStateValues.HEAT
helper.characteristics[SWING_MODE].value = SwingModeValues.DISABLED
state = await helper.poll_and_get_state()
assert state.state == HVAC_MODE_HEAT
assert state.attributes["current_temperature"] == 19
assert state.attributes["min_temp"] == 7
assert state.attributes["max_temp"] == 35
# Simulate that cooling is on
helper.characteristics[HEATER_COOLER_TEMPERATURE_CURRENT].value = 21
helper.characteristics[TEMPERATURE_COOLING_THRESHOLD].value = 19
helper.characteristics[
CURRENT_HEATER_COOLER_STATE
].value = CurrentHeaterCoolerStateValues.COOLING
helper.characteristics[
TARGET_HEATER_COOLER_STATE
].value = TargetHeaterCoolerStateValues.COOL
helper.characteristics[SWING_MODE].value = SwingModeValues.DISABLED
state = await helper.poll_and_get_state()
assert state.state == HVAC_MODE_COOL
assert state.attributes["current_temperature"] == 21
# Simulate that we are in auto mode
helper.characteristics[HEATER_COOLER_TEMPERATURE_CURRENT].value = 21
helper.characteristics[TEMPERATURE_COOLING_THRESHOLD].value = 21
helper.characteristics[
CURRENT_HEATER_COOLER_STATE
].value = CurrentHeaterCoolerStateValues.COOLING
helper.characteristics[
TARGET_HEATER_COOLER_STATE
].value = TargetHeaterCoolerStateValues.AUTOMATIC
helper.characteristics[SWING_MODE].value = SwingModeValues.DISABLED
state = await helper.poll_and_get_state()
assert state.state == HVAC_MODE_HEAT_COOL
async def test_heater_cooler_hvac_mode_vs_hvac_action(hass, utcnow):
"""Check that we haven't conflated hvac_mode and hvac_action."""
helper = await setup_test_component(hass, create_heater_cooler_service)
# Simulate that current temperature is above target temp
    # Heating might be on, but hvac_action is currently 'idle'
helper.characteristics[HEATER_COOLER_TEMPERATURE_CURRENT].value = 22
helper.characteristics[TEMPERATURE_HEATING_THRESHOLD].value = 21
helper.characteristics[
CURRENT_HEATER_COOLER_STATE
].value = CurrentHeaterCoolerStateValues.IDLE
helper.characteristics[
TARGET_HEATER_COOLER_STATE
].value = TargetHeaterCoolerStateValues.HEAT
helper.characteristics[SWING_MODE].value = SwingModeValues.DISABLED
state = await helper.poll_and_get_state()
assert state.state == "heat"
assert state.attributes["hvac_action"] == "idle"
# Simulate that current temperature is below target temp
# Heating might be on and hvac_action currently 'heat'
helper.characteristics[HEATER_COOLER_TEMPERATURE_CURRENT].value = 19
helper.characteristics[
CURRENT_HEATER_COOLER_STATE
].value = CurrentHeaterCoolerStateValues.HEATING
state = await helper.poll_and_get_state()
assert state.state == "heat"
assert state.attributes["hvac_action"] == "heating"
async def test_heater_cooler_change_swing_mode(hass, utcnow):
"""Test that we can change the swing mode."""
helper = await setup_test_component(hass, create_heater_cooler_service)
await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{"entity_id": "climate.testdevice", "swing_mode": "vertical"},
blocking=True,
)
assert helper.characteristics[SWING_MODE].value == SwingModeValues.ENABLED
await hass.services.async_call(
DOMAIN,
SERVICE_SET_SWING_MODE,
{"entity_id": "climate.testdevice", "swing_mode": "off"},
blocking=True,
)
assert helper.characteristics[SWING_MODE].value == SwingModeValues.DISABLED
async def test_heater_cooler_turn_off(hass, utcnow):
"""Test that both hvac_action and hvac_mode return "off" when turned off."""
helper = await setup_test_component(hass, create_heater_cooler_service)
# Simulate that the device is turned off but CURRENT_HEATER_COOLER_STATE still returns HEATING/COOLING
helper.characteristics[HEATER_COOLER_ACTIVE].value = ActivationStateValues.INACTIVE
helper.characteristics[
CURRENT_HEATER_COOLER_STATE
].value = CurrentHeaterCoolerStateValues.HEATING
helper.characteristics[
TARGET_HEATER_COOLER_STATE
].value = TargetHeaterCoolerStateValues.HEAT
state = await helper.poll_and_get_state()
assert state.state == "off"
assert state.attributes["hvac_action"] == "off"
|
import asyncio
from aiohttp.client_exceptions import ClientResponseError
from homeassistant.const import HTTP_INTERNAL_SERVER_ERROR
from homeassistant.setup import async_setup_component
from homeassistant.util import dt as dt_util
# An infinitesimally small time-delta.
EPSILON_DELTA = 0.0000000001
def radar_map_url(dim: int = 512, country_code: str = "NL") -> str:
"""Build map url, defaulting to 512 wide (as in component)."""
return f"https://api.buienradar.nl/image/1.0/RadarMap{country_code}?w={dim}&h={dim}"
async def test_fetching_url_and_caching(aioclient_mock, hass, hass_client):
"""Test that it fetches the given url."""
aioclient_mock.get(radar_map_url(), text="hello world")
await async_setup_component(
hass, "camera", {"camera": {"name": "config_test", "platform": "buienradar"}}
)
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/camera_proxy/camera.config_test")
assert resp.status == 200
assert aioclient_mock.call_count == 1
body = await resp.text()
assert body == "hello world"
# default delta is 600s -> should be the same when calling immediately
# afterwards.
resp = await client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 1
async def test_expire_delta(aioclient_mock, hass, hass_client):
"""Test that the cache expires after delta."""
aioclient_mock.get(radar_map_url(), text="hello world")
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "buienradar",
"delta": EPSILON_DELTA,
}
},
)
await hass.async_block_till_done()
client = await hass_client()
resp = await client.get("/api/camera_proxy/camera.config_test")
assert resp.status == 200
assert aioclient_mock.call_count == 1
body = await resp.text()
assert body == "hello world"
await asyncio.sleep(EPSILON_DELTA)
# tiny delta has passed -> should immediately call again
resp = await client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 2
async def test_only_one_fetch_at_a_time(aioclient_mock, hass, hass_client):
"""Test that it fetches with only one request at the same time."""
aioclient_mock.get(radar_map_url(), text="hello world")
await async_setup_component(
hass, "camera", {"camera": {"name": "config_test", "platform": "buienradar"}}
)
await hass.async_block_till_done()
client = await hass_client()
resp_1 = client.get("/api/camera_proxy/camera.config_test")
resp_2 = client.get("/api/camera_proxy/camera.config_test")
resp = await resp_1
resp_2 = await resp_2
assert (await resp.text()) == (await resp_2.text())
assert aioclient_mock.call_count == 1
async def test_dimension(aioclient_mock, hass, hass_client):
"""Test that it actually adheres to the dimension."""
aioclient_mock.get(radar_map_url(700), text="hello world")
await async_setup_component(
hass,
"camera",
{"camera": {"name": "config_test", "platform": "buienradar", "dimension": 700}},
)
await hass.async_block_till_done()
client = await hass_client()
await client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 1
async def test_belgium_country(aioclient_mock, hass, hass_client):
"""Test that it actually adheres to another country like Belgium."""
aioclient_mock.get(radar_map_url(country_code="BE"), text="hello world")
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "buienradar",
"country_code": "BE",
}
},
)
await hass.async_block_till_done()
client = await hass_client()
await client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 1
async def test_failure_response_not_cached(aioclient_mock, hass, hass_client):
"""Test that it does not cache a failure response."""
aioclient_mock.get(radar_map_url(), text="hello world", status=401)
await async_setup_component(
hass, "camera", {"camera": {"name": "config_test", "platform": "buienradar"}}
)
await hass.async_block_till_done()
client = await hass_client()
await client.get("/api/camera_proxy/camera.config_test")
await client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 2
async def test_last_modified_updates(aioclient_mock, hass, hass_client):
"""Test that it does respect HTTP not modified."""
# Build Last-Modified header value
now = dt_util.utcnow()
    last_modified = now.strftime("%a, %d %b %Y %H:%M:%S GMT")
aioclient_mock.get(
radar_map_url(),
text="hello world",
status=200,
headers={"Last-Modified": last_modified},
)
await async_setup_component(
hass,
"camera",
{
"camera": {
"name": "config_test",
"platform": "buienradar",
"delta": EPSILON_DELTA,
}
},
)
await hass.async_block_till_done()
client = await hass_client()
resp_1 = await client.get("/api/camera_proxy/camera.config_test")
# It is not possible to check if header was sent.
assert aioclient_mock.call_count == 1
await asyncio.sleep(EPSILON_DELTA)
# Content has expired, change response to a 304 NOT MODIFIED, which has no
# text, i.e. old value should be kept
aioclient_mock.clear_requests()
# mock call count is now reset as well:
assert aioclient_mock.call_count == 0
aioclient_mock.get(radar_map_url(), text=None, status=304)
resp_2 = await client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 1
assert (await resp_1.read()) == (await resp_2.read())
async def test_retries_after_error(aioclient_mock, hass, hass_client):
"""Test that it does retry after an error instead of caching."""
await async_setup_component(
hass, "camera", {"camera": {"name": "config_test", "platform": "buienradar"}}
)
await hass.async_block_till_done()
client = await hass_client()
aioclient_mock.get(radar_map_url(), text=None, status=HTTP_INTERNAL_SERVER_ERROR)
    # A 500 error should not return data and should raise:
try:
await client.get("/api/camera_proxy/camera.config_test")
except ClientResponseError:
pass
assert aioclient_mock.call_count == 1
# Change the response to a 200
aioclient_mock.clear_requests()
aioclient_mock.get(radar_map_url(), text="DEADBEEF")
assert aioclient_mock.call_count == 0
# http error should not be cached, immediate retry.
resp_2 = await client.get("/api/camera_proxy/camera.config_test")
assert aioclient_mock.call_count == 1
    # Binary content cannot be passed via `aioclient_mock.get(text=...)`;
    # since `resp.read()` returns bytes, compare against the encoded value.
assert (await resp_2.read()) == b"DEADBEEF"
|
from flexx import flx
class Panel(flx.Label):
CSS = '.flx-Panel {background: #66dd88; color: #FFF; padding: 1px;}'
class Boxes(flx.Widget):
def init(self):
with flx.HSplit():
with flx.VBox(flex=1):
flx.Label(html='<b>Box mode</b> (aware of natural size)')
flx.Label(text='flex: 1, sub-flexes: 0, 0, 0')
with flx.HBox(flex=1):
Panel(text='A', flex=0)
Panel(text='B', flex=0)
Panel(text='C is a bit longer', flex=0)
flx.Label(text='flex: 0, sub-flexes: 1, 1, 1')
with flx.HBox(flex=0):
Panel(text='A', flex=1)
Panel(text='B', flex=1)
Panel(text='C is a bit longer', flex=1)
flx.Label(text='flex: 1, sub-flexes: 1, 0, 2')
with flx.HBox(flex=1):
Panel(text='A', flex=1)
Panel(text='B', flex=0)
Panel(text='C is a bit longer', flex=2)
flx.Label(text='flex: 2, sub-flexes: 1, 2, 3')
with flx.HBox(flex=2):
Panel(text='A', flex=1)
Panel(text='B', flex=2)
Panel(text='C is a bit longer', flex=3)
with flx.VBox(flex=1):
flx.Label(html='<b>Fix mode</b> (high level layout)')
flx.Label(text='flex: 1, sub-flexes: 0, 0, 0')
with flx.HFix(flex=1):
Panel(text='A', flex=0)
Panel(text='B', flex=0)
Panel(text='C is a bit longer', flex=0)
flx.Label(text='flex: 0 (collapses), sub-flexes: 1, 1, 1')
with flx.HFix(flex=0):
Panel(text='A', flex=1, style='min-height:5px;')
Panel(text='B', flex=1)
Panel(text='C is a bit longer', flex=1)
flx.Label(text='flex: 1, sub-flexes: 1, 0, 2')
with flx.HFix(flex=1):
Panel(text='A', flex=1)
Panel(text='B', flex=0)
Panel(text='C is a bit longer', flex=2)
flx.Label(text='flex: 2, sub-flexes: 1, 2, 3')
with flx.HFix(flex=2):
Panel(text='A', flex=1)
Panel(text='B', flex=2)
Panel(text='C is a bit longer', flex=3)
if __name__ == '__main__':
m = flx.launch(Boxes)
flx.run()
|
import unittest
from absl import flags
import mock
from perfkitbenchmarker import dpb_service
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers.gcp import gcp_dpb_dataproc
from perfkitbenchmarker.providers.gcp import gcs
from tests import pkb_common_test_case
TEST_RUN_URI = 'fakeru'
GCP_ZONE_US_CENTRAL1_A = 'us-central1-a'
BUCKET_NAME = 'foo'
PROJECT = 'fake-project'
FLAGS = flags.FLAGS
CLUSTER_SPEC = mock.Mock(
static_dpb_service_instance=None,
worker_count=2,
version='fake-version',
applications=['foo-component', 'bar-component'],
worker_group=mock.Mock(
vm_spec=mock.Mock(machine_type='fake-machine-type', num_local_ssds=2),
disk_spec=mock.Mock(disk_type='pd-ssd', disk_size=42)))
class LocalGcpDpbDataproc(gcp_dpb_dataproc.GcpDpbDataproc):
def __init__(self):
# Bypass GCS initialization in Dataproc's constructor
dpb_service.BaseDpbService.__init__(self, CLUSTER_SPEC)
self.project = PROJECT
self.region = self.dpb_service_zone.rsplit('-', 1)[0]
self.storage_service = gcs.GoogleCloudStorageService()
self.storage_service.PrepareService(location=self.region)
class GcpDpbDataprocTestCase(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(GcpDpbDataprocTestCase, self).setUp()
FLAGS.run_uri = TEST_RUN_URI
FLAGS.dpb_service_zone = GCP_ZONE_US_CENTRAL1_A
FLAGS.zones = [GCP_ZONE_US_CENTRAL1_A]
@mock.patch.object(
vm_util, 'IssueCommand', return_value=('fake_stdout', 'fake_stderr', 0))
def testCreateBucket(self, mock_issue):
cluster = LocalGcpDpbDataproc()
cluster.CreateBucket(BUCKET_NAME)
self.assertEqual(mock_issue.call_count, 2)
call_arg_list, _ = mock_issue.call_args_list[0]
self.assertListEqual([
'gsutil', 'mb', '-l',
GCP_ZONE_US_CENTRAL1_A.rsplit('-', 1)[0], '-c', 'regional',
'gs://{}'.format(BUCKET_NAME)
], call_arg_list[0])
@mock.patch.object(
vm_util, 'IssueCommand', return_value=('fake_stdout', 'fake_stderr', 0))
def testDeleteBucket(self, mock_issue):
cluster = LocalGcpDpbDataproc()
cluster.DeleteBucket(BUCKET_NAME)
self.assertEqual(mock_issue.call_count, 2)
call_arg_list, _ = mock_issue.call_args
self.assertListEqual(['gsutil', 'rb', 'gs://{}'.format(BUCKET_NAME)],
call_arg_list[0])
@mock.patch.object(
vm_util, 'IssueCommand', return_value=('fake_stdout', 'fake_stderr', 0))
def testCreate(self, mock_issue):
cluster = LocalGcpDpbDataproc()
cluster._Create()
self.assertEqual(mock_issue.call_count, 1)
command_string = ' '.join(mock_issue.call_args[0][0])
self.assertIn('gcloud dataproc clusters create pkb-fakeru', command_string)
self.assertIn('--image-version fake-version', command_string)
self.assertIn('--master-boot-disk-size 42GB', command_string)
self.assertIn('--master-boot-disk-type pd-ssd', command_string)
self.assertIn('--master-machine-type fake-machine-type', command_string)
self.assertIn('--num-master-local-ssds 2', command_string)
self.assertIn('--worker-boot-disk-size 42GB', command_string)
self.assertIn('--worker-boot-disk-type pd-ssd', command_string)
self.assertIn('--worker-machine-type fake-machine-type', command_string)
self.assertIn('--num-worker-local-ssds 2', command_string)
self.assertIn('--num-workers 2', command_string)
self.assertIn('--optional-components foo-component,bar-component',
command_string)
self.assertIn('--project fake-project ', command_string)
self.assertIn('--region us-central1', command_string)
self.assertIn('--zone us-central1-a', command_string)
@mock.patch.object(
vm_util,
'IssueCommand',
return_value=(
'fake_stdout', "The zone 'projects/fake-project/zones/us-central1-a' "
'does not have enough resources available to fulfill the request.', 1)
)
def testCreateResourceExhausted(self, mock_issue):
cluster = LocalGcpDpbDataproc()
with self.assertRaises(errors.Benchmarks.InsufficientCapacityCloudFailure):
cluster._Create()
self.assertEqual(mock_issue.call_count, 1)
if __name__ == '__main__':
unittest.main()
|
import logging
from homeassistant.components import pilight
import homeassistant.components.sensor as sensor
from homeassistant.setup import setup_component
from tests.common import assert_setup_component, get_test_home_assistant, mock_component
HASS = None
def fire_pilight_message(protocol, data):
"""Fire the fake Pilight message."""
message = {pilight.CONF_PROTOCOL: protocol}
message.update(data)
HASS.bus.fire(pilight.EVENT, message)
# pylint: disable=invalid-name
def setup_function():
"""Initialize a Home Assistant server."""
global HASS
HASS = get_test_home_assistant()
mock_component(HASS, "pilight")
# pylint: disable=invalid-name
def teardown_function():
"""Stop the Home Assistant server."""
HASS.stop()
def test_sensor_value_from_code():
"""Test the setting of value via pilight."""
with assert_setup_component(1):
setup_component(
HASS,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "pilight",
"name": "test",
"variable": "test",
"payload": {"protocol": "test-protocol"},
"unit_of_measurement": "fav unit",
}
},
)
HASS.block_till_done()
state = HASS.states.get("sensor.test")
assert state.state == "unknown"
unit_of_measurement = state.attributes.get("unit_of_measurement")
assert unit_of_measurement == "fav unit"
# Set value from data with correct payload
fire_pilight_message(protocol="test-protocol", data={"test": 42})
HASS.block_till_done()
state = HASS.states.get("sensor.test")
assert state.state == "42"
def test_disregard_wrong_payload():
"""Test omitting setting of value with wrong payload."""
with assert_setup_component(1):
setup_component(
HASS,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "pilight",
"name": "test_2",
"variable": "test",
"payload": {"uuid": "1-2-3-4", "protocol": "test-protocol_2"},
}
},
)
HASS.block_till_done()
# Try set value from data with incorrect payload
fire_pilight_message(
protocol="test-protocol_2", data={"test": "data", "uuid": "0-0-0-0"}
)
HASS.block_till_done()
state = HASS.states.get("sensor.test_2")
assert state.state == "unknown"
# Try set value from data with partially matched payload
fire_pilight_message(
protocol="wrong-protocol", data={"test": "data", "uuid": "1-2-3-4"}
)
HASS.block_till_done()
state = HASS.states.get("sensor.test_2")
assert state.state == "unknown"
# Try set value from data with fully matched payload
fire_pilight_message(
protocol="test-protocol_2",
data={"test": "data", "uuid": "1-2-3-4", "other_payload": 3.141},
)
HASS.block_till_done()
state = HASS.states.get("sensor.test_2")
assert state.state == "data"
def test_variable_missing(caplog):
"""Check if error message when variable missing."""
caplog.set_level(logging.ERROR)
with assert_setup_component(1):
setup_component(
HASS,
sensor.DOMAIN,
{
sensor.DOMAIN: {
"platform": "pilight",
"name": "test_3",
"variable": "test",
"payload": {"protocol": "test-protocol"},
}
},
)
HASS.block_till_done()
# Create code without sensor variable
fire_pilight_message(
protocol="test-protocol", data={"uuid": "1-2-3-4", "other_variable": 3.141}
)
HASS.block_till_done()
logs = caplog.text
assert "No variable test in received code" in logs
|
import logging
from homeassistant.config import YAML_CONFIG_FILE
from homeassistant.helpers.check_config import (
CheckConfigError,
async_check_ha_config_file,
)
from tests.async_mock import patch
from tests.common import patch_yaml_files
_LOGGER = logging.getLogger(__name__)
BASE_CONFIG = (
"homeassistant:\n"
" name: Home\n"
" latitude: -26.107361\n"
" longitude: 28.054500\n"
" elevation: 1600\n"
" unit_system: metric\n"
" time_zone: GMT\n"
"\n\n"
)
BAD_CORE_CONFIG = "homeassistant:\n unit_system: bad\n\n\n"
def log_ha_config(conf):
"""Log the returned config."""
cnt = 0
_LOGGER.debug("CONFIG - %s lines - %s errors", len(conf), len(conf.errors))
for key, val in conf.items():
_LOGGER.debug("#%s - %s: %s", cnt, key, val)
cnt += 1
for cnt, err in enumerate(conf.errors):
_LOGGER.debug("error[%s] = %s", cnt, err)
async def test_bad_core_config(hass, loop):
"""Test a bad core config setup."""
files = {YAML_CONFIG_FILE: BAD_CORE_CONFIG}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert isinstance(res.errors[0].message, str)
assert res.errors[0].domain == "homeassistant"
assert res.errors[0].config == {"unit_system": "bad"}
# Only 1 error expected
res.errors.pop(0)
assert not res.errors
async def test_config_platform_valid(hass, loop):
"""Test a valid platform setup."""
files = {YAML_CONFIG_FILE: BASE_CONFIG + "light:\n platform: demo"}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.keys() == {"homeassistant", "light"}
assert res["light"] == [{"platform": "demo"}]
assert not res.errors
async def test_component_platform_not_found(hass, loop):
"""Test errors if component or platform not found."""
# Make sure they don't exist
files = {YAML_CONFIG_FILE: BASE_CONFIG + "beer:"}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.keys() == {"homeassistant"}
assert res.errors[0] == CheckConfigError(
"Component error: beer - Integration 'beer' not found.", None, None
)
# Only 1 error expected
res.errors.pop(0)
assert not res.errors
async def test_component_platform_not_found_2(hass, loop):
"""Test errors if component or platform not found."""
# Make sure they don't exist
files = {YAML_CONFIG_FILE: BASE_CONFIG + "light:\n platform: beer"}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.keys() == {"homeassistant", "light"}
assert res["light"] == []
assert res.errors[0] == CheckConfigError(
"Platform error light.beer - Integration 'beer' not found.", None, None
)
# Only 1 error expected
res.errors.pop(0)
assert not res.errors
async def test_package_invalid(hass, loop):
"""Test a valid platform setup."""
files = {
YAML_CONFIG_FILE: BASE_CONFIG + (" packages:\n p1:\n" ' group: ["a"]')
}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.errors[0].domain == "homeassistant.packages.p1.group"
assert res.errors[0].config == {"group": ["a"]}
# Only 1 error expected
res.errors.pop(0)
assert not res.errors
assert res.keys() == {"homeassistant"}
async def test_bootstrap_error(hass, loop):
"""Test a valid platform setup."""
files = {YAML_CONFIG_FILE: BASE_CONFIG + "automation: !include no.yaml"}
with patch("os.path.isfile", return_value=True), patch_yaml_files(files):
res = await async_check_ha_config_file(hass)
log_ha_config(res)
assert res.errors[0].domain is None
# Only 1 error expected
res.errors.pop(0)
assert not res.errors
|
from ProgettiHWSW.ProgettiHWSWAPI import ProgettiHWSWAPI
import voluptuous as vol
from homeassistant import config_entries, core, exceptions
from .const import DOMAIN
DATA_SCHEMA = vol.Schema(
{vol.Required("host"): str, vol.Required("port", default=80): int}
)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user host input."""
confs = hass.config_entries.async_entries(DOMAIN)
same_entries = [
True
for entry in confs
if entry.data.get("host") == data["host"]
and entry.data.get("port") == data["port"]
]
if same_entries:
raise ExistingEntry
api_instance = ProgettiHWSWAPI(f'{data["host"]}:{data["port"]}')
is_valid = await api_instance.check_board()
if not is_valid:
raise CannotConnect
return {
"title": is_valid["title"],
"relay_count": is_valid["relay_count"],
"input_count": is_valid["input_count"],
"is_old": is_valid["is_old"],
}
class ProgettiHWSWConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for ProgettiHWSW Automation."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Initialize class variables."""
self.s1_in = None
async def async_step_relay_modes(self, user_input=None):
"""Manage relay modes step."""
errors = {}
if user_input is not None:
whole_data = user_input
whole_data.update(self.s1_in)
return self.async_create_entry(title=whole_data["title"], data=whole_data)
relay_modes_schema = {}
for i in range(1, int(self.s1_in["relay_count"]) + 1):
relay_modes_schema[
vol.Required(f"relay_{str(i)}", default="bistable")
] = vol.In(
{
"bistable": "Bistable (ON/OFF Mode)",
"monostable": "Monostable (Timer Mode)",
}
)
return self.async_show_form(
step_id="relay_modes",
data_schema=vol.Schema(relay_modes_schema),
errors=errors,
)
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
info = await validate_input(self.hass, user_input)
except CannotConnect:
errors["base"] = "cannot_connect"
except ExistingEntry:
return self.async_abort(reason="already_configured")
except Exception: # pylint: disable=broad-except
errors["base"] = "unknown"
else:
user_input.update(info)
self.s1_in = user_input
return await self.async_step_relay_modes()
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA, errors=errors
)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot identify host."""
class WrongInfo(exceptions.HomeAssistantError):
"""Error to indicate we cannot validate relay modes input."""
class ExistingEntry(exceptions.HomeAssistantError):
"""Error to indicate we cannot validate relay modes input."""
|
import sys
try:
from cStringIO import StringIO # Python 2.x
except ImportError:
from io import StringIO # Python 3.x
import roslib.msgs
import roslib.names
import roslib.srvs
from roslib.msgs import MsgSpecException
import rospkg
# name of the Header type as gentools knows it
_header_type_name = 'std_msgs/Header'
def _add_msgs_depends(rospack, spec, deps, package_context):
"""
Add the list of message types that spec depends on to depends.
@param spec: message to compute dependencies for
@type spec: roslib.msgs.MsgSpec/roslib.srvs.SrvSpec
@param deps [str]: list of dependencies. This list will be updated
with the dependencies of spec when the method completes
@type deps: [str]
    @raise KeyError: for invalid dependent types due to missing package dependencies.
"""
def _get_valid_packages(package_context, rospack):
valid_packages = ['', package_context]
try:
valid_packages = valid_packages + rospack.get_depends(package_context, implicit=True)
except rospkg.ResourceNotFound:
# this happens in dynamic generation situations where the
# package is not present. we soft fail here because we assume
# missing messages will be caught later during lookup.
pass
return valid_packages
valid_packages = None
for t in spec.types:
t = roslib.msgs.base_msg_type(t)
if not roslib.msgs.is_builtin(t):
t_package, t_base = roslib.names.package_resource_name(t)
# special mapping for header
if t == roslib.msgs.HEADER:
                # have to re-name Header
deps.append(_header_type_name)
if roslib.msgs.is_registered(t):
depspec = roslib.msgs.get_registered(t)
if t != roslib.msgs.HEADER:
if '/' in t:
deps.append(t)
else:
deps.append(package_context+'/'+t)
else:
if valid_packages is None:
valid_packages = _get_valid_packages(package_context, rospack)
if t_package in valid_packages:
# if we are allowed to load the message, load it.
key, depspec = roslib.msgs.load_by_type(t, package_context)
if t != roslib.msgs.HEADER:
deps.append(key)
roslib.msgs.register(key, depspec)
else:
# not allowed to load the message, so error.
raise KeyError(t)
_add_msgs_depends(rospack, depspec, deps, package_context)
def compute_md5_text(get_deps_dict, spec, rospack=None):
"""
    Compute the text used for md5 calculation. The MD5 spec states that we
    remove comments and non-meaningful whitespace. We also strip
    package names from type names. For convenience's sake, constants are
reordered ahead of other declarations, in the order that they were
originally defined.
@return: text for ROS MD5-processing
@rtype: str
"""
uniquedeps = get_deps_dict['uniquedeps']
package = get_deps_dict['package']
# #1554: need to suppress computation of files in dynamic generation case
compute_files = 'files' in get_deps_dict
buff = StringIO()
for c in spec.constants:
buff.write('%s %s=%s\n' % (c.type, c.name, c.val_text))
for type_, name in zip(spec.types, spec.names):
base_msg_type = roslib.msgs.base_msg_type(type_)
# md5 spec strips package names
if roslib.msgs.is_builtin(base_msg_type):
buff.write('%s %s\n' % (type_, name))
else:
# recursively generate md5 for subtype. have to build up
# dependency representation for subtype in order to
# generate md5
# - ugly special-case handling of Header
if base_msg_type == roslib.msgs.HEADER:
base_msg_type = _header_type_name
sub_pkg, _ = roslib.names.package_resource_name(base_msg_type)
sub_pkg = sub_pkg or package
sub_spec = roslib.msgs.get_registered(base_msg_type, package)
sub_deps = get_dependencies(sub_spec, sub_pkg, compute_files=compute_files, rospack=rospack)
sub_md5 = compute_md5(sub_deps, rospack)
buff.write('%s %s\n' % (sub_md5, name))
return buff.getvalue().strip() # remove trailing new line
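# Worked example (illustrative): for a message defined as
#     int32 MAX=10
#     Header header
#     float64 x
# the md5 text puts the constant first, keeps built-in fields verbatim, and
# replaces the Header field with the md5 of std_msgs/Header's own md5 text:
#     int32 MAX=10
#     <md5 of std_msgs/Header> header
#     float64 x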
def _compute_hash(get_deps_dict, hash, rospack=None):
"""
subroutine of compute_md5()
@param get_deps_dict: dictionary returned by get_dependencies call
@type get_deps_dict: dict
@param hash: hash instance
@type hash: hash instance
"""
# accumulate the hash
# - root file
from roslib.msgs import MsgSpec
from roslib.srvs import SrvSpec
spec = get_deps_dict['spec']
if isinstance(spec, MsgSpec):
hash.update(compute_md5_text(get_deps_dict, spec, rospack=rospack).encode())
elif isinstance(spec, SrvSpec):
hash.update(compute_md5_text(get_deps_dict, spec.request, rospack=rospack).encode())
hash.update(compute_md5_text(get_deps_dict, spec.response, rospack=rospack).encode())
else:
raise Exception('[%s] is not a message or service' % spec)
return hash.hexdigest()
def _compute_hash_v1(get_deps_dict, hash):
"""
subroutine of compute_md5_v1()
@param get_deps_dict: dictionary returned by get_dependencies call
@type get_deps_dict: dict
@param hash: hash instance
@type hash: hash instance
"""
uniquedeps = get_deps_dict['uniquedeps']
spec = get_deps_dict['spec']
# accumulate the hash
# - root file
hash.update(spec.text)
# - dependencies
for d in uniquedeps:
hash.update(roslib.msgs.get_registered(d).text)
return hash.hexdigest()
def compute_md5_v1(get_deps_dict):
"""
Compute original V1 md5 hash for message/service. This was replaced with V2 in ROS 0.6.
@param get_deps_dict: dictionary returned by get_dependencies call
@type get_deps_dict: dict
@return: md5 hash
@rtype: str
"""
import hashlib
return _compute_hash_v1(get_deps_dict, hashlib.md5())
def compute_md5(get_deps_dict, rospack=None):
"""
Compute md5 hash for message/service
@param get_deps_dict dict: dictionary returned by get_dependencies call
@type get_deps_dict: dict
@return: md5 hash
@rtype: str
"""
try:
# md5 is deprecated in Python 2.6 in favor of hashlib, but hashlib is
# unavailable in Python 2.4
import hashlib
return _compute_hash(get_deps_dict, hashlib.md5(), rospack=rospack)
except ImportError:
import md5
return _compute_hash(get_deps_dict, md5.new(), rospack=rospack)
# alias
compute_md5_v2 = compute_md5
def compute_full_text(get_deps_dict):
"""
Compute full text of message/service, including text of embedded
types. The text of the main msg/srv is listed first. Embedded
msg/srv files are denoted first by an 80-character '=' separator,
followed by a type declaration line,'MSG: pkg/type', followed by
the text of the embedded type.
@param get_deps_dict dict: dictionary returned by get_dependencies call
@type get_deps_dict: dict
@return: concatenated text for msg/srv file and embedded msg/srv types.
@rtype: str
"""
buff = StringIO()
sep = '='*80+'\n'
# write the text of the top-level type
buff.write(get_deps_dict['spec'].text)
buff.write('\n')
# append the text of the dependencies (embedded types)
for d in get_deps_dict['uniquedeps']:
buff.write(sep)
buff.write('MSG: %s\n' % d)
buff.write(roslib.msgs.get_registered(d).text)
buff.write('\n')
# #1168: remove the trailing \n separator that is added by the concatenation logic
return buff.getvalue()[:-1]
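# Layout sketch of the returned value (per the docstring above): the top-level
# msg/srv text comes first, then each unique dependency is appended, preceded
# by an 80-character '=' separator line and a "MSG: pkg/type" declaration:
#     float64 x
#     Header header
#     ========...========   (80 '=' characters)
#     MSG: std_msgs/Header
#     uint32 seq
#     time stamp
#     string frame_id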
def get_file_dependencies(f, stdout=sys.stdout, stderr=sys.stderr, rospack=None):
"""
Compute dependencies of the specified message/service file
@param f: message or service file to get dependencies for
@type f: str
@param stdout pipe: stdout pipe
@type stdout: file
@param stderr pipe: stderr pipe
@type stderr: file
@return: 'files': list of files that \a file depends on,
'deps': list of dependencies by type, 'spec': Msgs/Srvs
instance.
@rtype: dict
"""
package = rospkg.get_package_name(f)
spec = None
if f.endswith(roslib.msgs.EXT):
_, spec = roslib.msgs.load_from_file(f)
elif f.endswith(roslib.srvs.EXT):
_, spec = roslib.srvs.load_from_file(f)
else:
        raise Exception('[%s] does not appear to be a message or service' % f)
return get_dependencies(spec, package, stdout, stderr, rospack=rospack)
def get_dependencies(spec, package, compute_files=True, stdout=sys.stdout, stderr=sys.stderr, rospack=None):
"""
Compute dependencies of the specified Msgs/Srvs
@param spec: message or service instance
@type spec: L{roslib.msgs.MsgSpec}/L{roslib.srvs.SrvSpec}
@param package: package name
@type package: str
@param stdout: (optional) stdout pipe
@type stdout: file
@param stderr: (optional) stderr pipe
@type stderr: file
@param compute_files: (optional, default=True) compute file
dependencies of message ('files' key in return value)
@type compute_files: bool
@return: dict:
      * 'files': list of files that the message/service file depends on
* 'deps': list of dependencies by type
* 'spec': Msgs/Srvs instance.
* 'uniquedeps': list of dependencies with duplicates removed,
* 'package': package that dependencies were generated relative to.
@rtype: dict
"""
# #518: as a performance optimization, we're going to manually control the loading
# of msgs instead of doing package-wide loads.
# we're going to manipulate internal apis of msgs, so have to manually init
roslib.msgs._init()
deps = []
try:
if not rospack:
rospack = rospkg.RosPack()
if isinstance(spec, roslib.msgs.MsgSpec):
_add_msgs_depends(rospack, spec, deps, package)
elif isinstance(spec, roslib.srvs.SrvSpec):
_add_msgs_depends(rospack, spec.request, deps, package)
_add_msgs_depends(rospack, spec.response, deps, package)
else:
raise MsgSpecException('spec does not appear to be a message or service')
except KeyError as e:
raise MsgSpecException('Cannot load type %s. Perhaps the package is missing a dependency.' % (str(e)))
# convert from type names to file names
if compute_files:
files = {}
for d in set(deps):
d_pkg, t = roslib.names.package_resource_name(d)
d_pkg = d_pkg or package # convert '' -> local package
files[d] = roslib.msgs.msg_file(d_pkg, t)
else:
files = None
# create unique dependency list
uniquedeps = []
for d in deps:
if d not in uniquedeps:
uniquedeps.append(d)
if compute_files:
return {'files': files, 'deps': deps, 'spec': spec, 'package': package, 'uniquedeps': uniquedeps}
else:
return {'deps': deps, 'spec': spec, 'package': package, 'uniquedeps': uniquedeps}
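# A minimal usage sketch (not part of the module): it assumes a ROS environment
# where roslib/rospkg can resolve packages and that the std_msgs package with a
# String message is available; the message name below is only an example.
if __name__ == '__main__':
    example_msg = roslib.msgs.msg_file('std_msgs', 'String')
    example_deps = get_file_dependencies(example_msg)
    print('md5:      %s' % compute_md5(example_deps))
    print('full text:\n%s' % compute_full_text(example_deps))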
|
from cms.models import Page
from cms.templatetags.cms_tags import PageAttribute as BrokenPageAttribute
class PageAttribute(BrokenPageAttribute):
"""
    This monkey patch can be withdrawn once
    https://github.com/divio/django-cms/issues/5930 has been fixed.
"""
def get_value_for_context(self, context, **kwargs):
try:
return super().get_value_for_context(context, **kwargs)
except Page.DoesNotExist:
return ''
|
import os
import sys
import warnings
import pytest
from coverage import env
# Pytest can take additional options:
# $set_env.py: PYTEST_ADDOPTS - Extra arguments to pytest.
@pytest.fixture(autouse=True)
def set_warnings():
"""Enable DeprecationWarnings during all tests."""
warnings.simplefilter("default")
warnings.simplefilter("once", DeprecationWarning)
# A warning to suppress:
# setuptools/py33compat.py:54: DeprecationWarning: The value of convert_charrefs will become
# True in 3.5. You are encouraged to set the value explicitly.
# unescape = getattr(html, 'unescape', html_parser.HTMLParser().unescape)
# How come this warning is successfully suppressed here, but not in setup.cfg??
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message="The value of convert_charrefs will become True in 3.5.",
)
warnings.filterwarnings(
"ignore",
category=DeprecationWarning,
message=".* instead of inspect.getfullargspec",
)
if env.PYPY3:
# pypy3 warns about unclosed files a lot.
warnings.filterwarnings("ignore", r".*unclosed file", category=ResourceWarning)
@pytest.fixture(autouse=True)
def reset_sys_path():
"""Clean up sys.path changes around every test."""
sys_path = list(sys.path)
yield
sys.path[:] = sys_path
@pytest.fixture(autouse=True)
def fix_xdist_sys_path():
"""Prevent xdist from polluting the Python path.
We run tests that care a lot about the contents of sys.path. Pytest-xdist
changes sys.path, so running with xdist, vs without xdist, sets sys.path
differently. With xdist, sys.path[1] is an empty string, without xdist,
it's the virtualenv bin directory. We don't want the empty string, so
clobber that entry.
See: https://github.com/pytest-dev/pytest-xdist/issues/376
"""
if os.environ.get('PYTEST_XDIST_WORKER', ''):
# We are running in an xdist worker.
if sys.path[1] == '':
# xdist has set sys.path[1] to ''. Clobber it.
del sys.path[1]
# Also, don't let it sneak stuff in via PYTHONPATH.
try:
del os.environ['PYTHONPATH']
except KeyError:
pass
|
from __future__ import division
import numpy as np
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer.initializers import HeNormal
import chainer.links as L
from chainercv.links import Conv2DActiv
from chainercv.utils.bbox.bbox_iou import bbox_iou
from chainercv.links.model.fpn.mask_utils import mask_to_segm
from chainercv.links.model.fpn.mask_utils import segm_to_mask
class MaskHead(chainer.Chain):
"""Mask Head network of Mask R-CNN.
Args:
n_class (int): The number of classes including background.
scales (tuple of floats): The scales of feature maps.
"""
_canonical_level = 2
_canonical_scale = 224
_roi_size = 14
_roi_sample_ratio = 2
segm_size = _roi_size * 2
def __init__(self, n_class, scales):
super(MaskHead, self).__init__()
initialW = HeNormal(1, fan_option='fan_out')
with self.init_scope():
self.conv1 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
self.conv2 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
self.conv3 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
self.conv4 = Conv2DActiv(256, 3, pad=1, initialW=initialW)
self.conv5 = L.Deconvolution2D(
256, 2, pad=0, stride=2, initialW=initialW)
self.seg = L.Convolution2D(n_class, 1, pad=0, initialW=initialW)
self._n_class = n_class
self._scales = scales
def forward(self, hs, rois, roi_indices):
pooled_hs = []
for l, h in enumerate(hs):
if len(rois[l]) == 0:
continue
pooled_hs.append(F.roi_average_align_2d(
h, rois[l], roi_indices[l],
self._roi_size,
self._scales[l], self._roi_sample_ratio))
if len(pooled_hs) == 0:
out_size = self.segm_size
segs = chainer.Variable(
self.xp.empty((0, self._n_class, out_size, out_size),
dtype=np.float32))
return segs
h = F.concat(pooled_hs, axis=0)
h = self.conv1(h)
h = self.conv2(h)
h = self.conv3(h)
h = self.conv4(h)
h = F.relu(self.conv5(h))
return self.seg(h)
def distribute(self, rois, roi_indices):
"""Assigns feature levels to Rois based on their size.
Args:
rois (array): An array of shape :math:`(R, 4)`, \
where :math:`R` is the total number of RoIs in the given batch.
roi_indices (array): An array of shape :math:`(R,)`.
Returns:
two lists and one array:
:obj:`out_rois`, :obj:`out_roi_indices` and :obj:`order`.
* **out_rois**: A list of arrays of shape :math:`(R_l, 4)`, \
where :math:`R_l` is the number of RoIs in the :math:`l`-th \
feature map.
* **out_roi_indices** : A list of arrays of shape :math:`(R_l,)`.
* **order**: A correspondence between the output and the input. \
The relationship below is satisfied.
.. code:: python
xp.concatenate(out_rois, axis=0)[order[i]] == rois[i]
"""
size = self.xp.sqrt(self.xp.prod(rois[:, 2:] - rois[:, :2], axis=1))
level = self.xp.floor(self.xp.log2(
size / self._canonical_scale + 1e-6)).astype(np.int32)
# skip last level
level = self.xp.clip(
level + self._canonical_level, 0, len(self._scales) - 2)
masks = [level == l for l in range(len(self._scales))]
out_rois = [rois[mask] for mask in masks]
out_roi_indices = [roi_indices[mask] for mask in masks]
order = self.xp.argsort(
self.xp.concatenate([self.xp.where(mask)[0] for mask in masks]))
return out_rois, out_roi_indices, order
def decode(self, segms, bboxes, labels, sizes):
"""Decodes back to masks.
Args:
segms (iterable of arrays): An iterable of arrays of
shape :math:`(R_n, n\_class, M, M)`.
bboxes (iterable of arrays): An iterable of arrays of
shape :math:`(R_n, 4)`.
labels (iterable of arrays): An iterable of arrays of
shape :math:`(R_n,)`.
sizes (list of tuples of two ints): A list of
:math:`(H_n, W_n)`, where :math:`H_n` and :math:`W_n`
are height and width of the :math:`n`-th image.
Returns:
list of arrays:
This list contains instance segmentation for each image
in the batch.
More precisely, this is a list of boolean arrays of shape
:math:`(R'_n, H_n, W_n)`, where :math:`R'_n` is the number of
bounding boxes in the :math:`n`-th image.
"""
xp = chainer.backends.cuda.get_array_module(*segms)
if xp != np:
raise ValueError(
'MaskHead.decode only supports numpy inputs for now.')
masks = []
for bbox, segm, label, size in zip(
bboxes, segms, labels, sizes):
if len(segm) > 0:
masks.append(
segm_to_mask(segm[np.arange(len(label)), label + 1],
bbox, size))
else:
                masks.append(np.zeros((0,) + size, dtype=bool))
return masks
def mask_head_loss_pre(rois, roi_indices, gt_masks, gt_bboxes,
gt_head_labels, segm_size):
"""Loss function for Mask Head (pre).
This function processes RoIs for :func:`mask_head_loss_post` by
selecting RoIs for mask loss calculation and
preparing ground truth network output.
Args:
rois (iterable of arrays): An iterable of arrays of
shape :math:`(R_l, 4)`, where :math:`R_l` is the number
of RoIs in the :math:`l`-th feature map.
roi_indices (iterable of arrays): An iterable of arrays of
shape :math:`(R_l,)`.
gt_masks (iterable of arrays): An iterable of arrays whose shape is
:math:`(R_n, H, W)`, where :math:`R_n` is the number of
ground truth objects.
gt_head_labels (iterable of arrays): An iterable of arrays of
shape :math:`(R_l,)`. This is a collection of ground-truth
labels assigned to :obj:`rois` during bounding box localization
            stage. The range of values is :math:`[0, n\_class - 1]`.
segm_size (int): Size of the ground truth network output.
Returns:
tuple of four lists:
:obj:`mask_rois`, :obj:`mask_roi_indices`,
:obj:`gt_segms`, and :obj:`gt_mask_labels`.
* **rois**: A list of arrays of shape :math:`(R'_l, 4)`, \
where :math:`R'_l` is the number of RoIs in the :math:`l`-th \
feature map.
* **roi_indices**: A list of arrays of shape :math:`(R'_l,)`.
        * **gt_segms**: A list of arrays of shape :math:`(R'_l, M, M)`. \
:math:`M` is the argument :obj:`segm_size`.
* **gt_mask_labels**: A list of arrays of shape :math:`(R'_l,)` \
indicating the classes of ground truth.
"""
xp = cuda.get_array_module(*rois)
n_level = len(rois)
    roi_levels = xp.hstack(
        [xp.array((l,) * len(rois[l])) for l in range(n_level)]).astype(np.int32)
rois = xp.vstack(rois).astype(np.float32)
roi_indices = xp.hstack(roi_indices).astype(np.int32)
gt_head_labels = xp.hstack(gt_head_labels)
index = (gt_head_labels > 0).nonzero()[0]
mask_roi_levels = roi_levels[index]
mask_rois = rois[index]
mask_roi_indices = roi_indices[index]
gt_mask_labels = gt_head_labels[index]
gt_segms = xp.empty(
(len(mask_rois), segm_size, segm_size), dtype=np.float32)
for i in np.unique(cuda.to_cpu(mask_roi_indices)):
gt_mask = gt_masks[i]
gt_bbox = gt_bboxes[i]
index = (mask_roi_indices == i).nonzero()[0]
mask_roi = mask_rois[index]
iou = bbox_iou(mask_roi, gt_bbox)
gt_index = iou.argmax(axis=1)
gt_segms[index] = xp.array(
mask_to_segm(gt_mask, mask_roi, segm_size, gt_index))
flag_masks = [mask_roi_levels == l for l in range(n_level)]
mask_rois = [mask_rois[m] for m in flag_masks]
mask_roi_indices = [mask_roi_indices[m] for m in flag_masks]
gt_segms = [gt_segms[m] for m in flag_masks]
gt_mask_labels = [gt_mask_labels[m] for m in flag_masks]
return mask_rois, mask_roi_indices, gt_segms, gt_mask_labels
def mask_head_loss_post(segms, mask_roi_indices, gt_segms, gt_mask_labels,
batchsize):
"""Loss function for Mask Head (post).
Args:
segms (array): An array whose shape is :math:`(R, n\_class, M, M)`,
where :math:`R` is the total number of RoIs in the given batch.
        mask_roi_indices (list of arrays): A list of arrays returned by
:func:`mask_head_loss_pre`.
gt_segms (list of arrays): A list of arrays returned by
:func:`mask_head_loss_pre`.
gt_mask_labels (list of arrays): A list of arrays returned by
:func:`mask_head_loss_pre`.
batchsize (int): The size of batch.
Returns:
chainer.Variable:
Mask loss.
"""
xp = cuda.get_array_module(segms.array)
mask_roi_indices = xp.hstack(mask_roi_indices).astype(np.int32)
gt_segms = xp.vstack(gt_segms)
gt_mask_labels = xp.hstack(gt_mask_labels).astype(np.int32)
mask_loss = F.sigmoid_cross_entropy(
segms[np.arange(len(gt_mask_labels)), gt_mask_labels],
gt_segms.astype(np.int32))
return mask_loss
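# A small, self-contained sketch (not part of ChainerCV): it instantiates a
# MaskHead with made-up scales, distributes a handful of random RoIs over the
# feature levels and checks the `order` invariant documented in
# MaskHead.distribute(). All shapes and scale values below are illustrative
# assumptions only.
if __name__ == '__main__':
    np.random.seed(0)
    head = MaskHead(n_class=81, scales=(1 / 4, 1 / 8, 1 / 16, 1 / 32, 1 / 64))
    top_left = np.random.uniform(0, 112, size=(8, 2)).astype(np.float32)
    extent = np.random.uniform(16, 112, size=(8, 2)).astype(np.float32)
    rois = np.concatenate([top_left, top_left + extent], axis=1)
    roi_indices = np.zeros(8, dtype=np.int32)
    out_rois, out_roi_indices, order = head.distribute(rois, roi_indices)
    # Concatenating the per-level RoIs and indexing with `order` restores the input.
    np.testing.assert_allclose(np.concatenate(out_rois, axis=0)[order], rois)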
|
import logging
from YesssSMS import YesssSMS
import voluptuous as vol
from homeassistant.components.notify import PLATFORM_SCHEMA, BaseNotificationService
from homeassistant.const import CONF_PASSWORD, CONF_RECIPIENT, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from .const import CONF_PROVIDER
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_RECIPIENT): cv.string,
vol.Optional(CONF_PROVIDER, default="YESSS"): cv.string,
}
)
def get_service(hass, config, discovery_info=None):
"""Get the YesssSMS notification service."""
try:
yesss = YesssSMS(
config[CONF_USERNAME], config[CONF_PASSWORD], provider=config[CONF_PROVIDER]
)
except YesssSMS.UnsupportedProviderError as ex:
_LOGGER.error("Unknown provider: %s", ex)
return None
try:
if not yesss.login_data_valid():
_LOGGER.error(
"Login data is not valid! Please double check your login data at %s",
yesss.get_login_url(),
)
return None
_LOGGER.debug("Login data for '%s' valid", yesss.get_provider())
except YesssSMS.ConnectionError:
_LOGGER.warning(
"Connection Error, could not verify login data for '%s'",
yesss.get_provider(),
)
_LOGGER.debug(
"initialized; library version: %s, with %s",
yesss.version(),
yesss.get_provider(),
)
return YesssSMSNotificationService(yesss, config[CONF_RECIPIENT])
class YesssSMSNotificationService(BaseNotificationService):
"""Implement a notification service for the YesssSMS service."""
def __init__(self, client, recipient):
"""Initialize the service."""
self.yesss = client
self._recipient = recipient
def send_message(self, message="", **kwargs):
"""Send a SMS message via Yesss.at's website."""
if self.yesss.account_is_suspended():
            # Only retry the login after Home Assistant has been restarted with
            # (hopefully) new login data.
_LOGGER.error(
"Account is suspended, cannot send SMS. "
"Check your login data and restart Home Assistant"
)
return
try:
self.yesss.send(self._recipient, message)
except self.yesss.NoRecipientError as ex:
_LOGGER.error(
"You need to provide a recipient for SMS notification: %s", ex
)
except self.yesss.EmptyMessageError as ex:
_LOGGER.error("Cannot send empty SMS message: %s", ex)
except self.yesss.SMSSendingError as ex:
_LOGGER.error(ex)
except self.yesss.ConnectionError as ex:
_LOGGER.error(
"Unable to connect to server of provider (%s): %s",
self.yesss.get_provider(),
ex,
)
except self.yesss.AccountSuspendedError as ex:
_LOGGER.error(
"Wrong login credentials!! Verify correct credentials and "
"restart Home Assistant: %s",
ex,
)
except self.yesss.LoginError as ex:
_LOGGER.error("Wrong login credentials: %s", ex)
else:
_LOGGER.info("SMS sent")
|
import warnings
DISPLAY_WIDTH = "display_width"
ARITHMETIC_JOIN = "arithmetic_join"
ENABLE_CFTIMEINDEX = "enable_cftimeindex"
FILE_CACHE_MAXSIZE = "file_cache_maxsize"
WARN_FOR_UNCLOSED_FILES = "warn_for_unclosed_files"
CMAP_SEQUENTIAL = "cmap_sequential"
CMAP_DIVERGENT = "cmap_divergent"
KEEP_ATTRS = "keep_attrs"
DISPLAY_STYLE = "display_style"
OPTIONS = {
DISPLAY_WIDTH: 80,
ARITHMETIC_JOIN: "inner",
ENABLE_CFTIMEINDEX: True,
FILE_CACHE_MAXSIZE: 128,
WARN_FOR_UNCLOSED_FILES: False,
CMAP_SEQUENTIAL: "viridis",
CMAP_DIVERGENT: "RdBu_r",
KEEP_ATTRS: "default",
DISPLAY_STYLE: "html",
}
_JOIN_OPTIONS = frozenset(["inner", "outer", "left", "right", "exact"])
_DISPLAY_OPTIONS = frozenset(["text", "html"])
def _positive_integer(value):
return isinstance(value, int) and value > 0
_VALIDATORS = {
DISPLAY_WIDTH: _positive_integer,
ARITHMETIC_JOIN: _JOIN_OPTIONS.__contains__,
ENABLE_CFTIMEINDEX: lambda value: isinstance(value, bool),
FILE_CACHE_MAXSIZE: _positive_integer,
WARN_FOR_UNCLOSED_FILES: lambda value: isinstance(value, bool),
KEEP_ATTRS: lambda choice: choice in [True, False, "default"],
DISPLAY_STYLE: _DISPLAY_OPTIONS.__contains__,
}
def _set_file_cache_maxsize(value):
from ..backends.file_manager import FILE_CACHE
FILE_CACHE.maxsize = value
def _warn_on_setting_enable_cftimeindex(enable_cftimeindex):
warnings.warn(
"The enable_cftimeindex option is now a no-op "
"and will be removed in a future version of xarray.",
FutureWarning,
)
_SETTERS = {
FILE_CACHE_MAXSIZE: _set_file_cache_maxsize,
ENABLE_CFTIMEINDEX: _warn_on_setting_enable_cftimeindex,
}
def _get_keep_attrs(default):
global_choice = OPTIONS["keep_attrs"]
if global_choice == "default":
return default
elif global_choice in [True, False]:
return global_choice
else:
raise ValueError(
"The global option keep_attrs must be one of True, False or 'default'."
)
class set_options:
"""Set options for xarray in a controlled context.
Currently supported options:
- ``display_width``: maximum display width for ``repr`` on xarray objects.
Default: ``80``.
- ``arithmetic_join``: DataArray/Dataset alignment in binary operations.
Default: ``'inner'``.
- ``file_cache_maxsize``: maximum number of open files to hold in xarray's
      global least-recently-used cache. This should be smaller than your
system's per-process file descriptor limit, e.g., ``ulimit -n`` on Linux.
Default: 128.
- ``warn_for_unclosed_files``: whether or not to issue a warning when
unclosed files are deallocated (default False). This is mostly useful
for debugging.
- ``cmap_sequential``: colormap to use for nondivergent data plots.
Default: ``viridis``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
- ``cmap_divergent``: colormap to use for divergent data plots.
Default: ``RdBu_r``. If string, must be matplotlib built-in colormap.
Can also be a Colormap object (e.g. mpl.cm.magma)
- ``keep_attrs``: rule for whether to keep attributes on xarray
Datasets/dataarrays after operations. Either ``True`` to always keep
attrs, ``False`` to always discard them, or ``'default'`` to use original
logic that attrs should only be kept in unambiguous circumstances.
Default: ``'default'``.
    - ``display_style``: display style to use in jupyter for xarray objects.
      Default: ``'html'``. The other option is ``'text'``.
You can use ``set_options`` either as a context manager:
>>> ds = xr.Dataset({"x": np.arange(1000)})
>>> with xr.set_options(display_width=40):
... print(ds)
...
<xarray.Dataset>
Dimensions: (x: 1000)
Coordinates:
* x (x) int64 0 1 2 ... 998 999
Data variables:
*empty*
Or to set global options:
>>> xr.set_options(display_width=80) # doctest: +ELLIPSIS
<xarray.core.options.set_options object at 0x...>
"""
def __init__(self, **kwargs):
self.old = {}
for k, v in kwargs.items():
if k not in OPTIONS:
raise ValueError(
"argument name %r is not in the set of valid options %r"
% (k, set(OPTIONS))
)
if k in _VALIDATORS and not _VALIDATORS[k](v):
if k == ARITHMETIC_JOIN:
expected = f"Expected one of {_JOIN_OPTIONS!r}"
elif k == DISPLAY_STYLE:
expected = f"Expected one of {_DISPLAY_OPTIONS!r}"
else:
expected = ""
raise ValueError(
f"option {k!r} given an invalid value: {v!r}. " + expected
)
self.old[k] = OPTIONS[k]
self._apply_update(kwargs)
def _apply_update(self, options_dict):
for k, v in options_dict.items():
if k in _SETTERS:
_SETTERS[k](v)
OPTIONS.update(options_dict)
def __enter__(self):
return
def __exit__(self, type, value, traceback):
self._apply_update(self.old)
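# A brief usage sketch (illustrative only): set_options() as a context manager
# temporarily overrides OPTIONS and restores the previous values on exit, while
# _get_keep_attrs() resolves a per-call default against the global setting.
if __name__ == "__main__":
    print(OPTIONS["display_width"], _get_keep_attrs(default=False))      # 80 False
    with set_options(display_width=40, keep_attrs=True):
        print(OPTIONS["display_width"], _get_keep_attrs(default=False))  # 40 True
    print(OPTIONS["display_width"], _get_keep_attrs(default=False))      # 80 False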
|
from django.apps import AppConfig
from django.conf import settings
from django.core.checks import register
from django.db.models import CharField, TextField
from weblate.utils.checks import (
check_cache,
check_celery,
check_data_writable,
check_diskspace,
check_encoding,
check_errors,
check_mail_connection,
check_perms,
check_settings,
check_site,
check_templates,
)
from weblate.utils.errors import init_error_collection
from weblate.utils.version import check_version
from .db import (
MySQLSearchLookup,
MySQLSubstringLookup,
PostgreSQLSearchLookup,
PostgreSQLSubstringLookup,
)
class UtilsConfig(AppConfig):
name = "weblate.utils"
label = "utils"
verbose_name = "Utils"
def ready(self):
super().ready()
register(check_data_writable)
register(check_mail_connection, deploy=True)
register(check_celery, deploy=True)
register(check_cache, deploy=True)
register(check_settings, deploy=True)
register(check_templates, deploy=True)
register(check_site)
register(check_perms, deploy=True)
register(check_errors, deploy=True)
register(check_version, deploy=True)
register(check_encoding)
register(check_diskspace, deploy=True)
init_error_collection()
engine = settings.DATABASES["default"]["ENGINE"]
if engine == "django.db.backends.postgresql":
CharField.register_lookup(PostgreSQLSearchLookup)
TextField.register_lookup(PostgreSQLSearchLookup)
CharField.register_lookup(PostgreSQLSubstringLookup)
TextField.register_lookup(PostgreSQLSubstringLookup)
elif engine == "django.db.backends.mysql":
CharField.register_lookup(MySQLSearchLookup)
TextField.register_lookup(MySQLSearchLookup)
CharField.register_lookup(MySQLSubstringLookup)
TextField.register_lookup(MySQLSubstringLookup)
else:
raise Exception(f"Unsupported database: {engine}")
|
from __future__ import division
import numpy as np
import unittest
from chainer import testing
from chainercv.links.model.fpn.mask_utils import mask_to_segm
from chainercv.links.model.fpn.mask_utils import segm_to_mask
try:
import cv2 # NOQA
_cv2_available = True
except ImportError:
_cv2_available = False
@unittest.skipUnless(_cv2_available, 'cv2 is not installed')
class TestMaskUtils(unittest.TestCase):
def setUp(self):
        # When n_inst >= 3, the test fails because the image produced by
        # `transforms.resize` is not exactly aligned to the corners.
n_inst = 2
self.segm_size = 3
self.size = (36, 48)
self.segm = np.ones(
(n_inst, self.segm_size, self.segm_size), dtype=np.float32)
self.bbox = np.zeros((n_inst, 4), dtype=np.float32)
for i in range(n_inst):
self.bbox[i, 0] = 10 + i
self.bbox[i, 1] = 10 + i
self.bbox[i, 2] = self.bbox[i, 0] + self.segm_size * (1 + i)
self.bbox[i, 3] = self.bbox[i, 1] + self.segm_size * (1 + i)
        self.mask = np.zeros((n_inst,) + self.size, dtype=bool)
for i, bb in enumerate(self.bbox):
bb = bb.astype(np.int32)
self.mask[i, bb[0]:bb[2], bb[1]:bb[3]] = 1
def test_segm_to_mask(self):
mask = segm_to_mask(self.segm, self.bbox, self.size)
np.testing.assert_equal(mask, self.mask)
def test_mask_to_segm(self):
segm = mask_to_segm(self.mask, self.bbox, self.segm_size)
np.testing.assert_equal(segm, self.segm)
def test_mask_to_segm_index(self):
index = np.arange(len(self.bbox))[::-1]
segm = mask_to_segm(
self.mask, self.bbox[::-1],
self.segm_size, index=index)
segm = segm[::-1]
np.testing.assert_equal(segm, self.segm)
testing.run_module(__name__, __file__)
|
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
import homeassistant.util.color as color_util
from . import DOMAIN as SKYBELL_DOMAIN, SkybellDevice
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the platform for a Skybell device."""
skybell = hass.data.get(SKYBELL_DOMAIN)
sensors = []
for device in skybell.get_devices():
sensors.append(SkybellLight(device))
add_entities(sensors, True)
def _to_skybell_level(level):
"""Convert the given Home Assistant light level (0-255) to Skybell (0-100)."""
return int((level * 100) / 255)
def _to_hass_level(level):
"""Convert the given Skybell (0-100) light level to Home Assistant (0-255)."""
return int((level * 255) / 100)
class SkybellLight(SkybellDevice, LightEntity):
"""A binary sensor implementation for Skybell devices."""
def __init__(self, device):
"""Initialize a light for a Skybell device."""
super().__init__(device)
self._name = self._device.name
@property
def name(self):
"""Return the name of the sensor."""
return self._name
def turn_on(self, **kwargs):
"""Turn on the light."""
if ATTR_HS_COLOR in kwargs:
rgb = color_util.color_hs_to_RGB(*kwargs[ATTR_HS_COLOR])
self._device.led_rgb = rgb
elif ATTR_BRIGHTNESS in kwargs:
self._device.led_intensity = _to_skybell_level(kwargs[ATTR_BRIGHTNESS])
else:
self._device.led_intensity = _to_skybell_level(255)
def turn_off(self, **kwargs):
"""Turn off the light."""
self._device.led_intensity = 0
@property
def is_on(self):
"""Return true if device is on."""
return self._device.led_intensity > 0
@property
def brightness(self):
"""Return the brightness of the light."""
return _to_hass_level(self._device.led_intensity)
@property
def hs_color(self):
"""Return the color of the light."""
return color_util.color_RGB_to_hs(*self._device.led_rgb)
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
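# A quick illustrative sketch (not part of the integration): the 0-255 <-> 0-100
# brightness conversions truncate to integers, so a round trip is close to, but
# not always exactly, the original Home Assistant level.
if __name__ == "__main__":
    for hass_level in (0, 1, 127, 254, 255):
        skybell_level = _to_skybell_level(hass_level)
        print(hass_level, "->", skybell_level, "->", _to_hass_level(skybell_level))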
|
from copy import deepcopy
from homeassistant.components.deconz.const import (
CONF_ALLOW_DECONZ_GROUPS,
DOMAIN as DECONZ_DOMAIN,
)
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_MAX_MIREDS,
ATTR_MIN_MIREDS,
ATTR_TRANSITION,
DOMAIN as LIGHT_DOMAIN,
EFFECT_COLORLOOP,
FLASH_LONG,
FLASH_SHORT,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_SUPPORTED_FEATURES,
STATE_OFF,
STATE_ON,
)
from homeassistant.setup import async_setup_component
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.async_mock import patch
GROUPS = {
"1": {
"id": "Light group id",
"name": "Light group",
"type": "LightGroup",
"state": {"all_on": False, "any_on": True},
"action": {},
"scenes": [],
"lights": ["1", "2"],
},
"2": {
"id": "Empty group id",
"name": "Empty group",
"type": "LightGroup",
"state": {},
"action": {},
"scenes": [],
"lights": [],
},
}
LIGHTS = {
"1": {
"id": "RGB light id",
"name": "RGB light",
"state": {
"on": True,
"bri": 255,
"colormode": "xy",
"effect": "colorloop",
"xy": (500, 500),
"reachable": True,
},
"type": "Extended color light",
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"ctmax": 454,
"ctmin": 155,
"id": "Tunable white light id",
"name": "Tunable white light",
"state": {"on": True, "colormode": "ct", "ct": 2500, "reachable": True},
"type": "Tunable white light",
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
"3": {
"id": "On off switch id",
"name": "On off switch",
"type": "On/Off plug-in unit",
"state": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
"4": {
"name": "On off light",
"state": {"on": True, "reachable": True},
"type": "On and Off light",
"uniqueid": "00:00:00:00:00:00:00:03-00",
},
"5": {
"ctmax": 1000,
"ctmin": 0,
"id": "Tunable white light with bad maxmin values id",
"name": "Tunable white light with bad maxmin values",
"state": {"on": True, "colormode": "ct", "ct": 2500, "reachable": True},
"type": "Tunable white light",
"uniqueid": "00:00:00:00:00:00:00:04-00",
},
}
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a gateway."""
assert (
await async_setup_component(
hass, LIGHT_DOMAIN, {"light": {"platform": DECONZ_DOMAIN}}
)
is True
)
assert DECONZ_DOMAIN not in hass.data
async def test_no_lights_or_groups(hass):
"""Test that no lights or groups entities are created."""
await setup_deconz_integration(hass)
assert len(hass.states.async_all()) == 0
async def test_lights_and_groups(hass):
"""Test that lights or groups entities are created."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["groups"] = deepcopy(GROUPS)
data["lights"] = deepcopy(LIGHTS)
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 6
rgb_light = hass.states.get("light.rgb_light")
assert rgb_light.state == STATE_ON
assert rgb_light.attributes[ATTR_BRIGHTNESS] == 255
assert rgb_light.attributes[ATTR_HS_COLOR] == (224.235, 100.0)
assert rgb_light.attributes["is_deconz_group"] is False
assert rgb_light.attributes[ATTR_SUPPORTED_FEATURES] == 61
tunable_white_light = hass.states.get("light.tunable_white_light")
assert tunable_white_light.state == STATE_ON
assert tunable_white_light.attributes[ATTR_COLOR_TEMP] == 2500
assert tunable_white_light.attributes[ATTR_MAX_MIREDS] == 454
assert tunable_white_light.attributes[ATTR_MIN_MIREDS] == 155
assert tunable_white_light.attributes[ATTR_SUPPORTED_FEATURES] == 2
tunable_white_light_bad_maxmin = hass.states.get(
"light.tunable_white_light_with_bad_maxmin_values"
)
assert tunable_white_light_bad_maxmin.state == STATE_ON
assert tunable_white_light_bad_maxmin.attributes[ATTR_COLOR_TEMP] == 2500
assert tunable_white_light_bad_maxmin.attributes[ATTR_MAX_MIREDS] == 650
assert tunable_white_light_bad_maxmin.attributes[ATTR_MIN_MIREDS] == 140
assert tunable_white_light_bad_maxmin.attributes[ATTR_SUPPORTED_FEATURES] == 2
on_off_light = hass.states.get("light.on_off_light")
assert on_off_light.state == STATE_ON
assert on_off_light.attributes[ATTR_SUPPORTED_FEATURES] == 0
light_group = hass.states.get("light.light_group")
assert light_group.state == STATE_ON
assert light_group.attributes["all_on"] is False
empty_group = hass.states.get("light.empty_group")
assert empty_group is None
state_changed_event = {
"t": "event",
"e": "changed",
"r": "lights",
"id": "1",
"state": {"on": False},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
rgb_light = hass.states.get("light.rgb_light")
assert rgb_light.state == STATE_OFF
# Verify service calls
rgb_light_device = gateway.api.lights["1"]
# Service turn on light with short color loop
with patch.object(rgb_light_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: "light.rgb_light",
ATTR_COLOR_TEMP: 2500,
ATTR_BRIGHTNESS: 200,
ATTR_TRANSITION: 5,
ATTR_FLASH: FLASH_SHORT,
ATTR_EFFECT: EFFECT_COLORLOOP,
},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put",
"/lights/1/state",
json={
"ct": 2500,
"bri": 200,
"transitiontime": 50,
"alert": "select",
"effect": "colorloop",
},
)
# Service turn on light disabling color loop with long flashing
with patch.object(rgb_light_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_ON,
{
ATTR_ENTITY_ID: "light.rgb_light",
ATTR_HS_COLOR: (20, 30),
ATTR_FLASH: FLASH_LONG,
ATTR_EFFECT: "None",
},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put",
"/lights/1/state",
json={"xy": (0.411, 0.351), "alert": "lselect", "effect": "none"},
)
    # Service turn off light that is already off; no request should be sent
with patch.object(rgb_light_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{
ATTR_ENTITY_ID: "light.rgb_light",
ATTR_TRANSITION: 5,
ATTR_FLASH: FLASH_SHORT,
},
blocking=True,
)
await hass.async_block_till_done()
assert not set_callback.called
state_changed_event = {
"t": "event",
"e": "changed",
"r": "lights",
"id": "1",
"state": {"on": True},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
# Service turn off light with short flashing
with patch.object(rgb_light_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{
ATTR_ENTITY_ID: "light.rgb_light",
ATTR_TRANSITION: 5,
ATTR_FLASH: FLASH_SHORT,
},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put",
"/lights/1/state",
json={"bri": 0, "transitiontime": 50, "alert": "select"},
)
# Service turn off light with long flashing
with patch.object(rgb_light_device, "_request", return_value=True) as set_callback:
await hass.services.async_call(
LIGHT_DOMAIN,
SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: "light.rgb_light", ATTR_FLASH: FLASH_LONG},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put", "/lights/1/state", json={"alert": "lselect"}
)
await hass.config_entries.async_unload(config_entry.entry_id)
assert len(hass.states.async_all()) == 0
async def test_disable_light_groups(hass):
"""Test disallowing light groups work."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["groups"] = deepcopy(GROUPS)
data["lights"] = deepcopy(LIGHTS)
config_entry = await setup_deconz_integration(
hass,
options={CONF_ALLOW_DECONZ_GROUPS: False},
get_state_response=data,
)
assert len(hass.states.async_all()) == 5
assert hass.states.get("light.rgb_light")
assert hass.states.get("light.tunable_white_light")
assert hass.states.get("light.light_group") is None
assert hass.states.get("light.empty_group") is None
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_DECONZ_GROUPS: True}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 6
assert hass.states.get("light.light_group")
hass.config_entries.async_update_entry(
config_entry, options={CONF_ALLOW_DECONZ_GROUPS: False}
)
await hass.async_block_till_done()
assert len(hass.states.async_all()) == 5
assert hass.states.get("light.light_group") is None
|
import copy
from datetime import timedelta
import ssl
import plexapi
import requests
import homeassistant.components.plex.const as const
from homeassistant.config_entries import (
ENTRY_STATE_LOADED,
ENTRY_STATE_NOT_LOADED,
ENTRY_STATE_SETUP_ERROR,
ENTRY_STATE_SETUP_RETRY,
)
from homeassistant.const import CONF_TOKEN, CONF_URL, CONF_VERIFY_SSL
import homeassistant.util.dt as dt_util
from .const import DEFAULT_DATA, DEFAULT_OPTIONS
from .helpers import trigger_plex_update
from .mock_classes import MockGDM, MockPlexAccount, MockPlexServer
from tests.async_mock import patch
from tests.common import MockConfigEntry, async_fire_time_changed
async def test_set_config_entry_unique_id(hass, entry, mock_plex_server):
"""Test updating missing unique_id from config entry."""
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
assert (
hass.config_entries.async_entries(const.DOMAIN)[0].unique_id
== mock_plex_server.machineIdentifier
)
async def test_setup_config_entry_with_error(hass, entry):
"""Test setup component from config entry with errors."""
with patch(
"homeassistant.components.plex.PlexServer.connect",
side_effect=requests.exceptions.ConnectionError,
):
entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(entry.entry_id) is False
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_SETUP_RETRY
with patch(
"homeassistant.components.plex.PlexServer.connect",
side_effect=plexapi.exceptions.BadRequest,
):
next_update = dt_util.utcnow() + timedelta(seconds=30)
async_fire_time_changed(hass, next_update)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_SETUP_ERROR
async def test_setup_with_insecure_config_entry(hass, entry, setup_plex_server):
"""Test setup component with config."""
INSECURE_DATA = copy.deepcopy(DEFAULT_DATA)
INSECURE_DATA[const.PLEX_SERVER_CONFIG][CONF_VERIFY_SSL] = False
entry.data = INSECURE_DATA
await setup_plex_server(config_entry=entry)
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
async def test_unload_config_entry(hass, entry, mock_plex_server):
"""Test unloading a config entry."""
config_entries = hass.config_entries.async_entries(const.DOMAIN)
assert len(config_entries) == 1
assert entry is config_entries[0]
assert entry.state == ENTRY_STATE_LOADED
server_id = mock_plex_server.machineIdentifier
loaded_server = hass.data[const.DOMAIN][const.SERVERS][server_id]
assert loaded_server.plex_server == mock_plex_server
websocket = hass.data[const.DOMAIN][const.WEBSOCKETS][server_id]
await hass.config_entries.async_unload(entry.entry_id)
assert websocket.close.called
assert entry.state == ENTRY_STATE_NOT_LOADED
async def test_setup_with_photo_session(hass, entry, mock_websocket, setup_plex_server):
"""Test setup component with config."""
mock_plex_server = await setup_plex_server(config_entry=entry, session_type="photo")
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert entry.state == ENTRY_STATE_LOADED
trigger_plex_update(mock_websocket)
await hass.async_block_till_done()
media_player = hass.states.get("media_player.plex_product_title")
assert media_player.state == "idle"
sensor = hass.states.get("sensor.plex_plex_server_1")
assert sensor.state == str(len(mock_plex_server.accounts))
async def test_setup_when_certificate_changed(hass, entry):
"""Test setup component when the Plex certificate has changed."""
old_domain = "1-2-3-4.1234567890abcdef1234567890abcdef.plex.direct"
old_url = f"https://{old_domain}:32400"
OLD_HOSTNAME_DATA = copy.deepcopy(DEFAULT_DATA)
OLD_HOSTNAME_DATA[const.PLEX_SERVER_CONFIG][CONF_URL] = old_url
class WrongCertHostnameException(requests.exceptions.SSLError):
"""Mock the exception showing a mismatched hostname."""
def __init__(self):
self.__context__ = ssl.SSLCertVerificationError(
f"hostname '{old_domain}' doesn't match"
)
old_entry = MockConfigEntry(
domain=const.DOMAIN,
data=OLD_HOSTNAME_DATA,
options=DEFAULT_OPTIONS,
unique_id=DEFAULT_DATA["server_id"],
)
# Test with account failure
with patch(
"plexapi.server.PlexServer", side_effect=WrongCertHostnameException
), patch(
"plexapi.myplex.MyPlexAccount", side_effect=plexapi.exceptions.Unauthorized
):
old_entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(old_entry.entry_id) is False
await hass.async_block_till_done()
assert old_entry.state == ENTRY_STATE_SETUP_ERROR
await hass.config_entries.async_unload(old_entry.entry_id)
# Test with no servers found
with patch(
"plexapi.server.PlexServer", side_effect=WrongCertHostnameException
), patch("plexapi.myplex.MyPlexAccount", return_value=MockPlexAccount(servers=0)):
assert await hass.config_entries.async_setup(old_entry.entry_id) is False
await hass.async_block_till_done()
assert old_entry.state == ENTRY_STATE_SETUP_ERROR
await hass.config_entries.async_unload(old_entry.entry_id)
# Test with success
with patch(
"plexapi.server.PlexServer", side_effect=WrongCertHostnameException
), patch("plexapi.myplex.MyPlexAccount", return_value=MockPlexAccount()):
assert await hass.config_entries.async_setup(old_entry.entry_id)
await hass.async_block_till_done()
assert len(hass.config_entries.async_entries(const.DOMAIN)) == 1
assert old_entry.state == ENTRY_STATE_LOADED
assert (
old_entry.data[const.PLEX_SERVER_CONFIG][CONF_URL]
== entry.data[const.PLEX_SERVER_CONFIG][CONF_URL]
)
async def test_tokenless_server(hass, entry, mock_websocket, setup_plex_server):
"""Test setup with a server with token auth disabled."""
TOKENLESS_DATA = copy.deepcopy(DEFAULT_DATA)
TOKENLESS_DATA[const.PLEX_SERVER_CONFIG].pop(CONF_TOKEN, None)
entry.data = TOKENLESS_DATA
await setup_plex_server(config_entry=entry)
assert entry.state == ENTRY_STATE_LOADED
async def test_bad_token_with_tokenless_server(hass, entry):
"""Test setup with a bad token and a server with token auth disabled."""
with patch("plexapi.server.PlexServer", return_value=MockPlexServer()), patch(
"plexapi.myplex.MyPlexAccount", side_effect=plexapi.exceptions.Unauthorized
), patch(
"homeassistant.components.plex.GDM", return_value=MockGDM(disabled=True)
), patch(
"homeassistant.components.plex.PlexWebsocket", autospec=True
) as mock_websocket:
entry.add_to_hass(hass)
assert await hass.config_entries.async_setup(entry.entry_id)
await hass.async_block_till_done()
assert entry.state == ENTRY_STATE_LOADED
# Ensure updates that rely on account return nothing
trigger_plex_update(mock_websocket)
await hass.async_block_till_done()
|
import datetime
import io
import json
import os
import shutil
import textwrap
import unidecode
from urllib.parse import urlsplit, urlunsplit
import dateutil.tz
import dateutil.zoneinfo
from mako.template import Template
from pkg_resources import resource_filename
import nikola
from nikola.nikola import DEFAULT_INDEX_READ_MORE_LINK, DEFAULT_FEED_READ_MORE_LINK, LEGAL_VALUES
from nikola.plugin_categories import Command
from nikola.utils import ask, ask_yesno, get_logger, makedirs, load_messages
from nikola.packages.tzlocal import get_localzone
LOGGER = get_logger('init')
SAMPLE_CONF = {
'BLOG_AUTHOR': "Your Name",
'BLOG_TITLE': "Demo Site",
'SITE_URL': "https://example.com/",
'BLOG_EMAIL': "[email protected]",
'BLOG_DESCRIPTION': "This is a demo site for Nikola.",
'PRETTY_URLS': True,
'STRIP_INDEXES': True,
'DEFAULT_LANG': "en",
'TRANSLATIONS': """{
DEFAULT_LANG: "",
# Example for another language:
# "es": "./es",
}""",
'THEME': LEGAL_VALUES['DEFAULT_THEME'],
'TIMEZONE': 'UTC',
'COMMENT_SYSTEM': 'disqus',
'COMMENT_SYSTEM_ID': 'nikolademo',
'CATEGORY_ALLOW_HIERARCHIES': False,
'CATEGORY_OUTPUT_FLAT_HIERARCHY': False,
'INDEX_READ_MORE_LINK': DEFAULT_INDEX_READ_MORE_LINK,
'FEED_READ_MORE_LINK': DEFAULT_FEED_READ_MORE_LINK,
'POSTS': """(
("posts/*.rst", "posts", "post.tmpl"),
("posts/*.md", "posts", "post.tmpl"),
("posts/*.txt", "posts", "post.tmpl"),
("posts/*.html", "posts", "post.tmpl"),
)""",
'PAGES': """(
("pages/*.rst", "pages", "page.tmpl"),
("pages/*.md", "pages", "page.tmpl"),
("pages/*.txt", "pages", "page.tmpl"),
("pages/*.html", "pages", "page.tmpl"),
)""",
'COMPILERS': """{
"rest": ['.rst', '.txt'],
"markdown": ['.md', '.mdown', '.markdown'],
"textile": ['.textile'],
"txt2tags": ['.t2t'],
"bbcode": ['.bb'],
"wiki": ['.wiki'],
"ipynb": ['.ipynb'],
"html": ['.html', '.htm'],
# PHP files are rendered the usual way (i.e. with the full templates).
# The resulting files have .php extensions, making it possible to run
# them without reconfiguring your server to recognize them.
"php": ['.php'],
# Pandoc detects the input from the source filename
# but is disabled by default as it would conflict
# with many of the others.
# "pandoc": ['.rst', '.md', '.txt'],
}""",
'NAVIGATION_LINKS': """{
DEFAULT_LANG: (
("/archive.html", "Archives"),
("/categories/index.html", "Tags"),
("/rss.xml", "RSS feed"),
),
}""",
'REDIRECTIONS': [],
'_METADATA_MAPPING_FORMATS': ', '.join(LEGAL_VALUES['METADATA_MAPPING'])
}
# Generate a list of supported languages here.
# Ugly code follows.
_suplang = {}
_sllength = 0
for k, v in LEGAL_VALUES['TRANSLATIONS'].items():
if not isinstance(k, tuple):
main = k
_suplang[main] = v
else:
main = k[0]
k = k[1:]
bad = []
good = []
for i in k:
if i.startswith('!'):
bad.append(i[1:])
else:
good.append(i)
different = ''
if good or bad:
different += ' ['
if good:
different += 'ALTERNATIVELY ' + ', '.join(good)
if bad:
if good:
different += '; '
different += 'NOT ' + ', '.join(bad)
if good or bad:
different += ']'
_suplang[main] = v + different
if len(main) > _sllength:
_sllength = len(main)
_sllength = str(_sllength)
suplang = (u'# {0:<' + _sllength + u'} {1}\n').format('en', 'English')
del _suplang['en']
for k, v in sorted(_suplang.items()):
suplang += (u'# {0:<' + _sllength + u'} {1}\n').format(k, v)
SAMPLE_CONF['_SUPPORTED_LANGUAGES'] = suplang.strip()
# Generate a list of supported comment systems here.
SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'] = '\n'.join(textwrap.wrap(
u', '.join(LEGAL_VALUES['COMMENT_SYSTEM']),
initial_indent=u'# ', subsequent_indent=u'# ', width=79))
def format_default_translations_config(additional_languages):
"""Adapt TRANSLATIONS setting for all additional languages."""
if not additional_languages:
return SAMPLE_CONF["TRANSLATIONS"]
lang_paths = [' DEFAULT_LANG: "",']
for lang in sorted(additional_languages):
lang_paths.append(' "{0}": "./{0}",'.format(lang))
return "{{\n{0}\n}}".format("\n".join(lang_paths))
def get_default_translations_dict(default_lang, additional_languages):
"""Generate a TRANSLATIONS dict matching the config from 'format_default_translations_config'."""
tr = {default_lang: ''}
for l in additional_languages:
tr[l] = './' + l
return tr
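# A small illustrative note (not used by the command itself): with English as the
# default language and Spanish/French as additional languages,
#
#     >>> get_default_translations_dict('en', ['es', 'fr'])
#     {'en': '', 'es': './es', 'fr': './fr'}
#
# while format_default_translations_config(['es', 'fr']) renders the same mapping
# as the TRANSLATIONS string that ends up in conf.py, with one "./xx" path per
# additional language.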
def format_navigation_links(additional_languages, default_lang, messages, strip_indexes=False):
"""Return the string to configure NAVIGATION_LINKS."""
f = u"""\
{0}: (
("{1}/archive.html", "{2[Archive]}"),
("{1}/categories/{3}", "{2[Tags]}"),
("{1}/rss.xml", "{2[RSS feed]}"),
),"""
pairs = []
def get_msg(lang):
"""Generate a smaller messages dict with fallback."""
fmsg = {}
for i in (u'Archive', u'Tags', u'RSS feed'):
if messages[lang][i]:
fmsg[i] = messages[lang][i]
else:
fmsg[i] = i
return fmsg
if strip_indexes:
index_html = ''
else:
index_html = 'index.html'
# handle the default language
pairs.append(f.format('DEFAULT_LANG', '', get_msg(default_lang), index_html))
for l in additional_languages:
pairs.append(f.format(json.dumps(l, ensure_ascii=False), '/' + l, get_msg(l), index_html))
return u'{{\n{0}\n}}'.format('\n\n'.join(pairs))
# In order to ensure proper escaping, all variables but the pre-formatted ones
# are handled by json.dumps().
def prepare_config(config):
"""Parse sample config with JSON."""
p = config.copy()
p.update({k: json.dumps(v, ensure_ascii=False) for k, v in p.items()
if k not in ('POSTS', 'PAGES', 'COMPILERS', 'TRANSLATIONS', 'NAVIGATION_LINKS', '_SUPPORTED_LANGUAGES', '_SUPPORTED_COMMENT_SYSTEMS', 'INDEX_READ_MORE_LINK', 'FEED_READ_MORE_LINK', '_METADATA_MAPPING_FORMATS')})
# READ_MORE_LINKs require some special treatment.
p['INDEX_READ_MORE_LINK'] = "'" + p['INDEX_READ_MORE_LINK'].replace("'", "\\'") + "'"
p['FEED_READ_MORE_LINK'] = "'" + p['FEED_READ_MORE_LINK'].replace("'", "\\'") + "'"
# fix booleans and None
p.update({k: str(v) for k, v in config.items() if isinstance(v, bool) or v is None})
return p
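# A short illustrative note: prepare_config() expects the full SAMPLE_CONF (or a
# copy updated by ask_questions); plain values are json-encoded, the two
# READ_MORE_LINK templates are wrapped in single quotes, and booleans/None become
# the strings 'True'/'False'/'None', e.g.
#
#     >>> p = prepare_config(SAMPLE_CONF)
#     >>> p['BLOG_TITLE']
#     '"Demo Site"'
#     >>> p['PRETTY_URLS']
#     'True'
#
# so each value can be substituted into conf.py.in verbatim.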
def test_destination(destination, demo=False):
"""Check if the destination already exists, which can break demo site creation."""
# Issue #2214
if demo and os.path.exists(destination):
LOGGER.warning("The directory {0} already exists, and a new demo site cannot be initialized in an existing directory.".format(destination))
LOGGER.warning("Please remove the directory and try again, or use another directory.")
LOGGER.info("Hint: If you want to initialize a git repository in this directory, run `git init` in the directory after creating a Nikola site.")
return False
else:
return True
class CommandInit(Command):
"""Create a new site."""
name = "init"
doc_usage = "[--demo] [--quiet] folder"
needs_config = False
doc_purpose = "create a Nikola site in the specified folder"
cmd_options = [
{
'name': 'quiet',
'long': 'quiet',
'short': 'q',
'default': False,
'type': bool,
'help': "Do not ask questions about config.",
},
{
'name': 'demo',
'long': 'demo',
'short': 'd',
'default': False,
'type': bool,
'help': "Create a site filled with example data.",
}
]
@classmethod
def copy_sample_site(cls, target):
"""Copy sample site data to target directory."""
src = resource_filename('nikola', os.path.join('data', 'samplesite'))
shutil.copytree(src, target)
@staticmethod
def create_configuration(target):
"""Create configuration file."""
template_path = resource_filename('nikola', 'conf.py.in')
conf_template = Template(filename=template_path)
conf_path = os.path.join(target, 'conf.py')
with io.open(conf_path, 'w+', encoding='utf8') as fd:
fd.write(conf_template.render(**prepare_config(SAMPLE_CONF)))
@staticmethod
def create_configuration_to_string():
"""Return configuration file as a string."""
template_path = resource_filename('nikola', 'conf.py.in')
conf_template = Template(filename=template_path)
return conf_template.render(**prepare_config(SAMPLE_CONF))
@classmethod
def create_empty_site(cls, target):
"""Create an empty site with directories only."""
for folder in ('files', 'galleries', 'images', 'listings', 'posts', 'pages'):
makedirs(os.path.join(target, folder))
@staticmethod
def ask_questions(target, demo=False):
"""Ask some questions about Nikola."""
def urlhandler(default, toconf):
answer = ask('Site URL', 'https://example.com/')
try:
answer = answer.decode('utf-8')
except (AttributeError, UnicodeDecodeError):
pass
if not answer.startswith(u'http'):
print(" ERROR: You must specify a protocol (http or https).")
urlhandler(default, toconf)
return
if not answer.endswith('/'):
print(" The URL does not end in '/' -- adding it.")
answer += '/'
dst_url = urlsplit(answer)
try:
dst_url.netloc.encode('ascii')
except (UnicodeEncodeError, UnicodeDecodeError):
# The IDN contains characters beyond ASCII. We must convert it
# to Punycode. (Issue #1644)
nl = dst_url.netloc.encode('idna')
answer = urlunsplit((dst_url.scheme,
nl,
dst_url.path,
dst_url.query,
dst_url.fragment))
print(" Converting to Punycode:", answer)
SAMPLE_CONF['SITE_URL'] = answer
def prettyhandler(default, toconf):
SAMPLE_CONF['PRETTY_URLS'] = ask_yesno('Enable pretty URLs (/page/ instead of /page.html) that don\'t need web server configuration?', default=True)
def lhandler(default, toconf, show_header=True):
if show_header:
print("We will now ask you to provide the list of languages you want to use.")
print("Please list all the desired languages, comma-separated, using ISO 639-1 codes. The first language will be used as the default.")
print("Type '?' (a question mark, sans quotes) to list available languages.")
answer = ask('Language(s) to use', 'en')
while answer.strip() == '?':
print('\n# Available languages:')
try:
print(SAMPLE_CONF['_SUPPORTED_LANGUAGES'] + '\n')
except UnicodeEncodeError:
# avoid Unicode characters in supported language names
print(unidecode.unidecode(SAMPLE_CONF['_SUPPORTED_LANGUAGES']) + '\n')
answer = ask('Language(s) to use', 'en')
langs = [i.strip().lower().replace('-', '_') for i in answer.split(',')]
for partial, full in LEGAL_VALUES['_TRANSLATIONS_WITH_COUNTRY_SPECIFIERS'].items():
if partial in langs:
langs[langs.index(partial)] = full
print("NOTICE: Assuming '{0}' instead of '{1}'.".format(full, partial))
default = langs.pop(0)
SAMPLE_CONF['DEFAULT_LANG'] = default
# format_default_translations_config() is intelligent enough to
# return the current value if there are no additional languages.
SAMPLE_CONF['TRANSLATIONS'] = format_default_translations_config(langs)
# Get messages for navigation_links. In order to do this, we need
# to generate a throwaway TRANSLATIONS dict.
tr = get_default_translations_dict(default, langs)
# Assuming that base contains all the locales, and that base does
# not inherit from anywhere.
try:
messages = load_messages(['base'], tr, default, themes_dirs=['themes'])
SAMPLE_CONF['NAVIGATION_LINKS'] = format_navigation_links(langs, default, messages, SAMPLE_CONF['STRIP_INDEXES'])
except nikola.utils.LanguageNotFoundError as e:
print(" ERROR: the language '{0}' is not supported.".format(e.lang))
print(" Are you sure you spelled the name correctly? Names are case-sensitive and need to be reproduced as-is (complete with the country specifier, if any).")
print("\nType '?' (a question mark, sans quotes) to list available languages.")
lhandler(default, toconf, show_header=False)
def tzhandler(default, toconf):
print("\nPlease choose the correct time zone for your blog. Nikola uses the tz database.")
print("You can find your time zone here:")
print("https://en.wikipedia.org/wiki/List_of_tz_database_time_zones")
print("")
answered = False
while not answered:
try:
lz = get_localzone()
except Exception:
lz = None
answer = ask('Time zone', lz if lz else "UTC")
tz = dateutil.tz.gettz(answer)
if tz is None:
print(" WARNING: Time zone not found. Searching list of time zones for a match.")
all_zones = dateutil.zoneinfo.get_zonefile_instance().zones
matching_zones = [zone for zone in all_zones if answer.lower() in zone.lower()]
if len(matching_zones) == 1:
tz = dateutil.tz.gettz(matching_zones[0])
answer = matching_zones[0]
print(" Picking '{0}'.".format(answer))
elif len(matching_zones) > 1:
print(" The following time zones match your query:")
print(' ' + '\n '.join(matching_zones))
continue
if tz is not None:
time = datetime.datetime.now(tz).strftime('%H:%M:%S')
print(" Current time in {0}: {1}".format(answer, time))
answered = ask_yesno("Use this time zone?", True)
else:
print(" ERROR: No matches found. Please try again.")
SAMPLE_CONF['TIMEZONE'] = answer
def chandler(default, toconf):
print("You can configure comments now. Type '?' (a question mark, sans quotes) to list available comment systems. If you do not want any comments, just leave the field blank.")
answer = ask('Comment system', '')
while answer.strip() == '?':
print('\n# Available comment systems:')
print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])
print('')
answer = ask('Comment system', '')
while answer and answer not in LEGAL_VALUES['COMMENT_SYSTEM']:
if answer != '?':
print(' ERROR: Nikola does not know this comment system.')
print('\n# Available comment systems:')
print(SAMPLE_CONF['_SUPPORTED_COMMENT_SYSTEMS'])
print('')
answer = ask('Comment system', '')
SAMPLE_CONF['COMMENT_SYSTEM'] = answer
SAMPLE_CONF['COMMENT_SYSTEM_ID'] = ''
if answer:
print("You need to provide the site identifier for your comment system. Consult the Nikola manual for details on what the value should be. (you can leave it empty and come back later)")
answer = ask('Comment system site identifier', '')
SAMPLE_CONF['COMMENT_SYSTEM_ID'] = answer
STORAGE = {'target': target}
questions = [
('Questions about the site', None, None, None),
# query, default, toconf, destination
('Destination', None, False, '!target'),
('Site title', 'My Nikola Site', True, 'BLOG_TITLE'),
('Site author', 'Nikola Tesla', True, 'BLOG_AUTHOR'),
('Site author\'s e-mail', '[email protected]', True, 'BLOG_EMAIL'),
('Site description', 'This is a demo site for Nikola.', True, 'BLOG_DESCRIPTION'),
(urlhandler, None, True, True),
(prettyhandler, None, True, True),
('Questions about languages and locales', None, None, None),
(lhandler, None, True, True),
(tzhandler, None, True, True),
('Questions about comments', None, None, None),
(chandler, None, True, True),
]
print("Creating Nikola Site")
print("====================\n")
print("This is Nikola v{0}. We will now ask you a few easy questions about your new site.".format(nikola.__version__))
print("If you do not want to answer and want to go with the defaults instead, simply restart with the `-q` parameter.")
for query, default, toconf, destination in questions:
if target and destination == '!target' and test_destination(target, demo):
# Skip the destination question if we know it already
pass
else:
if default is toconf is destination is None:
print('--- {0} ---'.format(query))
elif destination is True:
query(default, toconf)
else:
answer = ask(query, default)
try:
answer = answer.decode('utf-8')
except (AttributeError, UnicodeDecodeError):
pass
if toconf:
SAMPLE_CONF[destination] = answer
if destination == '!target':
while not answer or not test_destination(answer, demo):
if not answer:
print(' ERROR: you need to specify a target directory.\n')
answer = ask(query, default)
STORAGE['target'] = answer
print("\nThat's it, Nikola is now configured. Make sure to edit conf.py to your liking.")
print("If you are looking for themes and addons, check out https://themes.getnikola.com/ and https://plugins.getnikola.com/.")
print("Have fun!")
return STORAGE
def _execute(self, options={}, args=None):
"""Create a new site."""
try:
target = args[0]
except IndexError:
target = None
if not options.get('quiet'):
st = self.ask_questions(target=target, demo=options.get('demo'))
try:
if not target:
target = st['target']
except KeyError:
pass
if not target:
print("Usage: nikola init [--demo] [--quiet] folder")
print("""
Options:
-q, --quiet Do not ask questions about config.
-d, --demo Create a site filled with example data.""")
return 1
if not options.get('demo'):
self.create_empty_site(target)
LOGGER.info('Created empty site at {0}.'.format(target))
else:
if not test_destination(target, True):
return 2
self.copy_sample_site(target)
LOGGER.info("A new site with example data has been created at "
"{0}.".format(target))
LOGGER.info("See README.txt in that folder for more information.")
self.create_configuration(target)
|
import numpy as np
import jax
import pytest
from tensornetwork.backends.jax import jitted_functions
jax.config.update('jax_enable_x64', True)
jax_dtypes = [np.float32, np.float64, np.complex64, np.complex128]
precision = jax.lax.Precision.HIGHEST
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("ncv", [10, 20, 30])
def test_arnoldi_factorization(dtype, ncv):
np.random.seed(10)
D = 20
mat = np.random.rand(D, D).astype(dtype)
x = np.random.rand(D).astype(dtype)
@jax.tree_util.Partial
@jax.jit
def matvec(vector, matrix):
return matrix @ vector
arnoldi = jitted_functions._generate_arnoldi_factorization(jax)
Vm = jax.numpy.zeros((ncv, D), dtype=dtype)
H = jax.numpy.zeros((ncv, ncv), dtype=dtype)
start = 0
tol = 1E-5
Vm, Hm, residual, norm, _, _ = arnoldi(matvec, [mat], x, Vm, H, start, ncv,
tol, precision)
fm = residual * norm
em = np.zeros((1, Vm.shape[0]))
em[0, -1] = 1
  # test the Arnoldi relation
np.testing.assert_almost_equal(mat @ Vm.T - Vm.T @ Hm - fm[:, None] * em,
np.zeros((D, ncv)).astype(dtype))
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_LR_sort(dtype):
np.random.seed(10)
x = np.random.rand(20).astype(dtype)
p = 10
LR_sort = jitted_functions._LR_sort(jax)
actual_x, actual_inds = LR_sort(p, jax.numpy.array(np.real(x)))
exp_inds = np.argsort(x)[::-1]
exp_x = x[exp_inds][-p:]
np.testing.assert_allclose(exp_x, actual_x)
np.testing.assert_allclose(exp_inds, actual_inds)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_SA_sort(dtype):
np.random.seed(10)
x = np.random.rand(20).astype(dtype)
p = 10
SA_sort = jitted_functions._SA_sort(jax)
actual_x, actual_inds = SA_sort(p, jax.numpy.array(np.real(x)))
exp_inds = np.argsort(x)
exp_x = x[exp_inds][-p:]
np.testing.assert_allclose(exp_x, actual_x)
np.testing.assert_allclose(exp_inds, actual_inds)
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
def test_shifted_QR(dtype):
np.random.seed(10)
D = 20
ncv = 10
numeig = 4
mat = np.random.rand(D, D).astype(dtype)
Ham = mat + mat.T.conj()
x = np.random.rand(D).astype(dtype)
@jax.tree_util.Partial
@jax.jit
def matvec(vector, matrix):
return matrix @ vector
lanczos = jitted_functions._generate_lanczos_factorization(jax)
shifted_QR = jitted_functions._shifted_QR(jax)
SA_sort = jitted_functions._SA_sort(jax)
Vm = jax.numpy.zeros((ncv, D), dtype=dtype)
alphas = jax.numpy.zeros(ncv, dtype=dtype)
betas = jax.numpy.zeros(ncv - 1, dtype=dtype)
start = 0
tol = 1E-5
Vm, alphas, betas, residual, norm, _, _ = lanczos(matvec, [Ham], x, Vm,
alphas, betas, start, ncv,
tol, precision)
Hm = jax.numpy.diag(alphas) + jax.numpy.diag(betas, -1) + jax.numpy.diag(
betas.conj(), 1)
fm = residual * norm
em = np.zeros((1, ncv))
em[0, -1] = 1
#test arnoldi relation
np.testing.assert_almost_equal(Ham @ Vm.T - Vm.T @ Hm - fm[:, None] * em,
np.zeros((D, ncv)).astype(dtype))
evals, _ = jax.numpy.linalg.eigh(Hm)
shifts, _ = SA_sort(numeig, evals)
Vk, Hk, fk = shifted_QR(Vm, Hm, fm, shifts, numeig)
Vk = Vk.at[numeig:, :].set(0)
Hk = Hk.at[numeig:, :].set(0)
Hk = Hk.at[:, numeig:].set(0)
ek = np.zeros((1, ncv))
ek[0, numeig - 1] = 1.0
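  # After the shifted-QR restart the factorization is truncated to `numeig`
  # columns, so the same Krylov relation must hold with the residual fk
  # attached at column `numeig`.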
np.testing.assert_almost_equal(Ham @ Vk.T - Vk.T @ Hk - fk[:, None] * ek,
np.zeros((D, ncv)).astype(dtype))
@pytest.mark.parametrize("dtype", [np.float64, np.complex128])
@pytest.mark.parametrize("ncv", [10, 20, 30])
def test_lanczos_factorization(dtype, ncv):
np.random.seed(10)
D = 20
mat = np.random.rand(D, D).astype(dtype)
Ham = mat + mat.T.conj()
x = np.random.rand(D).astype(dtype)
@jax.tree_util.Partial
@jax.jit
def matvec(vector, matrix):
return matrix @ vector
lanczos = jitted_functions._generate_lanczos_factorization(jax)
Vm = jax.numpy.zeros((ncv, D), dtype=dtype)
alphas = jax.numpy.zeros(ncv, dtype=dtype)
betas = jax.numpy.zeros(ncv-1, dtype=dtype)
start = 0
tol = 1E-5
Vm, alphas, betas, residual, norm, _, _ = lanczos(matvec, [Ham], x, Vm,
alphas, betas, start, ncv,
tol, precision)
Hm = jax.numpy.diag(alphas) + jax.numpy.diag(betas, -1) + jax.numpy.diag(
betas.conj(), 1)
fm = residual * norm
em = np.zeros((1, Vm.shape[0]))
em[0, -1] = 1
#test arnoldi relation
np.testing.assert_almost_equal(Ham @ Vm.T - Vm.T @ Hm - fm[:, None] * em,
np.zeros((D, ncv)).astype(dtype))
@pytest.mark.parametrize("dtype", jax_dtypes)
def test_gmres_on_small_known_problem(dtype):
"""
GMRES produces the correct result on an analytically solved
linear system.
"""
dummy = jax.numpy.zeros(1, dtype=dtype)
dtype = dummy.dtype
gmres = jitted_functions.gmres_wrapper(jax)
A = jax.numpy.array(([[1, 1], [3, -4]]), dtype=dtype)
b = jax.numpy.array([3, 2], dtype=dtype)
x0 = jax.numpy.ones(2, dtype=dtype)
n_kry = 2
maxiter = 1
@jax.tree_util.Partial
def A_mv(x):
return A @ x
tol = A.size*jax.numpy.finfo(dtype).eps
x, _, _, _ = gmres.gmres_m(A_mv, [], b, x0, tol, tol, n_kry, maxiter,
precision)
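  # The analytic solution of [[1, 1], [3, -4]] @ x == [3, 2] is x == [2, 1].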
solution = jax.numpy.array([2., 1.], dtype=dtype)
np.testing.assert_allclose(x, solution, atol=tol)
@pytest.mark.parametrize("dtype", jax_dtypes)
def test_gmres_krylov(dtype):
"""
gmres_krylov correctly builds the QR-decomposed Arnoldi decomposition.
  This function assumes that gmres.kth_arnoldi_step (which is
  independently tested) is correct.
"""
dummy = jax.numpy.zeros(1, dtype=dtype)
dtype = dummy.dtype
gmres = jitted_functions.gmres_wrapper(jax)
n = 2
n_kry = n
np.random.seed(10)
@jax.tree_util.Partial
def A_mv(x):
return A @ x
A = jax.numpy.array(np.random.rand(n, n).astype(dtype))
tol = A.size*jax.numpy.finfo(dtype).eps
x0 = jax.numpy.array(np.random.rand(n).astype(dtype))
b = jax.numpy.array(np.random.rand(n), dtype=dtype)
r, beta = gmres.gmres_residual(A_mv, [], b, x0)
_, V, R, _ = gmres.gmres_krylov(A_mv, [], n_kry, x0, r, beta,
tol, jax.numpy.linalg.norm(b),
precision)
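  # Fix the signs/phases of R's diagonal so the two QR factorizations below are
  # compared in a common gauge (QR is unique only up to these phases).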
phases = jax.numpy.sign(jax.numpy.diagonal(R[:-1, :]))
R = phases.conj()[:, None] * R[:-1, :]
Vtest = np.zeros((n, n_kry + 1), dtype=x0.dtype)
Vtest[:, 0] = r/beta
Vtest = jax.numpy.array(Vtest)
Htest = jax.numpy.zeros((n_kry + 1, n_kry), dtype=x0.dtype)
for k in range(n_kry):
Vtest, Htest = gmres.kth_arnoldi_step(k, A_mv, [], Vtest, Htest, tol,
precision)
_, Rtest = jax.numpy.linalg.qr(Htest)
phases = jax.numpy.sign(jax.numpy.diagonal(Rtest))
Rtest = phases.conj()[:, None] * Rtest
np.testing.assert_allclose(V, Vtest, atol=tol)
np.testing.assert_allclose(R, Rtest, atol=tol)
@pytest.mark.parametrize("dtype", jax_dtypes)
def test_gmres_arnoldi_step(dtype):
"""
The Arnoldi decomposition within GMRES is correct.
"""
gmres = jitted_functions.gmres_wrapper(jax)
dummy = jax.numpy.zeros(1, dtype=dtype)
dtype = dummy.dtype
n = 4
n_kry = n
np.random.seed(10)
A = jax.numpy.array(np.random.rand(n, n).astype(dtype))
x0 = jax.numpy.array(np.random.rand(n).astype(dtype))
Q = np.zeros((n, n_kry + 1), dtype=x0.dtype)
Q[:, 0] = x0/jax.numpy.linalg.norm(x0)
Q = jax.numpy.array(Q)
H = jax.numpy.zeros((n_kry + 1, n_kry), dtype=x0.dtype)
tol = A.size*jax.numpy.finfo(dtype).eps
@jax.tree_util.Partial
def A_mv(x):
return A @ x
for k in range(n_kry):
Q, H = gmres.kth_arnoldi_step(k, A_mv, [], Q, H, tol, precision)
QAQ = Q[:, :n_kry].conj().T @ A @ Q[:, :n_kry]
np.testing.assert_allclose(H[:n_kry, :], QAQ, atol=tol)
@pytest.mark.parametrize("dtype", jax_dtypes)
def test_givens(dtype):
"""
gmres["givens_rotation produces the correct rotation factors.
"""
gmres = jitted_functions.gmres_wrapper(jax)
np.random.seed(10)
v = jax.numpy.array(np.random.rand(2).astype(dtype))
cs, sn = gmres.givens_rotation(*v)
rot = np.zeros((2, 2), dtype=dtype)
rot[0, 0] = cs
rot[1, 1] = cs
rot[0, 1] = -sn
rot[1, 0] = sn
rot = jax.numpy.array(rot)
result = rot @ v
tol = 4*jax.numpy.finfo(dtype).eps
np.testing.assert_allclose(result[-1], 0., atol=tol)
|
import pytest
from helpers import utils
@pytest.mark.parametrize('val1, val2', [
({'a': 1}, {'a': 1}),
({'a': 1, 'b': 2}, {'a': 1}),
({'a': [1, 2, 3]}, {'a': [1]}),
({'a': [1, 2, 3]}, {'a': [..., 2]}),
(1.0, 1.00000001),
("foobarbaz", "foo*baz"),
])
def test_partial_compare_equal(val1, val2):
assert utils.partial_compare(val1, val2)
@pytest.mark.parametrize('val1, val2, error', [
({'a': 1}, {'a': 2}, "1 != 2"),
({'a': 1}, {'b': 1}, "Key 'b' is in second dict but not in first!"),
({'a': 1, 'b': 2}, {'a': 2}, "1 != 2"),
({'a': [1]}, {'a': [1, 2, 3]}, "Second list is longer than first list"),
({'a': [1]}, {'a': [2, 3, 4]}, "Second list is longer than first list"),
([1], {1: 2}, "Different types (list, dict) -> False"),
({1: 1}, {1: [1]}, "Different types (int, list) -> False"),
({'a': [1, 2, 3]}, {'a': [..., 3]}, "2 != 3"),
("foo*baz", "foobarbaz", "'foo*baz' != 'foobarbaz' (pattern matching)"),
(23.42, 13.37, "23.42 != 13.37 (float comparison)"),
])
def test_partial_compare_not_equal(val1, val2, error):
outcome = utils.partial_compare(val1, val2)
assert not outcome
assert isinstance(outcome, utils.PartialCompareOutcome)
assert outcome.error == error
@pytest.mark.parametrize('pattern, value, expected', [
('foo', 'foo', True),
('foo', 'bar', False),
('foo', 'Foo', False),
('foo', 'foobar', False),
('foo', 'barfoo', False),
('foo*', 'foobarbaz', True),
('*bar', 'foobar', True),
('foo*baz', 'foobarbaz', True),
('foo[b]ar', 'foobar', False),
('foo[b]ar', 'foo[b]ar', True),
('foo?ar', 'foobar', False),
('foo?ar', 'foo?ar', True),
])
def test_pattern_match(pattern, value, expected):
assert utils.pattern_match(pattern=pattern, value=value) == expected
def test_nop_contextmanager():
with utils.nop_contextmanager():
pass
|
from hangups import message_parser, hangouts_pb2
def parse_text(text):
parser = message_parser.ChatMessageParser()
return [(s.text, s.params) for s in parser.parse(text)]
def test_parse_linebreaks():
text = 'line1\nline2\r\nline3'
expected = [('line1', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('line2', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('line3', {})]
assert expected == parse_text(text)
def test_parse_auto_link_minimal():
text = (
'http://domain.tld\n'
'https://domain.tld\n'
'sub.domain.tld\n'
'domain.tld/\n'
'1.1.1.1/\n'
)
expected = [
('http://domain.tld', {'link_target': 'http://domain.tld'}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('https://domain.tld', {'link_target': 'https://domain.tld'}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('sub.domain.tld', {'link_target': 'http://sub.domain.tld'}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('domain.tld/', {'link_target': 'http://domain.tld/'}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('1.1.1.1/', {'link_target': 'http://1.1.1.1/'}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
]
assert expected == parse_text(text)
def test_parse_auto_link_port():
text = (
'http://domain.tld:8080\n'
'https://domain.tld:8080\n'
'sub.domain.tld:8080\n'
'domain.tld:8080/\n'
)
expected = [
('http://domain.tld:8080',
{'link_target': 'http://domain.tld:8080'}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('https://domain.tld:8080',
{'link_target': 'https://domain.tld:8080'}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('sub.domain.tld:8080',
{'link_target': 'http://sub.domain.tld:8080'}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('domain.tld:8080/',
{'link_target': 'http://domain.tld:8080/'}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
]
assert expected == parse_text(text)
def test_parse_auto_link_parens():
text = (
'pre (https://domain.tld) post\n'
'pre (inner https://domain.tld inner) post\n'
'pre (inner (https://domain.tld) inner) post\n'
'pre https://domain.tld/path(inner) post\n'
'pre (https://domain.tld/path(inner)) post\n'
)
expected = [
('pre (', {}),
('https://domain.tld', {'link_target': 'https://domain.tld'}),
(') post', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('pre (inner ', {}),
('https://domain.tld', {'link_target': 'https://domain.tld'}),
(' inner) post', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('pre (inner (', {}),
('https://domain.tld', {'link_target': 'https://domain.tld'}),
(') inner) post', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('pre ', {}),
('https://domain.tld/path(inner)',
{'link_target': 'https://domain.tld/path(inner)'}),
(' post', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('pre (', {}),
('https://domain.tld/path(inner)',
{'link_target': 'https://domain.tld/path(inner)'}),
(') post', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
]
assert expected == parse_text(text)
def test_parse_auto_link_email():
text = (
'[email protected]\n'
'[email protected]\n'
'[email protected]\n'
)
expected = [
('[email protected]', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('[email protected]', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('[email protected]', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
]
assert expected == parse_text(text)
def test_parse_auto_link_invalid():
text = (
'hangups:hangups\n'
'http://tld\n'
'http://tld/path\n'
'version 3.5.11\n'
)
expected = [
('hangups:hangups', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('http://tld', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('http://tld/path', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('version 3.5.11', {}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
]
assert expected == parse_text(text)
def test_parse_autolinks():
text = ('www.google.com google.com/maps '
'(https://en.wikipedia.org/wiki/Parenthesis_(disambiguation))')
expected = [
('www.google.com', {'link_target': 'http://www.google.com'}),
(' ', {}),
('google.com/maps', {'link_target': 'http://google.com/maps'}),
(' (', {}),
('https://en.wikipedia.org/wiki/Parenthesis_(disambiguation)',
{'link_target':
'https://en.wikipedia.org/wiki/Parenthesis_(disambiguation)'}),
(')', {})
]
assert expected == parse_text(text)
def test_parse_markdown():
text = ('Test **bold *bolditalic* bold** _italic_ not_italic_not '
'~~strike~~ [Google](www.google.com)'
r'**`_bold not italic_`**')
expected = [('Test ', {}),
('bold ', {'is_bold': True}),
('bolditalic', {'is_bold': True, 'is_italic': True}),
(' bold', {'is_bold': True}),
(' ', {}),
('italic', {'is_italic': True}),
(' not_italic_not ', {}),
('strike', {'is_strikethrough': True}),
(' ', {}),
('Google', {'link_target': 'http://www.google.com'}),
('_bold not italic_', {'is_bold': True})]
assert expected == parse_text(text)
text = '*first opened **second opened _third opened __fourth opened'
assert [(text, {})] == parse_text(text)
def test_parse_html():
text = (
'Test <b>bold <i>bolditalic</i> bold</b> <em>italic</em> '
'<del>strike</del><br><a href="google.com">Google</a>'
'<img src=\'https://upload.wikimedia.org/wikipedia/en/8/80/'
'Wikipedia-logo-v2.svg\'><i>default'
)
expected = [
('Test ', {}),
('bold ', {'is_bold': True}),
('bolditalic', {'is_bold': True, 'is_italic': True}),
(' bold', {'is_bold': True}),
(' ', {}),
('italic', {'is_italic': True}),
(' ', {}),
('strike', {'is_strikethrough': True}),
('\n', {'segment_type': hangouts_pb2.SEGMENT_TYPE_LINE_BREAK}),
('Google', {'link_target': 'http://google.com'}),
('https://upload.wikimedia.org/wikipedia/en/8/80/'
'Wikipedia-logo-v2.svg',
{'link_target':
'https://upload.wikimedia.org/wikipedia/en/8/80/'
'Wikipedia-logo-v2.svg'}),
('<i>default', {}),
]
assert expected == parse_text(text)
|
from datetime import datetime, timedelta, timezone
import re
import homeassistant.components.wsdot.sensor as wsdot
from homeassistant.components.wsdot.sensor import (
ATTR_DESCRIPTION,
ATTR_TIME_UPDATED,
CONF_API_KEY,
CONF_ID,
CONF_NAME,
CONF_TRAVEL_TIMES,
RESOURCE,
SCAN_INTERVAL,
)
from homeassistant.setup import async_setup_component
from tests.common import load_fixture
config = {
CONF_API_KEY: "foo",
SCAN_INTERVAL: timedelta(seconds=120),
CONF_TRAVEL_TIMES: [{CONF_ID: 96, CONF_NAME: "I90 EB"}],
}
async def test_setup_with_config(hass):
"""Test the platform setup with configuration."""
assert await async_setup_component(hass, "sensor", {"wsdot": config})
async def test_setup(hass, requests_mock):
"""Test for operational WSDOT sensor with proper attributes."""
entities = []
def add_entities(new_entities, update_before_add=False):
"""Mock add entities."""
if update_before_add:
for entity in new_entities:
entity.update()
for entity in new_entities:
entities.append(entity)
uri = re.compile(RESOURCE + "*")
requests_mock.get(uri, text=load_fixture("wsdot.json"))
wsdot.setup_platform(hass, config, add_entities)
assert len(entities) == 1
sensor = entities[0]
assert sensor.name == "I90 EB"
assert sensor.state == 11
assert (
sensor.device_state_attributes[ATTR_DESCRIPTION]
== "Downtown Seattle to Downtown Bellevue via I-90"
)
assert sensor.device_state_attributes[ATTR_TIME_UPDATED] == datetime(
2017, 1, 21, 15, 10, tzinfo=timezone(timedelta(hours=-8))
)
|
import logging
import httpx
DEFAULT_TIMEOUT = 10
_LOGGER = logging.getLogger(__name__)
class RestData:
"""Class for handling the data retrieval."""
def __init__(
self,
method,
resource,
auth,
headers,
data,
verify_ssl,
timeout=DEFAULT_TIMEOUT,
):
"""Initialize the data object."""
self._method = method
self._resource = resource
self._auth = auth
self._headers = headers
self._request_data = data
self._timeout = timeout
self._verify_ssl = verify_ssl
self._async_client = None
self.data = None
self.headers = None
async def async_remove(self):
"""Destroy the http session on destroy."""
if self._async_client:
await self._async_client.aclose()
def set_url(self, url):
"""Set url."""
self._resource = url
async def async_update(self):
"""Get the latest data from REST service with provided method."""
if not self._async_client:
self._async_client = httpx.AsyncClient(verify=self._verify_ssl)
_LOGGER.debug("Updating from %s", self._resource)
try:
response = await self._async_client.request(
self._method,
self._resource,
headers=self._headers,
auth=self._auth,
data=self._request_data,
timeout=self._timeout,
)
self.data = response.text
self.headers = response.headers
except httpx.RequestError as ex:
_LOGGER.error("Error fetching data: %s failed with %s", self._resource, ex)
self.data = None
self.headers = None
|
from huawei_lte_api.enums.client import ResponseCodeEnum
from huawei_lte_api.enums.user import LoginErrorEnum, LoginStateEnum, PasswordTypeEnum
import pytest
from requests.exceptions import ConnectionError
from requests_mock import ANY
from homeassistant import config_entries, data_entry_flow
from homeassistant.components import ssdp
from homeassistant.components.huawei_lte.const import DOMAIN
from homeassistant.const import (
CONF_NAME,
CONF_PASSWORD,
CONF_RECIPIENT,
CONF_URL,
CONF_USERNAME,
)
from tests.async_mock import patch
from tests.common import MockConfigEntry
FIXTURE_USER_INPUT = {
CONF_URL: "http://192.168.1.1/",
CONF_USERNAME: "admin",
CONF_PASSWORD: "secret",
}
FIXTURE_USER_INPUT_OPTIONS = {
CONF_NAME: DOMAIN,
CONF_RECIPIENT: "+15555551234",
}
async def test_show_set_form(hass):
"""Test that the setup form is served."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=None
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_urlize_plain_host(hass, requests_mock):
"""Test that plain host or IP gets converted to a URL."""
requests_mock.request(ANY, ANY, exc=ConnectionError())
host = "192.168.100.1"
user_input = {**FIXTURE_USER_INPUT, CONF_URL: host}
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=user_input
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert user_input[CONF_URL] == f"http://{host}/"
async def test_already_configured(hass):
"""Test we reject already configured devices."""
MockConfigEntry(
domain=DOMAIN, data=FIXTURE_USER_INPUT, title="Already configured"
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
data={
**FIXTURE_USER_INPUT,
# Tweak URL a bit to check that doesn't fail duplicate detection
CONF_URL: FIXTURE_USER_INPUT[CONF_URL].replace("http", "HTTP"),
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_connection_error(hass, requests_mock):
"""Test we show user form on connection error."""
requests_mock.request(ANY, ANY, exc=ConnectionError())
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {CONF_URL: "unknown"}
@pytest.fixture
def login_requests_mock(requests_mock):
"""Set up a requests_mock with base mocks for login tests."""
requests_mock.request(
ANY, FIXTURE_USER_INPUT[CONF_URL], text='<meta name="csrf_token" content="x"/>'
)
requests_mock.request(
ANY,
f"{FIXTURE_USER_INPUT[CONF_URL]}api/user/state-login",
text=(
f"<response><State>{LoginStateEnum.LOGGED_OUT}</State>"
f"<password_type>{PasswordTypeEnum.SHA256}</password_type></response>"
),
)
return requests_mock
@pytest.mark.parametrize(
("code", "errors"),
(
(LoginErrorEnum.USERNAME_WRONG, {CONF_USERNAME: "incorrect_username"}),
(LoginErrorEnum.PASSWORD_WRONG, {CONF_PASSWORD: "incorrect_password"}),
(
LoginErrorEnum.USERNAME_PWD_WRONG,
{CONF_USERNAME: "invalid_auth"},
),
(LoginErrorEnum.USERNAME_PWD_ORERRUN, {"base": "login_attempts_exceeded"}),
(ResponseCodeEnum.ERROR_SYSTEM_UNKNOWN, {"base": "response_error"}),
),
)
async def test_login_error(hass, login_requests_mock, code, errors):
"""Test we show user form with appropriate error on response failure."""
login_requests_mock.request(
ANY,
f"{FIXTURE_USER_INPUT[CONF_URL]}api/user/login",
text=f"<error><code>{code}</code><message/></error>",
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": config_entries.SOURCE_USER}, data=FIXTURE_USER_INPUT
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == errors
async def test_success(hass, login_requests_mock):
"""Test successful flow provides entry creation data."""
login_requests_mock.request(
ANY,
f"{FIXTURE_USER_INPUT[CONF_URL]}api/user/login",
text="<response>OK</response>",
)
with patch("homeassistant.components.huawei_lte.async_setup"), patch(
"homeassistant.components.huawei_lte.async_setup_entry"
):
result = await hass.config_entries.flow.async_init(
DOMAIN,
context={"source": config_entries.SOURCE_USER},
data=FIXTURE_USER_INPUT,
)
await hass.async_block_till_done()
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"][CONF_URL] == FIXTURE_USER_INPUT[CONF_URL]
assert result["data"][CONF_USERNAME] == FIXTURE_USER_INPUT[CONF_USERNAME]
assert result["data"][CONF_PASSWORD] == FIXTURE_USER_INPUT[CONF_PASSWORD]
async def test_ssdp(hass):
"""Test SSDP discovery initiates config properly."""
url = "http://192.168.100.1/"
context = {"source": config_entries.SOURCE_SSDP}
result = await hass.config_entries.flow.async_init(
DOMAIN,
context=context,
data={
ssdp.ATTR_SSDP_LOCATION: "http://192.168.100.1:60957/rootDesc.xml",
ssdp.ATTR_SSDP_ST: "upnp:rootdevice",
ssdp.ATTR_UPNP_DEVICE_TYPE: "urn:schemas-upnp-org:device:InternetGatewayDevice:1",
ssdp.ATTR_UPNP_FRIENDLY_NAME: "Mobile Wi-Fi",
ssdp.ATTR_UPNP_MANUFACTURER: "Huawei",
ssdp.ATTR_UPNP_MANUFACTURER_URL: "http://www.huawei.com/",
ssdp.ATTR_UPNP_MODEL_NAME: "Huawei router",
ssdp.ATTR_UPNP_MODEL_NUMBER: "12345678",
ssdp.ATTR_UPNP_PRESENTATION_URL: url,
ssdp.ATTR_UPNP_SERIAL: "00000000",
ssdp.ATTR_UPNP_UDN: "uuid:XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX",
},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert context[CONF_URL] == url
async def test_options(hass):
"""Test options produce expected data."""
config_entry = MockConfigEntry(
domain=DOMAIN, data=FIXTURE_USER_INPUT, options=FIXTURE_USER_INPUT_OPTIONS
)
config_entry.add_to_hass(hass)
result = await hass.config_entries.options.async_init(config_entry.entry_id)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "init"
recipient = "+15555550000"
result = await hass.config_entries.options.async_configure(
result["flow_id"], user_input={CONF_RECIPIENT: recipient}
)
assert result["data"][CONF_NAME] == DOMAIN
assert result["data"][CONF_RECIPIENT] == [recipient]
|
import pytest
import sh
from molecule import config
from molecule.verifier.lint import rubocop
from test.functional.conftest import needs_rubocop
pytestmark = needs_rubocop
@pytest.fixture
def _patched_get_tests(mocker):
m = mocker.patch('molecule.verifier.lint.rubocop.RuboCop._get_tests')
m.return_value = ['test1', 'test2', 'test3']
return m
@pytest.fixture
def _verifier_lint_section_data():
return {
'verifier': {
'name': 'inspec',
'lint': {
'name': 'rubocop',
'options': {
'foo': 'bar',
},
'env': {
'FOO': 'bar',
},
}
}
}
# NOTE(retr0h): The use of the `patched_config_validate` fixture disables
# config.Config._validate from executing, preventing odd side-effects
# throughout the patched.assert_called unit tests.
@pytest.fixture
def _instance(patched_config_validate, config_instance):
return rubocop.RuboCop(config_instance)
def test_config_private_member(_instance):
assert isinstance(_instance._config, config.Config)
def test_default_options_property(_instance):
assert {} == _instance.default_options
def test_default_env_property(_instance):
assert 'MOLECULE_FILE' in _instance.default_env
assert 'MOLECULE_INVENTORY_FILE' in _instance.default_env
assert 'MOLECULE_SCENARIO_DIRECTORY' in _instance.default_env
assert 'MOLECULE_INSTANCE_CONFIG' in _instance.default_env
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_env_property(_instance):
assert 'bar' == _instance.env['FOO']
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_name_property(_instance):
assert 'rubocop' == _instance.name
def test_enabled_property(_instance):
assert _instance.enabled
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_options_property(_instance):
x = {
'foo': 'bar',
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_options_property_handles_cli_args(_instance):
_instance._config.args = {'debug': True}
x = {
'foo': 'bar',
'd': True,
}
assert x == _instance.options
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_bake(_instance):
_instance._tests = ['test1', 'test2', 'test3']
_instance.bake()
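    # The 'foo': 'bar' entry from the verifier lint options is rendered as the
    # '--foo=bar' CLI flag ahead of the discovered test files.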
x = '{} --foo=bar test1 test2 test3'.format(str(sh.rubocop))
assert x == _instance._rubocop_command
def test_execute(patched_logger_info, patched_logger_success,
patched_run_command, _instance):
_instance._tests = ['test1', 'test2', 'test3']
_instance._rubocop_command = 'patched-command'
_instance.execute()
patched_run_command.assert_called_once_with('patched-command', debug=False)
msg = 'Executing RuboCop on files found in {}/...'.format(
_instance._config.verifier.directory)
patched_logger_info.assert_called_once_with(msg)
msg = 'Lint completed successfully.'
patched_logger_success.assert_called_once_with(msg)
def test_execute_does_not_execute(patched_run_command, patched_logger_warn,
_instance):
_instance._config.config['verifier']['lint']['enabled'] = False
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, verifier_lint is disabled.'
patched_logger_warn.assert_called_once_with(msg)
def test_does_not_execute_without_tests(patched_run_command,
patched_logger_warn, _instance):
_instance.execute()
assert not patched_run_command.called
msg = 'Skipping, no tests found.'
patched_logger_warn.assert_called_once_with(msg)
@pytest.mark.parametrize(
'config_instance', ['_verifier_lint_section_data'], indirect=True)
def test_execute_bakes(patched_run_command, _instance):
_instance._tests = ['test1', 'test2', 'test3']
_instance.execute()
assert _instance._rubocop_command is not None
cmd = '{} --foo=bar test1 test2 test3'.format(str(sh.rubocop))
patched_run_command.assert_called_once_with(cmd, debug=False)
def test_executes_catches_and_exits_return_code(patched_run_command,
_patched_get_tests, _instance):
patched_run_command.side_effect = sh.ErrorReturnCode_1(
sh.rubocop, b'', b'')
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import json
import logging
import os
import re
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import errors
from perfkitbenchmarker import flag_util
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import netperf
import six
from six.moves import zip
flags.DEFINE_integer('netperf_max_iter', None,
'Maximum number of iterations to run during '
'confidence interval estimation. If unset, '
'a single iteration will be run.',
lower_bound=3, upper_bound=30)
flags.DEFINE_integer('netperf_test_length', 60,
'netperf test length, in seconds',
lower_bound=1)
flags.DEFINE_bool('netperf_enable_histograms', True,
'Determines whether latency histograms are '
'collected/reported. Only for *RR benchmarks')
flag_util.DEFINE_integerlist('netperf_num_streams', flag_util.IntegerList([1]),
'Number of netperf processes to run. Netperf '
'will run once for each value in the list.',
module_name=__name__)
flags.DEFINE_integer('netperf_thinktime', 0,
'Time in nanoseconds to do work for each request.')
flags.DEFINE_integer('netperf_thinktime_array_size', 0,
'The size of the array to traverse for thinktime.')
flags.DEFINE_integer('netperf_thinktime_run_length', 0,
'The number of contiguous numbers to sum at a time in the '
'thinktime array.')
flags.DEFINE_integer('netperf_udp_stream_send_size_in_bytes', 1024,
'Send size to use for UDP_STREAM tests (netperf -m flag)',
lower_bound=1, upper_bound=65507)
# We set the default to 128KB (131072 bytes) to override the Linux default
# of 16K so that we can achieve the "link rate".
flags.DEFINE_integer('netperf_tcp_stream_send_size_in_bytes', 131072,
'Send size to use for TCP_STREAM tests (netperf -m flag)')
ALL_BENCHMARKS = ['TCP_RR', 'TCP_CRR', 'TCP_STREAM', 'UDP_RR', 'UDP_STREAM']
flags.DEFINE_list('netperf_benchmarks', ALL_BENCHMARKS,
'The netperf benchmark(s) to run.')
flags.register_validator(
'netperf_benchmarks',
lambda benchmarks: benchmarks and set(benchmarks).issubset(ALL_BENCHMARKS))
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'netperf'
BENCHMARK_CONFIG = """
netperf:
description: Run TCP_RR, TCP_CRR, UDP_RR, TCP_STREAM and UDP_STREAM
vpc_peering: True
vm_groups:
vm_1:
vm_spec: *default_single_core
vm_2:
vm_spec: *default_single_core
"""
MBPS = 'Mbits/sec'
TRANSACTIONS_PER_SECOND = 'transactions_per_second'
# Specifies the keys to include in the results for OMNI tests.
# Any user of ParseNetperfOutput() (e.g. container_netperf_benchmark) must
# specify these selectors to ensure the parsing doesn't break.
OUTPUT_SELECTOR = (
'THROUGHPUT,THROUGHPUT_UNITS,P50_LATENCY,P90_LATENCY,'
'P99_LATENCY,STDDEV_LATENCY,MIN_LATENCY,MAX_LATENCY,'
'CONFIDENCE_ITERATION,THROUGHPUT_CONFID,'
'LOCAL_TRANSPORT_RETRANS,REMOTE_TRANSPORT_RETRANS')
# Command ports are even (id*2), data ports are odd (id*2 + 1)
PORT_START = 20000
REMOTE_SCRIPTS_DIR = 'netperf_test_scripts'
REMOTE_SCRIPT = 'netperf_test.py'
PERCENTILES = [50, 90, 99]
# By default, Container-Optimized OS (COS) host firewall allows only
# outgoing connections and incoming SSH connections. To allow incoming
# connections from VMs running netperf, we need to add iptables rules
# on the VM running netserver.
_COS_RE = re.compile(r'\b(cos|gci)-')
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def PrepareNetperf(vm):
"""Installs netperf on a single vm."""
vm.Install('netperf')
def Prepare(benchmark_spec):
"""Install netperf on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vms = vms[:2]
vm_util.RunThreaded(PrepareNetperf, vms)
num_streams = max(FLAGS.netperf_num_streams)
# See comments where _COS_RE is defined.
if vms[1].image and re.search(_COS_RE, vms[1].image):
_SetupHostFirewall(benchmark_spec)
# Start the netserver processes
if vm_util.ShouldRunOnExternalIpAddress():
# Open all of the command and data ports
vms[1].AllowPort(PORT_START, PORT_START + num_streams * 2 - 1)
netserver_cmd = ('for i in $(seq {port_start} 2 {port_end}); do '
'{netserver_path} -p $i & done').format(
port_start=PORT_START,
port_end=PORT_START + num_streams * 2 - 1,
netserver_path=netperf.NETSERVER_PATH)
vms[1].RemoteCommand(netserver_cmd)
# Copy remote test script to client
path = data.ResourcePath(os.path.join(REMOTE_SCRIPTS_DIR, REMOTE_SCRIPT))
logging.info('Uploading %s to %s', path, vms[0])
vms[0].PushFile(path, REMOTE_SCRIPT)
vms[0].RemoteCommand('sudo chmod 777 %s' % REMOTE_SCRIPT)
def _SetupHostFirewall(benchmark_spec):
"""Set up host firewall to allow incoming traffic.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
client_vm = benchmark_spec.vms[0]
server_vm = benchmark_spec.vms[1]
ip_addrs = [client_vm.internal_ip]
if vm_util.ShouldRunOnExternalIpAddress():
ip_addrs.append(client_vm.ip_address)
logging.info('setting up host firewall on %s running %s for client at %s',
server_vm.name, server_vm.image, ip_addrs)
cmd = 'sudo iptables -A INPUT -p %s -s %s -j ACCEPT'
for protocol in 'tcp', 'udp':
for ip_addr in ip_addrs:
server_vm.RemoteHostCommand(cmd % (protocol, ip_addr))
def _HistogramStatsCalculator(histogram, percentiles=PERCENTILES):
"""Computes values at percentiles in a distribution as well as stddev.
Args:
histogram: A dict mapping values to the number of samples with that value.
percentiles: An array of percentiles to calculate.
Returns:
A dict mapping stat names to their values.
"""
stats = {}
# Histogram data in list form sorted by key
by_value = sorted([(value, count) for value, count in histogram.items()],
key=lambda x: x[0])
total_count = sum(histogram.values())
cur_value_index = 0 # Current index in by_value
cur_index = 0 # Number of values we've passed so far
for p in percentiles:
index = int(float(total_count) * float(p) / 100.0)
index = min(index, total_count - 1) # Handle 100th percentile
for value, count in by_value[cur_value_index:]:
if cur_index + count > index:
stats['p%s' % str(p)] = by_value[cur_value_index][0]
break
else:
cur_index += count
cur_value_index += 1
# Compute stddev
value_sum = float(sum([value * count for value, count in histogram.items()]))
average = value_sum / float(total_count)
if total_count > 1:
total_of_squares = sum([(value - average) ** 2 * count
for value, count in histogram.items()])
stats['stddev'] = (total_of_squares / (total_count - 1)) ** 0.5
else:
stats['stddev'] = 0
return stats
def ParseNetperfOutput(stdout, metadata, benchmark_name,
enable_latency_histograms):
"""Parses the stdout of a single netperf process.
Args:
stdout: the stdout of the netperf process
metadata: metadata for any sample.Sample objects we create
benchmark_name: the name of the netperf benchmark
enable_latency_histograms: bool indicating if latency histograms are
included in stdout
Returns:
A tuple containing (throughput_sample, latency_samples, latency_histogram)
"""
# Don't modify the metadata dict that was passed in
metadata = metadata.copy()
# Extract stats from stdout
# Sample output:
#
# "MIGRATED TCP REQUEST/RESPONSE TEST from 0.0.0.0 (0.0.0.0) port 20001
# AF_INET to 104.154.50.86 () port 20001 AF_INET : +/-2.500% @ 99% conf.
# : first burst 0",\n
# Throughput,Throughput Units,Throughput Confidence Width (%),
# Confidence Iterations Run,Stddev Latency Microseconds,
# 50th Percentile Latency Microseconds,90th Percentile Latency Microseconds,
# 99th Percentile Latency Microseconds,Minimum Latency Microseconds,
# Maximum Latency Microseconds\n
# 1405.50,Trans/s,2.522,4,783.80,683,735,841,600,900\n
try:
fp = six.StringIO(stdout)
# "-o" flag above specifies CSV output, but there is one extra header line:
banner = next(fp)
assert banner.startswith('MIGRATED'), stdout
r = csv.DictReader(fp)
results = next(r)
logging.info('Netperf Results: %s', results)
assert 'Throughput' in results
except (StopIteration, AssertionError):
# The output returned by netperf was unparseable - usually due to a broken
# connection or other error. Raise KnownIntermittentError to signal the
# benchmark can be retried. Do not automatically retry as an immediate
    # retry on these VMs may be adversely affected (e.g. burstable credits
# partially used)
message = 'Netperf ERROR: Failed to parse stdout. STDOUT: %s' % stdout
logging.error(message)
raise errors.Benchmarks.KnownIntermittentError(message)
  # Update the metadata with some additional info
meta_keys = [('Confidence Iterations Run', 'confidence_iter'),
('Throughput Confidence Width (%)', 'confidence_width_percent')]
if 'TCP' in benchmark_name:
meta_keys.extend([
('Local Transport Retransmissions', 'netperf_retransmissions'),
('Remote Transport Retransmissions', 'netserver_retransmissions'),
])
metadata.update({meta_key: results[netperf_key]
for netperf_key, meta_key in meta_keys})
# Create the throughput sample
throughput = float(results['Throughput'])
throughput_units = results['Throughput Units']
if throughput_units == '10^6bits/s':
# TCP_STREAM benchmark
unit = MBPS
metric = '%s_Throughput' % benchmark_name
elif throughput_units == 'Trans/s':
# *RR benchmarks
unit = TRANSACTIONS_PER_SECOND
metric = '%s_Transaction_Rate' % benchmark_name
else:
raise ValueError('Netperf output specifies unrecognized throughput units %s'
% throughput_units)
throughput_sample = sample.Sample(metric, throughput, unit, metadata)
latency_hist = None
latency_samples = []
if enable_latency_histograms:
# Parse the latency histogram. {latency: count} where "latency" is the
# latency in microseconds with only 2 significant figures and "count" is the
# number of response times that fell in that latency range.
latency_hist = netperf.ParseHistogram(stdout)
hist_metadata = {'histogram': json.dumps(latency_hist)}
hist_metadata.update(metadata)
latency_samples.append(sample.Sample(
'%s_Latency_Histogram' % benchmark_name, 0, 'us', hist_metadata))
if unit != MBPS:
for metric_key, metric_name in [
('50th Percentile Latency Microseconds', 'p50'),
('90th Percentile Latency Microseconds', 'p90'),
('99th Percentile Latency Microseconds', 'p99'),
('Minimum Latency Microseconds', 'min'),
('Maximum Latency Microseconds', 'max'),
('Stddev Latency Microseconds', 'stddev')]:
if metric_key in results:
latency_samples.append(
sample.Sample('%s_Latency_%s' % (benchmark_name, metric_name),
float(results[metric_key]), 'us', metadata))
return (throughput_sample, latency_samples, latency_hist)
def RunNetperf(vm, benchmark_name, server_ip, num_streams):
"""Spawns netperf on a remote VM, parses results.
Args:
vm: The VM that the netperf TCP_RR benchmark will be run upon.
benchmark_name: The netperf benchmark to run, see the documentation.
server_ip: A machine that is running netserver.
num_streams: The number of netperf client threads to run.
Returns:
    A list of sample.Sample objects with the results.
"""
enable_latency_histograms = FLAGS.netperf_enable_histograms or num_streams > 1
# Throughput benchmarks don't have latency histograms
enable_latency_histograms = (enable_latency_histograms and
(benchmark_name not in ['TCP_STREAM', 'UDP_STREAM']))
# Flags:
# -o specifies keys to include in CSV output.
# -j keeps additional latency numbers
# -v sets the verbosity level so that netperf will print out histograms
# -I specifies the confidence % and width - here 99% confidence that the true
# value is within +/- 2.5% of the reported value
# -i specifies the maximum and minimum number of iterations.
confidence = ('-I 99,5 -i {0},3'.format(FLAGS.netperf_max_iter)
if FLAGS.netperf_max_iter else '')
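  # e.g. --netperf_max_iter=30 yields '-I 99,5 -i 30,3'; without it no
  # confidence interval estimation is requested.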
verbosity = '-v2 ' if enable_latency_histograms else ''
remote_cmd_timeout = (
FLAGS.netperf_test_length * (FLAGS.netperf_max_iter or 1) + 300)
metadata = {'netperf_test_length': FLAGS.netperf_test_length,
'sending_thread_count': num_streams,
'max_iter': FLAGS.netperf_max_iter or 1}
netperf_cmd = ('{netperf_path} -p {{command_port}} -j {verbosity} '
'-t {benchmark_name} -H {server_ip} -l {length} {confidence}'
' -- '
'-P ,{{data_port}} '
'-o {output_selector}').format(
netperf_path=netperf.NETPERF_PATH,
benchmark_name=benchmark_name,
server_ip=server_ip,
length=FLAGS.netperf_test_length,
output_selector=OUTPUT_SELECTOR,
confidence=confidence,
verbosity=verbosity)
if benchmark_name.upper() == 'UDP_STREAM':
netperf_cmd += (' -R 1 -m {send_size} -M {send_size} '.format(
send_size=FLAGS.netperf_udp_stream_send_size_in_bytes))
metadata['netperf_send_size_in_bytes'] = FLAGS.netperf_udp_stream_send_size_in_bytes
elif benchmark_name.upper() == 'TCP_STREAM':
netperf_cmd += (' -m {send_size} -M {send_size} '.format(
send_size=FLAGS.netperf_tcp_stream_send_size_in_bytes))
metadata['netperf_send_size_in_bytes'] = FLAGS.netperf_tcp_stream_send_size_in_bytes
if FLAGS.netperf_thinktime != 0:
netperf_cmd += (' -X {thinktime},{thinktime_array_size},'
'{thinktime_run_length} ').format(
thinktime=FLAGS.netperf_thinktime,
thinktime_array_size=FLAGS.netperf_thinktime_array_size,
thinktime_run_length=FLAGS.netperf_thinktime_run_length)
# Run all of the netperf processes and collect their stdout
# TODO(dlott): Analyze process start delta of netperf processes on the remote
# machine
# Give the remote script the max possible test length plus 5 minutes to
# complete
remote_cmd_timeout = \
FLAGS.netperf_test_length * (FLAGS.netperf_max_iter or 1) + 300
remote_cmd = ('./%s --netperf_cmd="%s" --num_streams=%s --port_start=%s' %
(REMOTE_SCRIPT, netperf_cmd, num_streams, PORT_START))
remote_stdout, _ = vm.RobustRemoteCommand(remote_cmd, should_log=True,
timeout=remote_cmd_timeout)
# Decode stdouts, stderrs, and return codes from remote command's stdout
json_out = json.loads(remote_stdout)
stdouts = json_out[0]
parsed_output = [ParseNetperfOutput(stdout, metadata, benchmark_name,
enable_latency_histograms)
for stdout in stdouts]
if len(parsed_output) == 1:
# Only 1 netperf thread
throughput_sample, latency_samples, histogram = parsed_output[0]
return [throughput_sample] + latency_samples
else:
# Multiple netperf threads
samples = []
# Unzip parsed output
# Note that latency_samples are invalid with multiple threads because stats
# are computed per-thread by netperf, so we don't use them here.
throughput_samples, _, latency_histograms = [list(t)
for t in zip(*parsed_output)]
# They should all have the same units
throughput_unit = throughput_samples[0].unit
# Extract the throughput values from the samples
throughputs = [s.value for s in throughput_samples]
# Compute some stats on the throughput values
throughput_stats = sample.PercentileCalculator(throughputs, [50, 90, 99])
throughput_stats['min'] = min(throughputs)
throughput_stats['max'] = max(throughputs)
# Calculate aggregate throughput
throughput_stats['total'] = throughput_stats['average'] * len(throughputs)
# Create samples for throughput stats
for stat, value in throughput_stats.items():
samples.append(
sample.Sample('%s_Throughput_%s' % (benchmark_name, stat),
float(value),
throughput_unit, metadata))
if enable_latency_histograms:
# Combine all of the latency histogram dictionaries
latency_histogram = collections.Counter()
for histogram in latency_histograms:
latency_histogram.update(histogram)
# Create a sample for the aggregate latency histogram
hist_metadata = {'histogram': json.dumps(latency_histogram)}
hist_metadata.update(metadata)
samples.append(sample.Sample(
'%s_Latency_Histogram' % benchmark_name, 0, 'us', hist_metadata))
# Calculate stats on aggregate latency histogram
latency_stats = _HistogramStatsCalculator(latency_histogram, [50, 90, 99])
# Create samples for the latency stats
for stat, value in latency_stats.items():
samples.append(
sample.Sample('%s_Latency_%s' % (benchmark_name, stat),
float(value),
'us', metadata))
return samples
def Run(benchmark_spec):
"""Run netperf TCP_RR on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
client_vm = vms[0] # Client aka "sending vm"
server_vm = vms[1] # Server aka "receiving vm"
logging.info('netperf running on %s', client_vm)
results = []
metadata = {
'sending_zone': client_vm.zone,
'sending_machine_type': client_vm.machine_type,
'receiving_zone': server_vm.zone,
'receiving_machine_type': server_vm.machine_type
}
for num_streams in FLAGS.netperf_num_streams:
assert num_streams >= 1
for netperf_benchmark in FLAGS.netperf_benchmarks:
if vm_util.ShouldRunOnExternalIpAddress():
external_ip_results = RunNetperf(client_vm, netperf_benchmark,
server_vm.ip_address, num_streams)
for external_ip_result in external_ip_results:
external_ip_result.metadata[
'ip_type'] = vm_util.IpAddressMetadata.EXTERNAL
external_ip_result.metadata.update(metadata)
results.extend(external_ip_results)
if vm_util.ShouldRunOnInternalIpAddress(client_vm, server_vm):
internal_ip_results = RunNetperf(client_vm, netperf_benchmark,
server_vm.internal_ip, num_streams)
for internal_ip_result in internal_ip_results:
internal_ip_result.metadata.update(metadata)
internal_ip_result.metadata[
'ip_type'] = vm_util.IpAddressMetadata.INTERNAL
results.extend(internal_ip_results)
return results
def Cleanup(benchmark_spec):
"""Cleanup netperf on the target vm (by uninstalling).
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vms[1].RemoteCommand('sudo killall netserver')
vms[0].RemoteCommand('sudo rm -rf %s' % REMOTE_SCRIPT)
|
import logging
import os
import shelve
from time import time as timestamp
from click import get_app_dir
logger = logging.getLogger(__name__)
class Cache:
cache_dir = get_app_dir("twtxt")
cache_name = "cache"
def __init__(self, cache_file, cache, update_interval):
"""Initializes new :class:`Cache` object.
:param str cache_file: full path to the loaded cache file.
        :param ~shelve.Shelf cache: a Shelf object, with the cache loaded.
:param int update_interval: number of seconds the cache is considered to be
up-to-date without calling any external resources.
"""
self.cache_file = cache_file
self.cache = cache
self.update_interval = update_interval
def __enter__(self):
return self
def __exit__(self, exc_type, exc_val, exc_tb):
return self.close()
@classmethod
def from_file(cls, file, *args, **kwargs):
"""Try loading given cache file."""
try:
cache = shelve.open(file)
return cls(file, cache, *args, **kwargs)
except OSError as e:
logger.debug("Loading {0} failed".format(file))
raise e
@classmethod
def discover(cls, *args, **kwargs):
"""Make a guess about the cache file location an try loading it."""
file = os.path.join(Cache.cache_dir, Cache.cache_name)
return cls.from_file(file, *args, **kwargs)
@property
def last_updated(self):
"""Returns *NIX timestamp of last update of the cache."""
try:
return self.cache["last_update"]
except KeyError:
return 0
@property
def is_valid(self):
"""Checks if the cache is considered to be up-to-date."""
if timestamp() - self.last_updated <= self.update_interval:
return True
else:
return False
def mark_updated(self):
"""Mark cache as updated at current *NIX timestamp"""
if not self.is_valid:
self.cache["last_update"] = timestamp()
def is_cached(self, url):
"""Checks if specified URL is cached."""
try:
return True if url in self.cache else False
except TypeError:
return False
def last_modified(self, url):
"""Returns saved 'Last-Modified' header, if available."""
try:
return self.cache[url]["last_modified"]
except KeyError:
return None
def add_tweets(self, url, last_modified, tweets):
"""Adds new tweets to the cache."""
try:
self.cache[url] = {"last_modified": last_modified, "tweets": tweets}
self.mark_updated()
return True
except TypeError:
return False
def get_tweets(self, url, limit=None):
"""Retrieves tweets from the cache."""
try:
tweets = self.cache[url]["tweets"]
self.mark_updated()
return sorted(tweets, reverse=True)[:limit]
except KeyError:
return []
def remove_tweets(self, url):
"""Tries to remove cached tweets."""
try:
del self.cache[url]
self.mark_updated()
return True
except KeyError:
return False
def close(self):
"""Closes Shelve object."""
try:
self.cache.close()
return True
except AttributeError:
return False
def sync(self):
"""Syncs Shelve object."""
try:
self.cache.sync()
return True
except AttributeError:
return False
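# Minimal usage sketch (illustrative only; assumes a writable cache directory
# and a hypothetical feed URL):
#
#     with Cache.discover(update_interval=300) as cache:
#         if not cache.is_cached("https://example.org/twtxt.txt"):
#             cache.add_tweets("https://example.org/twtxt.txt", None, [])
#         tweets = cache.get_tweets("https://example.org/twtxt.txt", limit=20)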
|
from __future__ import division
import chainer
import chainer.functions as F
import chainer.links as L
import numpy as np
from chainercv.experimental.links.model.fcis import FCIS
from chainercv.functions import ps_roi_average_pooling_2d
from chainercv.links import Conv2DBNActiv
from chainercv.links.model.faster_rcnn.region_proposal_network import \
RegionProposalNetwork
from chainercv.links.model.faster_rcnn.utils.loc2bbox import loc2bbox
from chainercv.links.model.resnet.resblock import ResBlock
from chainercv.links import ResNet101
from chainercv import utils
class FCISResNet101(FCIS):
"""FCIS based on ResNet101.
When you specify the path of a pre-trained chainer model serialized as
a :obj:`.npz` file in the constructor, this chain model automatically
initializes all the parameters with it.
    When a string in the prespecified set is provided, a pretrained model is
loaded from weights distributed on the Internet.
The list of pretrained models supported are as follows:
* :obj:`sbd`: Loads weights trained with the trainval split of Semantic \
Boundaries Dataset.
For descriptions on the interface of this model, please refer to
:class:`~chainercv.experimental.links.model.fcis.FCIS`.
:class:`~chainercv.experimental.links.model.fcis.FCISResNet101`
supports finer control on random initializations of weights by arguments
:obj:`resnet_initialW`, :obj:`rpn_initialW` and :obj:`head_initialW`.
It accepts a callable that takes an array and edits its values.
If :obj:`None` is passed as an initializer, the default initializer is
used.
Args:
n_fg_class (int): The number of classes excluding the background.
pretrained_model (str): The destination of the pre-trained
chainer model serialized as a :obj:`.npz` file.
If this is one of the strings described
above, it automatically loads weights stored under a directory
:obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/models/`,
where :obj:`$CHAINER_DATASET_ROOT` is set as
:obj:`$HOME/.chainer/dataset` unless you specify another value
by modifying the environment variable.
        min_size (int): A preprocessing parameter for :meth:`prepare`.
        max_size (int): A preprocessing parameter for :meth:`prepare`.
roi_size (int): Height and width of the feature maps after
Position Sensitive RoI pooling.
group_size (int): Group height and width for Position Sensitive
ROI pooling.
        ratios (list of floats): Ratios of width to height of
            the anchors.
        anchor_scales (list of numbers): Areas of the anchors.
Those areas will be the product of the square of an element in
:obj:`anchor_scales` and the original area of the reference
window.
loc_normalize_mean (tuple of four floats): Mean values of
localization estimates.
        loc_normalize_std (tuple of four floats): Standard deviation
of localization estimates.
        iter2 (bool): If set to :obj:`True`, Position Sensitive
            ROI pooling is executed twice. The second pass uses RoIs
            refined by the localization parameters calculated in the
            first pass.
        resnet_initialW (callable): Initializer for the ResNet101
            extractor layers.
rpn_initialW (callable): Initializer for Region Proposal Network
layers.
head_initialW (callable): Initializer for the head layers.
        proposal_creator_params (dict): Key-valued parameters for
:class:`~chainercv.links.model.faster_rcnn.ProposalCreator`.
"""
_models = {
'sbd': {
'param': {'n_fg_class': 20},
'url': 'https://chainercv-models.preferred.jp/'
'fcis_resnet101_sbd_trained_2018_06_22.npz',
'cv2': True
},
'sbd_converted': {
'param': {'n_fg_class': 20},
'url': 'https://chainercv-models.preferred.jp/'
'fcis_resnet101_sbd_converted_2018_07_02.npz',
'cv2': True
},
'coco': {
'param': {'n_fg_class': 80},
'url': 'https://chainercv-models.preferred.jp/'
'fcis_resnet101_coco_trained_2019_01_30.npz',
'cv2': True
},
'coco_converted': {
'param': {'n_fg_class': 80},
'url': 'https://chainercv-models.preferred.jp/'
'fcis_resnet101_coco_converted_2019_01_30.npz',
'cv2': True
}
}
feat_stride = 16
proposal_creator_params = {
'nms_thresh': 0.7,
'n_train_pre_nms': 6000,
'n_train_post_nms': 300,
'n_test_pre_nms': 6000,
'n_test_post_nms': 300,
'force_cpu_nms': False,
'min_size': 16
}
def __init__(
self,
n_fg_class=None,
pretrained_model=None,
min_size=600, max_size=1000,
roi_size=21, group_size=7,
ratios=[0.5, 1, 2], anchor_scales=[8, 16, 32],
loc_normalize_mean=(0.0, 0.0, 0.0, 0.0),
loc_normalize_std=(0.2, 0.2, 0.5, 0.5),
iter2=True,
resnet_initialW=None, rpn_initialW=None, head_initialW=None,
proposal_creator_params=None):
param, path = utils.prepare_pretrained_model(
{'n_fg_class': n_fg_class}, pretrained_model, self._models)
if rpn_initialW is None:
rpn_initialW = chainer.initializers.Normal(0.01)
if resnet_initialW is None and pretrained_model:
resnet_initialW = chainer.initializers.constant.Zero()
if proposal_creator_params is not None:
self.proposal_creator_params = proposal_creator_params
extractor = ResNet101Extractor(
initialW=resnet_initialW)
rpn = RegionProposalNetwork(
1024, 512,
ratios=ratios,
anchor_scales=anchor_scales,
feat_stride=self.feat_stride,
initialW=rpn_initialW,
proposal_creator_params=self.proposal_creator_params)
head = FCISResNet101Head(
param['n_fg_class'] + 1,
roi_size=roi_size, group_size=group_size,
spatial_scale=1. / self.feat_stride,
loc_normalize_mean=loc_normalize_mean,
loc_normalize_std=loc_normalize_std,
iter2=iter2, initialW=head_initialW)
mean = np.array([123.15, 115.90, 103.06],
dtype=np.float32)[:, None, None]
super(FCISResNet101, self).__init__(
extractor, rpn, head,
mean, min_size, max_size,
loc_normalize_mean, loc_normalize_std)
if path == 'imagenet':
self._copy_imagenet_pretrained_resnet()
elif path:
chainer.serializers.load_npz(path, self)
def _copy_imagenet_pretrained_resnet(self):
def _copy_conv2dbn(src, dst):
dst.conv.W.array = src.conv.W.array
if src.conv.b is not None and dst.conv.b is not None:
dst.conv.b.array = src.conv.b.array
dst.bn.gamma.array = src.bn.gamma.array
dst.bn.beta.array = src.bn.beta.array
dst.bn.avg_var = src.bn.avg_var
dst.bn.avg_mean = src.bn.avg_mean
def _copy_bottleneck(src, dst):
if hasattr(src, 'residual_conv'):
_copy_conv2dbn(src.residual_conv, dst.residual_conv)
_copy_conv2dbn(src.conv1, dst.conv1)
_copy_conv2dbn(src.conv2, dst.conv2)
_copy_conv2dbn(src.conv3, dst.conv3)
def _copy_resblock(src, dst):
for layer_name in src.layer_names:
_copy_bottleneck(
getattr(src, layer_name), getattr(dst, layer_name))
pretrained_model = ResNet101(arch='he', pretrained_model='imagenet')
_copy_conv2dbn(pretrained_model.conv1, self.extractor.conv1)
_copy_resblock(pretrained_model.res2, self.extractor.res2)
_copy_resblock(pretrained_model.res3, self.extractor.res3)
_copy_resblock(pretrained_model.res4, self.extractor.res4)
_copy_resblock(pretrained_model.res5, self.extractor.res5)
class ResNet101Extractor(chainer.Chain):
"""ResNet101 Extractor for FCIS ResNet101 implementation.
This class is used as an extractor for FCISResNet101.
This outputs feature maps.
Dilated convolution is used in the C5 stage.
Args:
initialW: Initializer for ResNet101 extractor.
"""
def __init__(self, initialW=None):
super(ResNet101Extractor, self).__init__()
if initialW is None:
initialW = chainer.initializers.HeNormal()
kwargs = {
'initialW': initialW,
'bn_kwargs': {'eps': 1e-5},
'stride_first': True
}
with self.init_scope():
# ResNet
self.conv1 = Conv2DBNActiv(
3, 64, 7, 2, 3, nobias=True, initialW=initialW)
self.pool1 = lambda x: F.max_pooling_2d(x, ksize=3, stride=2)
self.res2 = ResBlock(3, 64, 64, 256, 1, **kwargs)
self.res3 = ResBlock(4, 256, 128, 512, 2, **kwargs)
self.res4 = ResBlock(23, 512, 256, 1024, 2, **kwargs)
self.res5 = ResBlock(3, 1024, 512, 2048, 1, 2, **kwargs)
def forward(self, x):
"""Forward the chain.
Args:
x (~chainer.Variable): 4D image variable.
"""
with chainer.using_config('train', False):
h = self.pool1(self.conv1(x))
h = self.res2(h)
h.unchain_backward()
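            # Gradients are not propagated below res2, keeping the early
            # ResNet stages fixed during training.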
h = self.res3(h)
res4 = self.res4(h)
res5 = self.res5(res4)
return res4, res5
class FCISResNet101Head(chainer.Chain):
"""FCIS Head for ResNet101 based implementation.
This class is used as a head for FCIS.
    This outputs class-agnostic segmentation scores, class-agnostic
localizations and classification based on feature maps in the given RoIs.
Args:
n_class (int): The number of classes possibly including the background.
roi_size (int): Height and width of the feature maps after
Position Sensitive RoI pooling.
group_size (int): Group height and width for Position Sensitive
ROI pooling.
        spatial_scale (float): Scale by which the RoI is resized.
loc_normalize_mean (tuple of four floats): Mean values of
localization estimates.
        loc_normalize_std (tuple of four floats): Standard deviation
of localization estimates.
        iter2 (bool): If set to :obj:`True`, Position Sensitive
            ROI pooling is executed twice. The second pass uses RoIs
            refined by the localization parameters calculated in the
            first pass.
initialW (callable): Initializer for the layers.
"""
def __init__(
self,
n_class,
roi_size, group_size, spatial_scale,
loc_normalize_mean, loc_normalize_std,
iter2, initialW=None
):
super(FCISResNet101Head, self).__init__()
if initialW is None:
initialW = chainer.initializers.Normal(0.01)
self.n_class = n_class
self.spatial_scale = spatial_scale
self.group_size = group_size
self.roi_size = roi_size
self.loc_normalize_mean = loc_normalize_mean
self.loc_normalize_std = loc_normalize_std
self.iter2 = iter2
with self.init_scope():
self.conv1 = L.Convolution2D(
2048, 1024, 1, 1, 0, initialW=initialW)
self.cls_seg = L.Convolution2D(
1024, group_size * group_size * n_class * 2,
1, 1, 0, initialW=initialW)
self.ag_loc = L.Convolution2D(
1024, group_size * group_size * 2 * 4,
1, 1, 0, initialW=initialW)
def forward(self, x, rois, roi_indices, img_size, gt_roi_labels=None):
"""Forward the chain.
We assume that the batch contains :math:`N` images.
Args:
x (~chainer.Variable): 4D image variable.
rois (array): A bounding box array containing coordinates of
proposal boxes. This is a concatenation of bounding box
arrays from multiple images in the batch.
Its shape is :math:`(R', 4)`. Given :math:`R_i` proposed
RoIs from the :math:`i` th image,
:math:`R' = \\sum _{i=1} ^ N R_i`.
roi_indices (array): An array containing indices of the images
to which the bounding boxes correspond. Its shape is :math:`(R',)`.
img_size (tuple of int): Image size as a tuple of :math:`(height, width)`.
"""
h = F.relu(self.conv1(x))
h_cls_seg = self.cls_seg(h)
h_ag_loc = self.ag_loc(h)
# PSROI pooling and regression
roi_ag_seg_scores, roi_ag_locs, roi_cls_scores = self._pool(
h_cls_seg, h_ag_loc, rois, roi_indices, gt_roi_labels)
if self.iter2:
# 2nd Iteration
# get rois2 for more precise prediction
roi_ag_locs = roi_ag_locs.array
mean = self.xp.array(self.loc_normalize_mean)
std = self.xp.array(self.loc_normalize_std)
roi_locs = roi_ag_locs[:, 1, :]
roi_locs = (roi_locs * std + mean).astype(np.float32)
rois2 = loc2bbox(rois, roi_locs)
rois2[:, 0::2] = self.xp.clip(rois2[:, 0::2], 0, img_size[0])
rois2[:, 1::2] = self.xp.clip(rois2[:, 1::2], 0, img_size[1])
# PSROI pooling and regression
roi_ag_seg_scores2, roi_ag_locs2, roi_cls_scores2 = self._pool(
h_cls_seg, h_ag_loc, rois2, roi_indices, gt_roi_labels)
# concat 1st and 2nd iteration results
rois = self.xp.concatenate((rois, rois2))
roi_indices = self.xp.concatenate((roi_indices, roi_indices))
roi_ag_seg_scores = F.concat(
(roi_ag_seg_scores, roi_ag_seg_scores2), axis=0)
roi_ag_locs = F.concat(
(roi_ag_locs, roi_ag_locs2), axis=0)
roi_cls_scores = F.concat(
(roi_cls_scores, roi_cls_scores2), axis=0)
return roi_ag_seg_scores, roi_ag_locs, roi_cls_scores, \
rois, roi_indices
def _pool(
self, h_cls_seg, h_ag_loc, rois, roi_indices, gt_roi_labels):
# PSROI Pooling
# shape: (n_roi, n_class, 2, roi_size, roi_size)
roi_cls_ag_seg_scores = ps_roi_average_pooling_2d(
h_cls_seg, rois, roi_indices,
(self.n_class * 2, self.roi_size, self.roi_size),
self.spatial_scale, self.group_size)
roi_cls_ag_seg_scores = F.reshape(
roi_cls_ag_seg_scores,
(-1, self.n_class, 2, self.roi_size, self.roi_size))
# shape: (n_roi, 2*4, roi_size, roi_size)
roi_ag_loc_scores = ps_roi_average_pooling_2d(
h_ag_loc, rois, roi_indices,
(2 * 4, self.roi_size, self.roi_size),
self.spatial_scale, self.group_size)
# shape: (n_roi, n_class)
roi_cls_scores = F.average(
F.max(roi_cls_ag_seg_scores, axis=2), axis=(2, 3))
# Bbox Regression
# shape: (n_roi, 2, 4)
roi_ag_locs = F.average(roi_ag_loc_scores, axis=(2, 3))
roi_ag_locs = F.reshape(roi_ag_locs, (-1, 2, 4))
# Mask Regression
# shape: (n_roi, n_class, 2, roi_size, roi_size)
if gt_roi_labels is None:
max_cls_indices = roi_cls_scores.array.argmax(axis=1)
else:
max_cls_indices = gt_roi_labels
# shape: (n_roi, 2, roi_size, roi_size)
roi_ag_seg_scores = roi_cls_ag_seg_scores[
self.xp.arange(len(max_cls_indices)), max_cls_indices]
return roi_ag_seg_scores, roi_ag_locs, roi_cls_scores
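# Hedged arithmetic sketch (standalone numpy, illustrative): the iter2 branch in
# forward() de-normalizes the class-agnostic locs and decodes them into refined
# RoIs. _decode_loc below is a simplified stand-in for chainercv's loc2bbox, and
# the mean/std and image size values are illustrative, not the library defaults.
def _decode_loc(rois, locs):
    # rois, locs: (R, 4) float arrays; boxes are (y_min, x_min, y_max, x_max).
    h = rois[:, 2] - rois[:, 0]
    w = rois[:, 3] - rois[:, 1]
    cy = rois[:, 0] + 0.5 * h + locs[:, 0] * h
    cx = rois[:, 1] + 0.5 * w + locs[:, 1] * w
    h = np.exp(locs[:, 2]) * h
    w = np.exp(locs[:, 3]) * w
    return np.stack(
        [cy - 0.5 * h, cx - 0.5 * w, cy + 0.5 * h, cx + 0.5 * w], axis=1)
_mean = np.zeros(4, dtype=np.float32)
_std = np.array([0.2, 0.2, 0.5, 0.5], dtype=np.float32)
_rois = np.array([[10., 10., 50., 80.]], dtype=np.float32)
_locs = np.array([[0.5, -0.2, 0.1, 0.3]], dtype=np.float32)
_rois2 = _decode_loc(_rois, _locs * _std + _mean)
_rois2[:, 0::2] = np.clip(_rois2[:, 0::2], 0, 600)  # clip y within image height
_rois2[:, 1::2] = np.clip(_rois2[:, 1::2], 0, 800)  # clip x within image width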
|
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import CONF_NAME, CONF_TIME_ZONE
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
import homeassistant.util.dt as dt_util
CONF_TIME_FORMAT = "time_format"
DEFAULT_NAME = "Worldclock Sensor"
ICON = "mdi:clock"
DEFAULT_TIME_STR_FORMAT = "%H:%M"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_TIME_ZONE): cv.time_zone,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TIME_FORMAT, default=DEFAULT_TIME_STR_FORMAT): cv.string,
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the World clock sensor."""
name = config.get(CONF_NAME)
time_zone = dt_util.get_time_zone(config.get(CONF_TIME_ZONE))
async_add_entities(
[
WorldClockSensor(
time_zone,
name,
config.get(CONF_TIME_FORMAT),
)
],
True,
)
class WorldClockSensor(Entity):
"""Representation of a World clock sensor."""
def __init__(self, time_zone, name, time_format):
"""Initialize the sensor."""
self._name = name
self._time_zone = time_zone
self._state = None
self._time_format = time_format
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def icon(self):
"""Icon to use in the frontend, if any."""
return ICON
async def async_update(self):
"""Get the time and updates the states."""
self._state = dt_util.now(time_zone=self._time_zone).strftime(self._time_format)
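# Hedged sketch (standard library only, not part of the integration): the state
# set in async_update above is simply the current time rendered in the
# configured zone and format. The zone name below is illustrative; zoneinfo
# requires Python 3.9+.
def _render_worldclock(tz_name="Europe/Paris", fmt=DEFAULT_TIME_STR_FORMAT):
    from datetime import datetime
    from zoneinfo import ZoneInfo
    # Mirrors dt_util.now(time_zone=...).strftime(self._time_format).
    return datetime.now(ZoneInfo(tz_name)).strftime(fmt)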
|
import unittest
import mock
from paasta_tools.deployd import metrics
class TestQueueAndWorkerMetrics(unittest.TestCase):
def setUp(self):
mock_metrics_provider = mock.Mock()
self.mock_queue = mock.Mock()
self.mock_workers = []
mock_create_gauge = mock.Mock(side_effect=lambda *args, **kwargs: mock.Mock())
mock_metrics_provider.create_gauge = mock_create_gauge
self.metrics = metrics.QueueAndWorkerMetrics(
self.mock_queue, self.mock_workers, "mock-cluster", mock_metrics_provider
)
def test_all_metrics(self):
with mock.patch("time.time", autospec=True, return_value=10):
self.mock_queue.get_available_service_instances.return_value = [
(0, mock.Mock()),
(1, mock.Mock()),
(2, mock.Mock()),
(3, mock.Mock()),
(4, mock.Mock()),
# 0-60
(11, mock.Mock()),
(12, mock.Mock()),
# 60-300
(71, mock.Mock()),
(72, mock.Mock()),
(73, mock.Mock()),
# 300-3600
(311, mock.Mock()),
# 3600+
(3611, mock.Mock()),
]
self.mock_queue.get_unavailable_service_instances.return_value = [
(15, 75, mock.Mock())
]
self.metrics.run_once()
# Don't bother testing instances_to_bounce_later_gauge and instances_to_bounce_now_gauge -- they just call
# qsize on things we've mocked out.
self.metrics.instances_with_past_deadline_gauge.set.assert_called_once_with(
5
)
self.metrics.instances_with_deadline_in_next_n_seconds_gauges[
"available", 60
].set.assert_called_once_with(2)
self.metrics.instances_with_deadline_in_next_n_seconds_gauges[
"available", 300
].set.assert_called_once_with(5)
self.metrics.instances_with_deadline_in_next_n_seconds_gauges[
"available", 3600
].set.assert_called_once_with(6)
self.metrics.instances_with_deadline_in_next_n_seconds_gauges[
"unavailable", 60
].set.assert_called_once_with(0)
self.metrics.instances_with_deadline_in_next_n_seconds_gauges[
"unavailable", 300
].set.assert_called_once_with(1)
self.metrics.instances_with_deadline_in_next_n_seconds_gauges[
"unavailable", 3600
].set.assert_called_once_with(1)
self.metrics.max_time_past_deadline_gauge.set.assert_called_once_with(10)
self.metrics.sum_time_past_deadline_gauge.set.assert_called_once_with(
10 + 9 + 8 + 7 + 6
)
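# Hedged sketch (illustrative helper, not the deployd implementation): with
# time.time() mocked to 10, deadlines 0-4 are already past (max 10 s overdue,
# 10+9+8+7+6 = 40 s in total), 11 and 12 fall within the next 60 s, 71-73 bring
# the 300 s window to 5, and 311 brings the 3600 s window to 6; 3611 stays
# outside every window.
def _count_due_within(deadlines, now, horizon):
    """Count deadlines that are not yet past but fall within `horizon` seconds."""
    return sum(1 for d in deadlines if now < d <= now + horizon)
_deadlines = [0, 1, 2, 3, 4, 11, 12, 71, 72, 73, 311, 3611]
assert _count_due_within(_deadlines, 10, 60) == 2
assert _count_due_within(_deadlines, 10, 300) == 5
assert _count_due_within(_deadlines, 10, 3600) == 6
assert sum(10 - d for d in _deadlines if d < 10) == 40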
|