import hashlib
import json
import logging
import os
from builtins import zip
from netort.resource import manager as resource
from . import format as fmt
from . import info
from .config import ComponentFactory
class AmmoFactory(object):
'''
An iterable that produces (timestamp, marker, missile) ammo tuples.
'''
def __init__(self, factory):
'''
Factory parameter is a configured ComponentFactory that
is able to produce load plan and ammo generator.
'''
self.factory = factory
self.load_plan = factory.get_load_plan()
self.ammo_generator = factory.get_ammo_generator()
self.filter = factory.get_filter()
self.marker = factory.get_marker()
def __iter__(self):
'''
Returns a generator of (timestamp, marker, missile) tuples
where missile is in a string representation. Load Plan (timestamps
generator) and ammo generator are taken from the previously
configured ComponentFactory, passed as a parameter to the
__init__ method of this class.
'''
ammo_stream = (
ammo
for ammo in ((missile, marker or self.marker(missile))
for missile, marker in self.ammo_generator)
if self.filter(ammo))
return ((timestamp, marker or self.marker(missile), missile)
for timestamp, (missile, marker
) in zip(self.load_plan, ammo_stream))
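# Illustrative only (hypothetical values): iterating an AmmoFactory yields tuples like
#   (102000, "index_page", "GET / HTTP/1.1\r\nHost: example.org\r\n\r\n")
# i.e. a load-plan timestamp, the marker (either the one carried by the ammo or the one
# produced by the configured marker callable), and the raw missile string.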
class Stepper(object):
def __init__(self, core, **kwargs):
info.status = info.StepperStatus()
info.status.core = core
self.af = AmmoFactory(ComponentFactory(**kwargs))
self.ammo = fmt.Stpd(self.af)
def write(self, f):
for missile in self.ammo:
f.write(missile)
try:
info.status.inc_ammo_count()
except StopIteration:
break
class LoadProfile(object):
def __init__(self, load_type, schedule):
self.load_type = load_type
self.schedule = self.__make_steps(schedule)
def is_rps(self):
return self.load_type == 'rps'
def is_instances(self):
return self.load_type == 'instances'
@staticmethod
def __make_steps(schedule):
steps = []
for step in " ".join(schedule.split("\n")).split(')'):
if step.strip():
steps.append(step.strip() + ')')
return steps
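# Illustrative example of the schedule parsing above (hypothetical schedule string):
#   LoadProfile('rps', 'line(1, 10, 10m)\nconst(10, 5m)').schedule
#   -> ['line(1, 10, 10m)', 'const(10, 5m)']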
class StepperWrapper(object):
# TODO: review and rewrite this class
'''
Wrapper for cached stepper functionality
'''
OPTION_LOAD = 'load_profile'
OPTION_LOAD_TYPE = 'load_type'
OPTION_SCHEDULE = 'schedule'
OPTION_STEPS = 'steps'
OPTION_TEST_DURATION = 'test_duration'
OPTION_AMMO_COUNT = 'ammo_count'
OPTION_LOOP = 'loop'
OPTION_LOOP_COUNT = 'loop_count'
OPTION_AMMOFILE = "ammofile"
OPTION_LOADSCHEME = 'loadscheme'
OPTION_INSTANCES_LIMIT = 'instances'
def __init__(self, core, cfg):
self.log = logging.getLogger(__name__)
self.core = core
self.cfg = cfg
self.cache_dir = '.'
# per-shoot params
self.instances = 1000
self.http_ver = '1.0'
self.ammo_file = None
self.loop_limit = -1
self.ammo_limit = -1
self.uris = []
self.headers = []
self.autocases = 0
self.enum_ammo = False
self.force_stepping = None
self.chosen_cases = []
# out params
self.stpd = None
self.steps = []
self.ammo_count = 1
self.duration = 0
self.loop_count = 0
self.loadscheme = ""
self.file_cache = 8192
def get_option(self, option, param2=None):
''' get_option wrapper; the second argument is accepted for call compatibility but ignored '''
result = self.cfg[option]
self.log.debug(
"Option %s = %s", option, result)
return result
@staticmethod
def get_available_options():
opts = [
StepperWrapper.OPTION_AMMOFILE, StepperWrapper.OPTION_LOOP,
StepperWrapper.OPTION_SCHEDULE, StepperWrapper.OPTION_INSTANCES_LIMIT
]
opts += [
"instances_schedule", "uris", "headers", "header_http", "autocases",
"enum_ammo", "ammo_type", "ammo_limit"
]
opts += [
"use_caching", "cache_dir", "force_stepping", "file_cache",
"chosen_cases"
]
return opts
def read_config(self):
''' stepper part of reading options '''
self.log.info("Configuring StepperWrapper...")
self.ammo_file = self.get_option(self.OPTION_AMMOFILE)
self.ammo_type = self.get_option('ammo_type')
if self.ammo_file:
self.ammo_file = os.path.expanduser(self.ammo_file)
self.loop_limit = self.get_option(self.OPTION_LOOP)
self.ammo_limit = self.get_option("ammo_limit")
self.load_profile = LoadProfile(**self.get_option('load_profile'))
self.instances = int(
self.get_option(self.OPTION_INSTANCES_LIMIT, '1000'))
self.uris = self.get_option("uris", [])
while '' in self.uris:
self.uris.remove('')
self.headers = self.get_option("headers")
self.http_ver = self.get_option("header_http")
self.autocases = self.get_option("autocases")
self.enum_ammo = self.get_option("enum_ammo")
self.use_caching = self.get_option("use_caching")
self.file_cache = self.get_option('file_cache')
cache_dir = self.get_option("cache_dir") or self.core.artifacts_base_dir
self.cache_dir = os.path.expanduser(cache_dir)
self.force_stepping = self.get_option("force_stepping")
if self.get_option(self.OPTION_LOAD)[self.OPTION_LOAD_TYPE] == 'stpd_file':
self.stpd = self.get_option(self.OPTION_LOAD)[self.OPTION_SCHEDULE]
self.chosen_cases = self.get_option("chosen_cases").split()
if self.chosen_cases:
self.log.info("chosen_cases LIMITS: %s", self.chosen_cases)
def prepare_stepper(self):
''' Generate test data if necessary '''
def publish_info(stepper_info):
info.status.publish('loadscheme', stepper_info.loadscheme)
info.status.publish('loop_count', stepper_info.loop_count)
info.status.publish('steps', stepper_info.steps)
info.status.publish('duration', stepper_info.duration)
info.status.ammo_count = stepper_info.ammo_count
info.status.publish('instances', stepper_info.instances)
self.core.publish('stepper', 'loadscheme', stepper_info.loadscheme)
self.core.publish('stepper', 'loop_count', stepper_info.loop_count)
self.core.publish('stepper', 'steps', stepper_info.steps)
self.core.publish('stepper', 'duration', stepper_info.duration)
self.core.publish('stepper', 'ammo_count', stepper_info.ammo_count)
self.core.publish('stepper', 'instances', stepper_info.instances)
return stepper_info
if not self.stpd:
self.stpd = self.__get_stpd_filename()
if self.use_caching and not self.force_stepping and os.path.exists(
self.stpd) and os.path.exists(self.__si_filename()):
self.log.info("Using cached stpd-file: %s", self.stpd)
stepper_info = self.__read_cached_options()
if self.instances and self.load_profile.is_rps():
self.log.info(
"rps_schedule is set. Overriding cached instances param from config: %s",
self.instances)
stepper_info = stepper_info._replace(
instances=self.instances)
publish_info(stepper_info)
else:
if (
self.force_stepping and os.path.exists(self.__si_filename())):
os.remove(self.__si_filename())
self.__make_stpd_file()
stepper_info = info.status.get_info()
self.__write_cached_options(stepper_info)
else:
self.log.info("Using specified stpd-file: %s", self.stpd)
stepper_info = publish_info(self.__read_cached_options())
self.ammo_count = stepper_info.ammo_count
self.duration = stepper_info.duration
self.loop_count = stepper_info.loop_count
self.loadscheme = stepper_info.loadscheme
self.steps = stepper_info.steps
if stepper_info.instances:
self.instances = stepper_info.instances
def __si_filename(self):
'''Return name for stepper_info json file'''
return "%s_si.json" % self.stpd
def __get_stpd_filename(self):
''' Choose the name for stepped data file '''
if self.use_caching:
sep = "|"
hasher = hashlib.md5()
hashed_str = "cache version 6" + sep + \
';'.join(self.load_profile.schedule) + sep + str(self.loop_limit)
hashed_str += sep + str(self.ammo_limit) + sep + ';'.join(
self.load_profile.schedule) + sep + str(self.autocases)
hashed_str += sep + ";".join(self.uris) + sep + ";".join(
self.headers) + sep + self.http_ver + sep + ";".join(
self.chosen_cases)
hashed_str += sep + str(self.enum_ammo) + sep + str(self.ammo_type)
if self.load_profile.is_instances():
hashed_str += sep + str(self.instances)
if self.ammo_file:
opener = resource.get_opener(self.ammo_file)
hashed_str += sep + opener.hash
else:
if not self.uris:
raise RuntimeError("Neither ammofile nor uris specified")
hashed_str += sep + \
';'.join(self.uris) + sep + ';'.join(self.headers)
self.log.debug("stpd-hash source: %s", hashed_str)
hasher.update(hashed_str.encode('utf8'))
if not os.path.exists(self.cache_dir):
os.makedirs(self.cache_dir)
stpd = self.cache_dir + '/' + \
os.path.basename(self.ammo_file) + \
"_" + hasher.hexdigest() + ".stpd"
else:
stpd = os.path.realpath("ammo.stpd")
self.log.debug("Generated cache file name: %s", stpd)
return stpd
def __read_cached_options(self):
'''
Read stepper info from json
'''
self.log.debug("Reading cached stepper info: %s", self.__si_filename())
with open(self.__si_filename(), 'r') as si_file:
si = info.StepperInfo(**json.load(si_file))
return si
def __write_cached_options(self, si):
'''
Write stepper info to json
'''
self.log.debug("Saving stepper info: %s", self.__si_filename())
with open(self.__si_filename(), 'w') as si_file:
json.dump(si._asdict(), si_file, indent=4)
def __make_stpd_file(self):
''' stpd generation using Stepper class '''
self.log.info("Making stpd-file: %s", self.stpd)
stepper = Stepper(
self.core,
rps_schedule=self.load_profile.schedule if self.load_profile.is_rps() else None,
http_ver=self.http_ver,
ammo_file=self.ammo_file,
instances_schedule=self.load_profile.schedule if self.load_profile.is_instances() else None,
instances=self.instances,
loop_limit=self.loop_limit,
ammo_limit=self.ammo_limit,
uris=self.uris,
headers=[header.strip('[]') for header in self.headers],
autocases=self.autocases,
enum_ammo=self.enum_ammo,
ammo_type=self.ammo_type,
chosen_cases=self.chosen_cases,
use_cache=self.use_caching)
with open(self.stpd, 'wb', self.file_cache) as stpd_file:
stepper.write(stpd_file)
|
from mock import call
from mock import Mock
from mock import patch
from paasta_tools.cli.cli import parse_args
from paasta_tools.cli.cmds.rollback import get_git_shas_for_service
from paasta_tools.cli.cmds.rollback import list_previously_deployed_shas
from paasta_tools.cli.cmds.rollback import paasta_rollback
from paasta_tools.cli.cmds.rollback import validate_given_deploy_groups
from paasta_tools.utils import RollbackTypes
@patch("paasta_tools.cli.cmds.rollback.get_currently_deployed_sha", autospec=True)
@patch("paasta_tools.cli.cmds.rollback._log_audit", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.figure_out_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_url", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.mark_for_deployment", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_shas_for_service", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.deploy_authz_check", autospec=True)
def test_paasta_rollback_mark_for_deployment_simple_invocation(
mock_deploy_authz_check,
mock_get_git_shas_for_service,
mock_mark_for_deployment,
mock_get_git_url,
mock_figure_out_service_name,
mock_list_deploy_groups,
mock_log_audit,
mock_get_currently_deployed_sha,
):
fake_args, _ = parse_args(
["rollback", "-s", "fakeservice", "-k", "abcd" * 10, "-l", "fake_deploy_group1"]
)
mock_get_git_shas_for_service.return_value = {
fake_args.commit: ("20170403T025512", fake_args.deploy_groups),
"dcba" * 10: ("20161006T025416", "fake_deploy_group2"),
}
mock_get_git_url.return_value = "git://git.repo"
mock_figure_out_service_name.return_value = fake_args.service
mock_list_deploy_groups.return_value = [fake_args.deploy_groups]
mock_mark_for_deployment.return_value = 0
mock_get_currently_deployed_sha.return_value = "1234" * 10
assert paasta_rollback(fake_args) == 0
mock_mark_for_deployment.assert_called_once_with(
git_url=mock_get_git_url.return_value,
deploy_group=fake_args.deploy_groups,
service=mock_figure_out_service_name.return_value,
commit=fake_args.commit,
)
# ensure that we logged each deploy group that was rolled back AND that we logged things correctly
assert mock_log_audit.call_count == len(mock_list_deploy_groups.return_value)
for call_args in mock_log_audit.call_args_list:
_, call_kwargs = call_args
assert call_kwargs["action"] == "rollback"
assert (
call_kwargs["action_details"]["rolled_back_from"]
== mock_get_currently_deployed_sha.return_value
)
assert call_kwargs["action_details"]["rolled_back_to"] == fake_args.commit
assert (
call_kwargs["action_details"]["rollback_type"]
== RollbackTypes.USER_INITIATED_ROLLBACK.value
)
assert call_kwargs["action_details"]["deploy_group"] in fake_args.deploy_groups
assert call_kwargs["service"] == fake_args.service
@patch("paasta_tools.cli.cmds.rollback.get_currently_deployed_sha", autospec=True)
@patch("paasta_tools.cli.cmds.rollback._log_audit", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.figure_out_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_url", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.mark_for_deployment", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_shas_for_service", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.deploy_authz_check", autospec=True)
def test_paasta_rollback_with_force(
mock_deploy_authz_check,
mock_get_git_shas_for_service,
mock_mark_for_deployment,
mock_get_git_url,
mock_figure_out_service_name,
mock_list_deploy_groups,
mock_log_audit,
mock_get_currently_deployed_sha,
):
fake_args, _ = parse_args(
[
"rollback",
"-s",
"fakeservice",
"-k",
"abcd" * 10,
"-l",
"fake_deploy_group1",
"-f",
]
)
mock_get_git_shas_for_service.return_value = {
"fake_sha1": ("20170403T025512", "fake_deploy_group1"),
"fake_sha2": ("20161006T025416", "fake_deploy_group2"),
}
mock_get_git_url.return_value = "git://git.repo"
mock_figure_out_service_name.return_value = fake_args.service
mock_list_deploy_groups.return_value = [fake_args.deploy_groups]
mock_mark_for_deployment.return_value = 0
mock_get_currently_deployed_sha.return_value = "1234" * 10
assert paasta_rollback(fake_args) == 0
mock_mark_for_deployment.assert_called_once_with(
git_url=mock_get_git_url.return_value,
deploy_group=fake_args.deploy_groups,
service=mock_figure_out_service_name.return_value,
commit=fake_args.commit,
)
# ensure that we logged each deploy group that was rolled back AND that we logged things correctly
assert mock_log_audit.call_count == len(mock_list_deploy_groups.return_value)
for call_args in mock_log_audit.call_args_list:
_, call_kwargs = call_args
assert call_kwargs["action"] == "rollback"
assert (
call_kwargs["action_details"]["rolled_back_from"]
== mock_get_currently_deployed_sha.return_value
)
assert call_kwargs["action_details"]["rolled_back_to"] == fake_args.commit
assert (
call_kwargs["action_details"]["rollback_type"]
== RollbackTypes.USER_INITIATED_ROLLBACK.value
)
assert call_kwargs["action_details"]["deploy_group"] in fake_args.deploy_groups
assert call_kwargs["service"] == fake_args.service
@patch("paasta_tools.cli.cmds.rollback.get_currently_deployed_sha", autospec=True)
@patch("paasta_tools.cli.cmds.rollback._log_audit", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.figure_out_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_url", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.mark_for_deployment", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_shas_for_service", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.deploy_authz_check", autospec=True)
def test_paasta_rollback_mark_for_deployment_no_deploy_group_arg(
mock_deploy_authz_check,
mock_get_git_shas_for_service,
mock_mark_for_deployment,
mock_get_git_url,
mock_figure_out_service_name,
mock_list_deploy_groups,
mock_log_audit,
mock_get_currently_deployed_sha,
):
fake_args, _ = parse_args(["rollback", "-s", "fakeservice", "-k", "abcd" * 10])
mock_get_git_shas_for_service.return_value = {
"fake_sha1": ("20170403T025512", "fake_deploy_group1"),
fake_args.commit: ("20161006T025416", "fake_deploy_group2"),
}
mock_get_git_url.return_value = "git://git.repo"
mock_figure_out_service_name.return_value = fake_args.service
mock_list_deploy_groups.return_value = [
"fake_deploy_group",
"fake_cluster.fake_instance",
]
mock_mark_for_deployment.return_value = 0
mock_get_currently_deployed_sha.return_value = "1234" * 10
assert paasta_rollback(fake_args) == 0
expected = [
call(
git_url=mock_get_git_url.return_value,
service=mock_figure_out_service_name.return_value,
commit=fake_args.commit,
deploy_group="fake_cluster.fake_instance",
),
call(
git_url=mock_get_git_url.return_value,
service=mock_figure_out_service_name.return_value,
commit=fake_args.commit,
deploy_group="fake_deploy_group",
),
]
assert all([x in expected for x in mock_mark_for_deployment.mock_calls])
assert mock_mark_for_deployment.call_count == len(expected)
assert mock_log_audit.call_count == len(mock_list_deploy_groups.return_value)
for call_args in mock_log_audit.call_args_list:
_, call_kwargs = call_args
assert call_kwargs["action"] == "rollback"
assert (
call_kwargs["action_details"]["rolled_back_from"]
== mock_get_currently_deployed_sha.return_value
)
assert call_kwargs["action_details"]["rolled_back_to"] == fake_args.commit
assert (
call_kwargs["action_details"]["rollback_type"]
== RollbackTypes.USER_INITIATED_ROLLBACK.value
)
assert (
call_kwargs["action_details"]["deploy_group"]
in mock_list_deploy_groups.return_value
)
assert call_kwargs["service"] == fake_args.service
@patch("paasta_tools.cli.cmds.rollback._log_audit", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.figure_out_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_url", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.mark_for_deployment", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.deploy_authz_check", autospec=True)
def test_paasta_rollback_mark_for_deployment_wrong_deploy_group_args(
mock_deploy_authz_check,
mock_mark_for_deployment,
mock_get_git_url,
mock_figure_out_service_name,
mock_list_deploy_groups,
mock_log_audit,
):
fake_args, _ = parse_args(
["rollback", "-s", "fakeservice", "-k", "abcd" * 10, "-l", "wrong_deploy_group"]
)
mock_get_git_url.return_value = "git://git.repo"
mock_figure_out_service_name.return_value = fake_args.service
mock_list_deploy_groups.return_value = ["some_other_instance.some_other_cluster"]
assert paasta_rollback(fake_args) == 1
assert not mock_mark_for_deployment.called
assert not mock_log_audit.called
@patch("paasta_tools.cli.cmds.rollback._log_audit", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.figure_out_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_url", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.mark_for_deployment", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_shas_for_service", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.deploy_authz_check", autospec=True)
def test_paasta_rollback_git_sha_was_not_marked_before(
mock_deploy_authz_check,
mock_get_git_shas_for_service,
mock_mark_for_deployment,
mock_get_git_url,
mock_figure_out_service_name,
mock_list_deploy_groups,
mock_log_audit,
):
fake_args, _ = parse_args(
["rollback", "-s", "fakeservice", "-k", "abcd" * 10, "-l", "fake_deploy_group1"]
)
mock_get_git_shas_for_service.return_value = {
"fake_sha1": ("20170403T025512", "fake_deploy_group1"),
"fake_sha2": ("20161006T025416", "fake_deploy_group2"),
}
mock_get_git_url.return_value = "git://git.repo"
mock_figure_out_service_name.return_value = fake_args.service
mock_list_deploy_groups.return_value = [fake_args.deploy_groups]
mock_mark_for_deployment.return_value = 0
assert paasta_rollback(fake_args) == 1
assert not mock_mark_for_deployment.called
assert not mock_log_audit.called
@patch("paasta_tools.cli.cmds.rollback.get_currently_deployed_sha", autospec=True)
@patch("paasta_tools.cli.cmds.rollback._log_audit", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.list_deploy_groups", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.figure_out_service_name", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_url", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.mark_for_deployment", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.get_git_shas_for_service", autospec=True)
@patch("paasta_tools.cli.cmds.rollback.deploy_authz_check", autospec=True)
def test_paasta_rollback_mark_for_deployment_multiple_deploy_group_args(
mock_deploy_authz_check,
mock_get_git_shas_for_service,
mock_mark_for_deployment,
mock_get_git_url,
mock_figure_out_service_name,
mock_list_deploy_groups,
mock_log_audit,
mock_get_currently_deployed_sha,
):
fake_args, _ = parse_args(
[
"rollback",
"-s",
"fakeservice",
"-k",
"abcd" * 10,
"-l",
"cluster.instance1,cluster.instance2",
]
)
fake_deploy_groups = fake_args.deploy_groups.split(",")
mock_get_git_shas_for_service.return_value = {
"fake_sha1": ("20170403T025512", "fake_deploy_group1"),
fake_args.commit: ("20161006T025416", "fake_deploy_group2"),
}
mock_get_git_url.return_value = "git://git.repo"
mock_figure_out_service_name.return_value = fake_args.service
mock_list_deploy_groups.return_value = fake_deploy_groups
mock_mark_for_deployment.return_value = 0
mock_get_currently_deployed_sha.return_value = "1234" * 10
assert paasta_rollback(fake_args) == 0
expected = [
call(
git_url=mock_get_git_url.return_value,
service=mock_figure_out_service_name.return_value,
commit=fake_args.commit,
deploy_group=deploy_group,
)
for deploy_group in fake_deploy_groups
]
mock_mark_for_deployment.assert_has_calls(expected, any_order=True)
assert mock_mark_for_deployment.call_count == len(fake_deploy_groups)
assert mock_log_audit.call_count == len(mock_list_deploy_groups.return_value)
for call_args in mock_log_audit.call_args_list:
_, call_kwargs = call_args
assert call_kwargs["action"] == "rollback"
assert (
call_kwargs["action_details"]["rolled_back_from"]
== mock_get_currently_deployed_sha.return_value
)
assert call_kwargs["action_details"]["rolled_back_to"] == fake_args.commit
assert (
call_kwargs["action_details"]["rollback_type"]
== RollbackTypes.USER_INITIATED_ROLLBACK.value
)
assert (
call_kwargs["action_details"]["deploy_group"]
in mock_list_deploy_groups.return_value
)
assert call_kwargs["service"] == fake_args.service
def test_validate_given_deploy_groups_no_arg():
service_deploy_groups = ["deploy_group1", "deploy_group2"]
given_deploy_groups = []
expected_valid = {"deploy_group1", "deploy_group2"}
expected_invalid = set()
actual_valid, actual_invalid = validate_given_deploy_groups(
service_deploy_groups, given_deploy_groups
)
assert actual_valid == expected_valid
assert actual_invalid == expected_invalid
def test_validate_given_deploy_groups_wrong_arg():
service_deploy_groups = ["deploy_group1", "deploy_group2"]
given_deploy_groups = ["deploy_group0", "not_an_deploy_group"]
expected_valid = set()
expected_invalid = {"deploy_group0", "not_an_deploy_group"}
actual_valid, actual_invalid = validate_given_deploy_groups(
service_deploy_groups, given_deploy_groups
)
assert actual_valid == expected_valid
assert actual_invalid == expected_invalid
def test_validate_given_deploy_groups_single_arg():
service_deploy_groups = ["deploy_group1", "deploy_group2"]
given_deploy_groups = ["deploy_group1"]
expected_valid = {"deploy_group1"}
expected_invalid = set()
actual_valid, actual_invalid = validate_given_deploy_groups(
service_deploy_groups, given_deploy_groups
)
assert actual_valid == expected_valid
assert actual_invalid == expected_invalid
def test_validate_given_deploy_groups_multiple_args():
service_deploy_groups = ["deploy_group1", "deploy_group2", "deploy_group3"]
given_deploy_groups = ["deploy_group1", "deploy_group2"]
expected_valid = {"deploy_group1", "deploy_group2"}
expected_invalid = set()
actual_valid, actual_invalid = validate_given_deploy_groups(
service_deploy_groups, given_deploy_groups
)
assert actual_valid == expected_valid
assert actual_invalid == expected_invalid
def test_validate_given_deploy_groups_duplicate_args():
service_deploy_groups = ["deploy_group1", "deploy_group2", "deploy_group3"]
given_deploy_groups = ["deploy_group1", "deploy_group1"]
expected_valid = {"deploy_group1"}
expected_invalid = set()
actual_valid, actual_invalid = validate_given_deploy_groups(
service_deploy_groups, given_deploy_groups
)
assert actual_valid == expected_valid
assert actual_invalid == expected_invalid
def test_list_previously_deployed_shas():
fake_refs = {
"refs/tags/paasta-test.deploy.group-00000000T000000-deploy": "SHA_IN_OUTPUT",
"refs/tags/paasta-other.deploy.group-00000000T000000-deploy": "NOT_IN_OUTPUT",
}
fake_deploy_groups = ["test.deploy.group"]
with patch(
"paasta_tools.cli.cmds.rollback.list_remote_refs",
autospec=True,
return_value=fake_refs,
), patch(
"paasta_tools.cli.cmds.rollback.list_deploy_groups",
autospec=True,
return_value=fake_deploy_groups,
):
fake_args = Mock(
service="fake_service",
deploy_groups="test.deploy.group,nonexistant.deploy.group",
soa_dir="/fake/soa/dir",
force=None,
)
assert set(list_previously_deployed_shas(fake_args)) == {"SHA_IN_OUTPUT"}
def test_list_previously_deployed_shas_no_deploy_groups():
fake_refs = {
"refs/tags/paasta-test.deploy.group-00000000T000000-deploy": "SHA_IN_OUTPUT",
"refs/tags/paasta-other.deploy.group-00000000T000000-deploy": "SHA_IN_OUTPUT_2",
"refs/tags/paasta-nonexistant.deploy.group-00000000T000000-deploy": "SHA_NOT_IN_OUTPUT",
}
fake_deploy_groups = ["test.deploy.group", "other.deploy.group"]
with patch(
"paasta_tools.cli.cmds.rollback.list_remote_refs",
autospec=True,
return_value=fake_refs,
), patch(
"paasta_tools.cli.cmds.rollback.list_deploy_groups",
autospec=True,
return_value=fake_deploy_groups,
):
fake_args = Mock(
service="fake_service",
deploy_groups="",
soa_dir="/fake/soa/dir",
force=None,
)
assert set(list_previously_deployed_shas(fake_args)) == {
"SHA_IN_OUTPUT",
"SHA_IN_OUTPUT_2",
}
def test_get_git_shas_for_service_no_service_name():
assert get_git_shas_for_service(None, None, "/fake/soa/dir") == []
|
import glob
import os
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.datasets.ade20k.ade20k_utils import get_ade20k
from chainercv.utils import read_image
root = 'pfnet/chainercv/ade20k'
url = 'http://data.csail.mit.edu/places/ADEchallenge/release_test.zip'
class ADE20KTestImageDataset(GetterDataset):
"""Image dataset for test split of `ADE20K`_.
This is an image dataset of test split in ADE20K dataset distributed at
MIT Scene Parsing Benchmark website. It has 3,352 test images.
.. _`MIT Scene Parsing Benchmark`: http://sceneparsing.csail.mit.edu/
Args:
data_dir (string): Path to the dataset directory. The directory should
contain the :obj:`release_test` dir. If :obj:`auto` is given, the
dataset is automatically downloaded into
:obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/ade20k`.
This dataset returns the following data.
.. csv-table::
:header: name, shape, dtype, format
:obj:`img`, ":math:`(3, H, W)`", :obj:`float32`, \
"RGB, :math:`[0, 255]`"
"""
def __init__(self, data_dir='auto'):
super(ADE20KTestImageDataset, self).__init__()
if data_dir == 'auto':
data_dir = get_ade20k(root, url)
img_dir = os.path.join(data_dir, 'release_test', 'testing')
self.img_paths = sorted(glob.glob(os.path.join(img_dir, '*.jpg')))
self.add_getter('img', self._get_image)
self.keys = 'img' # do not return tuple
def __len__(self):
return len(self.img_paths)
def _get_image(self, i):
return read_image(self.img_paths[i])
|
import numpy as np
import os
import warnings
from chainercv.chainer_experimental.datasets.sliceable import GetterDataset
from chainercv.datasets.sbd import sbd_utils
from chainercv.datasets.voc import voc_utils
from chainercv.utils import read_image
try:
import scipy
_available = True
except ImportError:
_available = False
def _check_available():
if not _available:
warnings.warn(
'SciPy is not installed in your environment, '
'so the dataset cannot be loaded. '
'Please install SciPy to load the dataset.\n\n'
'$ pip install scipy')
class SBDInstanceSegmentationDataset(GetterDataset):
"""Instance segmentation dataset for Semantic Boundaries Dataset `SBD`_.
.. _`SBD`: http://home.bharathh.info/pubs/codes/SBD/download.html
Args:
data_dir (string): Path to the root of the training data. If this is
:obj:`auto`, this class will automatically download data for you
under :obj:`$CHAINER_DATASET_ROOT/pfnet/chainercv/sbd`.
split ({'train', 'val', 'trainval'}): Select a split of the dataset.
This dataset returns the following data.
.. csv-table::
:header: name, shape, dtype, format
:obj:`img`, ":math:`(3, H, W)`", :obj:`float32`, \
"RGB, :math:`[0, 255]`"
:obj:`mask`, ":math:`(R, H, W)`", :obj:`bool`, --
:obj:`label`, ":math:`(R,)`", :obj:`int32`, \
":math:`[0, \#fg\_class - 1]`"
"""
def __init__(self, data_dir='auto', split='train'):
super(SBDInstanceSegmentationDataset, self).__init__()
_check_available()
if split not in ['train', 'trainval', 'val']:
raise ValueError(
'please pick split from \'train\', \'trainval\', \'val\'')
if data_dir == 'auto':
data_dir = sbd_utils.get_sbd()
id_list_file = os.path.join(
data_dir, '{}_voc2012.txt'.format(split))
self.ids = [id_.strip() for id_ in open(id_list_file)]
self.data_dir = data_dir
self.add_getter('img', self._get_image)
self.add_getter(('mask', 'label'), self._get_annotations)
def __len__(self):
return len(self.ids)
def _get_image(self, i):
data_id = self.ids[i]
img_file = os.path.join(
self.data_dir, 'img', data_id + '.jpg')
return read_image(img_file, color=True)
def _get_annotations(self, i):
data_id = self.ids[i]
label_img, inst_img = self._load_label_inst(data_id)
mask, label = voc_utils.image_wise_to_instance_wise(
label_img, inst_img)
return mask, label
def _load_label_inst(self, data_id):
label_file = os.path.join(
self.data_dir, 'cls', data_id + '.mat')
inst_file = os.path.join(
self.data_dir, 'inst', data_id + '.mat')
label_anno = scipy.io.loadmat(label_file)
label_img = label_anno['GTcls']['Segmentation'][0][0].astype(np.int32)
inst_anno = scipy.io.loadmat(inst_file)
inst_img = inst_anno['GTinst']['Segmentation'][0][0].astype(np.int32)
inst_img[inst_img == 0] = -1
inst_img[inst_img == 255] = -1
return label_img, inst_img
|
from collections import Counter, deque
from copy import copy
from datetime import timedelta
from functools import partial
import logging
from numbers import Number
import statistics
from typing import Optional
import voluptuous as vol
from homeassistant.components import history
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ENTITY_ID,
ATTR_ICON,
ATTR_UNIT_OF_MEASUREMENT,
CONF_ENTITY_ID,
CONF_NAME,
STATE_UNAVAILABLE,
STATE_UNKNOWN,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_state_change_event
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.util.decorator import Registry
import homeassistant.util.dt as dt_util
from . import DOMAIN, PLATFORMS
_LOGGER = logging.getLogger(__name__)
FILTER_NAME_RANGE = "range"
FILTER_NAME_LOWPASS = "lowpass"
FILTER_NAME_OUTLIER = "outlier"
FILTER_NAME_THROTTLE = "throttle"
FILTER_NAME_TIME_THROTTLE = "time_throttle"
FILTER_NAME_TIME_SMA = "time_simple_moving_average"
FILTERS = Registry()
CONF_FILTERS = "filters"
CONF_FILTER_NAME = "filter"
CONF_FILTER_WINDOW_SIZE = "window_size"
CONF_FILTER_PRECISION = "precision"
CONF_FILTER_RADIUS = "radius"
CONF_FILTER_TIME_CONSTANT = "time_constant"
CONF_FILTER_LOWER_BOUND = "lower_bound"
CONF_FILTER_UPPER_BOUND = "upper_bound"
CONF_TIME_SMA_TYPE = "type"
TIME_SMA_LAST = "last"
WINDOW_SIZE_UNIT_NUMBER_EVENTS = 1
WINDOW_SIZE_UNIT_TIME = 2
DEFAULT_WINDOW_SIZE = 1
DEFAULT_PRECISION = 2
DEFAULT_FILTER_RADIUS = 2.0
DEFAULT_FILTER_TIME_CONSTANT = 10
NAME_TEMPLATE = "{} filter"
ICON = "mdi:chart-line-variant"
FILTER_SCHEMA = vol.Schema(
{vol.Optional(CONF_FILTER_PRECISION, default=DEFAULT_PRECISION): vol.Coerce(int)}
)
FILTER_OUTLIER_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_OUTLIER,
vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
int
),
vol.Optional(CONF_FILTER_RADIUS, default=DEFAULT_FILTER_RADIUS): vol.Coerce(
float
),
}
)
FILTER_LOWPASS_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_LOWPASS,
vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
int
),
vol.Optional(
CONF_FILTER_TIME_CONSTANT, default=DEFAULT_FILTER_TIME_CONSTANT
): vol.Coerce(int),
}
)
FILTER_RANGE_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_RANGE,
vol.Optional(CONF_FILTER_LOWER_BOUND): vol.Coerce(float),
vol.Optional(CONF_FILTER_UPPER_BOUND): vol.Coerce(float),
}
)
FILTER_TIME_SMA_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_SMA,
vol.Optional(CONF_TIME_SMA_TYPE, default=TIME_SMA_LAST): vol.In(
[TIME_SMA_LAST]
),
vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
FILTER_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_THROTTLE,
vol.Optional(CONF_FILTER_WINDOW_SIZE, default=DEFAULT_WINDOW_SIZE): vol.Coerce(
int
),
}
)
FILTER_TIME_THROTTLE_SCHEMA = FILTER_SCHEMA.extend(
{
vol.Required(CONF_FILTER_NAME): FILTER_NAME_TIME_THROTTLE,
vol.Required(CONF_FILTER_WINDOW_SIZE): vol.All(
cv.time_period, cv.positive_timedelta
),
}
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ENTITY_ID): cv.entity_id,
vol.Optional(CONF_NAME): cv.string,
vol.Required(CONF_FILTERS): vol.All(
cv.ensure_list,
[
vol.Any(
FILTER_OUTLIER_SCHEMA,
FILTER_LOWPASS_SCHEMA,
FILTER_TIME_SMA_SCHEMA,
FILTER_THROTTLE_SCHEMA,
FILTER_TIME_THROTTLE_SCHEMA,
FILTER_RANGE_SCHEMA,
)
],
),
}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the template sensors."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
name = config.get(CONF_NAME)
entity_id = config.get(CONF_ENTITY_ID)
filters = [
FILTERS[_filter.pop(CONF_FILTER_NAME)](entity=entity_id, **_filter)
for _filter in config[CONF_FILTERS]
]
async_add_entities([SensorFilter(name, entity_id, filters)])
class SensorFilter(Entity):
"""Representation of a Filter Sensor."""
def __init__(self, name, entity_id, filters):
"""Initialize the sensor."""
self._name = name
self._entity = entity_id
self._unit_of_measurement = None
self._state = None
self._filters = filters
self._icon = None
@callback
def _update_filter_sensor_state_event(self, event):
"""Handle device state changes."""
self._update_filter_sensor_state(event.data.get("new_state"))
@callback
def _update_filter_sensor_state(self, new_state, update_ha=True):
"""Process device state changes."""
if new_state is None or new_state.state in [STATE_UNKNOWN, STATE_UNAVAILABLE]:
return
temp_state = new_state
try:
for filt in self._filters:
filtered_state = filt.filter_state(copy(temp_state))
_LOGGER.debug(
"%s(%s=%s) -> %s",
filt.name,
self._entity,
temp_state.state,
"skip" if filt.skip_processing else filtered_state.state,
)
if filt.skip_processing:
return
temp_state = filtered_state
except ValueError:
_LOGGER.error("Could not convert state: %s to number", self._state)
return
self._state = temp_state.state
if self._icon is None:
self._icon = new_state.attributes.get(ATTR_ICON, ICON)
if self._unit_of_measurement is None:
self._unit_of_measurement = new_state.attributes.get(
ATTR_UNIT_OF_MEASUREMENT
)
if update_ha:
self.async_write_ha_state()
async def async_added_to_hass(self):
"""Register callbacks."""
if "recorder" in self.hass.config.components:
history_list = []
largest_window_items = 0
largest_window_time = timedelta(0)
# Determine the largest window_size by type
for filt in self._filters:
if (
filt.window_unit == WINDOW_SIZE_UNIT_NUMBER_EVENTS
and largest_window_items < filt.window_size
):
largest_window_items = filt.window_size
elif (
filt.window_unit == WINDOW_SIZE_UNIT_TIME
and largest_window_time < filt.window_size
):
largest_window_time = filt.window_size
# Retrieve the largest window_size of each type
if largest_window_items > 0:
filter_history = await self.hass.async_add_executor_job(
partial(
history.get_last_state_changes,
self.hass,
largest_window_items,
entity_id=self._entity,
)
)
if self._entity in filter_history:
history_list.extend(filter_history[self._entity])
if largest_window_time > timedelta(seconds=0):
start = dt_util.utcnow() - largest_window_time
filter_history = await self.hass.async_add_executor_job(
partial(
history.state_changes_during_period,
self.hass,
start,
entity_id=self._entity,
)
)
if self._entity in filter_history:
history_list.extend(
[
state
for state in filter_history[self._entity]
if state not in history_list
]
)
# Sort the window states
history_list = sorted(history_list, key=lambda s: s.last_updated)
_LOGGER.debug(
"Loading from history: %s",
[(s.state, s.last_updated) for s in history_list],
)
# Replay history through the filter chain
for state in history_list:
self._update_filter_sensor_state(state, False)
self.async_on_remove(
async_track_state_change_event(
self.hass, [self._entity], self._update_filter_sensor_state_event
)
)
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self._icon
@property
def unit_of_measurement(self):
"""Return the unit_of_measurement of the device."""
return self._unit_of_measurement
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_ENTITY_ID: self._entity}
class FilterState:
"""State abstraction for filter usage."""
def __init__(self, state):
"""Initialize with HA State object."""
self.timestamp = state.last_updated
try:
self.state = float(state.state)
except ValueError:
self.state = state.state
def set_precision(self, precision):
"""Set precision of Number based states."""
if isinstance(self.state, Number):
value = round(float(self.state), precision)
self.state = int(value) if precision == 0 else value
def __str__(self):
"""Return state as the string representation of FilterState."""
return str(self.state)
def __repr__(self):
"""Return timestamp and state as the representation of FilterState."""
return f"{self.timestamp} : {self.state}"
class Filter:
"""Filter skeleton."""
def __init__(
self,
name,
window_size: int = 1,
precision: Optional[int] = None,
entity: Optional[str] = None,
):
"""Initialize common attributes.
:param window_size: size of the sliding window that holds previous values
:param precision: round filtered value to precision value
:param entity: used for debugging only
"""
if isinstance(window_size, int):
self.states = deque(maxlen=window_size)
self.window_unit = WINDOW_SIZE_UNIT_NUMBER_EVENTS
else:
self.states = deque(maxlen=0)
self.window_unit = WINDOW_SIZE_UNIT_TIME
self.precision = precision
self._name = name
self._entity = entity
self._skip_processing = False
self._window_size = window_size
self._store_raw = False
self._only_numbers = True
@property
def window_size(self):
"""Return window size."""
return self._window_size
@property
def name(self):
"""Return filter name."""
return self._name
@property
def skip_processing(self):
"""Return whether the current filter_state should be skipped."""
return self._skip_processing
def _filter_state(self, new_state):
"""Implement filter."""
raise NotImplementedError()
def filter_state(self, new_state):
"""Implement a common interface for filters."""
fstate = FilterState(new_state)
if self._only_numbers and not isinstance(fstate.state, Number):
raise ValueError
filtered = self._filter_state(fstate)
filtered.set_precision(self.precision)
if self._store_raw:
self.states.append(copy(FilterState(new_state)))
else:
self.states.append(copy(filtered))
new_state.state = filtered.state
return new_state
@FILTERS.register(FILTER_NAME_RANGE)
class RangeFilter(Filter):
"""Range filter.
Determines if new state is in the range of upper_bound and lower_bound.
If not inside, lower or upper bound is returned instead.
"""
def __init__(
self,
entity,
precision: Optional[int] = DEFAULT_PRECISION,
lower_bound: Optional[float] = None,
upper_bound: Optional[float] = None,
):
"""Initialize Filter.
:param upper_bound: band upper bound
:param lower_bound: band lower bound
"""
super().__init__(FILTER_NAME_RANGE, precision=precision, entity=entity)
self._lower_bound = lower_bound
self._upper_bound = upper_bound
self._stats_internal = Counter()
def _filter_state(self, new_state):
"""Implement the range filter."""
if self._upper_bound is not None and new_state.state > self._upper_bound:
self._stats_internal["erasures_up"] += 1
_LOGGER.debug(
"Upper outlier nr. %s in %s: %s",
self._stats_internal["erasures_up"],
self._entity,
new_state,
)
new_state.state = self._upper_bound
elif self._lower_bound is not None and new_state.state < self._lower_bound:
self._stats_internal["erasures_low"] += 1
_LOGGER.debug(
"Lower outlier nr. %s in %s: %s",
self._stats_internal["erasures_low"],
self._entity,
new_state,
)
new_state.state = self._lower_bound
return new_state
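# Illustrative: with lower_bound=0 and upper_bound=100, a state of 120 is clamped to 100
# and a state of -5 is clamped to 0; values inside the band pass through unchanged.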
@FILTERS.register(FILTER_NAME_OUTLIER)
class OutlierFilter(Filter):
"""BASIC outlier filter.
Determines if new state is in a band around the median.
"""
def __init__(self, window_size, precision, entity, radius: float):
"""Initialize Filter.
:param radius: band radius
"""
super().__init__(FILTER_NAME_OUTLIER, window_size, precision, entity)
self._radius = radius
self._stats_internal = Counter()
self._store_raw = True
def _filter_state(self, new_state):
"""Implement the outlier filter."""
median = statistics.median([s.state for s in self.states]) if self.states else 0
if (
len(self.states) == self.states.maxlen
and abs(new_state.state - median) > self._radius
):
self._stats_internal["erasures"] += 1
_LOGGER.debug(
"Outlier nr. %s in %s: %s",
self._stats_internal["erasures"],
self._entity,
new_state,
)
new_state.state = median
return new_state
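# Illustrative: with window_size=5 and radius=2.0, once five raw readings are buffered,
# a new reading of 28 is replaced by the window median (say 21) because |28 - 21| > 2.0;
# note that the raw reading, not the filtered output, is what gets appended to the window.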
@FILTERS.register(FILTER_NAME_LOWPASS)
class LowPassFilter(Filter):
"""BASIC Low Pass Filter."""
def __init__(self, window_size, precision, entity, time_constant: int):
"""Initialize Filter."""
super().__init__(FILTER_NAME_LOWPASS, window_size, precision, entity)
self._time_constant = time_constant
def _filter_state(self, new_state):
"""Implement the low pass filter."""
if not self.states:
return new_state
new_weight = 1.0 / self._time_constant
prev_weight = 1.0 - new_weight
new_state.state = (
prev_weight * self.states[-1].state + new_weight * new_state.state
)
return new_state
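# Illustrative: with time_constant=10 this is an exponential smoothing step,
#   filtered = 0.9 * previous + 0.1 * new
# e.g. previous=20.0 and new=30.0 gives 21.0.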
@FILTERS.register(FILTER_NAME_TIME_SMA)
class TimeSMAFilter(Filter):
"""Simple Moving Average (SMA) Filter.
The window_size is determined by time, and SMA is time weighted.
"""
def __init__(
self, window_size, precision, entity, type
): # pylint: disable=redefined-builtin
"""Initialize Filter.
:param type: type of algorithm used to connect discrete values
"""
super().__init__(FILTER_NAME_TIME_SMA, window_size, precision, entity)
self._time_window = window_size
self.last_leak = None
self.queue = deque()
def _leak(self, left_boundary):
"""Remove timeouted elements."""
while self.queue:
if self.queue[0].timestamp + self._time_window <= left_boundary:
self.last_leak = self.queue.popleft()
else:
return
def _filter_state(self, new_state):
"""Implement the Simple Moving Average filter."""
self._leak(new_state.timestamp)
self.queue.append(copy(new_state))
moving_sum = 0
start = new_state.timestamp - self._time_window
prev_state = self.last_leak or self.queue[0]
for state in self.queue:
moving_sum += (state.timestamp - start).total_seconds() * prev_state.state
start = state.timestamp
prev_state = state
new_state.state = moving_sum / self._time_window.total_seconds()
return new_state
@FILTERS.register(FILTER_NAME_THROTTLE)
class ThrottleFilter(Filter):
"""Throttle Filter.
One sample per window.
"""
def __init__(self, window_size, precision, entity):
"""Initialize Filter."""
super().__init__(FILTER_NAME_THROTTLE, window_size, precision, entity)
self._only_numbers = False
def _filter_state(self, new_state):
"""Implement the throttle filter."""
if not self.states or len(self.states) == self.states.maxlen:
self.states.clear()
self._skip_processing = False
else:
self._skip_processing = True
return new_state
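# Illustrative: with window_size=3 only every third state is emitted; the first reading
# passes, the next two set skip_processing, then the window is cleared and the cycle repeats.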
@FILTERS.register(FILTER_NAME_TIME_THROTTLE)
class TimeThrottleFilter(Filter):
"""Time Throttle Filter.
One sample per time period.
"""
def __init__(self, window_size, precision, entity):
"""Initialize Filter."""
super().__init__(FILTER_NAME_TIME_THROTTLE, window_size, precision, entity)
self._time_window = window_size
self._last_emitted_at = None
self._only_numbers = False
def _filter_state(self, new_state):
"""Implement the filter."""
window_start = new_state.timestamp - self._time_window
if not self._last_emitted_at or self._last_emitted_at <= window_start:
self._last_emitted_at = new_state.timestamp
self._skip_processing = False
else:
self._skip_processing = True
return new_state
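# Illustrative: with window_size=timedelta(minutes=1), a state at 12:00:00 is emitted,
# states arriving before 12:01:00 are skipped, and the next state at or after 12:01:00 is emitted.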
|
from typing import Optional
from aioshelly import Block
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_COLOR_TEMP,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR_TEMP,
LightEntity,
)
from homeassistant.core import callback
from homeassistant.util.color import (
color_temperature_kelvin_to_mired,
color_temperature_mired_to_kelvin,
)
from . import ShellyDeviceWrapper
from .const import DATA_CONFIG_ENTRY, DOMAIN
from .entity import ShellyBlockEntity
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up lights for device."""
wrapper = hass.data[DOMAIN][DATA_CONFIG_ENTRY][config_entry.entry_id]
blocks = [block for block in wrapper.device.blocks if block.type == "light"]
if not blocks:
return
async_add_entities(ShellyLight(wrapper, block) for block in blocks)
class ShellyLight(ShellyBlockEntity, LightEntity):
"""Switch that controls a relay block on Shelly devices."""
def __init__(self, wrapper: ShellyDeviceWrapper, block: Block) -> None:
"""Initialize light."""
super().__init__(wrapper, block)
self.control_result = None
self._supported_features = 0
if hasattr(block, "brightness"):
self._supported_features |= SUPPORT_BRIGHTNESS
if hasattr(block, "colorTemp"):
self._supported_features |= SUPPORT_COLOR_TEMP
@property
def supported_features(self) -> int:
"""Supported features."""
return self._supported_features
@property
def is_on(self) -> bool:
"""If light is on."""
if self.control_result:
return self.control_result["ison"]
return self.block.output
@property
def brightness(self) -> Optional[int]:
"""Brightness of light."""
if self.control_result:
brightness = self.control_result["brightness"]
else:
brightness = self.block.brightness
return int(brightness / 100 * 255)
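# Illustrative: Shelly reports brightness as 0-100 %, Home Assistant uses 0-255,
# so a device value of 50 maps to int(50 / 100 * 255) == 127.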
@property
def color_temp(self) -> Optional[float]:
"""Return the CT color value in mireds."""
if self.control_result:
color_temp = self.control_result["temp"]
else:
color_temp = self.block.colorTemp
# If you set DUO to max mireds in Shelly app, 2700K,
# It reports 0 temp
if color_temp == 0:
return self.max_mireds
return int(color_temperature_kelvin_to_mired(color_temp))
@property
def min_mireds(self) -> float:
"""Return the coldest color_temp that this light supports."""
return color_temperature_kelvin_to_mired(6500)
@property
def max_mireds(self) -> float:
"""Return the warmest color_temp that this light supports."""
return color_temperature_kelvin_to_mired(2700)
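# Illustrative: mireds = 1,000,000 / kelvin, so the supported range above maps to
# roughly 153 mireds (6500 K, coldest) .. 370 mireds (2700 K, warmest).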
async def async_turn_on(self, **kwargs) -> None:
"""Turn on light."""
params = {"turn": "on"}
if ATTR_BRIGHTNESS in kwargs:
tmp_brightness = kwargs[ATTR_BRIGHTNESS]
params["brightness"] = int(tmp_brightness / 255 * 100)
if ATTR_COLOR_TEMP in kwargs:
color_temp = color_temperature_mired_to_kelvin(kwargs[ATTR_COLOR_TEMP])
if color_temp > 6500:
color_temp = 6500
elif color_temp < 2700:
color_temp = 2700
params["temp"] = int(color_temp)
self.control_result = await self.block.set_state(**params)
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
"""Turn off light."""
self.control_result = await self.block.set_state(turn="off")
self.async_write_ha_state()
@callback
def _update_callback(self):
"""When device updates, clear control result that overrides state."""
self.control_result = None
super()._update_callback()
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///app.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
db = SQLAlchemy(app)
migrate = Migrate(app, db, directory='temp_folder/temp_migrations')
manager = Manager(app)
manager.add_command('db', MigrateCommand)
class User(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(128))
@manager.command
def add():
db.session.add(User(name='test'))
db.session.commit()
if __name__ == '__main__':
manager.run()
|
import logging
from typing import Dict
from aiofreepybox.exceptions import InsufficientPermissionsError
from homeassistant.components.switch import SwitchEntity
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from .const import DOMAIN
from .router import FreeboxRouter
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up the switch."""
router = hass.data[DOMAIN][entry.unique_id]
async_add_entities([FreeboxWifiSwitch(router)], True)
class FreeboxWifiSwitch(SwitchEntity):
"""Representation of a freebox wifi switch."""
def __init__(self, router: FreeboxRouter) -> None:
"""Initialize the Wifi switch."""
self._name = "Freebox WiFi"
self._state = None
self._router = router
self._unique_id = f"{self._router.mac} {self._name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
return self._unique_id
@property
def name(self) -> str:
"""Return the name of the switch."""
return self._name
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._state
@property
def device_info(self) -> Dict[str, any]:
"""Return the device information."""
return self._router.device_info
async def _async_set_state(self, enabled: bool):
"""Turn the switch on or off."""
wifi_config = {"enabled": enabled}
try:
await self._router.wifi.set_global_config(wifi_config)
except InsufficientPermissionsError:
_LOGGER.warning(
"Home Assistant does not have permissions to modify the Freebox settings. Please refer to documentation"
)
async def async_turn_on(self, **kwargs):
"""Turn the switch on."""
await self._async_set_state(True)
async def async_turn_off(self, **kwargs):
"""Turn the switch off."""
await self._async_set_state(False)
async def async_update(self):
"""Get the state and update it."""
datas = await self._router.wifi.get_global_config()
active = datas["enabled"]
self._state = bool(active)
|
import os
from six import string_types
from mlpatches.mount_patches import MOUNT_PATCHES
from stashutils.core import get_stash
from stashutils.fsi.base import BaseFSI
from stashutils.fsi.errors import OperationFailure
from stashutils.mount_ctrl import get_manager, set_manager
_stash = get_stash()
# Exceptions
class MountError(Exception):
"""raised when a mount failed."""
pass
# the manager
class MountManager(object):
"""
this class keeps track of the FSIs and their position in the filesystem.
"""
def __init__(self):
self.path2fs = {}
def check_patches_enabled(self):
"""checks wether all required patches are enabled."""
return MOUNT_PATCHES.enabled
def enable_patches(self):
"""enables all patches required for mount."""
if self.check_patches_enabled():
return
MOUNT_PATCHES.enable()
def disable_patches(self):
"""disables all required patches."""
MOUNT_PATCHES.disable()
def get_fsi(self, path):
"""
returns a tuple of (fsi, relpath, readonly) if path is on a mountpoint.
otherwise, returns (None, path, False).
fsi is the FSI which should be used for the action.
relpath is the path which should be used as the path for FSI actions.
readonly indicates whether the mountpoint was mounted read-only.
"""
path = os.path.abspath(path)
i = None
for p in self.path2fs:
if path.startswith(p):
i, readonly = self.path2fs[p]
relpath = path.replace(p, "", 1)
if not relpath.startswith("/"):
relpath = "/" + relpath
return (i, relpath, readonly)
return (None, path, False)
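# Illustrative (hypothetical mountpoint): with an FSI mounted at '/mnt/dropbox',
# get_fsi('/mnt/dropbox/docs/a.txt') returns (fsi, '/docs/a.txt', readonly),
# while a path outside any mountpoint returns (None, path, False).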
def mount_fsi(self, path, fsi, readonly=False):
"""mounts a fsi to a path."""
if not isinstance(fsi, BaseFSI):
raise ValueError("Expected a FSI!")
if not isinstance(path, string_types):
raise ValueError("Expected a string or unicode!")
path = os.path.abspath(path)
if path in self.path2fs:
raise MountError("A Filesystem is already mounted on '{p}'!".format(p=path))
elif not (os.path.exists(path) and os.path.isdir(path)):
raise MountError("Path does not exists.")
self.path2fs[path] = (fsi, readonly)
def unmount_fsi(self, path, force=False):
"""unmounts a fsi."""
path = os.path.abspath(path)
if path not in self.path2fs:
raise MountError("Nothing mounted there.")
fsi, readonly = self.path2fs[path]
if not force:
try:
fsi.close()
except OperationFailure as e:
raise MountError(str(e))
del self.path2fs[path] # todo: close files
def get_mounts(self):
"""
returns a list of (path, fsi, readonly) containing all currently
mounted filesystems.
"""
ret = []
for p in self.path2fs:
fs, readonly = self.path2fs[p]
ret.append((p, fs, readonly))
return ret
# install manager
if get_manager() is None:
set_manager(MountManager())
|
import argparse
import base64
import json
import os
import sys
import re
import urllib.request
import attr
sys.path.insert(0, os.path.join(os.path.dirname(__file__), os.pardir))
from qutebrowser.browser.webengine import spell
from qutebrowser.config import configdata
from qutebrowser.utils import standarddir
API_URL = 'https://chromium.googlesource.com/chromium/deps/hunspell_dictionaries.git/+/master/'
class InvalidLanguageError(Exception):
"""Raised when requesting invalid languages."""
def __init__(self, invalid_langs):
msg = 'invalid languages: {}'.format(', '.join(invalid_langs))
super().__init__(msg)
@attr.s
class Language:
"""Dictionary language specs."""
code = attr.ib()
name = attr.ib()
remote_filename = attr.ib()
local_filename = attr.ib(default=None)
def __attrs_post_init__(self):
if self.local_filename is None:
self.local_filename = spell.local_filename(self.code)
@property
def remote_version(self):
"""Resolve the version of the local dictionary."""
return spell.version(self.remote_filename)
@property
def local_version(self):
"""Resolve the version of the local dictionary."""
local_filename = self.local_filename
if local_filename is None:
return None
return spell.version(local_filename)
def get_argparser():
"""Get the argparse parser."""
desc = 'Install and manage Hunspell dictionaries for QtWebEngine.'
parser = argparse.ArgumentParser(prog='dictcli',
description=desc)
subparsers = parser.add_subparsers(help='Command', dest='cmd')
subparsers.required = True
subparsers.add_parser('list',
help='Display the list of available languages.')
subparsers.add_parser('update',
help='Update dictionaries')
subparsers.add_parser('remove-old',
help='Remove old versions of dictionaries.')
install_parser = subparsers.add_parser('install',
help='Install dictionaries')
install_parser.add_argument('language',
nargs='*',
help="A list of languages to install.")
return parser
def version_str(version):
return '.'.join(str(n) for n in version)
def print_list(languages):
"""Print the list of available languages."""
pat = '{:<7}{:<26}{:<8}{:<5}'
print(pat.format('Code', 'Name', 'Version', 'Installed'))
for lang in languages:
remote_version = version_str(lang.remote_version)
local_version = '-'
if lang.local_version is not None:
local_version = version_str(lang.local_version)
if lang.local_version < lang.remote_version:
local_version += ' - update available!'
print(pat.format(lang.code, lang.name, remote_version, local_version))
def valid_languages():
"""Return a mapping from valid language codes to their names."""
option = configdata.DATA['spellcheck.languages']
return option.typ.valtype.valid_values.descriptions
def parse_entry(entry):
"""Parse an entry from the remote API."""
dict_re = re.compile(r"""
(?P<filename>(?P<code>[a-z]{2}(-[A-Z]{2})?).*\.bdic)
""", re.VERBOSE)
match = dict_re.fullmatch(entry['name'])
if match is not None:
return match.group('code'), match.group('filename')
else:
return None
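# Illustrative: an API entry named 'en-US-8-0.bdic' parses to
# ('en-US', 'en-US-8-0.bdic'); names that do not match the pattern yield None.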
def language_list_from_api():
"""Return a JSON with a list of available languages from Google API."""
listurl = API_URL + '?format=JSON'
response = urllib.request.urlopen(listurl)
# A special 5-byte prefix must be stripped from the response content
# See: https://github.com/google/gitiles/issues/22
# https://github.com/google/gitiles/issues/82
json_content = response.read()[5:]
entries = json.loads(json_content.decode('utf-8'))['entries']
parsed_entries = [parse_entry(entry) for entry in entries]
return [entry for entry in parsed_entries if entry is not None]
def latest_yet(code2file, code, filename):
"""Determine whether the latest version so far."""
if code not in code2file:
return True
return spell.version(code2file[code]) < spell.version(filename)
def available_languages():
"""Return a list of Language objects of all available languages."""
lang_map = valid_languages()
api_list = language_list_from_api()
code2file = {}
for code, filename in api_list:
if latest_yet(code2file, code, filename):
code2file[code] = filename
return [
Language(code, name, code2file[code])
for code, name in lang_map.items()
if code in code2file
]
def download_dictionary(url, dest):
"""Download a decoded dictionary file."""
response = urllib.request.urlopen(url)
decoded = base64.decodebytes(response.read())
with open(dest, 'bw') as dict_file:
dict_file.write(decoded)
def filter_languages(languages, selected):
"""Filter a list of languages based on an inclusion list.
Args:
languages: a list of languages to filter
selected: a list of keys to select
"""
filtered_languages = []
for language in languages:
if language.code in selected:
filtered_languages.append(language)
selected.remove(language.code)
if selected:
raise InvalidLanguageError(selected)
return filtered_languages
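# Illustrative only (assumed codes): given the full language list,
# filter_languages(languages, ['en-US']) returns the matching Language objects,
# while an unknown code such as ['xx-XX'] raises InvalidLanguageError.
# Note that the selected list is consumed (mutated) in the process.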
def install_lang(lang):
"""Install a single lang given by the argument."""
lang_url = API_URL + lang.remote_filename + '?format=TEXT'
if not os.path.isdir(spell.dictionary_dir()):
msg = '{} does not exist, creating the directory'
print(msg.format(spell.dictionary_dir()))
os.makedirs(spell.dictionary_dir())
print('Downloading {}'.format(lang_url))
dest = os.path.join(spell.dictionary_dir(), lang.remote_filename)
download_dictionary(lang_url, dest)
print('Installed to {}.'.format(dest))
def install(languages):
"""Install languages."""
for lang in languages:
print('Installing {}: {}'.format(lang.code, lang.name))
install_lang(lang)
def update(languages):
"""Update the given languages."""
installed = [lang for lang in languages if lang.local_version is not None]
for lang in installed:
if lang.local_version < lang.remote_version:
print('Upgrading {} from {} to {}'.format(
lang.code,
version_str(lang.local_version),
version_str(lang.remote_version)))
install_lang(lang)
def remove_old(languages):
"""Remove old versions of languages."""
installed = [lang for lang in languages if lang.local_version is not None]
for lang in installed:
local_files = spell.local_files(lang.code)
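        # local_files() is assumed to list the newest dictionary first, so
        # every following entry is an outdated version that can be removed.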
for old_file in local_files[1:]:
os.remove(os.path.join(spell.dictionary_dir(), old_file))
def main():
if configdata.DATA is None:
configdata.init()
standarddir.init(None)
parser = get_argparser()
argv = sys.argv[1:]
args = parser.parse_args(argv)
languages = available_languages()
if args.cmd == 'list':
print_list(languages)
elif args.cmd == 'update':
update(languages)
elif args.cmd == 'remove-old':
remove_old(languages)
elif not args.language:
sys.exit('You must provide a list of languages to install.')
else:
try:
install(filter_languages(languages, args.language))
except InvalidLanguageError as e:
print(e)
if __name__ == '__main__':
main()
|
import pytest
from homeassistant import config_entries, data_entry_flow, setup
from homeassistant.config import async_process_ha_core_config
from homeassistant.helpers import config_entry_flow
from tests.async_mock import Mock, patch
from tests.common import (
MockConfigEntry,
MockModule,
mock_entity_platform,
mock_integration,
)
@pytest.fixture
def discovery_flow_conf(hass):
"""Register a handler."""
handler_conf = {"discovered": False}
async def has_discovered_devices(hass):
"""Mock if we have discovered devices."""
return handler_conf["discovered"]
with patch.dict(config_entries.HANDLERS):
config_entry_flow.register_discovery_flow(
"test", "Test", has_discovered_devices, config_entries.CONN_CLASS_LOCAL_POLL
)
yield handler_conf
@pytest.fixture
def webhook_flow_conf(hass):
"""Register a handler."""
with patch.dict(config_entries.HANDLERS):
config_entry_flow.register_webhook_flow("test_single", "Test Single", {}, False)
config_entry_flow.register_webhook_flow(
"test_multiple", "Test Multiple", {}, True
)
yield {}
async def test_single_entry_allowed(hass, discovery_flow_conf):
"""Test only a single entry is allowed."""
flow = config_entries.HANDLERS["test"]()
flow.hass = hass
flow.context = {}
MockConfigEntry(domain="test").add_to_hass(hass)
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_user_no_devices_found(hass, discovery_flow_conf):
"""Test if no devices found."""
flow = config_entries.HANDLERS["test"]()
flow.hass = hass
flow.context = {"source": config_entries.SOURCE_USER}
result = await flow.async_step_confirm(user_input={})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "no_devices_found"
async def test_user_has_confirmation(hass, discovery_flow_conf):
"""Test user requires confirmation to setup."""
discovery_flow_conf["discovered"] = True
mock_entity_platform(hass, "config_flow.test", None)
result = await hass.config_entries.flow.async_init(
"test", context={"source": config_entries.SOURCE_USER}, data={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
@pytest.mark.parametrize("source", ["discovery", "mqtt", "ssdp", "zeroconf"])
async def test_discovery_single_instance(hass, discovery_flow_conf, source):
"""Test we not allow duplicates."""
flow = config_entries.HANDLERS["test"]()
flow.hass = hass
flow.context = {}
MockConfigEntry(domain="test").add_to_hass(hass)
result = await getattr(flow, f"async_step_{source}")({})
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
@pytest.mark.parametrize("source", ["discovery", "mqtt", "ssdp", "zeroconf"])
async def test_discovery_confirmation(hass, discovery_flow_conf, source):
"""Test we ask for confirmation via discovery."""
flow = config_entries.HANDLERS["test"]()
flow.hass = hass
flow.context = {"source": source}
result = await getattr(flow, f"async_step_{source}")({})
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "confirm"
result = await flow.async_step_confirm({})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_multiple_discoveries(hass, discovery_flow_conf):
"""Test we only create one instance for multiple discoveries."""
mock_entity_platform(hass, "config_flow.test", None)
result = await hass.config_entries.flow.async_init(
"test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
# Second discovery
result = await hass.config_entries.flow.async_init(
"test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_only_one_in_progress(hass, discovery_flow_conf):
"""Test a user initialized one will finish and cancel discovered one."""
mock_entity_platform(hass, "config_flow.test", None)
# Discovery starts flow
result = await hass.config_entries.flow.async_init(
"test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
# User starts flow
result = await hass.config_entries.flow.async_init(
"test", context={"source": config_entries.SOURCE_USER}, data={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
# Discovery flow has not been aborted
assert len(hass.config_entries.flow.async_progress()) == 2
# Discovery should be aborted once user confirms
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert len(hass.config_entries.flow.async_progress()) == 0
async def test_import_abort_discovery(hass, discovery_flow_conf):
"""Test import will finish and cancel discovered one."""
mock_entity_platform(hass, "config_flow.test", None)
# Discovery starts flow
result = await hass.config_entries.flow.async_init(
"test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
# Start import flow
result = await hass.config_entries.flow.async_init(
"test", context={"source": config_entries.SOURCE_IMPORT}, data={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
# Discovery flow has been aborted
assert len(hass.config_entries.flow.async_progress()) == 0
async def test_import_no_confirmation(hass, discovery_flow_conf):
"""Test import requires no confirmation to set up."""
flow = config_entries.HANDLERS["test"]()
flow.hass = hass
flow.context = {}
discovery_flow_conf["discovered"] = True
result = await flow.async_step_import(None)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
async def test_import_single_instance(hass, discovery_flow_conf):
"""Test import doesn't create second instance."""
flow = config_entries.HANDLERS["test"]()
flow.hass = hass
flow.context = {}
discovery_flow_conf["discovered"] = True
MockConfigEntry(domain="test").add_to_hass(hass)
result = await flow.async_step_import(None)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_ignored_discoveries(hass, discovery_flow_conf):
"""Test we can ignore discovered entries."""
mock_entity_platform(hass, "config_flow.test", None)
result = await hass.config_entries.flow.async_init(
"test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
flow = next(
(
flw
for flw in hass.config_entries.flow.async_progress()
if flw["flow_id"] == result["flow_id"]
),
None,
)
# Ignore it.
await hass.config_entries.flow.async_init(
flow["handler"],
context={"source": config_entries.SOURCE_IGNORE},
data={"unique_id": flow["context"]["unique_id"]},
)
# Second discovery should be aborted
result = await hass.config_entries.flow.async_init(
"test", context={"source": config_entries.SOURCE_DISCOVERY}, data={}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_webhook_single_entry_allowed(hass, webhook_flow_conf):
"""Test only a single entry is allowed."""
flow = config_entries.HANDLERS["test_single"]()
flow.hass = hass
MockConfigEntry(domain="test_single").add_to_hass(hass)
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "single_instance_allowed"
async def test_webhook_multiple_entries_allowed(hass, webhook_flow_conf):
"""Test multiple entries are allowed when specified."""
flow = config_entries.HANDLERS["test_multiple"]()
flow.hass = hass
MockConfigEntry(domain="test_multiple").add_to_hass(hass)
hass.config.api = Mock(base_url="http://example.com")
result = await flow.async_step_user()
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
async def test_webhook_config_flow_registers_webhook(hass, webhook_flow_conf):
"""Test setting up an entry creates a webhook."""
flow = config_entries.HANDLERS["test_single"]()
flow.hass = hass
await async_process_ha_core_config(
hass,
{"external_url": "https://example.com"},
)
result = await flow.async_step_user(user_input={})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["data"]["webhook_id"] is not None
async def test_webhook_create_cloudhook(hass, webhook_flow_conf):
"""Test only a single entry is allowed."""
assert await setup.async_setup_component(hass, "cloud", {})
async_setup_entry = Mock(return_value=True)
async_unload_entry = Mock(return_value=True)
mock_integration(
hass,
MockModule(
"test_single",
async_setup_entry=async_setup_entry,
async_unload_entry=async_unload_entry,
async_remove_entry=config_entry_flow.webhook_async_remove_entry,
),
)
mock_entity_platform(hass, "config_flow.test_single", None)
result = await hass.config_entries.flow.async_init(
"test_single", context={"source": config_entries.SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
with patch(
"hass_nabucasa.cloudhooks.Cloudhooks.async_create",
return_value={"cloudhook_url": "https://example.com"},
) as mock_create, patch(
"homeassistant.components.cloud.async_active_subscription", return_value=True
), patch(
"homeassistant.components.cloud.async_is_logged_in", return_value=True
):
result = await hass.config_entries.flow.async_configure(result["flow_id"], {})
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["description_placeholders"]["webhook_url"] == "https://example.com"
assert len(mock_create.mock_calls) == 1
assert len(async_setup_entry.mock_calls) == 1
with patch(
"hass_nabucasa.cloudhooks.Cloudhooks.async_delete",
return_value={"cloudhook_url": "https://example.com"},
) as mock_delete:
result = await hass.config_entries.async_remove(result["result"].entry_id)
assert len(mock_delete.mock_calls) == 1
assert result["require_restart"] is False
|
import os
import pytest
from molecule.command.init import scenario
@pytest.fixture
def _command_args():
return {
'driver_name': 'docker',
'role_name': 'test-role',
'scenario_name': 'test-scenario',
'subcommand': __name__,
'verifier_name': 'testinfra'
}
@pytest.fixture
def _instance(_command_args):
return scenario.Scenario(_command_args)
def test_execute(temp_dir, _instance, patched_logger_info,
patched_logger_success):
_instance.execute()
msg = 'Initializing new scenario test-scenario...'
patched_logger_info.assert_called_once_with(msg)
assert os.path.isdir('./molecule/test-scenario')
assert os.path.isdir('./molecule/test-scenario/tests')
scenario_directory = os.path.join(temp_dir.strpath, 'molecule',
'test-scenario')
msg = 'Initialized scenario in {} successfully.'.format(scenario_directory)
patched_logger_success.assert_called_once_with(msg)
def test_execute_scenario_exists(temp_dir, _instance, patched_logger_critical):
_instance.execute()
with pytest.raises(SystemExit) as e:
_instance.execute()
assert 1 == e.value.code
msg = ('The directory molecule/test-scenario exists. '
'Cannot create new scenario.')
patched_logger_critical.assert_called_once_with(msg)
|
def __init__():
"""
This is a module initialization function.
"""
"""
Yandex.Tank will call these scenarios
passing 3 parameters to them:
missile: missile from ammo file
marker: marker from ammo file
measure: measuring context
"""
def scenario_1(missile, marker, measure):
with measure("scenario_1_step_1") as m:
# make step 1 and set result codes
m["proto_code"] = 200
m["net_code"] = 0
with measure("scenario_1_step_2") as m:
# make step 2 and set result codes
m["proto_code"] = 200
m["net_code"] = 0
def scenario_2(missile, marker, measure):
with measure("scenario_2_step_1") as m:
# make step 1 and set result codes
m["proto_code"] = 200
m["net_code"] = 0
with measure("scenario_2_step_2") as m:
# make step 2 and set result codes
m["proto_code"] = 200
m["net_code"] = 0
"""
SCENARIOS module variable is used by Tank to choose the scenario to
shoot with. For each missile Tank will look up missile marker in this dict.
"""
SCENARIOS = {
"scenario_1": scenario_1,
"scenario_2": scenario_1,
}
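# Illustrative only: a minimal sketch (an assumption, not part of the Tank API)
# of how a caller could use SCENARIOS to dispatch a missile to the scenario
# registered for its marker. The function name run_scenario is hypothetical.
def run_scenario(missile, marker, measure):
    # Look up the scenario by the missile marker, as described above.
    scenario = SCENARIOS.get(marker)
    if scenario is None:
        raise LookupError("no scenario registered for marker %r" % marker)
    scenario(missile, marker, measure)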
|
from django.conf import settings
from django.core.checks import run_checks
from django.core.mail import send_mail
from django.db.models import Count, Q
from django.http import HttpResponse
from django.shortcuts import redirect, render
from django.utils import timezone
from django.utils.translation import gettext as _
from django.utils.translation import gettext_lazy
from weblate.auth.decorators import management_access
from weblate.auth.forms import AdminInviteUserForm
from weblate.auth.models import User
from weblate.configuration.models import Setting
from weblate.configuration.views import CustomCSSView
from weblate.trans.forms import AnnouncementForm
from weblate.trans.models import Alert, Announcement, Component, Project
from weblate.utils import messages
from weblate.utils.celery import get_queue_stats
from weblate.utils.errors import report_error
from weblate.utils.tasks import database_backup, settings_backup
from weblate.utils.views import show_form_errors
from weblate.vcs.ssh import (
RSA_KEY,
add_host_key,
can_generate_key,
generate_ssh_key,
get_host_keys,
get_key_data,
ssh_file,
)
from weblate.wladmin.forms import (
ActivateForm,
AppearanceForm,
BackupForm,
SSHAddForm,
TestMailForm,
UserSearchForm,
)
from weblate.wladmin.models import BackupService, ConfigurationError, SupportStatus
from weblate.wladmin.tasks import backup_service, configuration_health_check
MENU = (
("index", "manage", gettext_lazy("Weblate status")),
("backups", "manage-backups", gettext_lazy("Backups")),
("memory", "manage-memory", gettext_lazy("Translation memory")),
("performance", "manage-performance", gettext_lazy("Performance report")),
("ssh", "manage-ssh", gettext_lazy("SSH keys")),
("alerts", "manage-alerts", gettext_lazy("Alerts")),
("repos", "manage-repos", gettext_lazy("Repositories")),
("users", "manage-users", gettext_lazy("Users")),
("appearance", "manage-appearance", gettext_lazy("Appearance")),
("tools", "manage-tools", gettext_lazy("Tools")),
)
if "weblate.billing" in settings.INSTALLED_APPS:
MENU += (("billing", "manage-billing", gettext_lazy("Billing")),)
@management_access
def manage(request):
support = SupportStatus.objects.get_current()
return render(
request,
"manage/index.html",
{
"menu_items": MENU,
"menu_page": "index",
"support": support,
"activate_form": ActivateForm(),
},
)
def send_test_mail(email):
send_mail(
subject="Test e-mail from Weblate on %s" % timezone.now(),
message="It works.",
recipient_list=[email],
from_email=None,
)
@management_access
def tools(request):
email_form = TestMailForm(initial={"email": request.user.email})
announce_form = AnnouncementForm()
if request.method == "POST":
if "email" in request.POST:
email_form = TestMailForm(request.POST)
if email_form.is_valid():
try:
send_test_mail(**email_form.cleaned_data)
messages.success(request, _("Test e-mail sent."))
except Exception as error:
report_error()
messages.error(request, _("Could not send test e-mail: %s") % error)
if "sentry" in request.POST:
try:
raise Exception("Test exception")
except Exception:
report_error()
if "message" in request.POST:
announce_form = AnnouncementForm(request.POST)
if announce_form.is_valid():
Announcement.objects.create(
user=request.user, **announce_form.cleaned_data
)
return render(
request,
"manage/tools.html",
{
"menu_items": MENU,
"menu_page": "tools",
"email_form": email_form,
"announce_form": announce_form,
},
)
@management_access
def activate(request):
form = ActivateForm(request.POST)
if form.is_valid():
support = SupportStatus(**form.cleaned_data)
try:
support.refresh()
support.save()
messages.success(request, _("Activation completed."))
except Exception:
report_error()
messages.error(
request,
_(
"Could not activate your installation. "
"Please ensure your activation token is correct."
),
)
else:
show_form_errors(request, form)
return redirect("manage")
@management_access
def repos(request):
"""Provide report about Git status of all repos."""
return render(
request,
"manage/repos.html",
{
"components": Component.objects.order_project(),
"menu_items": MENU,
"menu_page": "repos",
},
)
@management_access
def backups(request):
form = BackupForm()
if request.method == "POST":
if "repository" in request.POST:
form = BackupForm(request.POST)
if form.is_valid():
form.save()
return redirect("manage-backups")
elif "remove" in request.POST:
service = BackupService.objects.get(pk=request.POST["service"])
service.delete()
return redirect("manage-backups")
elif "toggle" in request.POST:
service = BackupService.objects.get(pk=request.POST["service"])
service.enabled = not service.enabled
service.save()
return redirect("manage-backups")
elif "trigger" in request.POST:
settings_backup.delay()
database_backup.delay()
backup_service.delay(pk=request.POST["service"])
messages.success(request, _("Backup process triggered"))
return redirect("manage-backups")
context = {
"services": BackupService.objects.all(),
"menu_items": MENU,
"menu_page": "backups",
"form": form,
"activate_form": ActivateForm(),
}
return render(request, "manage/backups.html", context)
def handle_dismiss(request):
try:
error = ConfigurationError.objects.get(pk=int(request.POST["pk"]))
if "ignore" in request.POST:
error.ignored = True
error.save(update_fields=["ignored"])
else:
error.delete()
except (ValueError, KeyError, ConfigurationError.DoesNotExist):
messages.error(request, _("Could not dismiss the configuration error!"))
return redirect("manage-performance")
@management_access
def performance(request):
"""Show performance tuning tips."""
if request.method == "POST":
return handle_dismiss(request)
checks = run_checks(include_deployment_checks=True)
configuration_health_check.delay()
context = {
"checks": [check for check in checks if not check.is_silenced()],
"errors": ConfigurationError.objects.filter(ignored=False),
"queues": get_queue_stats().items(),
"menu_items": MENU,
"menu_page": "performance",
}
return render(request, "manage/performance.html", context)
@management_access
def ssh_key(request):
with open(ssh_file(RSA_KEY)) as handle:
data = handle.read()
response = HttpResponse(data, content_type="text/plain")
response["Content-Disposition"] = f"attachment; filename={RSA_KEY}"
response["Content-Length"] = len(data)
return response
@management_access
def ssh(request):
"""Show information and manipulate with SSH key."""
# Check whether we can generate SSH key
can_generate = can_generate_key()
# Grab action type
action = request.POST.get("action")
# Generate key if it does not exist yet
if can_generate and action == "generate":
generate_ssh_key(request)
# Read key data if it exists
key = get_key_data()
# Add host key
form = SSHAddForm()
if action == "add-host":
form = SSHAddForm(request.POST)
if form.is_valid():
add_host_key(request, **form.cleaned_data)
context = {
"public_key": key,
"can_generate": can_generate,
"host_keys": get_host_keys(),
"menu_items": MENU,
"menu_page": "ssh",
"add_form": form,
}
return render(request, "manage/ssh.html", context)
@management_access
def alerts(request):
"""Shows component alerts."""
context = {
"alerts": Alert.objects.order_by(
"name", "component__project__name", "component__name"
).select_related("component", "component__project"),
"no_components": Project.objects.annotate(Count("component")).filter(
component__count=0
),
"menu_items": MENU,
"menu_page": "alerts",
}
return render(request, "manage/alerts.html", context)
@management_access
def users(request):
invite_form = AdminInviteUserForm()
if request.method == "POST":
if "email" in request.POST:
invite_form = AdminInviteUserForm(request.POST)
if invite_form.is_valid():
invite_form.save(request)
messages.success(request, _("User has been invited to this project."))
return redirect("manage-users")
return render(
request,
"manage/users.html",
{
"menu_items": MENU,
"menu_page": "users",
"invite_form": invite_form,
"search_form": UserSearchForm,
},
)
@management_access
def users_check(request):
form = UserSearchForm(request.GET if request.GET else None)
user_list = None
if form.is_valid():
email = form.cleaned_data["email"]
user_list = User.objects.filter(
Q(email=email) | Q(social_auth__verifiedemail__email__iexact=email)
).distinct()
return render(
request,
"manage/users_check.html",
{
"menu_items": MENU,
"menu_page": "users",
"form": form,
"users": user_list,
},
)
@management_access
def appearance(request):
current = Setting.objects.get_settings_dict(Setting.CATEGORY_UI)
form = AppearanceForm(initial=current)
if request.method == "POST":
if "reset" in request.POST:
Setting.objects.filter(category=Setting.CATEGORY_UI).delete()
CustomCSSView.drop_cache()
return redirect("manage-appearance")
form = AppearanceForm(request.POST)
if form.is_valid():
for name, value in form.cleaned_data.items():
if name not in current:
# New setting previously not set
Setting.objects.create(
category=Setting.CATEGORY_UI, name=name, value=value
)
else:
if value != current[name]:
# Update setting
Setting.objects.filter(
category=Setting.CATEGORY_UI, name=name
).update(value=value)
current.pop(name)
# Drop stale settings
if current:
Setting.objects.filter(
category=Setting.CATEGORY_UI, name__in=current.keys()
).delete()
# Flush cache
CustomCSSView.drop_cache()
return redirect("manage-appearance")
return render(
request,
"manage/appearance.html",
{
"menu_items": MENU,
"menu_page": "appearance",
"form": form,
},
)
@management_access
def billing(request):
from weblate.billing.models import Billing
trial = []
pending = []
removal = []
free = []
paid = []
terminated = []
# We will list all billings anyway, so fetch them at once
billings = Billing.objects.prefetch().order_by("expiry", "removal", "id")
    for current in billings:
        if current.removal:
            removal.append(current)
        elif current.state == Billing.STATE_TRIAL:
            if (
                current.plan
                and current.plan.price == 0
                and current.payment.get("libre_request")
            ):
                pending.append(current)
            trial.append(current)
        elif current.state == Billing.STATE_TERMINATED:
            terminated.append(current)
        elif current.plan.price:
            paid.append(current)
        else:
            free.append(current)
return render(
request,
"manage/billing.html",
{
"menu_items": MENU,
"menu_page": "billing",
"trial": trial,
"removal": removal,
"free": free,
"paid": paid,
"terminated": terminated,
"pending": pending,
},
)
|
import speech_recognition as sr
from kalliope.core import Utils
from kalliope.stt.Utils import SpeechRecognition
class Houndify(SpeechRecognition):
def __init__(self, callback=None, **kwargs):
"""
Start recording the microphone and analyse audio with Houndify api
:param callback: The callback function to call to send the text
:param kwargs:
"""
        # pass the audio file path directly to the parent class if it exists
SpeechRecognition.__init__(self, kwargs.get('audio_file_path', None))
        # callback function to call after the speech-to-text transcription
self.main_controller_callback = callback
self.client_id = kwargs.get('client_id', None)
self.key = kwargs.get('key', None)
# only english supported
# self.language = kwargs.get('language', "en-US")
self.show_all = kwargs.get('show_all', False)
# start listening in the background
self.set_callback(self.houndify_callback)
# start processing, record a sample from the microphone if no audio file path provided, else read the file
self.start_processing()
def houndify_callback(self, recognizer, audio):
"""
called from the background thread
"""
try:
captured_audio = recognizer.recognize_houndify(audio,
client_id=self.client_id,
client_key=self.key,
show_all=self.show_all)
Utils.print_success("Houndify Speech Recognition thinks you said %s" % captured_audio)
self._analyse_audio(captured_audio)
except sr.UnknownValueError:
Utils.print_warning("Houndify Speech Recognition could not understand audio")
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except sr.RequestError as e:
Utils.print_danger("Could not request results from Houndify Speech Recognition service; {0}".format(e))
# callback anyway, we need to listen again for a new order
self._analyse_audio(audio_to_text=None)
except AssertionError:
Utils.print_warning("No audio caught from microphone")
self._analyse_audio(audio_to_text=None)
def _analyse_audio(self, audio_to_text):
"""
        Forward the transcribed text to the main controller callback, if one is set.
        :param audio_to_text: the text transcribed from the captured audio (or None)
"""
if self.main_controller_callback is not None:
self.main_controller_callback(audio_to_text)
|
from datetime import datetime, timedelta
import os
import sys
import click
import pandas as pd
import pytz
from qstrader.alpha_model.fixed_signals import FixedSignalsAlphaModel
from qstrader.asset.equity import Equity
from qstrader.asset.universe.static import StaticUniverse
from qstrader.data.backtest_data_handler import BacktestDataHandler
from qstrader.data.daily_bar_csv import CSVDailyBarDataSource
from qstrader.statistics.json_statistics import JSONStatistics
from qstrader.statistics.tearsheet import TearsheetStatistics
from qstrader.trading.backtest import BacktestTradingSession
def obtain_allocations(allocations):
"""
Converts the provided command-line allocations string
into a dictionary used for QSTrader.
Parameters
----------
allocations : `str`
The asset allocations string.
Returns
-------
`dict`
The asset allocation dictionary
"""
allocs_dict = {}
try:
allocs = allocations.split(',')
for alloc in allocs:
alloc_asset, alloc_value = alloc.split(':')
allocs_dict['EQ:%s' % alloc_asset] = float(alloc_value)
except Exception:
print(
"Could not determine the allocations from the provided "
"allocations string. Terminating."
)
sys.exit()
else:
return allocs_dict
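# Illustrative only, using the example from the --allocations help text below:
# obtain_allocations("SPY:0.6,AGG:0.4") returns {'EQ:SPY': 0.6, 'EQ:AGG': 0.4}.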
@click.command()
@click.option('--start-date', 'start_date', help='Backtest starting date')
@click.option('--end-date', 'end_date', help='Backtest ending date')
@click.option('--allocations', 'allocations', help='Allocations key-values, i.e. "SPY:0.6,AGG:0.4"')
@click.option('--title', 'strat_title', help='Backtest strategy title')
@click.option('--id', 'strat_id', help='Backtest strategy ID string')
@click.option('--tearsheet', 'tearsheet', is_flag=True, default=False, help='Whether to display the (blocking) tearsheet plot')
def cli(start_date, end_date, allocations, strat_title, strat_id, tearsheet):
csv_dir = os.environ.get('QSTRADER_CSV_DATA_DIR', '.')
start_dt = pd.Timestamp('%s 00:00:00' % start_date, tz=pytz.UTC)
if end_date is None:
# Use yesterday's date
yesterday = (datetime.now() - timedelta(1)).strftime('%Y-%m-%d')
end_dt = pd.Timestamp('%s 23:59:00' % yesterday, tz=pytz.UTC)
else:
end_dt = pd.Timestamp('%s 23:59:00' % end_date, tz=pytz.UTC)
alloc_dict = obtain_allocations(allocations)
# Assets and Data Handling
strategy_assets = list(alloc_dict.keys())
strategy_symbols = [symbol.replace('EQ:', '') for symbol in strategy_assets]
strategy_universe = StaticUniverse(strategy_assets)
strategy_data_source = CSVDailyBarDataSource(csv_dir, Equity, csv_symbols=strategy_symbols)
strategy_data_handler = BacktestDataHandler(
strategy_universe, data_sources=[strategy_data_source]
)
strategy_assets = alloc_dict.keys()
strategy_alpha_model = FixedSignalsAlphaModel(alloc_dict)
strategy_backtest = BacktestTradingSession(
start_dt,
end_dt,
strategy_universe,
strategy_alpha_model,
rebalance='end_of_month',
account_name=strat_title,
portfolio_id='STATIC001',
portfolio_name=strat_title,
long_only=True,
cash_buffer_percentage=0.01,
data_handler=strategy_data_handler
)
strategy_backtest.run()
# Benchmark: 60/40 US Equities/Bonds
benchmark_symbols = ['SPY', 'AGG']
benchmark_assets = ['EQ:SPY', 'EQ:AGG']
benchmark_universe = StaticUniverse(benchmark_assets)
benchmark_data_source = CSVDailyBarDataSource(csv_dir, Equity, csv_symbols=benchmark_symbols)
benchmark_data_handler = BacktestDataHandler(
benchmark_universe, data_sources=[benchmark_data_source]
)
benchmark_signal_weights = {'EQ:SPY': 0.6, 'EQ:AGG': 0.4}
benchmark_title = '60/40 US Equities/Bonds'
benchmark_alpha_model = FixedSignalsAlphaModel(benchmark_signal_weights)
benchmark_backtest = BacktestTradingSession(
start_dt,
end_dt,
benchmark_universe,
benchmark_alpha_model,
rebalance='end_of_month',
account_name='60/40 US Equities/Bonds',
portfolio_id='6040EQBD',
portfolio_name=benchmark_title,
long_only=True,
cash_buffer_percentage=0.01,
data_handler=benchmark_data_handler
)
benchmark_backtest.run()
output_filename = ('%s_monthly.json' % strat_id).replace('-', '_')
stats = JSONStatistics(
equity_curve=strategy_backtest.get_equity_curve(),
target_allocations=strategy_backtest.get_target_allocations(),
strategy_id=strat_id,
strategy_name=strat_title,
benchmark_curve=benchmark_backtest.get_equity_curve(),
benchmark_id='6040-us-equitiesbonds',
benchmark_name=benchmark_title,
output_filename=output_filename
)
stats.to_file()
if tearsheet:
tearsheet = TearsheetStatistics(
strategy_equity=strategy_backtest.get_equity_curve(),
benchmark_equity=benchmark_backtest.get_equity_curve(),
title=strat_title
)
tearsheet.plot_results()
if __name__ == "__main__":
cli()
|
import pytest
from pyvizio.api.apps import AppConfig
from pyvizio.const import DEVICE_CLASS_SPEAKER, MAX_VOLUME
from .const import (
ACCESS_TOKEN,
APP_LIST,
CH_TYPE,
CURRENT_APP_CONFIG,
CURRENT_EQ,
CURRENT_INPUT,
EQ_LIST,
INPUT_LIST,
INPUT_LIST_WITH_APPS,
MODEL,
RESPONSE_TOKEN,
UNIQUE_ID,
VERSION,
ZEROCONF_HOST,
MockCompletePairingResponse,
MockStartPairingResponse,
)
from tests.async_mock import AsyncMock, patch
class MockInput:
"""Mock Vizio device input."""
def __init__(self, name):
"""Initialize mock Vizio device input."""
self.meta_name = name
self.name = name
def get_mock_inputs(input_list):
"""Return list of MockInput."""
return [MockInput(input) for input in input_list]
@pytest.fixture(name="skip_notifications", autouse=True)
def skip_notifications_fixture():
"""Skip notification calls."""
with patch("homeassistant.components.persistent_notification.async_create"), patch(
"homeassistant.components.persistent_notification.async_dismiss"
):
yield
@pytest.fixture(name="vizio_get_unique_id", autouse=True)
def vizio_get_unique_id_fixture():
"""Mock get vizio unique ID."""
with patch(
"homeassistant.components.vizio.config_flow.VizioAsync.get_unique_id",
AsyncMock(return_value=UNIQUE_ID),
):
yield
@pytest.fixture(name="vizio_data_coordinator_update", autouse=True)
def vizio_data_coordinator_update_fixture():
"""Mock get data coordinator update."""
with patch(
"homeassistant.components.vizio.gen_apps_list_from_url",
return_value=APP_LIST,
):
yield
@pytest.fixture(name="vizio_no_unique_id")
def vizio_no_unique_id_fixture():
"""Mock no vizio unique ID returrned."""
with patch(
"homeassistant.components.vizio.config_flow.VizioAsync.get_unique_id",
return_value=None,
):
yield
@pytest.fixture(name="vizio_connect")
def vizio_connect_fixture():
"""Mock valid vizio device and entry setup."""
with patch(
"homeassistant.components.vizio.config_flow.VizioAsync.validate_ha_config",
AsyncMock(return_value=True),
):
yield
@pytest.fixture(name="vizio_complete_pairing")
def vizio_complete_pairing_fixture():
"""Mock complete vizio pairing workflow."""
with patch(
"homeassistant.components.vizio.config_flow.VizioAsync.start_pair",
return_value=MockStartPairingResponse(CH_TYPE, RESPONSE_TOKEN),
), patch(
"homeassistant.components.vizio.config_flow.VizioAsync.pair",
return_value=MockCompletePairingResponse(ACCESS_TOKEN),
):
yield
@pytest.fixture(name="vizio_start_pairing_failure")
def vizio_start_pairing_failure_fixture():
"""Mock vizio start pairing failure."""
with patch(
"homeassistant.components.vizio.config_flow.VizioAsync.start_pair",
return_value=None,
):
yield
@pytest.fixture(name="vizio_invalid_pin_failure")
def vizio_invalid_pin_failure_fixture():
"""Mock vizio failure due to invalid pin."""
with patch(
"homeassistant.components.vizio.config_flow.VizioAsync.start_pair",
return_value=MockStartPairingResponse(CH_TYPE, RESPONSE_TOKEN),
), patch(
"homeassistant.components.vizio.config_flow.VizioAsync.pair",
return_value=None,
):
yield
@pytest.fixture(name="vizio_bypass_setup")
def vizio_bypass_setup_fixture():
"""Mock component setup."""
with patch("homeassistant.components.vizio.async_setup_entry", return_value=True):
yield
@pytest.fixture(name="vizio_bypass_update")
def vizio_bypass_update_fixture():
"""Mock component update."""
with patch(
"homeassistant.components.vizio.media_player.VizioAsync.can_connect_with_auth_check",
return_value=True,
), patch("homeassistant.components.vizio.media_player.VizioDevice.async_update"):
yield
@pytest.fixture(name="vizio_guess_device_type")
def vizio_guess_device_type_fixture():
"""Mock vizio async_guess_device_type function."""
with patch(
"homeassistant.components.vizio.config_flow.async_guess_device_type",
return_value="speaker",
):
yield
@pytest.fixture(name="vizio_cant_connect")
def vizio_cant_connect_fixture():
"""Mock vizio device can't connect with valid auth."""
with patch(
"homeassistant.components.vizio.config_flow.VizioAsync.validate_ha_config",
AsyncMock(return_value=False),
):
yield
@pytest.fixture(name="vizio_update")
def vizio_update_fixture():
"""Mock valid updates to vizio device."""
with patch(
"homeassistant.components.vizio.media_player.VizioAsync.can_connect_with_auth_check",
return_value=True,
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_all_settings",
return_value={
"volume": int(MAX_VOLUME[DEVICE_CLASS_SPEAKER] / 2),
"eq": CURRENT_EQ,
"mute": "Off",
},
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_setting_options",
return_value=EQ_LIST,
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_current_input",
return_value=CURRENT_INPUT,
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_inputs_list",
return_value=get_mock_inputs(INPUT_LIST),
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_power_state",
return_value=True,
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_model_name",
return_value=MODEL,
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_version",
return_value=VERSION,
):
yield
@pytest.fixture(name="vizio_update_with_apps")
def vizio_update_with_apps_fixture(vizio_update: pytest.fixture):
"""Mock valid updates to vizio device that supports apps."""
with patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_inputs_list",
return_value=get_mock_inputs(INPUT_LIST_WITH_APPS),
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_current_input",
return_value="CAST",
), patch(
"homeassistant.components.vizio.media_player.VizioAsync.get_current_app_config",
return_value=AppConfig(**CURRENT_APP_CONFIG),
):
yield
@pytest.fixture(name="vizio_hostname_check")
def vizio_hostname_check():
"""Mock vizio hostname resolution."""
with patch(
"homeassistant.components.vizio.config_flow.socket.gethostbyname",
return_value=ZEROCONF_HOST,
):
yield
|
import unittest
import numpy as np
from chainer import testing
from chainercv.visualizations import vis_point
try:
import matplotlib # NOQA
_available = True
except ImportError:
_available = False
@testing.parameterize(
{'visible': np.array([[True, True, False]])},
{'visible': None}
)
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisPoint(unittest.TestCase):
def test_vis_point(self):
img = np.random.randint(
0, 255, size=(3, 32, 32)).astype(np.float32)
point = np.random.uniform(size=(1, 3, 2)).astype(np.float32)
ax = vis_point(img, point, self.visible)
self.assertTrue(isinstance(ax, matplotlib.axes.Axes))
testing.run_module(__name__, __file__)
|
import logging
import socket
import snapcast.control
from snapcast.control.server import CONTROL_PORT
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
SUPPORT_SELECT_SOURCE,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
)
from homeassistant.const import (
CONF_HOST,
CONF_PORT,
STATE_IDLE,
STATE_OFF,
STATE_ON,
STATE_PLAYING,
STATE_UNKNOWN,
)
from homeassistant.helpers import config_validation as cv, entity_platform
from .const import (
ATTR_LATENCY,
ATTR_MASTER,
CLIENT_PREFIX,
CLIENT_SUFFIX,
DATA_KEY,
GROUP_PREFIX,
GROUP_SUFFIX,
SERVICE_JOIN,
SERVICE_RESTORE,
SERVICE_SET_LATENCY,
SERVICE_SNAPSHOT,
SERVICE_UNJOIN,
)
_LOGGER = logging.getLogger(__name__)
SUPPORT_SNAPCAST_CLIENT = (
SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | SUPPORT_SELECT_SOURCE
)
SUPPORT_SNAPCAST_GROUP = (
SUPPORT_VOLUME_MUTE | SUPPORT_VOLUME_SET | SUPPORT_SELECT_SOURCE
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT): cv.port}
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Snapcast platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT, CONTROL_PORT)
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(SERVICE_SNAPSHOT, {}, "snapshot")
platform.async_register_entity_service(SERVICE_RESTORE, {}, "async_restore")
platform.async_register_entity_service(
SERVICE_JOIN, {vol.Required(ATTR_MASTER): cv.entity_id}, handle_async_join
)
platform.async_register_entity_service(SERVICE_UNJOIN, {}, handle_async_unjoin)
platform.async_register_entity_service(
SERVICE_SET_LATENCY,
{vol.Required(ATTR_LATENCY): cv.positive_int},
handle_set_latency,
)
try:
server = await snapcast.control.create_server(
hass.loop, host, port, reconnect=True
)
except socket.gaierror:
_LOGGER.error("Could not connect to Snapcast server at %s:%d", host, port)
return
# Note: Host part is needed, when using multiple snapservers
hpid = f"{host}:{port}"
groups = [SnapcastGroupDevice(group, hpid) for group in server.groups]
clients = [SnapcastClientDevice(client, hpid) for client in server.clients]
devices = groups + clients
hass.data[DATA_KEY] = devices
async_add_entities(devices)
async def handle_async_join(entity, service_call):
"""Handle the entity service join."""
if not isinstance(entity, SnapcastClientDevice):
raise ValueError("Entity is not a client. Can only join clients.")
await entity.async_join(service_call.data[ATTR_MASTER])
async def handle_async_unjoin(entity, service_call):
"""Handle the entity service unjoin."""
if not isinstance(entity, SnapcastClientDevice):
raise ValueError("Entity is not a client. Can only unjoin clients.")
await entity.async_unjoin()
async def handle_set_latency(entity, service_call):
"""Handle the entity service set_latency."""
if not isinstance(entity, SnapcastClientDevice):
raise ValueError("Latency can only be set for a Snapcast client.")
await entity.async_set_latency(service_call.data[ATTR_LATENCY])
class SnapcastGroupDevice(MediaPlayerEntity):
"""Representation of a Snapcast group device."""
def __init__(self, group, uid_part):
"""Initialize the Snapcast group device."""
group.set_callback(self.schedule_update_ha_state)
self._group = group
self._uid = f"{GROUP_PREFIX}{uid_part}_{self._group.identifier}"
@property
def state(self):
"""Return the state of the player."""
return {
"idle": STATE_IDLE,
"playing": STATE_PLAYING,
"unknown": STATE_UNKNOWN,
}.get(self._group.stream_status, STATE_UNKNOWN)
@property
def unique_id(self):
"""Return the ID of snapcast group."""
return self._uid
@property
def name(self):
"""Return the name of the device."""
return f"{GROUP_PREFIX}{self._group.identifier}"
@property
def source(self):
"""Return the current input source."""
return self._group.stream
@property
def volume_level(self):
"""Return the volume level."""
return self._group.volume / 100
@property
def is_volume_muted(self):
"""Volume muted."""
return self._group.muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SNAPCAST_GROUP
@property
def source_list(self):
"""List of available input sources."""
return list(self._group.streams_by_name().keys())
@property
def device_state_attributes(self):
"""Return the state attributes."""
name = f"{self._group.friendly_name} {GROUP_SUFFIX}"
return {"friendly_name": name}
@property
def should_poll(self):
"""Do not poll for state."""
return False
async def async_select_source(self, source):
"""Set input source."""
streams = self._group.streams_by_name()
if source in streams:
await self._group.set_stream(streams[source].identifier)
self.async_write_ha_state()
async def async_mute_volume(self, mute):
"""Send the mute command."""
await self._group.set_muted(mute)
self.async_write_ha_state()
async def async_set_volume_level(self, volume):
"""Set the volume level."""
await self._group.set_volume(round(volume * 100))
self.async_write_ha_state()
def snapshot(self):
"""Snapshot the group state."""
self._group.snapshot()
async def async_restore(self):
"""Restore the group state."""
await self._group.restore()
class SnapcastClientDevice(MediaPlayerEntity):
"""Representation of a Snapcast client device."""
def __init__(self, client, uid_part):
"""Initialize the Snapcast client device."""
client.set_callback(self.schedule_update_ha_state)
self._client = client
self._uid = f"{CLIENT_PREFIX}{uid_part}_{self._client.identifier}"
@property
def unique_id(self):
"""
Return the ID of this snapcast client.
Note: Host part is needed, when using multiple snapservers
"""
return self._uid
@property
def identifier(self):
"""Return the snapcast identifier."""
return self._client.identifier
@property
def name(self):
"""Return the name of the device."""
return f"{CLIENT_PREFIX}{self._client.identifier}"
@property
def source(self):
"""Return the current input source."""
return self._client.group.stream
@property
def volume_level(self):
"""Return the volume level."""
return self._client.volume / 100
@property
def is_volume_muted(self):
"""Volume muted."""
return self._client.muted
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_SNAPCAST_CLIENT
@property
def source_list(self):
"""List of available input sources."""
return list(self._client.group.streams_by_name().keys())
@property
def state(self):
"""Return the state of the player."""
if self._client.connected:
return STATE_ON
return STATE_OFF
@property
def device_state_attributes(self):
"""Return the state attributes."""
state_attrs = {}
if self.latency is not None:
state_attrs["latency"] = self.latency
name = f"{self._client.friendly_name} {CLIENT_SUFFIX}"
state_attrs["friendly_name"] = name
return state_attrs
@property
def should_poll(self):
"""Do not poll for state."""
return False
@property
def latency(self):
"""Latency for Client."""
return self._client.latency
async def async_select_source(self, source):
"""Set input source."""
streams = self._client.group.streams_by_name()
if source in streams:
await self._client.group.set_stream(streams[source].identifier)
self.async_write_ha_state()
async def async_mute_volume(self, mute):
"""Send the mute command."""
await self._client.set_muted(mute)
self.async_write_ha_state()
async def async_set_volume_level(self, volume):
"""Set the volume level."""
await self._client.set_volume(round(volume * 100))
self.async_write_ha_state()
async def async_join(self, master):
"""Join the group of the master player."""
master_entity = next(
entity for entity in self.hass.data[DATA_KEY] if entity.entity_id == master
)
if not isinstance(master_entity, SnapcastClientDevice):
raise ValueError("Master is not a client device. Can only join clients.")
master_group = next(
group
for group in self._client.groups_available()
if master_entity.identifier in group.clients
)
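        # Snapcast clients in the same group play the same stream, so joining
        # the master means adding this client to the group that contains it.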
await master_group.add_client(self._client.identifier)
self.async_write_ha_state()
async def async_unjoin(self):
"""Unjoin the group the player is currently in."""
await self._client.group.remove_client(self._client.identifier)
self.async_write_ha_state()
def snapshot(self):
"""Snapshot the client state."""
self._client.snapshot()
async def async_restore(self):
"""Restore the client state."""
await self._client.restore()
async def async_set_latency(self, latency):
"""Set the latency of the client."""
await self._client.set_latency(latency)
self.async_write_ha_state()
|
import datetime
import logging
from typing import Any, Dict, List, Optional
import requests
import somecomfort
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_AUTO,
FAN_DIFFUSE,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_AUX_HEAT,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_HUMIDITY,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_PASSWORD,
CONF_REGION,
CONF_USERNAME,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
ATTR_FAN_ACTION = "fan_action"
CONF_COOL_AWAY_TEMPERATURE = "away_cool_temperature"
CONF_HEAT_AWAY_TEMPERATURE = "away_heat_temperature"
CONF_DEV_ID = "thermostat"
CONF_LOC_ID = "location"
DEFAULT_COOL_AWAY_TEMPERATURE = 88
DEFAULT_HEAT_AWAY_TEMPERATURE = 61
PLATFORM_SCHEMA = vol.All(
cv.deprecated(CONF_REGION),
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(
CONF_COOL_AWAY_TEMPERATURE, default=DEFAULT_COOL_AWAY_TEMPERATURE
): vol.Coerce(int),
vol.Optional(
CONF_HEAT_AWAY_TEMPERATURE, default=DEFAULT_HEAT_AWAY_TEMPERATURE
): vol.Coerce(int),
vol.Optional(CONF_REGION): cv.string,
vol.Optional(CONF_DEV_ID): cv.string,
vol.Optional(CONF_LOC_ID): cv.string,
}
),
)
HVAC_MODE_TO_HW_MODE = {
"SwitchOffAllowed": {HVAC_MODE_OFF: "off"},
"SwitchAutoAllowed": {HVAC_MODE_HEAT_COOL: "auto"},
"SwitchCoolAllowed": {HVAC_MODE_COOL: "cool"},
"SwitchHeatAllowed": {HVAC_MODE_HEAT: "heat"},
}
HW_MODE_TO_HVAC_MODE = {
"off": HVAC_MODE_OFF,
"emheat": HVAC_MODE_HEAT,
"heat": HVAC_MODE_HEAT,
"cool": HVAC_MODE_COOL,
"auto": HVAC_MODE_HEAT_COOL,
}
HW_MODE_TO_HA_HVAC_ACTION = {
"off": CURRENT_HVAC_IDLE,
"fan": CURRENT_HVAC_FAN,
"heat": CURRENT_HVAC_HEAT,
"cool": CURRENT_HVAC_COOL,
}
FAN_MODE_TO_HW = {
"fanModeOnAllowed": {FAN_ON: "on"},
"fanModeAutoAllowed": {FAN_AUTO: "auto"},
"fanModeCirculateAllowed": {FAN_DIFFUSE: "circulate"},
}
HW_FAN_MODE_TO_HA = {
"on": FAN_ON,
"auto": FAN_AUTO,
"circulate": FAN_DIFFUSE,
"follow schedule": FAN_AUTO,
}
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Honeywell thermostat."""
username = config.get(CONF_USERNAME)
password = config.get(CONF_PASSWORD)
try:
client = somecomfort.SomeComfort(username, password)
except somecomfort.AuthError:
_LOGGER.error("Failed to login to honeywell account %s", username)
return
except somecomfort.SomeComfortError:
_LOGGER.error(
"Failed to initialize the Honeywell client: "
"Check your configuration (username, password), "
"or maybe you have exceeded the API rate limit?"
)
return
dev_id = config.get(CONF_DEV_ID)
loc_id = config.get(CONF_LOC_ID)
cool_away_temp = config.get(CONF_COOL_AWAY_TEMPERATURE)
heat_away_temp = config.get(CONF_HEAT_AWAY_TEMPERATURE)
add_entities(
[
HoneywellUSThermostat(
client,
device,
cool_away_temp,
heat_away_temp,
username,
password,
)
for location in client.locations_by_id.values()
for device in location.devices_by_id.values()
if (
(not loc_id or location.locationid == loc_id)
and (not dev_id or device.deviceid == dev_id)
)
]
)
class HoneywellUSThermostat(ClimateEntity):
"""Representation of a Honeywell US Thermostat."""
def __init__(
self, client, device, cool_away_temp, heat_away_temp, username, password
):
"""Initialize the thermostat."""
self._client = client
self._device = device
self._cool_away_temp = cool_away_temp
self._heat_away_temp = heat_away_temp
self._away = False
self._username = username
self._password = password
_LOGGER.debug("latestData = %s ", device._data)
# not all honeywell HVACs support all modes
mappings = [v for k, v in HVAC_MODE_TO_HW_MODE.items() if device.raw_ui_data[k]]
self._hvac_mode_map = {k: v for d in mappings for k, v in d.items()}
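        # Flatten the per-capability dicts into a single HA-mode -> device-mode
        # map, e.g. {HVAC_MODE_OFF: "off", HVAC_MODE_HEAT: "heat", ...},
        # keeping only the modes this device reports as allowed.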
self._supported_features = (
SUPPORT_PRESET_MODE
| SUPPORT_TARGET_TEMPERATURE
| SUPPORT_TARGET_TEMPERATURE_RANGE
)
if device._data["canControlHumidification"]:
self._supported_features |= SUPPORT_TARGET_HUMIDITY
if device.raw_ui_data["SwitchEmergencyHeatAllowed"]:
self._supported_features |= SUPPORT_AUX_HEAT
if not device._data["hasFan"]:
return
# not all honeywell fans support all modes
mappings = [v for k, v in FAN_MODE_TO_HW.items() if device.raw_fan_data[k]]
self._fan_mode_map = {k: v for d in mappings for k, v in d.items()}
self._supported_features |= SUPPORT_FAN_MODE
@property
def name(self) -> Optional[str]:
"""Return the name of the honeywell, if any."""
return self._device.name
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the device specific state attributes."""
data = {}
data[ATTR_FAN_ACTION] = "running" if self._device.fan_running else "idle"
if self._device.raw_dr_data:
data["dr_phase"] = self._device.raw_dr_data.get("Phase")
return data
@property
def supported_features(self) -> int:
"""Return the list of supported features."""
return self._supported_features
@property
def min_temp(self) -> float:
"""Return the minimum temperature."""
if self.hvac_mode in [HVAC_MODE_COOL, HVAC_MODE_HEAT_COOL]:
return self._device.raw_ui_data["CoolLowerSetptLimit"]
if self.hvac_mode == HVAC_MODE_HEAT:
return self._device.raw_ui_data["HeatLowerSetptLimit"]
return None
@property
def max_temp(self) -> float:
"""Return the maximum temperature."""
if self.hvac_mode == HVAC_MODE_COOL:
return self._device.raw_ui_data["CoolUpperSetptLimit"]
if self.hvac_mode in [HVAC_MODE_HEAT, HVAC_MODE_HEAT_COOL]:
return self._device.raw_ui_data["HeatUpperSetptLimit"]
return None
@property
def temperature_unit(self) -> str:
"""Return the unit of measurement."""
return TEMP_CELSIUS if self._device.temperature_unit == "C" else TEMP_FAHRENHEIT
@property
def current_humidity(self) -> Optional[int]:
"""Return the current humidity."""
return self._device.current_humidity
@property
def hvac_mode(self) -> str:
"""Return hvac operation ie. heat, cool mode."""
return HW_MODE_TO_HVAC_MODE[self._device.system_mode]
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes."""
return list(self._hvac_mode_map)
@property
def hvac_action(self) -> Optional[str]:
"""Return the current running hvac operation if supported."""
if self.hvac_mode == HVAC_MODE_OFF:
return None
return HW_MODE_TO_HA_HVAC_ACTION[self._device.equipment_output_status]
@property
def current_temperature(self) -> Optional[float]:
"""Return the current temperature."""
return self._device.current_temperature
@property
def target_temperature(self) -> Optional[float]:
"""Return the temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_COOL:
return self._device.setpoint_cool
if self.hvac_mode == HVAC_MODE_HEAT:
return self._device.setpoint_heat
return None
@property
def target_temperature_high(self) -> Optional[float]:
"""Return the highbound target temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return self._device.setpoint_cool
return None
@property
def target_temperature_low(self) -> Optional[float]:
"""Return the lowbound target temperature we try to reach."""
if self.hvac_mode == HVAC_MODE_HEAT_COOL:
return self._device.setpoint_heat
return None
@property
def preset_mode(self) -> Optional[str]:
"""Return the current preset mode, e.g., home, away, temp."""
return PRESET_AWAY if self._away else None
@property
def preset_modes(self) -> Optional[List[str]]:
"""Return a list of available preset modes."""
return [PRESET_NONE, PRESET_AWAY]
@property
def is_aux_heat(self) -> Optional[str]:
"""Return true if aux heater."""
return self._device.system_mode == "emheat"
@property
def fan_mode(self) -> Optional[str]:
"""Return the fan setting."""
return HW_FAN_MODE_TO_HA[self._device.fan_mode]
@property
def fan_modes(self) -> Optional[List[str]]:
"""Return the list of available fan modes."""
return list(self._fan_mode_map)
def _set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
try:
# Get current mode
mode = self._device.system_mode
# Set hold if this is not the case
if getattr(self._device, f"hold_{mode}") is False:
# Get next period key
next_period_key = f"{mode.capitalize()}NextPeriod"
# Get next period raw value
next_period = self._device.raw_ui_data.get(next_period_key)
# Get next period time
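                # next_period counts 15-minute slots from midnight (as implied
                # by the conversion below), giving the hold-until time of day.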
hour, minute = divmod(next_period * 15, 60)
# Set hold time
setattr(self._device, f"hold_{mode}", datetime.time(hour, minute))
# Set temperature
setattr(self._device, f"setpoint_{mode}", temperature)
except somecomfort.SomeComfortError:
_LOGGER.error("Temperature %.1f out of range", temperature)
def set_temperature(self, **kwargs) -> None:
"""Set new target temperature."""
if {HVAC_MODE_COOL, HVAC_MODE_HEAT} & set(self._hvac_mode_map):
self._set_temperature(**kwargs)
try:
if HVAC_MODE_HEAT_COOL in self._hvac_mode_map:
temperature = kwargs.get(ATTR_TARGET_TEMP_HIGH)
if temperature:
self._device.setpoint_cool = temperature
temperature = kwargs.get(ATTR_TARGET_TEMP_LOW)
if temperature:
self._device.setpoint_heat = temperature
except somecomfort.SomeComfortError as err:
_LOGGER.error("Invalid temperature %s: %s", temperature, err)
def set_fan_mode(self, fan_mode: str) -> None:
"""Set new target fan mode."""
self._device.fan_mode = self._fan_mode_map[fan_mode]
def set_hvac_mode(self, hvac_mode: str) -> None:
"""Set new target hvac mode."""
self._device.system_mode = self._hvac_mode_map[hvac_mode]
def _turn_away_mode_on(self) -> None:
"""Turn away on.
Somecomfort does have a proprietary away mode, but it doesn't really
work the way it should. For example: If you set a temperature manually
it doesn't get overwritten when away mode is switched on.
"""
self._away = True
try:
# Get current mode
mode = self._device.system_mode
except somecomfort.SomeComfortError:
_LOGGER.error("Can not get system mode")
return
try:
# Set permanent hold
setattr(self._device, f"hold_{mode}", True)
# Set temperature
setattr(
self._device, f"setpoint_{mode}", getattr(self, f"_{mode}_away_temp")
)
except somecomfort.SomeComfortError:
_LOGGER.error(
"Temperature %.1f out of range", getattr(self, f"_{mode}_away_temp")
)
def _turn_away_mode_off(self) -> None:
"""Turn away off."""
self._away = False
try:
# Disabling all hold modes
self._device.hold_cool = False
self._device.hold_heat = False
except somecomfort.SomeComfortError:
_LOGGER.error("Can not stop hold mode")
def set_preset_mode(self, preset_mode: str) -> None:
"""Set new preset mode."""
if preset_mode == PRESET_AWAY:
self._turn_away_mode_on()
else:
self._turn_away_mode_off()
def turn_aux_heat_on(self) -> None:
"""Turn auxiliary heater on."""
self._device.system_mode = "emheat"
def turn_aux_heat_off(self) -> None:
"""Turn auxiliary heater off."""
if HVAC_MODE_HEAT in self.hvac_modes:
self.set_hvac_mode(HVAC_MODE_HEAT)
else:
self.set_hvac_mode(HVAC_MODE_OFF)
def _retry(self) -> bool:
"""Recreate a new somecomfort client.
When we got an error, the best way to be sure that the next query
will succeed, is to recreate a new somecomfort client.
"""
try:
self._client = somecomfort.SomeComfort(self._username, self._password)
except somecomfort.AuthError:
_LOGGER.error("Failed to login to honeywell account %s", self._username)
return False
except somecomfort.SomeComfortError as ex:
_LOGGER.error("Failed to initialize honeywell client: %s", str(ex))
return False
devices = [
device
for location in self._client.locations_by_id.values()
for device in location.devices_by_id.values()
if device.name == self._device.name
]
if len(devices) != 1:
_LOGGER.error("Failed to find device %s", self._device.name)
return False
self._device = devices[0]
return True
def update(self) -> None:
"""Update the state."""
retries = 3
while retries > 0:
try:
self._device.refresh()
break
except (
somecomfort.client.APIRateLimited,
OSError,
requests.exceptions.ReadTimeout,
) as exp:
retries -= 1
if retries == 0:
raise exp
if not self._retry():
raise exp
_LOGGER.error("SomeComfort update failed, Retrying - Error: %s", exp)
_LOGGER.debug(
"latestData = %s ", self._device._data # pylint: disable=protected-access
)
|
import argparse
import atexit
import cmd
import os
import shutil
import sys
import tempfile
import time
PYLIB = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'pylib')
sys.path.insert(1, PYLIB)
import openrazer._fake_driver as fake_driver
class FakeDevicePrompt(cmd.Cmd):
def __init__(self, device_map, *args, **kwargs):
super(FakeDevicePrompt, self).__init__(*args, **kwargs)
self._device_map = device_map
self._current_device = None
self._ep = {}
self._read = []
self._write = []
# If only 1 device, auto use that
if len(self._device_map) == 1:
self._change_device(list(self._device_map.keys())[0])
else:
self._change_device(None)
def _change_device(self, device_name=None):
if device_name is not None:
self._current_device = device_name
self.prompt = self._current_device + "> "
for endpoint, details in self._device_map[self._current_device].endpoints.items():
self._ep[endpoint] = details[2]
            self._read = [endpoint for endpoint, perm in self._ep.items() if perm in ('r', 'rw')]
self._write = [endpoint for endpoint, perm in self._ep.items() if perm in ('w', 'rw')]
else:
self._current_device = None
self.prompt = "> "
def do_dev(self, arg):
"""
Change current device
"""
        if arg is None or len(arg) == 0:
            print('Need to specify a device name. One of: {0}'.format(','.join(self._device_map.keys())))
        elif arg in self._device_map:
            self._change_device(arg)
        else:
            print('Invalid device name: {0}'.format(arg))
def complete_dev(self, text, line, begidx, endidx):
if not text:
completions = list(self._device_map.keys())
else:
completions = [item for item in list(self._device_map.keys()) if item.startswith(text)]
return completions
def do_list(self, arg):
"""List available device files"""
if self._current_device is not None:
print('Device files')
print('------------')
for endpoint, permission in self._ep.items():
if permission in ('r', 'rw'):
print(" {0:-<2}- {1}".format(permission, endpoint))
else:
print(" {0:->2}- {1}".format(permission, endpoint))
print()
print('Event files')
print('-----------')
for event_id, event_value in sorted(self._device_map[self._current_device].events.items(), key=lambda x: x[0]):
print(" {0: >2} {1}".format(event_id, event_value[0]))
else:
print('Devices')
print('-------')
for device in list(self._device_map.keys()):
print(' {0}'.format(device))
def do_ls(self, arg):
"""List available device files"""
self.do_list(arg)
def do_read(self, arg, binary=False):
"""Read ASCII from given device file"""
if self._current_device is not None:
            if arg in self._read:
result = self._device_map[self._current_device].get(arg, binary=binary)
print(result)
elif arg in self._ep:
print('Device endpoint not readable')
else:
print("Device endpoint not found")
def do_binary_read(self, arg):
"""Read binary from given device file"""
self.do_read(arg, binary=True)
def complete_read(self, text, line, begidx, endidx):
if not text:
completions = self._read
else:
completions = [item for item in self._read if item.startswith(text)]
return completions
complete_binary_read = complete_read
def do_write(self, arg):
"""Write ASCII to device file. DEVICE_FILE DATA"""
if self._current_device is not None:
try:
device_file, data = arg.split(' ', 1)
if device_file in self._ep:
if len(data) > 0:
self._device_map[self._current_device].set(device_file, data)
print("{0}: {1}".format(device_file, self._device_map[self._current_device].get(device_file)))
else:
print("Device endpoint not found")
except ValueError:
print("Must specify a device endpoint then a space then data to write")
def complete_write(self, text, line, begidx, endidx):
if not text:
completions = self._write
else:
completions = [item for item in self._write if item.startswith(text)]
return completions
def do_event(self, arg):
"""Emit an event, format: EVENT_ID KEY_ID STATE
        Where STATE is one of 'up', 'down' or 'repeat'
"""
if self._current_device is not None:
try:
event_file, key_id, value = arg.split(' ')
except ValueError:
print("Usage: event event_file key_id value")
return
if event_file not in self._device_map[self._current_device].events:
print("Event ID {0} is invalid".format(event_file))
else:
try:
bytes_written = self._device_map[self._current_device].emit_kb_event(event_file, int(key_id), value)
print("Wrote {0} bytes to {1}".format(bytes_written, self._device_map[self._current_device].events[event_file][0]))
except ValueError as err:
print("Caught exception: {0}".format(err))
def do_exit(self, arg):
"""Exit"""
if self._current_device is not None:
self._change_device(None)
return False
else:
return True
def do_EOF(self, arg):
"""Press Ctrl+D to exit"""
        return self.do_exit(arg)
def create_environment(device_name, destination):
os.makedirs(destination, exist_ok=True)
try:
fake_device = fake_driver.FakeDevice(device_name, tmp_dir=destination)
return fake_device
except ValueError:
print('Device {0}.cfg not found'.format(device_name))
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument('device', metavar='DEVICE', nargs='*', help='Device config name')
parser.add_argument('--dest', metavar='DESTDIR', required=False, default=None, help='Directory to create driver files in. If omitted then a tmp directory is used')
parser.add_argument('--all', action='store_true', help='Create all possible fake devices')
parser.add_argument('--non-interactive', dest='interactive', action='store_false', help='Dont display prompt, just hang until killed')
parser.add_argument('--clear-dest', action='store_true', help='Clear the destination folder if it exists before starting')
return parser.parse_args()
def run():
args = parse_args()
if args.dest is None:
destination = tempfile.mkdtemp(prefix='tmp_', suffix='_fakerazer')
else:
destination = args.dest
if args.clear_dest and os.path.exists(destination):
shutil.rmtree(destination, ignore_errors=True)
if args.all:
devices = fake_driver.SPECS
else:
devices = args.device
device_map = {}
for device in devices:
        # device name -> FakeDevice instance
        fake_device = create_environment(device, destination)
if fake_device is not None:
device_map[device] = fake_device
if len(device_map) == 0:
print("ERROR: No valid devices passed to script, you either need to pass devices as arguments or use '--all'")
sys.exit(1)
# Register cleanup
if args.dest is None:
atexit.register(lambda: shutil.rmtree(destination, ignore_errors=True))
else:
for device in device_map.values():
            # device is a FakeDevice instance
atexit.register(device.close)
print("Device test directory: {0}".format(destination))
try:
if not args.interactive:
print("Sleeping forever, use Ctrl-C to exit...")
while True:
time.sleep(99999999)
else:
FakeDevicePrompt(device_map).cmdloop()
except KeyboardInterrupt:
pass
if __name__ == '__main__':
run()
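# Example invocations (illustrative only; the script filename and the device
# config name below are placeholders - the real names come from the *.cfg specs
# exposed through fake_driver.SPECS):
#
#   python fake_device.py --all --non-interactive
#   python fake_device.py razerblackwidow --dest /tmp/fakerazer --clear-dest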
|
from django.conf import settings
from weblate.machinery.base import MachineTranslation, MissingConfiguration
DEEPL_TRANSLATE = "https://api.deepl.com/{}/translate"
DEEPL_LANGUAGES = "https://api.deepl.com/{}/languages"
class DeepLTranslation(MachineTranslation):
"""DeepL (Linguee) machine translation support."""
name = "DeepL"
    # This currently seems to be the best MT service, so score it a bit
    # better than the other ones.
max_score = 91
language_map = {
"zh_hans": "zh",
}
def __init__(self):
"""Check configuration."""
super().__init__()
if settings.MT_DEEPL_KEY is None:
raise MissingConfiguration("DeepL requires API key")
def map_language_code(self, code):
"""Convert language to service specific code."""
return super().map_language_code(code).replace("_", "-").upper()
def download_languages(self):
"""List of supported languages is currently hardcoded."""
response = self.request(
"post",
DEEPL_LANGUAGES.format(settings.MT_DEEPL_API_VERSION),
data={"auth_key": settings.MT_DEEPL_KEY},
)
return [x["language"] for x in response.json()]
def download_translations(
self,
source,
language,
text: str,
unit,
user,
search: bool,
threshold: int = 75,
):
"""Download list of possible translations from a service."""
response = self.request(
"post",
DEEPL_TRANSLATE.format(settings.MT_DEEPL_API_VERSION),
data={
"auth_key": settings.MT_DEEPL_KEY,
"text": text,
"source_lang": source,
"target_lang": language,
},
)
payload = response.json()
for translation in payload["translations"]:
yield {
"text": translation["text"],
"quality": self.max_score,
"service": self.name,
"source": text,
}
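# Rough usage sketch (illustrative, not part of the original module; assumes the
# Django settings provide MT_DEEPL_KEY and MT_DEEPL_API_VERSION, and that the
# unit/user arguments may be None since this method does not use them):
#
#   service = DeepLTranslation()
#   # map_language_code() turns Weblate codes into DeepL codes, e.g.
#   # "pt_BR" -> "PT-BR" and "zh_hans" -> "ZH" (via language_map above).
#   for suggestion in service.download_translations(
#           "EN", "DE", "Hello world", unit=None, user=None, search=False):
#       print(suggestion["text"], suggestion["quality"])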
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
trunc_normal = lambda stddev: tf.truncated_normal_initializer(0.0, stddev)
def overfeat_arg_scope(weight_decay=0.0005):
with slim.arg_scope([slim.conv2d, slim.fully_connected],
activation_fn=tf.nn.relu,
weights_regularizer=slim.l2_regularizer(weight_decay),
biases_initializer=tf.zeros_initializer()):
with slim.arg_scope([slim.conv2d], padding='SAME'):
with slim.arg_scope([slim.max_pool2d], padding='VALID') as arg_sc:
return arg_sc
def overfeat(inputs,
num_classes=1000,
is_training=True,
dropout_keep_prob=0.5,
spatial_squeeze=True,
scope='overfeat'):
"""Contains the model definition for the OverFeat network.
The definition for the network was obtained from:
OverFeat: Integrated Recognition, Localization and Detection using
Convolutional Networks
Pierre Sermanet, David Eigen, Xiang Zhang, Michael Mathieu, Rob Fergus and
Yann LeCun, 2014
http://arxiv.org/abs/1312.6229
Note: All the fully_connected layers have been transformed to conv2d layers.
To use in classification mode, resize input to 231x231. To use in fully
convolutional mode, set spatial_squeeze to false.
Args:
inputs: a tensor of size [batch_size, height, width, channels].
num_classes: number of predicted classes.
is_training: whether or not the model is being trained.
dropout_keep_prob: the probability that activations are kept in the dropout
layers during training.
    spatial_squeeze: whether or not to squeeze the spatial dimensions of the
outputs. Useful to remove unnecessary dimensions for classification.
scope: Optional scope for the variables.
Returns:
the last op containing the log predictions and end_points dict.
"""
with tf.variable_scope(scope, 'overfeat', [inputs]) as sc:
end_points_collection = sc.name + '_end_points'
# Collect outputs for conv2d, fully_connected and max_pool2d
with slim.arg_scope([slim.conv2d, slim.fully_connected, slim.max_pool2d],
outputs_collections=end_points_collection):
net = slim.conv2d(inputs, 64, [11, 11], 4, padding='VALID',
scope='conv1')
net = slim.max_pool2d(net, [2, 2], scope='pool1')
net = slim.conv2d(net, 256, [5, 5], padding='VALID', scope='conv2')
net = slim.max_pool2d(net, [2, 2], scope='pool2')
net = slim.conv2d(net, 512, [3, 3], scope='conv3')
net = slim.conv2d(net, 1024, [3, 3], scope='conv4')
net = slim.conv2d(net, 1024, [3, 3], scope='conv5')
net = slim.max_pool2d(net, [2, 2], scope='pool5')
with slim.arg_scope([slim.conv2d],
weights_initializer=trunc_normal(0.005),
biases_initializer=tf.constant_initializer(0.1)):
# Use conv2d instead of fully_connected layers.
net = slim.conv2d(net, 3072, [6, 6], padding='VALID', scope='fc6')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout6')
net = slim.conv2d(net, 4096, [1, 1], scope='fc7')
net = slim.dropout(net, dropout_keep_prob, is_training=is_training,
scope='dropout7')
net = slim.conv2d(net, num_classes, [1, 1],
activation_fn=None,
normalizer_fn=None,
biases_initializer=tf.zeros_initializer(),
scope='fc8')
# Convert end_points_collection into a end_point dict.
end_points = slim.utils.convert_collection_to_dict(end_points_collection)
if spatial_squeeze:
net = tf.squeeze(net, [1, 2], name='fc8/squeezed')
end_points[sc.name + '/fc8'] = net
return net, end_points
overfeat.default_image_size = 231
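# Minimal usage sketch (illustrative, not part of the original module; assumes a
# TF1-style graph-mode environment where tf.contrib.slim is available, as
# imported above).
if __name__ == '__main__':
  # Build the OverFeat classification graph for a batch of 231x231 RGB images.
  images = tf.placeholder(tf.float32, [None, 231, 231, 3])
  with slim.arg_scope(overfeat_arg_scope()):
    logits, end_points = overfeat(images, num_classes=1000, is_training=False)
  # With spatial_squeeze=True the logits have shape [batch_size, num_classes].
  print(logits)
  print(sorted(end_points.keys()))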
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from netapp import NetAppCollector
###############################################################################
class TestNetAppCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('NetAppCollector', {
})
self.collector = NetAppCollector(config, None)
def test_import(self):
self.assertTrue(NetAppCollector)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
import tensorflow as tf
from keras import backend as K
from keras.engine import Layer
class DecayingDropoutLayer(Layer):
"""
    Layer that applies dropout with an exponentially decaying keep rate during
    training.
:param initial_keep_rate: the initial keep rate of decaying dropout.
:param decay_interval: the decay interval of decaying dropout.
:param decay_rate: the decay rate of decaying dropout.
:param noise_shape: a 1D integer tensor representing the shape of the
binary dropout mask that will be multiplied with the input.
:param seed: a python integer to use as random seed.
:param kwargs: standard layer keyword arguments.
Examples:
>>> import matchzoo as mz
>>> layer = mz.contrib.layers.DecayingDropoutLayer(
... initial_keep_rate=1.0,
... decay_interval=10000,
... decay_rate=0.977,
... )
    >>> num_batch, num_dim = 5, 10
>>> layer.build([num_batch, num_dim])
"""
def __init__(self,
initial_keep_rate: float = 1.0,
decay_interval: int = 10000,
decay_rate: float = 0.977,
noise_shape=None,
seed=None,
**kwargs):
""":class: 'DecayingDropoutLayer' constructor."""
super(DecayingDropoutLayer, self).__init__(**kwargs)
self._iterations = None
self._initial_keep_rate = initial_keep_rate
self._decay_interval = decay_interval
self._decay_rate = min(1.0, max(0.0, decay_rate))
self._noise_shape = noise_shape
self._seed = seed
def _get_noise_shape(self, inputs):
if self._noise_shape is None:
return self._noise_shape
symbolic_shape = tf.shape(inputs)
noise_shape = [symbolic_shape[axis] if shape is None else shape
for axis, shape in enumerate(self._noise_shape)]
return tuple(noise_shape)
def build(self, input_shape):
"""
Build the layer.
:param input_shape: the shape of the input tensor,
for DecayingDropoutLayer we need one input tensor.
"""
self._iterations = self.add_weight(name='iterations',
shape=(1,),
dtype=K.floatx(),
initializer='zeros',
trainable=False)
super(DecayingDropoutLayer, self).build(input_shape)
def call(self, inputs, training=None):
"""
The computation logic of DecayingDropoutLayer.
:param inputs: an input tensor.
"""
noise_shape = self._get_noise_shape(inputs)
t = tf.cast(self._iterations, K.floatx()) + 1
p = t / float(self._decay_interval)
keep_rate = self._initial_keep_rate * tf.pow(self._decay_rate, p)
def dropped_inputs():
update_op = self._iterations.assign_add([1])
with tf.control_dependencies([update_op]):
return tf.nn.dropout(inputs, 1 - keep_rate[0], noise_shape,
seed=self._seed)
return K.in_train_phase(dropped_inputs, inputs, training=training)
def get_config(self):
"""Get the config dict of DecayingDropoutLayer."""
config = {'initial_keep_rate': self._initial_keep_rate,
'decay_interval': self._decay_interval,
'decay_rate': self._decay_rate,
'noise_shape': self._noise_shape,
'seed': self._seed}
base_config = super(DecayingDropoutLayer, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
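# Illustrative helper (not part of the original module): mirrors the decay
# schedule implemented in DecayingDropoutLayer.call in plain Python.
def _example_keep_rate(iterations: int,
                       initial_keep_rate: float = 1.0,
                       decay_interval: int = 10000,
                       decay_rate: float = 0.977) -> float:
    """Return the keep rate after `iterations` training updates.

    The rate decays exponentially: with the defaults it is
    1.0 * 0.977 ** 1 ~= 0.977 after 10000 updates and ~0.955 after 20000.
    """
    t = iterations + 1
    return initial_keep_rate * decay_rate ** (t / float(decay_interval))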
|
import os.path as op
import numpy as np
from numpy.testing import assert_allclose
import pytest
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from mne.viz.utils import (compare_fiff, _fake_click, _compute_scalings,
_validate_if_list_of_axes, _get_color_list,
_setup_vmin_vmax, center_cmap, centers_to_edges,
_make_event_color_dict)
from mne.viz import ClickableImage, add_background_image, mne_analyze_colormap
from mne.utils import run_tests_if_main
from mne.io import read_raw_fif
from mne.event import read_events
from mne.epochs import Epochs
base_dir = op.join(op.dirname(__file__), '..', '..', 'io', 'tests', 'data')
raw_fname = op.join(base_dir, 'test_raw.fif')
cov_fname = op.join(base_dir, 'test-cov.fif')
ev_fname = op.join(base_dir, 'test_raw-eve.fif')
def test_setup_vmin_vmax_warns():
"""Test that _setup_vmin_vmax warns properly."""
expected_msg = r'\(min=0.0, max=1\) range.*minimum of data is -1'
with pytest.warns(UserWarning, match=expected_msg):
_setup_vmin_vmax(data=[-1, 0], vmin=None, vmax=None, norm=True)
def test_get_color_list():
"""Test getting a colormap from rcParams."""
colors = _get_color_list()
assert isinstance(colors, list)
colors_no_red = _get_color_list(annotations=True)
assert '#ff0000' not in colors_no_red
def test_mne_analyze_colormap():
"""Test mne_analyze_colormap."""
pytest.raises(ValueError, mne_analyze_colormap, [0])
pytest.raises(ValueError, mne_analyze_colormap, [-1, 1, 2])
pytest.raises(ValueError, mne_analyze_colormap, [0, 2, 1])
def test_compare_fiff():
"""Test compare_fiff."""
compare_fiff(raw_fname, cov_fname, read_limit=0, show=False)
plt.close('all')
def test_clickable_image():
"""Test the ClickableImage class."""
# Gen data and create clickable image
im = np.random.RandomState(0).randn(100, 100)
clk = ClickableImage(im)
clicks = [(12, 8), (46, 48), (10, 24)]
# Generate clicks
for click in clicks:
_fake_click(clk.fig, clk.ax, click, xform='data')
assert_allclose(np.array(clicks), np.array(clk.coords))
assert (len(clicks) == len(clk.coords))
# Exporting to layout
lt = clk.to_layout()
assert (lt.pos.shape[0] == len(clicks))
assert_allclose(lt.pos[1, 0] / lt.pos[2, 0],
clicks[1][0] / float(clicks[2][0]))
clk.plot_clicks()
plt.close('all')
def test_add_background_image():
"""Test adding background image to a figure."""
rng = np.random.RandomState(0)
for ii in range(2):
f, axs = plt.subplots(1, 2)
x, y = rng.randn(2, 10)
im = rng.randn(10, 10)
axs[0].scatter(x, y)
axs[1].scatter(y, x)
for ax in axs:
ax.set_aspect(1)
# Background without changing aspect
if ii == 0:
ax_im = add_background_image(f, im)
assert (ax_im.get_aspect() == 'auto')
for ax in axs:
assert (ax.get_aspect() == 1)
else:
# Background with changing aspect
ax_im_asp = add_background_image(f, im, set_ratios='auto')
assert (ax_im_asp.get_aspect() == 'auto')
for ax in axs:
assert (ax.get_aspect() == 'auto')
plt.close('all')
# Make sure passing None as image returns None
f, axs = plt.subplots(1, 2)
assert (add_background_image(f, None) is None)
plt.close('all')
def test_auto_scale():
"""Test auto-scaling of channels for quick plotting."""
raw = read_raw_fif(raw_fname)
epochs = Epochs(raw, read_events(ev_fname))
rand_data = np.random.randn(10, 100)
for inst in [raw, epochs]:
scale_grad = 1e10
scalings_def = dict([('eeg', 'auto'), ('grad', scale_grad),
('stim', 'auto')])
# Test for wrong inputs
with pytest.raises(ValueError, match=r".*scalings.*'foo'.*"):
inst.plot(scalings='foo')
# Make sure compute_scalings doesn't change anything not auto
scalings_new = _compute_scalings(scalings_def, inst)
assert (scale_grad == scalings_new['grad'])
assert (scalings_new['eeg'] != 'auto')
with pytest.raises(ValueError, match='Must supply either Raw or Epochs'):
_compute_scalings(scalings_def, rand_data)
epochs = epochs[0].load_data()
epochs.pick_types(eeg=True, meg=False)
def test_validate_if_list_of_axes():
"""Test validation of axes."""
fig, ax = plt.subplots(2, 2)
pytest.raises(ValueError, _validate_if_list_of_axes, ax)
ax_flat = ax.ravel()
ax = ax.ravel().tolist()
_validate_if_list_of_axes(ax_flat)
_validate_if_list_of_axes(ax_flat, 4)
pytest.raises(ValueError, _validate_if_list_of_axes, ax_flat, 5)
pytest.raises(ValueError, _validate_if_list_of_axes, ax, 3)
pytest.raises(ValueError, _validate_if_list_of_axes, 'error')
pytest.raises(ValueError, _validate_if_list_of_axes, ['error'] * 2)
pytest.raises(ValueError, _validate_if_list_of_axes, ax[0])
pytest.raises(ValueError, _validate_if_list_of_axes, ax, 3)
ax_flat[2] = 23
pytest.raises(ValueError, _validate_if_list_of_axes, ax_flat)
_validate_if_list_of_axes(ax, 4)
plt.close('all')
def test_center_cmap():
"""Test centering of colormap."""
from matplotlib.colors import LinearSegmentedColormap
from matplotlib.pyplot import Normalize
cmap = center_cmap(cm.get_cmap("RdBu"), -5, 10)
assert isinstance(cmap, LinearSegmentedColormap)
# get new colors for values -5 (red), 0 (white), and 10 (blue)
new_colors = cmap(Normalize(-5, 10)([-5, 0, 10]))
# get original colors for 0 (red), 0.5 (white), and 1 (blue)
reference = cm.RdBu([0., 0.5, 1.])
assert_allclose(new_colors, reference)
# new and old colors at 0.5 must be different
assert not np.allclose(cmap(0.5), reference[1])
def test_centers_to_edges():
"""Test centers_to_edges."""
assert_allclose(centers_to_edges([0, 1, 2])[0], [-0.5, 0.5, 1.5, 2.5])
assert_allclose(centers_to_edges([0])[0], [-0.001, 0.001])
assert_allclose(centers_to_edges([1])[0], [0.999, 1.001])
assert_allclose(centers_to_edges([1000])[0], [999., 1001.])
def test_event_color_dict():
"""Test handling of event_color."""
one = _make_event_color_dict('k')
two = _make_event_color_dict((0, 0, 0))
three = _make_event_color_dict('#000')
assert one == two
assert one == three
# test dict with integer keys / event name keys
event_id = dict(foo=1, bar=2)
one = _make_event_color_dict({1: 'r', 2: 'b'}, event_id=event_id)
two = _make_event_color_dict(dict(foo='r', bar='b'), event_id=event_id)
assert one == two
# test default value
one = _make_event_color_dict({1: 'r', -1: 'b'}, event_id=event_id)
two = _make_event_color_dict({1: 'r', 2: 'b'}, event_id=event_id)
assert one[2] == two[2]
# test error
with pytest.raises(KeyError, match='must be strictly positive, or -1'):
_ = _make_event_color_dict({-2: 'r', -1: 'b'})
run_tests_if_main()
|
import time
from react.render import render_component
from .settings import Components
def median(l):
half = int(len(l) / 2)
l.sort()
if len(l) % 2 == 0:
return (l[half-1] + l[half]) / 2.0
else:
return l[half]
def run_perf_test():
render_component_times = []
rendered_components = []
iteration_count = 25
for i in range(iteration_count):
start = time.time()
rendered_components.append(
render_component(
Components.PERF_TEST,
{'name': 'world'},
to_static_markup=True,
)
)
end = time.time()
render_component_times.append(end - start)
for component in rendered_components:
assert str(component) == '<span>Hello world</span>'
print('Total time taken to render a component {iteration_count} times: {value}'.format(
iteration_count=iteration_count,
value=sum(render_component_times)
))
print('Times: {}'.format(render_component_times))
print('Max: {}'.format(max(render_component_times)))
print('Min: {}'.format(min(render_component_times)))
print('Mean: {}'.format(sum(render_component_times) / len(render_component_times)))
print('Median: {}'.format(median(render_component_times)))
|
import os
import platform
from typing import Any, Dict
from homeassistant.const import __version__ as current_version
from homeassistant.loader import bind_hass
from homeassistant.util.package import is_virtual_env
from .typing import HomeAssistantType
@bind_hass
async def async_get_system_info(hass: HomeAssistantType) -> Dict[str, Any]:
"""Return info about the system."""
info_object = {
"installation_type": "Unknown",
"version": current_version,
"dev": "dev" in current_version,
"hassio": hass.components.hassio.is_hassio(),
"virtualenv": is_virtual_env(),
"python_version": platform.python_version(),
"docker": False,
"arch": platform.machine(),
"timezone": str(hass.config.time_zone),
"os_name": platform.system(),
"os_version": platform.release(),
}
if platform.system() == "Windows":
info_object["os_version"] = platform.win32_ver()[0]
elif platform.system() == "Darwin":
info_object["os_version"] = platform.mac_ver()[0]
elif platform.system() == "Linux":
info_object["docker"] = os.path.isfile("/.dockerenv")
# Determine installation type on current data
if info_object["docker"]:
info_object["installation_type"] = "Home Assistant Container"
elif is_virtual_env():
info_object["installation_type"] = "Home Assistant Core"
# Enrich with Supervisor information
if hass.components.hassio.is_hassio():
info = hass.components.hassio.get_info()
host = hass.components.hassio.get_host_info()
info_object["supervisor"] = info.get("supervisor")
info_object["host_os"] = host.get("operating_system")
info_object["chassis"] = host.get("chassis")
info_object["docker_version"] = info.get("docker")
if info.get("hassos") is not None:
info_object["installation_type"] = "Home Assistant OS"
else:
info_object["installation_type"] = "Home Assistant Supervised"
return info_object
|
import os
import unittest
def get_test_path():
return os.path.abspath(os.path.dirname(__file__))
class RoslibManifestTest(unittest.TestCase):
def test_ManifestException(self):
from roslib.manifest import ManifestException
self.assert_(isinstance(ManifestException(), Exception))
def test_Depend(self):
from roslib.manifestlib import Depend
for bad in [None, '']:
try:
Depend(bad)
self.fail('should have failed on [%s]' % bad)
except ValueError:
pass
d = Depend('roslib')
self.assertEquals('roslib', str(d))
self.assertEquals('roslib', repr(d))
self.assertEquals('<depend package="roslib" />', d.xml())
self.assertEquals(d, Depend('roslib'))
self.assertNotEquals(d, Depend('roslib2'))
self.assertNotEquals(d, 1)
def test_ROSDep(self):
from roslib.manifest import ROSDep
for bad in [None, '']:
try:
rd = ROSDep(bad)
self.fail('should have failed on [%s]' % bad)
except ValueError:
pass
rd = ROSDep('python')
self.assertEquals('<rosdep name="python" />', rd.xml())
def test_VersionControl(self):
from roslib.manifest import VersionControl
ros_svn = 'https://ros.svn.sf.net/svnroot'
bad = [(None, ros_svn)]
for type_, url in bad:
try:
VersionControl(type_, url)
self.fail('should have failed on [%s] [%s]' % (type_, url))
except ValueError:
pass
tests = [
('svn', ros_svn, '<versioncontrol type="svn" url="%s" />' % ros_svn),
('cvs', None, '<versioncontrol type="cvs" />'),
]
for type_, url, xml in tests:
vc = VersionControl(type_, url)
self.assertEquals(type_, vc.type)
self.assertEquals(url, vc.url)
self.assertEquals(xml, vc.xml())
def _subtest_parse_example1(self, m):
from roslib.manifest import Manifest
self.assert_(isinstance(m, Manifest))
self.assertEquals('a brief description', m.brief)
self.assertEquals('Line 1\nLine 2', m.description.strip())
self.assertEquals('The authors\ngo here', m.author.strip())
self.assertEquals('Public Domain\nwith other stuff', m.license.strip())
self.assertEquals('http://pr.willowgarage.com/package/', m.url)
self.assertEquals('http://www.willowgarage.com/files/willowgarage/robot10.jpg', m.logo)
dpkgs = [d.package for d in m.depends]
self.assertEquals({'pkgname', 'common'}, set(dpkgs))
rdpkgs = [d.name for d in m.rosdeps]
self.assertEquals({'python', 'bar', 'baz'}, set(rdpkgs))
def test_parse_example1_file(self):
from roslib.manifest import parse_file
p = os.path.join(get_test_path(), 'manifest_tests', 'example1.xml')
self._subtest_parse_example1(parse_file(p))
def test_parse_example1_string(self):
from roslib.manifest import parse
self._subtest_parse_example1(parse(EXAMPLE1))
def test_Manifest_str(self):
# just make sure it doesn't crash
from roslib.manifest import parse
str(parse(EXAMPLE1))
def test_Manifest_xml(self):
from roslib.manifest import parse
m = parse(EXAMPLE1)
self._subtest_parse_example1(m)
# verify roundtrip
m2 = parse(m.xml())
self._subtest_parse_example1(m2)
def test_parse_bad_file(self):
from roslib.manifest import parse_file
        # have to import ManifestException from roslib.manifestlib due to weirdness when run in --cov mode
from roslib.manifestlib import ManifestException
base_p = os.path.join(get_test_path(), 'manifest_tests')
for b in ['bad1.xml', 'bad2.xml', 'bad3.xml']:
p = os.path.join(base_p, b)
try:
parse_file(p)
self.fail('parse should have failed on bad manifest')
except ManifestException as e:
print(str(e))
self.assert_(b in str(e), 'file name should be in error message: %s' % (str(e)))
EXAMPLE1 = """<package>
<description brief="a brief description">Line 1
Line 2
</description>
<author>The authors
go here</author>
<license>Public Domain
with other stuff</license>
<url>http://pr.willowgarage.com/package/</url>
<logo>http://www.willowgarage.com/files/willowgarage/robot10.jpg</logo>
<depend package="pkgname" />
<depend package="common"/>
<export>
<cpp cflags="-I${prefix}/include" lflags="-L${prefix}/lib -lros"/>
<cpp os="osx" cflags="-I${prefix}/include" lflags="-L${prefix}/lib -lrosthread -framework CoreServices"/>
</export>
<rosdep name="python" />
<rosdep name="bar" />
<rosdep name="baz" />
<rosbuild2>
<depend thirdparty="thisshouldbeokay"/>
</rosbuild2>
</package>"""
|
import os
import pytest
from molecule import util
from molecule.provisioner import ansible_playbooks
@pytest.fixture
def _provisioner_section_data():
return {
'provisioner': {
'name': 'ansible',
'options': {},
'lint': {
'name': 'ansible-lint',
},
'config_options': {},
},
}
@pytest.fixture
def _instance(_provisioner_section_data, config_instance):
return ansible_playbooks.AnsiblePlaybooks(config_instance)
def test_cleanup_property_is_optional(_instance):
assert _instance._config.provisioner.playbooks.cleanup is None
def test_create_property(_instance):
x = os.path.join(_instance._get_playbook_directory(), 'docker',
'create.yml')
assert x == _instance._config.provisioner.playbooks.create
def test_converge_property(_instance):
x = os.path.join(_instance._config.scenario.directory, 'playbook.yml')
assert x == _instance._config.provisioner.playbooks.converge
def test_destroy_property(_instance):
x = os.path.join(_instance._get_playbook_directory(), 'docker',
'destroy.yml')
assert x == _instance._config.provisioner.playbooks.destroy
def test_prepare_property(_instance):
assert _instance._config.provisioner.playbooks.prepare is None
def test_side_effect_property(_instance):
assert _instance._config.provisioner.playbooks.side_effect is None
def test_verify_property(_instance):
assert _instance._config.provisioner.playbooks.verify is None
def test_get_playbook_directory(_instance):
result = _instance._get_playbook_directory()
parts = pytest.helpers.os_split(result)
x = ('molecule', 'provisioner', 'ansible', 'playbooks')
assert x == parts[-4:]
def test_get_playbook(tmpdir, _instance):
x = os.path.join(_instance._config.scenario.directory, 'create.yml')
util.write_file(x, '')
assert x == _instance._get_playbook('create')
def test_get_playbook_returns_bundled_driver_playbook_when_local_not_found(
tmpdir, _instance):
x = os.path.join(_instance._get_playbook_directory(), 'docker',
'create.yml')
assert x == _instance._get_playbook('create')
@pytest.fixture
def _provisioner_driver_section_data():
return {
'provisioner': {
'name': 'ansible',
'playbooks': {
'docker': {
'create': 'docker-create.yml',
},
'create': 'create.yml',
},
}
}
@pytest.mark.parametrize(
'config_instance', ['_provisioner_driver_section_data'], indirect=True)
def test_get_ansible_playbook_with_driver_key(tmpdir, _instance):
x = os.path.join(_instance._config.scenario.directory, 'docker-create.yml')
util.write_file(x, '')
assert x == _instance._get_playbook('create')
@pytest.fixture
def _provisioner_driver_playbook_key_missing_section_data():
return {
'provisioner': {
'name': 'ansible',
'playbooks': {
'docker': {
'create': 'docker-create.yml',
},
'side_effect': 'side_effect.yml',
},
}
}
@pytest.mark.parametrize(
'config_instance',
['_provisioner_driver_playbook_key_missing_section_data'],
indirect=True)
def test_get_ansible_playbook_with_driver_key_when_playbook_key_missing(
tmpdir, _instance):
x = os.path.join(_instance._config.scenario.directory, 'side_effect.yml')
util.write_file(x, '')
assert x == _instance._get_playbook('side_effect')
def test_get_bundled_driver_playbook(_instance):
result = _instance._get_bundled_driver_playbook('create')
parts = pytest.helpers.os_split(result)
x = ('ansible', 'playbooks', 'docker', 'create.yml')
assert x == parts[-4:]
|
from pygal.etree import etree
from pygal.graph.graph import Graph
from pygal.util import alter, cached_property, cut, decorate
class BaseMap(Graph):
"""Base class for maps"""
_dual = True
@cached_property
def _values(self):
"""Getter for series values (flattened)"""
return [
val[1] for serie in self.series for val in serie.values
if val[1] is not None
]
def enumerate_values(self, serie):
"""Hook to replace default enumeration on values"""
return enumerate(serie.values)
def adapt_code(self, area_code):
"""Hook to change the area code"""
return area_code
def _value_format(self, value):
"""
Format value for map value display.
"""
return '%s: %s' % (
self.area_names.get(self.adapt_code(value[0]), '?'),
self._y_format(value[1])
)
def _plot(self):
"""Insert a map in the chart and apply data on it"""
map = etree.fromstring(self.svg_map)
map.set('width', str(self.view.width))
map.set('height', str(self.view.height))
for i, serie in enumerate(self.series):
safe_vals = list(
filter(lambda x: x is not None, cut(serie.values, 1))
)
if not safe_vals:
continue
min_ = min(safe_vals)
max_ = max(safe_vals)
for j, (area_code, value) in self.enumerate_values(serie):
area_code = self.adapt_code(area_code)
if value is None:
continue
if max_ == min_:
ratio = 1
else:
ratio = .3 + .7 * (value - min_) / (max_ - min_)
areae = map.findall(
".//*[@class='%s%s %s map-element']" %
(self.area_prefix, area_code, self.kind)
)
if not areae:
continue
for area in areae:
cls = area.get('class', '').split(' ')
cls.append('color-%d' % i)
cls.append('serie-%d' % i)
cls.append('series')
area.set('class', ' '.join(cls))
area.set('style', 'fill-opacity: %f' % ratio)
metadata = serie.metadata.get(j)
if metadata:
node = decorate(self.svg, area, metadata)
if node != area:
area.remove(node)
for g in map:
if area not in g:
continue
index = list(g).index(area)
g.remove(area)
node.append(area)
g.insert(index, node)
for node in area:
cls = node.get('class', '').split(' ')
cls.append('reactive')
cls.append('tooltip-trigger')
cls.append('map-area')
node.set('class', ' '.join(cls))
alter(node, metadata)
val = self._format(serie, j)
self._tooltip_data(area, val, 0, 0, 'auto')
self.nodes['plot'].append(map)
def _compute_x_labels(self):
pass
def _compute_y_labels(self):
pass
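# Illustrative usage of a concrete BaseMap subclass (assumption: the separately
# packaged pygal_maps_world plugin is installed; it supplies the svg_map,
# area_names and area codes this base class relies on):
#
#   from pygal.maps.world import World
#   chart = World(title='Example map')
#   chart.add('Series A', {'fr': 10, 'de': 25, 'jp': 3})
#   chart.render_to_file('map.svg')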
|
from time import sleep
from weblate.addons.discovery import DiscoveryAddon
from weblate.trans.models import Component, Project
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
"""Command for creating demo project."""
help = "imports demo project and components"
def handle(self, *args, **options):
# Create project
project = Project.objects.create(
name="Demo", slug="demo", web="https://demo.weblate.org/"
)
# Create main component
component = Component.objects.create(
name="Gettext",
slug="gettext",
project=project,
vcs="git",
repo="https://github.com/WeblateOrg/demo.git",
repoweb=(
"https://github.com/WeblateOrg/weblate/"
"blob/{{branch}}/{{filename}}#L{{line}}"
),
filemask="weblate/langdata/locale/*/LC_MESSAGES/django.po",
new_base="weblate/langdata/locale/django.pot",
file_format="po",
license="GPL-3.0-or-later",
)
component.clean()
while component.in_progress():
self.stdout.write(
"Importing base component: {}%".format(component.get_progress()[0])
)
sleep(1)
# Install discovery
DiscoveryAddon.create(
component,
configuration={
"file_format": "po",
"match": (
r"weblate/locale/(?P<language>[^/]*)/"
r"LC_MESSAGES/(?P<component>[^/]*)\.po"
),
"name_template": "Discovered: {{ component|title }}",
"language_regex": "^[^.]+$",
"base_file_template": "",
"remove": True,
},
)
# Manually add Android
Component.objects.create(
name="Android",
slug="android",
project=project,
vcs="git",
repo=component.get_repo_link_url(),
filemask="app/src/main/res/values-*/strings.xml",
template="app/src/main/res/values/strings.xml",
file_format="aresource",
license="GPL-3.0-or-later",
)
|
import unittest
import numpy as np
from chainer import testing
from chainercv.utils import generate_random_bbox
from chainercv.visualizations import vis_bbox
try:
import matplotlib # NOQA
_available = True
except ImportError:
_available = False
@testing.parameterize(
*testing.product_dict([
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': None,
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': None, 'score': (0, 0.5, 1),
'label_names': None},
{
'n_bbox': 3, 'label': None, 'score': None,
'label_names': None},
{
'n_bbox': 3, 'label': (0, 1, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 0, 'label': (), 'score': (),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'), 'no_img': True},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2'),
'instance_colors': [
(255, 0, 0), (0, 255, 0), (0, 0, 255), (100, 100, 100)]},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisBbox(unittest.TestCase):
def setUp(self):
if hasattr(self, 'no_img'):
self.img = None
else:
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.bbox = generate_random_bbox(
self.n_bbox, (48, 32), 8, 16)
if self.label is not None:
self.label = np.array(self.label, dtype=int)
if self.score is not None:
self.score = np.array(self.score)
if not hasattr(self, 'instance_colors'):
self.instance_colors = None
def test_vis_bbox(self):
ax = vis_bbox(
self.img, self.bbox, self.label, self.score,
label_names=self.label_names,
instance_colors=self.instance_colors,
sort_by_score=self.sort_by_score)
self.assertIsInstance(ax, matplotlib.axes.Axes)
@testing.parameterize(*testing.product_dict([
{
'n_bbox': 3, 'label': (0, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2, 1), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 2), 'score': (0, 0.5, 1, 0.75),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (0, 1, 3), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
{
'n_bbox': 3, 'label': (-1, 1, 2), 'score': (0, 0.5, 1),
'label_names': ('c0', 'c1', 'c2')},
], [{'sort_by_score': False}, {'sort_by_score': True}]))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisBboxInvalidInputs(unittest.TestCase):
def setUp(self):
self.img = np.random.randint(0, 255, size=(3, 32, 48))
self.bbox = np.random.uniform(size=(self.n_bbox, 4))
if self.label is not None:
self.label = np.array(self.label, dtype=int)
if self.score is not None:
self.score = np.array(self.score)
if not hasattr(self, 'instance_colors'):
self.instance_colors = None
def test_vis_bbox_invalid_inputs(self):
with self.assertRaises(ValueError):
vis_bbox(
self.img, self.bbox, self.label, self.score,
label_names=self.label_names,
instance_colors=self.instance_colors,
sort_by_score=self.sort_by_score)
testing.run_module(__name__, __file__)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import datetime
import subprocess
from . import plot_scatter_points
from six.moves import range
# Assumes Sysbench 0.5 stderr output.
DATETIME_FORMAT = '{:%m_%d_%Y_%H_%M_}'
DATA_INDICATOR_LINE = 'Threads started!'
TPS = 'tps:'
BREAK = ','
FILENAME_SUFFIX = '_TPS_gnuplot_data.txt'
class STDERRFileDoesNotExistError(Exception):
pass
class PatternNotFoundError(Exception):
pass
class Plotter(object):
"""Plotter generates a per second output graph of TPS vs Thread Values.
Given run configurations, and run stderr filenames for any number of PKB runs,
Plotter extracts TPS values and generates a gnuplot graph which can be
uploaded to cloud storage.
"""
def __init__(self, run_seconds, report_interval, run_uri):
"""Initialize a Plotter.
Args:
run_seconds: (integer) length of run phase.
report_interval: (integer) seconds between TPS reports.
run_uri: (string) run identifier.
"""
self.run_uri = run_uri
self.data_entries_per_file = run_seconds // report_interval
self.filename = self._generate_filename()
self.max_tps = 0
self.iterations = 0
def _generate_filename(self):
"""Generates filename for parsed data.
Returns:
(string): Filename for gnuplot data (tps numbers).
"""
date_string = DATETIME_FORMAT.format(datetime.datetime.now())
filename = date_string + self.run_uri + FILENAME_SUFFIX
return filename
def add_file(self, filename):
"""Given STDERR filename for ONE run with a given thread count, add data.
Args:
filename: (string) Name of file to be parsed.
Raises:
STDERRFileDoesNotExistError:
"""
try:
f = open(filename, 'r')
    except IOError:
raise STDERRFileDoesNotExistError(
('Unable to open file (%s). Assume this is because run failed. Will'
' raise exception to kill run now.' % filename))
data = self._parse_file(f)
f.close()
self._add_data(data)
self.iterations += 1
def _parse_file(self, f):
"""Parses stderr file, f, extracts list of TPS values.
Assumes no warmup phase and only one report per file.
Method will need to be updated if Sysbench output format changes. Assumes
Sysbench 0.5.
Args:
f: (file object) file to be parsed.
Returns:
(list): list of TPS values.
Raises:
PatternNotFoundError: if thread data is missing.
"""
tps_values = []
line = f.readline()
while line:
if line.strip() == DATA_INDICATOR_LINE:
line = f.readline() # blank line
for _ in range(self.data_entries_per_file):
line = f.readline()
          tps_id = line.find(TPS)
          start_id = tps_id + len(TPS)
          end_id = line.find(BREAK, start_id)
          if tps_id == -1 or end_id == -1:
raise PatternNotFoundError('No thread data (OR improper run seconds'
'/report interval given) found in STDERR'
'. Assume run failed.')
tps = float(line[start_id:end_id].strip())
tps_values.append(tps)
if tps > self.max_tps:
self.max_tps = tps
break
line = f.readline()
return tps_values
def _add_data(self, data):
"""Given data, adds to self.filename.
Args:
data: list of tps values.
"""
with open(self.filename, 'a') as f:
for d in data:
f.write(str(d) + '\n')
def plot(self):
"""Generates a graph using gnuplot and data from filename.
"""
p = plot_scatter_points.GnuplotInfo(self.filename,
self.data_entries_per_file,
self.run_uri,
self.max_tps,
self.iterations)
output_gnuplot_file, _ = p.create_file()
subprocess.Popen(['gnuplot', output_gnuplot_file])
# TODO(samspano): Implement copy command to copy output_chart
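# Rough usage sketch (illustrative; assumes this module is driven from within
# PKB and that two Sysbench 0.5 stderr files from a 60 second run reported
# every 2 seconds exist on disk - the filenames below are made up):
#
#   plotter = Plotter(run_seconds=60, report_interval=2, run_uri='example_run')
#   plotter.add_file('sysbench_08_threads.stderr')
#   plotter.add_file('sysbench_16_threads.stderr')
#   plotter.plot()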
|
import numpy as np
import unittest
from chainer.iterators import SerialIterator
from chainer import testing
from chainercv.utils import apply_to_iterator
from chainercv.utils import ProgressHook
class TestProgressHook(unittest.TestCase):
def setUp(self):
def func(*in_values):
n_sample = len(in_values[0])
return [np.random.uniform() for _ in range(n_sample)]
self.func = func
self.dataset = []
for _ in range(5):
H, W = np.random.randint(8, 16, size=2)
self.dataset.append(np.random.randint(0, 256, size=(3, H, W)))
def test_progress_hook(self):
iterator = SerialIterator(self.dataset, 2, repeat=False)
in_values, out_values, rest_values = apply_to_iterator(
self.func, iterator,
hook=ProgressHook(n_total=len(self.dataset)))
# consume all data
for _ in in_values[0]:
pass
def test_progress_hook_with_infinite_iterator(self):
iterator = SerialIterator(self.dataset, 2)
in_values, out_values, rest_values = apply_to_iterator(
self.func, iterator, hook=ProgressHook())
for _ in range(10):
next(in_values[0])
testing.run_module(__name__, __file__)
|
from mypy_extensions import TypedDict
class HPAMetricsDict(TypedDict, total=False):
name: str
target_value: str
current_value: str
class HPAMetricsParser:
def __init__(self, hpa):
self.NAME = "name"
self.TARGET = "target_value"
self.CURRENT = "current_value"
def parse_target(self, metric) -> HPAMetricsDict:
"""
Parse target metrics.
"""
metric_spec = getattr(metric, metric.type.lower())
status: HPAMetricsDict = {}
switchers = {
"Pods": self.parse_pod_metric,
"External": self.parse_external_metric,
"Resource": self.parse_resource_metric,
}
switchers[metric.type](metric_spec, status)
status["target_value"] = (
str(status["target_value"]) if status["target_value"] else "N/A"
)
return status
def parse_current(self, metric) -> HPAMetricsDict:
"""
Parse current metrics
"""
metric_spec = getattr(metric, metric.type.lower())
status: HPAMetricsDict = {}
switchers = {
"Pods": self.parse_pod_metric_current,
"External": self.parse_external_metric_current,
"Resource": self.parse_resource_metric_current,
}
switchers[metric.type](metric_spec, status)
status["current_value"] = (
str(status["current_value"]) if status["current_value"] else "N/A"
)
return status
def parse_external_metric(self, metric_spec, status: HPAMetricsDict):
status["name"] = metric_spec.metric.name
status["target_value"] = (
metric_spec.target.average_value
if getattr(metric_spec.target, "average_value")
else metric_spec.target.value
)
def parse_external_metric_current(self, metric_spec, status: HPAMetricsDict):
status["name"] = metric_spec.metric.name
status["current_value"] = (
metric_spec.current.average_value
if getattr(metric_spec.current, "average_value")
else metric_spec.current.value
)
def parse_pod_metric(self, metric_spec, status: HPAMetricsDict):
status["name"] = metric_spec.metric.name
status["target_value"] = metric_spec.target.average_value
def parse_pod_metric_current(self, metric_spec, status: HPAMetricsDict):
status["name"] = metric_spec.metric.name
status["current_value"] = metric_spec.current.average_value
def parse_resource_metric(self, metric_spec, status: HPAMetricsDict):
status["name"] = metric_spec.name
status["target_value"] = (
metric_spec.target.average_value
if getattr(metric_spec.target, "average_value")
else metric_spec.target.average_utilization
)
def parse_resource_metric_current(self, metric_spec, status: HPAMetricsDict):
status["name"] = metric_spec.name
status["current_value"] = (
metric_spec.current.average_value
if getattr(metric_spec.current, "average_value")
else metric_spec.current.average_utilization
)
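# Minimal, self-contained sketch of how the parser is used (illustrative; not
# part of the original module). The SimpleNamespace objects stand in for the
# kubernetes client metric specs the parser normally receives.
if __name__ == "__main__":
    from types import SimpleNamespace

    pods_metric = SimpleNamespace(
        type="Pods",
        pods=SimpleNamespace(
            metric=SimpleNamespace(name="http_requests"),
            target=SimpleNamespace(average_value="100"),
            current=SimpleNamespace(average_value="42"),
        ),
    )
    parser = HPAMetricsParser(hpa=None)
    print(parser.parse_target(pods_metric))   # {'name': 'http_requests', 'target_value': '100'}
    print(parser.parse_current(pods_metric))  # {'name': 'http_requests', 'current_value': '42'}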
|
import os.path as op
import numpy as np
import pytest
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_)
import mne
from mne.datasets import testing
from mne.minimum_norm.resolution_matrix import make_inverse_resolution_matrix
from mne.minimum_norm.spatial_resolution import (resolution_metrics,
_rectify_resolution_matrix)
from mne.utils import run_tests_if_main
data_path = testing.data_path(download=False)
subjects_dir = op.join(data_path, 'subjects')
fname_inv = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-meg-inv.fif')
fname_evoked = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-ave.fif')
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_cov = op.join(data_path, 'MEG', 'sample', 'sample_audvis_trunc-cov.fif')
@testing.requires_testing_data
def test_resolution_metrics():
"""Test resolution metrics."""
fwd = mne.read_forward_solution(fname_fwd)
# forward operator with fixed source orientations
fwd = mne.convert_forward_solution(fwd, surf_ori=True,
force_fixed=True, copy=False)
# noise covariance matrix
noise_cov = mne.read_cov(fname_cov)
# evoked data for info
evoked = mne.read_evokeds(fname_evoked, 0)
# fixed source orientation
inv = mne.minimum_norm.make_inverse_operator(
info=evoked.info, forward=fwd, noise_cov=noise_cov, loose=0.,
depth=None, fixed=True)
# regularisation parameter based on SNR
snr = 3.0
lambda2 = 1.0 / snr ** 2
# resolution matrices for fixed source orientation
# compute resolution matrix for MNE
rm_mne = make_inverse_resolution_matrix(fwd, inv,
method='MNE', lambda2=lambda2)
# compute very smooth MNE
rm_mne_smooth = make_inverse_resolution_matrix(fwd, inv,
method='MNE', lambda2=100.)
# compute resolution matrix for sLORETA
rm_lor = make_inverse_resolution_matrix(fwd, inv,
method='sLORETA', lambda2=lambda2)
# Compute localisation error (STCs)
# Peak
le_mne_psf = resolution_metrics(rm_mne, fwd['src'], function='psf',
metric='peak_err')
le_mne_ctf = resolution_metrics(rm_mne, fwd['src'], function='ctf',
metric='peak_err')
le_lor_psf = resolution_metrics(rm_lor, fwd['src'], function='psf',
metric='peak_err')
# Centre-of-gravity
cog_mne_psf = resolution_metrics(rm_mne, fwd['src'], function='psf',
metric='cog_err')
cog_mne_ctf = resolution_metrics(rm_mne, fwd['src'], function='ctf',
metric='cog_err')
# Compute spatial spread (STCs)
# Spatial deviation
sd_mne_psf = resolution_metrics(rm_mne, fwd['src'], function='psf',
metric='sd_ext')
sd_mne_psf_smooth = resolution_metrics(rm_mne_smooth, fwd['src'],
function='psf',
metric='sd_ext')
sd_mne_ctf = resolution_metrics(rm_mne, fwd['src'], function='ctf',
metric='sd_ext')
sd_lor_ctf = resolution_metrics(rm_lor, fwd['src'], function='ctf',
metric='sd_ext')
# Maximum radius
mr_mne_psf = resolution_metrics(rm_mne, fwd['src'], function='psf',
metric='maxrad_ext', threshold=0.6)
mr_mne_psf_smooth = resolution_metrics(rm_mne_smooth, fwd['src'],
function='psf', metric='maxrad_ext',
threshold=0.6)
mr_mne_ctf = resolution_metrics(rm_mne, fwd['src'], function='ctf',
metric='maxrad_ext', threshold=0.6)
mr_lor_ctf = resolution_metrics(rm_lor, fwd['src'], function='ctf',
metric='maxrad_ext', threshold=0.6)
# lower threshold -> larger spatial extent
mr_mne_psf_0 = resolution_metrics(rm_mne, fwd['src'], function='psf',
metric='maxrad_ext', threshold=0.)
mr_mne_psf_9 = resolution_metrics(rm_mne, fwd['src'], function='psf',
metric='maxrad_ext', threshold=0.9)
# Compute relative amplitude (STCs)
ra_mne_psf = resolution_metrics(rm_mne, fwd['src'], function='psf',
metric='peak_amp')
ra_mne_ctf = resolution_metrics(rm_mne, fwd['src'], function='ctf',
metric='peak_amp')
# Tests
with pytest.raises(ValueError, match='is not a recognized metric'):
resolution_metrics(rm_mne, fwd['src'], function='psf', metric='foo')
with pytest.raises(ValueError, match='a recognised resolution function'):
resolution_metrics(rm_mne, fwd['src'], function='foo',
metric='peak_err')
# For MNE: PLE for PSF and CTF equal?
assert_array_almost_equal(le_mne_psf.data, le_mne_ctf.data)
assert_array_almost_equal(cog_mne_psf.data, cog_mne_ctf.data)
# For MNE: SD and maxrad for PSF and CTF equal?
assert_array_almost_equal(sd_mne_psf.data, sd_mne_ctf.data)
assert_array_almost_equal(mr_mne_psf.data, mr_mne_ctf.data)
assert_((mr_mne_psf_0.data > mr_mne_psf_9.data).all())
# For MNE: RA for PSF and CTF equal?
assert_array_almost_equal(ra_mne_psf.data, ra_mne_ctf.data)
# Zero PLE for sLORETA?
assert_((le_lor_psf.data == 0.).all())
# Spatial deviation and maxrad of CTFs for MNE and sLORETA equal?
assert_array_almost_equal(sd_mne_ctf.data, sd_lor_ctf.data)
assert_array_almost_equal(mr_mne_ctf.data, mr_lor_ctf.data)
# Smooth MNE has larger spatial extent?
assert_(np.sum(sd_mne_psf_smooth.data) > np.sum(sd_mne_psf.data))
assert_(np.sum(mr_mne_psf_smooth.data) > np.sum(mr_mne_psf.data))
# test "rectification" of resolution matrix
r1 = np.ones([8, 4])
r2 = _rectify_resolution_matrix(r1)
assert_array_equal(r2, np.sqrt(2) * np.ones((4, 4)))
run_tests_if_main()
|
import numpy as np
import pandas as pd
import pytest
from pandas.util.testing import assert_frame_equal
from arctic.serialization.numpy_arrays import FrameConverter, FrametoArraySerializer
def test_frame_converter():
f = FrameConverter()
df = pd.DataFrame(np.random.randint(0, 100, size=(100, 4)),
columns=list('ABCD'))
assert_frame_equal(f.objify(f.docify(df)), df)
def test_with_strings():
f = FrameConverter()
df = pd.DataFrame(data={'one': ['a', 'b', 'c']})
assert_frame_equal(f.objify(f.docify(df)), df)
def test_with_objects_raises():
class Example(object):
def __init__(self, data):
self.data = data
def get(self):
return self.data
f = FrameConverter()
df = pd.DataFrame(data={'one': [Example(444)]})
with pytest.raises(Exception):
f.docify(df)
def test_without_index():
df = pd.DataFrame(np.random.randint(0, 100, size=(100, 4)),
columns=list('ABCD'))
n = FrametoArraySerializer()
a = n.serialize(df)
assert_frame_equal(df, n.deserialize(a))
def test_with_index():
df = pd.DataFrame(np.random.randint(0, 100, size=(100, 4)),
columns=list('ABCD'))
df = df.set_index(['A'])
n = FrametoArraySerializer()
a = n.serialize(df)
assert_frame_equal(df, n.deserialize(a))
def test_with_nans():
df = pd.DataFrame(np.random.randint(0, 100, size=(100, 4)),
columns=list('ABCD'))
df['A'] = np.NaN
n = FrametoArraySerializer()
a = n.serialize(df)
assert_frame_equal(df, n.deserialize(a))
def test_empty_dataframe():
df = pd.DataFrame()
n = FrametoArraySerializer()
a = n.serialize(df)
assert_frame_equal(df, n.deserialize(a))
def test_empty_columns():
df = pd.DataFrame(data={'A': [], 'B': [], 'C': []})
n = FrametoArraySerializer()
a = n.serialize(df)
assert_frame_equal(df, n.deserialize(a))
def test_string_cols_with_nans():
f = FrameConverter()
df = pd.DataFrame(data={'one': ['a', 'b', 'c', np.NaN]})
assert(df.equals(f.objify(f.docify(df))))
def test_objify_with_missing_columns():
f = FrameConverter()
df = pd.DataFrame(data={'one': ['a', 'b', 'c', np.NaN]})
res = f.objify(f.docify(df), columns=['one', 'two'])
assert res['one'].equals(df['one'])
assert all(res['two'].isnull())
def test_multi_column_fail():
df = pd.DataFrame(data={'A': [1, 2, 3], 'B': [2, 3, 4], 'C': [3, 4, 5]})
df = df.set_index(['A'])
n = FrametoArraySerializer()
a = n.serialize(df)
with pytest.raises(Exception) as e:
n.deserialize(a, columns=['A', 'B'])
assert('Duplicate' in str(e.value))
def test_dataframe_writable_after_objify():
f = FrameConverter()
df = pd.DataFrame(data={'one': [5, 6, 2]})
df = f.objify(f.docify(df))
df['one'] = 7
assert np.all(df['one'].values == np.array([7, 7, 7]))
|
import logging
from pyrainbird import RainbirdController
from homeassistant.components.binary_sensor import BinarySensorEntity
from . import (
DATA_RAINBIRD,
RAINBIRD_CONTROLLER,
SENSOR_TYPE_RAINDELAY,
SENSOR_TYPE_RAINSENSOR,
SENSOR_TYPES,
)
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up a Rain Bird sensor."""
if discovery_info is None:
return
controller = hass.data[DATA_RAINBIRD][discovery_info[RAINBIRD_CONTROLLER]]
add_entities(
[RainBirdSensor(controller, sensor_type) for sensor_type in SENSOR_TYPES], True
)
class RainBirdSensor(BinarySensorEntity):
"""A sensor implementation for Rain Bird device."""
def __init__(self, controller: RainbirdController, sensor_type):
"""Initialize the Rain Bird sensor."""
self._sensor_type = sensor_type
self._controller = controller
self._name = SENSOR_TYPES[self._sensor_type][0]
self._icon = SENSOR_TYPES[self._sensor_type][2]
self._state = None
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return None if self._state is None else bool(self._state)
def update(self):
"""Get the latest data and updates the states."""
_LOGGER.debug("Updating sensor: %s", self._name)
state = None
if self._sensor_type == SENSOR_TYPE_RAINSENSOR:
state = self._controller.get_rain_sensor_state()
elif self._sensor_type == SENSOR_TYPE_RAINDELAY:
state = self._controller.get_rain_delay()
self._state = None if state is None else bool(state)
@property
def name(self):
"""Return the name of this camera."""
return self._name
@property
def icon(self):
"""Return icon."""
return self._icon
|
import logging
import voluptuous as vol
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import CONF_ADDRESS, CONF_NAME, DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from . import (
CONF_BOARD,
CONF_CHANNELS,
CONF_I2C_HATS,
CONF_INDEX,
CONF_INITIAL_STATE,
CONF_INVERT_LOGIC,
I2C_HAT_NAMES,
I2C_HATS_MANAGER,
I2CHatsException,
)
_LOGGER = logging.getLogger(__name__)
_CHANNELS_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_INDEX): cv.positive_int,
vol.Required(CONF_NAME): cv.string,
vol.Optional(CONF_INVERT_LOGIC, default=False): cv.boolean,
vol.Optional(CONF_INITIAL_STATE): cv.boolean,
}
]
)
_I2C_HATS_SCHEMA = vol.Schema(
[
{
vol.Required(CONF_BOARD): vol.In(I2C_HAT_NAMES),
vol.Required(CONF_ADDRESS): vol.Coerce(int),
vol.Required(CONF_CHANNELS): _CHANNELS_SCHEMA,
}
]
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Optional(CONF_I2C_HATS): _I2C_HATS_SCHEMA}
)
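# Illustrative configuration.yaml entry, sketched from the schema above.
# The board name and address are placeholders: board must be one of
# I2C_HAT_NAMES and address a valid I2C address for that board.
# switch:
#   - platform: raspihats
#     i2c_hats:
#       - board: DQ10rly
#         address: 0x60
#         channels:
#           - index: 0
#             name: Light
#             invert_logic: false
#             initial_state: false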
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the raspihats switch devices."""
I2CHatSwitch.I2C_HATS_MANAGER = hass.data[I2C_HATS_MANAGER]
switches = []
i2c_hat_configs = config.get(CONF_I2C_HATS)
for i2c_hat_config in i2c_hat_configs:
board = i2c_hat_config[CONF_BOARD]
address = i2c_hat_config[CONF_ADDRESS]
try:
I2CHatSwitch.I2C_HATS_MANAGER.register_board(board, address)
for channel_config in i2c_hat_config[CONF_CHANNELS]:
switches.append(
I2CHatSwitch(
board,
address,
channel_config[CONF_INDEX],
channel_config[CONF_NAME],
channel_config[CONF_INVERT_LOGIC],
channel_config.get(CONF_INITIAL_STATE),
)
)
except I2CHatsException as ex:
_LOGGER.error(
"Failed to register %s I2CHat@%s %s", board, hex(address), str(ex)
)
add_entities(switches)
class I2CHatSwitch(ToggleEntity):
"""Representation a switch that uses a I2C-HAT digital output."""
I2C_HATS_MANAGER = None
def __init__(self, board, address, channel, name, invert_logic, initial_state):
"""Initialize switch."""
self._board = board
self._address = address
self._channel = channel
self._name = name or DEVICE_DEFAULT_NAME
self._invert_logic = invert_logic
if initial_state is not None:
if self._invert_logic:
state = not initial_state
else:
state = initial_state
self.I2C_HATS_MANAGER.write_dq(self._address, self._channel, state)
def online_callback():
"""Call fired when board is online."""
self.schedule_update_ha_state()
self.I2C_HATS_MANAGER.register_online_callback(
self._address, self._channel, online_callback
)
def _log_message(self, message):
"""Create log message."""
string = f"{self._name} "
string += f"{self._board}I2CHat@{hex(self._address)} "
string += f"channel:{str(self._channel)}{message}"
return string
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""Return the polling state."""
return False
@property
def is_on(self):
"""Return true if device is on."""
try:
state = self.I2C_HATS_MANAGER.read_dq(self._address, self._channel)
return state != self._invert_logic
except I2CHatsException as ex:
_LOGGER.error(self._log_message(f"Is ON check failed, {ex!s}"))
return False
def turn_on(self, **kwargs):
"""Turn the device on."""
try:
state = not self._invert_logic
self.I2C_HATS_MANAGER.write_dq(self._address, self._channel, state)
self.schedule_update_ha_state()
except I2CHatsException as ex:
_LOGGER.error(self._log_message(f"Turn ON failed, {ex!s}"))
def turn_off(self, **kwargs):
"""Turn the device off."""
try:
state = self._invert_logic
self.I2C_HATS_MANAGER.write_dq(self._address, self._channel, state)
self.schedule_update_ha_state()
except I2CHatsException as ex:
_LOGGER.error(self._log_message(f"Turn OFF failed, {ex!s}"))
|
import pytest_bdd as bdd
from qutebrowser.utils import qtutils
bdd.scenarios('qutescheme.feature')
@bdd.then(bdd.parsers.parse("the {kind} request should be blocked"))
def request_blocked(request, quteproc, kind):
blocking_set_msg = (
"Blocking malicious request from qute://settings/set?* to "
"qute://settings/set?*")
blocking_csrf_msg = (
"Blocking malicious request from "
"http://localhost:*/data/misc/qutescheme_csrf.html to "
"qute://settings/set?*")
blocking_js_msg = (
"[http://localhost:*/data/misc/qutescheme_csrf.html:0] Not allowed to "
"load local resource: qute://settings/set?*"
)
unsafe_redirect_msg = "Load error: ERR_UNSAFE_REDIRECT"
blocked_request_msg = "Load error: ERR_BLOCKED_BY_CLIENT"
webkit_error_invalid = (
"Error while loading qute://settings/set?*: Invalid qute://settings "
"request")
webkit_error_unsupported = (
"Error while loading qute://settings/set?*: Unsupported request type")
if request.config.webengine:
# On Qt 5.12, we mark qute:// as a local scheme, causing most requests
# to be blocked by Chromium internally (logging to the JS console).
expected_messages = {
'img': [blocking_js_msg],
'link': [blocking_js_msg],
'redirect': [blocking_set_msg, blocked_request_msg],
'form': [blocking_js_msg],
}
if qtutils.version_check('5.15', compiled=False):
# On Qt 5.15, Chromium blocks the redirect as ERR_UNSAFE_REDIRECT
# instead.
expected_messages['redirect'] = [unsafe_redirect_msg]
else: # QtWebKit
expected_messages = {
'img': [blocking_csrf_msg],
'link': [blocking_csrf_msg, webkit_error_invalid],
'redirect': [blocking_csrf_msg, webkit_error_invalid],
'form': [webkit_error_unsupported],
}
for pattern in expected_messages[kind]:
msg = quteproc.wait_for(message=pattern)
msg.expected = True
|
from behave import given
from behave import when
from paasta_tools.utils import _run
@given("some tronfig")
def step_some_tronfig(context):
context.soa_dir = "fake_soa_configs_tron"
@when("we run paasta_setup_tron_namespace in dry-run mode")
def step_run_paasta_setup_tron_namespace_dry_run(context):
cmd = (
f"paasta_setup_tron_namespace --dry-run -a --soa-dir {context.soa_dir}"
f" --cluster test-cluster"
)
context.return_code, context.output = _run(command=cmd)
|
from lemur.plugins.base import Plugin
class IssuerPlugin(Plugin):
"""
This is the base class from which all of the supported
issuers will inherit.
"""
type = "issuer"
def create_certificate(self, csr, issuer_options):
raise NotImplementedError
def create_authority(self, options):
raise NotImplementedError
def revoke_certificate(self, certificate, reason):
raise NotImplementedError
def get_ordered_certificate(self, certificate):
raise NotImplementedError
def cancel_ordered_certificate(self, pending_cert, **kwargs):
raise NotImplementedError
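# Illustrative sketch (not part of Lemur) of a concrete issuer plugin built on
# the base class above; the attribute names and return value are assumptions:
#
# class SelfSignedIssuerPlugin(IssuerPlugin):
#     title = "Self-Signed"
#     slug = "self-signed-issuer"
#
#     def create_certificate(self, csr, issuer_options):
#         # Sign `csr` locally and return the issued certificate material.
#         ...
#
#     def revoke_certificate(self, certificate, reason):
#         # Nothing to revoke against for self-signed certificates.
#         return True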
|
from spotipy import SpotifyException
from homeassistant import data_entry_flow, setup
from homeassistant.components.spotify.const import DOMAIN
from homeassistant.config_entries import SOURCE_USER, SOURCE_ZEROCONF
from homeassistant.const import CONF_CLIENT_ID, CONF_CLIENT_SECRET
from homeassistant.helpers import config_entry_oauth2_flow
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_abort_if_no_configuration(hass):
"""Check flow aborts when no configuration is present."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "missing_configuration"
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "missing_configuration"
async def test_zeroconf_abort_if_existing_entry(hass):
"""Check zeroconf flow aborts when an entry already exist."""
MockConfigEntry(domain=DOMAIN).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_ZEROCONF}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_full_flow(hass, aiohttp_client, aioclient_mock, current_request):
"""Check a full flow."""
assert await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {CONF_CLIENT_ID: "client", CONF_CLIENT_SECRET: "secret"},
"http": {"base_url": "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
assert result["type"] == data_entry_flow.RESULT_TYPE_EXTERNAL_STEP
assert result["url"] == (
"https://accounts.spotify.com/authorize"
"?response_type=code&client_id=client"
"&redirect_uri=https://example.com/auth/external/callback"
f"&state={state}"
"&scope=user-modify-playback-state,user-read-playback-state,user-read-private,"
"playlist-read-private,playlist-read-collaborative,user-library-read,"
"user-top-read,user-read-playback-position,user-read-recently-played,user-follow-read"
)
client = await aiohttp_client(hass.http.app)
resp = await client.get(f"/auth/external/callback?code=abcd&state={state}")
assert resp.status == 200
assert resp.headers["content-type"] == "text/html; charset=utf-8"
aioclient_mock.post(
"https://accounts.spotify.com/api/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("homeassistant.components.spotify.config_flow.Spotify") as spotify_mock:
spotify_mock.return_value.current_user.return_value = {
"id": "fake_id",
"display_name": "frenck",
}
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["data"]["auth_implementation"] == DOMAIN
result["data"]["token"].pop("expires_at")
assert result["data"]["name"] == "frenck"
assert result["data"]["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
async def test_abort_if_spotify_error(
hass, aiohttp_client, aioclient_mock, current_request
):
"""Check Spotify errors causes flow to abort."""
await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {CONF_CLIENT_ID: "client", CONF_CLIENT_SECRET: "secret"},
"http": {"base_url": "https://example.com"},
},
)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://accounts.spotify.com/api/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch(
"homeassistant.components.spotify.config_flow.Spotify.current_user",
side_effect=SpotifyException(400, -1, "message"),
):
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "connection_error"
async def test_reauthentication(hass, aiohttp_client, aioclient_mock, current_request):
"""Test Spotify reauthentication."""
await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {CONF_CLIENT_ID: "client", CONF_CLIENT_SECRET: "secret"},
"http": {"base_url": "https://example.com"},
},
)
old_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=123,
version=1,
data={"id": "frenck", "auth_implementation": DOMAIN},
)
old_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=old_entry.data
)
flows = hass.config_entries.flow.async_progress()
assert len(flows) == 1
result = await hass.config_entries.flow.async_configure(flows[0]["flow_id"], {})
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://accounts.spotify.com/api/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("homeassistant.components.spotify.config_flow.Spotify") as spotify_mock:
spotify_mock.return_value.current_user.return_value = {"id": "frenck"}
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["data"]["auth_implementation"] == DOMAIN
result["data"]["token"].pop("expires_at")
assert result["data"]["token"] == {
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
}
async def test_reauth_account_mismatch(
hass, aiohttp_client, aioclient_mock, current_request
):
"""Test Spotify reauthentication with different account."""
await setup.async_setup_component(
hass,
DOMAIN,
{
DOMAIN: {CONF_CLIENT_ID: "client", CONF_CLIENT_SECRET: "secret"},
"http": {"base_url": "https://example.com"},
},
)
old_entry = MockConfigEntry(
domain=DOMAIN,
unique_id=123,
version=1,
data={"id": "frenck", "auth_implementation": DOMAIN},
)
old_entry.add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": "reauth"}, data=old_entry.data
)
flows = hass.config_entries.flow.async_progress()
result = await hass.config_entries.flow.async_configure(flows[0]["flow_id"], {})
# pylint: disable=protected-access
state = config_entry_oauth2_flow._encode_jwt(hass, {"flow_id": result["flow_id"]})
client = await aiohttp_client(hass.http.app)
await client.get(f"/auth/external/callback?code=abcd&state={state}")
aioclient_mock.post(
"https://accounts.spotify.com/api/token",
json={
"refresh_token": "mock-refresh-token",
"access_token": "mock-access-token",
"type": "Bearer",
"expires_in": 60,
},
)
with patch("homeassistant.components.spotify.config_flow.Spotify") as spotify_mock:
spotify_mock.return_value.current_user.return_value = {"id": "fake_id"}
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "reauth_account_mismatch"
|
import asyncio
from functools import partial
import logging
from urllib.request import URLError
from panasonic_viera import EncryptionRequired, Keys, RemoteControl, SOAPError
import voluptuous as vol
from homeassistant.components.media_player.const import DOMAIN as MEDIA_PLAYER_DOMAIN
from homeassistant.config_entries import SOURCE_IMPORT
from homeassistant.const import CONF_HOST, CONF_NAME, CONF_PORT, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.script import Script
from .const import (
ATTR_DEVICE_INFO,
ATTR_REMOTE,
ATTR_UDN,
CONF_APP_ID,
CONF_ENCRYPTION_KEY,
CONF_ON_ACTION,
DEFAULT_NAME,
DEFAULT_PORT,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_ON_ACTION): cv.SCRIPT_SCHEMA,
}
)
],
)
},
extra=vol.ALLOW_EXTRA,
)
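# Illustrative configuration.yaml entry, sketched from CONFIG_SCHEMA above
# (host and name are placeholders; CONF_PORT and CONF_ON_ACTION are optional):
# panasonic_viera:
#   - host: 192.168.1.10
#     name: Living Room TV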
PLATFORMS = [MEDIA_PLAYER_DOMAIN]
async def async_setup(hass, config):
"""Set up Panasonic Viera from configuration.yaml."""
if DOMAIN not in config:
return True
for conf in config[DOMAIN]:
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=conf
)
)
return True
async def async_setup_entry(hass, config_entry):
"""Set up Panasonic Viera from a config entry."""
panasonic_viera_data = hass.data.setdefault(DOMAIN, {})
config = config_entry.data
host = config[CONF_HOST]
port = config[CONF_PORT]
on_action = config[CONF_ON_ACTION]
if on_action is not None:
on_action = Script(hass, on_action, config[CONF_NAME], DOMAIN)
params = {}
if CONF_APP_ID in config and CONF_ENCRYPTION_KEY in config:
params["app_id"] = config[CONF_APP_ID]
params["encryption_key"] = config[CONF_ENCRYPTION_KEY]
remote = Remote(hass, host, port, on_action, **params)
await remote.async_create_remote_control(during_setup=True)
panasonic_viera_data[config_entry.entry_id] = {ATTR_REMOTE: remote}
# Add device_info to older config entries
if ATTR_DEVICE_INFO not in config or config[ATTR_DEVICE_INFO] is None:
device_info = await remote.async_get_device_info()
unique_id = config_entry.unique_id
if device_info is None:
_LOGGER.error(
"Couldn't gather device info. Please restart Home Assistant with your TV turned on and connected to your network."
)
else:
unique_id = device_info[ATTR_UDN]
hass.config_entries.async_update_entry(
config_entry,
unique_id=unique_id,
data={**config, ATTR_DEVICE_INFO: device_info},
)
for component in PLATFORMS:
hass.async_create_task(
hass.config_entries.async_forward_entry_setup(config_entry, component)
)
return True
async def async_unload_entry(hass, config_entry):
"""Unload a config entry."""
unload_ok = all(
await asyncio.gather(
*[
hass.config_entries.async_forward_entry_unload(config_entry, component)
for component in PLATFORMS
]
)
)
if unload_ok:
hass.data[DOMAIN].pop(config_entry.entry_id)
return unload_ok
class Remote:
"""The Remote class. It stores the TV properties and the remote control connection itself."""
def __init__(
self,
hass,
host,
port,
on_action=None,
app_id=None,
encryption_key=None,
):
"""Initialize the Remote class."""
self._hass = hass
self._host = host
self._port = port
self._on_action = on_action
self._app_id = app_id
self._encryption_key = encryption_key
self.state = None
self.available = False
self.volume = 0
self.muted = False
self.playing = True
self._control = None
async def async_create_remote_control(self, during_setup=False):
"""Create remote control."""
control_existed = self._control is not None
try:
params = {}
if self._app_id and self._encryption_key:
params["app_id"] = self._app_id
params["encryption_key"] = self._encryption_key
self._control = await self._hass.async_add_executor_job(
partial(RemoteControl, self._host, self._port, **params)
)
self.state = STATE_ON
self.available = True
except (TimeoutError, URLError, SOAPError, OSError) as err:
if control_existed or during_setup:
_LOGGER.debug("Could not establish remote connection: %s", err)
self._control = None
self.state = STATE_OFF
self.available = self._on_action is not None
except Exception as err: # pylint: disable=broad-except
if control_existed or during_setup:
_LOGGER.exception("An unknown error occurred: %s", err)
self._control = None
self.state = STATE_OFF
self.available = self._on_action is not None
async def async_update(self):
"""Update device data."""
if self._control is None:
await self.async_create_remote_control()
return
await self._handle_errors(self._update)
def _update(self):
"""Retrieve the latest data."""
self.muted = self._control.get_mute()
self.volume = self._control.get_volume() / 100
async def async_send_key(self, key):
"""Send a key to the TV and handle exceptions."""
try:
key = getattr(Keys, key)
except (AttributeError, TypeError):
key = getattr(key, "value", key)
await self._handle_errors(self._control.send_key, key)
async def async_turn_on(self, context):
"""Turn on the TV."""
if self._on_action is not None:
await self._on_action.async_run(context=context)
self.state = STATE_ON
elif self.state != STATE_ON:
await self.async_send_key(Keys.power)
self.state = STATE_ON
async def async_turn_off(self):
"""Turn off the TV."""
if self.state != STATE_OFF:
await self.async_send_key(Keys.power)
self.state = STATE_OFF
await self.async_update()
async def async_set_mute(self, enable):
"""Set mute based on 'enable'."""
await self._handle_errors(self._control.set_mute, enable)
async def async_set_volume(self, volume):
"""Set volume level, range 0..1."""
volume = int(volume * 100)
await self._handle_errors(self._control.set_volume, volume)
async def async_play_media(self, media_type, media_id):
"""Play media."""
_LOGGER.debug("Play media: %s (%s)", media_id, media_type)
await self._handle_errors(self._control.open_webpage, media_id)
async def async_get_device_info(self):
"""Return device info."""
if self._control is None:
return None
device_info = await self._handle_errors(self._control.get_device_info)
_LOGGER.debug("Fetched device info: %s", str(device_info))
return device_info
async def _handle_errors(self, func, *args):
"""Handle errors from func, set available and reconnect if needed."""
try:
return await self._hass.async_add_executor_job(func, *args)
except EncryptionRequired:
_LOGGER.error(
"The connection couldn't be encrypted. Please reconfigure your TV"
)
except (TimeoutError, URLError, SOAPError, OSError):
self.state = STATE_OFF
self.available = self._on_action is not None
await self.async_create_remote_control()
except Exception as err: # pylint: disable=broad-except
_LOGGER.exception("An unknown error occurred: %s", err)
self.state = STATE_OFF
self.available = self._on_action is not None
|
from datetime import timedelta
import logging
import os
import mpd
import voluptuous as vol
from homeassistant.components.media_player import PLATFORM_SCHEMA, MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_PLAYLIST,
SUPPORT_CLEAR_PLAYLIST,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_SELECT_SOURCE,
SUPPORT_SHUFFLE_SET,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_SET,
SUPPORT_VOLUME_STEP,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "MPD"
DEFAULT_PORT = 6600
PLAYLIST_UPDATE_INTERVAL = timedelta(seconds=120)
SUPPORT_MPD = (
SUPPORT_PAUSE
| SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_PLAY_MEDIA
| SUPPORT_PLAY
| SUPPORT_CLEAR_PLAYLIST
| SUPPORT_SHUFFLE_SET
| SUPPORT_SEEK
| SUPPORT_STOP
| SUPPORT_TURN_OFF
| SUPPORT_TURN_ON
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
}
)
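# Illustrative configuration.yaml entry, sketched from PLATFORM_SCHEMA above
# (host and password are placeholders):
# media_player:
#   - platform: mpd
#     host: 192.168.1.32
#     port: 6600
#     password: !secret mpd_password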
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the MPD platform."""
host = config.get(CONF_HOST)
port = config.get(CONF_PORT)
name = config.get(CONF_NAME)
password = config.get(CONF_PASSWORD)
device = MpdDevice(host, port, password, name)
add_entities([device], True)
class MpdDevice(MediaPlayerEntity):
"""Representation of a MPD server."""
# pylint: disable=no-member
def __init__(self, server, port, password, name):
"""Initialize the MPD device."""
self.server = server
self.port = port
self._name = name
self.password = password
self._status = None
self._currentsong = None
self._playlists = None
self._currentplaylist = None
self._is_connected = False
self._muted = False
self._muted_volume = 0
self._media_position_updated_at = None
self._media_position = None
# set up MPD client
self._client = mpd.MPDClient()
self._client.timeout = 30
self._client.idletimeout = None
def _connect(self):
"""Connect to MPD."""
try:
self._client.connect(self.server, self.port)
if self.password is not None:
self._client.password(self.password)
except mpd.ConnectionError:
return
self._is_connected = True
def _disconnect(self):
"""Disconnect from MPD."""
try:
self._client.disconnect()
except mpd.ConnectionError:
pass
self._is_connected = False
self._status = None
def _fetch_status(self):
"""Fetch status from MPD."""
self._status = self._client.status()
self._currentsong = self._client.currentsong()
position = self._status.get("elapsed")
if position is None:
position = self._status.get("time")
if isinstance(position, str) and ":" in position:
position = position.split(":")[0]
if position is not None and self._media_position != position:
self._media_position_updated_at = dt_util.utcnow()
self._media_position = int(float(position))
self._update_playlists()
@property
def available(self):
"""Return true if MPD is available and connected."""
return self._is_connected
def update(self):
"""Get the latest data and update the state."""
try:
if not self._is_connected:
self._connect()
self._fetch_status()
except (mpd.ConnectionError, OSError, BrokenPipeError, ValueError) as error:
# Cleanly disconnect in case connection is not in valid state
_LOGGER.debug("Error updating status: %s", error)
self._disconnect()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def state(self):
"""Return the media state."""
if self._status is None:
return STATE_OFF
if self._status["state"] == "play":
return STATE_PLAYING
if self._status["state"] == "pause":
return STATE_PAUSED
if self._status["state"] == "stop":
return STATE_OFF
return STATE_OFF
@property
def is_volume_muted(self):
"""Boolean if volume is currently muted."""
return self._muted
@property
def media_content_id(self):
"""Return the content ID of current playing media."""
return self._currentsong.get("file")
@property
def media_content_type(self):
"""Return the content type of current playing media."""
return MEDIA_TYPE_MUSIC
@property
def media_duration(self):
"""Return the duration of current playing media in seconds."""
# Time does not exist for streams
return self._currentsong.get("time")
@property
def media_position(self):
"""Position of current playing media in seconds.
This is returned as part of the mpd status rather than in the details
of the current song.
"""
return self._media_position
@property
def media_position_updated_at(self):
"""Last valid time of media position."""
return self._media_position_updated_at
@property
def media_title(self):
"""Return the title of current playing media."""
name = self._currentsong.get("name", None)
title = self._currentsong.get("title", None)
file_name = self._currentsong.get("file", None)
if name is None and title is None:
if file_name is None:
return "None"
return os.path.basename(file_name)
if name is None:
return title
if title is None:
return name
return f"{name}: {title}"
@property
def media_artist(self):
"""Return the artist of current playing media (Music track only)."""
return self._currentsong.get("artist")
@property
def media_album_name(self):
"""Return the album of current playing media (Music track only)."""
return self._currentsong.get("album")
@property
def volume_level(self):
"""Return the volume level."""
if "volume" in self._status:
return int(self._status["volume"]) / 100
return None
@property
def supported_features(self):
"""Flag media player features that are supported."""
if self._status is None:
return 0
supported = SUPPORT_MPD
if "volume" in self._status:
supported |= SUPPORT_VOLUME_SET | SUPPORT_VOLUME_STEP | SUPPORT_VOLUME_MUTE
if self._playlists is not None:
supported |= SUPPORT_SELECT_SOURCE
return supported
@property
def source(self):
"""Name of the current input source."""
return self._currentplaylist
@property
def source_list(self):
"""Return the list of available input sources."""
return self._playlists
def select_source(self, source):
"""Choose a different available playlist and play it."""
self.play_media(MEDIA_TYPE_PLAYLIST, source)
@Throttle(PLAYLIST_UPDATE_INTERVAL)
def _update_playlists(self, **kwargs):
"""Update available MPD playlists."""
try:
self._playlists = []
for playlist_data in self._client.listplaylists():
self._playlists.append(playlist_data["playlist"])
except mpd.CommandError as error:
self._playlists = None
_LOGGER.warning("Playlists could not be updated: %s:", error)
def set_volume_level(self, volume):
"""Set volume of media player."""
if "volume" in self._status:
self._client.setvol(int(volume * 100))
def volume_up(self):
"""Service to send the MPD the command for volume up."""
if "volume" in self._status:
current_volume = int(self._status["volume"])
if current_volume <= 100:
self._client.setvol(current_volume + 5)
def volume_down(self):
"""Service to send the MPD the command for volume down."""
if "volume" in self._status:
current_volume = int(self._status["volume"])
if current_volume >= 0:
self._client.setvol(current_volume - 5)
def media_play(self):
"""Service to send the MPD the command for play/pause."""
if self._status["state"] == "pause":
self._client.pause(0)
else:
self._client.play()
def media_pause(self):
"""Service to send the MPD the command for play/pause."""
self._client.pause(1)
def media_stop(self):
"""Service to send the MPD the command for stop."""
self._client.stop()
def media_next_track(self):
"""Service to send the MPD the command for next track."""
self._client.next()
def media_previous_track(self):
"""Service to send the MPD the command for previous track."""
self._client.previous()
def mute_volume(self, mute):
"""Mute. Emulated with set_volume_level."""
if "volume" in self._status:
if mute:
self._muted_volume = self.volume_level
self.set_volume_level(0)
else:
self.set_volume_level(self._muted_volume)
self._muted = mute
def play_media(self, media_type, media_id, **kwargs):
"""Send the media player the command for playing a playlist."""
_LOGGER.debug("Playing playlist: %s", media_id)
if media_type == MEDIA_TYPE_PLAYLIST:
if media_id in self._playlists:
self._currentplaylist = media_id
else:
self._currentplaylist = None
_LOGGER.warning("Unknown playlist name %s", media_id)
self._client.clear()
self._client.load(media_id)
self._client.play()
else:
self._client.clear()
self._client.add(media_id)
self._client.play()
@property
def shuffle(self):
"""Boolean if shuffle is enabled."""
return bool(int(self._status["random"]))
def set_shuffle(self, shuffle):
"""Enable/disable shuffle mode."""
self._client.random(int(shuffle))
def turn_off(self):
"""Service to send the MPD the command to stop playing."""
self._client.stop()
def turn_on(self):
"""Service to send the MPD the command to start playing."""
self._client.play()
self._update_playlists(no_throttle=True)
def clear_playlist(self):
"""Clear players playlist."""
self._client.clear()
def media_seek(self, position):
"""Send seek command."""
self._client.seekcur(position)
|
from Handler import Handler
from diamond.util import get_diamond_version
import json
import logging
import time
import urllib2
import re
class SignalfxHandler(Handler):
# Initialize Handler with url and batch size
def __init__(self, config=None):
Handler.__init__(self, config)
self.metrics = []
self.filter_metrics = self.config["filter_metrics_regex"]
self.batch_size = int(self.config['batch'])
self.url = self.config['url']
self.auth_token = self.config['auth_token']
self.batch_max_interval = self.config['batch_max_interval']
self.resetBatchTimeout()
self._compiled_filters = []
for fltr in self.filter_metrics:
collector, metric = fltr.split(":")
self._compiled_filters.append((collector,
re.compile(metric),))
if self.auth_token == "":
logging.error("Failed to load Signalfx module")
return
def _match_metric(self, metric):
"""
Match the metric path against the configured filters; if no filters are configured, every metric matches.
"""
if len(self._compiled_filters) == 0:
return True
for (collector, filter_regex) in self._compiled_filters:
if collector != metric.getCollectorPath():
continue
if filter_regex.match(metric.getMetricPath()):
return True
return False
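# Illustrative filter_metrics_regex entries (placeholder values): each item is
# "<collector>:<regex>", e.g. "cpu:total.*" keeps only metrics from the cpu
# collector whose metric path matches total.*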
def resetBatchTimeout(self):
self.batch_max_timestamp = int(time.time() + self.batch_max_interval)
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(SignalfxHandler, self).get_default_config_help()
config.update({
'url': 'Where to send metrics',
'batch': 'How many to store before sending',
'filter_metrics_regex': 'Comma separated collector:regex filters',
'auth_token': 'Org API token to use when sending metrics',
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(SignalfxHandler, self).get_default_config()
config.update({
'url': 'https://ingest.signalfx.com/v2/datapoint',
'batch': 300,
'filter_metrics_regex': '',
# Don't wait more than 30 sec between pushes
'batch_max_interval': 30,
'auth_token': '',
})
return config
def process(self, metric):
"""
Queue a metric. Flushing queue if batch size reached
"""
if self._match_metric(metric):
self.metrics.append(metric)
if self.should_flush():
self._send()
def should_flush(self):
return len(self.metrics) >= self.batch_size or \
time.time() >= self.batch_max_timestamp
def into_signalfx_point(self, metric):
"""
Convert diamond metric into something signalfx can understand
"""
dims = {
"collector": metric.getCollectorPath(),
"prefix": metric.getPathPrefix(),
}
if metric.host is not None and metric.host != "":
dims["host"] = metric.host
return {
"metric": metric.getMetricPath(),
"value": metric.value,
"dimensions": dims,
# We expect ms timestamps
"timestamp": metric.timestamp * 1000,
}
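# Illustrative shape of one converted datapoint (placeholder values):
#   {"metric": "cpu.total.idle", "value": 98.3,
#    "dimensions": {"collector": "cpu", "prefix": "servers", "host": "web01"},
#    "timestamp": 1514764800000}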
def flush(self):
"""Flush metrics in queue"""
self._send()
def user_agent(self):
"""
HTTP user agent
"""
return "Diamond: %s" % get_diamond_version()
def _send(self):
# Potentially use protobufs in the future
postDictionary = {}
for metric in self.metrics:
t = metric.metric_type.lower()
if t not in postDictionary:
postDictionary[t] = []
postDictionary[t].append(self.into_signalfx_point(metric))
self.metrics = []
postBody = json.dumps(postDictionary)
logging.debug("Body is %s", postBody)
req = urllib2.Request(self.url, postBody,
{"Content-type": "application/json",
"X-SF-TOKEN": self.auth_token,
"User-Agent": self.user_agent()})
self.resetBatchTimeout()
try:
urllib2.urlopen(req)
except urllib2.URLError as err:
error_message = err.read()
logging.exception("Unable to post signalfx metrics" + error_message)
return
|
import pytest
from homeassistant.components.alarm_control_panel import DOMAIN as ALARM_DOMAIN
from homeassistant.components.alarm_control_panel.const import (
SUPPORT_ALARM_ARM_AWAY,
SUPPORT_ALARM_ARM_CUSTOM_BYPASS,
SUPPORT_ALARM_ARM_HOME,
SUPPORT_ALARM_ARM_NIGHT,
)
from homeassistant.components.risco import CannotConnectError, UnauthorizedError
from homeassistant.components.risco.const import DOMAIN
from homeassistant.const import (
SERVICE_ALARM_ARM_AWAY,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
SERVICE_ALARM_ARM_HOME,
SERVICE_ALARM_ARM_NIGHT,
SERVICE_ALARM_DISARM,
STATE_ALARM_ARMED_AWAY,
STATE_ALARM_ARMED_CUSTOM_BYPASS,
STATE_ALARM_ARMED_HOME,
STATE_ALARM_ARMED_NIGHT,
STATE_ALARM_ARMING,
STATE_ALARM_DISARMED,
STATE_ALARM_TRIGGERED,
STATE_UNKNOWN,
)
from homeassistant.helpers.entity_component import async_update_entity
from .util import TEST_CONFIG, TEST_SITE_UUID, setup_risco
from tests.async_mock import MagicMock, PropertyMock, patch
from tests.common import MockConfigEntry
FIRST_ENTITY_ID = "alarm_control_panel.risco_test_site_name_partition_0"
SECOND_ENTITY_ID = "alarm_control_panel.risco_test_site_name_partition_1"
CODES_REQUIRED_OPTIONS = {"code_arm_required": True, "code_disarm_required": True}
TEST_RISCO_TO_HA = {
"arm": STATE_ALARM_ARMED_AWAY,
"partial_arm": STATE_ALARM_ARMED_HOME,
"A": STATE_ALARM_ARMED_HOME,
"B": STATE_ALARM_ARMED_HOME,
"C": STATE_ALARM_ARMED_NIGHT,
"D": STATE_ALARM_ARMED_NIGHT,
}
TEST_FULL_RISCO_TO_HA = {
**TEST_RISCO_TO_HA,
"D": STATE_ALARM_ARMED_CUSTOM_BYPASS,
}
TEST_HA_TO_RISCO = {
STATE_ALARM_ARMED_AWAY: "arm",
STATE_ALARM_ARMED_HOME: "partial_arm",
STATE_ALARM_ARMED_NIGHT: "C",
}
TEST_FULL_HA_TO_RISCO = {
**TEST_HA_TO_RISCO,
STATE_ALARM_ARMED_CUSTOM_BYPASS: "D",
}
CUSTOM_MAPPING_OPTIONS = {
"risco_states_to_ha": TEST_RISCO_TO_HA,
"ha_states_to_risco": TEST_HA_TO_RISCO,
}
FULL_CUSTOM_MAPPING = {
"risco_states_to_ha": TEST_FULL_RISCO_TO_HA,
"ha_states_to_risco": TEST_FULL_HA_TO_RISCO,
}
EXPECTED_FEATURES = (
SUPPORT_ALARM_ARM_AWAY | SUPPORT_ALARM_ARM_HOME | SUPPORT_ALARM_ARM_NIGHT
)
def _partition_mock():
return MagicMock(
triggered=False,
arming=False,
armed=False,
disarmed=False,
partially_armed=False,
)
@pytest.fixture
def two_part_alarm():
"""Fixture to mock alarm with two partitions."""
partition_mocks = {0: _partition_mock(), 1: _partition_mock()}
alarm_mock = MagicMock()
with patch.object(
partition_mocks[0], "id", new_callable=PropertyMock(return_value=0)
), patch.object(
partition_mocks[1], "id", new_callable=PropertyMock(return_value=1)
), patch.object(
alarm_mock,
"partitions",
new_callable=PropertyMock(return_value=partition_mocks),
), patch(
"homeassistant.components.risco.RiscoAPI.get_state",
return_value=alarm_mock,
):
yield alarm_mock
async def test_cannot_connect(hass):
"""Test connection error."""
with patch(
"homeassistant.components.risco.RiscoAPI.login",
side_effect=CannotConnectError,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=TEST_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
registry = await hass.helpers.entity_registry.async_get_registry()
assert not registry.async_is_registered(FIRST_ENTITY_ID)
assert not registry.async_is_registered(SECOND_ENTITY_ID)
async def test_unauthorized(hass):
"""Test unauthorized error."""
with patch(
"homeassistant.components.risco.RiscoAPI.login",
side_effect=UnauthorizedError,
):
config_entry = MockConfigEntry(domain=DOMAIN, data=TEST_CONFIG)
config_entry.add_to_hass(hass)
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
registry = await hass.helpers.entity_registry.async_get_registry()
assert not registry.async_is_registered(FIRST_ENTITY_ID)
assert not registry.async_is_registered(SECOND_ENTITY_ID)
async def test_setup(hass, two_part_alarm):
"""Test entity setup."""
registry = await hass.helpers.entity_registry.async_get_registry()
assert not registry.async_is_registered(FIRST_ENTITY_ID)
assert not registry.async_is_registered(SECOND_ENTITY_ID)
await setup_risco(hass)
assert registry.async_is_registered(FIRST_ENTITY_ID)
assert registry.async_is_registered(SECOND_ENTITY_ID)
registry = await hass.helpers.device_registry.async_get_registry()
device = registry.async_get_device({(DOMAIN, TEST_SITE_UUID + "_0")}, {})
assert device is not None
assert device.manufacturer == "Risco"
device = registry.async_get_device({(DOMAIN, TEST_SITE_UUID + "_1")}, {})
assert device is not None
assert device.manufacturer == "Risco"
async def _check_state(hass, alarm, property, state, entity_id, partition_id):
with patch.object(alarm.partitions[partition_id], property, return_value=True):
await async_update_entity(hass, entity_id)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == state
async def test_states(hass, two_part_alarm):
"""Test the various alarm states."""
await setup_risco(hass, CUSTOM_MAPPING_OPTIONS)
assert hass.states.get(FIRST_ENTITY_ID).state == STATE_UNKNOWN
for partition_id, entity_id in {0: FIRST_ENTITY_ID, 1: SECOND_ENTITY_ID}.items():
await _check_state(
hass,
two_part_alarm,
"triggered",
STATE_ALARM_TRIGGERED,
entity_id,
partition_id,
)
await _check_state(
hass, two_part_alarm, "arming", STATE_ALARM_ARMING, entity_id, partition_id
)
await _check_state(
hass,
two_part_alarm,
"armed",
STATE_ALARM_ARMED_AWAY,
entity_id,
partition_id,
)
await _check_state(
hass,
two_part_alarm,
"partially_armed",
STATE_ALARM_ARMED_HOME,
entity_id,
partition_id,
)
await _check_state(
hass,
two_part_alarm,
"disarmed",
STATE_ALARM_DISARMED,
entity_id,
partition_id,
)
groups = {"A": False, "B": False, "C": True, "D": False}
with patch.object(
two_part_alarm.partitions[partition_id],
"groups",
new_callable=PropertyMock(return_value=groups),
):
await _check_state(
hass,
two_part_alarm,
"partially_armed",
STATE_ALARM_ARMED_NIGHT,
entity_id,
partition_id,
)
async def _test_service_call(
hass, service, method, entity_id, partition_id, *args, **kwargs
):
with patch(f"homeassistant.components.risco.RiscoAPI.{method}") as set_mock:
await _call_alarm_service(hass, service, entity_id, **kwargs)
set_mock.assert_awaited_once_with(partition_id, *args)
async def _test_no_service_call(
hass, service, method, entity_id, partition_id, **kwargs
):
with patch(f"homeassistant.components.risco.RiscoAPI.{method}") as set_mock:
await _call_alarm_service(hass, service, entity_id, **kwargs)
set_mock.assert_not_awaited()
async def _call_alarm_service(hass, service, entity_id, **kwargs):
data = {"entity_id": entity_id, **kwargs}
await hass.services.async_call(
ALARM_DOMAIN, service, service_data=data, blocking=True
)
async def test_sets_custom_mapping(hass, two_part_alarm):
"""Test settings the various modes when mapping some states."""
await setup_risco(hass, CUSTOM_MAPPING_OPTIONS)
registry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.async_get(FIRST_ENTITY_ID)
assert entity.supported_features == EXPECTED_FEATURES
await _test_service_call(hass, SERVICE_ALARM_DISARM, "disarm", FIRST_ENTITY_ID, 0)
await _test_service_call(hass, SERVICE_ALARM_DISARM, "disarm", SECOND_ENTITY_ID, 1)
await _test_service_call(hass, SERVICE_ALARM_ARM_AWAY, "arm", FIRST_ENTITY_ID, 0)
await _test_service_call(hass, SERVICE_ALARM_ARM_AWAY, "arm", SECOND_ENTITY_ID, 1)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", FIRST_ENTITY_ID, 0
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", SECOND_ENTITY_ID, 1
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", FIRST_ENTITY_ID, 0, "C"
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", SECOND_ENTITY_ID, 1, "C"
)
async def test_sets_full_custom_mapping(hass, two_part_alarm):
"""Test settings the various modes when mapping all states."""
await setup_risco(hass, FULL_CUSTOM_MAPPING)
registry = await hass.helpers.entity_registry.async_get_registry()
entity = registry.async_get(FIRST_ENTITY_ID)
assert (
entity.supported_features == EXPECTED_FEATURES | SUPPORT_ALARM_ARM_CUSTOM_BYPASS
)
await _test_service_call(hass, SERVICE_ALARM_DISARM, "disarm", FIRST_ENTITY_ID, 0)
await _test_service_call(hass, SERVICE_ALARM_DISARM, "disarm", SECOND_ENTITY_ID, 1)
await _test_service_call(hass, SERVICE_ALARM_ARM_AWAY, "arm", FIRST_ENTITY_ID, 0)
await _test_service_call(hass, SERVICE_ALARM_ARM_AWAY, "arm", SECOND_ENTITY_ID, 1)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", FIRST_ENTITY_ID, 0
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", SECOND_ENTITY_ID, 1
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", FIRST_ENTITY_ID, 0, "C"
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", SECOND_ENTITY_ID, 1, "C"
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_CUSTOM_BYPASS, "group_arm", FIRST_ENTITY_ID, 0, "D"
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_CUSTOM_BYPASS, "group_arm", SECOND_ENTITY_ID, 1, "D"
)
async def test_sets_with_correct_code(hass, two_part_alarm):
"""Test settings the various modes when code is required."""
await setup_risco(hass, {**CUSTOM_MAPPING_OPTIONS, **CODES_REQUIRED_OPTIONS})
code = {"code": 1234}
await _test_service_call(
hass, SERVICE_ALARM_DISARM, "disarm", FIRST_ENTITY_ID, 0, **code
)
await _test_service_call(
hass, SERVICE_ALARM_DISARM, "disarm", SECOND_ENTITY_ID, 1, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_AWAY, "arm", FIRST_ENTITY_ID, 0, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_AWAY, "arm", SECOND_ENTITY_ID, 1, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", SECOND_ENTITY_ID, 1, **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", FIRST_ENTITY_ID, 0, "C", **code
)
await _test_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", SECOND_ENTITY_ID, 1, "C", **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_CUSTOM_BYPASS, "partial_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
"partial_arm",
SECOND_ENTITY_ID,
1,
**code,
)
async def test_sets_with_incorrect_code(hass, two_part_alarm):
"""Test settings the various modes when code is required and incorrect."""
await setup_risco(hass, {**CUSTOM_MAPPING_OPTIONS, **CODES_REQUIRED_OPTIONS})
code = {"code": 4321}
await _test_no_service_call(
hass, SERVICE_ALARM_DISARM, "disarm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_DISARM, "disarm", SECOND_ENTITY_ID, 1, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_AWAY, "arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_AWAY, "arm", SECOND_ENTITY_ID, 1, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_HOME, "partial_arm", SECOND_ENTITY_ID, 1, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_NIGHT, "group_arm", SECOND_ENTITY_ID, 1, **code
)
await _test_no_service_call(
hass, SERVICE_ALARM_ARM_CUSTOM_BYPASS, "partial_arm", FIRST_ENTITY_ID, 0, **code
)
await _test_no_service_call(
hass,
SERVICE_ALARM_ARM_CUSTOM_BYPASS,
"partial_arm",
SECOND_ENTITY_ID,
1,
**code,
)
|
import logging
from aioasuswrt.asuswrt import AsusWrt
import voluptuous as vol
from homeassistant.const import (
CONF_HOST,
CONF_MODE,
CONF_PASSWORD,
CONF_PORT,
CONF_PROTOCOL,
CONF_USERNAME,
)
from homeassistant.helpers import config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
from homeassistant.helpers.event import async_call_later
_LOGGER = logging.getLogger(__name__)
CONF_DNSMASQ = "dnsmasq"
CONF_INTERFACE = "interface"
CONF_PUB_KEY = "pub_key"
CONF_REQUIRE_IP = "require_ip"
CONF_SENSORS = "sensors"
CONF_SSH_KEY = "ssh_key"
DOMAIN = "asuswrt"
DATA_ASUSWRT = DOMAIN
DEFAULT_SSH_PORT = 22
DEFAULT_INTERFACE = "eth0"
DEFAULT_DNSMASQ = "/var/lib/misc"
FIRST_RETRY_TIME = 60
MAX_RETRY_TIME = 900
SECRET_GROUP = "Password or SSH Key"
SENSOR_TYPES = ["devices", "upload_speed", "download_speed", "download", "upload"]
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME): cv.string,
vol.Optional(CONF_PROTOCOL, default="ssh"): vol.In(["ssh", "telnet"]),
vol.Optional(CONF_MODE, default="router"): vol.In(["router", "ap"]),
vol.Optional(CONF_PORT, default=DEFAULT_SSH_PORT): cv.port,
vol.Optional(CONF_REQUIRE_IP, default=True): cv.boolean,
vol.Exclusive(CONF_PASSWORD, SECRET_GROUP): cv.string,
vol.Exclusive(CONF_SSH_KEY, SECRET_GROUP): cv.isfile,
vol.Exclusive(CONF_PUB_KEY, SECRET_GROUP): cv.isfile,
vol.Optional(CONF_SENSORS): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_INTERFACE, default=DEFAULT_INTERFACE): cv.string,
vol.Optional(CONF_DNSMASQ, default=DEFAULT_DNSMASQ): cv.string,
}
)
},
extra=vol.ALLOW_EXTRA,
)
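# Illustrative configuration.yaml entry, sketched from CONFIG_SCHEMA above
# (host/username/password are placeholders; only one of password, ssh_key or
# pub_key may be given):
# asuswrt:
#   host: 192.168.1.1
#   username: admin
#   password: !secret asuswrt_password
#   sensors:
#     - devices
#     - download_speed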
async def async_setup(hass, config, retry_delay=FIRST_RETRY_TIME):
"""Set up the asuswrt component."""
conf = config[DOMAIN]
api = AsusWrt(
conf[CONF_HOST],
conf[CONF_PORT],
conf[CONF_PROTOCOL] == "telnet",
conf[CONF_USERNAME],
conf.get(CONF_PASSWORD, ""),
conf.get("ssh_key", conf.get("pub_key", "")),
conf[CONF_MODE],
conf[CONF_REQUIRE_IP],
interface=conf[CONF_INTERFACE],
dnsmasq=conf[CONF_DNSMASQ],
)
try:
await api.connection.async_connect()
except OSError as ex:
_LOGGER.warning(
"Error [%s] connecting %s to %s. Will retry in %s seconds...",
str(ex),
DOMAIN,
conf[CONF_HOST],
retry_delay,
)
async def retry_setup(now):
"""Retry setup if a error happens on asuswrt API."""
await async_setup(
hass, config, retry_delay=min(2 * retry_delay, MAX_RETRY_TIME)
)
async_call_later(hass, retry_delay, retry_setup)
return True
if not api.is_connected:
_LOGGER.error("Error connecting %s to %s", DOMAIN, conf[CONF_HOST])
return False
hass.data[DATA_ASUSWRT] = api
hass.async_create_task(
async_load_platform(
hass, "sensor", DOMAIN, config[DOMAIN].get(CONF_SENSORS), config
)
)
hass.async_create_task(
async_load_platform(hass, "device_tracker", DOMAIN, {}, config)
)
return True
|
from lemur import database
from lemur.policies.models import RotationPolicy
def get(policy_id):
"""
Retrieves policy by its ID.
:param policy_id:
:return:
"""
return database.get(RotationPolicy, policy_id)
def get_by_name(policy_name):
"""
Retrieves policy by its name.
:param policy_name:
:return:
"""
return database.get_all(RotationPolicy, policy_name, field="name").all()
def delete(policy_id):
"""
Delete a rotation policy.
:param policy_id:
:return:
"""
database.delete(get(policy_id))
def get_all_policies():
"""
Retrieves all rotation policies.
:return:
"""
return RotationPolicy.query.all()
def create(**kwargs):
"""
Creates a new rotation policy.
:param kwargs:
:return:
"""
policy = RotationPolicy(**kwargs)
database.create(policy)
return policy
def update(policy_id, **kwargs):
"""
Updates a policy.
:param policy_id:
:param kwargs:
:return:
"""
policy = get(policy_id)
for key, value in kwargs.items():
setattr(policy, key, value)
return database.update(policy)
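# Illustrative usage sketch (field names are assumptions, not verified against
# the RotationPolicy model):
#   policy = create(name="default", days=30)
#   update(policy.id, days=60)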
|
import logging
import time
from typing import Any, Dict, List, Optional
import attr
from huawei_lte_api.exceptions import ResponseErrorException
from homeassistant.components.notify import ATTR_TARGET, BaseNotificationService
from homeassistant.const import CONF_RECIPIENT, CONF_URL
from homeassistant.helpers.typing import HomeAssistantType
from . import Router
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_get_service(
hass: HomeAssistantType,
config: Dict[str, Any],
discovery_info: Optional[Dict[str, Any]] = None,
) -> Optional["HuaweiLteSmsNotificationService"]:
"""Get the notification service."""
if discovery_info is None:
return None
router = hass.data[DOMAIN].routers[discovery_info[CONF_URL]]
default_targets = discovery_info[CONF_RECIPIENT] or []
return HuaweiLteSmsNotificationService(router, default_targets)
@attr.s
class HuaweiLteSmsNotificationService(BaseNotificationService):
"""Huawei LTE router SMS notification service."""
router: Router = attr.ib()
default_targets: List[str] = attr.ib()
def send_message(self, message: str = "", **kwargs: Any) -> None:
"""Send message to target numbers."""
targets = kwargs.get(ATTR_TARGET, self.default_targets)
if not targets or not message:
return
if self.router.suspended:
_LOGGER.debug(
"Integration suspended, not sending notification to %s", targets
)
return
try:
resp = self.router.client.sms.send_sms(
phone_numbers=targets, message=message
)
_LOGGER.debug("Sent to %s: %s", targets, resp)
except ResponseErrorException as ex:
_LOGGER.error("Could not send to %s: %s", targets, ex)
finally:
self.router.notify_last_attempt = time.monotonic()
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl import logging
from absl.testing import flagsaver
from absl.testing import parameterized
from compare_gan import eval_gan_lib
from compare_gan import runner_lib
from compare_gan import test_utils
from compare_gan.architectures import arch_ops
from compare_gan.gans.modular_gan import ModularGAN
import gin
import numpy as np
from six.moves import range
import tensorflow as tf
FLAGS = flags.FLAGS
class RunnerLibTest(parameterized.TestCase, test_utils.CompareGanTestCase):
@parameterized.named_parameters([
("SameSeeds", 42, 42),
("DifferentSeeds", 1, 42),
("NoSeeds", None, None),
])
def testWeightInitialization(self, seed1, seed2):
gin.bind_parameter("dataset.name", "cifar10")
gin.bind_parameter("ModularGAN.g_optimizer_fn",
tf.train.GradientDescentOptimizer)
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 1,
"z_dim": 128,
}
work_dir = self._get_empty_model_dir()
seeds = [seed1, seed2]
for i in range(2):
model_dir = os.path.join(work_dir, str(i))
seed = seeds[i]
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir, tf_random_seed=seed)
task_manager = runner_lib.TaskManager(model_dir)
runner_lib.run_with_schedule(
"train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=False)
checkpoint_path_0 = os.path.join(work_dir, "0/model.ckpt-0")
checkpoint_path_1 = os.path.join(work_dir, "1/model.ckpt-0")
checkpoint_reader_0 = tf.train.load_checkpoint(checkpoint_path_0)
checkpoint_reader_1 = tf.train.load_checkpoint(checkpoint_path_1)
for name, _ in tf.train.list_variables(checkpoint_path_0):
tf.logging.info(name)
t0 = checkpoint_reader_0.get_tensor(name)
t1 = checkpoint_reader_1.get_tensor(name)
zero_initialized_vars = [
"bias", "biases", "beta", "moving_mean", "global_step",
"global_step_disc"
]
one_initialized_vars = ["gamma", "moving_variance"]
if any(name.endswith(e) for e in zero_initialized_vars):
# Variables that are always initialized to 0.
self.assertAllClose(t0, np.zeros_like(t0))
self.assertAllClose(t1, np.zeros_like(t1))
elif any(name.endswith(e) for e in one_initialized_vars):
# Variables that are always initialized to 1.
self.assertAllClose(t0, np.ones_like(t0))
self.assertAllClose(t1, np.ones_like(t1))
elif seed1 is not None and seed1 == seed2:
# Same random seed.
self.assertAllClose(t0, t1)
else:
# Different random seeds.
logging.info("name=%s, t0=%s, t1=%s", name, t0, t1)
self.assertNotAllClose(t0, t1)
@parameterized.named_parameters([
("WithRealData", False),
("WithFakeData", True),
])
@flagsaver.flagsaver
def testTrainingIsDeterministic(self, fake_dataset):
FLAGS.data_fake_dataset = fake_dataset
gin.bind_parameter("dataset.name", "cifar10")
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 3,
"z_dim": 128,
}
work_dir = self._get_empty_model_dir()
for i in range(2):
model_dir = os.path.join(work_dir, str(i))
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir, tf_random_seed=3)
task_manager = runner_lib.TaskManager(model_dir)
runner_lib.run_with_schedule(
"train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=False,
num_eval_averaging_runs=1)
checkpoint_path_0 = os.path.join(work_dir, "0/model.ckpt-3")
checkpoint_path_1 = os.path.join(work_dir, "1/model.ckpt-3")
checkpoint_reader_0 = tf.train.load_checkpoint(checkpoint_path_0)
checkpoint_reader_1 = tf.train.load_checkpoint(checkpoint_path_1)
for name, _ in tf.train.list_variables(checkpoint_path_0):
tf.logging.info(name)
t0 = checkpoint_reader_0.get_tensor(name)
t1 = checkpoint_reader_1.get_tensor(name)
self.assertAllClose(t0, t1, msg=name)
@parameterized.parameters([
{"use_tpu": False},
# {"use_tpu": True},
])
def testTrainAndEval(self, use_tpu):
gin.bind_parameter("dataset.name", "cifar10")
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 1,
"z_dim": 128,
}
model_dir = self._get_empty_model_dir()
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
task_manager = runner_lib.TaskManager(model_dir)
runner_lib.run_with_schedule(
"eval_after_train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=use_tpu,
num_eval_averaging_runs=1,
eval_every_steps=None)
expected_files = [
"TRAIN_DONE", "checkpoint", "model.ckpt-0.data-00000-of-00001",
"model.ckpt-0.index", "model.ckpt-0.meta",
"model.ckpt-1.data-00000-of-00001", "model.ckpt-1.index",
"model.ckpt-1.meta", "operative_config-0.gin", "tfhub"]
self.assertAllInSet(expected_files, tf.gfile.ListDirectory(model_dir))
def testTrainAndEvalWithSpectralNormAndEma(self):
gin.bind_parameter("dataset.name", "cifar10")
gin.bind_parameter("ModularGAN.g_use_ema", True)
gin.bind_parameter("G.spectral_norm", True)
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 1,
"z_dim": 128,
}
model_dir = self._get_empty_model_dir()
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
task_manager = runner_lib.TaskManager(model_dir)
runner_lib.run_with_schedule(
"eval_after_train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=False,
num_eval_averaging_runs=1,
eval_every_steps=None)
expected_files = [
"TRAIN_DONE", "checkpoint", "model.ckpt-0.data-00000-of-00001",
"model.ckpt-0.index", "model.ckpt-0.meta",
"model.ckpt-1.data-00000-of-00001", "model.ckpt-1.index",
"model.ckpt-1.meta", "operative_config-0.gin", "tfhub"]
self.assertAllInSet(expected_files, tf.gfile.ListDirectory(model_dir))
def testTrainAndEvalWithBatchNormAccu(self):
gin.bind_parameter("dataset.name", "cifar10")
gin.bind_parameter("standardize_batch.use_moving_averages", False)
gin.bind_parameter("G.batch_norm_fn", arch_ops.batch_norm)
options = {
"architecture": "resnet_cifar_arch",
"batch_size": 2,
"disc_iters": 1,
"gan_class": ModularGAN,
"lambda": 1,
"training_steps": 1,
"z_dim": 128,
}
model_dir = self._get_empty_model_dir()
run_config = tf.contrib.tpu.RunConfig(
model_dir=model_dir,
tpu_config=tf.contrib.tpu.TPUConfig(iterations_per_loop=1))
task_manager = runner_lib.TaskManager(model_dir)
# Wrap _UpdateBnAccumulators to only perform one accumulator update step.
# Otherwise the test case would time out.
orig_update_bn_accumulators = eval_gan_lib._update_bn_accumulators
def mock_update_bn_accumulators(sess, generated, num_accu_examples):
del num_accu_examples
return orig_update_bn_accumulators(sess, generated, num_accu_examples=64)
eval_gan_lib._update_bn_accumulators = mock_update_bn_accumulators
runner_lib.run_with_schedule(
"eval_after_train",
run_config=run_config,
task_manager=task_manager,
options=options,
use_tpu=False,
num_eval_averaging_runs=1,
eval_every_steps=None)
expected_tfhub_files = [
"checkpoint", "model-with-accu.ckpt.data-00000-of-00001",
"model-with-accu.ckpt.index", "model-with-accu.ckpt.meta"]
self.assertAllInSet(
expected_tfhub_files,
tf.gfile.ListDirectory(os.path.join(model_dir, "tfhub/0")))
if __name__ == "__main__":
tf.test.main()
|
import sys
import mne
from mne.bem import make_watershed_bem
from mne.utils import _check_option
def run():
"""Run command."""
from mne.commands.utils import get_optparser, _add_verbose_flag
parser = get_optparser(__file__)
parser.add_option("-s", "--subject", dest="subject",
help="Subject name (required)", default=None)
parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
help="Subjects directory", default=None)
parser.add_option("-o", "--overwrite", dest="overwrite",
help="Write over existing files", action="store_true")
parser.add_option("-v", "--volume", dest="volume",
help="Defaults to T1", default='T1')
parser.add_option("-a", "--atlas", dest="atlas",
help="Specify the --atlas option for mri_watershed",
default=False, action="store_true")
parser.add_option("-g", "--gcaatlas", dest="gcaatlas",
help="Specify the --brain_atlas option for "
"mri_watershed", default=False, action="store_true")
parser.add_option("-p", "--preflood", dest="preflood",
help="Change the preflood height", default=None)
parser.add_option("--copy", dest="copy",
help="Use copies instead of symlinks for surfaces",
action="store_true")
parser.add_option("-t", "--T1", dest="T1",
help="Whether or not to pass the -T1 flag "
"(can be true, false, 0, or 1). "
"By default it takes the same value as gcaatlas.",
default=None)
parser.add_option("-b", "--brainmask", dest="brainmask",
help="The filename for the brainmask output file "
"relative to the "
"$SUBJECTS_DIR/$SUBJECT/bem/watershed/ directory.",
default="ws")
_add_verbose_flag(parser)
options, args = parser.parse_args()
if options.subject is None:
parser.print_help()
sys.exit(1)
subject = options.subject
subjects_dir = options.subjects_dir
overwrite = options.overwrite
volume = options.volume
atlas = options.atlas
gcaatlas = options.gcaatlas
preflood = options.preflood
copy = options.copy
brainmask = options.brainmask
T1 = options.T1
if T1 is not None:
T1 = T1.lower()
_check_option("--T1", T1, ('true', 'false', '0', '1'))
T1 = T1 in ('true', '1')
verbose = options.verbose
make_watershed_bem(subject=subject, subjects_dir=subjects_dir,
overwrite=overwrite, volume=volume, atlas=atlas,
gcaatlas=gcaatlas, preflood=preflood, copy=copy,
T1=T1, brainmask=brainmask, verbose=verbose)
mne.utils.run_command_if_main()
|
from openrazer_daemon.dbus_services import endpoint
@endpoint('razer.device.lighting.bw2013', 'setPulsate')
def bw_set_pulsate(self):
"""
Set pulsate mode
"""
self.logger.debug("DBus call bw_set_pulsate")
driver_path = self.get_driver_path('matrix_effect_pulsate')
# remember effect
self.set_persistence("backlight", "effect", 'pulsate')
with open(driver_path, 'w') as driver_file:
driver_file.write('1')
# Notify others
self.send_effect_event('setPulsate')
@endpoint('razer.device.lighting.bw2013', 'setStatic')
def bw_set_static(self):
"""
Set static mode
"""
self.logger.debug("DBus call bw_set_static")
driver_path = self.get_driver_path('matrix_effect_static')
# remember effect
self.set_persistence("backlight", "effect", 'static')
with open(driver_path, 'w') as driver_file:
driver_file.write('1')
# Notify others
self.send_effect_event('setStatic')
|
from pyflunearyou.errors import FluNearYouError
from homeassistant import data_entry_flow
from homeassistant.components.flunearyou import DOMAIN
from homeassistant.config_entries import SOURCE_USER
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from tests.async_mock import patch
from tests.common import MockConfigEntry
async def test_duplicate_error(hass):
"""Test that an error is shown when duplicates are added."""
conf = {CONF_LATITUDE: "51.528308", CONF_LONGITUDE: "-0.3817765"}
MockConfigEntry(
domain=DOMAIN, unique_id="51.528308, -0.3817765", data=conf
).add_to_hass(hass)
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
async def test_general_error(hass):
"""Test that an error is shown on a library error."""
conf = {CONF_LATITUDE: "51.528308", CONF_LONGITUDE: "-0.3817765"}
with patch(
"pyflunearyou.cdc.CdcReport.status_by_coordinates",
side_effect=FluNearYouError,
):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["errors"] == {"base": "unknown"}
async def test_show_form(hass):
"""Test that the form is served with no input."""
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["step_id"] == "user"
async def test_step_user(hass):
"""Test that the user step works."""
conf = {CONF_LATITUDE: "51.528308", CONF_LONGITUDE: "-0.3817765"}
with patch(
"homeassistant.components.flunearyou.async_setup_entry", return_value=True
), patch("pyflunearyou.cdc.CdcReport.status_by_coordinates"):
result = await hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_USER}, data=conf
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "51.528308, -0.3817765"
assert result["data"] == {
CONF_LATITUDE: "51.528308",
CONF_LONGITUDE: "-0.3817765",
}
|
from collections import namedtuple
import pytest
from homeassistant.components.esphome import DATA_KEY
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_PORT
from homeassistant.data_entry_flow import (
RESULT_TYPE_ABORT,
RESULT_TYPE_CREATE_ENTRY,
RESULT_TYPE_FORM,
)
from tests.async_mock import AsyncMock, MagicMock, patch
from tests.common import MockConfigEntry
MockDeviceInfo = namedtuple("DeviceInfo", ["uses_password", "name"])
@pytest.fixture
def mock_client():
"""Mock APIClient."""
with patch("homeassistant.components.esphome.config_flow.APIClient") as mock_client:
def mock_constructor(loop, host, port, password, zeroconf_instance=None):
"""Fake the client constructor."""
mock_client.host = host
mock_client.port = port
mock_client.password = password
mock_client.zeroconf_instance = zeroconf_instance
return mock_client
mock_client.side_effect = mock_constructor
mock_client.connect = AsyncMock()
mock_client.disconnect = AsyncMock()
yield mock_client
@pytest.fixture(autouse=True)
def mock_api_connection_error():
"""Mock out the try login method."""
with patch(
"homeassistant.components.esphome.config_flow.APIConnectionError",
new_callable=lambda: OSError,
) as mock_error:
yield mock_error
async def test_user_connection_works(hass, mock_client):
"""Test we can finish a config flow."""
result = await hass.config_entries.flow.async_init(
"esphome",
context={"source": "user"},
data=None,
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
mock_client.device_info = AsyncMock(return_value=MockDeviceInfo(False, "test"))
result = await hass.config_entries.flow.async_init(
"esphome",
context={"source": "user"},
data={CONF_HOST: "127.0.0.1", CONF_PORT: 80},
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {CONF_HOST: "127.0.0.1", CONF_PORT: 80, CONF_PASSWORD: ""}
assert result["title"] == "test"
assert len(mock_client.connect.mock_calls) == 1
assert len(mock_client.device_info.mock_calls) == 1
assert len(mock_client.disconnect.mock_calls) == 1
assert mock_client.host == "127.0.0.1"
assert mock_client.port == 80
assert mock_client.password == ""
async def test_user_resolve_error(hass, mock_api_connection_error, mock_client):
"""Test user step with IP resolve error."""
class MockResolveError(mock_api_connection_error):
"""Create an exception with a specific error message."""
def __init__(self):
"""Initialize."""
super().__init__("Error resolving IP address")
with patch(
"homeassistant.components.esphome.config_flow.APIConnectionError",
new_callable=lambda: MockResolveError,
) as exc:
mock_client.device_info.side_effect = exc
result = await hass.config_entries.flow.async_init(
"esphome",
context={"source": "user"},
data={CONF_HOST: "127.0.0.1", CONF_PORT: 6053},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "resolve_error"}
assert len(mock_client.connect.mock_calls) == 1
assert len(mock_client.device_info.mock_calls) == 1
assert len(mock_client.disconnect.mock_calls) == 1
async def test_user_connection_error(hass, mock_api_connection_error, mock_client):
"""Test user step with connection error."""
mock_client.device_info.side_effect = mock_api_connection_error
result = await hass.config_entries.flow.async_init(
"esphome",
context={"source": "user"},
data={CONF_HOST: "127.0.0.1", CONF_PORT: 6053},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "user"
assert result["errors"] == {"base": "connection_error"}
assert len(mock_client.connect.mock_calls) == 1
assert len(mock_client.device_info.mock_calls) == 1
assert len(mock_client.disconnect.mock_calls) == 1
async def test_user_with_password(hass, mock_client):
"""Test user step with password."""
mock_client.device_info = AsyncMock(return_value=MockDeviceInfo(True, "test"))
result = await hass.config_entries.flow.async_init(
"esphome",
context={"source": "user"},
data={CONF_HOST: "127.0.0.1", CONF_PORT: 6053},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_PASSWORD: "password1"}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["data"] == {
CONF_HOST: "127.0.0.1",
CONF_PORT: 6053,
CONF_PASSWORD: "password1",
}
assert mock_client.password == "password1"
async def test_user_invalid_password(hass, mock_api_connection_error, mock_client):
"""Test user step with invalid password."""
mock_client.device_info = AsyncMock(return_value=MockDeviceInfo(True, "test"))
result = await hass.config_entries.flow.async_init(
"esphome",
context={"source": "user"},
data={CONF_HOST: "127.0.0.1", CONF_PORT: 6053},
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
mock_client.connect.side_effect = mock_api_connection_error
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={CONF_PASSWORD: "invalid"}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "authenticate"
assert result["errors"] == {"base": "invalid_auth"}
async def test_discovery_initiation(hass, mock_client):
"""Test discovery importing works."""
mock_client.device_info = AsyncMock(return_value=MockDeviceInfo(False, "test8266"))
service_info = {
"host": "192.168.43.183",
"port": 6053,
"hostname": "test8266.local.",
"properties": {},
}
flow = await hass.config_entries.flow.async_init(
"esphome", context={"source": "zeroconf"}, data=service_info
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"], user_input={}
)
assert result["type"] == RESULT_TYPE_CREATE_ENTRY
assert result["title"] == "test8266"
assert result["data"][CONF_HOST] == "192.168.43.183"
assert result["data"][CONF_PORT] == 6053
assert result["result"]
assert result["result"].unique_id == "test8266"
async def test_discovery_already_configured_hostname(hass, mock_client):
"""Test discovery aborts if already configured via hostname."""
entry = MockConfigEntry(
domain="esphome",
data={CONF_HOST: "test8266.local", CONF_PORT: 6053, CONF_PASSWORD: ""},
)
entry.add_to_hass(hass)
service_info = {
"host": "192.168.43.183",
"port": 6053,
"hostname": "test8266.local.",
"properties": {},
}
result = await hass.config_entries.flow.async_init(
"esphome", context={"source": "zeroconf"}, data=service_info
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert entry.unique_id == "test8266"
async def test_discovery_already_configured_ip(hass, mock_client):
"""Test discovery aborts if already configured via static IP."""
entry = MockConfigEntry(
domain="esphome",
data={CONF_HOST: "192.168.43.183", CONF_PORT: 6053, CONF_PASSWORD: ""},
)
entry.add_to_hass(hass)
service_info = {
"host": "192.168.43.183",
"port": 6053,
"hostname": "test8266.local.",
"properties": {"address": "192.168.43.183"},
}
result = await hass.config_entries.flow.async_init(
"esphome", context={"source": "zeroconf"}, data=service_info
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert entry.unique_id == "test8266"
async def test_discovery_already_configured_name(hass, mock_client):
"""Test discovery aborts if already configured via name."""
entry = MockConfigEntry(
domain="esphome",
data={CONF_HOST: "192.168.43.183", CONF_PORT: 6053, CONF_PASSWORD: ""},
)
entry.add_to_hass(hass)
mock_entry_data = MagicMock()
mock_entry_data.device_info.name = "test8266"
hass.data[DATA_KEY] = {entry.entry_id: mock_entry_data}
service_info = {
"host": "192.168.43.184",
"port": 6053,
"hostname": "test8266.local.",
"properties": {"address": "test8266.local"},
}
result = await hass.config_entries.flow.async_init(
"esphome", context={"source": "zeroconf"}, data=service_info
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert entry.unique_id == "test8266"
assert entry.data[CONF_HOST] == "192.168.43.184"
async def test_discovery_duplicate_data(hass, mock_client):
"""Test discovery aborts if same mDNS packet arrives."""
service_info = {
"host": "192.168.43.183",
"port": 6053,
"hostname": "test8266.local.",
"properties": {"address": "test8266.local"},
}
mock_client.device_info = AsyncMock(return_value=MockDeviceInfo(False, "test8266"))
result = await hass.config_entries.flow.async_init(
"esphome", data=service_info, context={"source": "zeroconf"}
)
assert result["type"] == RESULT_TYPE_FORM
assert result["step_id"] == "discovery_confirm"
result = await hass.config_entries.flow.async_init(
"esphome", data=service_info, context={"source": "zeroconf"}
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_in_progress"
async def test_discovery_updates_unique_id(hass, mock_client):
"""Test a duplicate discovery host aborts and updates existing entry."""
entry = MockConfigEntry(
domain="esphome",
data={CONF_HOST: "192.168.43.183", CONF_PORT: 6053, CONF_PASSWORD: ""},
)
entry.add_to_hass(hass)
service_info = {
"host": "192.168.43.183",
"port": 6053,
"hostname": "test8266.local.",
"properties": {"address": "test8266.local"},
}
result = await hass.config_entries.flow.async_init(
"esphome", context={"source": "zeroconf"}, data=service_info
)
assert result["type"] == RESULT_TYPE_ABORT
assert result["reason"] == "already_configured"
assert entry.unique_id == "test8266"
|
import numpy as np
from os import path
import datetime
import calendar
from ...utils import logger, fill_doc
from ..utils import _read_segments_file, _find_channels, _create_chs
from ..base import BaseRaw
from ..meas_info import _empty_info
from ..constants import FIFF
@fill_doc
def read_raw_nicolet(input_fname, ch_type, eog=(),
ecg=(), emg=(), misc=(), preload=False, verbose=None):
"""Read Nicolet data as raw object.
Note: This reader takes data files with the extension ``.data`` as an
input. The header file with the same file name stem and an extension
``.head`` is expected to be found in the same directory.
Parameters
----------
input_fname : str
Path to the data file.
ch_type : str
Channel type to designate to the data channels. Supported data types
include 'eeg', 'seeg'.
eog : list | tuple | 'auto'
Names of channels or list of indices that should be designated
EOG channels. If 'auto', the channel names beginning with
``EOG`` are used. Defaults to empty tuple.
ecg : list or tuple | 'auto'
Names of channels or list of indices that should be designated
ECG channels. If 'auto', the channel names beginning with
``ECG`` are used. Defaults to empty tuple.
emg : list or tuple | 'auto'
Names of channels or list of indices that should be designated
EMG channels. If 'auto', the channel names beginning with
``EMG`` are used. Defaults to empty tuple.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Defaults to empty tuple.
%(preload)s
%(verbose)s
Returns
-------
raw : instance of Raw
A Raw object containing the data.
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
return RawNicolet(input_fname, ch_type, eog=eog, ecg=ecg,
emg=emg, misc=misc, preload=preload, verbose=verbose)
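# Illustrative usage (a sketch, not from the library docs; the file names are
# assumptions): the reader expects ``recording.data`` together with a
# ``recording.head`` header file in the same directory.
#
#   raw = read_raw_nicolet('recording.data', ch_type='eeg', preload=True)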
def _get_nicolet_info(fname, ch_type, eog, ecg, emg, misc):
"""Extract info from Nicolet header files."""
fname = path.splitext(fname)[0]
header = fname + '.head'
logger.info('Reading header...')
header_info = dict()
with open(header, 'r') as fid:
for line in fid:
var, value = line.split('=')
if var == 'elec_names':
value = value[1:-2].split(',') # strip brackets
elif var == 'conversion_factor':
value = float(value)
elif var != 'start_ts':
value = int(value)
header_info[var] = value
ch_names = header_info['elec_names']
if eog == 'auto':
eog = _find_channels(ch_names, 'EOG')
if ecg == 'auto':
ecg = _find_channels(ch_names, 'ECG')
if emg == 'auto':
emg = _find_channels(ch_names, 'EMG')
date, time = header_info['start_ts'].split()
date = date.split('-')
time = time.split(':')
sec, msec = time[2].split('.')
date = datetime.datetime(int(date[0]), int(date[1]), int(date[2]),
int(time[0]), int(time[1]), int(sec), int(msec))
info = _empty_info(header_info['sample_freq'])
info['meas_date'] = (calendar.timegm(date.utctimetuple()), 0)
if ch_type == 'eeg':
ch_coil = FIFF.FIFFV_COIL_EEG
ch_kind = FIFF.FIFFV_EEG_CH
elif ch_type == 'seeg':
ch_coil = FIFF.FIFFV_COIL_EEG
ch_kind = FIFF.FIFFV_SEEG_CH
else:
raise TypeError("Channel type not recognized. Available types are "
"'eeg' and 'seeg'.")
cals = np.repeat(header_info['conversion_factor'] * 1e-6, len(ch_names))
info['chs'] = _create_chs(ch_names, cals, ch_coil, ch_kind, eog, ecg, emg,
misc)
info['highpass'] = 0.
info['lowpass'] = info['sfreq'] / 2.0
info._update_redundant()
return info, header_info
class RawNicolet(BaseRaw):
"""Raw object from Nicolet file.
Parameters
----------
input_fname : str
Path to the Nicolet file.
ch_type : str
Channel type to designate to the data channels. Supported data types
include 'eeg', 'seeg'.
eog : list | tuple | 'auto'
Names of channels or list of indices that should be designated
EOG channels. If 'auto', the channel names beginning with
``EOG`` are used. Defaults to empty tuple.
ecg : list or tuple | 'auto'
Names of channels or list of indices that should be designated
ECG channels. If 'auto', the channel names beginning with
``ECG`` are used. Defaults to empty tuple.
emg : list or tuple | 'auto'
Names of channels or list of indices that should be designated
EMG channels. If 'auto', the channel names beginning with
``EMG`` are used. Defaults to empty tuple.
misc : list or tuple
Names of channels or list of indices that should be designated
MISC channels. Defaults to empty tuple.
%(preload)s
%(verbose)s
See Also
--------
mne.io.Raw : Documentation of attribute and methods.
"""
def __init__(self, input_fname, ch_type, eog=(),
ecg=(), emg=(), misc=(), preload=False,
verbose=None): # noqa: D102
input_fname = path.abspath(input_fname)
info, header_info = _get_nicolet_info(input_fname, ch_type, eog, ecg,
emg, misc)
last_samps = [header_info['num_samples'] - 1]
super(RawNicolet, self).__init__(
info, preload, filenames=[input_fname], raw_extras=[header_info],
last_samps=last_samps, orig_format='int',
verbose=verbose)
def _read_segment_file(self, data, idx, fi, start, stop, cals, mult):
"""Read a chunk of raw data."""
_read_segments_file(
self, data, idx, fi, start, stop, cals, mult, dtype='<i2')
|
import os.path as op
import mne
from mne.utils import ETSContext
def run():
"""Run command."""
from mne.commands.utils import get_optparser, _add_verbose_flag
parser = get_optparser(__file__)
parser.add_option("-d", "--subjects-dir", dest="subjects_dir",
default=None, help="Subjects directory")
parser.add_option("-s", "--subject", dest="subject", default=None,
help="Subject name")
parser.add_option("-f", "--fiff", dest="inst", default=None,
help="FIFF file with digitizer data for coregistration")
parser.add_option("-t", "--tabbed", dest="tabbed", action="store_true",
default=False, help="Option for small screens: Combine "
"the data source panel and the coregistration panel "
"into a single panel with tabs.")
parser.add_option("--no-guess-mri", dest="guess_mri_subject",
action='store_false', default=None,
help="Prevent the GUI from automatically guessing and "
"changing the MRI subject when a new head shape source "
"file is selected.")
parser.add_option("--head-opacity", type=float, default=None,
dest="head_opacity",
help="The opacity of the head surface, in the range "
"[0, 1].")
parser.add_option("--high-res-head",
action='store_true', default=False, dest="high_res_head",
help="Use a high-resolution head surface.")
parser.add_option("--low-res-head",
action='store_true', default=False, dest="low_res_head",
help="Use a low-resolution head surface.")
parser.add_option('--trans', dest='trans', default=None,
help='Head<->MRI transform FIF file ("-trans.fif")')
parser.add_option('--project-eeg', dest='project_eeg',
action='store_true', default=None,
help="Project EEG electrodes to the head surface ("
"for visualization purposes only)")
parser.add_option('--orient-to-surface',
action='store_true', default=None,
dest='orient_to_surface',
help='Orient points to the surface.')
parser.add_option('--scale-by-distance',
action='store_true', default=None,
dest='scale_by_distance',
help='Scale points by distance from the surface.')
parser.add_option('--mark-inside',
action='store_true', default=None,
dest='mark_inside',
help='Mark points inside the head using a different '
'color.')
parser.add_option('--interaction',
type=str, default=None, dest='interaction',
help='Interaction style to use, can be "trackball" or '
'"terrain".')
parser.add_option('--scale',
type=float, default=None, dest='scale',
help='Scale factor for the scene.')
parser.add_option('--simple-rendering', action='store_false',
dest='advanced_rendering',
help='Use simplified OpenGL rendering')
_add_verbose_flag(parser)
options, args = parser.parse_args()
if options.low_res_head:
if options.high_res_head:
raise ValueError("Can't specify --high-res-head and "
"--low-res-head at the same time.")
head_high_res = False
elif options.high_res_head:
head_high_res = True
else:
head_high_res = None
# expanduser allows ~ for --subjects-dir
subjects_dir = options.subjects_dir
if subjects_dir is not None:
subjects_dir = op.expanduser(subjects_dir)
trans = options.trans
if trans is not None:
trans = op.expanduser(trans)
try:
import faulthandler
faulthandler.enable()
except ImportError:
pass # old Python2
with ETSContext():
mne.gui.coregistration(
options.tabbed, inst=options.inst, subject=options.subject,
subjects_dir=subjects_dir,
guess_mri_subject=options.guess_mri_subject,
head_opacity=options.head_opacity, head_high_res=head_high_res,
trans=trans, scrollable=True, project_eeg=options.project_eeg,
orient_to_surface=options.orient_to_surface,
scale_by_distance=options.scale_by_distance,
mark_inside=options.mark_inside, interaction=options.interaction,
scale=options.scale,
advanced_rendering=options.advanced_rendering,
verbose=options.verbose)
mne.utils.run_command_if_main()
|
from trashcli.put import describe
from .files import require_empty_dir, make_empty_file
import os
import unittest
class TestDescriptions(unittest.TestCase):
def setUp(self):
require_empty_dir('sandbox')
def test_on_directories(self):
assert "directory" == describe('.')
assert "directory" == describe("..")
assert "directory" == describe("sandbox")
def test_on_dot_directories(self):
assert "'.' directory" == describe("sandbox/.")
assert "'.' directory" == describe("./.")
def test_on_dot_dot_directories(self):
assert "'..' directory" == describe("./..")
assert "'..' directory" == describe("sandbox/..")
def test_name_for_regular_files_non_empty_files(self):
write_file("sandbox/non-empty", "contents")
assert "regular file" == describe("sandbox/non-empty")
def test_name_for_empty_file(self):
make_empty_file('sandbox/empty')
assert "regular empty file" == describe("sandbox/empty")
def test_name_for_symbolic_links(self):
os.symlink('nowhere', "sandbox/symlink")
assert "symbolic link" == describe("sandbox/symlink")
def test_name_for_non_existent_entries(self):
assert not os.path.exists('non-existent')
assert "non existent" == describe('non-existent')
def write_file(path, contents):
    with open(path, 'w') as f:
        f.write(contents)
|
from typing import Any
from pyisy.constants import COMMAND_FRIENDLY_NAME
import voluptuous as vol
from homeassistant.const import (
CONF_ADDRESS,
CONF_COMMAND,
CONF_NAME,
CONF_TYPE,
CONF_UNIT_OF_MEASUREMENT,
SERVICE_RELOAD,
)
from homeassistant.core import ServiceCall, callback
from homeassistant.helpers import entity_platform
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity_platform import async_get_platforms
import homeassistant.helpers.entity_registry as er
from homeassistant.helpers.typing import HomeAssistantType
from .const import (
_LOGGER,
DOMAIN,
ISY994_ISY,
ISY994_NODES,
ISY994_PROGRAMS,
ISY994_VARIABLES,
SUPPORTED_PLATFORMS,
SUPPORTED_PROGRAM_PLATFORMS,
)
# Common Services for All Platforms:
SERVICE_SYSTEM_QUERY = "system_query"
SERVICE_SET_VARIABLE = "set_variable"
SERVICE_SEND_PROGRAM_COMMAND = "send_program_command"
SERVICE_RUN_NETWORK_RESOURCE = "run_network_resource"
SERVICE_CLEANUP = "cleanup_entities"
INTEGRATION_SERVICES = [
SERVICE_SYSTEM_QUERY,
SERVICE_SET_VARIABLE,
SERVICE_SEND_PROGRAM_COMMAND,
SERVICE_RUN_NETWORK_RESOURCE,
SERVICE_CLEANUP,
]
# Entity specific methods (valid for most Groups/ISY Scenes, Lights, Switches, Fans)
SERVICE_SEND_RAW_NODE_COMMAND = "send_raw_node_command"
SERVICE_SEND_NODE_COMMAND = "send_node_command"
# Services valid only for dimmable lights.
SERVICE_SET_ON_LEVEL = "set_on_level"
SERVICE_SET_RAMP_RATE = "set_ramp_rate"
CONF_PARAMETERS = "parameters"
CONF_VALUE = "value"
CONF_INIT = "init"
CONF_ISY = "isy"
VALID_NODE_COMMANDS = [
"beep",
"brighten",
"dim",
"disable",
"enable",
"fade_down",
"fade_stop",
"fade_up",
"fast_off",
"fast_on",
"query",
]
VALID_PROGRAM_COMMANDS = [
"run",
"run_then",
"run_else",
"stop",
"enable",
"disable",
"enable_run_at_startup",
"disable_run_at_startup",
]
def valid_isy_commands(value: Any) -> str:
"""Validate the command is valid."""
value = str(value).upper()
if value in COMMAND_FRIENDLY_NAME:
return value
raise vol.Invalid("Invalid ISY Command.")
SCHEMA_GROUP = "name-address"
SERVICE_SYSTEM_QUERY_SCHEMA = vol.Schema(
{vol.Optional(CONF_ADDRESS): cv.string, vol.Optional(CONF_ISY): cv.string}
)
SERVICE_SET_RAMP_RATE_SCHEMA = {
vol.Required(CONF_VALUE): vol.All(vol.Coerce(int), vol.Range(0, 31))
}
SERVICE_SET_VALUE_SCHEMA = {
vol.Required(CONF_VALUE): vol.All(vol.Coerce(int), vol.Range(0, 255))
}
SERVICE_SEND_RAW_NODE_COMMAND_SCHEMA = {
vol.Required(CONF_COMMAND): vol.All(cv.string, valid_isy_commands),
vol.Optional(CONF_VALUE): vol.All(vol.Coerce(int), vol.Range(0, 255)),
vol.Optional(CONF_UNIT_OF_MEASUREMENT): vol.All(vol.Coerce(int), vol.Range(0, 120)),
vol.Optional(CONF_PARAMETERS, default={}): {cv.string: cv.string},
}
SERVICE_SEND_NODE_COMMAND_SCHEMA = {
vol.Required(CONF_COMMAND): vol.In(VALID_NODE_COMMANDS)
}
SERVICE_SET_VARIABLE_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_ADDRESS, CONF_TYPE, CONF_NAME),
vol.Schema(
{
vol.Exclusive(CONF_NAME, SCHEMA_GROUP): cv.string,
vol.Inclusive(CONF_ADDRESS, SCHEMA_GROUP): vol.Coerce(int),
vol.Inclusive(CONF_TYPE, SCHEMA_GROUP): vol.All(
vol.Coerce(int), vol.Range(1, 2)
),
vol.Optional(CONF_INIT, default=False): bool,
vol.Required(CONF_VALUE): vol.Coerce(int),
vol.Optional(CONF_ISY): cv.string,
}
),
)
SERVICE_SEND_PROGRAM_COMMAND_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_ADDRESS, CONF_NAME),
vol.Schema(
{
vol.Exclusive(CONF_NAME, SCHEMA_GROUP): cv.string,
vol.Exclusive(CONF_ADDRESS, SCHEMA_GROUP): cv.string,
vol.Required(CONF_COMMAND): vol.In(VALID_PROGRAM_COMMANDS),
vol.Optional(CONF_ISY): cv.string,
}
),
)
SERVICE_RUN_NETWORK_RESOURCE_SCHEMA = vol.All(
cv.has_at_least_one_key(CONF_ADDRESS, CONF_NAME),
vol.Schema(
{
vol.Exclusive(CONF_NAME, SCHEMA_GROUP): cv.string,
vol.Exclusive(CONF_ADDRESS, SCHEMA_GROUP): vol.Coerce(int),
vol.Optional(CONF_ISY): cv.string,
}
),
)
@callback
def async_setup_services(hass: HomeAssistantType):
"""Create and register services for the ISY integration."""
existing_services = hass.services.async_services().get(DOMAIN)
if existing_services and any(
service in INTEGRATION_SERVICES for service in existing_services
):
# Integration-level services have already been added. Return.
return
async def async_system_query_service_handler(service):
"""Handle a system query service call."""
address = service.data.get(CONF_ADDRESS)
isy_name = service.data.get(CONF_ISY)
for config_entry_id in hass.data[DOMAIN]:
isy = hass.data[DOMAIN][config_entry_id][ISY994_ISY]
if isy_name and not isy_name == isy.configuration["name"]:
continue
            # If an address is provided, make sure we query the correct ISY.
            # Otherwise, query the whole system on all connected ISYs.
if address and isy.nodes.get_by_id(address) is not None:
_LOGGER.debug(
"Requesting query of device %s on ISY %s",
address,
isy.configuration["uuid"],
)
await hass.async_add_executor_job(isy.query, address)
return
_LOGGER.debug(
"Requesting system query of ISY %s", isy.configuration["uuid"]
)
await hass.async_add_executor_job(isy.query)
async def async_run_network_resource_service_handler(service):
"""Handle a network resource service call."""
address = service.data.get(CONF_ADDRESS)
name = service.data.get(CONF_NAME)
isy_name = service.data.get(CONF_ISY)
for config_entry_id in hass.data[DOMAIN]:
isy = hass.data[DOMAIN][config_entry_id][ISY994_ISY]
if isy_name and not isy_name == isy.configuration["name"]:
continue
if not hasattr(isy, "networking") or isy.networking is None:
continue
command = None
if address:
command = isy.networking.get_by_id(address)
if name:
command = isy.networking.get_by_name(name)
if command is not None:
await hass.async_add_executor_job(command.run)
return
_LOGGER.error(
"Could not run network resource command. Not found or enabled on the ISY"
)
async def async_send_program_command_service_handler(service):
"""Handle a send program command service call."""
address = service.data.get(CONF_ADDRESS)
name = service.data.get(CONF_NAME)
command = service.data.get(CONF_COMMAND)
isy_name = service.data.get(CONF_ISY)
for config_entry_id in hass.data[DOMAIN]:
isy = hass.data[DOMAIN][config_entry_id][ISY994_ISY]
if isy_name and not isy_name == isy.configuration["name"]:
continue
program = None
if address:
program = isy.programs.get_by_id(address)
if name:
program = isy.programs.get_by_name(name)
if program is not None:
await hass.async_add_executor_job(getattr(program, command))
return
_LOGGER.error("Could not send program command. Not found or enabled on the ISY")
async def async_set_variable_service_handler(service):
"""Handle a set variable service call."""
address = service.data.get(CONF_ADDRESS)
vtype = service.data.get(CONF_TYPE)
name = service.data.get(CONF_NAME)
value = service.data.get(CONF_VALUE)
init = service.data.get(CONF_INIT, False)
isy_name = service.data.get(CONF_ISY)
for config_entry_id in hass.data[DOMAIN]:
isy = hass.data[DOMAIN][config_entry_id][ISY994_ISY]
if isy_name and not isy_name == isy.configuration["name"]:
continue
variable = None
if name:
variable = isy.variables.get_by_name(name)
if address and vtype:
variable = isy.variables.vobjs[vtype].get(address)
if variable is not None:
await hass.async_add_executor_job(variable.set_value, value, init)
return
_LOGGER.error("Could not set variable value. Not found or enabled on the ISY")
async def async_cleanup_registry_entries(service) -> None:
"""Remove extra entities that are no longer part of the integration."""
entity_registry = await er.async_get_registry(hass)
config_ids = []
current_unique_ids = []
for config_entry_id in hass.data[DOMAIN]:
entries_for_this_config = er.async_entries_for_config_entry(
entity_registry, config_entry_id
)
config_ids.extend(
[
(entity.unique_id, entity.entity_id)
for entity in entries_for_this_config
]
)
hass_isy_data = hass.data[DOMAIN][config_entry_id]
uuid = hass_isy_data[ISY994_ISY].configuration["uuid"]
for platform in SUPPORTED_PLATFORMS:
for node in hass_isy_data[ISY994_NODES][platform]:
if hasattr(node, "address"):
current_unique_ids.append(f"{uuid}_{node.address}")
for platform in SUPPORTED_PROGRAM_PLATFORMS:
for _, node, _ in hass_isy_data[ISY994_PROGRAMS][platform]:
if hasattr(node, "address"):
current_unique_ids.append(f"{uuid}_{node.address}")
for node in hass_isy_data[ISY994_VARIABLES]:
if hasattr(node, "address"):
current_unique_ids.append(f"{uuid}_{node.address}")
extra_entities = [
entity_id
for unique_id, entity_id in config_ids
if unique_id not in current_unique_ids
]
for entity_id in extra_entities:
if entity_registry.async_is_registered(entity_id):
entity_registry.async_remove(entity_id)
_LOGGER.debug(
"Cleaning up ISY994 Entities and devices: Config Entries: %s, Current Entries: %s, "
"Extra Entries Removed: %s",
len(config_ids),
len(current_unique_ids),
len(extra_entities),
)
async def async_reload_config_entries(service) -> None:
"""Trigger a reload of all ISY994 config entries."""
for config_entry_id in hass.data[DOMAIN]:
hass.async_create_task(hass.config_entries.async_reload(config_entry_id))
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SYSTEM_QUERY,
service_func=async_system_query_service_handler,
schema=SERVICE_SYSTEM_QUERY_SCHEMA,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_RUN_NETWORK_RESOURCE,
service_func=async_run_network_resource_service_handler,
schema=SERVICE_RUN_NETWORK_RESOURCE_SCHEMA,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SEND_PROGRAM_COMMAND,
service_func=async_send_program_command_service_handler,
schema=SERVICE_SEND_PROGRAM_COMMAND_SCHEMA,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SET_VARIABLE,
service_func=async_set_variable_service_handler,
schema=SERVICE_SET_VARIABLE_SCHEMA,
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_CLEANUP,
service_func=async_cleanup_registry_entries,
)
hass.services.async_register(
domain=DOMAIN, service=SERVICE_RELOAD, service_func=async_reload_config_entries
)
async def _async_send_raw_node_command(call: ServiceCall):
await hass.helpers.service.entity_service_call(
async_get_platforms(hass, DOMAIN), SERVICE_SEND_RAW_NODE_COMMAND, call
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SEND_RAW_NODE_COMMAND,
schema=cv.make_entity_service_schema(SERVICE_SEND_RAW_NODE_COMMAND_SCHEMA),
service_func=_async_send_raw_node_command,
)
async def _async_send_node_command(call: ServiceCall):
await hass.helpers.service.entity_service_call(
async_get_platforms(hass, DOMAIN), SERVICE_SEND_NODE_COMMAND, call
)
hass.services.async_register(
domain=DOMAIN,
service=SERVICE_SEND_NODE_COMMAND,
schema=cv.make_entity_service_schema(SERVICE_SEND_NODE_COMMAND_SCHEMA),
service_func=_async_send_node_command,
)
@callback
def async_unload_services(hass: HomeAssistantType):
"""Unload services for the ISY integration."""
if hass.data[DOMAIN]:
# There is still another config entry for this domain, don't remove services.
return
existing_services = hass.services.async_services().get(DOMAIN)
if not existing_services or not any(
service in INTEGRATION_SERVICES for service in existing_services
):
return
_LOGGER.info("Unloading ISY994 Services")
hass.services.async_remove(domain=DOMAIN, service=SERVICE_SYSTEM_QUERY)
hass.services.async_remove(domain=DOMAIN, service=SERVICE_RUN_NETWORK_RESOURCE)
hass.services.async_remove(domain=DOMAIN, service=SERVICE_SEND_PROGRAM_COMMAND)
hass.services.async_remove(domain=DOMAIN, service=SERVICE_SET_VARIABLE)
hass.services.async_remove(domain=DOMAIN, service=SERVICE_CLEANUP)
hass.services.async_remove(domain=DOMAIN, service=SERVICE_RELOAD)
hass.services.async_remove(domain=DOMAIN, service=SERVICE_SEND_RAW_NODE_COMMAND)
hass.services.async_remove(domain=DOMAIN, service=SERVICE_SEND_NODE_COMMAND)
@callback
def async_setup_light_services(hass: HomeAssistantType):
"""Create device-specific services for the ISY Integration."""
platform = entity_platform.current_platform.get()
platform.async_register_entity_service(
SERVICE_SET_ON_LEVEL, SERVICE_SET_VALUE_SCHEMA, SERVICE_SET_ON_LEVEL
)
platform.async_register_entity_service(
SERVICE_SET_RAMP_RATE, SERVICE_SET_RAMP_RATE_SCHEMA, SERVICE_SET_RAMP_RATE
)
|
from django.utils.translation import gettext_lazy as _
from weblate.addons.base import StoreBaseAddon
from weblate.addons.forms import JSONCustomizeForm
class JSONCustomizeAddon(StoreBaseAddon):
name = "weblate.json.customize"
verbose = _("Customize JSON output")
description = _(
"Allows adjusting JSON output behavior, for example " "indentation or sorting."
)
settings_form = JSONCustomizeForm
compat = {
"file_format": {
"json",
"json-nested",
"webextension",
"i18next",
"arb",
"go-i18n-json",
}
}
def store_post_load(self, translation, store):
config = self.instance.configuration
store.store.dump_args["indent"] = int(config.get("indent", 4))
store.store.dump_args["sort_keys"] = bool(int(config.get("sort_keys", 0)))
|
import os
import importlib
import inspect
import shutil
import sys
sys.path.insert(0, os.path.abspath('.'))
import pytest
from sandman2 import get_app, db
@pytest.yield_fixture(scope='function')
def app(request):
"""Yield the application instance."""
database = getattr(request.module, 'database', 'db.sqlite3')
read_only = getattr(request.module, 'read_only', False)
exclude_tables = getattr(request.module, 'exclude_tables', None)
test_database_path = os.path.join('tests', 'data', 'test_db.sqlite3')
pristine_database_path = os.path.join('tests', 'data', database)
shutil.copy(pristine_database_path, test_database_path)
model_module = getattr(request.module, 'model_module', None)
user_models = []
if model_module:
module = importlib.import_module(model_module)
for name, obj in inspect.getmembers(module):
if inspect.isclass(obj):
if name not in ('Model', 'AutomapModel'):
user_models.append(obj)
application = get_app(
'sqlite+pysqlite:///{}'.format(
test_database_path),
user_models=user_models,
exclude_tables=exclude_tables,
read_only=read_only)
application.testing = True
yield application
with application.app_context():
db.session.remove()
db.drop_all()
os.unlink(test_database_path)
|
import logging
from pydanfossair.commands import ReadCommand, UpdateCommand
from homeassistant.components.switch import SwitchEntity
from . import DOMAIN as DANFOSS_AIR_DOMAIN
_LOGGER = logging.getLogger(__name__)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Danfoss Air HRV switch platform."""
data = hass.data[DANFOSS_AIR_DOMAIN]
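    # Each entry below is [friendly name, state read command, activate command,
    # deactivate command], matching the DanfossAir constructor arguments.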
switches = [
[
"Danfoss Air Boost",
ReadCommand.boost,
UpdateCommand.boost_activate,
UpdateCommand.boost_deactivate,
],
[
"Danfoss Air Bypass",
ReadCommand.bypass,
UpdateCommand.bypass_activate,
UpdateCommand.bypass_deactivate,
],
[
"Danfoss Air Automatic Bypass",
ReadCommand.automatic_bypass,
UpdateCommand.bypass_activate,
UpdateCommand.bypass_deactivate,
],
]
dev = []
for switch in switches:
dev.append(DanfossAir(data, switch[0], switch[1], switch[2], switch[3]))
add_entities(dev)
class DanfossAir(SwitchEntity):
"""Representation of a Danfoss Air HRV Switch."""
def __init__(self, data, name, state_command, on_command, off_command):
"""Initialize the switch."""
self._data = data
self._name = name
self._state_command = state_command
self._on_command = on_command
self._off_command = off_command
self._state = None
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def is_on(self):
"""Return true if switch is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the switch on."""
_LOGGER.debug("Turning on switch with command %s", self._on_command)
self._data.update_state(self._on_command, self._state_command)
def turn_off(self, **kwargs):
"""Turn the switch off."""
_LOGGER.debug("Turning off switch with command %s", self._off_command)
self._data.update_state(self._off_command, self._state_command)
def update(self):
"""Update the switch's state."""
self._data.update()
self._state = self._data.get_value(self._state_command)
if self._state is None:
_LOGGER.debug("Could not get data for %s", self._state_command)
|
revision = "9392b9f9a805"
down_revision = "5ae0ecefb01f"
from alembic import op
from sqlalchemy_utils import ArrowType
import sqlalchemy as sa
def upgrade():
op.add_column(
"pending_certs",
sa.Column(
"last_updated",
ArrowType,
server_default=sa.text("now()"),
onupdate=sa.text("now()"),
nullable=False,
),
)
def downgrade():
op.drop_column("pending_certs", "last_updated")
|
from copy import deepcopy
from homeassistant.components.cover import (
DOMAIN as COVER_DOMAIN,
SERVICE_CLOSE_COVER,
SERVICE_OPEN_COVER,
SERVICE_STOP_COVER,
)
from homeassistant.components.deconz.const import DOMAIN as DECONZ_DOMAIN
from homeassistant.components.deconz.gateway import get_gateway_from_config_entry
from homeassistant.const import ATTR_ENTITY_ID, STATE_CLOSED, STATE_OPEN
from homeassistant.setup import async_setup_component
from .test_gateway import DECONZ_WEB_REQUEST, setup_deconz_integration
from tests.async_mock import patch
COVERS = {
"1": {
"id": "Level controllable cover id",
"name": "Level controllable cover",
"type": "Level controllable output",
"state": {"bri": 254, "on": False, "reachable": True},
"modelid": "Not zigbee spec",
"uniqueid": "00:00:00:00:00:00:00:00-00",
},
"2": {
"id": "Window covering device id",
"name": "Window covering device",
"type": "Window covering device",
"state": {"bri": 254, "on": True, "reachable": True},
"modelid": "lumi.curtain",
"uniqueid": "00:00:00:00:00:00:00:01-00",
},
"3": {
"id": "Unsupported cover id",
"name": "Unsupported cover",
"type": "Not a cover",
"state": {"reachable": True},
"uniqueid": "00:00:00:00:00:00:00:02-00",
},
"4": {
"id": "deconz old brightness cover id",
"name": "deconz old brightness cover",
"type": "Level controllable output",
"state": {"bri": 255, "on": False, "reachable": True},
"modelid": "Not zigbee spec",
"uniqueid": "00:00:00:00:00:00:00:03-00",
},
"5": {
"id": "Window covering controller id",
"name": "Window covering controller",
"type": "Window covering controller",
"state": {"bri": 254, "on": True, "reachable": True},
"modelid": "Motor controller",
"uniqueid": "00:00:00:00:00:00:00:04-00",
},
}
async def test_platform_manually_configured(hass):
"""Test that we do not discover anything or try to set up a gateway."""
assert (
await async_setup_component(
hass, COVER_DOMAIN, {"cover": {"platform": DECONZ_DOMAIN}}
)
is True
)
assert DECONZ_DOMAIN not in hass.data
async def test_no_covers(hass):
"""Test that no cover entities are created."""
await setup_deconz_integration(hass)
assert len(hass.states.async_all()) == 0
async def test_cover(hass):
"""Test that all supported cover entities are created."""
data = deepcopy(DECONZ_WEB_REQUEST)
data["lights"] = deepcopy(COVERS)
config_entry = await setup_deconz_integration(hass, get_state_response=data)
gateway = get_gateway_from_config_entry(hass, config_entry)
assert len(hass.states.async_all()) == 5
assert hass.states.get("cover.level_controllable_cover").state == STATE_OPEN
assert hass.states.get("cover.window_covering_device").state == STATE_CLOSED
assert hass.states.get("cover.unsupported_cover") is None
assert hass.states.get("cover.deconz_old_brightness_cover").state == STATE_OPEN
assert hass.states.get("cover.window_covering_controller").state == STATE_CLOSED
# Event signals cover is closed
state_changed_event = {
"t": "event",
"e": "changed",
"r": "lights",
"id": "1",
"state": {"on": True},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
assert hass.states.get("cover.level_controllable_cover").state == STATE_CLOSED
# Verify service calls
level_controllable_cover_device = gateway.api.lights["1"]
# Service open cover
with patch.object(
level_controllable_cover_device, "_request", return_value=True
) as set_callback:
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_OPEN_COVER,
{ATTR_ENTITY_ID: "cover.level_controllable_cover"},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/lights/1/state", json={"on": False})
# Service close cover
with patch.object(
level_controllable_cover_device, "_request", return_value=True
) as set_callback:
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_CLOSE_COVER,
{ATTR_ENTITY_ID: "cover.level_controllable_cover"},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with(
"put", "/lights/1/state", json={"on": True, "bri": 254}
)
# Service stop cover movement
with patch.object(
level_controllable_cover_device, "_request", return_value=True
) as set_callback:
await hass.services.async_call(
COVER_DOMAIN,
SERVICE_STOP_COVER,
{ATTR_ENTITY_ID: "cover.level_controllable_cover"},
blocking=True,
)
await hass.async_block_till_done()
set_callback.assert_called_with("put", "/lights/1/state", json={"bri_inc": 0})
# Test that a reported cover position of 255 (deconz-rest-api < 2.05.73) is interpreted correctly.
assert hass.states.get("cover.deconz_old_brightness_cover").state == STATE_OPEN
state_changed_event = {
"t": "event",
"e": "changed",
"r": "lights",
"id": "4",
"state": {"on": True},
}
gateway.api.event_handler(state_changed_event)
await hass.async_block_till_done()
deconz_old_brightness_cover = hass.states.get("cover.deconz_old_brightness_cover")
assert deconz_old_brightness_cover.state == STATE_CLOSED
assert deconz_old_brightness_cover.attributes["current_position"] == 0
await hass.config_entries.async_unload(config_entry.entry_id)
assert len(hass.states.async_all()) == 0
|
from __future__ import absolute_import
import logging
from ..auth import get_auth, authenticate
logger = logging.getLogger(__name__)
def do_db_auth(host, connection, db_name):
"""
Attempts to authenticate against the mongo instance.
Tries:
- Auth'ing against admin as 'admin' ; credentials: <host>/arctic/admin/admin
- Auth'ing against db_name (which may be None if auth'ing against admin above)
returns True if authentication succeeded.
"""
admin_creds = get_auth(host, 'admin', 'admin')
user_creds = get_auth(host, 'arctic', db_name)
# Attempt to authenticate the connection
# Try at 'admin level' first as this allows us to enableSharding, which we want
if admin_creds is None:
# Get ordinary credentials for authenticating against the DB
if user_creds is None:
logger.error("You need credentials for db '%s' on '%s', or admin credentials" % (db_name, host))
return False
if not authenticate(connection[db_name], user_creds.user, user_creds.password):
logger.error("Failed to authenticate to db '%s' on '%s', using user credentials" % (db_name, host))
return False
return True
elif not authenticate(connection.admin, admin_creds.user, admin_creds.password):
logger.error("Failed to authenticate to '%s' as Admin. Giving up." % (host))
return False
    # Ensure we attempt to auth against the user DB, for non-privileged users to get access
authenticate(connection[db_name], user_creds.user, user_creds.password)
return True
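# Illustrative call (a sketch, not part of this module; host and database names
# are assumptions):
#
#   from pymongo import MongoClient
#   if not do_db_auth('research', MongoClient('research'), 'arctic_jdoe'):
#       raise RuntimeError('Could not authenticate against MongoDB')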
def setup_logging():
""" Logging setup for console scripts
"""
logging.basicConfig(format='%(asctime)s %(message)s', level='INFO')
|
import unittest
from perfkitbenchmarker import regex_util
class ExtractGroupTestCase(unittest.TestCase):
def testMatches(self):
regex = r'test ([\da-f]+) text'
string = 'test 12a3de text'
self.assertEqual('12a3de', regex_util.ExtractGroup(regex, string, group=1))
def testNoMatch(self):
regex = r'test ([\da-f]+) text'
string = 'test text'
self.assertRaises(regex_util.NoMatchError, regex_util.ExtractGroup, regex,
string, group=1)
def testMatches_Unanchored(self):
regex = r'([\da-f]+) text'
string = 'test 12a3de text'
self.assertEqual('12a3de', regex_util.ExtractGroup(regex, string, group=1))
def testNamedGroup(self):
regex = r'test (?P<hex>[\da-f]+) text'
string = 'test 12a3de text'
self.assertEqual('12a3de', regex_util.ExtractGroup(regex, string,
group='hex'))
def testNumberedGroup_Invalid(self):
regex = r'test ([\da-f]+) (.*)'
string = 'test 12a3de text'
self.assertRaisesRegexp(IndexError, 'No such group 3 in',
regex_util.ExtractGroup, regex, string, group=3)
def testNumberedGroup_Valid(self):
regex = r'test ([\da-f]+) (.*)'
string = 'test 12a3de text'
self.assertEqual('text', regex_util.ExtractGroup(regex, string, group=2))
def testNumberedGroup_WholeMatch(self):
regex = r'test [\da-f]+ (.*)'
string = 'test 12a3de text'
self.assertEqual(string, regex_util.ExtractGroup(regex, string, group=0))
class ExtractFloatTestCase(unittest.TestCase):
def testParsesSuccessfully(self):
regex = r'test (\d+|\.\d+|\d+\.\d+) string'
string = 'test 12.435 string'
self.assertAlmostEqual(12.435, regex_util.ExtractFloat(regex, string,
group=1))
def testRaisesValueErrorOnInvalidInput(self):
regex = r'test (invalid_float) string'
string = 'test invalid_float string'
self.assertRaises(ValueError, regex_util.ExtractFloat, regex, string,
group=1)
class ExtractIntTestCase(unittest.TestCase):
def testParsesSuccessfully(self):
regex = r'test (\d+) string'
string = 'test 12435 string'
self.assertEqual(12435, regex_util.ExtractInt(regex, string, group=1))
def testRaisesValueErrorOnInvalidInput(self):
regex = r'test (invalid_int) string'
string = 'test invalid_int string'
self.assertRaises(ValueError, regex_util.ExtractInt, regex, string, group=1)
class ExtractAllFloatMetricsTestCase(unittest.TestCase):
def testParseSuccessful(self):
matches = regex_util.ExtractAllFloatMetrics(
"""
metric=value
a=1
b=2.0
c=.3
d=-4.5
ef=3.2e+2
""")
self.assertEqual(len(matches), 5)
self.assertEqual(1.0, matches['a'])
self.assertEqual(2.0, matches['b'])
self.assertEqual(0.3, matches['c'])
self.assertEqual(-4.5, matches['d'])
self.assertEqual(3.2e+2, matches['ef'])
def testInvalidMetricRegex(self):
self.assertRaises(
NotImplementedError,
regex_util.ExtractAllFloatMetrics,
'metric=1.0',
metric_regex=r'\w(\w)')
def testIntegerValueRegex(self):
matches = regex_util.ExtractAllFloatMetrics(
'a=1.2,b=3', value_regex=r'\d+')
self.assertEqual(len(matches), 2)
self.assertEqual(1.0, matches['a'])
self.assertEqual(3, matches['b'])
class ExtractAllMatchesTestCase(unittest.TestCase):
def testParseSuccessfully(self):
regex = r'(\d+) (\w+)'
string = 'test 10 sec 33 Mbps multiple matching'
matches = regex_util.ExtractAllMatches(regex, string)
self.assertEqual(len(matches), 2)
self.assertEqual(matches[0][0], '10')
self.assertEqual(matches[0][1], 'sec')
self.assertEqual(matches[1][0], '33')
self.assertEqual(matches[1][1], 'Mbps')
def testNoMatch(self):
regex = r'test (\d\w\d) no match'
string = 'test no match'
self.assertRaises(regex_util.NoMatchError, regex_util.ExtractAllMatches,
regex, string)
class ExtractExactlyOneMatchTestCase(unittest.TestCase):
def testNoMatch(self):
with self.assertRaises(regex_util.NoMatchError):
regex_util.ExtractExactlyOneMatch('foo', 'bar')
def testNonUniqueMatch(self):
with self.assertRaises(regex_util.TooManyMatchesError):
regex_util.ExtractExactlyOneMatch('spam', 'spam spam spam')
def testNoCapturingGroup(self):
self.assertEqual(regex_util.ExtractExactlyOneMatch('bar+', 'foo barrr baz'),
'barrr')
def testCapturingGroup(self):
self.assertEqual(
regex_util.ExtractExactlyOneMatch('ba(r+)', 'foo barrr baz'),
'rrr')
class SubstituteTestCase(unittest.TestCase):
def testSubstituteSuccess(self):
pattern = r'<(\w+)>'
repl = r'[\1]'
text = 'foo <bar> <foo> bar'
sub_text = regex_util.Substitute(pattern, repl, text)
self.assertEqual(sub_text, 'foo [bar] [foo] bar')
def testNoMatch(self):
pattern = r'\[(\w+)\]'
repl = r'\1'
text = 'foo <bar> <foo> bar'
self.assertRaises(regex_util.NoMatchError, regex_util.Substitute,
pattern, repl, text)
if __name__ == '__main__':
unittest.main()
|
import collections
import contextlib
import copy
import sys
import inspect
import logging
import wrapt
from .errors import UnhandledHTTPRequestError
from .matchers import requests_match, uri, method, get_matchers_results
from .patch import CassettePatcherBuilder
from .serializers import yamlserializer
from .persisters.filesystem import FilesystemPersister
from .util import partition_dict
from ._handle_coroutine import handle_coroutine
from .record_mode import RecordMode
try:
from asyncio import iscoroutinefunction
except ImportError:
def iscoroutinefunction(*args, **kwargs):
return False
log = logging.getLogger(__name__)
class CassetteContextDecorator:
"""Context manager/decorator that handles installing the cassette and
removing cassettes.
This class defers the creation of a new cassette instance until
the point at which it is installed by context manager or
decorator. The fact that a new cassette is used with each
application prevents the state of any cassette from interfering
with another.
Instances of this class are NOT reentrant as context managers.
However, functions that are decorated by
``CassetteContextDecorator`` instances ARE reentrant. See the
implementation of ``__call__`` on this class for more details.
There is also a guard against attempts to reenter instances of
this class as a context manager in ``__exit__``.
"""
_non_cassette_arguments = ("path_transformer", "func_path_generator")
@classmethod
def from_args(cls, cassette_class, **kwargs):
return cls(cassette_class, lambda: dict(kwargs))
def __init__(self, cls, args_getter):
self.cls = cls
self._args_getter = args_getter
self.__finish = None
def _patch_generator(self, cassette):
with contextlib.ExitStack() as exit_stack:
for patcher in CassettePatcherBuilder(cassette).build():
exit_stack.enter_context(patcher)
log_format = "{action} context for cassette at {path}."
log.debug(log_format.format(action="Entering", path=cassette._path))
yield cassette
log.debug(log_format.format(action="Exiting", path=cassette._path))
# TODO(@IvanMalison): Hmmm. it kind of feels like this should be
# somewhere else.
cassette._save()
def __enter__(self):
# This assertion is here to prevent the dangerous behavior
# that would result from forgetting about a __finish before
# completing it.
# How might this condition be met? Here is an example:
# context_decorator = Cassette.use('whatever')
# with context_decorator:
# with context_decorator:
# pass
assert self.__finish is None, "Cassette already open."
other_kwargs, cassette_kwargs = partition_dict(
lambda key, _: key in self._non_cassette_arguments, self._args_getter()
)
if other_kwargs.get("path_transformer"):
transformer = other_kwargs["path_transformer"]
cassette_kwargs["path"] = transformer(cassette_kwargs["path"])
self.__finish = self._patch_generator(self.cls.load(**cassette_kwargs))
return next(self.__finish)
def __exit__(self, *args):
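        # Drain the generator so the code after ``yield`` in _patch_generator
        # runs (saving the cassette and unwinding the patchers), then clear the
        # guard so this instance can be entered again.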
next(self.__finish, None)
self.__finish = None
@wrapt.decorator
def __call__(self, function, instance, args, kwargs):
# This awkward cloning thing is done to ensure that decorated
# functions are reentrant. This is required for thread
# safety and the correct operation of recursive functions.
args_getter = self._build_args_getter_for_decorator(function)
return type(self)(self.cls, args_getter)._execute_function(function, args, kwargs)
def _execute_function(self, function, args, kwargs):
def handle_function(cassette):
if cassette.inject:
return function(cassette, *args, **kwargs)
else:
return function(*args, **kwargs)
if iscoroutinefunction(function):
return handle_coroutine(vcr=self, fn=handle_function)
if inspect.isgeneratorfunction(function):
return self._handle_generator(fn=handle_function)
return self._handle_function(fn=handle_function)
def _handle_generator(self, fn):
"""Wraps a generator so that we're inside the cassette context for the
duration of the generator.
"""
with self as cassette:
coroutine = fn(cassette)
# We don't need to catch StopIteration. The caller (Tornado's
# gen.coroutine, for example) will handle that.
to_yield = next(coroutine)
while True:
try:
to_send = yield to_yield
except Exception:
to_yield = coroutine.throw(*sys.exc_info())
else:
try:
to_yield = coroutine.send(to_send)
except StopIteration:
break
def _handle_function(self, fn):
with self as cassette:
return fn(cassette)
@staticmethod
def get_function_name(function):
return function.__name__
def _build_args_getter_for_decorator(self, function):
def new_args_getter():
kwargs = self._args_getter()
if "path" not in kwargs:
name_generator = kwargs.get("func_path_generator") or self.get_function_name
path = name_generator(function)
kwargs["path"] = path
return kwargs
return new_args_getter
class Cassette:
"""A container for recorded requests and responses"""
@classmethod
def load(cls, **kwargs):
"""Instantiate and load the cassette stored at the specified path."""
new_cassette = cls(**kwargs)
new_cassette._load()
return new_cassette
@classmethod
def use_arg_getter(cls, arg_getter):
return CassetteContextDecorator(cls, arg_getter)
@classmethod
def use(cls, **kwargs):
return CassetteContextDecorator.from_args(cls, **kwargs)
def __init__(
self,
path,
serializer=None,
persister=None,
record_mode=RecordMode.ONCE,
match_on=(uri, method),
before_record_request=None,
before_record_response=None,
custom_patches=(),
inject=False,
allow_playback_repeats=False,
):
self._persister = persister or FilesystemPersister
self._path = path
self._serializer = serializer or yamlserializer
self._match_on = match_on
self._before_record_request = before_record_request or (lambda x: x)
log.info(self._before_record_request)
self._before_record_response = before_record_response or (lambda x: x)
self.inject = inject
self.record_mode = record_mode
self.custom_patches = custom_patches
self.allow_playback_repeats = allow_playback_repeats
# self.data is the list of (req, resp) tuples
self.data = []
self.play_counts = collections.Counter()
self.dirty = False
self.rewound = False
@property
def play_count(self):
return sum(self.play_counts.values())
@property
def all_played(self):
"""Returns True if all responses have been played, False otherwise."""
return len(self.play_counts.values()) == len(self)
@property
def requests(self):
return [request for (request, response) in self.data]
@property
def responses(self):
return [response for (request, response) in self.data]
@property
def write_protected(self):
return (self.rewound and self.record_mode == RecordMode.ONCE) or self.record_mode == RecordMode.NONE
def append(self, request, response):
"""Add a request, response pair to this cassette"""
log.info("Appending request %s and response %s", request, response)
request = self._before_record_request(request)
if not request:
return
# Deepcopy is here because mutation of `response` will corrupt the
# real response.
response = copy.deepcopy(response)
response = self._before_record_response(response)
if response is None:
return
self.data.append((request, response))
self.dirty = True
def filter_request(self, request):
return self._before_record_request(request)
def _responses(self, request):
"""
internal API, returns an iterator with all responses matching
the request.
"""
request = self._before_record_request(request)
for index, (stored_request, response) in enumerate(self.data):
if requests_match(request, stored_request, self._match_on):
yield index, response
def can_play_response_for(self, request):
request = self._before_record_request(request)
return request and request in self and self.record_mode != RecordMode.ALL and self.rewound
def play_response(self, request):
"""
Get the response corresponding to a request, but only if it
hasn't been played back before, and mark it as played
"""
for index, response in self._responses(request):
if self.play_counts[index] == 0 or self.allow_playback_repeats:
self.play_counts[index] += 1
return response
# The cassette doesn't contain the request asked for.
raise UnhandledHTTPRequestError(
"The cassette (%r) doesn't contain the request (%r) asked for" % (self._path, request)
)
def responses_of(self, request):
"""
Find the responses corresponding to a request.
This function isn't actually used by VCR internally, but is
provided as an external API.
"""
responses = [response for index, response in self._responses(request)]
if responses:
return responses
# The cassette doesn't contain the request asked for.
raise UnhandledHTTPRequestError(
"The cassette (%r) doesn't contain the request (%r) asked for" % (self._path, request)
)
def rewind(self):
self.play_counts = collections.Counter()
def find_requests_with_most_matches(self, request):
"""
Get the stored request(s) most similar to a given request, as a list of
tuples containing:
- the stored request object
- the names of the matchers that succeeded, as strings
- the failed matchers and their assertion messages describing the differences, as string tuples
This is useful when a request fails to be found: the most similar stored
request(s) show which parts of the request have changed.
"""
best_matches = []
request = self._before_record_request(request)
for index, (stored_request, response) in enumerate(self.data):
successes, fails = get_matchers_results(request, stored_request, self._match_on)
best_matches.append((len(successes), stored_request, successes, fails))
best_matches.sort(key=lambda t: t[0], reverse=True)
# Get the first best matches (multiple if equal matches)
final_best_matches = []
if not best_matches:
return final_best_matches
previous_nb_success = best_matches[0][0]
for best_match in best_matches:
nb_success = best_match[0]
# Do not keep matches that have 0 successes,
# it means that the request is totally different from
# the ones stored in the cassette
if nb_success < 1 or previous_nb_success != nb_success:
break
previous_nb_success = nb_success
final_best_matches.append(best_match[1:])
return final_best_matches
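# Illustrative note (not part of the original API docs): each element returned by
# find_requests_with_most_matches is shaped like
#     (stored_request, ["uri", "method", ...], [("body", "<assertion message>"), ...])
# i.e. the stored request, the names of the matchers that succeeded, and
# (failed matcher name, assertion message) pairs describing what differed.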
def _as_dict(self):
return {"requests": self.requests, "responses": self.responses}
def _save(self, force=False):
if force or self.dirty:
self._persister.save_cassette(self._path, self._as_dict(), serializer=self._serializer)
self.dirty = False
def _load(self):
try:
requests, responses = self._persister.load_cassette(self._path, serializer=self._serializer)
for request, response in zip(requests, responses):
self.append(request, response)
self.dirty = False
self.rewound = True
except ValueError:
pass
def __str__(self):
return "<Cassette containing {} recorded response(s)>".format(len(self))
def __len__(self):
"""Return the number of request,response pairs stored in here"""
return len(self.data)
def __contains__(self, request):
"""Return whether or not a request has been stored"""
for index, response in self._responses(request):
if self.play_counts[index] == 0 or self.allow_playback_repeats:
return True
return False
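# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative only; the cassette path and test body are
# assumptions, not part of this module). A Cassette is normally driven through
# the CassetteContextDecorator returned by ``Cassette.use``:
#
#     with Cassette.use(path="fixtures/example.yaml", record_mode=RecordMode.ONCE) as cassette:
#         ...  # HTTP calls made here are recorded/replayed by the installed patchers
#         print(cassette.play_count, cassette.all_played)
#
# The same object also works as a decorator; the wrapped function receives the
# cassette as its first argument only when ``inject=True`` is passed:
#
#     @Cassette.use(path="fixtures/example.yaml", inject=True)
#     def test_something(cassette):
#         ...
# ---------------------------------------------------------------------------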
|
import logging
from oauthlib.oauth2 import AccessDeniedError, MissingTokenError
from ring_doorbell import Auth
import voluptuous as vol
from homeassistant import config_entries, const, core, exceptions
from . import DOMAIN # pylint: disable=unused-import
_LOGGER = logging.getLogger(__name__)
async def validate_input(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect."""
auth = Auth(f"HomeAssistant/{const.__version__}")
try:
token = await hass.async_add_executor_job(
auth.fetch_token,
data["username"],
data["password"],
data.get("2fa"),
)
except MissingTokenError as err:
raise Require2FA from err
except AccessDeniedError as err:
raise InvalidAuth from err
return token
class RingConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for Ring."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
user_pass = None
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
try:
token = await validate_input(self.hass, user_input)
await self.async_set_unique_id(user_input["username"])
return self.async_create_entry(
title=user_input["username"],
data={"username": user_input["username"], "token": token},
)
except Require2FA:
self.user_pass = user_input
return await self.async_step_2fa()
except InvalidAuth:
errors["base"] = "invalid_auth"
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Unexpected exception")
errors["base"] = "unknown"
return self.async_show_form(
step_id="user",
data_schema=vol.Schema({"username": str, "password": str}),
errors=errors,
)
async def async_step_2fa(self, user_input=None):
"""Handle 2fa step."""
if user_input:
return await self.async_step_user({**self.user_pass, **user_input})
return self.async_show_form(
step_id="2fa",
data_schema=vol.Schema({"2fa": str}),
)
class Require2FA(exceptions.HomeAssistantError):
"""Error to indicate we require 2FA."""
class InvalidAuth(exceptions.HomeAssistantError):
"""Error to indicate there is invalid auth."""
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import logging
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import publisher
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.windows_packages import ntttcp
# When adding new configs to ntttcp_config_list, increase this value
_NUM_PARAMS_IN_CONFIG = 3
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'ntttcp'
BENCHMARK_CONFIG = """
ntttcp:
description: Run ntttcp between two VMs.
vm_groups:
default:
vm_spec: *default_single_core
vm_count: 2
"""
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def Prepare(benchmark_spec):
"""Install ntttcp and open any ports we need."""
vms = benchmark_spec.vms
for vm in vms:
vm.Install('ntttcp')
vm.AllowPort(ntttcp.CONTROL_PORT)
# get the number of ports needed based on the flags
num_ports = max([c.threads for c in ntttcp.ParseConfigList()])
vm.AllowPort(ntttcp.BASE_DATA_PORT, ntttcp.BASE_DATA_PORT + num_ports)
def _RunTest(benchmark_spec, sender, receiver, dest_ip, ip_type, conf,
cooldown_s):
"""Run a single NTTTCP test, and publish the results."""
try:
results = ntttcp.RunNtttcp(sender, receiver, dest_ip, ip_type, conf.udp,
conf.threads, conf.time_s, conf.packet_size,
cooldown_s)
publisher.PublishRunStageSamples(benchmark_spec, results)
return True
except IOError:
logging.info('Failed to publish %s IP results for config %s', ip_type,
str(conf))
return False
def Run(benchmark_spec):
"""Measure TCP stream throughput between two VMs.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects with the benchmark results.
"""
vm_sets = [(benchmark_spec.vms[0], benchmark_spec.vms[1]),
(benchmark_spec.vms[1], benchmark_spec.vms[0])]
parsed_configs = ntttcp.ParseConfigList()
# Keep accounting of failed configs.
failed_confs = []
# Send traffic in both directions
for ((sender, receiver), conf) in itertools.product(vm_sets, parsed_configs):
# Send using external IP addresses
if vm_util.ShouldRunOnExternalIpAddress(conf.ip_type):
if not _RunTest(benchmark_spec, sender, receiver, receiver.ip_address,
'external', conf, True):
failed_confs.append(('external', conf))
# Send using internal IP addresses
if vm_util.ShouldRunOnInternalIpAddress(sender, receiver, conf.ip_type):
if not _RunTest(benchmark_spec, sender, receiver, receiver.internal_ip,
'internal', conf,
len(parsed_configs) > 1):
failed_confs.append(('internal', conf))
if failed_confs:
logging.info('Failed to run test and/or gather results for %s',
str(failed_confs))
return []
def Cleanup(unused_benchmark_spec):
pass
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import act
from six.moves import range
BENCHMARK_NAME = 'aerospike_certification_tool'
BENCHMARK_CONFIG = """
aerospike_certification_tool:
description: Runs aerospike certification tool.
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
vm_count: 1
disk_count: 0
"""
FLAGS = flags.FLAGS
flags.DEFINE_boolean(
'act_stop_on_complete', True,
'Stop the benchmark when completing the current load. This can be useful '
'for deciding the maximum sustained load for stress tests.')
flags.DEFINE_boolean('act_dynamic_load', False,
'Dynamically adjust the act test load. We start at the initial '
'load from --act_load; if the underlying driver is not '
'able to keep up, reduce the load and retry.')
ACT_DYNAMIC_LOAD_STEP = 0.9
def GetConfig(user_config):
config = configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
if FLAGS.data_disk_type == disk.LOCAL:
config['vm_groups']['default']['disk_count'] = (
config['vm_groups']['default']['disk_count'] or None)
else:
config['vm_groups']['default']['disk_count'] = (
config['vm_groups']['default']['disk_count'] or 1)
return config
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources are present.
Args:
benchmark_config: Unused.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
del benchmark_config
if FLAGS.act_dynamic_load and len(FLAGS.act_load) > 1:
raise errors.Config.InvalidValue(
'Attempting to apply dynamic load while setting multiple act_load '
'steps.')
def Prepare(benchmark_spec):
"""Prepares act benchmark."""
vm = benchmark_spec.vms[0]
vm.Install('act')
for d in vm.scratch_disks:
vm.RemoteCommand('sudo umount %s' % d.mount_point)
def PrepareActConfig(vm, load):
"""Prepare config file for act benchmark."""
if FLAGS.act_parallel:
for i in range(FLAGS.act_reserved_partitions, len(vm.scratch_disks)):
act.PrepActConfig(vm, float(load), i)
else:
act.PrepActConfig(vm, float(load))
def GenerateLoad():
"""Generate load for act test."""
if FLAGS.act_dynamic_load:
load = float(FLAGS.act_load[0])
while load:
yield load
load = math.floor(ACT_DYNAMIC_LOAD_STEP * load)
else:
for load in FLAGS.act_load:
yield load
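# Worked example (illustrative; the starting value is an assumption): with
# --act_dynamic_load and --act_load=[100], GenerateLoad yields
# 100, 90, 81, 72, 64, 57, 51, ... taking floor(0.9 * previous) each step,
# and stops once floor() reaches 0. Without --act_dynamic_load it simply
# yields each value of --act_load in order.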
def Run(benchmark_spec):
"""Runs act and reports the results."""
vm = benchmark_spec.vms[0]
act.RunActPrep(vm)
samples = []
run_samples = []
for load in GenerateLoad():
def _Run(act_load, index):
run_samples.extend(act.RunAct(vm, act_load, index))
PrepareActConfig(vm, load)
if FLAGS.act_parallel:
args = [((float(load), idx), {})
for idx in range(
FLAGS.act_reserved_partitions, len(vm.scratch_disks))]
vm_util.RunThreaded(_Run, args)
else:
run_samples.extend(act.RunAct(vm, float(load)))
samples.extend(run_samples)
if FLAGS.act_stop_on_complete and act.IsRunComplete(run_samples):
break
run_samples = []
return samples
def Cleanup(benchmark_spec):
del benchmark_spec
|
import os.path
import contextlib
from pylint import interfaces, checkers
class ModelineChecker(checkers.BaseChecker):
"""Check for vim modelines in files."""
__implements__ = interfaces.IRawChecker
name = 'modeline'
msgs = {'W9002': ('Does not have vim modeline', 'modeline-missing', None),
'W9003': ('Modeline is invalid', 'invalid-modeline', None),
'W9004': ('Modeline position is wrong', 'modeline-position', None)}
options = ()
priority = -1
def process_module(self, node):
"""Process the module."""
if os.path.basename(os.path.splitext(node.file)[0]) == '__init__':
return
max_lineno = 1
with contextlib.closing(node.stream()) as stream:
for (lineno, line) in enumerate(stream, start=1):
if lineno == 1 and line.startswith(b'#!'):
max_lineno += 1
continue
elif line.startswith(b'# vim:'):
if lineno > max_lineno:
self.add_message('modeline-position', line=lineno)
if (line.rstrip() != b'# vim: ft=python '
b'fileencoding=utf-8 sts=4 sw=4 et:'):
self.add_message('invalid-modeline', line=lineno)
break
else:
self.add_message('modeline-missing', line=1)
def register(linter):
"""Register the checker."""
linter.register_checker(ModelineChecker(linter))
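# Example (derived from the check above): the only modeline this checker accepts is
#     # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# and it must appear on line 1, or on line 2 when line 1 is a '#!' shebang;
# anything else triggers invalid-modeline, modeline-position or modeline-missing.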
|
import asyncio
from typing import Callable, List
from plumlightpad import Plum
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_HS_COLOR,
SUPPORT_BRIGHTNESS,
SUPPORT_COLOR,
LightEntity,
)
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from homeassistant.helpers.entity import Entity
import homeassistant.util.color as color_util
from .const import DOMAIN
async def async_setup_entry(
hass: HomeAssistant,
entry: ConfigEntry,
async_add_entities: Callable[[List[Entity]], None],
) -> None:
"""Set up Plum Lightpad dimmer lights and glow rings."""
plum: Plum = hass.data[DOMAIN][entry.entry_id]
def setup_entities(device) -> None:
entities = []
if "lpid" in device:
lightpad = plum.get_lightpad(device["lpid"])
entities.append(GlowRing(lightpad=lightpad))
if "llid" in device:
logical_load = plum.get_load(device["llid"])
entities.append(PlumLight(load=logical_load))
if entities:
async_add_entities(entities)
async def new_load(device):
setup_entities(device)
async def new_lightpad(device):
setup_entities(device)
device_web_session = async_get_clientsession(hass, verify_ssl=False)
asyncio.create_task(
plum.discover(
hass.loop,
loadListener=new_load,
lightpadListener=new_lightpad,
websession=device_web_session,
)
)
class PlumLight(LightEntity):
"""Representation of a Plum Lightpad dimmer."""
def __init__(self, load):
"""Initialize the light."""
self._load = load
self._brightness = load.level
async def async_added_to_hass(self):
"""Subscribe to dimmerchange events."""
self._load.add_event_listener("dimmerchange", self.dimmerchange)
def dimmerchange(self, event):
"""Change event handler updating the brightness."""
self._brightness = event["level"]
self.schedule_update_ha_state()
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Combine logical load ID with .light to guarantee it is unique."""
return f"{self._load.llid}.light"
@property
def name(self):
"""Return the name of the switch if any."""
return self._load.name
@property
def device_info(self):
"""Return the device info."""
return {
"name": self.name,
"identifiers": {(DOMAIN, self.unique_id)},
"model": "Dimmer",
"manufacturer": "Plum",
}
@property
def brightness(self) -> int:
"""Return the brightness of this switch between 0..255."""
return self._brightness
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._brightness > 0
@property
def supported_features(self):
"""Flag supported features."""
if self._load.dimmable:
return SUPPORT_BRIGHTNESS
return 0
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
await self._load.turn_on(kwargs[ATTR_BRIGHTNESS])
else:
await self._load.turn_on()
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
await self._load.turn_off()
class GlowRing(LightEntity):
"""Representation of a Plum Lightpad dimmer glow ring."""
def __init__(self, lightpad):
"""Initialize the light."""
self._lightpad = lightpad
self._name = f"{lightpad.friendly_name} Glow Ring"
self._state = lightpad.glow_enabled
self._glow_intensity = lightpad.glow_intensity
self._red = lightpad.glow_color["red"]
self._green = lightpad.glow_color["green"]
self._blue = lightpad.glow_color["blue"]
async def async_added_to_hass(self):
"""Subscribe to configchange events."""
self._lightpad.add_event_listener("configchange", self.configchange_event)
def configchange_event(self, event):
"""Handle Configuration change event."""
config = event["changes"]
self._state = config["glowEnabled"]
self._glow_intensity = config["glowIntensity"]
self._red = config["glowColor"]["red"]
self._green = config["glowColor"]["green"]
self._blue = config["glowColor"]["blue"]
self.schedule_update_ha_state()
@property
def hs_color(self):
"""Return the hue and saturation color value [float, float]."""
return color_util.color_RGB_to_hs(self._red, self._green, self._blue)
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def unique_id(self):
"""Combine LightPad ID with .glow to guarantee it is unique."""
return f"{self._lightpad.lpid}.glow"
@property
def name(self):
"""Return the name of the switch if any."""
return self._name
@property
def device_info(self):
"""Return the device info."""
return {
"name": self.name,
"identifiers": {(DOMAIN, self.unique_id)},
"model": "Glow Ring",
"manufacturer": "Plum",
}
@property
def brightness(self) -> int:
"""Return the brightness of this switch between 0..255."""
return min(max(int(round(self._glow_intensity * 255, 0)), 0), 255)
@property
def glow_intensity(self):
"""Brightness in float form."""
return self._glow_intensity
@property
def is_on(self) -> bool:
"""Return true if light is on."""
return self._state
@property
def icon(self):
"""Return the crop-portrait icon representing the glow ring."""
return "mdi:crop-portrait"
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_BRIGHTNESS | SUPPORT_COLOR
async def async_turn_on(self, **kwargs):
"""Turn the light on."""
if ATTR_BRIGHTNESS in kwargs:
brightness_pct = kwargs[ATTR_BRIGHTNESS] / 255.0
await self._lightpad.set_config({"glowIntensity": brightness_pct})
elif ATTR_HS_COLOR in kwargs:
hs_color = kwargs[ATTR_HS_COLOR]
red, green, blue = color_util.color_hs_to_RGB(*hs_color)
await self._lightpad.set_glow_color(red, green, blue, 0)
else:
await self._lightpad.set_config({"glowEnabled": True})
async def async_turn_off(self, **kwargs):
"""Turn the light off."""
if ATTR_BRIGHTNESS in kwargs:
brightness_pct = kwargs[ATTR_BRIGHTNESS] / 255.0
await self._lightpad.set_config({"glowIntensity": brightness_pct})
else:
await self._lightpad.set_config({"glowEnabled": False})
|
from datetime import timedelta
from ovoenergy import OVODailyUsage
from ovoenergy.ovoenergy import OVOEnergy
from homeassistant.config_entries import ConfigEntry
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import OVOEnergyDeviceEntity
from .const import DATA_CLIENT, DATA_COORDINATOR, DOMAIN
SCAN_INTERVAL = timedelta(seconds=300)
PARALLEL_UPDATES = 4
async def async_setup_entry(
hass: HomeAssistantType, entry: ConfigEntry, async_add_entities
) -> None:
"""Set up OVO Energy sensor based on a config entry."""
coordinator: DataUpdateCoordinator = hass.data[DOMAIN][entry.entry_id][
DATA_COORDINATOR
]
client: OVOEnergy = hass.data[DOMAIN][entry.entry_id][DATA_CLIENT]
entities = []
if coordinator.data:
if coordinator.data.electricity:
entities.append(OVOEnergyLastElectricityReading(coordinator, client))
entities.append(
OVOEnergyLastElectricityCost(
coordinator,
client,
coordinator.data.electricity[-1].cost.currency_unit,
)
)
if coordinator.data.gas:
entities.append(OVOEnergyLastGasReading(coordinator, client))
entities.append(
OVOEnergyLastGasCost(
coordinator,
client,
coordinator.data.gas[-1].cost.currency_unit,
)
)
async_add_entities(
entities,
True,
)
class OVOEnergySensor(OVOEnergyDeviceEntity):
"""Defines a OVO Energy sensor."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
client: OVOEnergy,
key: str,
name: str,
icon: str,
unit_of_measurement: str = "",
) -> None:
"""Initialize OVO Energy sensor."""
self._unit_of_measurement = unit_of_measurement
super().__init__(coordinator, client, key, name, icon)
@property
def unit_of_measurement(self) -> str:
"""Return the unit this state is expressed in."""
return self._unit_of_measurement
class OVOEnergyLastElectricityReading(OVOEnergySensor):
"""Defines a OVO Energy last reading sensor."""
def __init__(self, coordinator: DataUpdateCoordinator, client: OVOEnergy):
"""Initialize OVO Energy sensor."""
super().__init__(
coordinator,
client,
f"{client.account_id}_last_electricity_reading",
"OVO Last Electricity Reading",
"mdi:flash",
"kWh",
)
@property
def state(self) -> str:
"""Return the state of the sensor."""
usage: OVODailyUsage = self.coordinator.data
if usage is None or not usage.electricity:
return None
return usage.electricity[-1].consumption
@property
def device_state_attributes(self) -> object:
"""Return the attributes of the sensor."""
usage: OVODailyUsage = self.coordinator.data
if usage is None or not usage.electricity:
return None
return {
"start_time": usage.electricity[-1].interval.start,
"end_time": usage.electricity[-1].interval.end,
}
class OVOEnergyLastGasReading(OVOEnergySensor):
"""Defines a OVO Energy last reading sensor."""
def __init__(self, coordinator: DataUpdateCoordinator, client: OVOEnergy):
"""Initialize OVO Energy sensor."""
super().__init__(
coordinator,
client,
f"{DOMAIN}_{client.account_id}_last_gas_reading",
"OVO Last Gas Reading",
"mdi:gas-cylinder",
"kWh",
)
@property
def state(self) -> str:
"""Return the state of the sensor."""
usage: OVODailyUsage = self.coordinator.data
if usage is None or not usage.gas:
return None
return usage.gas[-1].consumption
@property
def device_state_attributes(self) -> object:
"""Return the attributes of the sensor."""
usage: OVODailyUsage = self.coordinator.data
if usage is None or not usage.gas:
return None
return {
"start_time": usage.gas[-1].interval.start,
"end_time": usage.gas[-1].interval.end,
}
class OVOEnergyLastElectricityCost(OVOEnergySensor):
"""Defines a OVO Energy last cost sensor."""
def __init__(
self, coordinator: DataUpdateCoordinator, client: OVOEnergy, currency: str
):
"""Initialize OVO Energy sensor."""
super().__init__(
coordinator,
client,
f"{DOMAIN}_{client.account_id}_last_electricity_cost",
"OVO Last Electricity Cost",
"mdi:cash-multiple",
currency,
)
@property
def state(self) -> str:
"""Return the state of the sensor."""
usage: OVODailyUsage = self.coordinator.data
if usage is None or not usage.electricity:
return None
return usage.electricity[-1].cost.amount
@property
def device_state_attributes(self) -> object:
"""Return the attributes of the sensor."""
usage: OVODailyUsage = self.coordinator.data
if usage is None or not usage.electricity:
return None
return {
"start_time": usage.electricity[-1].interval.start,
"end_time": usage.electricity[-1].interval.end,
}
class OVOEnergyLastGasCost(OVOEnergySensor):
"""Defines a OVO Energy last cost sensor."""
def __init__(
self, coordinator: DataUpdateCoordinator, client: OVOEnergy, currency: str
):
"""Initialize OVO Energy sensor."""
super().__init__(
coordinator,
client,
f"{DOMAIN}_{client.account_id}_last_gas_cost",
"OVO Last Gas Cost",
"mdi:cash-multiple",
currency,
)
@property
def state(self) -> str:
"""Return the state of the sensor."""
usage: OVODailyUsage = self.coordinator.data
if usage is None or not usage.gas:
return None
return usage.gas[-1].cost.amount
@property
def device_state_attributes(self) -> object:
"""Return the attributes of the sensor."""
usage: OVODailyUsage = self.coordinator.data
if usage is None or not usage.gas:
return None
return {
"start_time": usage.gas[-1].interval.start,
"end_time": usage.gas[-1].interval.end,
}
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from absl import flags
import contextlib2
import mock
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import context
from perfkitbenchmarker import os_types
from perfkitbenchmarker import providers
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.linux_benchmarks import ping_benchmark
from tests import pkb_common_test_case
import six
from six.moves import zip_longest
FLAGS = flags.FLAGS
NAME = 'ping'
UID = 'name0'
CONFIG_WITH_BACKGROUND_CPU = """
ping:
description: Benchmarks ping latency over internal IP addresses
vm_groups:
vm_1:
vm_spec:
GCP:
machine_type: n1-standard-1
vm_2:
vm_spec:
GCP:
background_cpu_threads: 3
machine_type: n1-standard-1
"""
_GROUP_1 = 'vm_1'
_GROUP_2 = 'vm_2'
_MOCKED_VM_FUNCTIONS = 'Install', 'RemoteCommand'
class TestBackgroundWorkload(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
super(TestBackgroundWorkload, self).setUp()
FLAGS.run_uri = 'fake_run_uri'
FLAGS.cloud = providers.GCP
FLAGS.temp_dir = 'tmp'
self.addCleanup(context.SetThreadBenchmarkSpec, None)
def _CreateBenchmarkSpec(self, benchmark_config_yaml):
config = configs.LoadConfig(benchmark_config_yaml, {}, NAME)
config_spec = benchmark_config_spec.BenchmarkConfigSpec(
NAME, flag_values=FLAGS, **config)
return benchmark_spec.BenchmarkSpec(ping_benchmark, config_spec, UID)
def _CheckVmCallCounts(self, spec, working_groups, working_expected_counts,
non_working_groups, non_working_expected_counts):
# TODO(skschneider): This is also used in TestBackgroundNetworkWorkload.
# Consider moving to a shared function or base class.
expected_call_counts = {group: working_expected_counts
for group in working_groups}
expected_call_counts.update({group: non_working_expected_counts
for group in non_working_groups})
for group_name, vm_expected_call_counts in six.iteritems(
expected_call_counts):
group_vms = spec.vm_groups[group_name]
self.assertEqual(len(group_vms), 1,
msg='VM group "{0}" had {1} VMs'.format(group_name,
len(group_vms)))
vm = group_vms[0]
iter_mocked_functions = zip_longest(_MOCKED_VM_FUNCTIONS,
vm_expected_call_counts)
for function_name, expected_call_count in iter_mocked_functions:
call_count = getattr(vm, function_name).call_count
self.assertEqual(call_count, expected_call_count, msg=(
'Expected {0} from VM group "{1}" to be called {2} times, but it '
'was called {3} times.'.format(function_name, group_name,
expected_call_count, call_count)))
def _CheckVMFromSpec(self, spec, working_groups=(), non_working_groups=()):
with contextlib2.ExitStack() as stack:
for vm in spec.vms:
for function_name in _MOCKED_VM_FUNCTIONS:
stack.enter_context(mock.patch.object(vm, function_name))
working, non_working = working_groups, non_working_groups
self._CheckVmCallCounts(spec, working, (0, 0), non_working, (0, 0))
spec.Prepare()
self._CheckVmCallCounts(spec, working, (1, 0), non_working, (0, 0))
spec.StartBackgroundWorkload()
self._CheckVmCallCounts(spec, working, (1, 1), non_working, (0, 0))
spec.StopBackgroundWorkload()
self._CheckVmCallCounts(spec, working, (1, 2), non_working, (0, 0))
def testWindowsVMCausesError(self):
"""windows vm with background_cpu_threads raises exception."""
FLAGS['background_cpu_threads'].parse(1)
FLAGS['os_type'].parse(os_types.WINDOWS2019_CORE)
spec = self._CreateBenchmarkSpec(ping_benchmark.BENCHMARK_CONFIG)
spec.ConstructVirtualMachines()
with self.assertRaisesRegexp(Exception, 'NotImplementedError'):
spec.Prepare()
with self.assertRaisesRegexp(Exception, 'NotImplementedError'):
spec.StartBackgroundWorkload()
with self.assertRaisesRegexp(Exception, 'NotImplementedError'):
spec.StopBackgroundWorkload()
def testBackgroundWorkloadVM(self):
"""Check that the background_cpu_threads causes calls."""
FLAGS['background_cpu_threads'].parse(1)
spec = self._CreateBenchmarkSpec(ping_benchmark.BENCHMARK_CONFIG)
spec.ConstructVirtualMachines()
self._CheckVMFromSpec(spec, working_groups=(_GROUP_1, _GROUP_2))
def testBackgroundWorkloadVanillaConfig(self):
"""Test that nothing happens with the vanilla config."""
spec = self._CreateBenchmarkSpec(ping_benchmark.BENCHMARK_CONFIG)
spec.ConstructVirtualMachines()
for vm in spec.vms:
self.assertIsNone(vm.background_cpu_threads)
self.assertIsNone(vm.background_network_mbits_per_sec)
self._CheckVMFromSpec(spec, non_working_groups=(_GROUP_1, _GROUP_2))
def testBackgroundWorkloadWindows(self):
"""Test that nothing happens with the vanilla config."""
FLAGS['os_type'].parse(os_types.WINDOWS2019_CORE)
spec = self._CreateBenchmarkSpec(ping_benchmark.BENCHMARK_CONFIG)
spec.ConstructVirtualMachines()
for vm in spec.vms:
self.assertIsNone(vm.background_cpu_threads)
self.assertIsNone(vm.background_network_mbits_per_sec)
self._CheckVMFromSpec(spec, non_working_groups=(_GROUP_1, _GROUP_2))
def testBackgroundWorkloadVanillaConfigFlag(self):
"""Check that the background_cpu_threads flags overrides the config."""
FLAGS['background_cpu_threads'].parse(2)
spec = self._CreateBenchmarkSpec(ping_benchmark.BENCHMARK_CONFIG)
spec.ConstructVirtualMachines()
for vm in spec.vms:
self.assertEqual(vm.background_cpu_threads, 2)
self._CheckVMFromSpec(spec, working_groups=(_GROUP_1, _GROUP_2))
def testBackgroundWorkloadConfig(self):
"""Check that the config can be used to set background_cpu_threads."""
spec = self._CreateBenchmarkSpec(CONFIG_WITH_BACKGROUND_CPU)
spec.ConstructVirtualMachines()
for vm in spec.vm_groups[_GROUP_1]:
self.assertIsNone(vm.background_cpu_threads)
for vm in spec.vm_groups[_GROUP_2]:
self.assertEqual(vm.background_cpu_threads, 3)
self._CheckVMFromSpec(spec, working_groups=[_GROUP_2],
non_working_groups=[_GROUP_1])
if __name__ == '__main__':
unittest.main()
|
import shlex
import subprocess
import sys
import functools
from contextlib import contextmanager
from plumbum.commands.processes import run_proc, iter_lines
import plumbum.commands.modifiers
from plumbum.lib import six
from tempfile import TemporaryFile
from subprocess import PIPE, Popen
from types import MethodType
class RedirectionError(Exception):
"""Raised when an attempt is made to redirect an process' standard handle,
which was already redirected to/from a file"""
#===================================================================================================
# Utilities
#===================================================================================================
# modified from the stdlib pipes module for windows
_safechars = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789!@%_-+=:,./'
_funnychars = '"`$\\'
def shquote(text):
"""Quotes the given text with shell escaping (assumes as syntax similar to ``sh``)"""
text = six.str(text)
if sys.version_info >= (3, 3):
return shlex.quote(text)
else:
import pipes
return pipes.quote(text)
def shquote_list(seq):
return [shquote(item) for item in seq]
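# Example (illustrative): shquote("foo") returns 'foo' unchanged, while
# shquote("hello world") returns "'hello world'" so the argument survives a
# round-trip through an sh-like shell; shquote_list applies this per item.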
#===================================================================================================
# Commands
#===================================================================================================
class BaseCommand(object):
"""Base of all command objects"""
__slots__ = ("cwd", "env", "custom_encoding", "__weakref__")
def __str__(self):
return " ".join(self.formulate())
def __or__(self, other):
"""Creates a pipe with the other command"""
return Pipeline(self, other)
def __gt__(self, file):
"""Redirects the process' stdout to the given file"""
return StdoutRedirection(self, file)
def __rshift__(self, file):
"""Redirects the process' stdout to the given file (appending)"""
return AppendingStdoutRedirection(self, file)
def __ge__(self, file):
"""Redirects the process' stderr to the given file"""
return StderrRedirection(self, file)
def __lt__(self, file):
"""Redirects the given file into the process' stdin"""
return StdinRedirection(self, file)
def __lshift__(self, data):
"""Redirects the given data into the process' stdin"""
return StdinDataRedirection(self, data)
def __getitem__(self, args):
"""Creates a bound-command with the given arguments. Shortcut for
bound_command."""
if not isinstance(args, (tuple, list)):
args = [
args,
]
return self.bound_command(*args)
def bound_command(self, *args):
"""Creates a bound-command with the given arguments"""
if not args:
return self
if isinstance(self, BoundCommand):
return BoundCommand(self.cmd, self.args + list(args))
else:
return BoundCommand(self, args)
def __call__(self, *args, **kwargs):
"""A shortcut for `run(args)`, returning only the process' stdout"""
return self.run(args, **kwargs)[1]
def _get_encoding(self):
raise NotImplementedError()
def with_env(self, **envvars):
"""Returns a BoundEnvCommand with the given environment variables"""
if not envvars:
return self
return BoundEnvCommand(self, envvars)
setenv = with_env
@property
def machine(self):
raise NotImplementedError()
def formulate(self, level=0, args=()):
"""Formulates the command into a command-line, i.e., a list of shell-quoted strings
that can be executed by ``Popen`` or shells.
:param level: The nesting level of the formulation; it dictates how much shell-quoting
(if any) should be performed
:param args: The arguments passed to this command (a tuple)
:returns: A list of strings
"""
raise NotImplementedError()
def popen(self, args=(), **kwargs):
"""Spawns the given command, returning a ``Popen``-like object.
.. note::
When processes run in the **background** (either via ``popen`` or
:class:`& BG <plumbum.commands.BG>`), their stdout/stderr pipes might fill up,
causing them to hang. If you know a process produces output, be sure to consume it
every once in a while, using a monitoring thread/reactor in the background.
For more info, see `#48 <https://github.com/tomerfiliba/plumbum/issues/48>`_
:param args: Any arguments to be passed to the process (a tuple)
:param kwargs: Any keyword-arguments to be passed to the ``Popen`` constructor
:returns: A ``Popen``-like object
"""
raise NotImplementedError()
def nohup(self,
cwd='.',
stdout='nohup.out',
stderr=None,
append=True):
"""Runs a command detached."""
return self.machine.daemonic_popen(self, cwd, stdout, stderr, append)
@contextmanager
def bgrun(self, args=(), **kwargs):
"""Runs the given command as a context manager, allowing you to create a
`pipeline <http://en.wikipedia.org/wiki/Pipeline_(computing)>`_ (not in the UNIX sense)
of programs, parallelizing their work. In other words, instead of running programs
one after the other, you can start all of them at the same time and wait for them to
finish. For a more thorough review, see
`Lightweight Asynchronism <http://tomerfiliba.com/blog/Toying-with-Context-Managers/>`_.
Example::
from plumbum.cmd import mkfs
with mkfs["-t", "ext3", "/dev/sda1"] as p1:
with mkfs["-t", "ext3", "/dev/sdb1"] as p2:
pass
.. note::
When processes run in the **background** (either via ``popen`` or
:class:`& BG <plumbum.commands.BG>`), their stdout/stderr pipes might fill up,
causing them to hang. If you know a process produces output, be sure to consume it
every once in a while, using a monitoring thread/reactor in the background.
For more info, see `#48 <https://github.com/tomerfiliba/plumbum/issues/48>`_
For the arguments, see :func:`run <BaseCommand.run>`.
:returns: A Popen object, augmented with a ``.run()`` method, which returns a tuple of
(return code, stdout, stderr)
"""
retcode = kwargs.pop("retcode", 0)
timeout = kwargs.pop("timeout", None)
p = self.popen(args, **kwargs)
was_run = [False]
def runner():
if was_run[0]:
return # already done
was_run[0] = True
try:
return run_proc(p, retcode, timeout)
finally:
del p.run # to break cyclic reference p -> cell -> p
for f in [p.stdin, p.stdout, p.stderr]:
try:
f.close()
except Exception:
pass
p.run = runner
yield p
runner()
def run(self, args=(), **kwargs):
"""Runs the given command (equivalent to popen() followed by
:func:`run_proc <plumbum.commands.run_proc>`). If the exit code of the process does
not match the expected one, :class:`ProcessExecutionError
<plumbum.commands.ProcessExecutionError>` is raised.
:param args: Any arguments to be passed to the process (a tuple)
:param retcode: The expected return code of this process (defaults to 0).
In order to disable exit-code validation, pass ``None``. It may also
be a tuple (or any iterable) of expected exit codes.
.. note:: this argument must be passed as a keyword argument.
:param timeout: The maximal amount of time (in seconds) to allow the process to run.
``None`` means no timeout is imposed; otherwise, if the process hasn't
terminated after that many seconds, the process will be forcefully
terminated and an exception will be raised
.. note:: this argument must be passed as a keyword argument.
:param kwargs: Any keyword-arguments to be passed to the ``Popen`` constructor
:returns: A tuple of (return code, stdout, stderr)
"""
with self.bgrun(args, **kwargs) as p:
return p.run()
def _use_modifier(self, modifier, args):
"""
Applies a modifier to the current object (e.g. FG, NOHUP)
:param modifier: The modifier class to apply (e.g. FG)
:param args: A dictionary of arguments to pass to this modifier
:return:
"""
modifier_instance = modifier(**args)
return self & modifier_instance
def run_bg(self, **kwargs):
"""
Run this command in the background. Uses all arguments from the BG construct
:py:class: `plumbum.commands.modifiers.BG`
"""
return self._use_modifier(plumbum.commands.modifiers.BG, kwargs)
def run_fg(self, **kwargs):
"""
Run this command in the foreground. Uses all arguments from the FG construct
:py:class: `plumbum.commands.modifiers.FG`
"""
return self._use_modifier(plumbum.commands.modifiers.FG, kwargs)
def run_tee(self, **kwargs):
"""
Run this command using the TEE construct. Inherits all arguments from TEE
:py:class: `plumbum.commands.modifiers.TEE`
"""
return self._use_modifier(plumbum.commands.modifiers.TEE, kwargs)
def run_tf(self, **kwargs):
"""
Run this command using the TF construct. Inherits all arguments from TF
:py:class: `plumbum.commands.modifiers.TF`
"""
return self._use_modifier(plumbum.commands.modifiers.TF, kwargs)
def run_retcode(self, **kwargs):
"""
Run this command using the RETCODE construct. Inherits all arguments from RETCODE
:py:class: `plumbum.commands.modifiers.RETCODE`
"""
return self._use_modifier(plumbum.commands.modifiers.RETCODE, kwargs)
def run_nohup(self, **kwargs):
"""
Run this command using the NOHUP construct. Inherits all arguments from NOHUP
:py:class: `plumbum.commands.modifiers.NOHUP`
"""
return self._use_modifier(plumbum.commands.modifiers.NOHUP, kwargs)
class BoundCommand(BaseCommand):
__slots__ = ("cmd", "args")
def __init__(self, cmd, args):
self.cmd = cmd
self.args = list(args)
def __repr__(self):
return "BoundCommand(%r, %r)" % (self.cmd, self.args)
def _get_encoding(self):
return self.cmd._get_encoding()
def formulate(self, level=0, args=()):
return self.cmd.formulate(level + 1, self.args + list(args))
@property
def machine(self):
return self.cmd.machine
def popen(self, args=(), **kwargs):
if isinstance(args, six.string_types):
args = [
args,
]
return self.cmd.popen(self.args + list(args), **kwargs)
class BoundEnvCommand(BaseCommand):
__slots__ = ("cmd", "envvars")
def __init__(self, cmd, envvars):
self.cmd = cmd
self.envvars = envvars
def __repr__(self):
return "BoundEnvCommand(%r, %r)" % (self.cmd, self.envvars)
def _get_encoding(self):
return self.cmd._get_encoding()
def formulate(self, level=0, args=()):
return self.cmd.formulate(level, args)
@property
def machine(self):
return self.cmd.machine
def popen(self, args=(), **kwargs):
with self.machine.env(**self.envvars):
return self.cmd.popen(args, **kwargs)
class Pipeline(BaseCommand):
__slots__ = ("srccmd", "dstcmd")
def __init__(self, srccmd, dstcmd):
self.srccmd = srccmd
self.dstcmd = dstcmd
def __repr__(self):
return "Pipeline(%r, %r)" % (self.srccmd, self.dstcmd)
def _get_encoding(self):
return self.srccmd._get_encoding() or self.dstcmd._get_encoding()
def formulate(self, level=0, args=()):
return self.srccmd.formulate(level + 1) + ["|"
] + self.dstcmd.formulate(
level + 1, args)
@property
def machine(self):
return self.srccmd.machine
def popen(self, args=(), **kwargs):
src_kwargs = kwargs.copy()
src_kwargs["stdout"] = PIPE
if "stdin" in kwargs:
src_kwargs["stdin"] = kwargs["stdin"]
srcproc = self.srccmd.popen(args, **src_kwargs)
kwargs["stdin"] = srcproc.stdout
dstproc = self.dstcmd.popen(**kwargs)
# allow p1 to receive a SIGPIPE if p2 exits
srcproc.stdout.close()
if srcproc.stderr is not None:
dstproc.stderr = srcproc.stderr
if srcproc.stdin and src_kwargs.get('stdin') != PIPE:
srcproc.stdin.close()
dstproc.srcproc = srcproc
# monkey-patch .wait() to wait on srcproc as well (it's expected to die when dstproc dies)
dstproc_wait = dstproc.wait
@functools.wraps(Popen.wait)
def wait2(*args, **kwargs):
rc_dst = dstproc_wait(*args, **kwargs)
rc_src = srcproc.wait(*args, **kwargs)
dstproc.returncode = rc_dst or rc_src
return dstproc.returncode
dstproc._proc.wait = wait2
dstproc_verify = dstproc.verify
def verify(proc, retcode, timeout, stdout, stderr):
#TODO: right now it's impossible to specify different expected
# return codes for different stages of the pipeline
try:
or_retcode = [0] + list(retcode)
except TypeError:
if (retcode is None):
or_retcode = None # no-retcode-verification acts "greedily"
else:
or_retcode = [0, retcode]
proc.srcproc.verify(or_retcode, timeout, stdout, stderr)
dstproc_verify(retcode, timeout, stdout, stderr)
dstproc.verify = MethodType(verify, dstproc)
dstproc.stdin = srcproc.stdin
return dstproc
class BaseRedirection(BaseCommand):
__slots__ = ("cmd", "file")
SYM = None # type: str
KWARG = None # type: str
MODE = None # type: str
def __init__(self, cmd, file):
self.cmd = cmd
self.file = file
def _get_encoding(self):
return self.cmd._get_encoding()
def __repr__(self):
return "%s(%r, %r)" % (self.__class__.__name__, self.cmd, self.file)
def formulate(self, level=0, args=()):
return self.cmd.formulate(level + 1, args) + [
self.SYM, shquote(getattr(self.file, "name", self.file))
]
@property
def machine(self):
return self.cmd.machine
def popen(self, args=(), **kwargs):
from plumbum.machines.local import LocalPath
from plumbum.machines.remote import RemotePath
if self.KWARG in kwargs and kwargs[self.KWARG] not in (PIPE, None):
raise RedirectionError("%s is already redirected" % (self.KWARG, ))
if isinstance(self.file, RemotePath):
raise TypeError("Cannot redirect to/from remote paths")
if isinstance(self.file, six.string_types + (LocalPath, )):
f = kwargs[self.KWARG] = open(str(self.file), self.MODE)
else:
kwargs[self.KWARG] = self.file
f = None
try:
return self.cmd.popen(args, **kwargs)
finally:
if f:
f.close()
class StdinRedirection(BaseRedirection):
__slots__ = ()
SYM = "<"
KWARG = "stdin"
MODE = "r"
class StdoutRedirection(BaseRedirection):
__slots__ = ()
SYM = ">"
KWARG = "stdout"
MODE = "w"
class AppendingStdoutRedirection(BaseRedirection):
__slots__ = ()
SYM = ">>"
KWARG = "stdout"
MODE = "a"
class StderrRedirection(BaseRedirection):
__slots__ = ()
SYM = "2>"
KWARG = "stderr"
MODE = "w"
class _ERROUT(int):
def __repr__(self):
return "ERROUT"
def __str__(self):
return "&1"
ERROUT = _ERROUT(subprocess.STDOUT)
class StdinDataRedirection(BaseCommand):
__slots__ = ("cmd", "data")
CHUNK_SIZE = 16000
def __init__(self, cmd, data):
self.cmd = cmd
self.data = data
def _get_encoding(self):
return self.cmd._get_encoding()
def formulate(self, level=0, args=()):
return [
"echo %s" % (shquote(self.data), ), "|",
self.cmd.formulate(level + 1, args)
]
@property
def machine(self):
return self.cmd.machine
def popen(self, args=(), **kwargs):
if "stdin" in kwargs and kwargs["stdin"] != PIPE:
raise RedirectionError("stdin is already redirected")
data = self.data
if isinstance(data,
six.unicode_type) and self._get_encoding() is not None:
data = data.encode(self._get_encoding())
f = TemporaryFile()
while data:
chunk = data[:self.CHUNK_SIZE]
f.write(chunk)
data = data[self.CHUNK_SIZE:]
f.seek(0)
# try:
return self.cmd.popen(args, stdin=f, **kwargs)
# finally:
# f.close()
class ConcreteCommand(BaseCommand):
QUOTE_LEVEL = None # type: int
__slots__ = ("executable", "custom_encoding")
def __init__(self, executable, encoding):
self.executable = executable
self.custom_encoding = encoding
self.cwd = None
self.env = None
def __str__(self):
return str(self.executable)
def __repr__(self):
return "{0}({1})".format(type(self).__name__, self.executable)
def _get_encoding(self):
return self.custom_encoding
def formulate(self, level=0, args=()):
argv = [six.str(self.executable)]
for a in args:
if a is None:
continue
if isinstance(a, BaseCommand):
if level >= self.QUOTE_LEVEL:
argv.extend(shquote_list(a.formulate(level + 1)))
else:
argv.extend(a.formulate(level + 1))
elif isinstance(a, (list, tuple)):
argv.extend(
shquote(b) if level >= self.QUOTE_LEVEL else six.str(b)
for b in a)
else:
argv.append(
shquote(a) if level >= self.QUOTE_LEVEL else six.str(a))
# if self.custom_encoding:
# argv = [a.encode(self.custom_encoding) for a in argv if isinstance(a, six.string_types)]
return argv
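# ---------------------------------------------------------------------------
# Hedged usage sketch (illustrative; the concrete commands are assumptions,
# not part of this module). The operators defined on BaseCommand compose into
# shell-like pipelines and redirections:
#
#     from plumbum.cmd import ls, grep
#
#     chain = ls["-la"] | grep["py"]        # Pipeline via __or__
#     output = chain()                      # run, return stdout
#     (ls["-la"] > "listing.txt")()         # StdoutRedirection via __gt__
#     (ls["-la"] >> "listing.txt")()        # append via __rshift__
#     (grep["needle"] < "haystack.txt")()   # StdinRedirection via __lt__
# ---------------------------------------------------------------------------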
|
from typing import Tuple
import numpy as np
from tensornetwork import network_components, CopyNode, Node
from tensornetwork.contractors import bucket_contractor
from tensornetwork.contractors import greedy
bucket = bucket_contractor.bucket
def add_cnot(
q0: network_components.Edge,
q1: network_components.Edge,
backend: str = "numpy"
) -> Tuple[network_components.CopyNode, network_components.Edge,
network_components.Edge]:
"""Adds the CNOT quantum gate to tensor network.
CNOT consists of two rank-3 tensors: a COPY tensor on the control qubit and
a XOR tensor on the target qubit.
Args:
q0: Input edge for the control qubit.
q1: Input edge for the target qubit.
backend: backend to use
Returns:
Tuple with three elements:
- copy tensor corresponding to the control qubit
- output edge for the control qubit and
- output edge for the target qubit.
"""
control = CopyNode(rank=3, dimension=2, backend=backend)
xor = np.array([[[1, 0], [0, 1]], [[0, 1], [1, 0]]], dtype=np.float64)
target = Node(xor, backend=backend)
network_components.connect(q0, control[0])
network_components.connect(q1, target[0])
network_components.connect(control[1], target[1])
return (control, control[2], target[2])
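# Editorial note: the XOR tensor above is indexed as xor[control, in, out];
# xor[0] is the 2x2 identity (target unchanged) and xor[1] is the Pauli-X
# matrix (target flipped), so contracting it with the rank-3 COPY node on the
# control wire reproduces the CNOT truth table |c, t> -> |c, t XOR c>.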
def test_cnot_gate():
# Prepare input state: |11>
q0_in = Node(np.array([0, 1], dtype=np.float64))
q1_in = Node(np.array([0, 1], dtype=np.float64))
# Prepare output state: |10>
q0_out = Node(np.array([0, 1], dtype=np.float64))
q1_out = Node(np.array([1, 0], dtype=np.float64))
# Build quantum circuit
copy_node, q0_t1, q1_t1 = add_cnot(q0_in[0], q1_in[0])
network_components.connect(q0_t1, q0_out[0])
network_components.connect(q1_t1, q1_out[0])
# Contract the network, first using Bucket Elimination, then once
# no more copy tensors are left to exploit, fall back to the naive
# contractor.
contraction_order = (copy_node,)
net = bucket([q0_in, q1_in, q0_out, q1_out, copy_node], contraction_order)
result = greedy(net)
# Verify that CNOT has turned |11> into |10>.
np.testing.assert_allclose(result.get_tensor(), 1.0)
def test_swap_gate():
# Prepare input state: 0.6|00> + 0.8|10>
q0_in = Node(np.array([0.6, 0.8], dtype=np.float64), backend="jax")
q1_in = Node(np.array([1, 0], dtype=np.float64), backend="jax")
# Prepare output state: 0.6|00> + 0.8|01>
q0_out = Node(np.array([1, 0], dtype=np.float64), backend="jax")
q1_out = Node(np.array([0.6, 0.8], dtype=np.float64), backend="jax")
# Build quantum circuit: three CNOTs implement a SWAP
copy_node_1, q0_t1, q1_t1 = add_cnot(q0_in[0], q1_in[0], backend="jax")
copy_node_2, q1_t2, q0_t2 = add_cnot(q1_t1, q0_t1, backend="jax")
copy_node_3, q0_t3, q1_t3 = add_cnot(q0_t2, q1_t2, backend="jax")
network_components.connect(q0_t3, q0_out[0])
network_components.connect(q1_t3, q1_out[0])
# Contract the network, first Bucket Elimination, then greedy to complete.
contraction_order = (copy_node_1, copy_node_2, copy_node_3)
nodes = [q0_in, q0_out, q1_in, q1_out, copy_node_1, copy_node_2, copy_node_3]
net = bucket(nodes, contraction_order)
result = greedy(net)
# Verify that SWAP has turned |10> into |01> and kept |00> unchanged.
np.testing.assert_allclose(result.get_tensor(), 1.0)
|
import logging
from aiohttp import web
import voluptuous as vol
from homeassistant.const import CONF_WEBHOOK_ID
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_entry_flow, intent, template
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
SOURCE = "Home Assistant Dialogflow"
CONFIG_SCHEMA = vol.Schema({DOMAIN: {}}, extra=vol.ALLOW_EXTRA)
V1 = 1
V2 = 2
class DialogFlowError(HomeAssistantError):
"""Raised when a DialogFlow error happens."""
async def async_setup(hass, config):
"""Set up the Dialogflow component."""
return True
async def handle_webhook(hass, webhook_id, request):
"""Handle incoming webhook with Dialogflow requests."""
message = await request.json()
_LOGGER.debug("Received Dialogflow request: %s", message)
try:
response = await async_handle_message(hass, message)
return b"" if response is None else web.json_response(response)
except DialogFlowError as err:
_LOGGER.warning(str(err))
return web.json_response(dialogflow_error_response(message, str(err)))
except intent.UnknownIntent as err:
_LOGGER.warning(str(err))
return web.json_response(
dialogflow_error_response(
message, "This intent is not yet configured within Home Assistant."
)
)
except intent.InvalidSlotInfo as err:
_LOGGER.warning(str(err))
return web.json_response(
dialogflow_error_response(
message, "Invalid slot information received for this intent."
)
)
except intent.IntentError as err:
_LOGGER.warning(str(err))
return web.json_response(
dialogflow_error_response(message, "Error handling intent.")
)
async def async_setup_entry(hass, entry):
"""Configure based on config entry."""
hass.components.webhook.async_register(
DOMAIN, "DialogFlow", entry.data[CONF_WEBHOOK_ID], handle_webhook
)
return True
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
hass.components.webhook.async_unregister(entry.data[CONF_WEBHOOK_ID])
return True
async_remove_entry = config_entry_flow.webhook_async_remove_entry
def dialogflow_error_response(message, error):
"""Return a response saying the error message."""
api_version = get_api_version(message)
if api_version is V1:
parameters = message["result"]["parameters"]
elif api_version is V2:
parameters = message["queryResult"]["parameters"]
dialogflow_response = DialogflowResponse(parameters, api_version)
dialogflow_response.add_speech(error)
return dialogflow_response.as_dict()
def get_api_version(message):
"""Get API version of Dialogflow message."""
if message.get("id") is not None:
return V1
if message.get("responseId") is not None:
return V2
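# Illustrative payload shapes (abridged, not full Dialogflow messages):
#   V1: {"id": "...", "result": {"action": "...", "parameters": {...},
#        "actionIncomplete": False}}
#   V2: {"responseId": "...", "queryResult": {"action": "...",
#        "parameters": {...}, "allRequiredParamsPresent": True}}
# get_api_version keys off the presence of "id" vs "responseId".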
async def async_handle_message(hass, message):
"""Handle a DialogFlow message."""
_api_version = get_api_version(message)
if _api_version is V1:
_LOGGER.warning(
"Dialogflow V1 API will be removed on October 23, 2019. Please change your DialogFlow settings to use the V2 api"
)
req = message.get("result")
action_incomplete = req.get("actionIncomplete", True)
if action_incomplete:
return
elif _api_version is V2:
req = message.get("queryResult")
if req.get("allRequiredParamsPresent", False) is False:
return
action = req.get("action", "")
parameters = req.get("parameters").copy()
parameters["dialogflow_query"] = message
dialogflow_response = DialogflowResponse(parameters, _api_version)
if action == "":
raise DialogFlowError(
"You have not defined an action in your Dialogflow intent."
)
intent_response = await intent.async_handle(
hass,
DOMAIN,
action,
{key: {"value": value} for key, value in parameters.items()},
)
if "plain" in intent_response.speech:
dialogflow_response.add_speech(intent_response.speech["plain"]["speech"])
return dialogflow_response.as_dict()
class DialogflowResponse:
"""Help generating the response for Dialogflow."""
def __init__(self, parameters, api_version):
"""Initialize the Dialogflow response."""
self.speech = None
self.parameters = {}
self.api_version = api_version
# Replace '.' and '-' in parameter names with '_'
for key, value in parameters.items():
underscored_key = key.replace(".", "_").replace("-", "_")
self.parameters[underscored_key] = value
def add_speech(self, text):
"""Add speech to the response."""
assert self.speech is None
if isinstance(text, template.Template):
text = text.async_render(self.parameters, parse_result=False)
self.speech = text
def as_dict(self):
"""Return response in a Dialogflow valid dictionary."""
if self.api_version is V1:
return {"speech": self.speech, "displayText": self.speech, "source": SOURCE}
if self.api_version is V2:
return {"fulfillmentText": self.speech, "source": SOURCE}
|
import os
import json
_subcmd_cfgfile = os.path.join(os.environ['STASH_ROOT'], '.completer_subcmd.json')
_subcmd_cfg = {
"git":
{
"1":
{
'candidates':
[
'branch',
'checkout',
'clone',
'commit',
'help',
'log',
'modified',
'pull',
'push',
'remote',
'reset',
'rm',
'status',
'add',
'diff',
'merge',
'init',
'fetch'
],
'blank_completion': True,
'with_normal_completion': False,
},
'-':
{
'blank_completion': False,
'with_normal_completion': False,
'candidate_groups': [['log',
['-l',
'--length',
'-f',
'--format',
'-o',
'--output']]],
}
},
"ls":
{
'-':
{
'blank_completion': False,
'with_normal_completion': False,
'candidate_groups': [[None,
['-1',
'--one-line',
'-a',
'--all',
'-l',
'--long']],
]
}
},
"pip":
{
"1":
{
'candidates': ['install',
'list',
'download',
'search',
'update',
'versions',
'uninstall'],
'blank_completion': True,
'with_normal_completion': False,
},
},
"gci":
{
"1":
{
'candidates': [
'enable',
'disable',
'status',
'collect',
'threshold',
'debug',
'break',
],
'blank_completion': True,
'with_normal_completion': False,
},
},
"stashconf":
{
"1":
{
'candidates':
[
'input_encoding_utf8',
'ipython_style_history_search',
'py_pdb',
'py_traceback',
"enable_styles",
"colored_errors",
"enable_styles",
],
'blank_completion': True,
'with_normal_completion': False,
},
"-":
{
'candidate_groups': [[None,
['-l',
'--list',
'-h',
'--help']],
],
'blank_completion': False,
'with_normal_completion': False,
},
},
"webviewer":
{
"-":
{
'candidate_groups': [[None,
['-i',
'--insecure',
'-h',
'--help']],
],
'blank_completion': True,
'with_normal_completion': False,
},
},
"monkeylord":
{
"1": {
"candidates": ["list",
"enable",
"disable"],
"blank_completion": True,
"with_normal_completion": False,
},
"-":
{
"candidate_groups": [[None,
["-h",
"--help"]],
],
"blank_completion": True,
"with_normal_completion": False,
},
},
"mount":
{
"-":
{
"candidate_groups":
[
[
None,
[
"-h",
"--help",
"-l",
"--show-labels",
"-v",
"--verbose",
"-y",
"--yes",
"-f",
"--fake",
"-r",
"--read-only",
"-t",
"--type",
]
]
],
"blank_completion": True,
"with_normal_completion": False,
},
},
"umount":
{
"-":
{
"candidate_groups": [[None,
[
"-h",
"--help",
"-a",
"--all",
"-v",
"--verbose",
"-f",
"--force",
]]],
"with_normal_completion": False,
"blank_completion": True,
},
},
}
if os.path.exists(_subcmd_cfgfile) and os.path.isfile(_subcmd_cfgfile):
try:
with open(_subcmd_cfgfile) as ins:
_subcmd_cfg.update(json.loads(ins.read()))
except IOError:
pass
def _select_from_candidates(candidates, tok):
return [cand for cand in candidates if cand.startswith(tok)]
def _select_from_candidate_groups(candidate_groups, tok, after=None):
for cg in candidate_groups:
if cg[0] == after:
return _select_from_candidates(cg[1], tok)
return None
def subcmd_complete(toks):
# Only one token, this is still command, not sub-command yet
if len(toks) == 1:
return None, None
word_to_complete = toks[-1]
is_blank_completion = word_to_complete == ''
cmd_word = toks[0]
if cmd_word.endswith('.py'):
cmd_word = cmd_word[:-3]
pos = str(len(toks) - 1)
try:
cfg = _subcmd_cfg[cmd_word]
if pos in cfg.keys() \
and (not is_blank_completion
or (is_blank_completion and cfg[pos]['blank_completion'])):
cands = _select_from_candidates(cfg[pos]['candidates'], '' if is_blank_completion else word_to_complete)
return cands, cfg[pos]['with_normal_completion']
elif '-' in cfg.keys() \
and ((not is_blank_completion and word_to_complete.startswith('-'))
or (is_blank_completion and cfg['-']['blank_completion'])):
subcmd = None
for t in toks[-1:0:-1]:
if not t.startswith('-'):
subcmd = t
break
cands = _select_from_candidate_groups(
cfg['-']['candidate_groups'],
'' if is_blank_completion else word_to_complete,
subcmd
)
if cands is not None:
return cands, cfg['-']['with_normal_completion']
    except KeyError:
pass
return None, None
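# Illustrative sketch (not part of the original module, and it assumes STASH_ROOT is
# set in the environment so the module-level config load above succeeds): exercising
# subcmd_complete directly to show how candidates are matched against the token
# being completed.
if __name__ == '__main__':
    # Completing the second token of "git ch" suggests the matching sub-command.
    print(subcmd_complete(['git', 'ch']))         # -> (['checkout'], False)
    # A blank second token for "pip " lists every pip sub-command candidate.
    print(subcmd_complete(['pip', '']))           # -> (['install', 'list', ...], False)
    # Option completion after a sub-command, e.g. "git log --".
    print(subcmd_complete(['git', 'log', '--']))  # -> (['--length', '--format', '--output'], False)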
|
import logging
from pygti.auth import GTI_DEFAULT_HOST
from pygti.exceptions import CannotConnect, InvalidAuth
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_OFFSET, CONF_PASSWORD, CONF_USERNAME
from homeassistant.core import callback
from homeassistant.helpers import aiohttp_client
import homeassistant.helpers.config_validation as cv
from .const import ( # pylint:disable=unused-import
CONF_FILTER,
CONF_REAL_TIME,
CONF_STATION,
DOMAIN,
)
from .hub import GTIHub
_LOGGER = logging.getLogger(__name__)
SCHEMA_STEP_USER = vol.Schema(
{
vol.Required(CONF_HOST, default=GTI_DEFAULT_HOST): str,
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
}
)
SCHEMA_STEP_STATION = vol.Schema({vol.Required(CONF_STATION): str})
SCHEMA_STEP_OPTIONS = vol.Schema(
{
vol.Required(CONF_FILTER): vol.In([]),
vol.Required(CONF_OFFSET, default=0): cv.positive_int,
vol.Optional(CONF_REAL_TIME, default=True): bool,
}
)
class ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for HVV."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize component."""
self.hub = None
self.data = None
self.stations = {}
async def async_step_user(self, user_input=None):
"""Handle the initial step."""
errors = {}
if user_input is not None:
session = aiohttp_client.async_get_clientsession(self.hass)
self.hub = GTIHub(
user_input[CONF_HOST],
user_input[CONF_USERNAME],
user_input[CONF_PASSWORD],
session,
)
try:
response = await self.hub.authenticate()
_LOGGER.debug("Init gti: %r", response)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
if not errors:
self.data = user_input
return await self.async_step_station()
return self.async_show_form(
step_id="user", data_schema=SCHEMA_STEP_USER, errors=errors
)
async def async_step_station(self, user_input=None):
"""Handle the step where the user inputs his/her station."""
if user_input is not None:
errors = {}
check_name = await self.hub.gti.checkName(
{"theName": {"name": user_input[CONF_STATION]}, "maxList": 20}
)
stations = check_name.get("results")
self.stations = {
f"{station.get('name')}": station
for station in stations
if station.get("type") == "STATION"
}
if not self.stations:
errors["base"] = "no_results"
return self.async_show_form(
step_id="station", data_schema=SCHEMA_STEP_STATION, errors=errors
)
return await self.async_step_station_select()
return self.async_show_form(step_id="station", data_schema=SCHEMA_STEP_STATION)
async def async_step_station_select(self, user_input=None):
"""Handle the step where the user inputs his/her station."""
schema = vol.Schema({vol.Required(CONF_STATION): vol.In(list(self.stations))})
if user_input is None:
return self.async_show_form(step_id="station_select", data_schema=schema)
self.data.update({"station": self.stations[user_input[CONF_STATION]]})
title = self.data[CONF_STATION]["name"]
return self.async_create_entry(title=title, data=self.data)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get options flow."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Options flow handler."""
def __init__(self, config_entry):
"""Initialize HVV Departures options flow."""
self.config_entry = config_entry
self.options = dict(config_entry.options)
self.departure_filters = {}
self.hub = None
async def async_step_init(self, user_input=None):
"""Manage the options."""
errors = {}
if not self.departure_filters:
departure_list = {}
self.hub = self.hass.data[DOMAIN][self.config_entry.entry_id]
try:
departure_list = await self.hub.gti.departureList(
{
"station": self.config_entry.data[CONF_STATION],
"time": {"date": "heute", "time": "jetzt"},
"maxList": 5,
"maxTimeOffset": 200,
"useRealtime": True,
"returnFilters": True,
}
)
except CannotConnect:
errors["base"] = "cannot_connect"
except InvalidAuth:
errors["base"] = "invalid_auth"
if not errors:
self.departure_filters = {
str(i): departure_filter
for i, departure_filter in enumerate(departure_list.get("filter"))
}
if user_input is not None and not errors:
options = {
CONF_FILTER: [
self.departure_filters[x] for x in user_input[CONF_FILTER]
],
CONF_OFFSET: user_input[CONF_OFFSET],
CONF_REAL_TIME: user_input[CONF_REAL_TIME],
}
return self.async_create_entry(title="", data=options)
if CONF_FILTER in self.config_entry.options:
old_filter = [
i
for (i, f) in self.departure_filters.items()
if f in self.config_entry.options.get(CONF_FILTER)
]
else:
old_filter = []
return self.async_show_form(
step_id="init",
data_schema=vol.Schema(
{
vol.Optional(CONF_FILTER, default=old_filter): cv.multi_select(
{
key: f"{departure_filter['serviceName']}, {departure_filter['label']}"
for key, departure_filter in self.departure_filters.items()
}
),
vol.Required(
CONF_OFFSET,
default=self.config_entry.options.get(CONF_OFFSET, 0),
): cv.positive_int,
vol.Optional(
CONF_REAL_TIME,
default=self.config_entry.options.get(CONF_REAL_TIME, True),
): bool,
}
),
errors=errors,
)
|
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
from tests.components.homekit_controller.common import setup_test_component
V1_ON = ("fan", "on")
V1_ROTATION_DIRECTION = ("fan", "rotation.direction")
V1_ROTATION_SPEED = ("fan", "rotation.speed")
V2_ACTIVE = ("fanv2", "active")
V2_ROTATION_DIRECTION = ("fanv2", "rotation.direction")
V2_ROTATION_SPEED = ("fanv2", "rotation.speed")
V2_SWING_MODE = ("fanv2", "swing-mode")
def create_fan_service(accessory):
"""
Define fan v1 characteristics as per HAP spec.
    This service is no longer documented in R2 of the public HAP spec, but existing
    devices (such as the SIMPLEconnect fan) still use it.
"""
service = accessory.add_service(ServicesTypes.FAN)
cur_state = service.add_char(CharacteristicsTypes.ON)
cur_state.value = 0
direction = service.add_char(CharacteristicsTypes.ROTATION_DIRECTION)
direction.value = 0
speed = service.add_char(CharacteristicsTypes.ROTATION_SPEED)
speed.value = 0
def create_fanv2_service(accessory):
"""Define fan v2 characteristics as per HAP spec."""
service = accessory.add_service(ServicesTypes.FAN_V2)
cur_state = service.add_char(CharacteristicsTypes.ACTIVE)
cur_state.value = 0
direction = service.add_char(CharacteristicsTypes.ROTATION_DIRECTION)
direction.value = 0
speed = service.add_char(CharacteristicsTypes.ROTATION_SPEED)
speed.value = 0
swing_mode = service.add_char(CharacteristicsTypes.SWING_MODE)
swing_mode.value = 0
async def test_fan_read_state(hass, utcnow):
"""Test that we can read the state of a HomeKit fan accessory."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ON].value = False
state = await helper.poll_and_get_state()
assert state.state == "off"
helper.characteristics[V1_ON].value = True
state = await helper.poll_and_get_state()
assert state.state == "on"
async def test_turn_on(hass, utcnow):
"""Test that we can turn a fan on."""
helper = await setup_test_component(hass, create_fan_service)
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "high"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 1
assert helper.characteristics[V1_ROTATION_SPEED].value == 100
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "medium"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 1
assert helper.characteristics[V1_ROTATION_SPEED].value == 50
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "low"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 1
assert helper.characteristics[V1_ROTATION_SPEED].value == 25
async def test_turn_off(hass, utcnow):
"""Test that we can turn a fan off."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ON].value = 1
await hass.services.async_call(
"fan",
"turn_off",
{"entity_id": "fan.testdevice"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 0
async def test_set_speed(hass, utcnow):
"""Test that we set fan speed."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ON].value = 1
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "high"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_SPEED].value == 100
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "medium"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_SPEED].value == 50
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "low"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_SPEED].value == 25
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "off"},
blocking=True,
)
assert helper.characteristics[V1_ON].value == 0
async def test_speed_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ON].value = 1
helper.characteristics[V1_ROTATION_SPEED].value = 100
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "high"
helper.characteristics[V1_ROTATION_SPEED].value = 50
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "medium"
helper.characteristics[V1_ROTATION_SPEED].value = 25
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "low"
helper.characteristics[V1_ON].value = 0
helper.characteristics[V1_ROTATION_SPEED].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "off"
async def test_set_direction(hass, utcnow):
"""Test that we can set fan spin direction."""
helper = await setup_test_component(hass, create_fan_service)
await hass.services.async_call(
"fan",
"set_direction",
{"entity_id": "fan.testdevice", "direction": "reverse"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_DIRECTION].value == 1
await hass.services.async_call(
"fan",
"set_direction",
{"entity_id": "fan.testdevice", "direction": "forward"},
blocking=True,
)
assert helper.characteristics[V1_ROTATION_DIRECTION].value == 0
async def test_direction_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fan_service)
helper.characteristics[V1_ROTATION_DIRECTION].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["direction"] == "forward"
helper.characteristics[V1_ROTATION_DIRECTION].value = 1
state = await helper.poll_and_get_state()
assert state.attributes["direction"] == "reverse"
async def test_fanv2_read_state(hass, utcnow):
"""Test that we can read the state of a HomeKit fan accessory."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ACTIVE].value = False
state = await helper.poll_and_get_state()
assert state.state == "off"
helper.characteristics[V2_ACTIVE].value = True
state = await helper.poll_and_get_state()
assert state.state == "on"
async def test_v2_turn_on(hass, utcnow):
"""Test that we can turn a fan on."""
helper = await setup_test_component(hass, create_fanv2_service)
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "high"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 1
assert helper.characteristics[V2_ROTATION_SPEED].value == 100
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "medium"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 1
assert helper.characteristics[V2_ROTATION_SPEED].value == 50
await hass.services.async_call(
"fan",
"turn_on",
{"entity_id": "fan.testdevice", "speed": "low"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 1
assert helper.characteristics[V2_ROTATION_SPEED].value == 25
async def test_v2_turn_off(hass, utcnow):
"""Test that we can turn a fan off."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ACTIVE].value = 1
await hass.services.async_call(
"fan",
"turn_off",
{"entity_id": "fan.testdevice"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 0
async def test_v2_set_speed(hass, utcnow):
"""Test that we set fan speed."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ACTIVE].value = 1
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "high"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_SPEED].value == 100
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "medium"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_SPEED].value == 50
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "low"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_SPEED].value == 25
await hass.services.async_call(
"fan",
"set_speed",
{"entity_id": "fan.testdevice", "speed": "off"},
blocking=True,
)
assert helper.characteristics[V2_ACTIVE].value == 0
async def test_v2_speed_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ACTIVE].value = 1
helper.characteristics[V2_ROTATION_SPEED].value = 100
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "high"
helper.characteristics[V2_ROTATION_SPEED].value = 50
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "medium"
helper.characteristics[V2_ROTATION_SPEED].value = 25
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "low"
helper.characteristics[V2_ACTIVE].value = 0
helper.characteristics[V2_ROTATION_SPEED].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["speed"] == "off"
async def test_v2_set_direction(hass, utcnow):
"""Test that we can set fan spin direction."""
helper = await setup_test_component(hass, create_fanv2_service)
await hass.services.async_call(
"fan",
"set_direction",
{"entity_id": "fan.testdevice", "direction": "reverse"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_DIRECTION].value == 1
await hass.services.async_call(
"fan",
"set_direction",
{"entity_id": "fan.testdevice", "direction": "forward"},
blocking=True,
)
assert helper.characteristics[V2_ROTATION_DIRECTION].value == 0
async def test_v2_direction_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_ROTATION_DIRECTION].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["direction"] == "forward"
helper.characteristics[V2_ROTATION_DIRECTION].value = 1
state = await helper.poll_and_get_state()
assert state.attributes["direction"] == "reverse"
async def test_v2_oscillate(hass, utcnow):
"""Test that we can control a fans oscillation."""
helper = await setup_test_component(hass, create_fanv2_service)
await hass.services.async_call(
"fan",
"oscillate",
{"entity_id": "fan.testdevice", "oscillating": True},
blocking=True,
)
assert helper.characteristics[V2_SWING_MODE].value == 1
await hass.services.async_call(
"fan",
"oscillate",
{"entity_id": "fan.testdevice", "oscillating": False},
blocking=True,
)
assert helper.characteristics[V2_SWING_MODE].value == 0
async def test_v2_oscillate_read(hass, utcnow):
"""Test that we can read a fans oscillation."""
helper = await setup_test_component(hass, create_fanv2_service)
helper.characteristics[V2_SWING_MODE].value = 0
state = await helper.poll_and_get_state()
assert state.attributes["oscillating"] is False
helper.characteristics[V2_SWING_MODE].value = 1
state = await helper.poll_and_get_state()
assert state.attributes["oscillating"] is True
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from six.moves import urllib
import tensorflow as tf
from datasets import dataset_utils
slim = tf.contrib.slim
# TODO(nsilberman): Add tfrecord file type once the script is updated.
_FILE_PATTERN = '%s-*'
_SPLITS_TO_SIZES = {
'train': 1281167,
'validation': 50000,
}
_ITEMS_TO_DESCRIPTIONS = {
'image': 'A color image of varying height and width.',
'label': 'The label id of the image, integer between 0 and 999',
'label_text': 'The text of the label.',
'object/bbox': 'A list of bounding boxes.',
'object/label': 'A list of labels, one per each object.',
}
_NUM_CLASSES = 1001
def create_readable_names_for_imagenet_labels():
"""Create a dict mapping label id to human readable string.
Returns:
    labels_to_names: dictionary where keys are integers from 0 to 1000
and values are human-readable names.
We retrieve a synset file, which contains a list of valid synset labels used
  by ILSVRC competition. There is one synset per line, e.g.:
# n01440764
# n01443537
We also retrieve a synset_to_human_file, which contains a mapping from synsets
to human-readable names for every synset in Imagenet. These are stored in a
tsv format, as follows:
# n02119247 black fox
# n02119359 silver fox
We assign each synset (in alphabetical order) an integer, starting from 1
(since 0 is reserved for the background class).
Code is based on
https://github.com/tensorflow/models/blob/master/inception/inception/data/build_imagenet_data.py#L463
"""
# pylint: disable=g-line-too-long
base_url = 'https://raw.githubusercontent.com/tensorflow/models/master/inception/inception/data/'
synset_url = '{}/imagenet_lsvrc_2015_synsets.txt'.format(base_url)
synset_to_human_url = '{}/imagenet_metadata.txt'.format(base_url)
filename, _ = urllib.request.urlretrieve(synset_url)
synset_list = [s.strip() for s in open(filename).readlines()]
num_synsets_in_ilsvrc = len(synset_list)
assert num_synsets_in_ilsvrc == 1000
filename, _ = urllib.request.urlretrieve(synset_to_human_url)
synset_to_human_list = open(filename).readlines()
num_synsets_in_all_imagenet = len(synset_to_human_list)
assert num_synsets_in_all_imagenet == 21842
synset_to_human = {}
for s in synset_to_human_list:
parts = s.strip().split('\t')
assert len(parts) == 2
synset = parts[0]
human = parts[1]
synset_to_human[synset] = human
label_index = 1
labels_to_names = {0: 'background'}
for synset in synset_list:
name = synset_to_human[synset]
labels_to_names[label_index] = name
label_index += 1
return labels_to_names
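# Illustrative usage (not part of the original module; assumes network access to the
# two URLs above so the synset files can be fetched):
#   labels_to_names = create_readable_names_for_imagenet_labels()
#   labels_to_names[0]  -> 'background'
#   labels_to_names[1]  -> human-readable name of the alphabetically first synset,
#                          e.g. 'tench, Tinca tinca'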
def get_split(split_name, dataset_dir, file_pattern=None, reader=None):
"""Gets a dataset tuple with instructions for reading ImageNet.
Args:
split_name: A train/test split name.
dataset_dir: The base directory of the dataset sources.
file_pattern: The file pattern to use when matching the dataset sources.
It is assumed that the pattern contains a '%s' string so that the split
name can be inserted.
reader: The TensorFlow reader type.
Returns:
A `Dataset` namedtuple.
Raises:
ValueError: if `split_name` is not a valid train/test split.
"""
if split_name not in _SPLITS_TO_SIZES:
raise ValueError('split name %s was not recognized.' % split_name)
if not file_pattern:
file_pattern = _FILE_PATTERN
file_pattern = os.path.join(dataset_dir, file_pattern % split_name)
# Allowing None in the signature so that dataset_factory can use the default.
if reader is None:
reader = tf.TFRecordReader
keys_to_features = {
'image/encoded': tf.FixedLenFeature(
(), tf.string, default_value=''),
'image/format': tf.FixedLenFeature(
(), tf.string, default_value='jpeg'),
'image/class/label': tf.FixedLenFeature(
[], dtype=tf.int64, default_value=-1),
'image/class/text': tf.FixedLenFeature(
[], dtype=tf.string, default_value=''),
'image/object/bbox/xmin': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/ymin': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/xmax': tf.VarLenFeature(
dtype=tf.float32),
'image/object/bbox/ymax': tf.VarLenFeature(
dtype=tf.float32),
'image/object/class/label': tf.VarLenFeature(
dtype=tf.int64),
}
items_to_handlers = {
'image': slim.tfexample_decoder.Image('image/encoded', 'image/format'),
'label': slim.tfexample_decoder.Tensor('image/class/label'),
'label_text': slim.tfexample_decoder.Tensor('image/class/text'),
'object/bbox': slim.tfexample_decoder.BoundingBox(
['ymin', 'xmin', 'ymax', 'xmax'], 'image/object/bbox/'),
'object/label': slim.tfexample_decoder.Tensor('image/object/class/label'),
}
decoder = slim.tfexample_decoder.TFExampleDecoder(
keys_to_features, items_to_handlers)
labels_to_names = None
if dataset_utils.has_labels(dataset_dir):
labels_to_names = dataset_utils.read_label_file(dataset_dir)
else:
labels_to_names = create_readable_names_for_imagenet_labels()
dataset_utils.write_label_file(labels_to_names, dataset_dir)
return slim.dataset.Dataset(
data_sources=file_pattern,
reader=reader,
decoder=decoder,
num_samples=_SPLITS_TO_SIZES[split_name],
items_to_descriptions=_ITEMS_TO_DESCRIPTIONS,
num_classes=_NUM_CLASSES,
labels_to_names=labels_to_names)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker.linux_benchmarks import mnist_benchmark
from perfkitbenchmarker.linux_benchmarks import resnet_benchmark
from perfkitbenchmarker.linux_packages import cloud_tpu_models
from perfkitbenchmarker.linux_packages import tensorflow
from six.moves import range
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'inception3'
BENCHMARK_CONFIG = """
inception3:
description: Runs Inception V3 Benchmark.
vm_groups:
default:
os_type: ubuntu1604
vm_spec:
GCP:
machine_type: n1-standard-4
zone: us-east1-d
boot_disk_size: 200
AWS:
machine_type: p2.xlarge
zone: us-east-1
boot_disk_size: 200
Azure:
machine_type: Standard_NC6
zone: eastus
"""
flags.DEFINE_float('inception3_learning_rate', 0.165, 'Learning rate.')
flags.DEFINE_integer('inception3_train_epochs', 200,
                     'Number of epochs to use for training.', lower_bound=1)
flags.DEFINE_enum('inception3_use_data', 'real', ['real', 'fake'],
'Whether to use real or fake data. If real, the data is '
'downloaded from imagenet_data_dir. Otherwise, synthetic '
'data is generated.')
flags.DEFINE_enum('inception3_mode', 'train_and_eval',
['train', 'eval', 'train_and_eval'],
'Mode to run: train, eval, train_and_eval')
flags.DEFINE_integer('inception3_epochs_per_eval', 2,
'Number of training epochs to run between evaluations.')
flags.DEFINE_integer('inception3_save_checkpoints_secs', 0, 'Interval (in '
'seconds) at which the model data should be checkpointed. '
'Set to 0 to disable.')
flags.DEFINE_integer('inception3_train_batch_size', 1024,
'Global (not per-shard) batch size for training')
flags.DEFINE_integer('inception3_eval_batch_size', 1024,
'Global (not per-shard) batch size for evaluation')
def GetConfig(user_config):
"""Load and return benchmark config.
Args:
user_config: user supplied configuration (flags and config file)
Returns:
loaded benchmark configuration
"""
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def _UpdateBenchmarkSpecWithFlags(benchmark_spec):
"""Update the benchmark_spec with supplied command line flags.
Args:
benchmark_spec: benchmark specification to update
"""
benchmark_spec.learning_rate = FLAGS.inception3_learning_rate
benchmark_spec.use_data = FLAGS.inception3_use_data
benchmark_spec.mode = FLAGS.inception3_mode
benchmark_spec.save_checkpoints_secs = FLAGS.inception3_save_checkpoints_secs
benchmark_spec.train_batch_size = FLAGS.inception3_train_batch_size
benchmark_spec.eval_batch_size = FLAGS.inception3_eval_batch_size
benchmark_spec.commit = cloud_tpu_models.GetCommit(benchmark_spec.vms[0])
benchmark_spec.data_dir = FLAGS.imagenet_data_dir
benchmark_spec.num_train_images = FLAGS.imagenet_num_train_images
benchmark_spec.num_eval_images = FLAGS.imagenet_num_eval_images
benchmark_spec.num_examples_per_epoch = (
float(benchmark_spec.num_train_images) / benchmark_spec.train_batch_size)
benchmark_spec.train_epochs = FLAGS.inception3_train_epochs
benchmark_spec.train_steps = int(
benchmark_spec.train_epochs * benchmark_spec.num_examples_per_epoch)
benchmark_spec.epochs_per_eval = FLAGS.inception3_epochs_per_eval
benchmark_spec.steps_per_eval = int(
benchmark_spec.epochs_per_eval *
benchmark_spec.num_examples_per_epoch)
def Prepare(benchmark_spec):
"""Install and set up Inception V3 on the target vm.
Args:
benchmark_spec: The benchmark specification
"""
mnist_benchmark.Prepare(benchmark_spec)
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
def _CreateMetadataDict(benchmark_spec):
"""Create metadata dict to be used in run results.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
metadata dict
"""
metadata = mnist_benchmark.CreateMetadataDict(benchmark_spec)
metadata.update({
'learning_rate': benchmark_spec.learning_rate,
'use_data': benchmark_spec.use_data,
'mode': benchmark_spec.mode,
'save_checkpoints_secs': benchmark_spec.save_checkpoints_secs,
'epochs_per_eval': benchmark_spec.epochs_per_eval,
'steps_per_eval': benchmark_spec.steps_per_eval,
'precision': benchmark_spec.precision,
'train_batch_size': benchmark_spec.train_batch_size,
'eval_batch_size': benchmark_spec.eval_batch_size
})
return metadata
def Run(benchmark_spec):
"""Run Inception V3 on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
_UpdateBenchmarkSpecWithFlags(benchmark_spec)
vm = benchmark_spec.vms[0]
inception3_benchmark_script = (
'tpu/models/experimental/inception/inception_v3.py')
inception3_benchmark_cmd = (
'{env_cmd} && python {script} '
'--learning_rate={learning_rate} '
'--iterations={iterations} '
'--use_tpu={use_tpu} '
'--use_data={use_data} '
'--train_steps_per_eval={steps_per_eval} '
'--data_dir={data_dir} '
'--model_dir={model_dir} '
'--save_checkpoints_secs={save_checkpoints_secs} '
'--train_batch_size={train_batch_size} '
'--eval_batch_size={eval_batch_size} '
'--precision={precision}'.format(
env_cmd=benchmark_spec.env_cmd,
script=inception3_benchmark_script,
learning_rate=benchmark_spec.learning_rate,
iterations=benchmark_spec.iterations,
use_tpu=bool(benchmark_spec.tpus),
use_data=benchmark_spec.use_data,
steps_per_eval=benchmark_spec.steps_per_eval,
data_dir=benchmark_spec.data_dir,
model_dir=benchmark_spec.model_dir,
save_checkpoints_secs=benchmark_spec.save_checkpoints_secs,
train_batch_size=benchmark_spec.train_batch_size,
eval_batch_size=benchmark_spec.eval_batch_size,
precision=benchmark_spec.precision))
if FLAGS.tf_device == 'gpu':
inception3_benchmark_cmd = '{env} {cmd}'.format(
env=tensorflow.GetEnvironmentVars(vm), cmd=inception3_benchmark_cmd)
samples = []
metadata = _CreateMetadataDict(benchmark_spec)
elapsed_seconds = 0
steps_per_eval = benchmark_spec.steps_per_eval
train_steps = benchmark_spec.train_steps
for step in range(steps_per_eval, train_steps + steps_per_eval,
steps_per_eval):
step = min(step, train_steps)
inception3_benchmark_cmd_step = '{cmd} --train_steps={step}'.format(
cmd=inception3_benchmark_cmd, step=step)
if benchmark_spec.mode in ('train', 'train_and_eval'):
if benchmark_spec.tpus:
tpu = benchmark_spec.tpu_groups['train'].GetName()
num_shards = '--num_shards={}'.format(
benchmark_spec.tpu_groups['train'].GetNumShards())
else:
tpu = num_shards = ''
inception3_benchmark_train_cmd = (
'{cmd} --tpu={tpu} --mode=train {num_shards}'.format(
cmd=inception3_benchmark_cmd_step,
tpu=tpu, num_shards=num_shards))
start = time.time()
stdout, stderr = vm.RobustRemoteCommand(inception3_benchmark_train_cmd,
should_log=True)
elapsed_seconds += (time.time() - start)
samples.extend(mnist_benchmark.MakeSamplesFromTrainOutput(
metadata, stdout + stderr, elapsed_seconds, step))
if benchmark_spec.mode in ('train_and_eval', 'eval'):
if benchmark_spec.tpus:
tpu = benchmark_spec.tpu_groups['eval'].GetName()
num_shards = '--num_shards={}'.format(
benchmark_spec.tpu_groups['eval'].GetNumShards())
else:
tpu = num_shards = ''
inception3_benchmark_eval_cmd = (
'{cmd} --tpu={tpu} --mode=eval {num_shards}'.format(
cmd=inception3_benchmark_cmd_step,
tpu=tpu, num_shards=num_shards))
stdout, stderr = vm.RobustRemoteCommand(inception3_benchmark_eval_cmd,
should_log=True)
samples.extend(resnet_benchmark.MakeSamplesFromEvalOutput(
metadata, stdout + stderr, elapsed_seconds))
return samples
def Cleanup(benchmark_spec):
"""Cleanup Inception V3 on the cluster.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
mnist_benchmark.Cleanup(benchmark_spec)
|
import dedupe
from collections import defaultdict
import unittest
from future.utils import viewitems, viewvalues
class BlockingTest(unittest.TestCase):
def setUp(self):
field_definition = [{'field': 'name', 'type': 'String'}]
self.data_model = dedupe.Dedupe(field_definition).data_model
self.training_pairs = {
'match': [({"name": "Bob", "age": "50"},
{"name": "Bob", "age": "75"}),
({"name": "Meredith", "age": "40"},
{"name": "Sue", "age": "10"})],
'distinct': [({"name": "Jimmy", "age": "20"},
{"name": "Jimbo", "age": "21"}),
({"name": "Willy", "age": "35"},
{"name": "William", "age": "35"}),
({"name": "William", "age": "36"},
{"name": "William", "age": "35"})]
}
self.training = self.training_pairs['match'] + \
self.training_pairs['distinct']
self.training_records = []
for pair in self.training:
for record in pair:
if record not in self.training_records:
self.training_records.append(record)
self.simple = lambda x: set([str(k) for k in x
if "CompoundPredicate" not in str(k)])
class TfidfTest(unittest.TestCase):
def setUp(self):
self.data_d = {
100: {"name": "Bob", "age": "50", "dataset": 0},
105: {"name": "Charlie", "age": "75", "dataset": 1},
110: {"name": "Meredith", "age": "40", "dataset": 1},
115: {"name": "Sue", "age": "10", "dataset": 0},
120: {"name": "Jimbo", "age": "21", "dataset": 0},
125: {"name": "Jimbo", "age": "21", "dataset": 0},
130: {"name": "Willy", "age": "35", "dataset": 0},
135: {"name": "Willy", "age": "35", "dataset": 1},
140: {"name": "Martha", "age": "19", "dataset": 1},
145: {"name": "Kyle", "age": "27", "dataset": 0},
}
def test_unconstrained_inverted_index(self):
blocker = dedupe.blocking.Fingerprinter(
[dedupe.predicates.TfidfTextSearchPredicate(0.0, "name")])
blocker.index(set(record["name"]
for record
in viewvalues(self.data_d)),
"name")
blocks = defaultdict(set)
for block_key, record_id in blocker(self.data_d.items()):
blocks[block_key].add(record_id)
blocks = set([frozenset(block) for block in blocks.values()
if len(block) > 1])
assert blocks ==\
set([frozenset([120, 125]), frozenset([130, 135])])
class TfIndexUnindex(unittest.TestCase):
def setUp(self):
data_d = {
100: {"name": "Bob", "age": "50", "dataset": 0},
105: {"name": "Charlie", "age": "75", "dataset": 1},
110: {"name": "Meredith", "age": "40", "dataset": 1},
115: {"name": "Sue", "age": "10", "dataset": 0},
120: {"name": "Jimbo", "age": "21", "dataset": 0},
125: {"name": "Jimbo", "age": "21", "dataset": 0},
130: {"name": "Willy", "age": "35", "dataset": 0},
135: {"name": "Willy", "age": "35", "dataset": 1},
140: {"name": "Martha", "age": "19", "dataset": 1},
145: {"name": "Kyle", "age": "27", "dataset": 0},
}
self.blocker = dedupe.blocking.Fingerprinter(
[dedupe.predicates.TfidfTextSearchPredicate(0.0, "name")])
self.records_1 = dict((record_id, record)
for record_id, record
in viewitems(data_d)
if record["dataset"] == 0)
self.fields_2 = dict((record_id, record["name"])
for record_id, record
in viewitems(data_d)
if record["dataset"] == 1)
def test_index(self):
self.blocker.index(set(self.fields_2.values()), "name")
blocks = defaultdict(set)
for block_key, record_id in self.blocker(self.records_1.items()):
blocks[block_key].add(record_id)
assert list(blocks.items())[0][1] == set([130])
def test_doubled_index(self):
self.blocker.index(self.fields_2.values(), "name")
self.blocker.index(self.fields_2.values(), "name")
blocks = defaultdict(set)
for block_key, record_id in self.blocker(self.records_1.items()):
blocks[block_key].add(record_id)
result = list(blocks.items())
assert len(result) == 1
assert result[0][1] == set([130])
def test_unindex(self):
self.blocker.index(self.fields_2.values(), "name")
self.blocker.unindex(self.fields_2.values(), "name")
blocks = defaultdict(set)
for block_key, record_id in self.blocker(self.records_1.items()):
blocks[block_key].add(record_id)
assert len(blocks.values()) == 0
if __name__ == "__main__":
unittest.main()
|
from homeassistant.helpers.entityfilter import (
FILTER_SCHEMA,
INCLUDE_EXCLUDE_FILTER_SCHEMA,
generate_filter,
)
def test_no_filters_case_1():
"""If include and exclude not included, pass everything."""
incl_dom = {}
incl_ent = {}
excl_dom = {}
excl_ent = {}
testfilter = generate_filter(incl_dom, incl_ent, excl_dom, excl_ent)
for value in ("sensor.test", "sun.sun", "light.test"):
assert testfilter(value)
def test_includes_only_case_2():
"""If include specified, only pass if specified (Case 2)."""
incl_dom = {"light", "sensor"}
incl_ent = {"binary_sensor.working"}
excl_dom = {}
excl_ent = {}
testfilter = generate_filter(incl_dom, incl_ent, excl_dom, excl_ent)
assert testfilter("sensor.test")
assert testfilter("light.test")
assert testfilter("binary_sensor.working")
assert testfilter("binary_sensor.notworking") is False
assert testfilter("sun.sun") is False
def test_includes_only_with_glob_case_2():
"""If include specified, only pass if specified (Case 2)."""
incl_dom = {"light", "sensor"}
incl_glob = {"cover.*_window"}
incl_ent = {"binary_sensor.working"}
excl_dom = {}
excl_glob = {}
excl_ent = {}
testfilter = generate_filter(
incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob
)
assert testfilter("sensor.test")
assert testfilter("light.test")
assert testfilter("cover.bedroom_window")
assert testfilter("binary_sensor.working")
assert testfilter("binary_sensor.notworking") is False
assert testfilter("sun.sun") is False
assert testfilter("cover.garage_door") is False
def test_excludes_only_case_3():
"""If exclude specified, pass all but specified (Case 3)."""
incl_dom = {}
incl_ent = {}
excl_dom = {"light", "sensor"}
excl_ent = {"binary_sensor.working"}
testfilter = generate_filter(incl_dom, incl_ent, excl_dom, excl_ent)
assert testfilter("sensor.test") is False
assert testfilter("light.test") is False
assert testfilter("binary_sensor.working") is False
assert testfilter("binary_sensor.another")
assert testfilter("sun.sun") is True
def test_excludes_only_with_glob_case_3():
"""If exclude specified, pass all but specified (Case 3)."""
incl_dom = {}
incl_glob = {}
incl_ent = {}
excl_dom = {"light", "sensor"}
excl_glob = {"cover.*_window"}
excl_ent = {"binary_sensor.working"}
testfilter = generate_filter(
incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob
)
assert testfilter("sensor.test") is False
assert testfilter("light.test") is False
assert testfilter("cover.bedroom_window") is False
assert testfilter("binary_sensor.working") is False
assert testfilter("binary_sensor.another")
assert testfilter("sun.sun") is True
assert testfilter("cover.garage_door")
def test_with_include_domain_case4a():
"""Test case 4a - include and exclude specified, with included domain."""
incl_dom = {"light", "sensor"}
incl_ent = {"binary_sensor.working"}
excl_dom = {}
excl_ent = {"light.ignoreme", "sensor.notworking"}
testfilter = generate_filter(incl_dom, incl_ent, excl_dom, excl_ent)
assert testfilter("sensor.test")
assert testfilter("sensor.notworking") is False
assert testfilter("light.test")
assert testfilter("light.ignoreme") is False
assert testfilter("binary_sensor.working")
assert testfilter("binary_sensor.another") is False
assert testfilter("sun.sun") is False
def test_with_include_glob_case4a():
"""Test case 4a - include and exclude specified, with included glob."""
incl_dom = {}
incl_glob = {"light.*", "sensor.*"}
incl_ent = {"binary_sensor.working"}
excl_dom = {}
excl_glob = {}
excl_ent = {"light.ignoreme", "sensor.notworking"}
testfilter = generate_filter(
incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob
)
assert testfilter("sensor.test")
assert testfilter("sensor.notworking") is False
assert testfilter("light.test")
assert testfilter("light.ignoreme") is False
assert testfilter("binary_sensor.working")
assert testfilter("binary_sensor.another") is False
assert testfilter("sun.sun") is False
def test_with_include_domain_glob_filtering_case4a():
"""Test case 4a - include and exclude specified, both have domains and globs."""
incl_dom = {"light"}
incl_glob = {"*working"}
incl_ent = {}
excl_dom = {"binary_sensor"}
excl_glob = {"*notworking"}
excl_ent = {"light.ignoreme"}
testfilter = generate_filter(
incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob
)
assert testfilter("sensor.working")
assert testfilter("sensor.notworking") is False
assert testfilter("light.test")
assert testfilter("light.notworking") is False
assert testfilter("light.ignoreme") is False
assert testfilter("binary_sensor.not_working") is False
assert testfilter("binary_sensor.another") is False
assert testfilter("sun.sun") is False
def test_exclude_domain_case4b():
"""Test case 4b - include and exclude specified, with excluded domain."""
incl_dom = {}
incl_ent = {"binary_sensor.working"}
excl_dom = {"binary_sensor"}
excl_ent = {"light.ignoreme", "sensor.notworking"}
testfilter = generate_filter(incl_dom, incl_ent, excl_dom, excl_ent)
assert testfilter("sensor.test")
assert testfilter("sensor.notworking") is False
assert testfilter("light.test")
assert testfilter("light.ignoreme") is False
assert testfilter("binary_sensor.working")
assert testfilter("binary_sensor.another") is False
assert testfilter("sun.sun") is True
def test_exclude_glob_case4b():
"""Test case 4b - include and exclude specified, with excluded glob."""
incl_dom = {}
incl_glob = {}
incl_ent = {"binary_sensor.working"}
excl_dom = {}
excl_glob = {"binary_sensor.*"}
excl_ent = {"light.ignoreme", "sensor.notworking"}
testfilter = generate_filter(
incl_dom, incl_ent, excl_dom, excl_ent, incl_glob, excl_glob
)
assert testfilter("sensor.test")
assert testfilter("sensor.notworking") is False
assert testfilter("light.test")
assert testfilter("light.ignoreme") is False
assert testfilter("binary_sensor.working")
assert testfilter("binary_sensor.another") is False
assert testfilter("sun.sun") is True
def test_no_domain_case4c():
"""Test case 4c - include and exclude specified, with no domains."""
incl_dom = {}
incl_ent = {"binary_sensor.working"}
excl_dom = {}
excl_ent = {"light.ignoreme", "sensor.notworking"}
testfilter = generate_filter(incl_dom, incl_ent, excl_dom, excl_ent)
assert testfilter("sensor.test") is False
assert testfilter("sensor.notworking") is False
assert testfilter("light.test") is False
assert testfilter("light.ignoreme") is False
assert testfilter("binary_sensor.working")
assert testfilter("binary_sensor.another") is False
assert testfilter("sun.sun") is False
def test_filter_schema():
"""Test filter schema."""
conf = {
"include_domains": ["light"],
"include_entities": ["switch.kitchen"],
"exclude_domains": ["cover"],
"exclude_entities": ["light.kitchen"],
}
filt = FILTER_SCHEMA(conf)
conf.update({"include_entity_globs": [], "exclude_entity_globs": []})
assert filt.config == conf
def test_filter_schema_with_globs():
"""Test filter schema with glob options."""
conf = {
"include_domains": ["light"],
"include_entity_globs": ["sensor.kitchen_*"],
"include_entities": ["switch.kitchen"],
"exclude_domains": ["cover"],
"exclude_entity_globs": ["sensor.weather_*"],
"exclude_entities": ["light.kitchen"],
}
filt = FILTER_SCHEMA(conf)
assert filt.config == conf
def test_filter_schema_include_exclude():
"""Test the include exclude filter schema."""
conf = {
"include": {
"domains": ["light"],
"entity_globs": ["sensor.kitchen_*"],
"entities": ["switch.kitchen"],
},
"exclude": {
"domains": ["cover"],
"entity_globs": ["sensor.weather_*"],
"entities": ["light.kitchen"],
},
}
filt = INCLUDE_EXCLUDE_FILTER_SCHEMA(conf)
assert filt.config == conf
|
import numpy as np
from typing import List, Union, Tuple, Optional
# pylint: disable=line-too-long
from tensornetwork.contractors.custom_path_solvers.pathsolvers import full_solve_complete
def ncon_solver(tensors: List[np.ndarray],
labels: List[List[int]],
max_branch: Optional[int] = None):
"""
Solve for the contraction order of a tensor network (encoded in the `ncon`
syntax) that minimizes the computational cost.
Args:
tensors: list of the tensors in the network.
labels: list of the tensor connections (in standard `ncon` format).
max_branch: maximum number of contraction paths to search at each step.
Returns:
np.ndarray: the cheapest contraction order found (in ncon format).
float: the cost of the network contraction, given as log10(total_FLOPS).
bool: specifies if contraction order is guaranteed optimal.
"""
# build log-adjacency matrix
log_adj = ncon_to_adj(tensors, labels)
# run search algorithm
order, costs, is_optimal = full_solve_complete(log_adj, max_branch=max_branch)
# put contraction order back into ncon format
con_order = ord_to_ncon(labels, order)
return con_order, costs, is_optimal
def ncon_to_adj(tensors: List[np.ndarray], labels: List[List[int]]):
"""
Create a log-adjacency matrix, where element [i,j] is the log10 of the total
dimension of the indices connecting ith and jth tensors, for a network
defined in the `ncon` syntax.
Args:
tensors: list of the tensors in the network.
labels: list of the tensor connections (in standard `ncon` format).
Returns:
np.ndarray: the log-adjacency matrix.
"""
# process inputs
N = len(labels)
ranks = [len(labels[i]) for i in range(N)]
flat_labels = np.hstack([labels[i] for i in range(N)])
tensor_counter = np.hstack(
[i * np.ones(ranks[i], dtype=int) for i in range(N)])
index_counter = np.hstack([np.arange(ranks[i]) for i in range(N)])
# build log-adjacency index-by-index
log_adj = np.zeros([N, N])
unique_labels = np.unique(flat_labels)
for ele in unique_labels:
# identify tensor/index location of each edge
tnr = tensor_counter[flat_labels == ele]
ind = index_counter[flat_labels == ele]
if len(ind) == 1: # external index
log_adj[tnr[0], tnr[0]] += np.log10(tensors[tnr[0]].shape[ind[0]])
elif len(ind) == 2: # internal index
if tnr[0] != tnr[1]: # ignore partial traces
log_adj[tnr[0], tnr[1]] += np.log10(tensors[tnr[0]].shape[ind[0]])
log_adj[tnr[1], tnr[0]] += np.log10(tensors[tnr[0]].shape[ind[0]])
return log_adj
def ord_to_ncon(labels: List[List[int]], orders: np.ndarray):
"""
Produces a `ncon` compatible index contraction order from the sequence of
pairwise contractions.
Args:
labels: list of the tensor connections (in standard `ncon` format).
orders: array of dim (2,N-1) specifying the set of N-1 pairwise
tensor contractions.
Returns:
np.ndarray: the contraction order (in `ncon` format).
"""
N = len(labels)
orders = orders.reshape(2, N - 1)
new_labels = [np.array(labels[i]) for i in range(N)]
con_order = np.zeros([0], dtype=int)
# remove all partial trace indices
for counter, temp_label in enumerate(new_labels):
uni_inds, counts = np.unique(temp_label, return_counts=True)
tr_inds = uni_inds[np.flatnonzero(counts == 2)]
con_order = np.concatenate((con_order, tr_inds))
new_labels[counter] = temp_label[np.isin(temp_label, uni_inds[counts == 1])]
for i in range(N - 1):
# find common indices between tensor pair
cont_many, A_cont, B_cont = np.intersect1d(
new_labels[orders[0, i]], new_labels[orders[1, i]], return_indices=True)
temp_labels = np.append(
np.delete(new_labels[orders[0, i]], A_cont),
np.delete(new_labels[orders[1, i]], B_cont))
con_order = list(np.concatenate((con_order, cont_many), axis=0))
# build new set of labels
new_labels[orders[0, i]] = temp_labels
del new_labels[orders[1, i]]
return con_order
def ncon_cost_check(tensors: List[np.ndarray],
labels: List[Union[List[int], Tuple[int]]],
con_order: Optional[Union[List[int], str]] = None):
"""
Checks the computational cost of an `ncon` contraction (without actually
doing the contraction). Ignore the cost contributions from partial traces
(which are always sub-leading).
Args:
tensors: list of the tensors in the network.
labels: length-N list of lists (or tuples) specifying the network
connections. The jth entry of the ith list in labels labels the edge
connected to the jth index of the ith tensor. Labels should be positive
integers for internal indices and negative integers for free indices.
con_order: optional argument to specify the order for contracting the
positive indices. Defaults to ascending order if omitted.
Returns:
float: the cost of the network contraction, given as log10(total_FLOPS).
"""
  total_cost = float('-inf')
N = len(tensors)
tensor_dims = [np.array(np.log10(ele.shape)) for ele in tensors]
connect_list = [np.array(ele) for ele in labels]
# generate contraction order if necessary
flat_connect = np.concatenate(connect_list)
if con_order is None:
con_order = np.unique(flat_connect[flat_connect > 0])
else:
con_order = np.array(con_order)
# do all partial traces
for counter, temp_connect in enumerate(connect_list):
uni_inds, counts = np.unique(temp_connect, return_counts=True)
tr_inds = np.isin(temp_connect, uni_inds[counts == 1])
tensor_dims[counter] = tensor_dims[counter][tr_inds]
connect_list[counter] = temp_connect[tr_inds]
con_order = con_order[np.logical_not(
np.isin(con_order, uni_inds[counts == 2]))]
# do all binary contractions
while len(con_order) > 0:
# identify tensors to be contracted
cont_ind = con_order[0]
locs = [
ele for ele in range(len(connect_list))
if sum(connect_list[ele] == cont_ind) > 0
]
# identify indices to be contracted
c1 = connect_list.pop(locs[1])
c0 = connect_list.pop(locs[0])
cont_many, A_cont, B_cont = np.intersect1d(
c0, c1, assume_unique=True, return_indices=True)
# identify dimensions of contracted
d1 = tensor_dims.pop(locs[1])
d0 = tensor_dims.pop(locs[0])
single_cost = np.sum(d0) + np.sum(d1) - np.sum(d0[A_cont])
total_cost = single_cost + np.log10(1 + 10**(total_cost - single_cost))
# update lists
tensor_dims.append(np.append(np.delete(d0, A_cont), np.delete(d1, B_cont)))
connect_list.append(np.append(np.delete(c0, A_cont), np.delete(c1, B_cont)))
con_order = con_order[np.logical_not(np.isin(con_order, cont_many))]
# do all outer products
N = len(tensor_dims)
if N > 1:
tensor_sizes = np.sort([np.sum(tensor_dims[ele]) for ele in range(N)])
for _ in range(N - 1):
single_cost = tensor_sizes[0] + tensor_sizes[1]
tensor_sizes[0] += tensor_sizes[1]
tensor_sizes = np.sort(np.delete(tensor_sizes, 1))
total_cost = single_cost + np.log10(1 + 10**(total_cost - single_cost))
return total_cost
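# Illustrative sketch (not part of the original module; assumes the tensornetwork
# package is importable so the module-level import above succeeds): estimating the
# contraction cost of a small matrix chain A(10x100) @ B(100x20) @ C(20x5) written
# in ncon syntax.
if __name__ == '__main__':
  A, B, C = np.zeros([10, 100]), np.zeros([100, 20]), np.zeros([20, 5])
  labels = [[-1, 1], [1, 2], [2, -2]]
  # Contract shared index 1 first, then index 2; the cost is returned as
  # log10(total FLOPS), here log10(10*100*20 + 10*20*5) ~= 4.32.
  print(ncon_cost_check([A, B, C], labels, con_order=[1, 2]))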
|
import pytest
from .common import build_device_info_mock, build_device_mock
from tests.async_mock import AsyncMock, patch
@pytest.fixture(name="discovery")
def discovery_fixture():
"""Patch the discovery service."""
with patch(
"homeassistant.components.gree.bridge.Discovery.search_devices",
new_callable=AsyncMock,
return_value=[build_device_info_mock()],
) as mock:
yield mock
@pytest.fixture(name="device")
def device_fixture():
"""Path the device search and bind."""
with patch(
"homeassistant.components.gree.bridge.Device",
return_value=build_device_mock(),
) as mock:
yield mock
@pytest.fixture(name="setup")
def setup_fixture():
"""Patch the climate setup."""
with patch(
"homeassistant.components.gree.climate.async_setup_entry", return_value=True
) as setup:
yield setup
|
from itertools import *
import benchbase
from benchbase import onlylib, children, nochange
############################################################
# Benchmarks
############################################################
class XPathBenchMark(benchbase.TreeBenchMark):
@nochange
@onlylib('lxe')
@children
def bench_xpath_class(self, children):
xpath = self.etree.XPath("./*[1]")
for child in children:
xpath(child)
@nochange
@onlylib('lxe')
@children
def bench_xpath_class_repeat(self, children):
for child in children:
xpath = self.etree.XPath("./*[1]")
xpath(child)
@nochange
@onlylib('lxe')
def bench_xpath_element(self, root):
xpath = self.etree.XPathElementEvaluator(root)
for child in root:
xpath.evaluate("./*[1]")
@nochange
@onlylib('lxe')
@children
def bench_xpath_method(self, children):
for child in children:
child.xpath("./*[1]")
@nochange
@onlylib('lxe')
@children
def bench_multiple_xpath_or(self, children):
xpath = self.etree.XPath(".//p:a00001|.//p:b00001|.//p:c00001",
namespaces={'p':'cdefg'})
for child in children:
xpath(child)
@nochange
@onlylib('lxe')
@children
def bench_multiple_iter_tag(self, children):
for child in children:
list(child.iter("{cdefg}a00001"))
list(child.iter("{cdefg}b00001"))
list(child.iter("{cdefg}c00001"))
@nochange
@onlylib('lxe')
@children
def bench_xpath_old_extensions(self, children):
def return_child(_, elements):
if elements:
return elements[0][0]
else:
return ()
extensions = {("test", "child") : return_child}
xpath = self.etree.XPath("t:child(.)", namespaces={"t":"test"},
extensions=extensions)
for child in children:
xpath(child)
@nochange
@onlylib('lxe')
@children
def bench_xpath_extensions(self, children):
def return_child(_, elements):
if elements:
return elements[0][0]
else:
return ()
self.etree.FunctionNamespace("testns")["t"] = return_child
try:
xpath = self.etree.XPath("test:t(.)", namespaces={"test":"testns"})
for child in children:
xpath(child)
finally:
del self.etree.FunctionNamespace("testns")["t"]
if __name__ == '__main__':
benchbase.main(XPathBenchMark)
|
from openrazer_daemon.dbus_services import endpoint
@endpoint('razer.device.lighting.profile_led', 'getRedLED', out_sig='b')
def keypad_get_profile_led_red(self):
"""
Get red profile LED state
:return: Red profile LED state
:rtype: bool
"""
self.logger.debug("DBus call keypad_profile_led_red")
driver_path = self.get_driver_path('profile_led_red')
with open(driver_path, 'r') as driver_file:
return driver_file.read().strip() == '1'
@endpoint('razer.device.lighting.profile_led', 'setRedLED', in_sig='b')
def keypad_set_profile_led_red(self, enable):
"""
Set red profile LED state
:param enable: Status of red profile LED
:type enable: bool
"""
self.logger.debug("DBus call keypad_set_profile_led_red")
driver_path = self.get_driver_path('profile_led_red')
with open(driver_path, 'w') as driver_file:
if enable:
driver_file.write('1')
else:
driver_file.write('0')
@endpoint('razer.device.lighting.profile_led', 'getGreenLED', out_sig='b')
def keypad_get_profile_led_green(self):
"""
Get green profile LED state
:return: Green profile LED state
:rtype: bool
"""
self.logger.debug("DBus call keypad_get_profile_led_green")
driver_path = self.get_driver_path('profile_led_green')
with open(driver_path, 'r') as driver_file:
return driver_file.read().strip() == '1'
@endpoint('razer.device.lighting.profile_led', 'setGreenLED', in_sig='b')
def keypad_set_profile_led_green(self, enable):
"""
Set green profile LED state
:param enable: Status of green profile LED
:type enable: bool
"""
self.logger.debug("DBus call keypad_set_profile_led_green")
driver_path = self.get_driver_path('profile_led_green')
with open(driver_path, 'w') as driver_file:
if enable:
driver_file.write('1')
else:
driver_file.write('0')
@endpoint('razer.device.lighting.profile_led', 'getBlueLED', out_sig='b')
def keypad_get_profile_led_blue(self):
"""
Get blue profile LED state
:return: Blue profile LED state
:rtype: bool
"""
self.logger.debug("DBus call keypad_get_profile_led_blue")
driver_path = self.get_driver_path('profile_led_blue')
with open(driver_path, 'r') as driver_file:
return driver_file.read().strip() == '1'
@endpoint('razer.device.lighting.profile_led', 'setBlueLED', in_sig='b')
def keypad_set_profile_led_blue(self, enable):
"""
Set blue profile LED state
:param enable: Status of blue profile LED
:type enable: bool
"""
self.logger.debug("DBus call keypad_set_profile_led_blue")
driver_path = self.get_driver_path('profile_led_blue')
with open(driver_path, 'w') as driver_file:
if enable:
driver_file.write('1')
else:
driver_file.write('0')
@endpoint('razer.device.macro', 'getModeModifier', out_sig='b')
def keypad_get_mode_modifier(self):
"""
Get if the mode key is a modifier
:return: State
:rtype: bool
"""
self.logger.debug("DBus call keypad_get_mode_modifier")
return self.key_manager.mode_modifier
@endpoint('razer.device.macro', 'setModeModifier', in_sig='b')
def keypad_set_mode_modifier(self, modifier):
"""
Set if the mode key is a modifier
:param modifier: State
:type modifier: bool
"""
self.logger.debug("DBus call keypad_set_mode_modifier")
self.key_manager.mode_modifier = modifier
|
import argparse
import glob
import os
import struct
import sys
def clamp_to_u8(value):
if value > 255:
value = 255
elif value < 0:
value = 0
return value
def parse_args():
parser = argparse.ArgumentParser(description="Set the breathing effect")
parser.add_argument('-d', '--device', type=str, help="Device string like \"0003:1532:0045.000C\"")
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument('--random', action="store_true", help="Random breathing effect")
group.add_argument('--single', nargs=3, metavar=("R", "G", "B"), type=int, help="Single colour breathing effect")
group.add_argument('--dual', nargs=6, metavar=("R1", "G1", "B1", "R2", "G2", "B2"), type=int, help="Dual colour breathing effect")
args = parser.parse_args()
return args
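# Example invocations (illustrative; the script filename is whatever this module is
# saved as, and the sysfs paths below assume the razermouse driver is bound):
#   python <script>.py --random
#   python <script>.py --single 0 255 0
#   python <script>.py -d "0003:1532:0045.000C" --dual 255 0 0 0 0 255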
def run():
args = parse_args()
if args.device is None:
mouse_dirs = glob.glob(os.path.join('/sys/bus/hid/drivers/razermouse/', "*:*:*.*"))
if len(mouse_dirs) > 1:
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
if len(mouse_dirs) < 1:
print("No mouse directories found. Make sure the driver is binded", file=sys.stderr)
sys.exit(1)
mouse_dir = mouse_dirs[0]
else:
mouse_dir = os.path.join('/sys/bus/hid/drivers/razermouse/', args.device)
if not os.path.isdir(mouse_dir):
print("Multiple mouse directories found. Rerun with -d", file=sys.stderr)
sys.exit(1)
if args.random:
byte_string = struct.pack(">B", 0x01)
elif args.single is not None:
values = map(clamp_to_u8, args.single)
byte_string = struct.pack(">BBB", *values)
elif args.dual is not None:
values = map(clamp_to_u8, args.dual)
byte_string = struct.pack(">BBBBBB", *values)
else:
# Should never get here
byte_string = struct.pack(">B", 0x01)
breathing_mode_filepath = os.path.join(mouse_dir, "mode_breath")
with open(breathing_mode_filepath, 'wb') as breathing_mode_file:
breathing_mode_file.write(byte_string)
print("Done")
if __name__ == '__main__':
run()
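# Example invocations (the script filename is a placeholder; the device string
# format matches the argparse help above):
#   python set_breath_effect.py --random
#   python set_breath_effect.py --single 0 255 0
#   python set_breath_effect.py -d 0003:1532:0045.000C --dual 255 0 0 0 0 255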
|
import os
import os.path
import tempfile
from typing import cast, Any, MutableMapping, Tuple
from PyQt5.QtCore import pyqtSignal, pyqtSlot, QObject, QSocketNotifier
from qutebrowser.utils import message, log, objreg, standarddir, utils
from qutebrowser.commands import runners
from qutebrowser.config import websettings
from qutebrowser.misc import guiprocess
from qutebrowser.browser import downloads
from qutebrowser.qt import sip
class _QtFIFOReader(QObject):
"""A FIFO reader based on a QSocketNotifier.
Attributes:
_filepath: The path to the opened FIFO.
_fifo: The Python file object for the FIFO.
_notifier: The QSocketNotifier used.
Signals:
got_line: Emitted when a whole line arrived.
"""
got_line = pyqtSignal(str)
def __init__(self, filepath, parent=None):
super().__init__(parent)
self._filepath = filepath
# We open as R/W so we never get EOF and have to reopen the pipe.
# See http://www.outflux.net/blog/archives/2008/03/09/using-select-on-a-fifo/
# We also use os.open and os.fdopen rather than built-in open so we
# can add O_NONBLOCK.
# pylint: disable=no-member,useless-suppression
fd = os.open(filepath, os.O_RDWR | os.O_NONBLOCK)
# pylint: enable=no-member,useless-suppression
self._fifo = os.fdopen(fd, 'r')
self._notifier = QSocketNotifier(cast(sip.voidptr, fd),
QSocketNotifier.Read, self)
self._notifier.activated.connect( # type: ignore[attr-defined]
self.read_line)
@pyqtSlot()
def read_line(self):
"""(Try to) read a line from the FIFO."""
log.procs.debug("QSocketNotifier triggered!")
try:
self._notifier.setEnabled(False)
try:
for line in self._fifo:
self.got_line.emit(line.rstrip('\r\n'))
self._notifier.setEnabled(True)
except UnicodeDecodeError as e:
log.misc.error("Invalid unicode in userscript output: {}"
.format(e))
except RuntimeError as e:
# For unknown reasons, read_line can still get called after the
# QSocketNotifier was already deleted...
log.procs.debug("While reading userscript output: {}".format(e))
def cleanup(self):
"""Clean up so the FIFO can be closed."""
self._notifier.setEnabled(False)
for line in self._fifo:
self.got_line.emit(line.rstrip('\r\n'))
self._fifo.close()
class _BaseUserscriptRunner(QObject):
"""Common part between the Windows and the POSIX userscript runners.
Attributes:
_filepath: The path of the file/FIFO which is being read.
_proc: The GUIProcess which is being executed.
_cleaned_up: Whether temporary files were cleaned up.
_text_stored: Set when the page text was stored async.
_html_stored: Set when the page html was stored async.
_args: Arguments to pass to _run_process.
_kwargs: Keyword arguments to pass to _run_process.
Signals:
got_cmd: Emitted when a new command arrived and should be executed.
finished: Emitted when the userscript finished running.
"""
got_cmd = pyqtSignal(str)
finished = pyqtSignal()
def __init__(self, parent=None):
super().__init__(parent)
self._cleaned_up = False
self._filepath = None
self._proc = None
self._env: MutableMapping[str, str] = {}
self._text_stored = False
self._html_stored = False
self._args: Tuple[Any, ...] = ()
self._kwargs = {}
def store_text(self, text):
"""Called as callback when the text is ready from the web backend."""
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8',
suffix='.txt',
delete=False) as txt_file:
txt_file.write(text)
self._env['QUTE_TEXT'] = txt_file.name
self._text_stored = True
log.procs.debug("Text stored from webview")
if self._text_stored and self._html_stored:
log.procs.debug("Both text/HTML stored, kicking off userscript!")
self._run_process(*self._args, **self._kwargs)
def store_html(self, html):
"""Called as callback when the html is ready from the web backend."""
with tempfile.NamedTemporaryFile(mode='w', encoding='utf-8',
suffix='.html',
delete=False) as html_file:
html_file.write(html)
self._env['QUTE_HTML'] = html_file.name
self._html_stored = True
log.procs.debug("HTML stored from webview")
if self._text_stored and self._html_stored:
log.procs.debug("Both text/HTML stored, kicking off userscript!")
self._run_process(*self._args, **self._kwargs)
def _run_process(self, cmd, *args, env=None, verbose=False,
output_messages=False):
"""Start the given command.
Args:
cmd: The command to be started.
*args: The arguments to hand to the command
env: A dictionary of environment variables to add.
verbose: Show notifications when the command started/exited.
output_messages: Show the output as messages.
"""
assert self._filepath is not None
self._env['QUTE_FIFO'] = self._filepath
if env is not None:
self._env.update(env)
self._proc = guiprocess.GUIProcess(
'userscript', additional_env=self._env,
output_messages=output_messages, verbose=verbose, parent=self)
self._proc.finished.connect(self.on_proc_finished)
self._proc.error.connect(self.on_proc_error)
self._proc.start(cmd, args)
def _cleanup(self):
"""Clean up temporary files."""
if self._cleaned_up:
return
assert self._filepath is not None
self._cleaned_up = True
tempfiles = [self._filepath]
if 'QUTE_HTML' in self._env:
tempfiles.append(self._env['QUTE_HTML'])
if 'QUTE_TEXT' in self._env:
tempfiles.append(self._env['QUTE_TEXT'])
for fn in tempfiles:
log.procs.debug("Deleting temporary file {}.".format(fn))
try:
os.remove(fn)
except OSError as e:
# NOTE: Do not replace this with "raise CommandError" as it's
# executed async.
message.error("Failed to delete tempfile {} ({})!".format(
fn, e))
self._filepath = None
self._proc = None
self._env = {}
self._text_stored = False
self._html_stored = False
def prepare_run(self, *args, **kwargs):
"""Prepare running the userscript given.
Needs to be overridden by subclasses.
The script will actually run after store_text and store_html have been
called.
Args:
Passed to _run_process.
"""
raise NotImplementedError
@pyqtSlot()
def on_proc_finished(self):
"""Called when the process has finished.
Needs to be overridden by subclasses.
"""
raise NotImplementedError
@pyqtSlot()
def on_proc_error(self):
"""Called when the process encountered an error.
Needs to be overridden by subclasses.
"""
raise NotImplementedError
class _POSIXUserscriptRunner(_BaseUserscriptRunner):
"""Userscript runner to be used on POSIX. Uses _QtFIFOReader.
Commands are executed immediately when they arrive in the FIFO.
Attributes:
_reader: The _QtFIFOReader instance.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._reader = None
def prepare_run(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
try:
# tempfile.mktemp is deprecated and discouraged, but we use it here
# to create a FIFO since the only other alternative would be to
# create a directory and place the FIFO there, which sucks. Since
# os.mkfifo will raise an exception anyways when the path doesn't
# exist, it shouldn't be a big issue.
self._filepath = tempfile.mktemp(prefix='qutebrowser-userscript-',
dir=standarddir.runtime())
# pylint: disable=no-member,useless-suppression
os.mkfifo(self._filepath, mode=0o600)
# pylint: enable=no-member,useless-suppression
except OSError as e:
self._filepath = None # Make sure it's not used
message.error("Error while creating FIFO: {}".format(e))
return
self._reader = _QtFIFOReader(self._filepath)
self._reader.got_line.connect(self.got_cmd)
@pyqtSlot()
def on_proc_finished(self):
self._cleanup()
@pyqtSlot()
def on_proc_error(self):
self._cleanup()
def _cleanup(self):
"""Clean up reader and temporary files."""
if self._cleaned_up:
return
assert self._reader is not None
log.procs.debug("Cleaning up")
self._reader.cleanup()
self._reader.deleteLater()
self._reader = None
super()._cleanup()
self.finished.emit()
class _WindowsUserscriptRunner(_BaseUserscriptRunner):
"""Userscript runner to be used on Windows.
    This is a much simpler implementation than _POSIXUserscriptRunner.
It uses a normal flat file for commands and executes them all at once when
the process has finished, as Windows doesn't really understand the concept
of using files as named pipes.
This also means the userscript *has* to use >> (append) rather than >
(overwrite) to write to the file!
"""
def _cleanup(self):
"""Clean up temporary files after the userscript finished."""
if self._cleaned_up:
return
assert self._filepath is not None
try:
with open(self._filepath, 'r', encoding='utf-8') as f:
for line in f:
self.got_cmd.emit(line.rstrip())
except OSError:
log.procs.exception("Failed to read command file!")
except UnicodeDecodeError as e:
log.misc.error("Invalid unicode in userscript output: {}"
.format(e))
super()._cleanup()
self.finished.emit()
@pyqtSlot()
def on_proc_error(self):
self._cleanup()
@pyqtSlot()
def on_proc_finished(self):
"""Read back the commands when the process finished."""
self._cleanup()
def prepare_run(self, *args, **kwargs):
self._args = args
self._kwargs = kwargs
try:
handle = tempfile.NamedTemporaryFile(delete=False)
handle.close()
self._filepath = handle.name
except OSError as e:
message.error("Error while creating tempfile: {}".format(e))
return
class Error(Exception):
"""Base class for userscript exceptions."""
class NotFoundError(Error):
"""Raised when spawning a userscript that doesn't exist.
Attributes:
script_name: name of the userscript as called
paths: path names that were searched for the userscript
"""
def __init__(self, script_name, paths=None):
super().__init__()
self.script_name = script_name
self.paths = paths
def __str__(self):
msg = "Userscript '{}' not found".format(self.script_name)
if self.paths:
msg += " in userscript directories {}".format(
', '.join(repr(path) for path in self.paths))
return msg
class UnsupportedError(Error):
"""Raised when userscripts aren't supported on this platform."""
def __str__(self):
return "Userscripts are not supported on this platform!"
def _lookup_path(cmd):
"""Search userscript directories for given command.
Raises:
NotFoundError if the command could not be found.
Args:
cmd: The command to look for.
Returns:
A path to the userscript.
"""
directories = [
os.path.join(standarddir.data(), "userscripts"),
os.path.join(standarddir.data(system=True), "userscripts"),
]
for directory in directories:
cmd_path = os.path.join(directory, cmd)
if os.path.exists(cmd_path):
return cmd_path
raise NotFoundError(cmd, directories)
def run_async(tab, cmd, *args, win_id, env, verbose=False,
output_messages=False):
"""Run a userscript after dumping page html/source.
Raises:
UnsupportedError if userscripts are not supported on the current
platform.
NotFoundError if the command could not be found.
Args:
tab: The WebKitTab/WebEngineTab to get the source from.
cmd: The userscript binary to run.
*args: The arguments to pass to the userscript.
win_id: The window id the userscript is executed in.
env: A dictionary of variables to add to the process environment.
verbose: Show notifications when the command started/exited.
output_messages: Show the output as messages.
"""
tb = objreg.get('tabbed-browser', scope='window', window=win_id)
commandrunner = runners.CommandRunner(win_id, parent=tb)
if utils.is_posix:
runner: _BaseUserscriptRunner = _POSIXUserscriptRunner(tb)
elif utils.is_windows: # pragma: no cover
runner = _WindowsUserscriptRunner(tb)
else: # pragma: no cover
raise UnsupportedError
runner.got_cmd.connect(
lambda cmd:
log.commands.debug("Got userscript command: {}".format(cmd)))
runner.got_cmd.connect(commandrunner.run_safely)
env['QUTE_USER_AGENT'] = websettings.user_agent()
env['QUTE_CONFIG_DIR'] = standarddir.config()
env['QUTE_DATA_DIR'] = standarddir.data()
env['QUTE_DOWNLOAD_DIR'] = downloads.download_dir()
env['QUTE_COMMANDLINE_TEXT'] = objreg.get('status-command', scope='window',
window=win_id).text()
cmd_path = os.path.expanduser(cmd)
    # If cmd is not given as an absolute path, look it up in
    # ~/.local/share/qutebrowser/userscripts (or $XDG_DATA_HOME).
if not os.path.isabs(cmd_path):
log.misc.debug("{} is no absolute path".format(cmd_path))
cmd_path = _lookup_path(cmd)
elif not os.path.exists(cmd_path):
raise NotFoundError(cmd_path)
log.misc.debug("Userscript to run: {}".format(cmd_path))
runner.finished.connect(commandrunner.deleteLater)
runner.finished.connect(runner.deleteLater)
runner.prepare_run(cmd_path, *args, env=env, verbose=verbose,
output_messages=output_messages)
tab.dump_async(runner.store_html)
tab.dump_async(runner.store_text, plain=True)
return runner
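# Minimal usage sketch for run_async (the tab object, window id, script name and
# extra environment are illustrative; callers normally live in qutebrowser's
# command layer):
#   runner = run_async(tab, 'open_download', win_id=0,
#                      env={'QUTE_MODE': 'command'}, verbose=True)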
|
import asyncio
import async_timeout
import axis
from axis.configuration import Configuration
from axis.errors import Unauthorized
from axis.event_stream import OPERATION_INITIALIZED
from axis.mqtt import mqtt_json_to_event
from axis.streammanager import SIGNAL_PLAYING, STATE_STOPPED
from homeassistant.components import mqtt
from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN
from homeassistant.components.mqtt.models import Message
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_TRIGGER_TIME,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant, callback
from homeassistant.exceptions import ConfigEntryNotReady
from homeassistant.helpers.device_registry import CONNECTION_NETWORK_MAC
from homeassistant.helpers.dispatcher import async_dispatcher_send
from homeassistant.setup import async_when_setup
from .const import (
ATTR_MANUFACTURER,
CONF_EVENTS,
CONF_MODEL,
CONF_STREAM_PROFILE,
DEFAULT_EVENTS,
DEFAULT_STREAM_PROFILE,
DEFAULT_TRIGGER_TIME,
DOMAIN as AXIS_DOMAIN,
LOGGER,
PLATFORMS,
)
from .errors import AuthenticationRequired, CannotConnect
class AxisNetworkDevice:
"""Manages a Axis device."""
def __init__(self, hass, config_entry):
"""Initialize the device."""
self.hass = hass
self.config_entry = config_entry
self.available = True
self.api = None
self.fw_version = None
self.product_type = None
self.listeners = []
@property
def host(self):
"""Return the host of this device."""
return self.config_entry.data[CONF_HOST]
@property
def model(self):
"""Return the model of this device."""
return self.config_entry.data[CONF_MODEL]
@property
def name(self):
"""Return the name of this device."""
return self.config_entry.data[CONF_NAME]
@property
def serial(self):
"""Return the serial number of this device."""
return self.config_entry.unique_id
# Options
@property
def option_events(self):
"""Config entry option defining if platforms based on events should be created."""
return self.config_entry.options.get(CONF_EVENTS, DEFAULT_EVENTS)
@property
def option_stream_profile(self):
"""Config entry option defining what stream profile camera platform should use."""
return self.config_entry.options.get(
CONF_STREAM_PROFILE, DEFAULT_STREAM_PROFILE
)
@property
def option_trigger_time(self):
"""Config entry option defining minimum number of seconds to keep trigger high."""
return self.config_entry.options.get(CONF_TRIGGER_TIME, DEFAULT_TRIGGER_TIME)
# Signals
@property
def signal_reachable(self):
"""Device specific event to signal a change in connection status."""
return f"axis_reachable_{self.serial}"
@property
def signal_new_event(self):
"""Device specific event to signal new device event available."""
return f"axis_new_event_{self.serial}"
@property
def signal_new_address(self):
"""Device specific event to signal a change in device address."""
return f"axis_new_address_{self.serial}"
# Callbacks
@callback
def async_connection_status_callback(self, status):
"""Handle signals of device connection status.
This is called on every RTSP keep-alive message.
        Only dispatch the reachable signal when the availability actually changes.
"""
if self.available != (status == SIGNAL_PLAYING):
self.available = not self.available
async_dispatcher_send(self.hass, self.signal_reachable, True)
@callback
def async_event_callback(self, action, event_id):
"""Call to configure events when initialized on event stream."""
if action == OPERATION_INITIALIZED:
async_dispatcher_send(self.hass, self.signal_new_event, event_id)
@staticmethod
async def async_new_address_callback(hass, entry):
"""Handle signals of device getting new address.
Called when config entry is updated.
        This is a static method because a bound method cannot be used with
        weak references.
"""
device = hass.data[AXIS_DOMAIN][entry.unique_id]
device.api.config.host = device.host
async_dispatcher_send(hass, device.signal_new_address)
async def async_update_device_registry(self):
"""Update device registry."""
device_registry = await self.hass.helpers.device_registry.async_get_registry()
device_registry.async_get_or_create(
config_entry_id=self.config_entry.entry_id,
connections={(CONNECTION_NETWORK_MAC, self.serial)},
identifiers={(AXIS_DOMAIN, self.serial)},
manufacturer=ATTR_MANUFACTURER,
model=f"{self.model} {self.product_type}",
name=self.name,
sw_version=self.fw_version,
)
async def use_mqtt(self, hass: HomeAssistant, component: str) -> None:
"""Set up to use MQTT."""
try:
status = await self.api.vapix.mqtt.get_client_status()
except Unauthorized:
            # This means the user does not have sufficient privileges
status = {}
if status.get("data", {}).get("status", {}).get("state") == "active":
self.listeners.append(
await mqtt.async_subscribe(hass, f"{self.serial}/#", self.mqtt_message)
)
@callback
def mqtt_message(self, message: Message) -> None:
"""Receive Axis MQTT message."""
self.disconnect_from_stream()
event = mqtt_json_to_event(message.payload)
self.api.event.process_event(event)
# Setup and teardown methods
async def async_setup(self):
"""Set up the device."""
try:
self.api = await get_device(
self.hass,
host=self.config_entry.data[CONF_HOST],
port=self.config_entry.data[CONF_PORT],
username=self.config_entry.data[CONF_USERNAME],
password=self.config_entry.data[CONF_PASSWORD],
)
except CannotConnect as err:
raise ConfigEntryNotReady from err
except Exception: # pylint: disable=broad-except
LOGGER.error("Unknown error connecting with Axis device on %s", self.host)
return False
self.fw_version = self.api.vapix.firmware_version
self.product_type = self.api.vapix.product_type
async def start_platforms():
await asyncio.gather(
*[
self.hass.config_entries.async_forward_entry_setup(
self.config_entry, platform
)
for platform in PLATFORMS
]
)
if self.option_events:
self.api.stream.connection_status_callback.append(
self.async_connection_status_callback
)
self.api.enable_events(event_callback=self.async_event_callback)
self.api.stream.start()
if self.api.vapix.mqtt:
async_when_setup(self.hass, MQTT_DOMAIN, self.use_mqtt)
self.hass.async_create_task(start_platforms())
self.config_entry.add_update_listener(self.async_new_address_callback)
return True
@callback
def disconnect_from_stream(self):
"""Stop stream."""
if self.api.stream.state != STATE_STOPPED:
self.api.stream.connection_status_callback.remove(
self.async_connection_status_callback
)
self.api.stream.stop()
async def shutdown(self, event):
"""Stop the event stream."""
self.disconnect_from_stream()
await self.api.vapix.close()
async def async_reset(self):
"""Reset this device to default state."""
self.disconnect_from_stream()
await self.api.vapix.close()
unload_ok = all(
await asyncio.gather(
*[
self.hass.config_entries.async_forward_entry_unload(
self.config_entry, platform
)
for platform in PLATFORMS
]
)
)
if not unload_ok:
return False
for unsubscribe_listener in self.listeners:
unsubscribe_listener()
return True
async def get_device(hass, host, port, username, password):
"""Create a Axis device."""
device = axis.AxisDevice(
Configuration(host, port=port, username=username, password=password)
)
try:
with async_timeout.timeout(15):
await device.vapix.initialize()
return device
except axis.Unauthorized as err:
LOGGER.warning("Connected to device at %s but not registered.", host)
await device.vapix.close()
raise AuthenticationRequired from err
except (asyncio.TimeoutError, axis.RequestError) as err:
LOGGER.error("Error connecting to the Axis device at %s", host)
await device.vapix.close()
raise CannotConnect from err
except axis.AxisException as err:
LOGGER.exception("Unknown Axis communication error occurred")
await device.vapix.close()
raise AuthenticationRequired from err
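# Usage sketch (host and credentials are placeholders):
#   api = await get_device(hass, host="192.168.0.90", port=80,
#                          username="root", password="secret")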
|
import asyncio
import logging
import shlex
import voluptuous as vol
from homeassistant.core import ServiceCall
from homeassistant.exceptions import TemplateError
from homeassistant.helpers import config_validation as cv, template
from homeassistant.helpers.typing import ConfigType, HomeAssistantType
DOMAIN = "shell_command"
COMMAND_TIMEOUT = 60
_LOGGER = logging.getLogger(__name__)
CONFIG_SCHEMA = vol.Schema(
{DOMAIN: cv.schema_with_slug_keys(cv.string)}, extra=vol.ALLOW_EXTRA
)
async def async_setup(hass: HomeAssistantType, config: ConfigType) -> bool:
"""Set up the shell_command component."""
conf = config.get(DOMAIN, {})
cache = {}
async def async_service_handler(service: ServiceCall) -> None:
"""Execute a shell command service."""
cmd = conf[service.service]
if cmd in cache:
prog, args, args_compiled = cache[cmd]
elif " " not in cmd:
prog = cmd
args = None
args_compiled = None
cache[cmd] = prog, args, args_compiled
else:
prog, args = cmd.split(" ", 1)
args_compiled = template.Template(args, hass)
cache[cmd] = prog, args, args_compiled
if args_compiled:
try:
rendered_args = args_compiled.async_render(
variables=service.data, parse_result=False
)
except TemplateError as ex:
_LOGGER.exception("Error rendering command template: %s", ex)
return
else:
rendered_args = None
if rendered_args == args:
            # No template was used; keep the default shell behaviour.
# pylint: disable=no-member
create_process = asyncio.subprocess.create_subprocess_shell(
cmd,
stdin=None,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
else:
# Template used. Break into list and use create_subprocess_exec
# (which uses shell=False) for security
shlexed_cmd = [prog] + shlex.split(rendered_args)
# pylint: disable=no-member
create_process = asyncio.subprocess.create_subprocess_exec(
*shlexed_cmd,
stdin=None,
stdout=asyncio.subprocess.PIPE,
stderr=asyncio.subprocess.PIPE,
)
process = await create_process
try:
stdout_data, stderr_data = await asyncio.wait_for(
process.communicate(), COMMAND_TIMEOUT
)
except asyncio.TimeoutError:
_LOGGER.exception(
"Timed out running command: `%s`, after: %ss", cmd, COMMAND_TIMEOUT
)
            if process:
                # asyncio's Process.kill() is synchronous, so there is nothing
                # to await here.
                process.kill()
del process
return
if stdout_data:
_LOGGER.debug(
"Stdout of command: `%s`, return code: %s:\n%s",
cmd,
process.returncode,
stdout_data,
)
if stderr_data:
_LOGGER.debug(
"Stderr of command: `%s`, return code: %s:\n%s",
cmd,
process.returncode,
stderr_data,
)
        if process.returncode != 0:
            _LOGGER.error(
                "Error running command: `%s`, return code: %s", cmd, process.returncode
            )
for name in conf:
hass.services.async_register(DOMAIN, name, async_service_handler)
return True
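# Illustrative configuration.yaml entry for this component. Commands containing
# a template (e.g. {{ host }}) are rendered from the service call data and run
# without a shell; plain commands run through the shell as-is:
#   shell_command:
#     restart_pow: touch ~/.pow/restart.txt
#     call_remote: curl -X POST https://example.com/ping/{{ host }}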
|
from homeassistant.const import STATE_IDLE
from tests.async_mock import patch
from tests.components.august.mocks import (
_create_august_with_devices,
_mock_doorbell_from_fixture,
)
async def test_create_doorbell(hass, aiohttp_client):
"""Test creation of a doorbell."""
doorbell_one = await _mock_doorbell_from_fixture(hass, "get_doorbell.json")
with patch.object(
doorbell_one, "async_get_doorbell_image", create=False, return_value="image"
):
await _create_august_with_devices(hass, [doorbell_one])
camera_k98gidt45gul_name_camera = hass.states.get(
"camera.k98gidt45gul_name_camera"
)
assert camera_k98gidt45gul_name_camera.state == STATE_IDLE
url = hass.states.get("camera.k98gidt45gul_name_camera").attributes[
"entity_picture"
]
client = await aiohttp_client(hass.http.app)
resp = await client.get(url)
assert resp.status == 200
body = await resp.text()
assert body == "image"
|
import aiohttp
from pytest import raises
import homeassistant.components.wunderground.sensor as wunderground
from homeassistant.const import (
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_INCHES,
STATE_UNKNOWN,
TEMP_CELSIUS,
)
from homeassistant.exceptions import PlatformNotReady
from homeassistant.setup import async_setup_component
from tests.common import assert_setup_component, load_fixture
VALID_CONFIG_PWS = {
"platform": "wunderground",
"api_key": "foo",
"pws_id": "bar",
"monitored_conditions": [
"weather",
"feelslike_c",
"alerts",
"elevation",
"location",
],
}
VALID_CONFIG = {
"platform": "wunderground",
"api_key": "foo",
"lang": "EN",
"monitored_conditions": [
"weather",
"feelslike_c",
"alerts",
"elevation",
"location",
"weather_1d_metric",
"precip_1d_in",
],
}
INVALID_CONFIG = {
"platform": "wunderground",
"api_key": "BOB",
"pws_id": "bar",
"lang": "foo",
"monitored_conditions": ["weather", "feelslike_c", "alerts"],
}
URL = (
"http://api.wunderground.com/api/foo/alerts/conditions/forecast/lang"
":EN/q/32.87336,-117.22743.json"
)
PWS_URL = "http://api.wunderground.com/api/foo/alerts/conditions/lang:EN/q/pws:bar.json"
INVALID_URL = (
"http://api.wunderground.com/api/BOB/alerts/conditions/lang:foo/q/pws:bar.json"
)
async def test_setup(hass, aioclient_mock):
"""Test that the component is loaded."""
aioclient_mock.get(URL, text=load_fixture("wunderground-valid.json"))
with assert_setup_component(1, "sensor"):
await async_setup_component(hass, "sensor", {"sensor": VALID_CONFIG})
await hass.async_block_till_done()
async def test_setup_pws(hass, aioclient_mock):
"""Test that the component is loaded with PWS id."""
aioclient_mock.get(PWS_URL, text=load_fixture("wunderground-valid.json"))
with assert_setup_component(1, "sensor"):
await async_setup_component(hass, "sensor", {"sensor": VALID_CONFIG_PWS})
async def test_setup_invalid(hass, aioclient_mock):
"""Test that the component is not loaded with invalid config."""
aioclient_mock.get(INVALID_URL, text=load_fixture("wunderground-error.json"))
with assert_setup_component(0, "sensor"):
await async_setup_component(hass, "sensor", {"sensor": INVALID_CONFIG})
async def test_sensor(hass, aioclient_mock):
"""Test the WUnderground sensor class and methods."""
aioclient_mock.get(URL, text=load_fixture("wunderground-valid.json"))
await async_setup_component(hass, "sensor", {"sensor": VALID_CONFIG})
await hass.async_block_till_done()
state = hass.states.get("sensor.pws_weather")
assert state.state == "Clear"
assert state.name == "Weather Summary"
assert ATTR_UNIT_OF_MEASUREMENT not in state.attributes
assert (
state.attributes["entity_picture"] == "https://icons.wxug.com/i/c/k/clear.gif"
)
state = hass.states.get("sensor.pws_alerts")
assert state.state == "1"
assert state.name == "Alerts"
assert state.attributes["Message"] == "This is a test alert message"
assert state.attributes["icon"] == "mdi:alert-circle-outline"
assert "entity_picture" not in state.attributes
state = hass.states.get("sensor.pws_location")
assert state.state == "Holly Springs, NC"
assert state.name == "Location"
state = hass.states.get("sensor.pws_elevation")
assert state.state == "413"
assert state.name == "Elevation"
state = hass.states.get("sensor.pws_feelslike_c")
assert state.state == "40"
assert state.name == "Feels Like"
assert "entity_picture" not in state.attributes
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == TEMP_CELSIUS
state = hass.states.get("sensor.pws_weather_1d_metric")
assert state.state == "Mostly Cloudy. Fog overnight."
assert state.name == "Tuesday"
state = hass.states.get("sensor.pws_precip_1d_in")
assert state.state == "0.03"
assert state.name == "Precipitation Intensity Today"
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == LENGTH_INCHES
async def test_connect_failed(hass, aioclient_mock):
"""Test the WUnderground connection error."""
aioclient_mock.get(URL, exc=aiohttp.ClientError())
with raises(PlatformNotReady):
await wunderground.async_setup_platform(hass, VALID_CONFIG, lambda _: None)
async def test_invalid_data(hass, aioclient_mock):
"""Test the WUnderground invalid data."""
aioclient_mock.get(URL, text=load_fixture("wunderground-invalid.json"))
await async_setup_component(hass, "sensor", {"sensor": VALID_CONFIG})
await hass.async_block_till_done()
for condition in VALID_CONFIG["monitored_conditions"]:
state = hass.states.get(f"sensor.pws_{condition}")
assert state.state == STATE_UNKNOWN
async def test_entity_id_with_multiple_stations(hass, aioclient_mock):
"""Test not generating duplicate entity ids with multiple stations."""
aioclient_mock.get(URL, text=load_fixture("wunderground-valid.json"))
aioclient_mock.get(PWS_URL, text=load_fixture("wunderground-valid.json"))
config = [VALID_CONFIG, {**VALID_CONFIG_PWS, "entity_namespace": "hi"}]
await async_setup_component(hass, "sensor", {"sensor": config})
await hass.async_block_till_done()
state = hass.states.get("sensor.pws_weather")
assert state is not None
assert state.state == "Clear"
state = hass.states.get("sensor.hi_pws_weather")
assert state is not None
assert state.state == "Clear"
async def test_fails_because_of_unique_id(hass, aioclient_mock):
"""Test same config twice fails because of unique_id."""
aioclient_mock.get(URL, text=load_fixture("wunderground-valid.json"))
aioclient_mock.get(PWS_URL, text=load_fixture("wunderground-valid.json"))
config = [
VALID_CONFIG,
{**VALID_CONFIG, "entity_namespace": "hi"},
VALID_CONFIG_PWS,
]
await async_setup_component(hass, "sensor", {"sensor": config})
await hass.async_block_till_done()
states = hass.states.async_all()
expected = len(VALID_CONFIG["monitored_conditions"]) + len(
VALID_CONFIG_PWS["monitored_conditions"]
)
assert len(states) == expected
|
import logging
from typing import Any, Dict, Optional
from homematicip.aio.device import AsyncDevice
from homematicip.aio.group import AsyncGroup
from homeassistant.core import callback
from homeassistant.helpers import device_registry as dr, entity_registry as er
from homeassistant.helpers.entity import Entity
from .const import DOMAIN as HMIPC_DOMAIN
from .hap import HomematicipHAP
_LOGGER = logging.getLogger(__name__)
ATTR_MODEL_TYPE = "model_type"
ATTR_LOW_BATTERY = "low_battery"
ATTR_CONFIG_PENDING = "config_pending"
ATTR_CONNECTION_TYPE = "connection_type"
ATTR_DUTY_CYCLE_REACHED = "duty_cycle_reached"
ATTR_ID = "id"
ATTR_IS_GROUP = "is_group"
# RSSI HAP -> Device
ATTR_RSSI_DEVICE = "rssi_device"
# RSSI Device -> HAP
ATTR_RSSI_PEER = "rssi_peer"
ATTR_SABOTAGE = "sabotage"
ATTR_GROUP_MEMBER_UNREACHABLE = "group_member_unreachable"
ATTR_DEVICE_OVERHEATED = "device_overheated"
ATTR_DEVICE_OVERLOADED = "device_overloaded"
ATTR_DEVICE_UNTERVOLTAGE = "device_undervoltage"
ATTR_EVENT_DELAY = "event_delay"
DEVICE_ATTRIBUTE_ICONS = {
"lowBat": "mdi:battery-outline",
"sabotage": "mdi:shield-alert",
"dutyCycle": "mdi:alert",
"deviceOverheated": "mdi:alert",
"deviceOverloaded": "mdi:alert",
"deviceUndervoltage": "mdi:alert",
"configPending": "mdi:alert-circle",
}
DEVICE_ATTRIBUTES = {
"modelType": ATTR_MODEL_TYPE,
"connectionType": ATTR_CONNECTION_TYPE,
"sabotage": ATTR_SABOTAGE,
"dutyCycle": ATTR_DUTY_CYCLE_REACHED,
"rssiDeviceValue": ATTR_RSSI_DEVICE,
"rssiPeerValue": ATTR_RSSI_PEER,
"deviceOverheated": ATTR_DEVICE_OVERHEATED,
"deviceOverloaded": ATTR_DEVICE_OVERLOADED,
"deviceUndervoltage": ATTR_DEVICE_UNTERVOLTAGE,
"configPending": ATTR_CONFIG_PENDING,
"eventDelay": ATTR_EVENT_DELAY,
"id": ATTR_ID,
}
GROUP_ATTRIBUTES = {
"modelType": ATTR_MODEL_TYPE,
"lowBat": ATTR_LOW_BATTERY,
"sabotage": ATTR_SABOTAGE,
"dutyCycle": ATTR_DUTY_CYCLE_REACHED,
"configPending": ATTR_CONFIG_PENDING,
"unreach": ATTR_GROUP_MEMBER_UNREACHABLE,
}
class HomematicipGenericEntity(Entity):
"""Representation of the HomematicIP generic entity."""
def __init__(
self,
hap: HomematicipHAP,
device,
post: Optional[str] = None,
channel: Optional[int] = None,
) -> None:
"""Initialize the generic entity."""
self._hap = hap
self._home = hap.home
self._device = device
self._post = post
self._channel = channel
        # Marker showing that the HmIP device has been removed.
self.hmip_device_removed = False
_LOGGER.info("Setting up %s (%s)", self.name, self._device.modelType)
@property
def device_info(self) -> Dict[str, Any]:
"""Return device specific attributes."""
# Only physical devices should be HA devices.
if isinstance(self._device, AsyncDevice):
return {
"identifiers": {
# Serial numbers of Homematic IP device
(HMIPC_DOMAIN, self._device.id)
},
"name": self._device.label,
"manufacturer": self._device.oem,
"model": self._device.modelType,
"sw_version": self._device.firmwareVersion,
# Link to the homematic ip access point.
"via_device": (HMIPC_DOMAIN, self._device.homeId),
}
return None
async def async_added_to_hass(self) -> None:
"""Register callbacks."""
self._hap.hmip_device_by_entity_id[self.entity_id] = self._device
self._device.on_update(self._async_device_changed)
self._device.on_remove(self._async_device_removed)
@callback
def _async_device_changed(self, *args, **kwargs) -> None:
"""Handle device state changes."""
# Don't update disabled entities
if self.enabled:
_LOGGER.debug("Event %s (%s)", self.name, self._device.modelType)
self.async_write_ha_state()
else:
_LOGGER.debug(
"Device Changed Event for %s (%s) not fired. Entity is disabled",
self.name,
self._device.modelType,
)
async def async_will_remove_from_hass(self) -> None:
"""Run when hmip device will be removed from hass."""
# Only go further if the device/entity should be removed from registries
# due to a removal of the HmIP device.
if self.hmip_device_removed:
try:
del self._hap.hmip_device_by_entity_id[self.entity_id]
await self.async_remove_from_registries()
except KeyError as err:
_LOGGER.debug("Error removing HMIP device from registry: %s", err)
async def async_remove_from_registries(self) -> None:
"""Remove entity/device from registry."""
# Remove callback from device.
self._device.remove_callback(self._async_device_changed)
self._device.remove_callback(self._async_device_removed)
if not self.registry_entry:
return
device_id = self.registry_entry.device_id
if device_id:
# Remove from device registry.
device_registry = await dr.async_get_registry(self.hass)
if device_id in device_registry.devices:
# This will also remove associated entities from entity registry.
device_registry.async_remove_device(device_id)
else:
# Remove from entity registry.
# Only relevant for entities that do not belong to a device.
entity_id = self.registry_entry.entity_id
if entity_id:
entity_registry = await er.async_get_registry(self.hass)
if entity_id in entity_registry.entities:
entity_registry.async_remove(entity_id)
@callback
def _async_device_removed(self, *args, **kwargs) -> None:
"""Handle hmip device removal."""
        # Set marker showing that the HmIP device has been removed.
self.hmip_device_removed = True
self.hass.async_create_task(self.async_remove())
@property
def name(self) -> str:
"""Return the name of the generic entity."""
name = None
# Try to get a label from a channel.
if hasattr(self._device, "functionalChannels"):
if self._channel:
name = self._device.functionalChannels[self._channel].label
else:
if len(self._device.functionalChannels) > 1:
name = self._device.functionalChannels[1].label
# Use device label, if name is not defined by channel label.
if not name:
name = self._device.label
if self._post:
name = f"{name} {self._post}"
elif self._channel:
name = f"{name} Channel{self._channel}"
# Add a prefix to the name if the homematic ip home has a name.
if name and self._home.name:
name = f"{self._home.name} {name}"
return name
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
@property
def available(self) -> bool:
"""Return if entity is available."""
return not self._device.unreach
@property
def unique_id(self) -> str:
"""Return a unique ID."""
unique_id = f"{self.__class__.__name__}_{self._device.id}"
if self._channel:
unique_id = (
f"{self.__class__.__name__}_Channel{self._channel}_{self._device.id}"
)
return unique_id
@property
def icon(self) -> Optional[str]:
"""Return the icon."""
for attr, icon in DEVICE_ATTRIBUTE_ICONS.items():
if getattr(self._device, attr, None):
return icon
return None
@property
def device_state_attributes(self) -> Dict[str, Any]:
"""Return the state attributes of the generic entity."""
state_attr = {}
if isinstance(self._device, AsyncDevice):
for attr, attr_key in DEVICE_ATTRIBUTES.items():
attr_value = getattr(self._device, attr, None)
if attr_value:
state_attr[attr_key] = attr_value
state_attr[ATTR_IS_GROUP] = False
if isinstance(self._device, AsyncGroup):
for attr, attr_key in GROUP_ATTRIBUTES.items():
attr_value = getattr(self._device, attr, None)
if attr_value:
state_attr[attr_key] = attr_value
state_attr[ATTR_IS_GROUP] = True
return state_attr
|
from datetime import timedelta
import uuid
from aiohttp import web
import voluptuous as vol
from homeassistant.auth.models import (
TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
Credentials,
User,
)
from homeassistant.components import websocket_api
from homeassistant.components.http.auth import async_sign_path
from homeassistant.components.http.ban import log_invalid_auth
from homeassistant.components.http.data_validator import RequestDataValidator
from homeassistant.components.http.view import HomeAssistantView
from homeassistant.const import HTTP_BAD_REQUEST, HTTP_FORBIDDEN, HTTP_OK
from homeassistant.core import HomeAssistant, callback
from homeassistant.loader import bind_hass
from homeassistant.util import dt as dt_util
from . import indieauth, login_flow, mfa_setup_flow
DOMAIN = "auth"
WS_TYPE_CURRENT_USER = "auth/current_user"
SCHEMA_WS_CURRENT_USER = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_CURRENT_USER}
)
WS_TYPE_LONG_LIVED_ACCESS_TOKEN = "auth/long_lived_access_token"
SCHEMA_WS_LONG_LIVED_ACCESS_TOKEN = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_LONG_LIVED_ACCESS_TOKEN,
vol.Required("lifespan"): int, # days
vol.Required("client_name"): str,
vol.Optional("client_icon"): str,
}
)
WS_TYPE_REFRESH_TOKENS = "auth/refresh_tokens"
SCHEMA_WS_REFRESH_TOKENS = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{vol.Required("type"): WS_TYPE_REFRESH_TOKENS}
)
WS_TYPE_DELETE_REFRESH_TOKEN = "auth/delete_refresh_token"
SCHEMA_WS_DELETE_REFRESH_TOKEN = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_DELETE_REFRESH_TOKEN,
vol.Required("refresh_token_id"): str,
}
)
WS_TYPE_SIGN_PATH = "auth/sign_path"
SCHEMA_WS_SIGN_PATH = websocket_api.BASE_COMMAND_MESSAGE_SCHEMA.extend(
{
vol.Required("type"): WS_TYPE_SIGN_PATH,
vol.Required("path"): str,
vol.Optional("expires", default=30): int,
}
)
RESULT_TYPE_CREDENTIALS = "credentials"
RESULT_TYPE_USER = "user"
@bind_hass
def create_auth_code(hass, client_id: str, user: User) -> str:
"""Create an authorization code to fetch tokens."""
return hass.data[DOMAIN](client_id, user)
async def async_setup(hass, config):
"""Component to allow users to login."""
store_result, retrieve_result = _create_auth_code_store()
hass.data[DOMAIN] = store_result
hass.http.register_view(TokenView(retrieve_result))
hass.http.register_view(LinkUserView(retrieve_result))
hass.components.websocket_api.async_register_command(
WS_TYPE_CURRENT_USER, websocket_current_user, SCHEMA_WS_CURRENT_USER
)
hass.components.websocket_api.async_register_command(
WS_TYPE_LONG_LIVED_ACCESS_TOKEN,
websocket_create_long_lived_access_token,
SCHEMA_WS_LONG_LIVED_ACCESS_TOKEN,
)
hass.components.websocket_api.async_register_command(
WS_TYPE_REFRESH_TOKENS, websocket_refresh_tokens, SCHEMA_WS_REFRESH_TOKENS
)
hass.components.websocket_api.async_register_command(
WS_TYPE_DELETE_REFRESH_TOKEN,
websocket_delete_refresh_token,
SCHEMA_WS_DELETE_REFRESH_TOKEN,
)
hass.components.websocket_api.async_register_command(
WS_TYPE_SIGN_PATH, websocket_sign_path, SCHEMA_WS_SIGN_PATH
)
await login_flow.async_setup(hass, store_result)
await mfa_setup_flow.async_setup(hass)
return True
class TokenView(HomeAssistantView):
"""View to issue or revoke tokens."""
url = "/auth/token"
name = "api:auth:token"
requires_auth = False
cors_allowed = True
def __init__(self, retrieve_user):
"""Initialize the token view."""
self._retrieve_user = retrieve_user
@log_invalid_auth
async def post(self, request):
"""Grant a token."""
hass = request.app["hass"]
data = await request.post()
grant_type = data.get("grant_type")
# IndieAuth 6.3.5
# The revocation endpoint is the same as the token endpoint.
# The revocation request includes an additional parameter,
# action=revoke.
if data.get("action") == "revoke":
return await self._async_handle_revoke_token(hass, data)
if grant_type == "authorization_code":
return await self._async_handle_auth_code(hass, data, request.remote)
if grant_type == "refresh_token":
return await self._async_handle_refresh_token(hass, data, request.remote)
return self.json(
{"error": "unsupported_grant_type"}, status_code=HTTP_BAD_REQUEST
)
async def _async_handle_revoke_token(self, hass, data):
"""Handle revoke token request."""
# OAuth 2.0 Token Revocation [RFC7009]
# 2.2 The authorization server responds with HTTP status code 200
# if the token has been revoked successfully or if the client
# submitted an invalid token.
token = data.get("token")
if token is None:
return web.Response(status=HTTP_OK)
refresh_token = await hass.auth.async_get_refresh_token_by_token(token)
if refresh_token is None:
return web.Response(status=HTTP_OK)
await hass.auth.async_remove_refresh_token(refresh_token)
return web.Response(status=HTTP_OK)
async def _async_handle_auth_code(self, hass, data, remote_addr):
"""Handle authorization code request."""
client_id = data.get("client_id")
if client_id is None or not indieauth.verify_client_id(client_id):
return self.json(
{"error": "invalid_request", "error_description": "Invalid client id"},
status_code=HTTP_BAD_REQUEST,
)
code = data.get("code")
if code is None:
return self.json(
{"error": "invalid_request", "error_description": "Invalid code"},
status_code=HTTP_BAD_REQUEST,
)
user = self._retrieve_user(client_id, RESULT_TYPE_USER, code)
if user is None or not isinstance(user, User):
return self.json(
{"error": "invalid_request", "error_description": "Invalid code"},
status_code=HTTP_BAD_REQUEST,
)
# refresh user
user = await hass.auth.async_get_user(user.id)
if not user.is_active:
return self.json(
{"error": "access_denied", "error_description": "User is not active"},
status_code=HTTP_FORBIDDEN,
)
refresh_token = await hass.auth.async_create_refresh_token(user, client_id)
access_token = hass.auth.async_create_access_token(refresh_token, remote_addr)
return self.json(
{
"access_token": access_token,
"token_type": "Bearer",
"refresh_token": refresh_token.token,
"expires_in": int(
refresh_token.access_token_expiration.total_seconds()
),
}
)
async def _async_handle_refresh_token(self, hass, data, remote_addr):
"""Handle authorization code request."""
client_id = data.get("client_id")
if client_id is not None and not indieauth.verify_client_id(client_id):
return self.json(
{"error": "invalid_request", "error_description": "Invalid client id"},
status_code=HTTP_BAD_REQUEST,
)
token = data.get("refresh_token")
if token is None:
return self.json({"error": "invalid_request"}, status_code=HTTP_BAD_REQUEST)
refresh_token = await hass.auth.async_get_refresh_token_by_token(token)
if refresh_token is None:
return self.json({"error": "invalid_grant"}, status_code=HTTP_BAD_REQUEST)
if refresh_token.client_id != client_id:
return self.json({"error": "invalid_request"}, status_code=HTTP_BAD_REQUEST)
access_token = hass.auth.async_create_access_token(refresh_token, remote_addr)
return self.json(
{
"access_token": access_token,
"token_type": "Bearer",
"expires_in": int(
refresh_token.access_token_expiration.total_seconds()
),
}
)
class LinkUserView(HomeAssistantView):
"""View to link existing users to new credentials."""
url = "/auth/link_user"
name = "api:auth:link_user"
def __init__(self, retrieve_credentials):
"""Initialize the link user view."""
self._retrieve_credentials = retrieve_credentials
@RequestDataValidator(vol.Schema({"code": str, "client_id": str}))
async def post(self, request, data):
"""Link a user."""
hass = request.app["hass"]
user = request["hass_user"]
credentials = self._retrieve_credentials(
data["client_id"], RESULT_TYPE_CREDENTIALS, data["code"]
)
if credentials is None:
return self.json_message("Invalid code", status_code=HTTP_BAD_REQUEST)
await hass.auth.async_link_user(user, credentials)
return self.json_message("User linked")
@callback
def _create_auth_code_store():
"""Create an in memory store."""
temp_results = {}
@callback
def store_result(client_id, result):
"""Store flow result and return a code to retrieve it."""
if isinstance(result, User):
result_type = RESULT_TYPE_USER
elif isinstance(result, Credentials):
result_type = RESULT_TYPE_CREDENTIALS
else:
raise ValueError("result has to be either User or Credentials")
code = uuid.uuid4().hex
temp_results[(client_id, result_type, code)] = (
dt_util.utcnow(),
result_type,
result,
)
return code
@callback
def retrieve_result(client_id, result_type, code):
"""Retrieve flow result."""
key = (client_id, result_type, code)
if key not in temp_results:
return None
created, _, result = temp_results.pop(key)
# OAuth 4.2.1
# The authorization code MUST expire shortly after it is issued to
# mitigate the risk of leaks. A maximum authorization code lifetime of
# 10 minutes is RECOMMENDED.
if dt_util.utcnow() - created < timedelta(minutes=10):
return result
return None
return store_result, retrieve_result
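# Round-trip sketch for the store created above (client_id and user are
# placeholders; stored codes expire after 10 minutes):
#   store_result, retrieve_result = _create_auth_code_store()
#   code = store_result("https://example.com/app", user)
#   assert retrieve_result("https://example.com/app", RESULT_TYPE_USER, code) is user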
@websocket_api.ws_require_user()
@websocket_api.async_response
async def websocket_current_user(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Return the current user."""
user = connection.user
enabled_modules = await hass.auth.async_get_enabled_mfa(user)
connection.send_message(
websocket_api.result_message(
msg["id"],
{
"id": user.id,
"name": user.name,
"is_owner": user.is_owner,
"is_admin": user.is_admin,
"credentials": [
{
"auth_provider_type": c.auth_provider_type,
"auth_provider_id": c.auth_provider_id,
}
for c in user.credentials
],
"mfa_modules": [
{
"id": module.id,
"name": module.name,
"enabled": module.id in enabled_modules,
}
for module in hass.auth.auth_mfa_modules
],
},
)
)
@websocket_api.ws_require_user()
@websocket_api.async_response
async def websocket_create_long_lived_access_token(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Create or a long-lived access token."""
refresh_token = await hass.auth.async_create_refresh_token(
connection.user,
client_name=msg["client_name"],
client_icon=msg.get("client_icon"),
token_type=TOKEN_TYPE_LONG_LIVED_ACCESS_TOKEN,
access_token_expiration=timedelta(days=msg["lifespan"]),
)
access_token = hass.auth.async_create_access_token(refresh_token)
connection.send_message(websocket_api.result_message(msg["id"], access_token))
@websocket_api.ws_require_user()
@callback
def websocket_refresh_tokens(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Return metadata of users refresh tokens."""
current_id = connection.refresh_token_id
connection.send_message(
websocket_api.result_message(
msg["id"],
[
{
"id": refresh.id,
"client_id": refresh.client_id,
"client_name": refresh.client_name,
"client_icon": refresh.client_icon,
"type": refresh.token_type,
"created_at": refresh.created_at,
"is_current": refresh.id == current_id,
"last_used_at": refresh.last_used_at,
"last_used_ip": refresh.last_used_ip,
}
for refresh in connection.user.refresh_tokens.values()
],
)
)
@websocket_api.ws_require_user()
@websocket_api.async_response
async def websocket_delete_refresh_token(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Handle a delete refresh token request."""
refresh_token = connection.user.refresh_tokens.get(msg["refresh_token_id"])
if refresh_token is None:
return websocket_api.error_message(
msg["id"], "invalid_token_id", "Received invalid token"
)
await hass.auth.async_remove_refresh_token(refresh_token)
connection.send_message(websocket_api.result_message(msg["id"], {}))
@websocket_api.ws_require_user()
@callback
def websocket_sign_path(
hass: HomeAssistant, connection: websocket_api.ActiveConnection, msg
):
"""Handle a sign path request."""
connection.send_message(
websocket_api.result_message(
msg["id"],
{
"path": async_sign_path(
hass,
connection.refresh_token_id,
msg["path"],
timedelta(seconds=msg["expires"]),
)
},
)
)
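# Example client message accepted by SCHEMA_WS_SIGN_PATH (the "id" field comes
# from the websocket_api envelope; the path value is illustrative):
#   {"id": 24, "type": "auth/sign_path",
#    "path": "/api/camera_proxy/camera.front_door", "expires": 60}
# The result contains a signed variant of the requested path.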
|
from collections import OrderedDict
from homeassistant.components import fan
from homeassistant.components.climate import const as climate
from homeassistant.const import TEMP_CELSIUS, TEMP_FAHRENHEIT
DOMAIN = "alexa"
EVENT_ALEXA_SMART_HOME = "alexa_smart_home"
# Flash briefing constants
CONF_UID = "uid"
CONF_TITLE = "title"
CONF_AUDIO = "audio"
CONF_TEXT = "text"
CONF_DISPLAY_URL = "display_url"
CONF_FILTER = "filter"
CONF_ENTITY_CONFIG = "entity_config"
CONF_ENDPOINT = "endpoint"
CONF_LOCALE = "locale"
CONF_PASSWORD = "password"
ATTR_UID = "uid"
ATTR_UPDATE_DATE = "updateDate"
ATTR_TITLE_TEXT = "titleText"
ATTR_STREAM_URL = "streamUrl"
ATTR_MAIN_TEXT = "mainText"
ATTR_REDIRECTION_URL = "redirectionURL"
SYN_RESOLUTION_MATCH = "ER_SUCCESS_MATCH"
DATE_FORMAT = "%Y-%m-%dT%H:%M:%S.0Z"
API_DIRECTIVE = "directive"
API_ENDPOINT = "endpoint"
API_EVENT = "event"
API_CONTEXT = "context"
API_HEADER = "header"
API_PAYLOAD = "payload"
API_SCOPE = "scope"
API_CHANGE = "change"
API_PASSWORD = "password"
CONF_DESCRIPTION = "description"
CONF_DISPLAY_CATEGORIES = "display_categories"
CONF_SUPPORTED_LOCALES = (
"de-DE",
"en-AU",
"en-CA",
"en-GB",
"en-IN",
"en-US",
"es-ES",
"es-MX",
"fr-CA",
"fr-FR",
"it-IT",
"ja-JP",
)
API_TEMP_UNITS = {TEMP_FAHRENHEIT: "FAHRENHEIT", TEMP_CELSIUS: "CELSIUS"}
# Needs to be an ordered dict because `async_api_set_thermostat_mode` builds a
# reverse mapping of it, and the first occurrence of OFF must map back to the
# corresponding HA state.
API_THERMOSTAT_MODES = OrderedDict(
[
(climate.HVAC_MODE_HEAT, "HEAT"),
(climate.HVAC_MODE_COOL, "COOL"),
(climate.HVAC_MODE_HEAT_COOL, "AUTO"),
(climate.HVAC_MODE_AUTO, "AUTO"),
(climate.HVAC_MODE_OFF, "OFF"),
(climate.HVAC_MODE_FAN_ONLY, "OFF"),
(climate.HVAC_MODE_DRY, "CUSTOM"),
]
)
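# Sketch of the reverse mapping described above, where the first occurrence of
# an Alexa value wins (illustrative; the real lookup happens in
# `async_api_set_thermostat_mode`):
#   ha_mode_for = {}
#   for ha_mode, alexa_mode in API_THERMOSTAT_MODES.items():
#       ha_mode_for.setdefault(alexa_mode, ha_mode)
#   # ha_mode_for["OFF"] -> climate.HVAC_MODE_OFF (not HVAC_MODE_FAN_ONLY)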
API_THERMOSTAT_MODES_CUSTOM = {climate.HVAC_MODE_DRY: "DEHUMIDIFY"}
API_THERMOSTAT_PRESETS = {climate.PRESET_ECO: "ECO"}
PERCENTAGE_FAN_MAP = {
fan.SPEED_OFF: 0,
fan.SPEED_LOW: 33,
fan.SPEED_MEDIUM: 66,
fan.SPEED_HIGH: 100,
}
class Cause:
"""Possible causes for property changes.
https://developer.amazon.com/docs/smarthome/state-reporting-for-a-smart-home-skill.html#cause-object
"""
# Indicates that the event was caused by a customer interaction with an
# application. For example, a customer switches on a light, or locks a door
# using the Alexa app or an app provided by a device vendor.
APP_INTERACTION = "APP_INTERACTION"
# Indicates that the event was caused by a physical interaction with an
# endpoint. For example manually switching on a light or manually locking a
# door lock
PHYSICAL_INTERACTION = "PHYSICAL_INTERACTION"
# Indicates that the event was caused by the periodic poll of an appliance,
# which found a change in value. For example, you might poll a temperature
# sensor every hour, and send the updated temperature to Alexa.
PERIODIC_POLL = "PERIODIC_POLL"
# Indicates that the event was caused by the application of a device rule.
# For example, a customer configures a rule to switch on a light if a
# motion sensor detects motion. In this case, Alexa receives an event from
# the motion sensor, and another event from the light to indicate that its
# state change was caused by the rule.
RULE_TRIGGER = "RULE_TRIGGER"
# Indicates that the event was caused by a voice interaction with Alexa.
# For example a user speaking to their Echo device.
VOICE_INTERACTION = "VOICE_INTERACTION"
class Inputs:
"""Valid names for the InputController.
https://developer.amazon.com/docs/device-apis/alexa-property-schemas.html#input
"""
VALID_SOURCE_NAME_MAP = {
"antenna": "TUNER",
"antennatv": "TUNER",
"aux": "AUX 1",
"aux1": "AUX 1",
"aux2": "AUX 2",
"aux3": "AUX 3",
"aux4": "AUX 4",
"aux5": "AUX 5",
"aux6": "AUX 6",
"aux7": "AUX 7",
"bluray": "BLURAY",
"blurayplayer": "BLURAY",
"cable": "CABLE",
"cd": "CD",
"coax": "COAX 1",
"coax1": "COAX 1",
"coax2": "COAX 2",
"composite": "COMPOSITE 1",
"composite1": "COMPOSITE 1",
"dvd": "DVD",
"game": "GAME",
"gameconsole": "GAME",
"hdradio": "HD RADIO",
"hdmi": "HDMI 1",
"hdmi1": "HDMI 1",
"hdmi2": "HDMI 2",
"hdmi3": "HDMI 3",
"hdmi4": "HDMI 4",
"hdmi5": "HDMI 5",
"hdmi6": "HDMI 6",
"hdmi7": "HDMI 7",
"hdmi8": "HDMI 8",
"hdmi9": "HDMI 9",
"hdmi10": "HDMI 10",
"hdmiarc": "HDMI ARC",
"input": "INPUT 1",
"input1": "INPUT 1",
"input2": "INPUT 2",
"input3": "INPUT 3",
"input4": "INPUT 4",
"input5": "INPUT 5",
"input6": "INPUT 6",
"input7": "INPUT 7",
"input8": "INPUT 8",
"input9": "INPUT 9",
"input10": "INPUT 10",
"ipod": "IPOD",
"line": "LINE 1",
"line1": "LINE 1",
"line2": "LINE 2",
"line3": "LINE 3",
"line4": "LINE 4",
"line5": "LINE 5",
"line6": "LINE 6",
"line7": "LINE 7",
"mediaplayer": "MEDIA PLAYER",
"optical": "OPTICAL 1",
"optical1": "OPTICAL 1",
"optical2": "OPTICAL 2",
"phono": "PHONO",
"playstation": "PLAYSTATION",
"playstation3": "PLAYSTATION 3",
"playstation4": "PLAYSTATION 4",
"rokumediaplayer": "MEDIA PLAYER",
"satellite": "SATELLITE",
"satellitetv": "SATELLITE",
"smartcast": "SMARTCAST",
"tuner": "TUNER",
"tv": "TV",
"usbdac": "USB DAC",
"video": "VIDEO 1",
"video1": "VIDEO 1",
"video2": "VIDEO 2",
"video3": "VIDEO 3",
"xbox": "XBOX",
}
VALID_SOUND_MODE_MAP = {
"movie": "MOVIE",
"music": "MUSIC",
"night": "NIGHT",
"sport": "SPORT",
"tv": "TV",
}
|
import pytest
from datetime import datetime, timezone, timedelta
from twtxt.models import Tweet, Source
def test_source():
source = Source("foo", "bar")
assert source.nick == "foo"
assert source.url == "bar"
with pytest.raises(TypeError):
Source()
def test_tweet_init():
with pytest.raises(ValueError) as e:
Tweet("")
assert "empty text" in str(e.value)
with pytest.raises(TypeError) as e:
Tweet("foobar", 0)
assert "created_at is of invalid type" in str(e.value)
source = Source("foo", "bar")
created_at = datetime.now(timezone.utc)
tweet = Tweet("foobar", created_at, source)
assert tweet.text == "foobar"
assert tweet.created_at == created_at.replace(microsecond=0)
assert tweet.source == source
def test_tweet_str():
tweet = Tweet("foobar", datetime(2000, 1, 1, 1, 1, 1, 1, tzinfo=timezone.utc))
assert str(tweet) == "2000-01-01T01:01:01+00:00\tfoobar"
def test_tweet_relative_datetime():
tweet = Tweet("foobar")
assert tweet.relative_datetime == "a moment ago"
tweet = Tweet("foobar", datetime.now(timezone.utc) + timedelta(hours=1, minutes=1))
assert tweet.relative_datetime == "an hour from now"
tweet = Tweet("foobar", datetime.now(timezone.utc) - timedelta(hours=1, minutes=1))
assert tweet.relative_datetime == "an hour ago"
def test_tweet_absolute_datetime():
tweet = Tweet("foobar", datetime(2000, 1, 1, 1, 1, 1, 1, tzinfo=timezone.utc))
assert tweet.absolute_datetime == "Sat, 01 Jan 2000 01:01:01"
def test_tweet_ordering():
now = datetime.now(timezone.utc)
tweet_1 = Tweet("A", now)
tweet_2 = Tweet("B", now + timedelta(hours=1))
tweet_3 = Tweet("C", now + timedelta(hours=2))
tweet_4 = Tweet("D", now + timedelta(hours=2))
tweet_5 = Tweet("D", now + timedelta(hours=2))
source = Source("foo", "bar")
# explicit testing
with pytest.raises(TypeError):
tweet_1 < source
with pytest.raises(TypeError):
tweet_1 <= source
with pytest.raises(TypeError):
tweet_1 > source
with pytest.raises(TypeError):
tweet_1 >= source
assert tweet_1 != source
assert tweet_1 < tweet_2
assert tweet_1 <= tweet_2
assert tweet_2 > tweet_1
assert tweet_2 >= tweet_1
assert tweet_3 != tweet_4
assert tweet_5 == tweet_4
assert tweet_5 >= tweet_4
assert tweet_5 <= tweet_4
assert not(tweet_3 <= tweet_4)
assert not(tweet_3 >= tweet_4)
|
AUTOCAPITALIZE_NONE = 0
def measure_string(*args, **kwargs):
return 12.0
def in_background(func):
return func
def get_screen_size():
return 100, 100
class View(object):
def __init__(self, *args, **kwargs):
self.on_screen = True
self.width = 100
self.height = 100
self.content_size = (100, 100)
self.content_offset = (0, 0)
self.superview = None
self.subviews = []
self.delegate = None
def add_subview(self, v):
self.subviews.append(v)
v.superview = self
def remove_subview(self, v):
self.subviews.remove(v)
def present(self, style='popover'):
pass
def wait_modal(self):
pass
def size_to_fit(self):
pass
def send_to_back(self):
pass
def bring_to_front(self):
pass
class TextField(View):
def __init__(self, *args, **kwargs):
super(TextField, self).__init__(*args, **kwargs)
self.text = ''
class TextView(View):
def __init__(self, *args, **kwargs):
super(TextView, self).__init__(*args, **kwargs)
self.text = ''
self.selected_range = (0, 0)
def replace_range(self, rng, s):
self.text = self.text[:rng[0]] + s + self.text[rng[1]:]
tot_len = len(self.text)
self.selected_range = (tot_len, tot_len)
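    # Example: with text "hello world", replace_range((0, 5), "goodbye") leaves
    # text == "goodbye world" and selected_range == (13, 13).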
def begin_editing(self):
pass
def end_editing(self):
pass
class ScrollView(View):
pass
class Button(View):
def __init__(self, *args, **kwargs):
super(Button, self).__init__(*args, **kwargs)
class TableView(View):
def __init__(self, *args, **kwargs):
super(TableView, self).__init__(*args, **kwargs)
class ListDataSource(object):
def __init__(self, lst):
pass
|