import json
import os
import re
from shlex import quote
from paasta_tools.cli.utils import lazy_choices_completer
from paasta_tools.cli.utils import list_clusters
from paasta_tools.cli.utils import list_instances
from paasta_tools.cli.utils import run_on_master
from paasta_tools.utils import list_services
from paasta_tools.utils import load_system_paasta_config
from paasta_tools.utils import PaastaColors
from paasta_tools.utils import PaastaNotConfiguredError
from paasta_tools.utils import SystemPaastaConfig
ARG_DEFAULTS = dict(
common=dict(
service=None,
instance=None,
cluster=None, # load from system paasta config later
verbose=False,
),
start=dict(
cmd=None,
detach=False,
staging_timeout=240.0,
instances=1,
docker_image=None,
dry_run=False,
constraint=[],
notification_email=None,
retries=0,
),
stop=dict(run_id=None, framework_id=None),
list=dict(),
)
def get_system_paasta_config():
try:
return load_system_paasta_config()
except PaastaNotConfiguredError:
print(
PaastaColors.yellow(
"Warning: Couldn't load config files from '/etc/paasta'. This "
"indicates PaaSTA is not configured locally on this host, and "
"remote-run may not behave the same way it would behave on a "
"server configured for PaaSTA."
),
sep="\n",
)
return SystemPaastaConfig({"volumes": []}, "/etc/paasta")
def add_common_args_to_parser(parser):
parser.add_argument(
"-s",
"--service",
help="The name of the service you wish to inspect. Required.",
required=True,
).completer = lazy_choices_completer(list_services)
parser.add_argument(
"-i",
"--instance",
help=(
"Simulate a docker run for a particular instance of the "
"service, like 'main' or 'canary'. Required."
),
required=True,
).completer = lazy_choices_completer(list_instances)
parser.add_argument(
"-c",
"--cluster",
help=(
"The name of the cluster you wish to run your task on. "
"If omitted, uses the default cluster defined in the paasta "
f"remote-run configs."
),
default=ARG_DEFAULTS["common"]["cluster"],
).completer = lazy_choices_completer(list_clusters)
parser.add_argument(
"-v",
"--verbose",
help="Show more output",
action="store_true",
default=ARG_DEFAULTS["common"]["verbose"],
)
def add_start_parser(subparser):
parser = subparser.add_parser("start", help="Start task subcommand")
add_common_args_to_parser(parser)
parser.add_argument(
"-C",
"--cmd",
help=(
"Run Docker container with particular command, for example: "
'"bash". By default will use the command or args specified by the '
"soa-configs or what was specified in the Dockerfile"
),
default=ARG_DEFAULTS["start"]["cmd"],
    )
parser.add_argument(
"-D",
"--detach",
help="Launch in background",
action="store_true",
default=ARG_DEFAULTS["start"]["detach"],
)
default_staging_timeout = ARG_DEFAULTS["start"]["staging_timeout"]
parser.add_argument(
"-t",
"--staging-timeout",
help=(
"A timeout in seconds for the task to be launching before killed. "
f"Default: {default_staging_timeout}s"
),
default=ARG_DEFAULTS["start"]["staging_timeout"],
type=float,
)
parser.add_argument(
"-j",
"--instances",
help="Number of copies of the task to launch",
default=ARG_DEFAULTS["start"]["instances"],
type=int,
)
parser.add_argument(
"--docker-image",
help=(
"URL of docker image to use. "
"Defaults to using the deployed docker image."
),
default=ARG_DEFAULTS["start"]["docker_image"],
)
parser.add_argument(
"-R",
"--run-id",
help="ID of task to stop",
default=ARG_DEFAULTS["stop"]["run_id"],
)
parser.add_argument(
"-d",
"--dry-run",
help=(
"Don't launch the task. "
"Instead output task that would have been launched"
),
action="store_true",
default=ARG_DEFAULTS["start"]["dry_run"],
)
parser.add_argument(
"-X",
"--constraint",
help="Constraint option, format: <attr>,OP[,<value>], OP can be one "
"of the following: EQUALS matches attribute value exactly, LIKE and "
"UNLIKE match on regular expression, MAX_PER constrains number of "
"tasks per attribute value, UNIQUE is the same as MAX_PER,1",
action="append",
default=ARG_DEFAULTS["start"]["constraint"],
)
default_email = os.environ.get("EMAIL", None)
parser.add_argument(
"-E",
"--notification-email",
help=(
"Email address to send remote-run notifications to. "
"A notification will be sent when a task either succeeds or fails. "
"Defaults to env variable $EMAIL: "
)
+ (default_email if default_email else "(currently not set)"),
default=default_email,
)
default_retries = ARG_DEFAULTS["start"]["retries"]
parser.add_argument(
"-r",
"--retries",
help=(
"Number of times to retry if task fails at launch or at runtime. "
f"Default: {default_retries}"
),
type=int,
default=default_retries,
)
return parser
def add_stop_parser(subparser):
parser = subparser.add_parser("stop", help="Stop task subcommand")
add_common_args_to_parser(parser)
group = parser.add_mutually_exclusive_group(required=True)
group.add_argument(
"-R",
"--run-id",
help="ID of task to stop",
default=ARG_DEFAULTS["stop"]["run_id"],
)
group.add_argument(
"-F",
"--framework-id",
help=(
"ID of framework to stop. Must belong to remote-run of selected "
"service instance."
),
type=str,
default=ARG_DEFAULTS["stop"]["framework_id"],
)
return parser
def add_list_parser(subparser):
parser = subparser.add_parser("list", help="List tasks subcommand")
add_common_args_to_parser(parser)
return parser
def add_subparser(subparsers):
main_parser = subparsers.add_parser(
"remote-run",
help="Schedule Mesos to run adhoc command in context of a service",
        description=(
            "`paasta remote-run` is useful for running adhoc commands in the "
            "context of a service's Docker image. The command will be "
            "scheduled on a Mesos cluster and stdout/stderr printed after "
            "execution is finished."
        ),
        epilog=(
            "Note: `paasta remote-run` uses the Mesos API, which may require "
            "authentication."
        ),
)
main_subs = main_parser.add_subparsers(
dest="action", help="Subcommands of remote-run"
)
add_start_parser(main_subs)
add_stop_parser(main_subs)
add_list_parser(main_subs)
main_parser.set_defaults(command=paasta_remote_run)
def split_constraints(constraints):
return [c.split(",", 2) for c in constraints]
def create_remote_run_command(args):
cmd_parts = ["/usr/bin/paasta_remote_run", args.action]
arg_vars = vars(args)
arg_defaults = dict(ARG_DEFAULTS[args.action]) # copy dict
arg_defaults.update(ARG_DEFAULTS["common"])
arg_defaults.pop("constraint") # needs conversion to json
for k in arg_vars.keys():
if k not in arg_defaults: # skip keys we don't know about
continue
v = arg_vars[k]
if v == arg_defaults[k]: # skip values that have default value
continue
k = re.sub(r"_", "-", k)
if isinstance(v, bool) and v:
cmd_parts.append(f"--{k}")
else:
cmd_parts.extend([f"--{k}", quote(str(v))])
# constraint, convert to json
if len(arg_vars["constraint"]) > 0:
constraints = split_constraints(arg_vars["constraint"])
cmd_parts.extend(["--constraints-json", quote(json.dumps(constraints))])
return cmd_parts
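# Illustrative example (values are hypothetical): with action="start",
# service="example_service", instance="main", cluster="example-cluster",
# $EMAIL unset and all other options left at their defaults,
# create_remote_run_command produces roughly (flag order follows the
# parsed-argument order):
#   ['/usr/bin/paasta_remote_run', 'start',
#    '--service', 'example_service', '--instance', 'main',
#    '--cluster', 'example-cluster']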
def paasta_remote_run(args):
system_paasta_config = get_system_paasta_config()
if not args.cluster:
default_cluster = system_paasta_config.get_remote_run_config().get(
"default_cluster"
)
if not default_cluster:
print(
PaastaColors.red(
"Error: no cluster specified and no default cluster available"
)
)
return 1
args.cluster = default_cluster
cmd_parts = create_remote_run_command(args)
graceful_exit = args.action == "start" and not args.detach
return_code, status = run_on_master(
cluster=args.cluster,
system_paasta_config=system_paasta_config,
cmd_parts=cmd_parts,
graceful_exit=graceful_exit,
)
# Status results are streamed. This print is for possible error messages.
if status is not None:
for line in status.rstrip().split("\n"):
print(" %s" % line)
return return_code
|
from datetime import timedelta
import pytest
import zigpy.profiles.zha as zha
import zigpy.types
import zigpy.zcl.clusters.general as general
import zigpy.zcl.clusters.lighting as lighting
import zigpy.zcl.foundation as zcl_f
from homeassistant.components.light import DOMAIN, FLASH_LONG, FLASH_SHORT
from homeassistant.components.zha.core.group import GroupMember
from homeassistant.components.zha.light import FLASH_EFFECTS
from homeassistant.const import STATE_OFF, STATE_ON, STATE_UNAVAILABLE
import homeassistant.util.dt as dt_util
from .common import (
async_enable_traffic,
async_find_group_entity_id,
async_test_rejoin,
find_entity_id,
get_zha_gateway,
send_attributes_report,
)
from tests.async_mock import AsyncMock, MagicMock, call, patch, sentinel
from tests.common import async_fire_time_changed
ON = 1
OFF = 0
IEEE_GROUPABLE_DEVICE = "01:2d:6f:00:0a:90:69:e8"
IEEE_GROUPABLE_DEVICE2 = "02:2d:6f:00:0a:90:69:e9"
IEEE_GROUPABLE_DEVICE3 = "03:2d:6f:00:0a:90:69:e7"
LIGHT_ON_OFF = {
1: {
"device_type": zha.DeviceType.ON_OFF_LIGHT,
"in_clusters": [
general.Basic.cluster_id,
general.Identify.cluster_id,
general.OnOff.cluster_id,
],
"out_clusters": [general.Ota.cluster_id],
}
}
LIGHT_LEVEL = {
1: {
"device_type": zha.DeviceType.DIMMABLE_LIGHT,
"in_clusters": [
general.Basic.cluster_id,
general.LevelControl.cluster_id,
general.OnOff.cluster_id,
],
"out_clusters": [general.Ota.cluster_id],
}
}
LIGHT_COLOR = {
1: {
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
"in_clusters": [
general.Basic.cluster_id,
general.Identify.cluster_id,
general.LevelControl.cluster_id,
general.OnOff.cluster_id,
lighting.Color.cluster_id,
],
"out_clusters": [general.Ota.cluster_id],
}
}
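# The dicts above are zigpy device signatures: keys are endpoint ids, and each
# endpoint declares its input/output cluster ids plus a ZHA device type. They
# are passed to the zigpy_device_mock fixture (assumed to be provided by the
# ZHA test conftest) to build the fake devices used in the tests below.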
@pytest.fixture
async def coordinator(hass, zigpy_device_mock, zha_device_joined):
"""Test zha light platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [general.Groups.cluster_id],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee="00:15:8d:00:02:32:4f:32",
nwk=0x0000,
node_descriptor=b"\xf8\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff",
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def device_light_1(hass, zigpy_device_mock, zha_device_joined):
"""Test zha light platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [
general.OnOff.cluster_id,
general.LevelControl.cluster_id,
lighting.Color.cluster_id,
general.Groups.cluster_id,
general.Identify.cluster_id,
],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee=IEEE_GROUPABLE_DEVICE,
nwk=0xB79D,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def device_light_2(hass, zigpy_device_mock, zha_device_joined):
"""Test zha light platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [
general.OnOff.cluster_id,
general.LevelControl.cluster_id,
lighting.Color.cluster_id,
general.Groups.cluster_id,
general.Identify.cluster_id,
],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee=IEEE_GROUPABLE_DEVICE2,
nwk=0xC79E,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@pytest.fixture
async def device_light_3(hass, zigpy_device_mock, zha_device_joined):
"""Test zha light platform."""
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [
general.OnOff.cluster_id,
general.LevelControl.cluster_id,
lighting.Color.cluster_id,
general.Groups.cluster_id,
general.Identify.cluster_id,
],
"out_clusters": [],
"device_type": zha.DeviceType.COLOR_DIMMABLE_LIGHT,
}
},
ieee=IEEE_GROUPABLE_DEVICE3,
nwk=0xB89F,
)
zha_device = await zha_device_joined(zigpy_device)
zha_device.available = True
return zha_device
@patch("zigpy.zcl.clusters.general.OnOff.read_attributes", new=MagicMock())
async def test_light_refresh(hass, zigpy_device_mock, zha_device_joined_restored):
"""Test zha light platform refresh."""
# create zigpy devices
zigpy_device = zigpy_device_mock(LIGHT_ON_OFF)
zha_device = await zha_device_joined_restored(zigpy_device)
on_off_cluster = zigpy_device.endpoints[1].on_off
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [zha_device])
on_off_cluster.read_attributes.reset_mock()
# not enough time passed
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=20))
await hass.async_block_till_done()
assert on_off_cluster.read_attributes.call_count == 0
assert on_off_cluster.read_attributes.await_count == 0
assert hass.states.get(entity_id).state == STATE_OFF
# 1 interval - 1 call
on_off_cluster.read_attributes.return_value = [{"on_off": 1}, {}]
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=80))
await hass.async_block_till_done()
assert on_off_cluster.read_attributes.call_count == 1
assert on_off_cluster.read_attributes.await_count == 1
assert hass.states.get(entity_id).state == STATE_ON
# 2 intervals - 2 calls
on_off_cluster.read_attributes.return_value = [{"on_off": 0}, {}]
async_fire_time_changed(hass, dt_util.utcnow() + timedelta(minutes=80))
await hass.async_block_till_done()
assert on_off_cluster.read_attributes.call_count == 2
assert on_off_cluster.read_attributes.await_count == 2
assert hass.states.get(entity_id).state == STATE_OFF
@patch(
"zigpy.zcl.clusters.lighting.Color.request",
new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
"zigpy.zcl.clusters.general.Identify.request",
new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
"zigpy.zcl.clusters.general.LevelControl.request",
new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
"zigpy.zcl.clusters.general.OnOff.request",
new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@pytest.mark.parametrize(
"device, reporting",
[(LIGHT_ON_OFF, (1, 0, 0)), (LIGHT_LEVEL, (1, 1, 0)), (LIGHT_COLOR, (1, 1, 3))],
)
async def test_light(
hass, zigpy_device_mock, zha_device_joined_restored, device, reporting
):
"""Test zha light platform."""
# create zigpy devices
zigpy_device = zigpy_device_mock(device)
zha_device = await zha_device_joined_restored(zigpy_device)
entity_id = await find_entity_id(DOMAIN, zha_device, hass)
assert entity_id is not None
cluster_on_off = zigpy_device.endpoints[1].on_off
cluster_level = getattr(zigpy_device.endpoints[1], "level", None)
cluster_color = getattr(zigpy_device.endpoints[1], "light_color", None)
cluster_identify = getattr(zigpy_device.endpoints[1], "identify", None)
assert hass.states.get(entity_id).state == STATE_OFF
await async_enable_traffic(hass, [zha_device], enabled=False)
# test that the lights were created and that they are unavailable
assert hass.states.get(entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [zha_device])
# test that the lights were created and are off
assert hass.states.get(entity_id).state == STATE_OFF
# test turning the lights on and off from the light
await async_test_on_off_from_light(hass, cluster_on_off, entity_id)
# test turning the lights on and off from the HA
await async_test_on_off_from_hass(hass, cluster_on_off, entity_id)
# test short flashing the lights from the HA
if cluster_identify:
await async_test_flash_from_hass(hass, cluster_identify, entity_id, FLASH_SHORT)
# test turning the lights on and off from the HA
if cluster_level:
await async_test_level_on_off_from_hass(
hass, cluster_on_off, cluster_level, entity_id
)
# test getting a brightness change from the network
await async_test_on_from_light(hass, cluster_on_off, entity_id)
await async_test_dimmer_from_light(
hass, cluster_level, entity_id, 150, STATE_ON
)
# test rejoin
await async_test_off_from_hass(hass, cluster_on_off, entity_id)
clusters = [cluster_on_off]
if cluster_level:
clusters.append(cluster_level)
if cluster_color:
clusters.append(cluster_color)
await async_test_rejoin(hass, zigpy_device, clusters, reporting)
# test long flashing the lights from the HA
if cluster_identify:
await async_test_flash_from_hass(hass, cluster_identify, entity_id, FLASH_LONG)
async def async_test_on_off_from_light(hass, cluster, entity_id):
"""Test on off functionality from the light."""
# turn on at light
await send_attributes_report(hass, cluster, {1: 0, 0: 1, 2: 3})
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ON
# turn off at light
await send_attributes_report(hass, cluster, {1: 1, 0: 0, 2: 3})
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_OFF
async def async_test_on_from_light(hass, cluster, entity_id):
"""Test on off functionality from the light."""
# turn on at light
await send_attributes_report(hass, cluster, {1: -1, 0: 1, 2: 2})
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == STATE_ON
async def async_test_on_off_from_hass(hass, cluster, entity_id):
"""Test on off functionality from hass."""
# turn on via UI
cluster.request.reset_mock()
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.await_count == 1
assert cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None, tsn=None
)
await async_test_off_from_hass(hass, cluster, entity_id)
async def async_test_off_from_hass(hass, cluster, entity_id):
"""Test turning off the light from Home Assistant."""
# turn off via UI
cluster.request.reset_mock()
await hass.services.async_call(
DOMAIN, "turn_off", {"entity_id": entity_id}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.await_count == 1
assert cluster.request.call_args == call(
False, OFF, (), expect_reply=True, manufacturer=None, tsn=None
)
async def async_test_level_on_off_from_hass(
hass, on_off_cluster, level_cluster, entity_id
):
"""Test on off functionality from hass."""
on_off_cluster.request.reset_mock()
level_cluster.request.reset_mock()
# turn on via UI
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id}, blocking=True
)
assert on_off_cluster.request.call_count == 1
assert on_off_cluster.request.await_count == 1
assert level_cluster.request.call_count == 0
assert level_cluster.request.await_count == 0
assert on_off_cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None, tsn=None
)
on_off_cluster.request.reset_mock()
level_cluster.request.reset_mock()
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id, "transition": 10}, blocking=True
)
assert on_off_cluster.request.call_count == 1
assert on_off_cluster.request.await_count == 1
assert level_cluster.request.call_count == 1
assert level_cluster.request.await_count == 1
assert on_off_cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None, tsn=None
)
assert level_cluster.request.call_args == call(
False,
4,
(zigpy.types.uint8_t, zigpy.types.uint16_t),
254,
100.0,
expect_reply=True,
manufacturer=None,
tsn=None,
)
on_off_cluster.request.reset_mock()
level_cluster.request.reset_mock()
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id, "brightness": 10}, blocking=True
)
assert on_off_cluster.request.call_count == 1
assert on_off_cluster.request.await_count == 1
assert level_cluster.request.call_count == 1
assert level_cluster.request.await_count == 1
assert on_off_cluster.request.call_args == call(
False, ON, (), expect_reply=True, manufacturer=None, tsn=None
)
assert level_cluster.request.call_args == call(
False,
4,
(zigpy.types.uint8_t, zigpy.types.uint16_t),
10,
1,
expect_reply=True,
manufacturer=None,
tsn=None,
)
on_off_cluster.request.reset_mock()
level_cluster.request.reset_mock()
await async_test_off_from_hass(hass, on_off_cluster, entity_id)
async def async_test_dimmer_from_light(hass, cluster, entity_id, level, expected_state):
"""Test dimmer functionality from the light."""
await send_attributes_report(
hass, cluster, {1: level + 10, 0: level, 2: level - 10 or 22}
)
await hass.async_block_till_done()
assert hass.states.get(entity_id).state == expected_state
# hass uses None for brightness of 0 in state attributes
if level == 0:
level = None
assert hass.states.get(entity_id).attributes.get("brightness") == level
async def async_test_flash_from_hass(hass, cluster, entity_id, flash):
"""Test flash functionality from hass."""
# turn on via UI
cluster.request.reset_mock()
await hass.services.async_call(
DOMAIN, "turn_on", {"entity_id": entity_id, "flash": flash}, blocking=True
)
assert cluster.request.call_count == 1
assert cluster.request.await_count == 1
assert cluster.request.call_args == call(
False,
64,
(zigpy.types.uint8_t, zigpy.types.uint8_t),
FLASH_EFFECTS[flash],
0,
expect_reply=True,
manufacturer=None,
tsn=None,
)
@patch(
"zigpy.zcl.clusters.lighting.Color.request",
new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
"zigpy.zcl.clusters.general.Identify.request",
new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
"zigpy.zcl.clusters.general.LevelControl.request",
new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
@patch(
"zigpy.zcl.clusters.general.OnOff.request",
new=AsyncMock(return_value=[sentinel.data, zcl_f.Status.SUCCESS]),
)
async def test_zha_group_light_entity(
hass, device_light_1, device_light_2, device_light_3, coordinator
):
"""Test the light entity for a ZHA group."""
zha_gateway = get_zha_gateway(hass)
assert zha_gateway is not None
zha_gateway.coordinator_zha_device = coordinator
coordinator._zha_gateway = zha_gateway
device_light_1._zha_gateway = zha_gateway
device_light_2._zha_gateway = zha_gateway
member_ieee_addresses = [device_light_1.ieee, device_light_2.ieee]
members = [GroupMember(device_light_1.ieee, 1), GroupMember(device_light_2.ieee, 1)]
assert coordinator.is_coordinator
# test creating a group with 2 members
zha_group = await zha_gateway.async_create_zigpy_group("Test Group", members)
await hass.async_block_till_done()
assert zha_group is not None
assert len(zha_group.members) == 2
for member in zha_group.members:
assert member.device.ieee in member_ieee_addresses
assert member.group == zha_group
assert member.endpoint is not None
device_1_entity_id = await find_entity_id(DOMAIN, device_light_1, hass)
device_2_entity_id = await find_entity_id(DOMAIN, device_light_2, hass)
device_3_entity_id = await find_entity_id(DOMAIN, device_light_3, hass)
assert (
device_1_entity_id != device_2_entity_id
and device_1_entity_id != device_3_entity_id
)
assert device_2_entity_id != device_3_entity_id
group_entity_id = async_find_group_entity_id(hass, DOMAIN, zha_group)
assert hass.states.get(group_entity_id) is not None
assert device_1_entity_id in zha_group.member_entity_ids
assert device_2_entity_id in zha_group.member_entity_ids
assert device_3_entity_id not in zha_group.member_entity_ids
group_cluster_on_off = zha_group.endpoint[general.OnOff.cluster_id]
group_cluster_level = zha_group.endpoint[general.LevelControl.cluster_id]
group_cluster_identify = zha_group.endpoint[general.Identify.cluster_id]
dev1_cluster_on_off = device_light_1.device.endpoints[1].on_off
dev2_cluster_on_off = device_light_2.device.endpoints[1].on_off
dev3_cluster_on_off = device_light_3.device.endpoints[1].on_off
dev1_cluster_level = device_light_1.device.endpoints[1].level
await async_enable_traffic(
hass, [device_light_1, device_light_2, device_light_3], enabled=False
)
await hass.async_block_till_done()
# test that the lights were created and that they are unavailable
assert hass.states.get(group_entity_id).state == STATE_UNAVAILABLE
# allow traffic to flow through the gateway and device
await async_enable_traffic(hass, [device_light_1, device_light_2, device_light_3])
await hass.async_block_till_done()
# test that the lights were created and are off
assert hass.states.get(group_entity_id).state == STATE_OFF
# test turning the lights on and off from the HA
await async_test_on_off_from_hass(hass, group_cluster_on_off, group_entity_id)
# test short flashing the lights from the HA
await async_test_flash_from_hass(
hass, group_cluster_identify, group_entity_id, FLASH_SHORT
)
# test turning the lights on and off from the light
await async_test_on_off_from_light(hass, dev1_cluster_on_off, group_entity_id)
# test turning the lights on and off from the HA
await async_test_level_on_off_from_hass(
hass, group_cluster_on_off, group_cluster_level, group_entity_id
)
# test getting a brightness change from the network
await async_test_on_from_light(hass, dev1_cluster_on_off, group_entity_id)
await async_test_dimmer_from_light(
hass, dev1_cluster_level, group_entity_id, 150, STATE_ON
)
# test long flashing the lights from the HA
await async_test_flash_from_hass(
hass, group_cluster_identify, group_entity_id, FLASH_LONG
)
assert len(zha_group.members) == 2
# test some of the group logic to make sure we key off states correctly
await send_attributes_report(hass, dev1_cluster_on_off, {0: 1})
await send_attributes_report(hass, dev2_cluster_on_off, {0: 1})
await hass.async_block_till_done()
# test that group light is on
assert hass.states.get(device_1_entity_id).state == STATE_ON
assert hass.states.get(device_2_entity_id).state == STATE_ON
assert hass.states.get(group_entity_id).state == STATE_ON
await send_attributes_report(hass, dev1_cluster_on_off, {0: 0})
await hass.async_block_till_done()
# test that group light is still on
assert hass.states.get(device_1_entity_id).state == STATE_OFF
assert hass.states.get(device_2_entity_id).state == STATE_ON
assert hass.states.get(group_entity_id).state == STATE_ON
await send_attributes_report(hass, dev2_cluster_on_off, {0: 0})
await hass.async_block_till_done()
# test that group light is now off
assert hass.states.get(device_1_entity_id).state == STATE_OFF
assert hass.states.get(device_2_entity_id).state == STATE_OFF
assert hass.states.get(group_entity_id).state == STATE_OFF
await send_attributes_report(hass, dev1_cluster_on_off, {0: 1})
await hass.async_block_till_done()
# test that group light is now back on
assert hass.states.get(device_1_entity_id).state == STATE_ON
assert hass.states.get(device_2_entity_id).state == STATE_OFF
assert hass.states.get(group_entity_id).state == STATE_ON
# turn it off to test a new member add being tracked
await send_attributes_report(hass, dev1_cluster_on_off, {0: 0})
await hass.async_block_till_done()
assert hass.states.get(device_1_entity_id).state == STATE_OFF
assert hass.states.get(device_2_entity_id).state == STATE_OFF
assert hass.states.get(group_entity_id).state == STATE_OFF
    # add a new member and test that its state is also tracked
await zha_group.async_add_members([GroupMember(device_light_3.ieee, 1)])
await send_attributes_report(hass, dev3_cluster_on_off, {0: 1})
await hass.async_block_till_done()
assert device_3_entity_id in zha_group.member_entity_ids
assert len(zha_group.members) == 3
assert hass.states.get(device_1_entity_id).state == STATE_OFF
assert hass.states.get(device_2_entity_id).state == STATE_OFF
assert hass.states.get(device_3_entity_id).state == STATE_ON
assert hass.states.get(group_entity_id).state == STATE_ON
# make the group have only 1 member and now there should be no entity
await zha_group.async_remove_members(
[GroupMember(device_light_2.ieee, 1), GroupMember(device_light_3.ieee, 1)]
)
assert len(zha_group.members) == 1
assert hass.states.get(group_entity_id) is None
assert device_2_entity_id not in zha_group.member_entity_ids
assert device_3_entity_id not in zha_group.member_entity_ids
# make sure the entity registry entry is still there
assert zha_gateway.ha_entity_registry.async_get(group_entity_id) is not None
# add a member back and ensure that the group entity was created again
await zha_group.async_add_members([GroupMember(device_light_3.ieee, 1)])
await send_attributes_report(hass, dev3_cluster_on_off, {0: 1})
await hass.async_block_till_done()
assert len(zha_group.members) == 2
assert hass.states.get(group_entity_id).state == STATE_ON
# add a 3rd member and ensure we still have an entity and we track the new one
await send_attributes_report(hass, dev1_cluster_on_off, {0: 0})
await send_attributes_report(hass, dev3_cluster_on_off, {0: 0})
await hass.async_block_till_done()
assert hass.states.get(group_entity_id).state == STATE_OFF
# this will test that _reprobe_group is used correctly
await zha_group.async_add_members(
[GroupMember(device_light_2.ieee, 1), GroupMember(coordinator.ieee, 1)]
)
await send_attributes_report(hass, dev2_cluster_on_off, {0: 1})
await hass.async_block_till_done()
assert len(zha_group.members) == 4
assert hass.states.get(group_entity_id).state == STATE_ON
await zha_group.async_remove_members([GroupMember(coordinator.ieee, 1)])
await hass.async_block_till_done()
assert hass.states.get(group_entity_id).state == STATE_ON
assert len(zha_group.members) == 3
# remove the group and ensure that there is no entity and that the entity registry is cleaned up
assert zha_gateway.ha_entity_registry.async_get(group_entity_id) is not None
await zha_gateway.async_remove_zigpy_group(zha_group.group_id)
assert hass.states.get(group_entity_id) is None
assert zha_gateway.ha_entity_registry.async_get(group_entity_id) is None
|
from operator import xor
# ----------------------------------------------------------------------------
def diff3(yourtext, origtext, theirtext):
"""Three-way diff based on the GNU diff3.c by R. Smith.
@param [in] yourtext Array of lines of your text.
@param [in] origtext Array of lines of original text.
@param [in] theirtext Array of lines of their text.
    @returns Array of tuples containing diff results. Each tuple is
        (cmd, lo0, hi0, lo1, hi1, lo2, hi2): 1-based inclusive line ranges in
        yourtext (lo0..hi0), theirtext (lo1..hi1) and origtext (lo2..hi2),
        where cmd is '0' (change in yourtext only), '1' (change in theirtext
        only), '2' (identical change on both sides) or 'A' (conflict).
"""
# diff result => [(cmd, loA, hiA, loB, hiB), ...]
d2 = (diff(origtext, yourtext), diff(origtext, theirtext))
d3 = []
r3 = [None, 0, 0, 0, 0, 0, 0]
while d2[0] or d2[1]:
# find a continual range in origtext lo2..hi2
# changed by yourtext or by theirtext.
#
# d2[0] 222 222222222
# origtext ...L!!!!!!!!!!!!!!!!!!!!H...
# d2[1] 222222 22 2222222
r2 = ([], [])
if not d2[0]: i = 1
else:
if not d2[1]: i = 0
else:
if d2[0][0][1] <= d2[1][0][1]: i = 0
else: i = 1
j = i
k = xor(i, 1)
hi = d2[j][0][2]
r2[j].append(d2[j].pop(0))
while d2[k] and d2[k][0][1] <= hi + 1:
hi_k = d2[k][0][2]
r2[k].append(d2[k].pop(0))
if hi < hi_k:
hi = hi_k
j = k
k = xor(k, 1)
lo2 = r2[i][0][1]
hi2 = r2[j][-1][2]
# take the corresponding ranges in yourtext lo0..hi0
# and in theirtext lo1..hi1.
#
# yourtext ..L!!!!!!!!!!!!!!!!!!!!!!!!!!!!H...
# d2[0] 222 222222222
# origtext ...00!1111!000!!00!111111...
# d2[1] 222222 22 2222222
# theirtext ...L!!!!!!!!!!!!!!!!H...
if r2[0]:
lo0 = r2[0][0][3] - r2[0][0][1] + lo2
hi0 = r2[0][-1][4] - r2[0][-1][2] + hi2
else:
lo0 = r3[2] - r3[6] + lo2
hi0 = r3[2] - r3[6] + hi2
if r2[1]:
lo1 = r2[1][0][3] - r2[1][0][1] + lo2
hi1 = r2[1][-1][4] - r2[1][-1][2] + hi2
else:
lo1 = r3[4] - r3[6] + lo2
hi1 = r3[4] - r3[6] + hi2
# detect type of changes
if not r2[0]:
cmd = '1'
elif not r2[1]:
cmd = '0'
elif hi0 - lo0 != hi1 - lo1:
cmd = 'A'
else:
cmd = '2'
for d in range(0, hi0 - lo0 + 1):
(i0, i1) = (lo0 + d - 1, lo1 + d - 1)
ok0 = (0 <= i0 and i0 < len(yourtext))
ok1 = (0 <= i1 and i1 < len(theirtext))
if xor(ok0, ok1) or (ok0 and yourtext[i0] != theirtext[i1]):
cmd = 'A'
break
d3.append((cmd, lo0, hi0, lo1, hi1, lo2, hi2))
return d3
# ----------------------------------------------------------------------------
def merge(yourtext, origtext, theirtext):
    """Three-way merge of line lists.

    Returns a dict with a 'conflict' count and the merged 'body' as a list of
    lines, with conflicting regions wrapped in <<<<<<< / ======= / >>>>>>>
    markers.
    """
    res = {'conflict': 0, 'body': []}
d3 = diff3(yourtext, origtext, theirtext)
text3 = (yourtext, theirtext, origtext)
i2 = 1
for r3 in d3:
for lineno in range(i2, r3[5]):
res['body'].append(text3[2][lineno - 1])
if r3[0] == '0':
for lineno in range(r3[1], r3[2] + 1):
res['body'].append(text3[0][lineno - 1])
elif r3[0] != 'A':
for lineno in range(r3[3], r3[4] + 1):
res['body'].append(text3[1][lineno - 1])
else:
res = _conflict_range(text3, r3, res)
i2 = r3[6] + 1
for lineno in range(i2, len(text3[2]) + 1):
res['body'].append(text3[2][lineno - 1])
return res
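# Illustrative example: when both sides add a different line after the same
# base text, merge() emits conflict markers (yourtext block first, then
# theirtext):
#   merge(["a\n", "b\n"], ["a\n"], ["a\n", "c\n"])
#   -> {'conflict': 1,
#       'body': ['a\n', '<<<<<<<\n', 'b\n', '=======\n', 'c\n', '>>>>>>>\n']}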
# ----------------------------------------------------------------------------
def _conflict_range(text3, r3, res):
    """Resolve an 'A' (conflict) range from diff3.

    Emits a full conflict block (including the original text between |||||||
    and =======) when the two sides actually differ from each other and the
    original range is non-empty; otherwise the two sides are merged line by
    line and only the sub-ranges that truly conflict are marked.
    """
    text_a = []  # their text
for i in range(r3[3], r3[4] + 1):
text_a.append(text3[1][i - 1])
text_b = [] # your text
for i in range(r3[1], r3[2] + 1):
text_b.append(text3[0][i - 1])
d = diff(text_a, text_b)
if _assoc_range(d, 'c') and r3[5] <= r3[6]:
res['conflict'] += 1
res['body'].append('<<<<<<<\n')
for lineno in range(r3[1], r3[2] + 1):
res['body'].append(text3[0][lineno - 1])
res['body'].append('|||||||\n')
for lineno in range(r3[5], r3[6] + 1):
res['body'].append(text3[2][lineno - 1])
res['body'].append('=======\n')
for lineno in range(r3[3], r3[4] + 1):
res['body'].append(text3[1][lineno - 1])
res['body'].append('>>>>>>>\n')
return res
ia = 1
for r2 in d:
for lineno in range(ia, r2[1]):
res['body'].append(text_a[lineno - 1])
if r2[0] == 'c':
res['conflict'] += 1
res['body'].append('<<<<<<<\n')
for lineno in range(r2[3], r2[4] + 1):
res['body'].append(text_b[lineno - 1])
res['body'].append('=======\n')
for lineno in range(r2[1], r2[2] + 1):
res['body'].append(text_a[lineno - 1])
res['body'].append('>>>>>>>\n')
elif r2[0] == 'a':
for lineno in range(r2[3], r2[4] + 1):
res['body'].append(text_b[lineno - 1])
ia = r2[2] + 1
    for lineno in range(ia, len(text_a) + 1):
res['body'].append(text_a[lineno - 1])
return res
# ----------------------------------------------------------------------------
def _assoc_range(diff, diff_type):
for d in diff:
if d[0] == diff_type: return d
return None
# ----------------------------------------------------------------------------
def _diff_heckel(text_a, text_b):
"""Two-way diff based on the algorithm by P. Heckel.
@param [in] text_a Array of lines of first text.
@param [in] text_b Array of lines of second text.
    @returns Array of tuples (cmd, lo_a, hi_a, lo_b, hi_b) with 1-based
        inclusive line ranges, where cmd is 'a' (lines added in text_b),
        'd' (lines deleted from text_a) or 'c' (lines changed).
"""
d = []
uniq = [(len(text_a), len(text_b))]
(freq, ap, bp) = ({}, {}, {})
    for i in range(len(text_a)):
        s = text_a[i]
        freq[s] = freq.get(s, 0) + 2
        ap[s] = i
    for i in range(len(text_b)):
        s = text_b[i]
        freq[s] = freq.get(s, 0) + 3
        bp[s] = i
for s, x in freq.items():
if x == 5: uniq.append((ap[s], bp[s]))
(freq, ap, bp) = ({}, {}, {})
uniq.sort(key=lambda x: x[0])
(a1, b1) = (0, 0)
while a1 < len(text_a) and b1 < len(text_b):
if text_a[a1] != text_b[b1]: break
a1 += 1
b1 += 1
for a_uniq, b_uniq in uniq:
if a_uniq < a1 or b_uniq < b1: continue
(a0, b0) = (a1, b1)
(a1, b1) = (a_uniq - 1, b_uniq - 1)
while a0 <= a1 and b0 <= b1:
if text_a[a1] != text_b[b1]: break
a1 -= 1
b1 -= 1
if a0 <= a1 and b0 <= b1:
d.append(('c', a0 + 1, a1 + 1, b0 + 1, b1 + 1))
elif a0 <= a1:
d.append(('d', a0 + 1, a1 + 1, b0 + 1, b0))
elif b0 <= b1:
d.append(('a', a0 + 1, a0, b0 + 1, b1 + 1))
(a1, b1) = (a_uniq + 1, b_uniq + 1)
while a1 < len(text_a) and b1 < len(text_b):
if text_a[a1] != text_b[b1]: break
a1 += 1
b1 += 1
return d
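# Illustrative example: a single changed line in the middle is reported as one
# 'c' (change) hunk with 1-based inclusive ranges in both texts:
#   _diff_heckel(["a\n", "b\n", "c\n"], ["a\n", "x\n", "c\n"])
#   -> [('c', 2, 2, 2, 2)]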
# ----------------------------------------------------------------------------
diff = _diff_heckel # default two-way diff function used by diff3()
|
from collections import Counter
import numpy as np
from scattertext.Common import DEFAULT_PMI_THRESHOLD_COEFFICIENT
def filter_bigrams_by_pmis(word_freq_df,
threshold_coef=DEFAULT_PMI_THRESHOLD_COEFFICIENT):
# type: (pd.DataFrame, int) -> pd.DataFrame
if len(word_freq_df.index) == 0:
return word_freq_df
low_pmi_bigrams = get_low_pmi_bigrams(threshold_coef, word_freq_df)
return word_freq_df.drop(low_pmi_bigrams.index)
def filter_out_unigrams_that_only_occur_in_one_bigram(df):
# type: (pd.DataFrame) -> pd.DataFrame
bigrams = {bigram for bigram in df.index if ' ' in bigram}
unigrams_to_remove = unigrams_that_only_occur_in_one_bigram(bigrams)
return df.drop(unigrams_to_remove)
def unigrams_that_only_occur_in_one_bigram(bigrams):
# type: (set) -> set
tok_bigram_counts = Counter()
for bigram in bigrams:
for tok in bigram.split():
tok_bigram_counts[tok] += 1
return {tok for tok, count in tok_bigram_counts.items() if count == 1}
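# Illustrative example: unigrams_that_only_occur_in_one_bigram({'the cat', 'the dog'})
# returns {'cat', 'dog'}; 'the' appears in two bigrams, so it is kept.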
def get_low_pmi_bigrams(threshold_coef, word_freq_df):
# type: (float, pd.DataFrame) -> object
is_bigram = np.array([' ' in word for word in word_freq_df.index])
unigram_freq = word_freq_df[~is_bigram].sum(axis=1)
bigram_freq = word_freq_df[is_bigram].sum(axis=1)
bigram_prob = bigram_freq / bigram_freq.sum()
unigram_prob = unigram_freq / unigram_freq.sum()
def get_pmi(bigram):
try:
return np.log(
bigram_prob[bigram] / np.product([unigram_prob[word] for word in bigram.split(' ')])
) / np.log(2)
except:
return 0
low_pmi_bigrams = bigram_prob[bigram_prob.index.map(get_pmi) < threshold_coef * 2]
return low_pmi_bigrams
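# Note: get_pmi above computes PMI(w1 w2) = log2(P(w1 w2) / (P(w1) * P(w2))),
# and a bigram is flagged as low-PMI when that value is below 2 * threshold_coef.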
class AtLeastOneCategoryHasNoTermsException(Exception):
pass
class TermDocMatrixFilter(object):
'''
Filter out terms below a particular frequency or pmi threshold.
'''
def __init__(self,
pmi_threshold_coef=DEFAULT_PMI_THRESHOLD_COEFFICIENT,
minimum_term_freq=3):
'''
Parameters
----------
pmi_threshold_coef : float
Bigram filtering threshold (2 * PMI). Default 2.
minimum_term_freq : int
Minimum number of times term has to appear. Default 3.
'''
self._threshold_coef = pmi_threshold_coef
self._min_freq = minimum_term_freq
def filter(self, term_doc_matrix):
'''
Parameters
----------
term_doc_matrix : TermDocMatrix
Returns
-------
        TermDocMatrix pmi-filtered term doc matrix
'''
df = term_doc_matrix.get_term_freq_df()
if len(df) == 0:
return term_doc_matrix
low_pmi_bigrams = get_low_pmi_bigrams(self._threshold_coef, df).index
infrequent_terms = df[df.sum(axis=1) < self._min_freq].index
filtered_term_doc_mat = term_doc_matrix.remove_terms(set(low_pmi_bigrams | infrequent_terms))
try:
filtered_term_doc_mat.get_term_freq_df()
except ValueError:
raise AtLeastOneCategoryHasNoTermsException()
return filtered_term_doc_mat
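# Illustrative usage (term_doc_matrix is assumed to be a scattertext
# TermDocMatrix instance):
#   filtered = TermDocMatrixFilter(pmi_threshold_coef=3,
#                                  minimum_term_freq=5).filter(term_doc_matrix)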
|
from abc import ABCMeta, abstractmethod
class Broker(object):
"""
This abstract class provides an interface to a
generic broker entity. Both simulated and live brokers
will be derived from this ABC. This ensures that trading
algorithm specific logic is completely identical for both
simulated and live environments.
The Broker has an associated master denominated currency
through which all subscriptions and withdrawals will occur.
The Broker entity can support multiple sub-portfolios, each
with their own separate handling of PnL. The individual PnLs
from each sub-portfolio can be aggregated to generate an
account-wide PnL.
The Broker can execute orders. It contains a queue of
open orders, needed for handling closed market situations.
The Broker also supports individual history events for each
sub-portfolio, which can be aggregated, along with the
account history, to produce a full trading history for the
account.
"""
__metaclass__ = ABCMeta
@abstractmethod
def subscribe_funds_to_account(self, amount):
raise NotImplementedError(
"Should implement subscribe_funds_to_account()"
)
@abstractmethod
def withdraw_funds_from_account(self, amount):
raise NotImplementedError(
"Should implement withdraw_funds_from_account()"
)
@abstractmethod
def get_account_cash_balance(self, currency=None):
raise NotImplementedError(
"Should implement get_account_cash_balance()"
)
@abstractmethod
def get_account_total_non_cash_equity(self):
raise NotImplementedError(
"Should implement get_account_total_non_cash_equity()"
)
@abstractmethod
def get_account_total_equity(self):
raise NotImplementedError(
"Should implement get_account_total_equity()"
)
@abstractmethod
def create_portfolio(self, portfolio_id, name):
raise NotImplementedError(
"Should implement create_portfolio()"
)
@abstractmethod
def list_all_portfolios(self):
raise NotImplementedError(
"Should implement list_all_portfolios()"
)
@abstractmethod
def subscribe_funds_to_portfolio(self, portfolio_id, amount):
raise NotImplementedError(
"Should implement subscribe_funds_to_portfolio()"
)
@abstractmethod
def withdraw_funds_from_portfolio(self, portfolio_id, amount):
raise NotImplementedError(
"Should implement withdraw_funds_from_portfolio()"
)
@abstractmethod
def get_portfolio_cash_balance(self, portfolio_id):
raise NotImplementedError(
"Should implement get_portfolio_cash_balance()"
)
@abstractmethod
def get_portfolio_total_non_cash_equity(self, portfolio_id):
raise NotImplementedError(
"Should implement get_portfolio_total_non_cash_equity()"
)
@abstractmethod
def get_portfolio_total_equity(self, portfolio_id):
raise NotImplementedError(
"Should implement get_portfolio_total_equity()"
)
@abstractmethod
def get_portfolio_as_dict(self, portfolio_id):
raise NotImplementedError(
"Should implement get_portfolio_as_dict()"
)
@abstractmethod
def submit_order(self, portfolio_id, order):
raise NotImplementedError(
"Should implement submit_order()"
)
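# Minimal sketch (not part of the original module) of how a concrete Broker
# might look for a simulated, single-currency account. Only the account-level
# cash methods are filled in; the remaining abstract methods would still need
# real implementations before such a class is usable.
class ExampleSimulatedBroker(Broker):
    def __init__(self, initial_cash=0.0):
        self.cash = float(initial_cash)

    def subscribe_funds_to_account(self, amount):
        # Add externally subscribed funds to the master account.
        self.cash += amount

    def withdraw_funds_from_account(self, amount):
        # Withdraw funds, refusing to let the cash balance go negative.
        if amount > self.cash:
            raise ValueError("Insufficient account cash for withdrawal")
        self.cash -= amount

    def get_account_cash_balance(self, currency=None):
        # Single-currency sketch: the currency argument is ignored.
        return self.cash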
|
from functools import partial
from collections import OrderedDict
import numpy as np
from ..annotations import _annotations_starts_stops
from ..filter import create_filter
from ..io.pick import pick_types, _pick_data_channels, pick_info, pick_channels
from ..utils import verbose, _validate_type, _check_option
from ..time_frequency import psd_welch
from ..defaults import _handle_default
from .topo import _plot_topo, _plot_timeseries, _plot_timeseries_unified
from .utils import (plt_show, _compute_scalings, _handle_decim, _check_cov,
_shorten_path_from_middle,
_get_channel_plotting_order, _make_event_color_dict)
_RAW_CLIP_DEF = 1.5
@verbose
def plot_raw(raw, events=None, duration=10.0, start=0.0, n_channels=20,
bgcolor='w', color=None, bad_color=(0.8, 0.8, 0.8),
event_color='cyan', scalings=None, remove_dc=True, order=None,
show_options=False, title=None, show=True, block=False,
highpass=None, lowpass=None, filtorder=4,
clipping=_RAW_CLIP_DEF,
show_first_samp=False, proj=True, group_by='type',
butterfly=False, decim='auto', noise_cov=None, event_id=None,
show_scrollbars=True, show_scalebars=True, verbose=None):
"""Plot raw data.
Parameters
----------
raw : instance of Raw
The raw data to plot.
events : array | None
Events to show with vertical bars.
duration : float
Time window (s) to plot. The lesser of this value and the duration
of the raw file will be used.
start : float
Initial time to show (can be changed dynamically once plotted). If
show_first_samp is True, then it is taken relative to
``raw.first_samp``.
n_channels : int
Number of channels to plot at once. Defaults to 20. The lesser of
``n_channels`` and ``len(raw.ch_names)`` will be shown.
Has no effect if ``order`` is 'position', 'selection' or 'butterfly'.
bgcolor : color object
Color of the background.
color : dict | color object | None
Color for the data traces. If None, defaults to::
dict(mag='darkblue', grad='b', eeg='k', eog='k', ecg='m',
emg='k', ref_meg='steelblue', misc='k', stim='k',
resp='k', chpi='k')
bad_color : color object
Color to make bad channels.
%(event_color)s
Defaults to ``'cyan'``.
scalings : 'auto' | dict | None
Scaling factors for the traces. If any fields in scalings are 'auto',
the scaling factor is set to match the 99.5th percentile of a subset of
the corresponding data. If scalings == 'auto', all scalings fields are
set to 'auto'. If any fields are 'auto' and data is not preloaded, a
subset of times up to 100mb will be loaded. If None, defaults to::
dict(mag=1e-12, grad=4e-11, eeg=20e-6, eog=150e-6, ecg=5e-4,
emg=1e-3, ref_meg=1e-12, misc=1e-3, stim=1,
resp=1, chpi=1e-4, whitened=1e2)
remove_dc : bool
If True remove DC component when plotting data.
order : array of int | None
Order in which to plot data. If the array is shorter than the number of
channels, only the given channels are plotted. If None (default), all
channels are plotted. If ``group_by`` is ``'position'`` or
``'selection'``, the ``order`` parameter is used only for selecting the
channels to be plotted.
show_options : bool
If True, a dialog for options related to projection is shown.
title : str | None
        The title of the window. If None, either the filename of the raw
        object or '<unknown>' will be displayed as the title.
show : bool
Show figure if True.
block : bool
Whether to halt program execution until the figure is closed.
Useful for setting bad channels on the fly by clicking on a line.
May not work on all systems / platforms.
highpass : float | None
Highpass to apply when displaying data.
lowpass : float | None
Lowpass to apply when displaying data.
If highpass > lowpass, a bandstop rather than bandpass filter
will be applied.
filtorder : int
Filtering order. 0 will use FIR filtering with MNE defaults.
Other values will construct an IIR filter of the given order
and apply it with :func:`~scipy.signal.filtfilt` (making the effective
order twice ``filtorder``). Filtering may produce some edge artifacts
(at the left and right edges) of the signals during display.
.. versionchanged:: 0.18
Support for ``filtorder=0`` to use FIR filtering.
clipping : str | float | None
If None, channels are allowed to exceed their designated bounds in
the plot. If "clamp", then values are clamped to the appropriate
range for display, creating step-like artifacts. If "transparent",
then excessive values are not shown, creating gaps in the traces.
If float, clipping occurs for values beyond the ``clipping`` multiple
of their dedicated range, so ``clipping=1.`` is an alias for
``clipping='transparent'``.
.. versionchanged:: 0.21
Support for float, and default changed from None to 1.5.
show_first_samp : bool
If True, show time axis relative to the ``raw.first_samp``.
proj : bool
Whether to apply projectors prior to plotting (default is ``True``).
Individual projectors can be enabled/disabled interactively (see
Notes). This argument only affects the plot; use ``raw.apply_proj()``
to modify the data stored in the Raw object.
%(browse_group_by)s
butterfly : bool
Whether to start in butterfly mode. Defaults to False.
decim : int | 'auto'
Amount to decimate the data during display for speed purposes.
You should only decimate if the data are sufficiently low-passed,
otherwise aliasing can occur. The 'auto' mode (default) uses
        the decimation that results in a sampling rate at least three times
larger than ``min(info['lowpass'], lowpass)`` (e.g., a 40 Hz lowpass
will result in at least a 120 Hz displayed sample rate).
noise_cov : instance of Covariance | str | None
Noise covariance used to whiten the data while plotting.
Whitened data channels are scaled by ``scalings['whitened']``,
and their channel names are shown in italic.
Can be a string to load a covariance from disk.
See also :meth:`mne.Evoked.plot_white` for additional inspection
of noise covariance properties when whitening evoked data.
For data processed with SSS, the effective dependence between
        magnetometers and gradiometers may introduce differences in scaling;
        consider using :meth:`mne.Evoked.plot_white`.
.. versionadded:: 0.16.0
event_id : dict | None
Event IDs used to show at event markers (default None shows
the event numbers).
.. versionadded:: 0.16.0
%(show_scrollbars)s
show_scalebars : bool
Whether or not to show the scale bars. Defaults to True.
.. versionadded:: 0.20.0
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Raw traces.
Notes
-----
The arrow keys (up/down/left/right) can typically be used to navigate
between channels and time ranges, but this depends on the backend
matplotlib is configured to use (e.g., mpl.use('TkAgg') should work). The
left/right arrows will scroll by 25%% of ``duration``, whereas
shift+left/shift+right will scroll by 100%% of ``duration``. The scaling
can be adjusted with - and + (or =) keys. The viewport dimensions can be
adjusted with page up/page down and home/end keys. Full screen mode can be
toggled with the F11 key, and scrollbars can be hidden/shown by pressing
'z'. Right-click a channel label to view its location. To mark or un-mark a
channel as bad, click on a channel label or a channel trace. The changes
will be reflected immediately in the raw object's ``raw.info['bads']``
entry.
If projectors are present, a button labelled "Prj" in the lower right
corner of the plot window opens a secondary control window, which allows
enabling/disabling specific projectors individually. This provides a means
of interactively observing how each projector would affect the raw data if
it were applied.
Annotation mode is toggled by pressing 'a', butterfly mode by pressing
'b', and whitening mode (when ``noise_cov is not None``) by pressing 'w'.
By default, the channel means are removed when ``remove_dc`` is set to
``True``. This flag can be toggled by pressing 'd'.
"""
from ..io.base import BaseRaw
from ._figure import _browse_figure
info = raw.info.copy()
sfreq = info['sfreq']
projs = info['projs']
# this will be an attr for which projectors are currently "on" in the plot
projs_on = np.full_like(projs, proj, dtype=bool)
# disable projs in info if user doesn't want to see them right away
if not proj:
info['projs'] = list()
# handle defaults / check arg validity
color = _handle_default('color', color)
scalings = _compute_scalings(scalings, raw, remove_dc=remove_dc,
duration=duration)
if scalings['whitened'] == 'auto':
scalings['whitened'] = 1.
_validate_type(raw, BaseRaw, 'raw', 'Raw')
decim, picks_data = _handle_decim(info, decim, lowpass)
noise_cov = _check_cov(noise_cov, info)
units = _handle_default('units', None)
unit_scalings = _handle_default('scalings', None)
_check_option('group_by', group_by,
('selection', 'position', 'original', 'type'))
# clipping
_validate_type(clipping, (None, 'numeric', str), 'clipping')
if isinstance(clipping, str):
_check_option('clipping', clipping, ('clamp', 'transparent'),
extra='when a string')
clipping = 1. if clipping == 'transparent' else clipping
elif clipping is not None:
clipping = float(clipping)
# be forgiving if user asks for too much time
duration = min(raw.times[-1], float(duration))
# determine IIR filtering parameters
if highpass is not None and highpass <= 0:
raise ValueError(f'highpass must be > 0, got {highpass}')
if highpass is None and lowpass is None:
ba = filt_bounds = None
else:
filtorder = int(filtorder)
if filtorder == 0:
method = 'fir'
iir_params = None
else:
method = 'iir'
iir_params = dict(order=filtorder, output='sos', ftype='butter')
ba = create_filter(np.zeros((1, int(round(duration * sfreq)))),
sfreq, highpass, lowpass, method=method,
iir_params=iir_params)
filt_bounds = _annotations_starts_stops(
raw, ('edge', 'bad_acq_skip'), invert=True)
# compute event times in seconds
if events is not None:
event_times = (events[:, 0] - raw.first_samp).astype(float)
event_times /= sfreq
event_nums = events[:, 2]
else:
event_times = event_nums = None
# determine trace order
ch_names = np.array(raw.ch_names)
ch_types = np.array(raw.get_channel_types())
order = _get_channel_plotting_order(order, ch_types)
n_channels = min(info['nchan'], n_channels, len(order))
# adjust order based on channel selection, if needed
selections = None
if group_by in ('selection', 'position'):
selections = _setup_channel_selections(raw, group_by, order)
order = np.concatenate(list(selections.values()))
default_selection = list(selections)[0]
n_channels = len(selections[default_selection])
# handle event colors
event_color_dict = _make_event_color_dict(event_color, events, event_id)
# handle first_samp
first_time = raw._first_time if show_first_samp else 0
start += first_time
event_id_rev = {v: k for k, v in (event_id or {}).items()}
# generate window title; allow instances without a filename (e.g., ICA)
if title is None:
title = '<unknown>'
fnames = raw._filenames.copy()
if len(fnames):
title = fnames.pop(0)
extra = f' ... (+ {len(fnames)} more)' if len(fnames) else ''
title = f'{title}{extra}'
if len(title) > 60:
title = _shorten_path_from_middle(title)
elif not isinstance(title, str):
raise TypeError(f'title must be None or a string, got a {type(title)}')
# gather parameters and initialize figure
params = dict(inst=raw,
info=info,
# channels and channel order
ch_names=ch_names,
ch_types=ch_types,
ch_order=order,
picks=order[:n_channels],
n_channels=n_channels,
picks_data=picks_data,
group_by=group_by,
ch_selections=selections,
# time
t_start=start,
duration=duration,
n_times=raw.n_times,
first_time=first_time,
decim=decim,
# events
event_color_dict=event_color_dict,
event_times=event_times,
event_nums=event_nums,
event_id_rev=event_id_rev,
# preprocessing
projs=projs,
projs_on=projs_on,
apply_proj=proj,
remove_dc=remove_dc,
filter_coefs=ba,
filter_bounds=filt_bounds,
noise_cov=noise_cov,
# scalings
scalings=scalings,
units=units,
unit_scalings=unit_scalings,
# colors
ch_color_bad=bad_color,
ch_color_dict=color,
# display
butterfly=butterfly,
clipping=clipping,
scrollbars_visible=show_scrollbars,
scalebars_visible=show_scalebars,
window_title=title)
fig = _browse_figure(**params)
fig._update_picks()
# make channel selection dialog, if requested (doesn't work well in init)
if group_by in ('selection', 'position'):
fig._create_selection_fig()
# update projector and data, and plot
fig._update_projector()
fig._update_trace_offsets()
fig._update_data()
fig._draw_traces()
# plot annotations (if any)
fig._setup_annotation_colors()
fig._draw_annotations()
# start with projectors dialog open, if requested
if show_options:
fig._toggle_proj_fig()
# for blitting
fig.canvas.flush_events()
fig.mne.bg = fig.canvas.copy_from_bbox(fig.bbox)
plt_show(show, block=block)
return fig
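# Illustrative usage (assumes an existing mne.io.Raw instance named ``raw``):
#   fig = plot_raw(raw, duration=30.0, n_channels=30, scalings='auto', block=True)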
@verbose
def plot_raw_psd(raw, fmin=0, fmax=np.inf, tmin=None, tmax=None, proj=False,
n_fft=None, n_overlap=0, reject_by_annotation=True,
picks=None, ax=None, color='black', xscale='linear',
area_mode='std', area_alpha=0.33, dB=True, estimate='auto',
show=True, n_jobs=1, average=False, line_alpha=None,
spatial_colors=True, sphere=None, window='hamming',
verbose=None):
"""%(plot_psd_doc)s.
Parameters
----------
raw : instance of Raw
The raw object.
fmin : float
Start frequency to consider.
fmax : float
End frequency to consider.
tmin : float | None
Start time to consider.
tmax : float | None
End time to consider.
proj : bool
Apply projection.
n_fft : int | None
Number of points to use in Welch FFT calculations.
Default is None, which uses the minimum of 2048 and the
number of time points.
n_overlap : int
The number of points of overlap between blocks. The default value
is 0 (no overlap).
%(reject_by_annotation_raw)s
%(plot_psd_picks_good_data)s
ax : instance of Axes | None
Axes to plot into. If None, axes will be created.
%(plot_psd_color)s
%(plot_psd_xscale)s
%(plot_psd_area_mode)s
%(plot_psd_area_alpha)s
%(plot_psd_dB)s
%(plot_psd_estimate)s
%(show)s
%(n_jobs)s
%(plot_psd_average)s
%(plot_psd_line_alpha)s
%(plot_psd_spatial_colors)s
%(topomap_sphere_auto)s
%(window-psd)s
.. versionadded:: 0.22.0
%(verbose)s
Returns
-------
fig : instance of Figure
Figure with frequency spectra of the data channels.
"""
from ._figure import _psd_figure
# handle FFT
if n_fft is None:
if tmax is None or not np.isfinite(tmax):
tmax = raw.times[-1]
tmin = 0. if tmin is None else tmin
n_fft = min(np.diff(raw.time_as_index([tmin, tmax]))[0] + 1, 2048)
# generate figure
fig = _psd_figure(
inst=raw, proj=proj, picks=picks, axes=ax, tmin=tmin, tmax=tmax,
fmin=fmin, fmax=fmax, sphere=sphere, xscale=xscale, dB=dB,
average=average, estimate=estimate, area_mode=area_mode,
line_alpha=line_alpha, area_alpha=area_alpha, color=color,
spatial_colors=spatial_colors, n_jobs=n_jobs, n_fft=n_fft,
n_overlap=n_overlap, reject_by_annotation=reject_by_annotation,
window=window)
plt_show(show)
return fig
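# Illustrative usage (assumes the same ``raw`` instance as above):
#   fig = plot_raw_psd(raw, fmin=1.0, fmax=40.0, average=True)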
@verbose
def plot_raw_psd_topo(raw, tmin=0., tmax=None, fmin=0., fmax=100., proj=False,
n_fft=2048, n_overlap=0, layout=None, color='w',
fig_facecolor='k', axis_facecolor='k', dB=True,
show=True, block=False, n_jobs=1, axes=None,
verbose=None):
"""Plot channel-wise frequency spectra as topography.
Parameters
----------
raw : instance of io.Raw
The raw instance to use.
tmin : float
Start time for calculations. Defaults to zero.
tmax : float | None
End time for calculations. If None (default), the end of data is used.
fmin : float
Start frequency to consider. Defaults to zero.
fmax : float
End frequency to consider. Defaults to 100.
proj : bool
Apply projection. Defaults to False.
n_fft : int
Number of points to use in Welch FFT calculations. Defaults to 2048.
n_overlap : int
The number of points of overlap between blocks. Defaults to 0
(no overlap).
layout : instance of Layout | None
Layout instance specifying sensor positions (does not need to be
specified for Neuromag data). If None (default), the correct layout is
inferred from the data.
color : str | tuple
A matplotlib-compatible color to use for the curves. Defaults to white.
fig_facecolor : str | tuple
A matplotlib-compatible color to use for the figure background.
Defaults to black.
axis_facecolor : str | tuple
A matplotlib-compatible color to use for the axis background.
Defaults to black.
dB : bool
If True, transform data to decibels. Defaults to True.
show : bool
Show figure if True. Defaults to True.
block : bool
Whether to halt program execution until the figure is closed.
May not work on all systems / platforms. Defaults to False.
%(n_jobs)s
axes : instance of matplotlib Axes | None
Axes to plot into. If None, axes will be created.
%(verbose)s
Returns
-------
fig : instance of matplotlib.figure.Figure
Figure distributing one image per channel across sensor topography.
"""
if layout is None:
from ..channels.layout import find_layout
layout = find_layout(raw.info)
psds, freqs = psd_welch(raw, tmin=tmin, tmax=tmax, fmin=fmin,
fmax=fmax, proj=proj, n_fft=n_fft,
n_overlap=n_overlap, n_jobs=n_jobs)
if dB:
psds = 10 * np.log10(psds)
y_label = 'dB'
else:
y_label = 'Power'
show_func = partial(_plot_timeseries_unified, data=[psds], color=color,
times=[freqs])
click_func = partial(_plot_timeseries, data=[psds], color=color,
times=[freqs])
picks = _pick_data_channels(raw.info)
info = pick_info(raw.info, picks)
fig = _plot_topo(info, times=freqs, show_func=show_func,
click_func=click_func, layout=layout,
axis_facecolor=axis_facecolor,
fig_facecolor=fig_facecolor, x_label='Frequency (Hz)',
unified=True, y_label=y_label, axes=axes)
try:
plt_show(show, block=block)
except TypeError: # not all versions have this
plt_show(show)
return fig
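def _example_plot_raw_psd_topo(raw):
    """Hedged usage sketch added for illustration; not called by the module.

    Assumes ``raw`` is an instance of :class:`mne.io.Raw`; the frequency band
    below is an arbitrary illustrative choice.
    """
    # Restrict the spectra to 1-40 Hz and skip the blocking show() call.
    return plot_raw_psd_topo(raw, fmin=1., fmax=40., dB=True, show=False)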
def _setup_channel_selections(raw, kind, order):
"""Get dictionary of channel groupings."""
from ..selection import (read_selection, _SELECTIONS, _EEG_SELECTIONS,
_divide_to_regions)
from ..utils import _get_stim_channel
_check_option('group_by', kind, ('position', 'selection'))
if kind == 'position':
selections_dict = _divide_to_regions(raw.info)
keys = _SELECTIONS[1:] # omit 'Vertex'
else: # kind == 'selection'
from ..channels.channels import _get_ch_info
(has_vv_mag, has_vv_grad, *_, has_neuromag_122_grad, has_csd_coils
) = _get_ch_info(raw.info)
if not (has_vv_grad or has_vv_mag or has_neuromag_122_grad):
raise ValueError("order='selection' only works for Neuromag "
"data. Use order='position' instead.")
selections_dict = OrderedDict()
# get stim channel (if any)
stim_ch = _get_stim_channel(None, raw.info, raise_error=False)
stim_ch = stim_ch if len(stim_ch) else ['']
stim_ch = pick_channels(raw.ch_names, stim_ch)
# loop over regions
keys = np.concatenate([_SELECTIONS, _EEG_SELECTIONS])
for key in keys:
channels = read_selection(key, info=raw.info)
picks = pick_channels(raw.ch_names, channels)
picks = np.intersect1d(picks, order)
if not len(picks):
continue # omit empty selections
selections_dict[key] = np.concatenate([picks, stim_ch])
# add misc channels
misc = pick_types(raw.info, meg=False, eeg=False, stim=True, eog=True,
ecg=True, emg=True, ref_meg=False, misc=True,
resp=True, chpi=True, exci=True, ias=True, syst=True,
seeg=False, bio=True, ecog=False, fnirs=False,
exclude=())
if len(misc) and np.in1d(misc, order).any():
selections_dict['Misc'] = misc
return selections_dict
|
from collections.abc import Iterable
import os
import os.path as op
import logging
import tempfile
from threading import Thread
import time
import numpy as np
from .check import _check_option
from .config import get_config
from ._logging import logger
class ProgressBar(object):
"""Generate a command-line progressbar.
Parameters
----------
iterable : iterable | int | None
The iterable to use. Can also be an int for backward compatibility
(acts like ``max_value``).
initial_value : int
Initial value of process, useful when resuming process from a specific
value, defaults to 0.
mesg : str
Message to include at end of progress bar.
max_total_width : int | str
Maximum total message width. Can use "auto" (default) to try to set
a sane value based on the current terminal width.
max_value : int | None
The max value. If None, the length of ``iterable`` will be used.
**kwargs : dict
Additional keyword arguments for tqdm.
"""
def __init__(self, iterable=None, initial_value=0, mesg=None,
max_total_width='auto', max_value=None,
**kwargs): # noqa: D102
# The following mimics this, but with configurable module to use
# from ..externals.tqdm import auto
from ..externals import tqdm
which_tqdm = get_config('MNE_TQDM', 'tqdm.auto')
_check_option('MNE_TQDM', which_tqdm[:5], ('tqdm', 'tqdm.', 'off'),
extra='beginning')
logger.debug(f'Using ProgressBar with {which_tqdm}')
if which_tqdm not in ('tqdm', 'off'):
tqdm = getattr(tqdm, which_tqdm.split('.', 1)[1])
tqdm = tqdm.tqdm
defaults = dict(
leave=True, mininterval=0.016, miniters=1, smoothing=0.05,
bar_format='{percentage:3.0f}%|{bar}| {desc} : {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt:>11}{postfix}]', # noqa: E501
)
for key, val in defaults.items():
if key not in kwargs:
kwargs.update({key: val})
if isinstance(iterable, Iterable):
self.iterable = iterable
if max_value is None:
self.max_value = len(iterable)
else:
self.max_value = max_value
else: # ignore max_value then
self.max_value = int(iterable)
self.iterable = None
if max_total_width == 'auto':
max_total_width = None # tqdm's auto
with tempfile.NamedTemporaryFile('wb', prefix='tmp_mne_prog') as tf:
self._mmap_fname = tf.name
del tf # should remove the file
self._mmap = None
disable = logger.level > logging.INFO or which_tqdm == 'off'
self._tqdm = tqdm(
iterable=self.iterable, desc=mesg, total=self.max_value,
initial=initial_value, ncols=max_total_width,
disable=disable, **kwargs)
def update(self, cur_value):
"""Update progressbar with current value of process.
Parameters
----------
cur_value : number
Current value of process. Should be <= max_value (but this is not
enforced). The percent of the progressbar will be computed as
``(cur_value / max_value) * 100``.
"""
self.update_with_increment_value(cur_value - self._tqdm.n)
def update_with_increment_value(self, increment_value):
"""Update progressbar with an increment.
Parameters
----------
increment_value : int
            Value by which to increment the progress. The percent of the
            progressbar will be computed as
            ``((cur_value + increment_value) / max_value) * 100``.
"""
self._tqdm.update(increment_value)
def __iter__(self):
"""Iterate to auto-increment the pbar with 1."""
for x in self._tqdm:
yield x
def subset(self, idx):
"""Make a joblib-friendly index subset updater.
Parameters
----------
idx : ndarray
List of indices for this subset.
Returns
-------
updater : instance of PBSubsetUpdater
Class with a ``.update(ii)`` method.
"""
return _PBSubsetUpdater(self, idx)
def __enter__(self): # noqa: D105
# This should only be used with pb.subset and parallelization
if op.isfile(self._mmap_fname):
os.remove(self._mmap_fname)
# prevent corner cases where self.max_value == 0
self._mmap = np.memmap(self._mmap_fname, bool, 'w+',
shape=max(self.max_value, 1))
self.update(0) # must be zero as we just created the memmap
# We need to control how the pickled bars exit: remove print statements
self._thread = _UpdateThread(self)
self._thread.start()
return self
def __exit__(self, type_, value, traceback): # noqa: D105
# Restore exit behavior for our one from the main thread
self.update(self._mmap.sum())
self._tqdm.close()
self._thread._mne_run = False
self._thread.join()
self._mmap = None
if op.isfile(self._mmap_fname):
os.remove(self._mmap_fname)
def __del__(self):
"""Ensure output completes."""
if getattr(self, '_tqdm', None) is not None:
self._tqdm.close()
class _UpdateThread(Thread):
def __init__(self, pb):
super(_UpdateThread, self).__init__(daemon=True)
self._mne_run = True
self._mne_pb = pb
def run(self):
while self._mne_run:
self._mne_pb.update(self._mne_pb._mmap.sum())
time.sleep(1. / 30.) # 30 Hz refresh is plenty
class _PBSubsetUpdater(object):
def __init__(self, pb, idx):
self.mmap = pb._mmap
self.idx = idx
def update(self, ii):
self.mmap[self.idx[ii - 1]] = True
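def _example_progress_bar():
    """Hedged usage sketch of ProgressBar; illustration only, never called.

    Shows the two modes visible above: wrapping an iterable and driving the
    bar manually with ``update_with_increment_value``.
    """
    # Wrapping an iterable: each yielded item advances the bar by one.
    total = 0
    for value in ProgressBar(range(5), mesg='iterating'):
        total += value
    # Manual mode: pass a count and push increments as work completes.
    pb = ProgressBar(100, mesg='manual updates')
    for _ in range(10):
        pb.update_with_increment_value(10)
    return total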
|
from homeassistant.components.counter import (
DOMAIN,
SERVICE_DECREMENT,
SERVICE_INCREMENT,
SERVICE_RESET,
)
from homeassistant.const import ATTR_ENTITY_ID
from homeassistant.core import callback
from homeassistant.loader import bind_hass
@callback
@bind_hass
def async_increment(hass, entity_id):
"""Increment a counter."""
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_INCREMENT, {ATTR_ENTITY_ID: entity_id})
)
@callback
@bind_hass
def async_decrement(hass, entity_id):
"""Decrement a counter."""
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_DECREMENT, {ATTR_ENTITY_ID: entity_id})
)
@callback
@bind_hass
def async_reset(hass, entity_id):
"""Reset a counter."""
hass.async_add_job(
hass.services.async_call(DOMAIN, SERVICE_RESET, {ATTR_ENTITY_ID: entity_id})
)
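# Hedged usage sketch (illustration only): these helpers must be called from
# inside the running event loop, e.g. from another component's callback; the
# entity id below is hypothetical.
#
#     async_increment(hass, "counter.my_counter")
#     async_decrement(hass, "counter.my_counter")
#     async_reset(hass, "counter.my_counter")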
|
from functools import partial
import logging
from homeassistant.const import ATTR_BATTERY_LEVEL, STATE_OFF, STATE_ON
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.entity import Entity
from .const import CHILD_CALLBACK, NODE_CALLBACK, UPDATE_DELAY
_LOGGER = logging.getLogger(__name__)
ATTR_CHILD_ID = "child_id"
ATTR_DESCRIPTION = "description"
ATTR_DEVICE = "device"
ATTR_NODE_ID = "node_id"
ATTR_HEARTBEAT = "heartbeat"
MYSENSORS_PLATFORM_DEVICES = "mysensors_devices_{}"
def get_mysensors_devices(hass, domain):
"""Return MySensors devices for a platform."""
if MYSENSORS_PLATFORM_DEVICES.format(domain) not in hass.data:
hass.data[MYSENSORS_PLATFORM_DEVICES.format(domain)] = {}
return hass.data[MYSENSORS_PLATFORM_DEVICES.format(domain)]
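# Hedged usage sketch (illustration only): a platform setup function would
# typically fetch its per-platform registry like this and store new devices
# under the same (gateway id, node id, child id, value type) key used by
# ``MySensorsEntity.async_added_to_hass``; "sensor" is an illustrative domain.
#
#     devices = get_mysensors_devices(hass, "sensor")
#     devices[(id(gateway), node_id, child_id, value_type)] = device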
class MySensorsDevice:
"""Representation of a MySensors device."""
def __init__(self, gateway, node_id, child_id, name, value_type):
"""Set up the MySensors device."""
self.gateway = gateway
self.node_id = node_id
self.child_id = child_id
self._name = name
self.value_type = value_type
child = gateway.sensors[node_id].children[child_id]
self.child_type = child.type
self._values = {}
self._update_scheduled = False
self.hass = None
@property
def name(self):
"""Return the name of this entity."""
return self._name
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
node = self.gateway.sensors[self.node_id]
child = node.children[self.child_id]
attr = {
ATTR_BATTERY_LEVEL: node.battery_level,
ATTR_HEARTBEAT: node.heartbeat,
ATTR_CHILD_ID: self.child_id,
ATTR_DESCRIPTION: child.description,
ATTR_DEVICE: self.gateway.device,
ATTR_NODE_ID: self.node_id,
}
set_req = self.gateway.const.SetReq
for value_type, value in self._values.items():
attr[set_req(value_type).name] = value
return attr
async def async_update(self):
"""Update the controller with the latest value from a sensor."""
node = self.gateway.sensors[self.node_id]
child = node.children[self.child_id]
set_req = self.gateway.const.SetReq
for value_type, value in child.values.items():
_LOGGER.debug(
"Entity update: %s: value_type %s, value = %s",
self._name,
value_type,
value,
)
if value_type in (
set_req.V_ARMED,
set_req.V_LIGHT,
set_req.V_LOCK_STATUS,
set_req.V_TRIPPED,
):
self._values[value_type] = STATE_ON if int(value) == 1 else STATE_OFF
elif value_type == set_req.V_DIMMER:
self._values[value_type] = int(value)
else:
self._values[value_type] = value
async def _async_update_callback(self):
"""Update the device."""
raise NotImplementedError
@callback
def async_update_callback(self):
"""Update the device after delay."""
if self._update_scheduled:
return
async def update():
"""Perform update."""
try:
await self._async_update_callback()
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error updating %s", self.name)
finally:
self._update_scheduled = False
self._update_scheduled = True
delayed_update = partial(self.hass.async_create_task, update())
self.hass.loop.call_later(UPDATE_DELAY, delayed_update)
class MySensorsEntity(MySensorsDevice, Entity):
"""Representation of a MySensors entity."""
@property
def should_poll(self):
"""Return the polling state. The gateway pushes its states."""
return False
@property
def available(self):
"""Return true if entity is available."""
return self.value_type in self._values
async def _async_update_callback(self):
"""Update the entity."""
await self.async_update_ha_state(True)
async def async_added_to_hass(self):
"""Register update callback."""
gateway_id = id(self.gateway)
dev_id = gateway_id, self.node_id, self.child_id, self.value_type
self.async_on_remove(
async_dispatcher_connect(
self.hass, CHILD_CALLBACK.format(*dev_id), self.async_update_callback
)
)
self.async_on_remove(
async_dispatcher_connect(
self.hass,
NODE_CALLBACK.format(gateway_id, self.node_id),
self.async_update_callback,
)
)
|
from datetime import timedelta
from rokuecp import RokuError
from homeassistant.components.media_player import DEVICE_CLASS_RECEIVER, DEVICE_CLASS_TV
from homeassistant.components.media_player.const import (
ATTR_APP_ID,
ATTR_APP_NAME,
ATTR_INPUT_SOURCE,
ATTR_MEDIA_CHANNEL,
ATTR_MEDIA_CONTENT_ID,
ATTR_MEDIA_CONTENT_TYPE,
ATTR_MEDIA_DURATION,
ATTR_MEDIA_POSITION,
ATTR_MEDIA_TITLE,
ATTR_MEDIA_VOLUME_MUTED,
DOMAIN as MP_DOMAIN,
MEDIA_CLASS_APP,
MEDIA_CLASS_CHANNEL,
MEDIA_CLASS_DIRECTORY,
MEDIA_TYPE_APP,
MEDIA_TYPE_APPS,
MEDIA_TYPE_CHANNEL,
MEDIA_TYPE_CHANNELS,
SERVICE_PLAY_MEDIA,
SERVICE_SELECT_SOURCE,
SUPPORT_BROWSE_MEDIA,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SELECT_SOURCE,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
SUPPORT_VOLUME_MUTE,
SUPPORT_VOLUME_STEP,
)
from homeassistant.components.roku.const import ATTR_KEYWORD, DOMAIN, SERVICE_SEARCH
from homeassistant.components.websocket_api.const import TYPE_RESULT
from homeassistant.const import (
ATTR_ENTITY_ID,
SERVICE_MEDIA_NEXT_TRACK,
SERVICE_MEDIA_PAUSE,
SERVICE_MEDIA_PLAY,
SERVICE_MEDIA_PLAY_PAUSE,
SERVICE_MEDIA_PREVIOUS_TRACK,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
SERVICE_VOLUME_DOWN,
SERVICE_VOLUME_MUTE,
SERVICE_VOLUME_UP,
STATE_HOME,
STATE_IDLE,
STATE_ON,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
STATE_UNAVAILABLE,
)
from homeassistant.helpers.typing import HomeAssistantType
from homeassistant.util import dt as dt_util
from tests.async_mock import patch
from tests.common import async_fire_time_changed
from tests.components.roku import UPNP_SERIAL, setup_integration
from tests.test_util.aiohttp import AiohttpClientMocker
MAIN_ENTITY_ID = f"{MP_DOMAIN}.my_roku_3"
TV_ENTITY_ID = f"{MP_DOMAIN}.58_onn_roku_tv"
TV_HOST = "192.168.1.161"
TV_SERIAL = "YN00H5555555"
async def test_setup(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test setup with basic config."""
await setup_integration(hass, aioclient_mock)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
main = entity_registry.async_get(MAIN_ENTITY_ID)
assert hass.states.get(MAIN_ENTITY_ID)
assert main
assert main.device_class == DEVICE_CLASS_RECEIVER
assert main.unique_id == UPNP_SERIAL
async def test_idle_setup(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test setup with idle device."""
await setup_integration(hass, aioclient_mock, power=False)
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_STANDBY
async def test_tv_setup(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test Roku TV setup."""
await setup_integration(
hass,
aioclient_mock,
device="rokutv",
app="tvinput-dtv",
host=TV_HOST,
unique_id=TV_SERIAL,
)
entity_registry = await hass.helpers.entity_registry.async_get_registry()
tv = entity_registry.async_get(TV_ENTITY_ID)
assert hass.states.get(TV_ENTITY_ID)
assert tv
assert tv.device_class == DEVICE_CLASS_TV
assert tv.unique_id == TV_SERIAL
async def test_availability(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test entity availability."""
now = dt_util.utcnow()
future = now + timedelta(minutes=1)
with patch("homeassistant.util.dt.utcnow", return_value=now):
await setup_integration(hass, aioclient_mock)
with patch(
"homeassistant.components.roku.Roku.update", side_effect=RokuError
), patch("homeassistant.util.dt.utcnow", return_value=future):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(MAIN_ENTITY_ID).state == STATE_UNAVAILABLE
future += timedelta(minutes=1)
with patch("homeassistant.util.dt.utcnow", return_value=future):
async_fire_time_changed(hass, future)
await hass.async_block_till_done()
assert hass.states.get(MAIN_ENTITY_ID).state == STATE_HOME
async def test_supported_features(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test supported features."""
await setup_integration(hass, aioclient_mock)
# Features supported for Rokus
state = hass.states.get(MAIN_ENTITY_ID)
assert (
SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_SELECT_SOURCE
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_BROWSE_MEDIA
== state.attributes.get("supported_features")
)
async def test_tv_supported_features(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test supported features for Roku TV."""
await setup_integration(
hass,
aioclient_mock,
device="rokutv",
app="tvinput-dtv",
host=TV_HOST,
unique_id=TV_SERIAL,
)
state = hass.states.get(TV_ENTITY_ID)
assert (
SUPPORT_PREVIOUS_TRACK
| SUPPORT_NEXT_TRACK
| SUPPORT_VOLUME_STEP
| SUPPORT_VOLUME_MUTE
| SUPPORT_SELECT_SOURCE
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_PLAY_MEDIA
| SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_BROWSE_MEDIA
== state.attributes.get("supported_features")
)
async def test_attributes(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test attributes."""
await setup_integration(hass, aioclient_mock)
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_HOME
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
assert state.attributes.get(ATTR_APP_ID) is None
assert state.attributes.get(ATTR_APP_NAME) == "Roku"
assert state.attributes.get(ATTR_INPUT_SOURCE) == "Roku"
async def test_attributes_app(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test attributes for app."""
await setup_integration(hass, aioclient_mock, app="netflix")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_APP
assert state.attributes.get(ATTR_APP_ID) == "12"
assert state.attributes.get(ATTR_APP_NAME) == "Netflix"
assert state.attributes.get(ATTR_INPUT_SOURCE) == "Netflix"
async def test_attributes_app_media_playing(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test attributes for app with playing media."""
await setup_integration(hass, aioclient_mock, app="pluto", media_state="play")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_PLAYING
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_APP
assert state.attributes.get(ATTR_MEDIA_DURATION) == 6496
assert state.attributes.get(ATTR_MEDIA_POSITION) == 38
assert state.attributes.get(ATTR_APP_ID) == "74519"
assert state.attributes.get(ATTR_APP_NAME) == "Pluto TV - It's Free TV"
assert state.attributes.get(ATTR_INPUT_SOURCE) == "Pluto TV - It's Free TV"
async def test_attributes_app_media_paused(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test attributes for app with paused media."""
await setup_integration(hass, aioclient_mock, app="pluto", media_state="pause")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_PAUSED
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_APP
assert state.attributes.get(ATTR_MEDIA_DURATION) == 6496
assert state.attributes.get(ATTR_MEDIA_POSITION) == 313
assert state.attributes.get(ATTR_APP_ID) == "74519"
assert state.attributes.get(ATTR_APP_NAME) == "Pluto TV - It's Free TV"
assert state.attributes.get(ATTR_INPUT_SOURCE) == "Pluto TV - It's Free TV"
async def test_attributes_screensaver(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test attributes for app with screensaver."""
await setup_integration(hass, aioclient_mock, app="screensaver")
state = hass.states.get(MAIN_ENTITY_ID)
assert state.state == STATE_IDLE
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) is None
assert state.attributes.get(ATTR_APP_ID) is None
assert state.attributes.get(ATTR_APP_NAME) == "Roku"
assert state.attributes.get(ATTR_INPUT_SOURCE) == "Roku"
async def test_tv_attributes(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test attributes for Roku TV."""
await setup_integration(
hass,
aioclient_mock,
device="rokutv",
app="tvinput-dtv",
host=TV_HOST,
unique_id=TV_SERIAL,
)
state = hass.states.get(TV_ENTITY_ID)
assert state.state == STATE_ON
assert state.attributes.get(ATTR_APP_ID) == "tvinput.dtv"
assert state.attributes.get(ATTR_APP_NAME) == "Antenna TV"
assert state.attributes.get(ATTR_INPUT_SOURCE) == "Antenna TV"
assert state.attributes.get(ATTR_MEDIA_CONTENT_TYPE) == MEDIA_TYPE_CHANNEL
assert state.attributes.get(ATTR_MEDIA_CHANNEL) == "getTV (14.3)"
assert state.attributes.get(ATTR_MEDIA_TITLE) == "Airwolf"
async def test_services(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the different media player services."""
await setup_integration(hass, aioclient_mock)
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN, SERVICE_TURN_OFF, {ATTR_ENTITY_ID: MAIN_ENTITY_ID}, blocking=True
)
remote_mock.assert_called_once_with("poweroff")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: MAIN_ENTITY_ID}, blocking=True
)
remote_mock.assert_called_once_with("poweron")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_MEDIA_PAUSE,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID},
blocking=True,
)
remote_mock.assert_called_once_with("play")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_MEDIA_PLAY,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID},
blocking=True,
)
remote_mock.assert_called_once_with("play")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_MEDIA_PLAY_PAUSE,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID},
blocking=True,
)
remote_mock.assert_called_once_with("play")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_MEDIA_NEXT_TRACK,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID},
blocking=True,
)
remote_mock.assert_called_once_with("forward")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_MEDIA_PREVIOUS_TRACK,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID},
blocking=True,
)
remote_mock.assert_called_once_with("reverse")
with patch("homeassistant.components.roku.Roku.launch") as launch_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_PLAY_MEDIA,
{
ATTR_ENTITY_ID: MAIN_ENTITY_ID,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_APP,
ATTR_MEDIA_CONTENT_ID: "11",
},
blocking=True,
)
launch_mock.assert_called_once_with("11")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_SELECT_SOURCE,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_INPUT_SOURCE: "Home"},
blocking=True,
)
remote_mock.assert_called_once_with("home")
with patch("homeassistant.components.roku.Roku.launch") as launch_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_SELECT_SOURCE,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_INPUT_SOURCE: "Netflix"},
blocking=True,
)
launch_mock.assert_called_once_with("12")
with patch("homeassistant.components.roku.Roku.launch") as launch_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_SELECT_SOURCE,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_INPUT_SOURCE: 12},
blocking=True,
)
launch_mock.assert_called_once_with("12")
async def test_tv_services(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test the media player services related to Roku TV."""
await setup_integration(
hass,
aioclient_mock,
device="rokutv",
app="tvinput-dtv",
host=TV_HOST,
unique_id=TV_SERIAL,
)
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN, SERVICE_VOLUME_UP, {ATTR_ENTITY_ID: TV_ENTITY_ID}, blocking=True
)
remote_mock.assert_called_once_with("volume_up")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_VOLUME_DOWN,
{ATTR_ENTITY_ID: TV_ENTITY_ID},
blocking=True,
)
remote_mock.assert_called_once_with("volume_down")
with patch("homeassistant.components.roku.Roku.remote") as remote_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_VOLUME_MUTE,
{ATTR_ENTITY_ID: TV_ENTITY_ID, ATTR_MEDIA_VOLUME_MUTED: True},
blocking=True,
)
remote_mock.assert_called_once_with("volume_mute")
with patch("homeassistant.components.roku.Roku.tune") as tune_mock:
await hass.services.async_call(
MP_DOMAIN,
SERVICE_PLAY_MEDIA,
{
ATTR_ENTITY_ID: TV_ENTITY_ID,
ATTR_MEDIA_CONTENT_TYPE: MEDIA_TYPE_CHANNEL,
ATTR_MEDIA_CONTENT_ID: "55",
},
blocking=True,
)
tune_mock.assert_called_once_with("55")
async def test_media_browse(hass, aioclient_mock, hass_ws_client):
"""Test browsing media."""
await setup_integration(
hass,
aioclient_mock,
device="rokutv",
app="tvinput-dtv",
host=TV_HOST,
unique_id=TV_SERIAL,
)
client = await hass_ws_client(hass)
await client.send_json(
{
"id": 1,
"type": "media_player/browse_media",
"entity_id": TV_ENTITY_ID,
}
)
msg = await client.receive_json()
assert msg["id"] == 1
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]
assert msg["result"]["title"] == "Media Library"
assert msg["result"]["media_class"] == MEDIA_CLASS_DIRECTORY
assert msg["result"]["media_content_type"] == "library"
assert msg["result"]["can_expand"]
assert not msg["result"]["can_play"]
assert len(msg["result"]["children"]) == 2
# test apps
await client.send_json(
{
"id": 2,
"type": "media_player/browse_media",
"entity_id": TV_ENTITY_ID,
"media_content_type": MEDIA_TYPE_APPS,
"media_content_id": "apps",
}
)
msg = await client.receive_json()
assert msg["id"] == 2
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]
assert msg["result"]["title"] == "Apps"
assert msg["result"]["media_class"] == MEDIA_CLASS_DIRECTORY
assert msg["result"]["media_content_type"] == MEDIA_TYPE_APPS
assert msg["result"]["children_media_class"] == MEDIA_CLASS_APP
assert msg["result"]["can_expand"]
assert not msg["result"]["can_play"]
assert len(msg["result"]["children"]) == 11
assert msg["result"]["children_media_class"] == MEDIA_CLASS_APP
assert msg["result"]["children"][0]["title"] == "Satellite TV"
assert msg["result"]["children"][0]["media_content_type"] == MEDIA_TYPE_APP
assert msg["result"]["children"][0]["media_content_id"] == "tvinput.hdmi2"
assert (
msg["result"]["children"][0]["thumbnail"]
== "http://192.168.1.161:8060/query/icon/tvinput.hdmi2"
)
assert msg["result"]["children"][0]["can_play"]
assert msg["result"]["children"][3]["title"] == "Roku Channel Store"
assert msg["result"]["children"][3]["media_content_type"] == MEDIA_TYPE_APP
assert msg["result"]["children"][3]["media_content_id"] == "11"
assert (
msg["result"]["children"][3]["thumbnail"]
== "http://192.168.1.161:8060/query/icon/11"
)
assert msg["result"]["children"][3]["can_play"]
# test channels
await client.send_json(
{
"id": 3,
"type": "media_player/browse_media",
"entity_id": TV_ENTITY_ID,
"media_content_type": MEDIA_TYPE_CHANNELS,
"media_content_id": "channels",
}
)
msg = await client.receive_json()
assert msg["id"] == 3
assert msg["type"] == TYPE_RESULT
assert msg["success"]
assert msg["result"]
assert msg["result"]["title"] == "Channels"
assert msg["result"]["media_class"] == MEDIA_CLASS_DIRECTORY
assert msg["result"]["media_content_type"] == MEDIA_TYPE_CHANNELS
assert msg["result"]["children_media_class"] == MEDIA_CLASS_CHANNEL
assert msg["result"]["can_expand"]
assert not msg["result"]["can_play"]
assert len(msg["result"]["children"]) == 2
assert msg["result"]["children_media_class"] == MEDIA_CLASS_CHANNEL
assert msg["result"]["children"][0]["title"] == "WhatsOn"
assert msg["result"]["children"][0]["media_content_type"] == MEDIA_TYPE_CHANNEL
assert msg["result"]["children"][0]["media_content_id"] == "1.1"
assert msg["result"]["children"][0]["can_play"]
# test invalid media type
await client.send_json(
{
"id": 4,
"type": "media_player/browse_media",
"entity_id": TV_ENTITY_ID,
"media_content_type": "invalid",
"media_content_id": "invalid",
}
)
msg = await client.receive_json()
assert msg["id"] == 4
assert msg["type"] == TYPE_RESULT
assert not msg["success"]
async def test_integration_services(
hass: HomeAssistantType, aioclient_mock: AiohttpClientMocker
) -> None:
"""Test integration services."""
await setup_integration(hass, aioclient_mock)
with patch("homeassistant.components.roku.Roku.search") as search_mock:
await hass.services.async_call(
DOMAIN,
SERVICE_SEARCH,
{ATTR_ENTITY_ID: MAIN_ENTITY_ID, ATTR_KEYWORD: "Space Jam"},
blocking=True,
)
search_mock.assert_called_once_with("Space Jam")
|
import json
from absl import flags
from perfkitbenchmarker import disk
from perfkitbenchmarker import errors
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import GCP
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
PD_STANDARD = 'pd-standard'
PD_SSD = 'pd-ssd'
DISK_TYPE = {disk.STANDARD: PD_STANDARD, disk.REMOTE_SSD: PD_SSD}
DISK_METADATA = {
PD_STANDARD: {
disk.MEDIA: disk.HDD,
disk.REPLICATION: disk.ZONE,
},
PD_SSD: {
disk.MEDIA: disk.SSD,
disk.REPLICATION: disk.ZONE,
},
disk.LOCAL: {
disk.MEDIA: disk.SSD,
disk.REPLICATION: disk.NONE,
}
}
SCSI = 'SCSI'
NVME = 'NVME'
disk.RegisterDiskTypeMap(GCP, DISK_TYPE)
class GceDisk(disk.BaseDisk):
"""Object representing an GCE Disk."""
def __init__(self, disk_spec, name, zone, project,
image=None, image_project=None):
super(GceDisk, self).__init__(disk_spec)
self.attached_vm_name = None
self.image = image
self.image_project = image_project
self.name = name
self.zone = zone
self.project = project
self.metadata.update(DISK_METADATA[disk_spec.disk_type])
if self.disk_type == disk.LOCAL:
self.metadata['interface'] = FLAGS.gce_ssd_interface
def _Create(self):
"""Creates the disk."""
cmd = util.GcloudCommand(self, 'compute', 'disks', 'create', self.name)
cmd.flags['size'] = self.disk_size
cmd.flags['type'] = self.disk_type
cmd.flags['labels'] = util.MakeFormattedDefaultTags()
if self.image:
cmd.flags['image'] = self.image
if self.image_project:
cmd.flags['image-project'] = self.image_project
_, stderr, retcode = cmd.Issue(raise_on_failure=False)
util.CheckGcloudResponseKnownFailures(stderr, retcode)
def _Delete(self):
"""Deletes the disk."""
cmd = util.GcloudCommand(self, 'compute', 'disks', 'delete', self.name)
cmd.Issue(raise_on_failure=False)
def _Exists(self):
"""Returns true if the disk exists."""
cmd = util.GcloudCommand(self, 'compute', 'disks', 'describe', self.name)
stdout, _, _ = cmd.Issue(suppress_warning=True, raise_on_failure=False)
try:
json.loads(stdout)
except ValueError:
return False
return True
@vm_util.Retry()
def Attach(self, vm):
"""Attaches the disk to a VM.
Args:
vm: The GceVirtualMachine instance to which the disk will be attached.
"""
self.attached_vm_name = vm.name
cmd = util.GcloudCommand(self, 'compute', 'instances', 'attach-disk',
self.attached_vm_name)
cmd.flags['device-name'] = self.name
cmd.flags['disk'] = self.name
stdout, stderr, retcode = cmd.Issue(raise_on_failure=False)
# Gcloud attach-disk commands may still attach disks despite being rate
# limited.
if retcode:
if (cmd.rate_limited and 'is already being used' in stderr and
FLAGS.retry_on_rate_limited):
return
debug_text = ('Ran: {%s}\nReturnCode:%s\nSTDOUT: %s\nSTDERR: %s' %
(' '.join(cmd.GetCommand()), retcode, stdout, stderr))
raise errors.VmUtil.CalledProcessException(
'Command returned a non-zero exit code:\n{}'.format(debug_text))
def Detach(self):
"""Detaches the disk from a VM."""
cmd = util.GcloudCommand(self, 'compute', 'instances', 'detach-disk',
self.attached_vm_name)
cmd.flags['device-name'] = self.name
cmd.IssueRetryable()
self.attached_vm_name = None
def GetDevicePath(self):
"""Returns the path to the device inside the VM."""
if self.disk_type == disk.LOCAL and FLAGS.gce_ssd_interface == NVME:
return '/dev/%s' % self.name
else:
      # By default gce_ssd_interface == SCSI, which exposes the disk under
      # this persistent by-id device path.
return '/dev/disk/by-id/google-%s' % self.name
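def _example_gce_disk_lifecycle(disk_spec, vm):
    """Hedged usage sketch; illustration only, not used by the benchmarks.

    Assumes ``disk_spec`` is a disk spec whose ``disk_type`` appears in
    ``DISK_METADATA`` and that ``vm`` is a GceVirtualMachine exposing ``zone``
    and ``project``. In real runs the resource framework drives creation and
    deletion; the underscore methods are called directly here only to show
    the order of operations.
    """
    scratch = GceDisk(disk_spec, name='example-disk', zone=vm.zone,
                      project=vm.project)
    scratch._Create()                  # gcloud compute disks create ...
    scratch.Attach(vm)                 # gcloud compute instances attach-disk ...
    device_path = scratch.GetDevicePath()
    scratch.Detach()
    scratch._Delete()
    return device_path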
|
import logging
import pyatv.const as atv_const
from homeassistant.components.media_player import MediaPlayerEntity
from homeassistant.components.media_player.const import (
MEDIA_TYPE_MUSIC,
MEDIA_TYPE_TVSHOW,
MEDIA_TYPE_VIDEO,
SUPPORT_NEXT_TRACK,
SUPPORT_PAUSE,
SUPPORT_PLAY,
SUPPORT_PLAY_MEDIA,
SUPPORT_PREVIOUS_TRACK,
SUPPORT_SEEK,
SUPPORT_STOP,
SUPPORT_TURN_OFF,
SUPPORT_TURN_ON,
)
from homeassistant.const import (
CONF_HOST,
CONF_NAME,
EVENT_HOMEASSISTANT_STOP,
STATE_IDLE,
STATE_OFF,
STATE_PAUSED,
STATE_PLAYING,
STATE_STANDBY,
)
from homeassistant.core import callback
import homeassistant.util.dt as dt_util
from . import ATTR_ATV, ATTR_POWER, DATA_APPLE_TV, DATA_ENTITIES
_LOGGER = logging.getLogger(__name__)
SUPPORT_APPLE_TV = (
SUPPORT_TURN_ON
| SUPPORT_TURN_OFF
| SUPPORT_PLAY_MEDIA
| SUPPORT_PAUSE
| SUPPORT_PLAY
| SUPPORT_SEEK
| SUPPORT_STOP
| SUPPORT_NEXT_TRACK
| SUPPORT_PREVIOUS_TRACK
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up the Apple TV platform."""
if not discovery_info:
return
# Manage entity cache for service handler
if DATA_ENTITIES not in hass.data:
hass.data[DATA_ENTITIES] = []
name = discovery_info[CONF_NAME]
host = discovery_info[CONF_HOST]
atv = hass.data[DATA_APPLE_TV][host][ATTR_ATV]
power = hass.data[DATA_APPLE_TV][host][ATTR_POWER]
entity = AppleTvDevice(atv, name, power)
@callback
def on_hass_stop(event):
"""Stop push updates when hass stops."""
atv.push_updater.stop()
hass.bus.async_listen_once(EVENT_HOMEASSISTANT_STOP, on_hass_stop)
if entity not in hass.data[DATA_ENTITIES]:
hass.data[DATA_ENTITIES].append(entity)
async_add_entities([entity])
class AppleTvDevice(MediaPlayerEntity):
"""Representation of an Apple TV device."""
def __init__(self, atv, name, power):
"""Initialize the Apple TV device."""
self.atv = atv
self._name = name
self._playing = None
self._power = power
self._power.listeners.append(self)
self.atv.push_updater.listener = self
async def async_added_to_hass(self):
"""Handle when an entity is about to be added to Home Assistant."""
self._power.init()
@property
def name(self):
"""Return the name of the device."""
return self._name
@property
def unique_id(self):
"""Return a unique ID."""
return self.atv.metadata.device_id
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def state(self):
"""Return the state of the device."""
if not self._power.turned_on:
return STATE_OFF
if self._playing:
state = self._playing.play_state
if state in (
atv_const.PLAY_STATE_IDLE,
atv_const.PLAY_STATE_NO_MEDIA,
atv_const.PLAY_STATE_LOADING,
):
return STATE_IDLE
if state == atv_const.PLAY_STATE_PLAYING:
return STATE_PLAYING
if state in (
atv_const.PLAY_STATE_PAUSED,
atv_const.PLAY_STATE_FAST_FORWARD,
atv_const.PLAY_STATE_FAST_BACKWARD,
atv_const.PLAY_STATE_STOPPED,
):
# Catch fast forward/backward here so "play" is default action
return STATE_PAUSED
return STATE_STANDBY # Bad or unknown state?
@callback
def playstatus_update(self, updater, playing):
"""Print what is currently playing when it changes."""
self._playing = playing
self.async_write_ha_state()
@callback
def playstatus_error(self, updater, exception):
"""Inform about an error and restart push updates."""
_LOGGER.warning("A %s error occurred: %s", exception.__class__, exception)
# This will wait 10 seconds before restarting push updates. If the
# connection continues to fail, it will flood the log (every 10
# seconds) until it succeeds. A better approach should probably be
# implemented here later.
updater.start(initial_delay=10)
self._playing = None
self.async_write_ha_state()
@property
def media_content_type(self):
"""Content type of current playing media."""
if self._playing:
media_type = self._playing.media_type
if media_type == atv_const.MEDIA_TYPE_VIDEO:
return MEDIA_TYPE_VIDEO
if media_type == atv_const.MEDIA_TYPE_MUSIC:
return MEDIA_TYPE_MUSIC
if media_type == atv_const.MEDIA_TYPE_TV:
return MEDIA_TYPE_TVSHOW
@property
def media_duration(self):
"""Duration of current playing media in seconds."""
if self._playing:
return self._playing.total_time
@property
def media_position(self):
"""Position of current playing media in seconds."""
if self._playing:
return self._playing.position
@property
def media_position_updated_at(self):
"""Last valid time of media position."""
state = self.state
if state in (STATE_PLAYING, STATE_PAUSED):
return dt_util.utcnow()
async def async_play_media(self, media_type, media_id, **kwargs):
"""Send the play_media command to the media player."""
await self.atv.airplay.play_url(media_id)
@property
def media_image_hash(self):
"""Hash value for media image."""
state = self.state
if self._playing and state not in [STATE_OFF, STATE_IDLE]:
return self._playing.hash
async def async_get_media_image(self):
"""Fetch media image of current playing image."""
state = self.state
if self._playing and state not in [STATE_OFF, STATE_IDLE]:
return (await self.atv.metadata.artwork()), "image/png"
return None, None
@property
def media_title(self):
"""Title of current playing media."""
if self._playing:
if self.state == STATE_IDLE:
return "Nothing playing"
title = self._playing.title
return title if title else "No title"
return f"Establishing a connection to {self._name}..."
@property
def supported_features(self):
"""Flag media player features that are supported."""
return SUPPORT_APPLE_TV
async def async_turn_on(self):
"""Turn the media player on."""
self._power.set_power_on(True)
async def async_turn_off(self):
"""Turn the media player off."""
self._playing = None
self._power.set_power_on(False)
async def async_media_play_pause(self):
"""Pause media on media player."""
if not self._playing:
return
state = self.state
if state == STATE_PAUSED:
await self.atv.remote_control.play()
elif state == STATE_PLAYING:
await self.atv.remote_control.pause()
async def async_media_play(self):
"""Play media."""
if self._playing:
await self.atv.remote_control.play()
async def async_media_stop(self):
"""Stop the media player."""
if self._playing:
await self.atv.remote_control.stop()
async def async_media_pause(self):
"""Pause the media player."""
if self._playing:
await self.atv.remote_control.pause()
async def async_media_next_track(self):
"""Send next track command."""
if self._playing:
await self.atv.remote_control.next()
async def async_media_previous_track(self):
"""Send previous track command."""
if self._playing:
await self.atv.remote_control.previous()
async def async_media_seek(self, position):
"""Send seek command."""
if self._playing:
await self.atv.remote_control.set_position(position)
|
from datetime import date
from pytest import mark
from cerberus import errors
from cerberus.tests import assert_fail, assert_success
@mark.parametrize(
("field", "increment"), [("an_integer", 1), ("a_float", 1.0), ("a_number", 1)]
)
def test_max(schema, field, increment):
max_value = schema[field]['max']
value = max_value + increment
assert_fail(
{field: value}, error=(field, (field, 'max'), errors.MAX_VALUE, max_value)
)
@mark.parametrize(
("field", "decrement"), [("an_integer", 1), ("a_float", 1.0), ("a_number", 1)]
)
def test_min(schema, field, decrement):
min_value = schema[field]['min']
value = min_value - decrement
assert_fail(
{field: value}, error=(field, (field, 'min'), errors.MIN_VALUE, min_value)
)
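# Hedged note (illustration only): test_max and test_min rely on a ``schema``
# fixture from the cerberus test suite that is not shown here; conceptually it
# contains bounded numeric fields along the lines of
#     {'an_integer': {'type': 'integer', 'min': 1, 'max': 100}, ...}
# so that exceeding a bound by ``increment``/``decrement`` triggers
# errors.MAX_VALUE or errors.MIN_VALUE. The exact values are assumptions.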
def test_min_and_max_with_date():
schema = {'date': {'min': date(1900, 1, 1), 'max': date(1999, 12, 31)}}
assert_success({'date': date(1945, 5, 8)}, schema)
assert_fail({'date': date(1871, 5, 10)}, schema)
|
from aiohue.sensors import TYPE_ZLL_PRESENCE
from homeassistant.components.binary_sensor import (
DEVICE_CLASS_MOTION,
BinarySensorEntity,
)
from .const import DOMAIN as HUE_DOMAIN
from .sensor_base import SENSOR_CONFIG_MAP, GenericZLLSensor
PRESENCE_NAME_FORMAT = "{} motion"
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Defer binary sensor setup to the shared sensor module."""
await hass.data[HUE_DOMAIN][
config_entry.entry_id
].sensor_manager.async_register_component("binary_sensor", async_add_entities)
class HuePresence(GenericZLLSensor, BinarySensorEntity):
"""The presence sensor entity for a Hue motion sensor device."""
device_class = DEVICE_CLASS_MOTION
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return self.sensor.presence
@property
def device_state_attributes(self):
"""Return the device state attributes."""
attributes = super().device_state_attributes
if "sensitivity" in self.sensor.config:
attributes["sensitivity"] = self.sensor.config["sensitivity"]
if "sensitivitymax" in self.sensor.config:
attributes["sensitivity_max"] = self.sensor.config["sensitivitymax"]
return attributes
SENSOR_CONFIG_MAP.update(
{
TYPE_ZLL_PRESENCE: {
"platform": "binary_sensor",
"name_format": PRESENCE_NAME_FORMAT,
"class": HuePresence,
}
}
)
|
import numpy as np
from hypertools.tools.normalize import normalize
from hypertools.plot.plot import plot
cluster1 = np.random.multivariate_normal(np.zeros(3), np.eye(3), size=100)
cluster2 = np.random.multivariate_normal(np.zeros(3)+100, np.eye(3), size=100)
data = [cluster1, cluster2]
def test_normalize_returns_list():
assert type(normalize(data)) is list
def test_normalize_across():
norm_data = normalize(data, normalize='across')
assert np.allclose(np.mean(np.vstack(norm_data),axis=0),0)
def test_normalize_within():
norm_data = normalize(data, normalize='within')
assert np.allclose([np.mean(i,axis=0) for i in norm_data],0)
def test_normalize_row():
norm_data = normalize(data, normalize='row')
assert np.allclose(np.mean(np.vstack(norm_data), axis=1),0)
def test_normalize_geo():
geo = plot(data, show=False)
norm_data = normalize(geo, normalize='row')
assert np.allclose(np.mean(np.vstack(norm_data), axis=1),0)
|
import itertools
from jinja2 import Template
from jinja2.runtime import LoopContext
TEST_IDX_TEMPLATE_STR_1 = (
"[{% for i in lst|reverse %}(len={{ loop.length }},"
" revindex={{ loop.revindex }}, index={{ loop.index }}, val={{ i }}){% endfor %}]"
)
TEST_IDX0_TEMPLATE_STR_1 = (
"[{% for i in lst|reverse %}(len={{ loop.length }},"
" revindex0={{ loop.revindex0 }}, index0={{ loop.index0 }}, val={{ i }})"
"{% endfor %}]"
)
def test_loop_idx():
t = Template(TEST_IDX_TEMPLATE_STR_1)
lst = [10]
excepted_render = "[(len=1, revindex=1, index=1, val=10)]"
assert excepted_render == t.render(lst=lst)
def test_loop_idx0():
t = Template(TEST_IDX0_TEMPLATE_STR_1)
lst = [10]
excepted_render = "[(len=1, revindex0=0, index0=0, val=10)]"
assert excepted_render == t.render(lst=lst)
def test_loopcontext0():
in_lst = []
lc = LoopContext(reversed(in_lst), None)
assert lc.length == len(in_lst)
def test_loopcontext1():
in_lst = [10]
lc = LoopContext(reversed(in_lst), None)
assert lc.length == len(in_lst)
def test_loopcontext2():
in_lst = [10, 11]
lc = LoopContext(reversed(in_lst), None)
assert lc.length == len(in_lst)
def test_iterator_not_advanced_early():
t = Template("{% for _, g in gs %}{{ loop.index }} {{ g|list }}\n{% endfor %}")
out = t.render(
gs=itertools.groupby([(1, "a"), (1, "b"), (2, "c"), (3, "d")], lambda x: x[0])
)
# groupby groups depend on the current position of the iterator. If
# it was advanced early, the lists would appear empty.
assert out == "1 [(1, 'a'), (1, 'b')]\n2 [(2, 'c')]\n3 [(3, 'd')]\n"
def test_mock_not_contextfunction():
"""If a callable class has a ``__getattr__`` that returns True-like
values for arbitrary attrs, it should not be incorrectly identified
as a ``contextfunction``.
"""
class Calc:
def __getattr__(self, item):
return object()
def __call__(self, *args, **kwargs):
return len(args) + len(kwargs)
t = Template("{{ calc() }}")
out = t.render(calc=Calc())
# Would be "1" if context argument was passed.
assert out == "0"
|
import logging
_LOGGER = logging.getLogger(__name__)
class Subscriber:
"""Subscriber class for the publisher in mprm websocket class."""
def __init__(self, name, callback):
"""Initiate the subscriber."""
self.name = name
self.callback = callback
def update(self, message):
"""Trigger hass to update the device."""
_LOGGER.debug('%s got message "%s"', self.name, message)
self.callback(message)
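# Hedged usage sketch (illustration only): the publisher side holds Subscriber
# instances and fans messages out to them; the names below are hypothetical.
#
#     subscriber = Subscriber("my_device", lambda message: print(message))
#     subscriber.update("device state changed")  # logs, then runs the callback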
|
import logging
import logging.handlers
import platform
import sys
from . import toolkit
from .extras import cors
from .extras import ebugsnag
from .lib import config
from .server import __version__
import flask
# configure logging prior to subsequent imports which assume
# logging has been configured
cfg = config.load()
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=getattr(logging, cfg.loglevel.upper()),
datefmt="%d/%b/%Y:%H:%M:%S %z")
from .lib import mirroring # noqa
app = flask.Flask('docker-registry')
@app.route('/_ping')
@app.route('/v1/_ping')
def ping():
headers = {
'X-Docker-Registry-Standalone': 'mirror' if mirroring.is_mirror()
else (cfg.standalone is True)
}
infos = {}
if cfg.debug:
# Versions
versions = infos['versions'] = {}
headers['X-Docker-Registry-Config'] = cfg.flavor
for name, module in sys.modules.items():
if name.startswith('_'):
continue
try:
version = module.__version__
except AttributeError:
continue
versions[name] = version
versions['python'] = sys.version
# Hosts infos
infos['host'] = platform.uname()
infos['launch'] = sys.argv
return toolkit.response(infos, headers=headers)
@app.route('/')
def root():
return toolkit.response(cfg.issue)
def init():
# Configure the email exceptions
info = cfg.email_exceptions
if info and info.smtp_host:
mailhost = info.smtp_host
mailport = info.smtp_port
if mailport:
mailhost = (mailhost, mailport)
smtp_secure = info.smtp_secure
secure_args = _adapt_smtp_secure(smtp_secure)
mail_handler = logging.handlers.SMTPHandler(
mailhost=mailhost,
fromaddr=info.from_addr,
toaddrs=[info.to_addr],
subject='Docker registry exception',
credentials=(info.smtp_login,
info.smtp_password),
secure=secure_args)
mail_handler.setLevel(logging.ERROR)
app.logger.addHandler(mail_handler)
# Optional bugsnag support
ebugsnag.boot(app, cfg.bugsnag, cfg.flavor, __version__)
# Optional cors support
cors.boot(app, cfg.cors)
def _adapt_smtp_secure(value):
"""Adapt the value to arguments of ``SMTP.starttls()``
.. seealso:: <http://docs.python.org/2/library/smtplib.html\
#smtplib.SMTP.starttls>
"""
if isinstance(value, basestring):
# a string - wrap it in the tuple
return (value,)
if isinstance(value, config.Config):
assert set(value.keys()) <= set(['keyfile', 'certfile'])
return (value.keyfile, value.certfile)
if value:
return ()
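# Hedged examples of the mapping implemented above (illustration only):
#   _adapt_smtp_secure('keyfile.pem') -> ('keyfile.pem',)   keyfile only
#   _adapt_smtp_secure(True)          -> ()                  plain STARTTLS
#   _adapt_smtp_secure(None)          -> None                STARTTLS disabled
# A Config value with 'keyfile'/'certfile' keys becomes (keyfile, certfile).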
init()
|
import logging
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import data
from perfkitbenchmarker import regex_util
from perfkitbenchmarker import sample
from perfkitbenchmarker.linux_packages import unixbench
FLAGS = flags.FLAGS
BENCHMARK_NAME = 'unixbench'
BENCHMARK_CONFIG = """
unixbench:
description: Runs UnixBench.
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
"""
flags.DEFINE_boolean('unixbench_all_cores', default=False,
                     help='Setting this flag changes the default behavior of '
                          'UnixBench: it scales to the number of CPUs on the '
                          'machine instead of being limited to 16 CPUs.')
UNIXBENCH_PATCH_FILE = 'unixbench-16core-limitation.patch'
SYSTEM_SCORE_REGEX = r'\nSystem Benchmarks Index Score\s+([-+]?[0-9]*\.?[0-9]+)'
RESULT_REGEX = (
r'\n([A-Z][\w\-\(\) ]+)\s+([-+]?[0-9]*\.?[0-9]+) (\w+)\s+\('
r'([-+]?[0-9]*\.?[0-9]+) (\w+), (\d+) samples\)')
SCORE_REGEX = (
r'\n([A-Z][\w\-\(\) ]+)\s+([-+]?[0-9]*\.?[0-9]+)\s+([-+]?[0-9]*\.?[0-9]+)'
r'\s+([-+]?[0-9]*\.?[0-9]+)')
PARALLEL_COPIES_REGEX = r'running (\d+) parallel cop[yies]+ of tests'
RESULT_START_STRING = 'Benchmark Run:'
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config):
"""Verifies that the required resources for UnixBench are present.
Raises:
perfkitbenchmarker.data.ResourceNotFound: On missing resource.
"""
if FLAGS.unixbench_all_cores:
data.ResourcePath(UNIXBENCH_PATCH_FILE)
def Prepare(benchmark_spec):
"""Install Unixbench on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('Unixbench prepare on %s', vm)
vm.Install('unixbench')
if FLAGS.unixbench_all_cores:
vm.PushDataFile(UNIXBENCH_PATCH_FILE)
vm.RemoteCommand('cp %s %s' %
(UNIXBENCH_PATCH_FILE, unixbench.UNIXBENCH_DIR))
vm.RemoteCommand('cd %s && patch ./Run %s' %
(unixbench.UNIXBENCH_DIR, UNIXBENCH_PATCH_FILE))
def ParseResults(results):
"""Result parser for UnixBench.
Sample Results:
1 CPUs in system; running 1 parallel copy of tests
8 CPUs in system; running 8 parallel copies of tests
Double-Precision Whetstone 4022.0 MWIPS (9.9 s, 7 samples)
Execl Throughput 4735.8 lps (29.8 s, 2 samples)
File Copy 1024 bufsize 2000 maxblocks 1294367.0 KBps (30.0 s, 2 samples)
File Copy 256 bufsize 500 maxblocks 396912.9 KBps (30.0 s, 2 samples)
File Copy 4096 bufsize 8000 maxblocks 2513158.7 KBps (30.0 s, 2 samples)
Pipe Throughput 2221775.6 lps (10.0 s, 7 samples)
Pipe-based Context Switching 369000.7 lps (10.0 s, 7 samples)
Process Creation 12587.7 lps (30.0 s, 2 samples)
Shell Scripts (1 concurrent) 8234.3 lpm (60.0 s, 2 samples)
Shell Scripts (8 concurrent) 1064.5 lpm (60.0 s, 2 samples)
System Call Overhead 4439274.5 lps (10.0 s, 7 samples)
System Benchmarks Index Values BASELINE RESULT INDEX
Dhrystone 2 using register variables 116700.0 34872897.7 2988.3
Double-Precision Whetstone 55.0 4022.0 731.3
Execl Throughput 43.0 4735.8 1101.4
File Copy 1024 bufsize 2000 maxblocks 3960.0 1294367.0 3268.6
File Copy 256 bufsize 500 maxblocks 1655.0 396912.9 2398.3
File Copy 4096 bufsize 8000 maxblocks 5800.0 2513158.7 4333.0
Pipe Throughput 12440.0 2221775.6 1786.0
Pipe-based Context Switching 4000.0 369000.7 922.5
Process Creation 126.0 12587.7 999.0
Shell Scripts (1 concurrent) 42.4 8234.3 1942.1
Shell Scripts (8 concurrent) 6.0 1064.5 1774.2
System Call Overhead 15000.0 4439274.5 2959.5
========
System Benchmarks Index Score 1825.8
Args:
results: UnixBench result.
Returns:
A list of sample.Sample objects.
"""
samples = []
start_index = results.find(RESULT_START_STRING)
while start_index != -1:
next_start_index = results.find(RESULT_START_STRING, start_index + 1)
result = results[start_index: next_start_index]
parallel_copies = regex_util.ExtractAllMatches(
PARALLEL_COPIES_REGEX, result)
parallel_copy_metadata = {'num_parallel_copies': int(parallel_copies[0])}
match = regex_util.ExtractAllMatches(RESULT_REGEX, result)
for groups in match:
metadata = {'samples': int(groups[5]), 'time': groups[3] + groups[4]}
metadata.update(parallel_copy_metadata)
samples.append(sample.Sample(
groups[0].strip(), float(groups[1]), groups[2], metadata))
match = regex_util.ExtractAllMatches(SCORE_REGEX, result)
for groups in match:
metadata = {'baseline': float(groups[1]), 'index': float(groups[3])}
metadata.update(parallel_copy_metadata)
samples.append(sample.Sample('%s:score' % groups[0].strip(),
value=float(groups[2]),
unit='',
metadata=metadata))
match = regex_util.ExtractAllMatches(SYSTEM_SCORE_REGEX, result)
samples.append(sample.Sample('System Benchmarks Index Score',
float(match[0]), unit='',
metadata=parallel_copy_metadata))
start_index = next_start_index
return samples
def Run(benchmark_spec):
"""Run UnixBench on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of sample.Sample objects.
"""
vms = benchmark_spec.vms
vm = vms[0]
logging.info('UnixBench running on %s', vm)
unixbench_command = 'cd {0} && UB_TMPDIR={1} ./Run'.format(
unixbench.UNIXBENCH_DIR, vm.GetScratchDir())
logging.info('Unixbench Results:')
stdout, _ = vm.RemoteCommand(unixbench_command, should_log=True)
return ParseResults(stdout)
def Cleanup(benchmark_spec):
"""Cleanup UnixBench on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
pass
|
from botocore.exceptions import ClientError
from flask import current_app
from lemur.extensions import sentry
from .sts import sts_client
@sts_client("s3", service_type="resource")
def put(bucket_name, region_name, prefix, data, encrypt, **kwargs):
"""
Use STS to write to an S3 bucket
"""
bucket = kwargs["resource"].Bucket(bucket_name)
current_app.logger.debug(
"Persisting data to S3. Bucket: {0} Prefix: {1}".format(bucket_name, prefix)
)
# get data ready for writing
if isinstance(data, str):
data = data.encode("utf-8")
    # Write the object, optionally with server-side encryption, and report
    # success consistently for both paths.
    try:
        if encrypt:
            bucket.put_object(
                Key=prefix,
                Body=data,
                ACL="bucket-owner-full-control",
                ServerSideEncryption="AES256",
            )
        else:
            bucket.put_object(Key=prefix, Body=data, ACL="bucket-owner-full-control")
        return True
    except ClientError:
        sentry.captureException()
        return False
@sts_client("s3", service_type="client")
def delete(bucket_name, prefixed_object_name, **kwargs):
"""
Use STS to delete an object
"""
try:
response = kwargs["client"].delete_object(Bucket=bucket_name, Key=prefixed_object_name)
current_app.logger.debug(f"Delete data from S3."
f"Bucket: {bucket_name},"
f"Prefix: {prefixed_object_name},"
f"Status_code: {response}")
return response['ResponseMetadata']['HTTPStatusCode'] < 300
except ClientError:
sentry.captureException()
return False
@sts_client("s3", service_type="client")
def get(bucket_name, prefixed_object_name, **kwargs):
"""
Use STS to get an object
"""
try:
response = kwargs["client"].get_object(Bucket=bucket_name, Key=prefixed_object_name)
current_app.logger.debug(f"Get data from S3. Bucket: {bucket_name},"
f"object_name: {prefixed_object_name}")
return response['Body'].read().decode("utf-8")
except ClientError:
sentry.captureException()
return None
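# Hedged usage sketch (illustration only): bucket and key names are
# hypothetical, and the extra keyword (shown here as ``account_number``) is an
# assumption about what the ``sts_client`` decorator expects when assuming the
# role; the decorator injects ``resource``/``client`` into ``kwargs`` itself.
#
#     put(bucket_name="example-bucket", region_name="us-east-1",
#         prefix="certs/example.pem", data=pem_body, encrypt=True,
#         account_number="123456789012")
#     pem_body = get("example-bucket", "certs/example.pem",
#                    account_number="123456789012")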
|
from homeassistant.components.switch import SwitchEntity
from . import DOMAIN as QWIKSWITCH, QSToggleEntity
async def async_setup_platform(hass, _, add_entities, discovery_info=None):
"""Add switches from the main Qwikswitch component."""
if discovery_info is None:
return
qsusb = hass.data[QWIKSWITCH]
devs = [QSSwitch(qsid, qsusb) for qsid in discovery_info[QWIKSWITCH]]
add_entities(devs)
class QSSwitch(QSToggleEntity, SwitchEntity):
"""Switch based on a Qwikswitch relay module."""
|
import pytest
from mock import patch, call
from pandas.util.testing import assert_frame_equal
from arctic import arctic as m
from arctic.scripts import arctic_copy_data as mcd
from ...util import read_str_as_pandas, run_as_main
@pytest.fixture(scope='function', autouse=True)
def init(arctic):
arctic.initialize_library('user.library', m.VERSION_STORE, segment='month')
arctic.initialize_library('user.library2', m.VERSION_STORE, segment='month')
ts = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 2.0
2012-10-09 17:06:11.040 | 2.5
2012-11-08 17:06:11.040 | 3.0""")
ts1 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 4.0
2012-10-08 17:06:11.040 | 5.0
2012-10-09 17:06:11.040 | 6.5
2012-11-08 17:06:11.040 | 7.0""")
ts2 = read_str_as_pandas(""" times | near
2012-10-08 17:06:11.040 | 5.0
2012-10-09 17:06:11.040 | 6.5""")
ts3 = read_str_as_pandas(""" times | near
2012-09-08 17:06:11.040 | 1.0
2012-10-08 17:06:11.040 | 5.0
2012-10-09 17:06:11.040 | 6.5
2012-11-08 17:06:11.040 | 3.0""")
def test_copy_data_no_force(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Put ts, ts1 in library
arctic[src].write('some_ts', ts1)
arctic[src].write('some_ts1', ts1)
# Put some other value for ts in library2
arctic[dest].write('some_ts', ts)
# Create the user against the current mongo database
src_host = 'arctic_' + src + '@' + mongo_host
dest_host = 'arctic_' + dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', 'some_ts', 'some_ts1')
assert_frame_equal(ts, arctic[dest].read('some_ts').data)
assert_frame_equal(ts1, arctic[dest].read('some_ts1').data)
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 2 symbols')]
assert logger.warn.call_args_list == [call('Symbol: some_ts already exists in %s, use --force to overwrite or --splice to join with existing data' % dest_host)]
assert arctic[dest].read_audit_log('some_ts1')[0]['message'] == 'CR101'
def test_copy_data_force(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Put ts, ts1 in library
arctic[src].write('some_ts', ts)
arctic[src].write('some_ts1', ts1)
# Put some other value for ts in library2
arctic[dest].write('some_ts', ts1)
# Create the user against the current mongo database
src_host = src + '@' + mongo_host
dest_host = dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', '--force', 'some_ts', 'some_ts1')
assert_frame_equal(ts, arctic[dest].read('some_ts').data)
assert_frame_equal(ts1, arctic[dest].read('some_ts1').data)
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 2 symbols')]
assert logger.warn.call_args_list == [call('Symbol: some_ts already exists in destination, OVERWRITING')]
assert arctic[dest].read_audit_log('some_ts1')[0]['message'] == 'CR101'
def test_copy_data_splice(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Put ts, ts1 in library
arctic[src].write('some_ts', ts2)
arctic[src].write('some_ts1', ts1)
# Put some other value for ts in library2
arctic[dest].write('some_ts', ts)
# Create the user against the current mongo database
src_host = src + '@' + mongo_host
dest_host = dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', '--splice', 'some_ts', 'some_ts1')
assert_frame_equal(ts3, arctic[dest].read('some_ts').data)
assert_frame_equal(ts1, arctic[dest].read('some_ts1').data)
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 2 symbols')]
assert logger.warn.call_args_list == [call('Symbol: some_ts already exists in destination, splicing in new data')]
assert arctic[dest].read_audit_log('some_ts')[0]['message'] == 'CR101'
def test_copy_data_wild(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Put ts, ts1 in library
arctic[src].write('some_a_ts', ts)
arctic[src].write('some_a_ts1', ts1)
arctic[src].write('some_b_ts1', ts1)
arctic[src].write('some_c_ts1', ts1)
# Create the user against the current mongo database
src_host = 'arctic_' + src + '@' + mongo_host
dest_host = 'arctic_' + dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', '.*_a_.*', '.*_b_.*')
assert_frame_equal(ts, arctic[dest].read('some_a_ts').data)
assert_frame_equal(ts1, arctic[dest].read('some_a_ts1').data)
assert_frame_equal(ts1, arctic[dest].read('some_b_ts1').data)
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 3 symbols')]
assert arctic[dest].read_audit_log('some_a_ts1')[0]['message'] == 'CR101'
def test_copy_data_doesnt_exist(arctic, mongo_host):
src = 'user.library'
dest = 'user.library2'
# Create the user against the current mongo database
src_host = src + '@' + mongo_host
dest_host = dest + '@' + mongo_host
with patch('arctic.scripts.arctic_copy_data.logger') as logger:
run_as_main(mcd.main, '--src', src_host, '--dest', dest_host, '--log', 'CR101', 'some_ts')
assert logger.info.call_args_list == [call('Copying data from %s -> %s' % (src_host, dest_host)),
call('Copying: 0 symbols')]
assert logger.warn.call_args_list == [call('No symbols found that matched those provided.')]
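# For reference, a hedged sketch of the command line these tests drive via
# run_as_main (the console-script name is assumed from the module name; the
# library@host strings mirror the fixtures above, and the 'arctic_' user
# prefix varies between tests):
#
#   arctic_copy_data --src user.library@<mongo_host> \
#                    --dest user.library2@<mongo_host> \
#                    --log CR101 [--force | --splice] some_ts some_ts1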
|
from unittest.mock import patch
import pytest
import zigpy.profiles.zha
import zigpy.zcl.clusters.general as general
import zigpy.zcl.clusters.security as security
import zigpy.zcl.foundation as zcl_f
import homeassistant.components.automation as automation
from homeassistant.components.device_automation import (
_async_get_device_automations as async_get_device_automations,
)
from homeassistant.components.zha import DOMAIN
from homeassistant.helpers.device_registry import async_get_registry
from homeassistant.setup import async_setup_component
from tests.common import async_mock_service, mock_coro
SHORT_PRESS = "remote_button_short_press"
COMMAND = "command"
COMMAND_SINGLE = "single"
@pytest.fixture
async def device_ias(hass, zigpy_device_mock, zha_device_joined_restored):
"""IAS device fixture."""
clusters = [general.Basic, security.IasZone, security.IasWd]
zigpy_device = zigpy_device_mock(
{
1: {
"in_clusters": [c.cluster_id for c in clusters],
"out_clusters": [general.OnOff.cluster_id],
"device_type": zigpy.profiles.zha.DeviceType.ON_OFF_SWITCH,
}
},
)
zha_device = await zha_device_joined_restored(zigpy_device)
zha_device.update_available(True)
await hass.async_block_till_done()
return zigpy_device, zha_device
async def test_get_actions(hass, device_ias):
"""Test we get the expected actions from a zha device."""
ieee_address = str(device_ias[0].ieee)
ha_device_registry = await async_get_registry(hass)
reg_device = ha_device_registry.async_get_device({(DOMAIN, ieee_address)}, set())
actions = await async_get_device_automations(hass, "action", reg_device.id)
expected_actions = [
{"domain": DOMAIN, "type": "squawk", "device_id": reg_device.id},
{"domain": DOMAIN, "type": "warn", "device_id": reg_device.id},
]
assert actions == expected_actions
async def test_action(hass, device_ias):
"""Test for executing a zha device action."""
zigpy_device, zha_device = device_ias
zigpy_device.device_automation_triggers = {
(SHORT_PRESS, SHORT_PRESS): {COMMAND: COMMAND_SINGLE}
}
ieee_address = str(zha_device.ieee)
ha_device_registry = await async_get_registry(hass)
reg_device = ha_device_registry.async_get_device({(DOMAIN, ieee_address)}, set())
with patch(
"zigpy.zcl.Cluster.request",
return_value=mock_coro([0x00, zcl_f.Status.SUCCESS]),
):
assert await async_setup_component(
hass,
automation.DOMAIN,
{
automation.DOMAIN: [
{
"trigger": {
"device_id": reg_device.id,
"domain": "zha",
"platform": "device",
"type": SHORT_PRESS,
"subtype": SHORT_PRESS,
},
"action": {
"domain": DOMAIN,
"device_id": reg_device.id,
"type": "warn",
},
}
]
},
)
await hass.async_block_till_done()
calls = async_mock_service(hass, DOMAIN, "warning_device_warn")
channel = zha_device.channels.pools[0].client_channels["1:0x0006"]
channel.zha_send_event(COMMAND_SINGLE, [])
await hass.async_block_till_done()
assert len(calls) == 1
assert calls[0].domain == DOMAIN
assert calls[0].service == "warning_device_warn"
assert calls[0].data["ieee"] == ieee_address
|
import json
import logging
from absl import flags
from perfkitbenchmarker import cloud_tpu
from perfkitbenchmarker import errors
from perfkitbenchmarker.providers import gcp
from perfkitbenchmarker.providers.gcp import util
FLAGS = flags.FLAGS
TPU_TIMEOUT = 1200
_INSUFFICIENT_CAPACITY = 'There is no more capacity in the zone'
class GcpTpu(cloud_tpu.BaseTpu):
"""class representing a GCP cloud TPU.
Attributes:
name: Name of the cloud TPU to create.
project: the GCP project.
version: the TPU version.
zone: the GCP zone.
tpu_ip: the TPU IP.
"""
CLOUD = gcp.CLOUD
SERVICE_NAME = 'tpu'
TPU_IP = '10.240.{}.2'
DEFAULT_TPU_VERSION = '1.6'
def __init__(self, tpu_spec):
super(GcpTpu, self).__init__(tpu_spec)
self.spec = tpu_spec
self.project = FLAGS.project or util.GetDefaultProject()
def _Create(self):
"""Create Cloud TPU."""
cmd = util.GcloudCommand(self, 'compute', 'tpus', 'create',
self.spec.tpu_name)
cmd.flags['range'] = self.spec.tpu_cidr_range
if self.spec.tpu_accelerator_type:
cmd.flags['accelerator-type'] = self.spec.tpu_accelerator_type
if self.spec.tpu_description:
cmd.flags['description'] = self.spec.tpu_description
if self.spec.tpu_network:
cmd.flags['network'] = self.spec.tpu_network
if self.spec.tpu_tf_version:
cmd.flags['version'] = self.spec.tpu_tf_version
if self.spec.tpu_zone:
cmd.flags['zone'] = self.spec.tpu_zone
if self.spec.tpu_preemptible:
cmd.flags['preemptible'] = self.spec.tpu_preemptible
cmd.flags['project'] = self.project
_, stderr, retcode = cmd.Issue(raise_on_failure=False)
if _INSUFFICIENT_CAPACITY in stderr:
logging.error(util.STOCKOUT_MESSAGE)
raise errors.Benchmarks.InsufficientCapacityCloudFailure(
util.STOCKOUT_MESSAGE)
if retcode != 0:
      logging.error('Failed to create GCP cloud TPU.')
def _Delete(self):
"""Deletes the cloud TPU."""
cmd = util.GcloudCommand(self, 'compute', 'tpus', 'delete',
self.spec.tpu_name)
if self.spec.tpu_zone:
cmd.flags['zone'] = self.spec.tpu_zone
cmd.flags['project'] = self.project
_, _, retcode = cmd.Issue(timeout=TPU_TIMEOUT, raise_on_failure=False)
if retcode != 0:
      logging.error('Failed to delete GCP cloud TPU.')
else:
logging.info('Deleted GCP cloud TPU.')
def _GetTpuDescription(self):
"""Gets the cloud TPU description."""
cmd = util.GcloudCommand(self, 'compute', 'tpus', 'describe',
self.spec.tpu_name)
if self.spec.tpu_zone:
cmd.flags['zone'] = self.spec.tpu_zone
cmd.flags['project'] = self.project
stdout, _, retcode = cmd.Issue(raise_on_failure=False)
if retcode != 0:
      logging.info('Could not find GCP cloud TPU %s.',
self.spec.tpu_name)
return stdout and json.loads(stdout), retcode
def _Exists(self):
"""Returns true if the cloud TPU exists."""
_, retcode = self._GetTpuDescription()
return retcode == 0
def GetName(self):
"""Gets the name of the cloud TPU."""
return self.spec.tpu_name
def GetMasterGrpcAddress(self):
"""Gets the grpc address of the 0th NetworkEndpoint."""
master_network_endpoint = self._GetTpuDescription()[0]['networkEndpoints'][
0]
return 'grpc://{ip_address}:{port}'.format(
ip_address=master_network_endpoint['ipAddress'],
port=master_network_endpoint['port'])
def GetNumShards(self):
"""Gets the number of TPU shards."""
num_tpus = len(self._GetTpuDescription()[0]['networkEndpoints'])
return num_tpus * FLAGS.tpu_cores_per_donut
def GetZone(self):
"""Gets the TPU zone."""
return self.spec.tpu_zone
def GetAcceleratorType(self):
"""Gets the TPU accelerator type."""
return self.spec.tpu_accelerator_type
def GetResourceMetadata(self):
"""Returns the metadata associated with the resource.
All keys will be prefaced with tpu before
being published (done in publisher.py).
Returns:
metadata: dict of GCP cloud TPU metadata.
"""
metadata = super(GcpTpu, self).GetResourceMetadata()
metadata.update({
'project': self.project,
'cloud': self.CLOUD
})
return metadata
|
from datetime import timedelta
import logging
from aiohttp.hdrs import USER_AGENT
import requests
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_ATTRIBUTION,
CONF_API_KEY,
CONF_EMAIL,
HTTP_NOT_FOUND,
HTTP_OK,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import track_point_in_time
from homeassistant.util import Throttle
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by Have I Been Pwned (HIBP)"
DATE_STR_FORMAT = "%Y-%m-%d %H:%M:%S"
HA_USER_AGENT = "Home Assistant HaveIBeenPwned Sensor Component"
MIN_TIME_BETWEEN_FORCED_UPDATES = timedelta(seconds=5)
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=15)
URL = "https://haveibeenpwned.com/api/v3/breachedaccount/"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_EMAIL): vol.All(cv.ensure_list, [cv.string]),
vol.Required(CONF_API_KEY): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the HaveIBeenPwned sensor."""
emails = config.get(CONF_EMAIL)
api_key = config[CONF_API_KEY]
data = HaveIBeenPwnedData(emails, api_key)
devices = []
for email in emails:
devices.append(HaveIBeenPwnedSensor(data, email))
add_entities(devices)
class HaveIBeenPwnedSensor(Entity):
"""Implementation of a HaveIBeenPwned sensor."""
def __init__(self, data, email):
"""Initialize the HaveIBeenPwned sensor."""
self._state = None
self._data = data
self._email = email
self._unit_of_measurement = "Breaches"
@property
def name(self):
"""Return the name of the sensor."""
return f"Breaches {self._email}"
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._unit_of_measurement
@property
def state(self):
"""Return the state of the device."""
return self._state
@property
def device_state_attributes(self):
"""Return the attributes of the sensor."""
val = {ATTR_ATTRIBUTION: ATTRIBUTION}
if self._email not in self._data.data:
return val
for idx, value in enumerate(self._data.data[self._email]):
tmpname = f"breach {idx + 1}"
datetime_local = dt_util.as_local(
dt_util.parse_datetime(value["AddedDate"])
)
tmpvalue = f"{value['Title']} {datetime_local.strftime(DATE_STR_FORMAT)}"
val[tmpname] = tmpvalue
return val
async def async_added_to_hass(self):
"""Get initial data."""
        # Make sure we get initial data for the sensors, ignoring the normal
        # 15 minute throttle by using the 5 second forced-update throttle
self.hass.async_add_executor_job(self.update_nothrottle)
def update_nothrottle(self, dummy=None):
"""Update sensor without throttle."""
self._data.update_no_throttle()
        # Schedule a forced update 5 seconds in the future if the update above
        # returned no data for this sensor's email. This is mainly to make sure
        # we don't get an HTTP "too many requests" error and to have initial
        # data after hass startup. Once we have the data it will update as
        # normal using update()
if self._email not in self._data.data:
track_point_in_time(
self.hass,
self.update_nothrottle,
dt_util.now() + MIN_TIME_BETWEEN_FORCED_UPDATES,
)
return
self._state = len(self._data.data[self._email])
self.schedule_update_ha_state()
def update(self):
"""Update data and see if it contains data for our email."""
self._data.update()
if self._email in self._data.data:
self._state = len(self._data.data[self._email])
class HaveIBeenPwnedData:
"""Class for handling the data retrieval."""
def __init__(self, emails, api_key):
"""Initialize the data object."""
self._email_count = len(emails)
self._current_index = 0
self.data = {}
self._email = emails[0]
self._emails = emails
self._api_key = api_key
def set_next_email(self):
"""Set the next email to be looked up."""
self._current_index = (self._current_index + 1) % self._email_count
self._email = self._emails[self._current_index]
def update_no_throttle(self):
"""Get the data for a specific email."""
self.update(no_throttle=True)
@Throttle(MIN_TIME_BETWEEN_UPDATES, MIN_TIME_BETWEEN_FORCED_UPDATES)
def update(self, **kwargs):
"""Get the latest data for current email from REST service."""
try:
url = f"{URL}{self._email}?truncateResponse=false"
header = {USER_AGENT: HA_USER_AGENT, "hibp-api-key": self._api_key}
_LOGGER.debug("Checking for breaches for email: %s", self._email)
req = requests.get(url, headers=header, allow_redirects=True, timeout=5)
except requests.exceptions.RequestException:
_LOGGER.error("Failed fetching data for %s", self._email)
return
if req.status_code == HTTP_OK:
self.data[self._email] = sorted(
req.json(), key=lambda k: k["AddedDate"], reverse=True
)
            # Only go to the next email if we had data, so that
            # the forced updates try this current email again
self.set_next_email()
elif req.status_code == HTTP_NOT_FOUND:
self.data[self._email] = []
            # Only go to the next email if we had data, so that
            # the forced updates try this current email again
self.set_next_email()
else:
_LOGGER.error(
"Failed fetching data for %s (HTTP Status_code = %d)",
self._email,
req.status_code,
)
|
from __future__ import absolute_import
from docstructure import SITE_STRUCTURE, HREF_MAP, BASENAME_MAP
from lxml.etree import (parse, fromstring, ElementTree,
Element, SubElement, XPath, XML)
import glob
import hashlib
import os
import re
import sys
import copy
import shutil
import textwrap
import subprocess
from io import open as open_file
RST2HTML_OPTIONS = " ".join([
'--no-toc-backlinks',
'--strip-comments',
'--language en',
'--date',
])
XHTML_NS = 'http://www.w3.org/1999/xhtml'
htmlnsmap = {"h": XHTML_NS}
find_head = XPath("/h:html/h:head[1]", namespaces=htmlnsmap)
find_body = XPath("/h:html/h:body[1]", namespaces=htmlnsmap)
find_title = XPath("/h:html/h:head/h:title/text()", namespaces=htmlnsmap)
find_title_tag = XPath("/h:html/h:head/h:title", namespaces=htmlnsmap)
find_headings = XPath("//h:h1[not(@class)]//text()", namespaces=htmlnsmap)
find_heading_tag = XPath("//h:h1[@class = 'title'][1]", namespaces=htmlnsmap)
find_menu = XPath("//h:ul[@id=$name]", namespaces=htmlnsmap)
find_page_end = XPath("/h:html/h:body/h:div[last()]", namespaces=htmlnsmap)
find_words = re.compile(r'(\w+)').findall
replace_invalid = re.compile(r'[-_/.\s\\]').sub
def make_menu_section_head(section, menuroot):
section_id = section + '-section'
section_head = menuroot.xpath("//ul[@id=$section]/li", section=section_id)
if not section_head:
ul = SubElement(menuroot, "ul", id=section_id)
section_head = SubElement(ul, "li")
title = SubElement(section_head, "span", {"class":"section title"})
title.text = section
else:
section_head = section_head[0]
return section_head
def build_menu(tree, basename, section_head):
page_title = find_title(tree)
if page_title:
page_title = page_title[0]
else:
page_title = replace_invalid('', basename.capitalize())
build_menu_entry(page_title, basename+".html", section_head,
headings=find_headings(tree))
def build_menu_entry(page_title, url, section_head, headings=None):
page_id = replace_invalid(' ', os.path.splitext(url)[0]) + '-menu'
ul = SubElement(section_head, "ul", {"class":"menu foreign", "id":page_id})
title = SubElement(ul, "li", {"class":"menu title"})
a = SubElement(title, "a", href=url)
a.text = page_title
if headings:
subul = SubElement(title, "ul", {"class":"submenu"})
for heading in headings:
li = SubElement(subul, "li", {"class":"menu item"})
try:
ref = heading.getparent().getparent().get('id')
except AttributeError:
ref = None
if ref is None:
ref = '-'.join(find_words(replace_invalid(' ', heading.lower())))
a = SubElement(li, "a", href=url+'#'+ref)
a.text = heading
def merge_menu(tree, menu, name):
menu_root = copy.deepcopy(menu)
tree.getroot()[1][0].insert(0, menu_root) # html->body->div[class=document]
for el in menu_root.iter():
tag = el.tag
if tag[0] != '{':
el.tag = "{http://www.w3.org/1999/xhtml}" + tag
current_menu = find_menu(
menu_root, name=replace_invalid(' ', name) + '-menu')
if not current_menu:
current_menu = find_menu(
menu_root, name=replace_invalid('-', name) + '-menu')
if current_menu:
for submenu in current_menu:
submenu.set("class", submenu.get("class", "").
replace("foreign", "current"))
return tree
def inject_flatter_button(tree):
head = tree.xpath('h:head[1]', namespaces=htmlnsmap)[0]
script = SubElement(head, '{%s}script' % XHTML_NS, type='text/javascript')
script.text = """
(function() {
var s = document.createElement('script');
var t = document.getElementsByTagName('script')[0];
s.type = 'text/javascript';
s.async = true;
s.src = 'http://api.flattr.com/js/0.6/load.js?mode=auto';
t.parentNode.insertBefore(s, t);
})();
"""
script.tail = '\n'
intro_div = tree.xpath('h:body//h:div[@id = "introduction"][1]', namespaces=htmlnsmap)[0]
intro_div.insert(-1, XML(
'<p style="text-align: center;">Like working with lxml? '
'Happy about the time that it just saved you? <br />'
'Show your appreciation with <a href="http://flattr.com/thing/268156/lxml-The-Python-XML-Toolkit">Flattr</a>.<br />'
'<a class="FlattrButton" style="display:none;" rev="flattr;button:compact;" href="http://lxml.de/"></a>'
'</p>'
))
def inject_donate_buttons(lxml_path, rst2html_script, tree):
command = ([sys.executable, rst2html_script]
+ RST2HTML_OPTIONS.split() + [os.path.join(lxml_path, 'README.rst')])
rst2html = subprocess.Popen(command, stdout=subprocess.PIPE)
stdout, _ = rst2html.communicate()
readme = fromstring(stdout)
intro_div = tree.xpath('h:body//h:div[@id = "introduction"][1]',
namespaces=htmlnsmap)[0]
support_div = readme.xpath('h:body//h:div[@id = "support-the-project"][1]',
namespaces=htmlnsmap)[0]
intro_div.append(support_div)
finance_div = readme.xpath('h:body//h:div[@id = "project-income-report"][1]',
namespaces=htmlnsmap)[0]
legal = readme.xpath('h:body//h:div[@id = "legal-notice-for-donations"][1]',
namespaces=htmlnsmap)[0]
last_div = tree.xpath('h:body//h:div//h:div', namespaces=htmlnsmap)[-1]
last_div.addnext(finance_div)
finance_div.addnext(legal)
def inject_banner(parent):
banner = parent.makeelement('div', {'class': 'banner'})
parent.insert(0, banner)
banner_image = SubElement(banner, 'div', {'class': "banner_image"})
SubElement(banner_image, 'img', src="python-xml-title.png")
banner_text = SubElement(banner, 'div', {'class': "banner_link"})
banner_link = SubElement(banner_text, 'a', href="index.html#support-the-project")
banner_link.text = "Like the tool? "
SubElement(banner_link, 'br', {'class': "first"}).tail = "Help making it better! "
SubElement(banner_link, 'br', {'class': "second"}).tail = "Your donation helps!"
def rest2html(script, source_path, dest_path, stylesheet_url):
command = ('%s %s %s --stylesheet=%s --link-stylesheet %s > %s' %
(sys.executable, script, RST2HTML_OPTIONS,
stylesheet_url, source_path, dest_path))
subprocess.call(command, shell=True)
def convert_changelog(lxml_path, changelog_file_path, rst2html_script, stylesheet_url):
f = open_file(os.path.join(lxml_path, 'CHANGES.txt'), 'r', encoding='utf-8')
try:
content = f.read()
finally:
f.close()
links = dict(LP='`%s <https://bugs.launchpad.net/lxml/+bug/%s>`_',
GH='`%s <https://github.com/lxml/lxml/issues/%s>`_')
replace_tracker_links = re.compile('((LP|GH)#([0-9]+))').sub
def insert_link(match):
text, ref_type, ref_id = match.groups()
return links[ref_type] % (text, ref_id)
content = replace_tracker_links(insert_link, content)
command = [sys.executable, rst2html_script] + RST2HTML_OPTIONS.split() + [
'--link-stylesheet', '--stylesheet', stylesheet_url ]
out_file = open(changelog_file_path, 'wb')
try:
rst2html = subprocess.Popen(command, stdin=subprocess.PIPE, stdout=out_file)
rst2html.communicate(content.encode('utf8'))
finally:
out_file.close()
def publish(dirname, lxml_path, release):
if not os.path.exists(dirname):
os.mkdir(dirname)
doc_dir = os.path.join(lxml_path, 'doc')
script = os.path.join(doc_dir, 'rest2html.py')
pubkey = os.path.join(doc_dir, 'pubkey.asc')
stylesheet_file = 'style.css'
shutil.copy(pubkey, dirname)
# FIXME: find a way to make hashed filenames work both locally and in the versioned directories.
stylesheet_url = stylesheet_file
"""
style_file_pattern = "style_%s.css"
for old_stylesheet in glob.iglob(os.path.join(dirname, style_file_pattern % "*")):
os.unlink(old_stylesheet)
with open(os.path.join(dirname, stylesheet_file), 'rb') as f:
css = f.read()
checksum = hashlib.sha256(css).hexdigest()[:32]
stylesheet_url = style_file_pattern % checksum
with open(os.path.join(dirname, stylesheet_url), 'wb') as out:
out.write(css)
"""
href_map = HREF_MAP.copy()
changelog_basename = 'changes-%s' % release
href_map['Release Changelog'] = changelog_basename + '.html'
menu_js = textwrap.dedent('''
function trigger_menu(event) {
var sidemenu = document.getElementById("sidemenu");
var classes = sidemenu.getAttribute("class");
classes = (classes.indexOf(" visible") === -1) ? classes + " visible" : classes.replace(" visible", "");
sidemenu.setAttribute("class", classes);
event.preventDefault();
event.stopPropagation();
}
function hide_menu() {
var sidemenu = document.getElementById("sidemenu");
var classes = sidemenu.getAttribute("class");
if (classes.indexOf(" visible") !== -1) {
sidemenu.setAttribute("class", classes.replace(" visible", ""));
}
}
''')
trees = {}
menu = Element("div", {'class': 'sidemenu', 'id': 'sidemenu'})
SubElement(menu, 'div', {'class': 'menutrigger', 'onclick': 'trigger_menu(event)'}).text = "Menu"
menu_div = SubElement(menu, 'div', {'class': 'menu'})
inject_banner(menu_div)
# build HTML pages and parse them back
for section, text_files in SITE_STRUCTURE:
section_head = make_menu_section_head(section, menu_div)
for filename in text_files:
if filename.startswith('@'):
# special menu entry
page_title = filename[1:]
url = href_map[page_title]
build_menu_entry(page_title, url, section_head)
else:
path = os.path.join(doc_dir, filename)
basename = os.path.splitext(os.path.basename(filename))[0]
basename = BASENAME_MAP.get(basename, basename)
outname = basename + '.html'
outpath = os.path.join(dirname, outname)
rest2html(script, path, outpath, stylesheet_url)
tree = parse(outpath)
page_div = tree.getroot()[1][0] # html->body->div[class=document]
inject_banner(page_div)
if filename == 'main.txt':
# inject donation buttons
#inject_flatter_button(tree)
inject_donate_buttons(lxml_path, script, tree)
trees[filename] = (tree, basename, outpath)
build_menu(tree, basename, section_head)
# also convert CHANGES.txt
convert_changelog(lxml_path, os.path.join(dirname, 'changes-%s.html' % release),
script, stylesheet_url)
# generate sitemap from menu
sitemap = XML(textwrap.dedent('''\
<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN" "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html xmlns="http://www.w3.org/1999/xhtml" xml:lang="en" lang="en">
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<title>Sitemap of lxml.de - Processing XML and HTML with Python</title>
<meta content="lxml - the most feature-rich and easy-to-use library for processing XML and HTML in the Python language"
name="description" />
<meta content="Python XML, XML, XML processing, HTML, lxml, simple XML, ElementTree, etree, lxml.etree, objectify, XML parsing, XML validation, XPath, XSLT"
name="keywords" />
</head>
<body>
<h1>Sitemap of lxml.de - Processing XML and HTML with Python</h1>
</body>
</html>
'''))
sitemap_menu = copy.deepcopy(menu)
SubElement(SubElement(sitemap_menu[-1], 'li'), 'a', href='http://lxml.de/files/').text = 'Download files'
sitemap[-1].append(sitemap_menu) # append to body
ElementTree(sitemap).write(os.path.join(dirname, 'sitemap.html'))
# integrate sitemap into the menu
SubElement(SubElement(menu_div[-1], 'li'), 'a', href='/sitemap.html').text = 'Sitemap'
# integrate menu into web pages
for tree, basename, outpath in trees.values():
head = find_head(tree)[0]
SubElement(head, 'script', type='text/javascript').text = menu_js
SubElement(head, 'meta', name='viewport', content="width=device-width, initial-scale=1")
find_body(tree)[0].set('onclick', 'hide_menu()')
new_tree = merge_menu(tree, menu, basename)
title = find_title_tag(new_tree)
if title and title[0].text == 'lxml':
title[0].text = "lxml - Processing XML and HTML with Python"
heading = find_heading_tag(new_tree)
if heading:
heading[0].text = "lxml - XML and HTML with Python"
new_tree.write(outpath)
if __name__ == '__main__':
publish(sys.argv[1], sys.argv[2], sys.argv[3])
|
import importlib
import logging
import os
from perfkitbenchmarker import events
from perfkitbenchmarker import import_util
from perfkitbenchmarker import requirements
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers import azure
from perfkitbenchmarker.providers import gcp
GCP = gcp.CLOUD
AZURE = azure.CLOUD
AWS = aws.CLOUD
ALICLOUD = 'AliCloud'
KUBERNETES = 'Kubernetes'
DIGITALOCEAN = 'DigitalOcean'
OPENSTACK = 'OpenStack'
CLOUDSTACK = 'CloudStack'
RACKSPACE = 'Rackspace'
MESOS = 'Mesos'
PROFITBRICKS = 'ProfitBricks'
# Though Docker is not a cloud provider, its inclusion is useful
# for performing on-premise to cloud benchmarks
DOCKER = 'Docker'
VALID_CLOUDS = (GCP, AZURE, AWS, DIGITALOCEAN, KUBERNETES, OPENSTACK,
RACKSPACE, CLOUDSTACK, ALICLOUD, MESOS, PROFITBRICKS, DOCKER)
_imported_providers = set()
def LoadProviderFlags(providers):
"""Imports just the flags module for each provider.
This allows PKB to load flag definitions from each provider to include in the
help text without actually loading any other provider-specific modules.
Args:
providers: series of strings. Each element is a value from VALID_CLOUDS
indicating a cloud provider for which to import the flags module.
"""
for provider_name in providers:
normalized_name = provider_name.lower()
flags_module_name = '.'.join((__name__, normalized_name, 'flags'))
importlib.import_module(flags_module_name)
# Import flag definitions for all cloud providers.
LoadProviderFlags(VALID_CLOUDS)
def LoadProvider(provider_name, ignore_package_requirements=True):
"""Loads the all modules in the 'provider_name' package.
This function first checks the specified provider's Python package
requirements file, if one exists, and verifies that all requirements are met.
Next, it loads all modules in the specified provider's package. By loading
these modules, relevant classes (e.g. VMs) will register themselves.
Args:
provider_name: string chosen from VALID_CLOUDS. The name of the provider
whose modules should be loaded.
ignore_package_requirements: boolean. If True, the provider's Python package
requirements file is ignored.
"""
if provider_name in _imported_providers:
return
# Check package requirements from the provider's pip requirements file.
normalized_name = provider_name.lower()
if not ignore_package_requirements:
requirements.CheckProviderRequirements(normalized_name)
# Load all modules in the provider's directory. Simply loading those modules
# will cause relevant classes (e.g. VM and disk classes) to register
# themselves so that they can be instantiated during resource provisioning.
provider_package_path = os.path.join(__path__[0], normalized_name)
try:
modules = tuple(import_util.LoadModulesForPath(
[provider_package_path], __name__ + '.' + normalized_name))
if not modules:
raise ImportError('No modules found for provider %s.' % provider_name)
except Exception:
logging.error('Unable to load provider %s.', provider_name)
raise
# Signal that the provider's modules have been imported.
_imported_providers.add(provider_name)
events.provider_imported.send(provider_name)
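# A hedged usage sketch of this package: flag definitions for every cloud are
# available as soon as this module is imported (LoadProviderFlags runs above at
# import time), while the heavier provider modules are only pulled in when a
# benchmark asks for them, e.g.
#
#   from perfkitbenchmarker import providers
#   providers.LoadProvider(providers.GCP)  # registers GCP VM/disk/etc. classes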
|
from weblate.checks.angularjs import AngularJSInterpolationCheck
from weblate.checks.tests.test_checks import CheckTestCase, MockUnit
class AngularJSInterpolationCheckTest(CheckTestCase):
check = AngularJSInterpolationCheck()
def test_no_format(self):
self.assertFalse(self.check.check_format("strins", "string", False))
def test_format(self):
self.assertFalse(
self.check.check_format(
"{{name}} string {{other}}", "{{name}} {{other}} string", False
)
)
def test_format_ignore_position(self):
self.assertFalse(
self.check.check_format(
"{{name}} string {{other}}", "{{other}} string {{name}}", False
)
)
def test_different_whitespace(self):
self.assertFalse(
self.check.check_format("{{ name }} string", "{{name}} string", False)
)
def test_missing_format(self):
self.assertTrue(self.check.check_format("{{name}} string", "string", False))
def test_wrong_value(self):
self.assertTrue(
self.check.check_format("{{name}} string", "{{nameerror}} string", False)
)
def test_extended_formatting(self):
self.assertFalse(
self.check.check_format(
"Value: {{ something.value | currency }}",
"Wert: {{ something.value | currency }}",
False,
)
)
self.assertTrue(
self.check.check_format(
"Value: {{ something.value | currency }}",
"Value: {{ something.value }}",
False,
)
)
def test_check_highlight(self):
highlights = self.check.check_highlight(
"{{name}} {{ something.value | currency }} string",
MockUnit("angularjs_format", flags="angularjs-format"),
)
self.assertEqual(2, len(highlights))
self.assertEqual(0, highlights[0][0])
self.assertEqual(8, highlights[0][1])
self.assertEqual(9, highlights[1][0])
self.assertEqual(41, highlights[1][1])
def test_check_highlight_ignored(self):
highlights = self.check.check_highlight(
"{{name}} {{other}} string",
MockUnit("angularjs_format", flags="ignore-angularjs-format"),
)
self.assertEqual([], highlights)
|
import mock
from zake.fake_client import FakeClient
from paasta_tools.api import settings
from paasta_tools.api.views import deploy_queue
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.utils import SystemPaastaConfig
@mock.patch("paasta_tools.api.views.deploy_queue.KazooClient", autospec=True)
@mock.patch("paasta_tools.api.views.deploy_queue.ZKDelayDeadlineQueue", autospec=True)
def test_list_deploy_queue(mock_delay_deadline_queue_class, mock_kazoo_client):
mock_request = mock.Mock()
settings.system_paasta_config = mock.create_autospec(SystemPaastaConfig)
mock_kazoo_client.return_value = FakeClient()
available_service_instance = ServiceInstance(
service="fake_service1",
instance="fake_instance1",
watcher="worker0",
bounce_by=1577952000,
wait_until=1577952000,
enqueue_time=1577952000,
bounce_start_time=1577952000,
failures=1,
processed_count=2,
)
unavailable_service_instance = ServiceInstance(
service="fake_service2",
instance="fake_instance2",
watcher="worker1",
bounce_by=1577952100,
wait_until=1577952200,
enqueue_time=1577952100,
bounce_start_time=1577952100,
failures=2,
processed_count=3,
)
mock_delay_deadline_queue = mock_delay_deadline_queue_class.return_value
mock_delay_deadline_queue.get_available_service_instances.return_value = [
(mock.Mock(), available_service_instance)
]
mock_delay_deadline_queue.get_unavailable_service_instances.return_value = [
(mock.Mock(), mock.Mock(), unavailable_service_instance)
]
output = deploy_queue.list_deploy_queue(mock_request)
assert output == {
"available_service_instances": [
{
"service": "fake_service1",
"instance": "fake_instance1",
"watcher": "worker0",
"bounce_by": 1577952000,
"wait_until": 1577952000,
"enqueue_time": 1577952000,
"bounce_start_time": 1577952000,
"failures": 1,
"processed_count": 2,
}
],
"unavailable_service_instances": [
{
"service": "fake_service2",
"instance": "fake_instance2",
"watcher": "worker1",
"bounce_by": 1577952100,
"wait_until": 1577952200,
"enqueue_time": 1577952100,
"bounce_start_time": 1577952100,
"failures": 2,
"processed_count": 3,
}
],
}
|
import logging
import unittest
import numpy as np
from gensim.models import LdaModel
from gensim.test.utils import common_dictionary, common_corpus
class TestLdaDiff(unittest.TestCase):
def setUp(self):
self.dictionary = common_dictionary
self.corpus = common_corpus
self.num_topics = 5
self.n_ann_terms = 10
self.model = LdaModel(corpus=self.corpus, id2word=self.dictionary, num_topics=self.num_topics, passes=10)
def testBasic(self):
# test for matrix case
mdiff, annotation = self.model.diff(self.model, n_ann_terms=self.n_ann_terms)
self.assertEqual(mdiff.shape, (self.num_topics, self.num_topics))
self.assertEqual(len(annotation), self.num_topics)
self.assertEqual(len(annotation[0]), self.num_topics)
# test for diagonal case
mdiff, annotation = self.model.diff(self.model, n_ann_terms=self.n_ann_terms, diagonal=True)
self.assertEqual(mdiff.shape, (self.num_topics,))
self.assertEqual(len(annotation), self.num_topics)
def testIdentity(self):
for dist_name in ["hellinger", "kullback_leibler", "jaccard"]:
# test for matrix case
mdiff, annotation = self.model.diff(self.model, n_ann_terms=self.n_ann_terms, distance=dist_name)
for row in annotation:
for (int_tokens, diff_tokens) in row:
self.assertEqual(diff_tokens, [])
self.assertEqual(len(int_tokens), self.n_ann_terms)
self.assertTrue(np.allclose(np.diag(mdiff), np.zeros(mdiff.shape[0], dtype=mdiff.dtype)))
if dist_name == "jaccard":
self.assertTrue(np.allclose(mdiff, np.zeros(mdiff.shape, dtype=mdiff.dtype)))
# test for diagonal case
mdiff, annotation = \
self.model.diff(self.model, n_ann_terms=self.n_ann_terms, distance=dist_name, diagonal=True)
for (int_tokens, diff_tokens) in annotation:
self.assertEqual(diff_tokens, [])
self.assertEqual(len(int_tokens), self.n_ann_terms)
self.assertTrue(np.allclose(mdiff, np.zeros(mdiff.shape, dtype=mdiff.dtype)))
if dist_name == "jaccard":
self.assertTrue(np.allclose(mdiff, np.zeros(mdiff.shape, dtype=mdiff.dtype)))
def testInput(self):
self.assertRaises(ValueError, self.model.diff, self.model, n_ann_terms=self.n_ann_terms, distance='something')
self.assertRaises(ValueError, self.model.diff, [], n_ann_terms=self.n_ann_terms, distance='something')
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)
unittest.main()
|
import asyncio
import logging.handlers
from timeit import default_timer as timer
from types import ModuleType
from typing import Awaitable, Callable, Optional, Set
from homeassistant import config as conf_util, core, loader, requirements
from homeassistant.config import async_notify_setup_error
from homeassistant.const import EVENT_COMPONENT_LOADED, PLATFORM_FORMAT
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers.typing import ConfigType
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_COMPONENT = "component"
DATA_SETUP_DONE = "setup_done"
DATA_SETUP_STARTED = "setup_started"
DATA_SETUP = "setup_tasks"
DATA_DEPS_REQS = "deps_reqs_processed"
SLOW_SETUP_WARNING = 10
SLOW_SETUP_MAX_WAIT = 300
@core.callback
def async_set_domains_to_be_loaded(hass: core.HomeAssistant, domains: Set[str]) -> None:
"""Set domains that are going to be loaded from the config.
This will allow us to properly handle after_dependencies.
"""
hass.data[DATA_SETUP_DONE] = {domain: asyncio.Event() for domain in domains}
def setup_component(hass: core.HomeAssistant, domain: str, config: ConfigType) -> bool:
"""Set up a component and all its dependencies."""
return asyncio.run_coroutine_threadsafe(
async_setup_component(hass, domain, config), hass.loop
).result()
async def async_setup_component(
hass: core.HomeAssistant, domain: str, config: ConfigType
) -> bool:
"""Set up a component and all its dependencies.
This method is a coroutine.
"""
if domain in hass.config.components:
return True
setup_tasks = hass.data.setdefault(DATA_SETUP, {})
if domain in setup_tasks:
return await setup_tasks[domain] # type: ignore
task = setup_tasks[domain] = hass.async_create_task(
_async_setup_component(hass, domain, config)
)
try:
return await task # type: ignore
finally:
if domain in hass.data.get(DATA_SETUP_DONE, {}):
hass.data[DATA_SETUP_DONE].pop(domain).set()
async def _async_process_dependencies(
hass: core.HomeAssistant, config: ConfigType, integration: loader.Integration
) -> bool:
"""Ensure all dependencies are set up."""
dependencies_tasks = {
dep: hass.loop.create_task(async_setup_component(hass, dep, config))
for dep in integration.dependencies
if dep not in hass.config.components
}
after_dependencies_tasks = {}
to_be_loaded = hass.data.get(DATA_SETUP_DONE, {})
for dep in integration.after_dependencies:
if (
dep not in dependencies_tasks
and dep in to_be_loaded
and dep not in hass.config.components
):
after_dependencies_tasks[dep] = hass.loop.create_task(
to_be_loaded[dep].wait()
)
if not dependencies_tasks and not after_dependencies_tasks:
return True
if dependencies_tasks:
_LOGGER.debug(
"Dependency %s will wait for dependencies %s",
integration.domain,
list(dependencies_tasks),
)
if after_dependencies_tasks:
_LOGGER.debug(
"Dependency %s will wait for after dependencies %s",
integration.domain,
list(after_dependencies_tasks),
)
async with hass.timeout.async_freeze(integration.domain):
results = await asyncio.gather(
*dependencies_tasks.values(), *after_dependencies_tasks.values()
)
failed = [
domain for idx, domain in enumerate(dependencies_tasks) if not results[idx]
]
if failed:
_LOGGER.error(
"Unable to set up dependencies of %s. Setup failed for dependencies: %s",
integration.domain,
", ".join(failed),
)
return False
return True
async def _async_setup_component(
hass: core.HomeAssistant, domain: str, config: ConfigType
) -> bool:
"""Set up a component for Home Assistant.
This method is a coroutine.
"""
def log_error(msg: str, link: Optional[str] = None) -> None:
"""Log helper."""
_LOGGER.error("Setup failed for %s: %s", domain, msg)
async_notify_setup_error(hass, domain, link)
try:
integration = await loader.async_get_integration(hass, domain)
except loader.IntegrationNotFound:
log_error("Integration not found.")
return False
if integration.disabled:
log_error(f"dependency is disabled - {integration.disabled}")
return False
# Validate all dependencies exist and there are no circular dependencies
if not await integration.resolve_dependencies():
return False
# Process requirements as soon as possible, so we can import the component
# without requiring imports to be in functions.
try:
await async_process_deps_reqs(hass, config, integration)
except HomeAssistantError as err:
log_error(str(err), integration.documentation)
return False
# Some integrations fail on import because they call functions incorrectly.
# So we do it before validating config to catch these errors.
try:
component = integration.get_component()
except ImportError as err:
log_error(f"Unable to import component: {err}", integration.documentation)
return False
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Setup failed for %s: unknown error", domain)
return False
processed_config = await conf_util.async_process_component_config(
hass, config, integration
)
if processed_config is None:
log_error("Invalid config.", integration.documentation)
return False
start = timer()
_LOGGER.info("Setting up %s", domain)
hass.data.setdefault(DATA_SETUP_STARTED, {})[domain] = dt_util.utcnow()
if hasattr(component, "PLATFORM_SCHEMA"):
# Entity components have their own warning
warn_task = None
else:
warn_task = hass.loop.call_later(
SLOW_SETUP_WARNING,
_LOGGER.warning,
"Setup of %s is taking over %s seconds.",
domain,
SLOW_SETUP_WARNING,
)
try:
if hasattr(component, "async_setup"):
task = component.async_setup(hass, processed_config) # type: ignore
elif hasattr(component, "setup"):
# This should not be replaced with hass.async_add_executor_job because
# we don't want to track this task in case it blocks startup.
task = hass.loop.run_in_executor(
None, component.setup, hass, processed_config # type: ignore
)
else:
log_error("No setup function defined.")
hass.data[DATA_SETUP_STARTED].pop(domain)
return False
async with hass.timeout.async_timeout(SLOW_SETUP_MAX_WAIT, domain):
result = await task
except asyncio.TimeoutError:
_LOGGER.error(
"Setup of %s is taking longer than %s seconds."
" Startup will proceed without waiting any longer",
domain,
SLOW_SETUP_MAX_WAIT,
)
hass.data[DATA_SETUP_STARTED].pop(domain)
return False
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error during setup of component %s", domain)
async_notify_setup_error(hass, domain, integration.documentation)
hass.data[DATA_SETUP_STARTED].pop(domain)
return False
finally:
end = timer()
if warn_task:
warn_task.cancel()
_LOGGER.info("Setup of domain %s took %.1f seconds", domain, end - start)
if result is False:
log_error("Integration failed to initialize.")
hass.data[DATA_SETUP_STARTED].pop(domain)
return False
if result is not True:
log_error(
f"Integration {domain!r} did not return boolean if setup was "
"successful. Disabling component."
)
hass.data[DATA_SETUP_STARTED].pop(domain)
return False
# Flush out async_setup calling create_task. Fragile but covered by test.
await asyncio.sleep(0)
await hass.config_entries.flow.async_wait_init_flow_finish(domain)
await asyncio.gather(
*[
entry.async_setup(hass, integration=integration)
for entry in hass.config_entries.async_entries(domain)
]
)
hass.config.components.add(domain)
hass.data[DATA_SETUP_STARTED].pop(domain)
# Cleanup
if domain in hass.data[DATA_SETUP]:
hass.data[DATA_SETUP].pop(domain)
hass.bus.async_fire(EVENT_COMPONENT_LOADED, {ATTR_COMPONENT: domain})
return True
async def async_prepare_setup_platform(
hass: core.HomeAssistant, hass_config: ConfigType, domain: str, platform_name: str
) -> Optional[ModuleType]:
"""Load a platform and makes sure dependencies are setup.
This method is a coroutine.
"""
platform_path = PLATFORM_FORMAT.format(domain=domain, platform=platform_name)
def log_error(msg: str) -> None:
"""Log helper."""
_LOGGER.error("Unable to prepare setup for platform %s: %s", platform_path, msg)
async_notify_setup_error(hass, platform_path)
try:
integration = await loader.async_get_integration(hass, platform_name)
except loader.IntegrationNotFound:
log_error("Integration not found")
return None
# Process deps and reqs as soon as possible, so that requirements are
# available when we import the platform.
try:
await async_process_deps_reqs(hass, hass_config, integration)
except HomeAssistantError as err:
log_error(str(err))
return None
try:
platform = integration.get_platform(domain)
except ImportError as exc:
log_error(f"Platform not found ({exc}).")
return None
# Already loaded
if platform_path in hass.config.components:
return platform
# Platforms cannot exist on their own, they are part of their integration.
# If the integration is not set up yet, and can be set up, set it up.
if integration.domain not in hass.config.components:
try:
component = integration.get_component()
except ImportError as exc:
log_error(f"Unable to import the component ({exc}).")
return None
if hasattr(component, "setup") or hasattr(component, "async_setup"):
if not await async_setup_component(hass, integration.domain, hass_config):
log_error("Unable to set up component.")
return None
return platform
async def async_process_deps_reqs(
hass: core.HomeAssistant, config: ConfigType, integration: loader.Integration
) -> None:
"""Process all dependencies and requirements for a module.
Module is a Python module of either a component or platform.
"""
processed = hass.data.get(DATA_DEPS_REQS)
if processed is None:
processed = hass.data[DATA_DEPS_REQS] = set()
elif integration.domain in processed:
return
if not await _async_process_dependencies(hass, config, integration):
raise HomeAssistantError("Could not set up all dependencies.")
if not hass.config.skip_pip and integration.requirements:
async with hass.timeout.async_freeze(integration.domain):
await requirements.async_get_integration_with_requirements(
hass, integration.domain
)
processed.add(integration.domain)
@core.callback
def async_when_setup(
hass: core.HomeAssistant,
component: str,
when_setup_cb: Callable[[core.HomeAssistant, str], Awaitable[None]],
) -> None:
"""Call a method when a component is setup."""
async def when_setup() -> None:
"""Call the callback."""
try:
await when_setup_cb(hass, component)
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error handling when_setup callback for %s", component)
# Running it in a new task so that it always runs after
if component in hass.config.components:
hass.async_create_task(when_setup())
return
unsub = None
async def loaded_event(event: core.Event) -> None:
"""Call the callback."""
if event.data[ATTR_COMPONENT] != component:
return
unsub() # type: ignore
await when_setup()
unsub = hass.bus.async_listen(EVENT_COMPONENT_LOADED, loaded_event)
|
from typing import Type
from homeassistant.config_entries import ConfigEntry
from .board import FirmataPinType
from .const import DOMAIN, FIRMATA_MANUFACTURER
from .pin import FirmataBoardPin
class FirmataEntity:
"""Representation of a Firmata entity."""
def __init__(self, api):
"""Initialize the entity."""
self._api = api
@property
def device_info(self) -> dict:
"""Return device info."""
return {
"connections": {},
"identifiers": {(DOMAIN, self._api.board.name)},
"manufacturer": FIRMATA_MANUFACTURER,
"name": self._api.board.name,
"sw_version": self._api.board.firmware_version,
}
class FirmataPinEntity(FirmataEntity):
"""Representation of a Firmata pin entity."""
def __init__(
self,
api: Type[FirmataBoardPin],
config_entry: ConfigEntry,
name: str,
pin: FirmataPinType,
):
"""Initialize the pin entity."""
super().__init__(api)
self._name = name
location = (config_entry.entry_id, "pin", pin)
self._unique_id = "_".join(str(i) for i in location)
@property
def name(self) -> str:
"""Get the name of the pin."""
return self._name
@property
def should_poll(self) -> bool:
"""No polling needed."""
return False
@property
def unique_id(self) -> str:
"""Return a unique identifier for this device."""
return self._unique_id
|
from datetime import timedelta
import logging
from pyblockchain import get_balance, validate_address
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import ATTR_ATTRIBUTION, CONF_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTRIBUTION = "Data provided by blockchain.com"
CONF_ADDRESSES = "addresses"
DEFAULT_NAME = "Bitcoin Balance"
ICON = "mdi:currency-btc"
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_ADDRESSES): [cv.string],
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Blockchain.com sensors."""
addresses = config[CONF_ADDRESSES]
name = config[CONF_NAME]
for address in addresses:
if not validate_address(address):
_LOGGER.error("Bitcoin address is not valid: %s", address)
return False
add_entities([BlockchainSensor(name, addresses)], True)
class BlockchainSensor(Entity):
"""Representation of a Blockchain.com sensor."""
def __init__(self, name, addresses):
"""Initialize the sensor."""
self._name = name
self.addresses = addresses
self._state = None
self._unit_of_measurement = "BTC"
@property
def name(self):
"""Return the name of the sensor."""
return self._name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement this sensor expresses itself in."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return ICON
@property
def device_state_attributes(self):
"""Return the state attributes of the sensor."""
return {ATTR_ATTRIBUTION: ATTRIBUTION}
def update(self):
"""Get the latest state of the sensor."""
self._state = get_balance(self.addresses)
|
import os
import json
from app.utils.HookDataParse import (
get_repo_name,
get_repo_branch,
get_push_name,
get_push_email
)
WEBHOOKDATA_DIR = os.path.join(os.path.dirname(__file__), 'webhookdata')
WEBHOOKDATA = {}
for filename in os.listdir(WEBHOOKDATA_DIR):
name = os.path.splitext(filename)[0]
with open(os.path.join(WEBHOOKDATA_DIR, filename)) as f:
data = json.load(f)
WEBHOOKDATA[name] = data
def test():
for name, data in WEBHOOKDATA.items():
print('\n' + name.center(60, '-'))
print(get_repo_name(data))
print(get_repo_branch(data))
print(get_push_name(data))
print(get_push_email(data))
|
import os
from babelfish import Language, language_converters
import pytest
from vcr import VCR
from subliminal.providers.thesubdb import TheSubDBProvider, TheSubDBSubtitle
vcr = VCR(path_transformer=lambda path: path + '.yaml',
record_mode=os.environ.get('VCR_RECORD_MODE', 'once'),
cassette_library_dir=os.path.realpath(os.path.join('tests', 'cassettes', 'thesubdb')))
@pytest.mark.converter
def test_converter_convert_alpha3_country():
assert language_converters['thesubdb'].convert('por', 'BR') == 'pt'
@pytest.mark.converter
def test_converter_convert_alpha3():
assert language_converters['thesubdb'].convert('eng') == 'en'
@pytest.mark.converter
def test_converter_convert_alpha3_alpha2_converter():
assert language_converters['thesubdb'].convert('fra') == 'fr'
@pytest.mark.converter
def test_converter_reverse():
assert language_converters['thesubdb'].reverse('en') == ('eng', )
@pytest.mark.converter
def test_converter_reverse_alpha3_country():
assert language_converters['thesubdb'].reverse('pt') == ('por', 'BR')
def test_get_matches(movies):
subtitle = TheSubDBSubtitle(Language('eng'), 'ad32876133355929d814457537e12dc2')
matches = subtitle.get_matches(movies['man_of_steel'])
assert matches == {'hash'}
def test_get_matches_no_match(episodes):
subtitle = TheSubDBSubtitle(Language('eng'), 'ad32876133355929d814457537e12dc2')
matches = subtitle.get_matches(episodes['got_s03e10'])
assert matches == set()
@pytest.mark.integration
@vcr.use_cassette
def test_query(movies):
video = movies['man_of_steel']
expected_languages = {Language('eng'), Language('por', 'BR')}
with TheSubDBProvider() as provider:
subtitles = provider.query(video.hashes['thesubdb'])
assert len(subtitles) == 2
assert {subtitle.language for subtitle in subtitles} == expected_languages
@pytest.mark.integration
@vcr.use_cassette
def test_query_wrong_hash():
with TheSubDBProvider() as provider:
subtitles = provider.query('11223344556677899877665544332211')
assert len(subtitles) == 0
@pytest.mark.integration
@vcr.use_cassette
def test_list_subtitles(episodes):
video = episodes['bbt_s07e05']
languages = {Language('eng'), Language('fra')}
with TheSubDBProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
assert len(subtitles) == 2
assert {subtitle.language for subtitle in subtitles} == languages
@pytest.mark.integration
@vcr.use_cassette
def test_download_subtitle(episodes):
video = episodes['bbt_s07e05']
languages = {Language('eng'), Language('fra')}
with TheSubDBProvider() as provider:
subtitles = provider.list_subtitles(video, languages)
provider.download_subtitle(subtitles[0])
assert subtitles[0].content is not None
assert subtitles[0].is_valid() is True
|
import logging
from tuyaha import TuyaApi
from tuyaha.tuyaapi import TuyaAPIException, TuyaNetException, TuyaServerException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import (
CONF_PASSWORD,
CONF_PLATFORM,
CONF_UNIT_OF_MEASUREMENT,
CONF_USERNAME,
ENTITY_MATCH_NONE,
TEMP_CELSIUS,
TEMP_FAHRENHEIT,
)
from homeassistant.core import callback
import homeassistant.helpers.config_validation as cv
# pylint:disable=unused-import
from .const import (
CONF_BRIGHTNESS_RANGE_MODE,
CONF_COUNTRYCODE,
CONF_CURR_TEMP_DIVIDER,
CONF_DISCOVERY_INTERVAL,
CONF_EXT_TEMP_SENSOR,
CONF_MAX_KELVIN,
CONF_MAX_TEMP,
CONF_MIN_KELVIN,
CONF_MIN_TEMP,
CONF_QUERY_DEVICE,
CONF_QUERY_INTERVAL,
CONF_SUPPORT_COLOR,
CONF_TEMP_DIVIDER,
CONF_TUYA_MAX_COLTEMP,
DEFAULT_DISCOVERY_INTERVAL,
DEFAULT_QUERY_INTERVAL,
DEFAULT_TUYA_MAX_COLTEMP,
DOMAIN,
TUYA_DATA,
TUYA_PLATFORMS,
TUYA_TYPE_NOT_QUERY,
)
_LOGGER = logging.getLogger(__name__)
CONF_LIST_DEVICES = "list_devices"
DATA_SCHEMA_USER = vol.Schema(
{
vol.Required(CONF_USERNAME): str,
vol.Required(CONF_PASSWORD): str,
vol.Required(CONF_COUNTRYCODE): vol.Coerce(int),
vol.Required(CONF_PLATFORM): vol.In(TUYA_PLATFORMS),
}
)
ERROR_DEV_MULTI_TYPE = "dev_multi_type"
ERROR_DEV_NOT_CONFIG = "dev_not_config"
ERROR_DEV_NOT_FOUND = "dev_not_found"
RESULT_AUTH_FAILED = "invalid_auth"
RESULT_CONN_ERROR = "cannot_connect"
RESULT_SUCCESS = "success"
RESULT_LOG_MESSAGE = {
RESULT_AUTH_FAILED: "Invalid credential",
RESULT_CONN_ERROR: "Connection error",
}
TUYA_TYPE_CONFIG = ["climate", "light"]
class TuyaConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a tuya config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
def __init__(self):
"""Initialize flow."""
self._country_code = None
self._password = None
self._platform = None
self._username = None
self._is_import = False
def _save_entry(self):
return self.async_create_entry(
title=self._username,
data={
CONF_COUNTRYCODE: self._country_code,
CONF_PASSWORD: self._password,
CONF_PLATFORM: self._platform,
CONF_USERNAME: self._username,
},
)
def _try_connect(self):
"""Try to connect and check auth."""
tuya = TuyaApi()
try:
tuya.init(
self._username, self._password, self._country_code, self._platform
)
except (TuyaNetException, TuyaServerException):
return RESULT_CONN_ERROR
except TuyaAPIException:
return RESULT_AUTH_FAILED
return RESULT_SUCCESS
async def async_step_import(self, user_input=None):
"""Handle configuration by yaml file."""
self._is_import = True
return await self.async_step_user(user_input)
async def async_step_user(self, user_input=None):
"""Handle a flow initialized by the user."""
if self._async_current_entries():
return self.async_abort(reason="single_instance_allowed")
errors = {}
if user_input is not None:
self._country_code = str(user_input[CONF_COUNTRYCODE])
self._password = user_input[CONF_PASSWORD]
self._platform = user_input[CONF_PLATFORM]
self._username = user_input[CONF_USERNAME]
result = await self.hass.async_add_executor_job(self._try_connect)
if result == RESULT_SUCCESS:
return self._save_entry()
if result != RESULT_AUTH_FAILED or self._is_import:
if self._is_import:
_LOGGER.error(
"Error importing from configuration.yaml: %s",
RESULT_LOG_MESSAGE.get(result, "Generic Error"),
)
return self.async_abort(reason=result)
errors["base"] = result
return self.async_show_form(
step_id="user", data_schema=DATA_SCHEMA_USER, errors=errors
)
@staticmethod
@callback
def async_get_options_flow(config_entry):
"""Get the options flow for this handler."""
return OptionsFlowHandler(config_entry)
class OptionsFlowHandler(config_entries.OptionsFlow):
"""Handle a option flow for Tuya."""
def __init__(self, config_entry: config_entries.ConfigEntry):
"""Initialize options flow."""
self.config_entry = config_entry
self._conf_devs_id = None
self._conf_devs_option = {}
self._form_error = None
def _get_form_error(self):
"""Set the error to be shown in the options form."""
errors = {}
if self._form_error:
errors["base"] = self._form_error
self._form_error = None
return errors
def _get_tuya_devices_filtered(self, types, exclude_mode=False, type_prefix=True):
"""Get the list of Tuya device to filtered by types."""
config_list = {}
types_filter = set(types)
tuya = self.hass.data[DOMAIN][TUYA_DATA]
devices_list = tuya.get_all_devices()
for device in devices_list:
dev_type = device.device_type()
exclude = (
dev_type in types_filter
if exclude_mode
else dev_type not in types_filter
)
if exclude:
continue
dev_id = device.object_id()
if type_prefix:
dev_id = f"{dev_type}-{dev_id}"
config_list[dev_id] = f"{device.name()} ({dev_type})"
return config_list
def _get_device(self, dev_id):
"""Get specific device from tuya library."""
tuya = self.hass.data[DOMAIN][TUYA_DATA]
return tuya.get_device_by_id(dev_id)
def _save_config(self, data):
"""Save the updated options."""
curr_conf = self.config_entry.options.copy()
curr_conf.update(data)
curr_conf.update(self._conf_devs_option)
return self.async_create_entry(title="", data=curr_conf)
async def _async_device_form(self, devs_id):
"""Return configuration form for devices."""
conf_devs_id = []
for count, dev_id in enumerate(devs_id):
device_info = dev_id.split("-")
if count == 0:
device_type = device_info[0]
device_id = device_info[1]
elif device_type != device_info[0]:
self._form_error = ERROR_DEV_MULTI_TYPE
return await self.async_step_init()
conf_devs_id.append(device_info[1])
device = self._get_device(device_id)
if not device:
self._form_error = ERROR_DEV_NOT_FOUND
return await self.async_step_init()
curr_conf = self._conf_devs_option.get(
device_id, self.config_entry.options.get(device_id, {})
)
config_schema = await self._get_device_schema(device_type, curr_conf, device)
if not config_schema:
self._form_error = ERROR_DEV_NOT_CONFIG
return await self.async_step_init()
self._conf_devs_id = conf_devs_id
device_name = (
"(multiple devices selected)" if len(conf_devs_id) > 1 else device.name()
)
return self.async_show_form(
step_id="device",
data_schema=config_schema,
description_placeholders={
"device_type": device_type,
"device_name": device_name,
},
)
async def async_step_init(self, user_input=None):
"""Handle options flow."""
if user_input is not None:
dev_ids = user_input.get(CONF_LIST_DEVICES)
if dev_ids:
return await self.async_step_device(None, dev_ids)
user_input.pop(CONF_LIST_DEVICES, [])
return self._save_config(data=user_input)
data_schema = vol.Schema(
{
vol.Optional(
CONF_DISCOVERY_INTERVAL,
default=self.config_entry.options.get(
CONF_DISCOVERY_INTERVAL, DEFAULT_DISCOVERY_INTERVAL
),
): vol.All(vol.Coerce(int), vol.Clamp(min=30, max=900)),
}
)
query_devices = self._get_tuya_devices_filtered(
TUYA_TYPE_NOT_QUERY, True, False
)
if query_devices:
devices = {ENTITY_MATCH_NONE: "Default"}
devices.update(query_devices)
def_val = self.config_entry.options.get(CONF_QUERY_DEVICE)
if not def_val or not query_devices.get(def_val):
def_val = ENTITY_MATCH_NONE
data_schema = data_schema.extend(
{
vol.Optional(
CONF_QUERY_INTERVAL,
default=self.config_entry.options.get(
CONF_QUERY_INTERVAL, DEFAULT_QUERY_INTERVAL
),
): vol.All(vol.Coerce(int), vol.Clamp(min=30, max=240)),
vol.Optional(CONF_QUERY_DEVICE, default=def_val): vol.In(devices),
}
)
config_devices = self._get_tuya_devices_filtered(TUYA_TYPE_CONFIG, False, True)
if config_devices:
data_schema = data_schema.extend(
{vol.Optional(CONF_LIST_DEVICES): cv.multi_select(config_devices)}
)
return self.async_show_form(
step_id="init",
data_schema=data_schema,
errors=self._get_form_error(),
)
async def async_step_device(self, user_input=None, dev_ids=None):
"""Handle options flow for device."""
if dev_ids is not None:
return await self._async_device_form(dev_ids)
if user_input is not None:
for device_id in self._conf_devs_id:
self._conf_devs_option[device_id] = user_input
return await self.async_step_init()
async def _get_device_schema(self, device_type, curr_conf, device):
"""Return option schema for device."""
if device_type == "light":
return self._get_light_schema(curr_conf, device)
if device_type == "climate":
entities_list = await _get_entities_matching_domains(self.hass, ["sensor"])
return self._get_climate_schema(curr_conf, device, entities_list)
return None
@staticmethod
def _get_light_schema(curr_conf, device):
"""Create option schema for light device."""
min_kelvin = device.max_color_temp()
max_kelvin = device.min_color_temp()
config_schema = vol.Schema(
{
vol.Optional(
CONF_SUPPORT_COLOR,
default=curr_conf.get(CONF_SUPPORT_COLOR, False),
): bool,
vol.Optional(
CONF_BRIGHTNESS_RANGE_MODE,
default=curr_conf.get(CONF_BRIGHTNESS_RANGE_MODE, 0),
): vol.In({0: "Range 1-255", 1: "Range 10-1000"}),
vol.Optional(
CONF_MIN_KELVIN,
default=curr_conf.get(CONF_MIN_KELVIN, min_kelvin),
): vol.All(vol.Coerce(int), vol.Clamp(min=min_kelvin, max=max_kelvin)),
vol.Optional(
CONF_MAX_KELVIN,
default=curr_conf.get(CONF_MAX_KELVIN, max_kelvin),
): vol.All(vol.Coerce(int), vol.Clamp(min=min_kelvin, max=max_kelvin)),
vol.Optional(
CONF_TUYA_MAX_COLTEMP,
default=curr_conf.get(
CONF_TUYA_MAX_COLTEMP, DEFAULT_TUYA_MAX_COLTEMP
),
): vol.All(
vol.Coerce(int),
vol.Clamp(
min=DEFAULT_TUYA_MAX_COLTEMP, max=DEFAULT_TUYA_MAX_COLTEMP * 10
),
),
}
)
return config_schema
@staticmethod
def _get_climate_schema(curr_conf, device, entities_list):
"""Create option schema for climate device."""
unit = device.temperature_unit()
def_unit = TEMP_FAHRENHEIT if unit == "FAHRENHEIT" else TEMP_CELSIUS
entities_list.insert(0, ENTITY_MATCH_NONE)
config_schema = vol.Schema(
{
vol.Optional(
CONF_UNIT_OF_MEASUREMENT,
default=curr_conf.get(CONF_UNIT_OF_MEASUREMENT, def_unit),
): vol.In({TEMP_CELSIUS: "Celsius", TEMP_FAHRENHEIT: "Fahrenheit"}),
vol.Optional(
CONF_TEMP_DIVIDER,
default=curr_conf.get(CONF_TEMP_DIVIDER, 0),
): vol.All(vol.Coerce(int), vol.Clamp(min=0)),
vol.Optional(
CONF_CURR_TEMP_DIVIDER,
default=curr_conf.get(CONF_CURR_TEMP_DIVIDER, 0),
): vol.All(vol.Coerce(int), vol.Clamp(min=0)),
vol.Optional(
CONF_MIN_TEMP,
default=curr_conf.get(CONF_MIN_TEMP, 0),
): int,
vol.Optional(
CONF_MAX_TEMP,
default=curr_conf.get(CONF_MAX_TEMP, 0),
): int,
vol.Optional(
CONF_EXT_TEMP_SENSOR,
default=curr_conf.get(CONF_EXT_TEMP_SENSOR, ENTITY_MATCH_NONE),
): vol.In(entities_list),
}
)
return config_schema
async def _get_entities_matching_domains(hass, domains):
"""List entities in the given domains."""
included_domains = set(domains)
entity_ids = hass.states.async_entity_ids(included_domains)
entity_ids.sort()
return entity_ids
|
from Handler import Handler
import MySQLdb
class MySQLHandler(Handler):
"""
    Implements the abstract Handler class, sending data to a MySQL table
"""
conn = None
def __init__(self, config=None):
"""
Create a new instance of the MySQLHandler class
"""
# Initialize Handler
Handler.__init__(self, config)
# Initialize Options
self.hostname = self.config['hostname']
self.port = int(self.config['port'])
self.username = self.config['username']
self.password = self.config['password']
self.database = self.config['database']
self.table = self.config['table']
self.col_time = self.config['col_time']
self.col_metric = self.config['col_metric']
self.col_value = self.config['col_value']
# Connect
self._connect()
def get_default_config_help(self):
"""
Returns the help text for the configuration options for this handler
"""
config = super(MySQLHandler, self).get_default_config_help()
config.update({
})
return config
def get_default_config(self):
"""
Return the default config for the handler
"""
config = super(MySQLHandler, self).get_default_config()
config.update({
})
return config
def __del__(self):
"""
Destroy instance of the MySQLHandler class
"""
self._close()
def process(self, metric):
"""
Process a metric
"""
# Just send the data
self._send(str(metric))
def _send(self, data):
"""
Insert the data
"""
data = data.strip().split(' ')
try:
cursor = self.conn.cursor()
cursor.execute("INSERT INTO %s (%s, %s, %s) VALUES(%%s, %%s, %%s)"
% (self.table, self.col_metric,
self.col_time, self.col_value),
(data[0], data[2], data[1]))
cursor.close()
self.conn.commit()
        except Exception as e:
# Log Error
self.log.error("MySQLHandler: Failed sending data. %s.", e)
            # Attempt to re-establish the connection
self._connect()
def _connect(self):
"""
Connect to the MySQL server
"""
self._close()
self.conn = MySQLdb.Connect(host=self.hostname,
port=self.port,
user=self.username,
passwd=self.password,
db=self.database)
def _close(self):
"""
Close the connection
"""
if self.conn:
self.conn.commit()
self.conn.close()
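# Example usage (a minimal sketch, not part of the original handler): the
# connection values below are illustrative placeholders, and a reachable MySQL
# server plus Diamond's Handler environment are assumed.
if __name__ == "__main__":
    example_config = {
        'hostname': 'localhost',
        'port': 3306,
        'username': 'diamond',
        'password': 'secret',
        'database': 'metrics',
        'table': 'metrics',
        'col_time': 'timestamp',
        'col_metric': 'metric',
        'col_value': 'value',
    }
    handler = MySQLHandler(example_config)
    # A Diamond metric renders as "<path> <value> <timestamp>", which is the
    # format _send() splits on before inserting a row.
    handler._send("servers.host1.cpu.total.idle 95.2 1562600145")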
|
import requests
from homeassistant.components.switch import SwitchEntity
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_DEVICES,
ENERGY_KILO_WATT_HOUR,
TEMP_CELSIUS,
)
from .const import (
ATTR_STATE_DEVICE_LOCKED,
ATTR_STATE_LOCKED,
ATTR_TEMPERATURE_UNIT,
ATTR_TOTAL_CONSUMPTION,
ATTR_TOTAL_CONSUMPTION_UNIT,
CONF_CONNECTIONS,
DOMAIN as FRITZBOX_DOMAIN,
LOGGER,
)
ATTR_TOTAL_CONSUMPTION_UNIT_VALUE = ENERGY_KILO_WATT_HOUR
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Fritzbox smarthome switch from config_entry."""
entities = []
devices = hass.data[FRITZBOX_DOMAIN][CONF_DEVICES]
fritz = hass.data[FRITZBOX_DOMAIN][CONF_CONNECTIONS][config_entry.entry_id]
for device in await hass.async_add_executor_job(fritz.get_devices):
if device.has_switch and device.ain not in devices:
entities.append(FritzboxSwitch(device, fritz))
devices.add(device.ain)
async_add_entities(entities)
class FritzboxSwitch(SwitchEntity):
"""The switch class for Fritzbox switches."""
def __init__(self, device, fritz):
"""Initialize the switch."""
self._device = device
self._fritz = fritz
@property
def device_info(self):
"""Return device specific attributes."""
return {
"name": self.name,
"identifiers": {(FRITZBOX_DOMAIN, self._device.ain)},
"manufacturer": self._device.manufacturer,
"model": self._device.productname,
"sw_version": self._device.fw_version,
}
@property
def unique_id(self):
"""Return the unique ID of the device."""
return self._device.ain
@property
def available(self):
"""Return if switch is available."""
return self._device.present
@property
def name(self):
"""Return the name of the device."""
return self._device.name
@property
def is_on(self):
"""Return true if the switch is on."""
return self._device.switch_state
def turn_on(self, **kwargs):
"""Turn the switch on."""
self._device.set_switch_state_on()
def turn_off(self, **kwargs):
"""Turn the switch off."""
self._device.set_switch_state_off()
def update(self):
"""Get latest data and states from the device."""
try:
self._device.update()
except requests.exceptions.HTTPError as ex:
LOGGER.warning("Fritzhome connection error: %s", ex)
self._fritz.login()
@property
def device_state_attributes(self):
"""Return the state attributes of the device."""
attrs = {}
attrs[ATTR_STATE_DEVICE_LOCKED] = self._device.device_lock
attrs[ATTR_STATE_LOCKED] = self._device.lock
if self._device.has_powermeter:
attrs[
ATTR_TOTAL_CONSUMPTION
] = f"{((self._device.energy or 0.0) / 1000):.3f}"
attrs[ATTR_TOTAL_CONSUMPTION_UNIT] = ATTR_TOTAL_CONSUMPTION_UNIT_VALUE
if self._device.has_temperature_sensor:
attrs[ATTR_TEMPERATURE] = str(
self.hass.config.units.temperature(
self._device.temperature, TEMP_CELSIUS
)
)
attrs[ATTR_TEMPERATURE_UNIT] = self.hass.config.units.temperature_unit
return attrs
@property
def current_power_w(self):
"""Return the current power usage in W."""
return self._device.power / 1000
|
import base64
import io
import aiohttp
import pytest
from voluptuous.error import MultipleInvalid
from homeassistant.components.color_extractor import (
ATTR_PATH,
ATTR_URL,
DOMAIN,
SERVICE_TURN_ON,
)
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_RGB_COLOR,
DOMAIN as LIGHT_DOMAIN,
SERVICE_TURN_OFF as LIGHT_SERVICE_TURN_OFF,
)
from homeassistant.const import ATTR_ENTITY_ID, STATE_OFF, STATE_ON
from homeassistant.setup import async_setup_component
import homeassistant.util.color as color_util
from tests.async_mock import Mock, mock_open, patch
from tests.common import load_fixture
LIGHT_ENTITY = "light.kitchen_lights"
CLOSE_THRESHOLD = 10
def _close_enough(actual_rgb, testing_rgb):
"""Validate the given RGB value is in acceptable tolerance."""
# Convert the given RGB values to hue / saturation and then back again
# as it wasn't reading the same RGB value set against it.
actual_hs = color_util.color_RGB_to_hs(*actual_rgb)
actual_rgb = color_util.color_hs_to_RGB(*actual_hs)
testing_hs = color_util.color_RGB_to_hs(*testing_rgb)
testing_rgb = color_util.color_hs_to_RGB(*testing_hs)
actual_red, actual_green, actual_blue = actual_rgb
testing_red, testing_green, testing_blue = testing_rgb
r_diff = abs(actual_red - testing_red)
g_diff = abs(actual_green - testing_green)
b_diff = abs(actual_blue - testing_blue)
return (
r_diff <= CLOSE_THRESHOLD
and g_diff <= CLOSE_THRESHOLD
and b_diff <= CLOSE_THRESHOLD
)
@pytest.fixture(autouse=True)
async def setup_light(hass):
"""Configure our light component to work against for testing."""
assert await async_setup_component(
hass, LIGHT_DOMAIN, {LIGHT_DOMAIN: {"platform": "demo"}}
)
await hass.async_block_till_done()
state = hass.states.get(LIGHT_ENTITY)
assert state
# Validate starting values
assert state.state == STATE_ON
assert state.attributes.get(ATTR_BRIGHTNESS) == 180
assert state.attributes.get(ATTR_RGB_COLOR) == (255, 63, 111)
await hass.services.async_call(
LIGHT_DOMAIN,
LIGHT_SERVICE_TURN_OFF,
{ATTR_ENTITY_ID: LIGHT_ENTITY},
blocking=True,
)
await hass.async_block_till_done()
state = hass.states.get(LIGHT_ENTITY)
assert state
assert state.state == STATE_OFF
async def test_missing_url_and_path(hass):
"""Test that nothing happens when url and path are missing."""
# Load our color_extractor component
await async_setup_component(
hass,
DOMAIN,
{},
)
await hass.async_block_till_done()
# Validate pre service call
state = hass.states.get(LIGHT_ENTITY)
assert state
assert state.state == STATE_OFF
# Missing url and path attributes, should cause error log
service_data = {
ATTR_ENTITY_ID: LIGHT_ENTITY,
}
with pytest.raises(MultipleInvalid):
await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, service_data, blocking=True
)
await hass.async_block_till_done()
# check light is still off, unchanged due to bad parameters on service call
state = hass.states.get(LIGHT_ENTITY)
assert state
assert state.state == STATE_OFF
async def _async_load_color_extractor_url(hass, service_data):
# Load our color_extractor component
await async_setup_component(
hass,
DOMAIN,
{},
)
await hass.async_block_till_done()
# Validate pre service call
state = hass.states.get(LIGHT_ENTITY)
assert state
assert state.state == STATE_OFF
# Call the shared service, our above mock should return the base64 decoded fixture 1x1 pixel
assert await hass.services.async_call(
DOMAIN, SERVICE_TURN_ON, service_data, blocking=True
)
await hass.async_block_till_done()
async def test_url_success(hass, aioclient_mock):
"""Test that a successful image GET translate to light RGB."""
service_data = {
ATTR_URL: "http://example.com/images/logo.png",
ATTR_ENTITY_ID: LIGHT_ENTITY,
# Standard light service data which we pass
ATTR_BRIGHTNESS_PCT: 50,
}
# Mock the HTTP Response with a base64 encoded 1x1 pixel
aioclient_mock.get(
url=service_data[ATTR_URL],
content=base64.b64decode(load_fixture("color_extractor_url.txt")),
)
# Allow access to this URL using the proper mechanism
hass.config.allowlist_external_urls.add("http://example.com/images/")
await _async_load_color_extractor_url(hass, service_data)
state = hass.states.get(LIGHT_ENTITY)
assert state
# Ensure we turned it on
assert state.state == STATE_ON
# Brightness has changed, optional service call field
assert state.attributes[ATTR_BRIGHTNESS] == 128
# Ensure the RGB values are correct
assert _close_enough(state.attributes[ATTR_RGB_COLOR], (50, 100, 150))
async def test_url_not_allowed(hass, aioclient_mock):
"""Test that a not allowed external URL fails to turn light on."""
service_data = {
ATTR_URL: "http://denied.com/images/logo.png",
ATTR_ENTITY_ID: LIGHT_ENTITY,
}
await _async_load_color_extractor_url(hass, service_data)
# Light has not been modified due to failure
state = hass.states.get(LIGHT_ENTITY)
assert state
assert state.state == STATE_OFF
async def test_url_exception(hass, aioclient_mock):
"""Test that a HTTPError fails to turn light on."""
service_data = {
ATTR_URL: "http://example.com/images/logo.png",
ATTR_ENTITY_ID: LIGHT_ENTITY,
}
# Don't let the URL not being allowed sway our exception test
hass.config.allowlist_external_urls.add("http://example.com/images/")
# Mock the HTTP Response with an HTTPError
aioclient_mock.get(url=service_data[ATTR_URL], exc=aiohttp.ClientError)
await _async_load_color_extractor_url(hass, service_data)
# Light has not been modified due to failure
state = hass.states.get(LIGHT_ENTITY)
assert state
assert state.state == STATE_OFF
async def test_url_error(hass, aioclient_mock):
"""Test that a HTTP Error (non 200) doesn't turn light on."""
service_data = {
ATTR_URL: "http://example.com/images/logo.png",
ATTR_ENTITY_ID: LIGHT_ENTITY,
}
# Don't let the URL not being allowed sway our exception test
hass.config.allowlist_external_urls.add("http://example.com/images/")
# Mock the HTTP Response with a 400 Bad Request error
aioclient_mock.get(url=service_data[ATTR_URL], status=400)
await _async_load_color_extractor_url(hass, service_data)
# Light has not been modified due to failure
state = hass.states.get(LIGHT_ENTITY)
assert state
assert state.state == STATE_OFF
@patch(
"builtins.open",
mock_open(read_data=base64.b64decode(load_fixture("color_extractor_file.txt"))),
create=True,
)
def _get_file_mock(file_path):
"""Convert file to BytesIO for testing due to PIL UnidentifiedImageError."""
_file = None
    with open(file_path, "rb") as file_handler:
_file = io.BytesIO(file_handler.read())
_file.name = "color_extractor.jpg"
_file.seek(0)
return _file
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def test_file(hass):
"""Test that the file only service reads a file and translates to light RGB."""
service_data = {
ATTR_PATH: "/opt/image.png",
ATTR_ENTITY_ID: LIGHT_ENTITY,
# Standard light service data which we pass
ATTR_BRIGHTNESS_PCT: 100,
}
# Add our /opt/ path to the allowed list of paths
hass.config.allowlist_external_dirs.add("/opt/")
await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
# Verify pre service check
state = hass.states.get(LIGHT_ENTITY)
assert state
assert state.state == STATE_OFF
# Mock the file handler read with our 1x1 base64 encoded fixture image
with patch("homeassistant.components.color_extractor._get_file", _get_file_mock):
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, service_data)
await hass.async_block_till_done()
state = hass.states.get(LIGHT_ENTITY)
assert state
# Ensure we turned it on
assert state.state == STATE_ON
# And set the brightness
assert state.attributes[ATTR_BRIGHTNESS] == 255
# Ensure the RGB values are correct
assert _close_enough(state.attributes[ATTR_RGB_COLOR], (25, 75, 125))
@patch("os.path.isfile", Mock(return_value=True))
@patch("os.access", Mock(return_value=True))
async def test_file_denied_dir(hass):
"""Test that the file only service fails to read an image in a dir not explicitly allowed."""
service_data = {
ATTR_PATH: "/path/to/a/dir/not/allowed/image.png",
ATTR_ENTITY_ID: LIGHT_ENTITY,
# Standard light service data which we pass
ATTR_BRIGHTNESS_PCT: 100,
}
await async_setup_component(hass, DOMAIN, {})
await hass.async_block_till_done()
# Verify pre service check
state = hass.states.get(LIGHT_ENTITY)
assert state
assert state.state == STATE_OFF
# Mock the file handler read with our 1x1 base64 encoded fixture image
with patch("homeassistant.components.color_extractor._get_file", _get_file_mock):
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, service_data)
await hass.async_block_till_done()
state = hass.states.get(LIGHT_ENTITY)
assert state
# Ensure it's still off due to access error (dir not explicitly allowed)
assert state.state == STATE_OFF
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import inspect
import json
import os
import sys
import tensorflow as tf
from tensorflow import errors
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.training import training
def _load_library(filename, lib="op"):
"""_load_library"""
f = inspect.getfile(sys._getframe(1)) # pylint: disable=protected-access
# Construct filename
f = os.path.join(os.path.dirname(f), filename)
filenames = [f]
# Function to load the library, return True if file system library is loaded
load_fn = tf.load_op_library if lib == "op" \
else lambda f: tf.compat.v1.load_file_system_library(f) is None
# Try to load all paths for file, fail if none succeed
errs = []
for f in filenames:
try:
l = load_fn(f)
if l is not None:
return l
except errors.NotFoundError as e:
errs.append(str(e))
raise NotImplementedError(
"unable to open file: " +
"{}, from paths: {}\ncaused by: {}".format(filename, filenames, errs))
_gcs_config_so = _load_library("_gcs_config_ops.so")
gcs_configure_credentials = _gcs_config_so.gcs_configure_credentials
gcs_configure_block_cache = _gcs_config_so.gcs_configure_block_cache
class BlockCacheParams(object): # pylint: disable=useless-object-inheritance
"""BlockCacheParams is a struct used for configuring the GCS Block Cache."""
def __init__(self, block_size=None, max_bytes=None, max_staleness=None):
self._block_size = block_size or 128 * 1024 * 1024
self._max_bytes = max_bytes or 2 * self._block_size
self._max_staleness = max_staleness or 0
@property
def block_size(self):
return self._block_size
@property
def max_bytes(self):
return self._max_bytes
@property
def max_staleness(self):
return self._max_staleness
def configure_gcs(credentials=None, block_cache=None, device=None):
"""Configures the GCS file system for a given a session.
Warning: GCS `credentials` may be transmitted over the network unencrypted.
Please ensure that the network is trusted before using this function. For
users running code entirely within Google Cloud, your data is protected by
encryption in between data centers. For more information, please take a look
at https://cloud.google.com/security/encryption-in-transit/.
Args:
    credentials: [Optional.] A JSON string or dict with the GCS credentials.
    block_cache: [Optional.] A BlockCacheParams to configure the block cache.
    device: [Optional.] The device on which to place the configure ops.
"""
def configure(credentials, block_cache):
"""Helper function to actually configure GCS."""
if credentials:
if isinstance(credentials, dict):
credentials = json.dumps(credentials)
creds = gcs_configure_credentials(credentials)
else:
creds = tf.constant(0)
if block_cache:
cache = gcs_configure_block_cache(
max_cache_size=block_cache.max_bytes,
block_size=block_cache.block_size,
max_staleness=block_cache.max_staleness)
else:
cache = tf.constant(0)
return tf.tuple([creds, cache])
if device:
with ops.device(device):
return configure(credentials, block_cache)
return configure(credentials, block_cache)
def configure_gcs_from_colab_auth(device='/job:worker/replica:0/task:0/device:CPU:0'):
"""ConfigureColabSession configures the GCS file system in Colab.
  Args:
    device: [Optional.] The device on which to place the configure ops.
  """
# Read from the application default credentials (adc).
adc_filename = os.environ.get(
'GOOGLE_APPLICATION_CREDENTIALS', '/content/adc.json')
with open(adc_filename) as f:
data = json.load(f)
return configure_gcs(credentials=data, device=device)
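# Example usage (a minimal sketch, not part of the original module): the
# credential payload and device string below are illustrative placeholders and
# must be replaced with real values for the configure ops to succeed.
if __name__ == "__main__":
  example_cache = BlockCacheParams(
      block_size=16 * 1024 * 1024,   # 16 MiB blocks
      max_bytes=64 * 1024 * 1024,    # cache at most 64 MiB
      max_staleness=0)
  # configure_gcs accepts either a JSON string or a dict for credentials; a
  # dict is serialized with json.dumps before being handed to the op.
  configure_gcs(
      credentials={"type": "authorized_user"},
      block_cache=example_cache,
      device="/job:worker/replica:0/task:0/device:CPU:0")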
|
import logging
from homeassistant.components.media_player import BrowseMedia
from homeassistant.components.media_player.const import (
MEDIA_CLASS_ALBUM,
MEDIA_CLASS_ARTIST,
MEDIA_CLASS_DIRECTORY,
MEDIA_CLASS_EPISODE,
MEDIA_CLASS_MOVIE,
MEDIA_CLASS_PLAYLIST,
MEDIA_CLASS_SEASON,
MEDIA_CLASS_TRACK,
MEDIA_CLASS_TV_SHOW,
MEDIA_CLASS_VIDEO,
)
from homeassistant.components.media_player.errors import BrowseError
from .const import DOMAIN
class UnknownMediaType(BrowseError):
"""Unknown media type."""
EXPANDABLES = ["album", "artist", "playlist", "season", "show"]
PLAYLISTS_BROWSE_PAYLOAD = {
"title": "Playlists",
"media_class": MEDIA_CLASS_DIRECTORY,
"media_content_id": "all",
"media_content_type": "playlists",
"can_play": False,
"can_expand": True,
}
SPECIAL_METHODS = {
"On Deck": "onDeck",
"Recently Added": "recentlyAdded",
}
ITEM_TYPE_MEDIA_CLASS = {
"album": MEDIA_CLASS_ALBUM,
"artist": MEDIA_CLASS_ARTIST,
"episode": MEDIA_CLASS_EPISODE,
"movie": MEDIA_CLASS_MOVIE,
"playlist": MEDIA_CLASS_PLAYLIST,
"season": MEDIA_CLASS_SEASON,
"show": MEDIA_CLASS_TV_SHOW,
"track": MEDIA_CLASS_TRACK,
"video": MEDIA_CLASS_VIDEO,
}
_LOGGER = logging.getLogger(__name__)
def browse_media(
entity_id, plex_server, media_content_type=None, media_content_id=None
):
"""Implement the websocket media browsing helper."""
def build_item_response(payload):
"""Create response payload for the provided media query."""
media = plex_server.lookup_media(**payload)
if media is None:
return None
try:
media_info = item_payload(media)
except UnknownMediaType:
return None
if media_info.can_expand:
media_info.children = []
for item in media:
try:
media_info.children.append(item_payload(item))
except UnknownMediaType:
continue
return media_info
if media_content_id and ":" in media_content_id:
media_content_id, special_folder = media_content_id.split(":")
else:
special_folder = None
if (
media_content_type
and media_content_type == "server"
and media_content_id != plex_server.machine_identifier
):
raise BrowseError(
f"Plex server with ID '{media_content_id}' is not associated with {entity_id}"
)
if special_folder:
if media_content_type == "server":
library_or_section = plex_server.library
children_media_class = MEDIA_CLASS_DIRECTORY
title = plex_server.friendly_name
elif media_content_type == "library":
library_or_section = plex_server.library.sectionByID(media_content_id)
title = library_or_section.title
try:
children_media_class = ITEM_TYPE_MEDIA_CLASS[library_or_section.TYPE]
except KeyError as err:
raise BrowseError(
f"Unknown type received: {library_or_section.TYPE}"
) from err
else:
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
)
payload = {
"title": title,
"media_class": MEDIA_CLASS_DIRECTORY,
"media_content_id": f"{media_content_id}:{special_folder}",
"media_content_type": media_content_type,
"can_play": False,
"can_expand": True,
"children": [],
"children_media_class": children_media_class,
}
method = SPECIAL_METHODS[special_folder]
items = getattr(library_or_section, method)()
for item in items:
try:
payload["children"].append(item_payload(item))
except UnknownMediaType:
continue
return BrowseMedia(**payload)
try:
if media_content_type in ["server", None]:
return server_payload(plex_server)
if media_content_type == "library":
return library_payload(plex_server, media_content_id)
except UnknownMediaType as err:
raise BrowseError(
f"Media not found: {media_content_type} / {media_content_id}"
) from err
if media_content_type == "playlists":
return playlists_payload(plex_server)
payload = {
"media_type": DOMAIN,
"plex_key": int(media_content_id),
}
response = build_item_response(payload)
if response is None:
raise BrowseError(f"Media not found: {media_content_type} / {media_content_id}")
return response
def item_payload(item):
"""Create response payload for a single media item."""
try:
media_class = ITEM_TYPE_MEDIA_CLASS[item.type]
except KeyError as err:
_LOGGER.debug("Unknown type received: %s", item.type)
raise UnknownMediaType from err
payload = {
"title": item.title,
"media_class": media_class,
"media_content_id": str(item.ratingKey),
"media_content_type": item.type,
"can_play": True,
"can_expand": item.type in EXPANDABLES,
}
if hasattr(item, "thumbUrl"):
payload["thumbnail"] = item.thumbUrl
return BrowseMedia(**payload)
def library_section_payload(section):
"""Create response payload for a single library section."""
try:
children_media_class = ITEM_TYPE_MEDIA_CLASS[section.TYPE]
except KeyError as err:
_LOGGER.debug("Unknown type received: %s", section.TYPE)
raise UnknownMediaType from err
return BrowseMedia(
title=section.title,
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id=section.key,
media_content_type="library",
can_play=False,
can_expand=True,
children_media_class=children_media_class,
)
def special_library_payload(parent_payload, special_type):
"""Create response payload for special library folders."""
title = f"{special_type} ({parent_payload.title})"
return BrowseMedia(
title=title,
media_class=parent_payload.media_class,
media_content_id=f"{parent_payload.media_content_id}:{special_type}",
media_content_type=parent_payload.media_content_type,
can_play=False,
can_expand=True,
children_media_class=parent_payload.children_media_class,
)
def server_payload(plex_server):
"""Create response payload to describe libraries of the Plex server."""
server_info = BrowseMedia(
title=plex_server.friendly_name,
media_class=MEDIA_CLASS_DIRECTORY,
media_content_id=plex_server.machine_identifier,
media_content_type="server",
can_play=False,
can_expand=True,
children_media_class=MEDIA_CLASS_DIRECTORY,
)
server_info.children = []
server_info.children.append(special_library_payload(server_info, "On Deck"))
server_info.children.append(special_library_payload(server_info, "Recently Added"))
for library in plex_server.library.sections():
if library.type == "photo":
continue
server_info.children.append(library_section_payload(library))
server_info.children.append(BrowseMedia(**PLAYLISTS_BROWSE_PAYLOAD))
return server_info
def library_payload(plex_server, library_id):
"""Create response payload to describe contents of a specific library."""
library = plex_server.library.sectionByID(library_id)
library_info = library_section_payload(library)
library_info.children = []
library_info.children.append(special_library_payload(library_info, "On Deck"))
library_info.children.append(
special_library_payload(library_info, "Recently Added")
)
for item in library.all():
try:
library_info.children.append(item_payload(item))
except UnknownMediaType:
continue
return library_info
def playlists_payload(plex_server):
"""Create response payload for all available playlists."""
playlists_info = {**PLAYLISTS_BROWSE_PAYLOAD, "children": []}
for playlist in plex_server.playlists():
try:
playlists_info["children"].append(item_payload(playlist))
except UnknownMediaType:
continue
response = BrowseMedia(**playlists_info)
response.children_media_class = MEDIA_CLASS_PLAYLIST
return response
|
import numpy as np
import unittest
from chainer import testing
from chainercv.visualizations import vis_semantic_segmentation
try:
import matplotlib # NOQA
_available = True
except ImportError:
_available = False
@testing.parameterize(*testing.product({
'label_names': [None, ('class0', 'class1', 'class2')],
'label_colors': [None, ((255, 0, 0), (0, 255, 0), (0, 0, 255))],
'all_label_names_in_legend': [False, True],
'no_img': [False, True],
}))
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisSemanticSegmentation(unittest.TestCase):
def setUp(self):
if self.no_img:
self.img = None
else:
self.img = np.random.randint(0, 255, size=(3, 32, 48))
        self.label = np.random.randint(
            -1, 3, size=(32, 48)).astype(np.int32)
def test_vis_semantic_segmentation(self):
ax, legend_handles = vis_semantic_segmentation(
self.img, self.label,
label_names=self.label_names, label_colors=self.label_colors,
all_label_names_in_legend=self.all_label_names_in_legend)
self.assertIsInstance(ax, matplotlib.axes.Axes)
for handle in legend_handles:
self.assertIsInstance(handle, matplotlib.patches.Patch)
@unittest.skipUnless(_available, 'Matplotlib is not installed')
class TestVisSemanticSegmentationInvalidArguments(unittest.TestCase):
def test_vis_semantic_segmentation_mismatch_names_and_colors(self):
label = np.random.randint(-1, 2, size=(48, 64)).astype(np.int32)
with self.assertRaises(ValueError):
vis_semantic_segmentation(
None, label,
label_names=('class0', 'class1', 'class2'),
label_colors=((255, 0, 0), (0, 255, 0)))
def test_vis_semantic_segmentation_exceed_value(self):
label = np.random.randint(10, 20, size=(48, 64)).astype(np.int32)
with self.assertRaises(ValueError):
vis_semantic_segmentation(
None, label,
label_names=('class0', 'class1', 'class2'))
testing.run_module(__name__, __file__)
|
from homeassistant.components.group import GroupIntegrationRegistry
from homeassistant.const import STATE_OFF
from homeassistant.core import callback
from homeassistant.helpers.typing import HomeAssistantType
from . import (
STATE_ECO,
STATE_ELECTRIC,
STATE_GAS,
STATE_HEAT_PUMP,
STATE_HIGH_DEMAND,
STATE_PERFORMANCE,
)
@callback
def async_describe_on_off_states(
hass: HomeAssistantType, registry: GroupIntegrationRegistry
) -> None:
"""Describe group on off states."""
registry.on_off_states(
{
STATE_ECO,
STATE_ELECTRIC,
STATE_PERFORMANCE,
STATE_HIGH_DEMAND,
STATE_HEAT_PUMP,
STATE_GAS,
},
STATE_OFF,
)
|
import os.path
import subprocess
GIT_PATHS = [
"/usr/lib/git",
"/usr/lib/git-core",
"/usr/libexec/git",
"/usr/libexec/git-core",
]
def find_git_http_backend():
"""Find Git HTTP back-end."""
if hasattr(find_git_http_backend, "result"):
return find_git_http_backend.result
try:
path = subprocess.run(
["git", "--exec-path"],
universal_newlines=True,
check=True,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE,
).stdout.strip()
if path:
GIT_PATHS.insert(0, path)
except OSError:
pass
for path in GIT_PATHS:
name = os.path.join(path, "git-http-backend")
if os.path.exists(name):
find_git_http_backend.result = name
return name
return None
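# Example usage (a minimal sketch, not part of the original module): the
# lookup result is cached on the function object, so repeated calls are cheap.
if __name__ == "__main__":
    backend = find_git_http_backend()
    if backend is None:
        print("git-http-backend was not found in any of the known locations")
    else:
        print(f"git-http-backend located at {backend}")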
|
from app.wraps.login_wrap import login_required
from app import app, v
from app.utils import ResponseUtil, RequestUtil, AuthUtil
from app.database.model import Collaborator, User
# get collaborator list
@app.route('/api/collaborator/list', methods=['GET'])
@login_required()
@v.param({'webhook_id': v.int()})
def api_collaborator_list(webhook_id):
# login user
user_id = RequestUtil.get_login_user().get('id', '')
if not AuthUtil.has_readonly_auth(user_id, webhook_id):
        return ResponseUtil.standard_response(0, 'Permission denied!')
collaborators = Collaborator.query.filter_by(webhook_id=webhook_id).all()
collaborators = [collaborator.dict() for collaborator in collaborators]
return ResponseUtil.standard_response(1, collaborators)
# new collaborator
@app.route('/api/collaborator/new', methods=['POST'])
@login_required()
@v.param({'webhook_id': v.int(), 'user_id': v.str()})
def api_collaborator_new(webhook_id, user_id):
# login user
login_user_id = RequestUtil.get_login_user().get('id', '')
if login_user_id == user_id:
        return ResponseUtil.standard_response(0, '`%s` is the creator!' % user_id)
if not AuthUtil.has_admin_auth(login_user_id, webhook_id):
        return ResponseUtil.standard_response(0, 'Permission denied!')
collaborator = Collaborator.query.filter_by(webhook_id=webhook_id,
user_id=user_id).first()
    # already exists
    if collaborator:
        return ResponseUtil.standard_response(0, 'Collaborator already exists!')
    # add the collaborator, creating the user record first if needed
user = User.query.get(user_id)
if not user:
user = User(id=user_id, name=user_id)
user.save()
collaborator = Collaborator(webhook_id=webhook_id, user=user)
collaborator.save()
return ResponseUtil.standard_response(1, collaborator.dict())
@app.route('/api/collaborator/delete', methods=['POST'])
@login_required()
@v.param({'collaborator_id': v.int()})
def api_collaborator_delete(collaborator_id):
# login user
user_id = RequestUtil.get_login_user().get('id', '')
collaborator = Collaborator.query.get(collaborator_id)
if not collaborator:
        return ResponseUtil.standard_response(0, 'Permission denied!')
webhook_id = collaborator.webhook_id
if not AuthUtil.has_admin_auth(user_id, webhook_id):
        return ResponseUtil.standard_response(0, 'Permission denied!')
collaborator.delete()
return ResponseUtil.standard_response(1, 'Success')
|
from cerberus.base import normalize_rulesset, UnconcernedValidator
from cerberus.schema import ValidatedSchema
class Validator(UnconcernedValidator):
@property
def allow_unknown(self):
"""
        If ``True``, unknown fields that are not defined in the schema will be ignored.
If a mapping with a validation schema is given, any undefined field will be
validated against its rules. Also see :ref:`allowing-the-unknown`.
Type: :class:`bool` or any :term:`mapping`
"""
return self._config.get('allow_unknown', False)
@allow_unknown.setter
def allow_unknown(self, value):
if not (self.is_child or isinstance(value, (bool, ValidatedSchema))):
value = normalize_rulesset(value)
ValidatedSchema(self, {'allow_unknown': value})
self._config['allow_unknown'] = value
@property # type: ignore
def schema(self):
"""
The validation schema of a validator. When a schema is passed to a validator
method (e.g. ``validate``), it replaces this attribute.
Type: any :term:`mapping` or :obj:`None`
"""
return self._schema
@schema.setter
def schema(self, schema):
if schema is None:
self._schema = None
elif self.is_child or isinstance(schema, ValidatedSchema):
self._schema = schema
else:
self._schema = ValidatedSchema(self, schema)
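# Example usage (a minimal sketch, not part of the original module): the
# schema and documents below are illustrative only.
if __name__ == "__main__":
    v = Validator({'name': {'type': 'string'}})
    # With a rules mapping, undefined fields are validated against it instead
    # of being rejected outright.
    v.allow_unknown = {'type': 'integer'}
    print(v.validate({'name': 'a', 'extra': 1}))    # expected: True
    print(v.validate({'name': 'a', 'extra': 'x'}))  # expected: False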
|
from pyflunearyou import Client
from pyflunearyou.errors import FluNearYouError
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_LATITUDE, CONF_LONGITUDE
from homeassistant.helpers import aiohttp_client, config_validation as cv
from .const import DOMAIN, LOGGER # pylint: disable=unused-import
class FluNearYouFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle an FluNearYou config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_CLOUD_POLL
@property
def data_schema(self):
"""Return the data schema for integration."""
return vol.Schema(
{
vol.Required(
CONF_LATITUDE, default=self.hass.config.latitude
): cv.latitude,
vol.Required(
CONF_LONGITUDE, default=self.hass.config.longitude
): cv.longitude,
}
)
async def async_step_user(self, user_input=None):
"""Handle the start of the config flow."""
if not user_input:
return self.async_show_form(step_id="user", data_schema=self.data_schema)
unique_id = f"{user_input[CONF_LATITUDE]}, {user_input[CONF_LONGITUDE]}"
await self.async_set_unique_id(unique_id)
self._abort_if_unique_id_configured()
websession = aiohttp_client.async_get_clientsession(self.hass)
client = Client(websession)
try:
await client.cdc_reports.status_by_coordinates(
user_input[CONF_LATITUDE], user_input[CONF_LONGITUDE]
)
except FluNearYouError as err:
LOGGER.error("Error while configuring integration: %s", err)
return self.async_show_form(step_id="user", errors={"base": "unknown"})
return self.async_create_entry(title=unique_id, data=user_input)
|
import numpy as np
from jax import config
import pytest
import tensornetwork
import tensornetwork.linalg.operations
from tensornetwork.linalg.operations import kron
from tensornetwork.linalg.initialization import random_uniform
from tensornetwork.tensor import Tensor
from tensornetwork.ncon_interface import ncon
from tensornetwork import backends
from tensornetwork.tests import testing_utils
# pylint: disable=no-member
config.update("jax_enable_x64", True)
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensordot_invalid_backend_raises_value_error(backend, dtype):
"""
Tests that tensordot raises ValueError when fed Tensors with different
backends. Other failure modes are tested at the backend level.
"""
backend_names = set(["jax", "numpy", "tensorflow", "pytorch"])
this_name = set([backend])
other_backend_names = list(backend_names - this_name)
shape = (4, 4, 4)
dtype1 = testing_utils.np_dtype_to_backend(backend, dtype)
testing_utils.check_contraction_dtype(backend, dtype1)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype1)
for other_backend in other_backend_names:
dtype2 = testing_utils.np_dtype_to_backend(other_backend, dtype)
testing_utils.check_contraction_dtype(other_backend, dtype2)
tensor2 = tensornetwork.ones(shape, backend=other_backend, dtype=dtype2)
with pytest.raises(ValueError):
_ = tensornetwork.tensordot(tensor1, tensor2, [[2, 0, 1], [1, 2, 0]])
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensordot_vs_backend(backend, dtype):
"""
Tests that tensordot yields the same result as the backend equivalent.
"""
shape = (4, 4, 4)
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
testing_utils.check_contraction_dtype(backend, dtype)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensor2 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensors = [tensor1, tensor2]
dims = [[2, 0, 1], [1, 2, 0]]
result = tensornetwork.tensordot(*tensors, dims)
backend_obj = backends.backend_factory.get_backend(backend)
arrays = [t.array for t in tensors]
backend_result = backend_obj.tensordot(*arrays, axes=dims)
np.testing.assert_allclose(backend_result, result.array)
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_tensordot_int_vs_backend(backend, dtype):
"""
Tests that tensordot yields the same result as the backend equivalent.
"""
shape = (4, 4, 4)
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
testing_utils.check_contraction_dtype(backend, dtype)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensor2 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensors = [tensor1, tensor2]
dim = 1
result = tensornetwork.tensordot(*tensors, dim)
backend_obj = backends.backend_factory.get_backend(backend)
arrays = [t.array for t in tensors]
backend_result = backend_obj.tensordot(*arrays, axes=dim)
np.testing.assert_allclose(backend_result, result.array)
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_reshape_vs_backend(backend, dtype):
"""
Tests that reshape yields the same result as the backend equivalent.
"""
shape = (3, 2, 4)
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
tensor = tensornetwork.ones(shape, backend=backend, dtype=dtype)
result = tensornetwork.reshape(tensor, (6, 4))
backend_obj = backends.backend_factory.get_backend(backend)
backend_result = backend_obj.reshape(tensor.array, (6, 4))
assert result.shape == backend_result.shape
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_transpose_vs_backend(backend, dtype):
"""
Tests that transpose yields the same result as the backend equivalent.
"""
shape = (3, 2, 4)
permutation = (1, 2, 0)
tensor, array = testing_utils.safe_randn(shape, backend, dtype)
if tensor is not None:
backend_obj = backends.backend_factory.get_backend(backend)
test = backend_obj.convert_to_tensor(array)
test = backend_obj.transpose(test, perm=permutation)
tensor_test = tensornetwork.transpose(tensor, perm=permutation)
np.testing.assert_allclose(test, tensor_test.array)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_hconj_vs_backend(backend, dtype):
"""
Tests that hconj yields the same result as the equivalent backend sequence.
"""
shape = (3, 2, 4)
permutation = (1, 2, 0)
tensor, array = testing_utils.safe_randn(shape, backend, dtype)
if tensor is not None:
backend_obj = backends.backend_factory.get_backend(backend)
test = backend_obj.convert_to_tensor(array)
test = backend_obj.transpose(test, perm=permutation)
test = backend_obj.conj(test)
tensor_test = tensornetwork.hconj(tensor, perm=permutation)
np.testing.assert_allclose(test, tensor_test.array)
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_take_slice_vs_backend(backend, dtype):
"""
Tests that take_slice yields the same result as the backend equivalent.
"""
shape = (5, 6, 7)
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
tensor = tensornetwork.ones(shape, backend=backend, dtype=dtype)
start_indices = (1, 2, 3)
slice_sizes = (2, 3, 3)
result = tensornetwork.take_slice(tensor, start_indices, slice_sizes)
backend_obj = backends.backend_factory.get_backend(backend)
backend_result = backend_obj.slice(tensor.array, start_indices, slice_sizes)
assert result.shape == backend_result.shape
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
@pytest.mark.parametrize("fname", ["sin", "cos", "exp", "log", "conj", "sign"])
def test_unary_ops_vs_backend(backend, dtype, fname):
shape = (4, 5, 6)
dtype_b = testing_utils.np_dtype_to_backend(backend, dtype)
backend_obj = backends.backend_factory.get_backend(backend)
backend_func = getattr(backend_obj, fname)
tn_func = getattr(tensornetwork.linalg.operations, fname)
tensor = tensornetwork.ones(shape, backend=backend, dtype=dtype_b)
if backend == "pytorch" and fname in ["sin", "log", "exp", "cos"]:
with pytest.raises(NotImplementedError):
backend_result = backend_func(tensor.array)
with pytest.raises(NotImplementedError):
tn_result = tn_func(tensor).array
else:
backend_result = backend_func(tensor.array)
tn_result = tn_func(tensor).array
np.testing.assert_allclose(backend_result, tn_result)
@pytest.mark.parametrize("dtype", testing_utils.np_not_half)
def test_abs_vs_backend(backend, dtype):
shape = (4, 5, 6)
dtype_b = testing_utils.np_dtype_to_backend(backend, dtype)
backend_obj = backends.backend_factory.get_backend(backend)
tensor = tensornetwork.ones(shape, backend=backend, dtype=dtype_b)
if (backend == "pytorch" and dtype == np.float16):
pytest.skip("Prod not supported with this dtype and backend.")
else:
    backend_result = backend_obj.abs(tensor.array)
    tn_result = tensornetwork.abs(tensor).array
np.testing.assert_allclose(backend_result, tn_result)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_sqrt_vs_backend(backend, dtype):
shape = (4, 5, 6)
dtype_b = testing_utils.np_dtype_to_backend(backend, dtype)
backend_obj = backends.backend_factory.get_backend(backend)
tensor = tensornetwork.ones(shape, backend=backend, dtype=dtype_b)
if (backend == "pytorch" and dtype == np.float16):
pytest.skip("Prod not supported with this dtype and backend.")
else:
backend_result = backend_obj.sqrt(tensor.array)
tn_result = tensornetwork.sqrt(tensor).array
np.testing.assert_allclose(backend_result, tn_result)
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_shape(backend, dtype):
shape = (4, 5, 6)
dtype_b = testing_utils.np_dtype_to_backend(backend, dtype)
tensor = tensornetwork.ones(shape, backend=backend, dtype=dtype_b)
tn_result = tensornetwork.shape(tensor)
assert tensor.shape == tn_result
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_einsum_invalid_backends(dtype, backend):
backend_names = set(["jax", "numpy", "tensorflow", "pytorch"])
this_name = set([backend])
other_backend_names = list(backend_names - this_name)
shape = (4, 3)
dtype1 = testing_utils.np_dtype_to_backend(backend, dtype)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype1)
for other_backend in other_backend_names:
dtype2 = testing_utils.np_dtype_to_backend(other_backend, dtype)
tensor2 = tensornetwork.ones(shape, backend=other_backend, dtype=dtype2)
for other_other_backend in backend_names:
dtype3 = testing_utils.np_dtype_to_backend(other_other_backend, dtype)
tensor3 = tensornetwork.zeros(shape, backend=other_other_backend,
dtype=dtype3)
with pytest.raises(ValueError):
_ = tensornetwork.einsum("ba, bc, dc", tensor1, tensor2, tensor3,
optimize=True)
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_einsum_vs_backend(dtype, backend):
shape = (4, 3)
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
testing_utils.check_contraction_dtype(backend, dtype)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensor2 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensor3 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
result = tensornetwork.einsum("ba, bc, dc", tensor1, tensor2, tensor3,
optimize=True)
backend_obj = backends.backend_factory.get_backend(backend)
arrays = [t.array for t in [tensor1, tensor2, tensor3]]
backend_result = backend_obj.einsum("ba, bc, dc", *arrays, optimize=True)
np.testing.assert_allclose(backend_result, result.array)
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_outer_invalid_backends(dtype, backend):
backend_names = set(["jax", "numpy", "tensorflow", "pytorch"])
this_name = set([backend])
other_backend_names = list(backend_names - this_name)
shape = (4, 3)
dtype1 = testing_utils.np_dtype_to_backend(backend, dtype)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype1)
for other_backend in other_backend_names:
dtype2 = testing_utils.np_dtype_to_backend(other_backend, dtype)
tensor2 = tensornetwork.ones(shape, backend=other_backend, dtype=dtype2)
with pytest.raises(ValueError):
_ = tensornetwork.outer(tensor1, tensor2)
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_outer_vs_backend(dtype, backend):
shape = (4, 3)
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
testing_utils.check_contraction_dtype(backend, dtype)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensor2 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
result = tensornetwork.outer(tensor1, tensor2)
backend_obj = backends.backend_factory.get_backend(backend)
arrays = [t.array for t in [tensor1, tensor2]]
backend_result = backend_obj.outer_product(*arrays)
np.testing.assert_allclose(backend_result, result.array)
@pytest.mark.parametrize("dtype", testing_utils.np_all_dtypes)
def test_ncon_invalid_backends(dtype, backend):
backend_names = set(["jax", "numpy", "tensorflow", "pytorch"])
this_name = set([backend])
other_backend_names = list(backend_names - this_name)
shape = (4, 3)
dtype1 = testing_utils.np_dtype_to_backend(backend, dtype)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype1)
for other_backend in other_backend_names:
dtype2 = testing_utils.np_dtype_to_backend(other_backend, dtype)
tensor2 = tensornetwork.ones(shape, backend=other_backend, dtype=dtype2)
for other_other_backend in backend_names:
dtype3 = testing_utils.np_dtype_to_backend(other_other_backend, dtype)
tensor3 = tensornetwork.zeros(shape, backend=other_other_backend,
dtype=dtype3)
tensors = [tensor1, tensor2, tensor3]
idxs = [[1, -1], [1, 2], [-2, 2]]
with pytest.raises(ValueError):
_ = ncon(tensors, idxs)
@pytest.mark.parametrize("dtype", testing_utils.np_not_bool)
def test_ncon_vs_backend(dtype, backend):
shape = (4, 3)
dtype = testing_utils.np_dtype_to_backend(backend, dtype)
testing_utils.check_contraction_dtype(backend, dtype)
tensor1 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensor2 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensor3 = tensornetwork.ones(shape, backend=backend, dtype=dtype)
tensors = [tensor1, tensor2, tensor3]
arrays = [tensor1.array, tensor2.array, tensor3.array]
idxs = [[1, -1], [1, 2], [-2, 2]]
result = ncon(tensors, idxs, backend=backend)
old_result = tensornetwork.ncon(arrays, idxs, backend=backend)
np.testing.assert_allclose(old_result, result.array)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_diagonal(backend, dtype):
""" Checks that Tensor.diagonal() works.
"""
shape = (2, 3, 3)
A, _ = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(tensornetwork.diagonal(A).array,
A.backend.diagonal(A.array))
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_diagflat(backend, dtype):
""" Checks that Tensor.diagflat() works.
"""
shape = (2, 3, 3)
A, _ = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(tensornetwork.diagflat(A).array,
A.backend.diagflat(A.array))
@pytest.mark.parametrize("dtype", testing_utils.np_not_half)
def test_trace(backend, dtype):
""" Checks that Tensor.trace() works.
"""
shape = (2, 3, 3)
A, _ = testing_utils.safe_randn(shape, backend, dtype)
if A is not None:
np.testing.assert_allclose(tensornetwork.trace(A).array,
A.backend.trace(A.array))
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
@pytest.mark.parametrize("pivotA", [None, 1, 2, 0, -1])
def test_pivot(backend, dtype, pivotA):
""" Checks that Tensor.pivot() works.
"""
shapeA = (2, 3, 4, 2)
A, _ = testing_utils.safe_randn(shapeA, backend, dtype)
if A is not None:
if pivotA is None:
matrixA = tensornetwork.pivot(A)
tA = A.backend.pivot(A.array, pivot_axis=-1)
else:
matrixA = tensornetwork.pivot(A, pivot_axis=pivotA)
tA = A.backend.pivot(A.array, pivot_axis=pivotA)
np.testing.assert_allclose(matrixA.array, tA)
@pytest.mark.parametrize("dtype", testing_utils.np_float_dtypes)
def test_kron(backend, dtype):
""" Checks that Tensor.kron() works.
"""
if (backend == "pytorch" and dtype in (np.complex64, np.complex128)):
pytest.skip("pytorch support for complex dtypes is currently poor.")
np.random.seed(10)
t1 = Tensor(np.random.rand(2, 2).astype(dtype), backend=backend)
t2 = Tensor(np.random.rand(3, 3).astype(dtype), backend=backend)
res_kron = kron(t1, t2)
res_ncon = ncon([t1.array, t2.array], [[-1, -3], [-2, -4]], backend=backend)
np.testing.assert_allclose(res_kron.array, res_ncon)
mat1 = res_kron.reshape((6, 6))
mat2 = np.kron(t1.array, t2.array)
np.testing.assert_allclose(mat1.array, mat2)
t1 = Tensor(np.random.rand(2, 2, 2, 2).astype(dtype), backend=backend)
t2 = Tensor(np.random.rand(3, 3, 3, 3).astype(dtype), backend=backend)
res_kron = kron(t1, t2)
res_ncon = ncon([t1.array, t2.array], [[-1, -2, -5, -6], [-3, -4, -7, -8]],
backend=backend)
np.testing.assert_allclose(res_kron.array, res_ncon)
mat1 = res_kron.reshape((36, 36))
mat2 = np.kron(
np.array(t1.array).reshape(4, 4),
np.array(t2.array).reshape(9, 9))
np.testing.assert_allclose(mat1.array, mat2)
def test_kron_raises(backend):
np.random.seed(10)
t1 = Tensor(np.random.rand(2, 2, 2), backend=backend)
t2 = Tensor(np.random.rand(3, 3), backend=backend)
with pytest.raises(ValueError, match="tensorA.ndim"):
kron(t1, t2)
with pytest.raises(ValueError, match="tensorB.ndim"):
kron(t2, t1)
t1 = Tensor(np.random.rand(2, 2, 2), backend='numpy')
t2 = Tensor(np.random.rand(3, 3), backend='tensorflow')
with pytest.raises(ValueError, match="kron"):
kron(t1, t2)
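# Quick numerical illustration (a minimal sketch, not one of the original
# tests): for matrices on the numpy backend, kron followed by a reshape
# matches np.kron, mirroring the check in test_kron above.
if __name__ == "__main__":
  a = Tensor(np.eye(2), backend="numpy")
  b = Tensor(np.arange(9.0).reshape(3, 3), backend="numpy")
  np.testing.assert_allclose(
      kron(a, b).reshape((6, 6)).array, np.kron(a.array, b.array))
  print("kron matches np.kron for the numpy backend")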
|
from collections import deque
from copy import deepcopy
from datetime import timedelta
import aiounifi
import pytest
from homeassistant.components.device_tracker import DOMAIN as TRACKER_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.components.switch import DOMAIN as SWITCH_DOMAIN
from homeassistant.components.unifi.const import (
CONF_CONTROLLER,
CONF_SITE_ID,
DEFAULT_ALLOW_BANDWIDTH_SENSORS,
DEFAULT_ALLOW_UPTIME_SENSORS,
DEFAULT_DETECTION_TIME,
DEFAULT_TRACK_CLIENTS,
DEFAULT_TRACK_DEVICES,
DEFAULT_TRACK_WIRED_CLIENTS,
DOMAIN as UNIFI_DOMAIN,
UNIFI_WIRELESS_CLIENTS,
)
from homeassistant.components.unifi.controller import (
SUPPORTED_PLATFORMS,
get_controller,
)
from homeassistant.components.unifi.errors import AuthenticationRequired, CannotConnect
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import MockConfigEntry
CONTROLLER_HOST = {
"hostname": "controller_host",
"ip": "1.2.3.4",
"is_wired": True,
"last_seen": 1562600145,
"mac": "10:00:00:00:00:01",
"name": "Controller host",
"oui": "Producer",
"sw_mac": "00:00:00:00:01:01",
"sw_port": 1,
"wired-rx_bytes": 1234000000,
"wired-tx_bytes": 5678000000,
"uptime": 1562600160,
}
CONTROLLER_DATA = {
CONF_HOST: "1.2.3.4",
CONF_USERNAME: "username",
CONF_PASSWORD: "password",
CONF_PORT: 1234,
CONF_SITE_ID: "site_id",
CONF_VERIFY_SSL: False,
}
ENTRY_CONFIG = {CONF_CONTROLLER: CONTROLLER_DATA}
ENTRY_OPTIONS = {}
CONFIGURATION = []
SITES = {"Site name": {"desc": "Site name", "name": "site_id", "role": "admin"}}
DESCRIPTION = [{"name": "username", "site_name": "site_id", "site_role": "admin"}]
async def setup_unifi_integration(
hass,
config=ENTRY_CONFIG,
options=ENTRY_OPTIONS,
sites=SITES,
site_description=DESCRIPTION,
clients_response=None,
devices_response=None,
clients_all_response=None,
wlans_response=None,
known_wireless_clients=None,
controllers=None,
):
"""Create the UniFi controller."""
assert await async_setup_component(hass, UNIFI_DOMAIN, {})
config_entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data=deepcopy(config),
options=deepcopy(options),
entry_id=1,
)
config_entry.add_to_hass(hass)
if known_wireless_clients:
hass.data[UNIFI_WIRELESS_CLIENTS].update_data(
known_wireless_clients, config_entry
)
mock_client_responses = deque()
if clients_response:
mock_client_responses.append(clients_response)
mock_device_responses = deque()
if devices_response:
mock_device_responses.append(devices_response)
mock_client_all_responses = deque()
if clients_all_response:
mock_client_all_responses.append(clients_all_response)
mock_wlans_responses = deque()
if wlans_response:
mock_wlans_responses.append(wlans_response)
mock_requests = []
async def mock_request(self, method, path, json=None):
mock_requests.append({"method": method, "path": path, "json": json})
if path == "/stat/sta" and mock_client_responses:
return mock_client_responses.popleft()
if path == "/stat/device" and mock_device_responses:
return mock_device_responses.popleft()
if path == "/rest/user" and mock_client_all_responses:
return mock_client_all_responses.popleft()
if path == "/rest/wlanconf" and mock_wlans_responses:
return mock_wlans_responses.popleft()
return {}
with patch("aiounifi.Controller.check_unifi_os", return_value=True), patch(
"aiounifi.Controller.login",
return_value=True,
), patch("aiounifi.Controller.sites", return_value=sites), patch(
"aiounifi.Controller.site_description", return_value=site_description
), patch(
"aiounifi.Controller.request", new=mock_request
), patch.object(
aiounifi.websocket.WSClient, "start", return_value=True
):
await hass.config_entries.async_setup(config_entry.entry_id)
await hass.async_block_till_done()
if config_entry.entry_id not in hass.data[UNIFI_DOMAIN]:
return None
controller = hass.data[UNIFI_DOMAIN][config_entry.entry_id]
controller.mock_client_responses = mock_client_responses
controller.mock_device_responses = mock_device_responses
controller.mock_client_all_responses = mock_client_all_responses
controller.mock_wlans_responses = mock_wlans_responses
controller.mock_requests = mock_requests
return controller
async def test_controller_setup(hass):
"""Successful setup."""
with patch(
"homeassistant.config_entries.ConfigEntries.async_forward_entry_setup",
return_value=True,
) as forward_entry_setup:
controller = await setup_unifi_integration(hass)
entry = controller.config_entry
assert len(forward_entry_setup.mock_calls) == len(SUPPORTED_PLATFORMS)
assert forward_entry_setup.mock_calls[0][1] == (entry, TRACKER_DOMAIN)
assert forward_entry_setup.mock_calls[1][1] == (entry, SENSOR_DOMAIN)
assert forward_entry_setup.mock_calls[2][1] == (entry, SWITCH_DOMAIN)
assert controller.host == CONTROLLER_DATA[CONF_HOST]
assert controller.site == CONTROLLER_DATA[CONF_SITE_ID]
assert controller.site_name in SITES
assert controller.site_role == SITES[controller.site_name]["role"]
assert controller.option_allow_bandwidth_sensors == DEFAULT_ALLOW_BANDWIDTH_SENSORS
assert controller.option_allow_uptime_sensors == DEFAULT_ALLOW_UPTIME_SENSORS
assert isinstance(controller.option_block_clients, list)
assert controller.option_track_clients == DEFAULT_TRACK_CLIENTS
assert controller.option_track_devices == DEFAULT_TRACK_DEVICES
assert controller.option_track_wired_clients == DEFAULT_TRACK_WIRED_CLIENTS
assert controller.option_detection_time == timedelta(seconds=DEFAULT_DETECTION_TIME)
assert isinstance(controller.option_ssid_filter, list)
assert controller.mac is None
assert controller.signal_update == "unifi-update-1.2.3.4-site_id"
assert controller.signal_remove == "unifi-remove-1.2.3.4-site_id"
assert controller.signal_options_update == "unifi-options-1.2.3.4-site_id"
async def test_controller_mac(hass):
"""Test that it is possible to identify controller mac."""
controller = await setup_unifi_integration(hass, clients_response=[CONTROLLER_HOST])
assert controller.mac == CONTROLLER_HOST["mac"]
async def test_controller_not_accessible(hass):
"""Retry to login gets scheduled when connection fails."""
with patch(
"homeassistant.components.unifi.controller.get_controller",
side_effect=CannotConnect,
):
await setup_unifi_integration(hass)
assert hass.data[UNIFI_DOMAIN] == {}
async def test_controller_unknown_error(hass):
"""Unknown errors are handled."""
with patch(
"homeassistant.components.unifi.controller.get_controller",
side_effect=Exception,
):
await setup_unifi_integration(hass)
assert hass.data[UNIFI_DOMAIN] == {}
async def test_reset_after_successful_setup(hass):
"""Calling reset when the entry has been setup."""
controller = await setup_unifi_integration(hass)
assert len(controller.listeners) == 6
result = await controller.async_reset()
await hass.async_block_till_done()
assert result is True
assert len(controller.listeners) == 0
async def test_wireless_client_event_calls_update_wireless_devices(hass):
"""Call update_wireless_devices method when receiving wireless client event."""
controller = await setup_unifi_integration(hass)
with patch(
"homeassistant.components.unifi.controller.UniFiController.update_wireless_clients",
return_value=None,
) as wireless_clients_mock:
controller.api.websocket._data = {
"meta": {"rc": "ok", "message": "events"},
"data": [
{
"datetime": "2020-01-20T19:37:04Z",
"key": aiounifi.events.WIRELESS_CLIENT_CONNECTED,
"msg": "User[11:22:33:44:55:66] has connected to WLAN",
"time": 1579549024893,
}
],
}
controller.api.session_handler("data")
        wireless_clients_mock.assert_called_once()
async def test_get_controller(hass):
"""Successful call."""
with patch("aiounifi.Controller.check_unifi_os", return_value=True), patch(
"aiounifi.Controller.login", return_value=True
):
assert await get_controller(hass, **CONTROLLER_DATA)
async def test_get_controller_verify_ssl_false(hass):
"""Successful call with verify ssl set to false."""
controller_data = dict(CONTROLLER_DATA)
controller_data[CONF_VERIFY_SSL] = False
with patch("aiounifi.Controller.check_unifi_os", return_value=True), patch(
"aiounifi.Controller.login", return_value=True
):
assert await get_controller(hass, **controller_data)
async def test_get_controller_login_failed(hass):
"""Check that get_controller can handle a failed login."""
with patch("aiounifi.Controller.check_unifi_os", return_value=True), patch(
"aiounifi.Controller.login", side_effect=aiounifi.Unauthorized
), pytest.raises(AuthenticationRequired):
await get_controller(hass, **CONTROLLER_DATA)
async def test_get_controller_controller_unavailable(hass):
"""Check that get_controller can handle controller being unavailable."""
with patch("aiounifi.Controller.check_unifi_os", return_value=True), patch(
"aiounifi.Controller.login", side_effect=aiounifi.RequestError
), pytest.raises(CannotConnect):
await get_controller(hass, **CONTROLLER_DATA)
async def test_get_controller_unknown_error(hass):
"""Check that get_controller can handle unknown errors."""
with patch("aiounifi.Controller.check_unifi_os", return_value=True), patch(
"aiounifi.Controller.login", side_effect=aiounifi.AiounifiException
), pytest.raises(AuthenticationRequired):
await get_controller(hass, **CONTROLLER_DATA)
|
import diamond.collector
class XFSCollector(diamond.collector.Collector):
PROC = '/proc/fs/xfs/stat'
def get_default_config_help(self):
config_help = super(XFSCollector, self).get_default_config_help()
config_help.update({
})
return config_help
def get_default_config(self):
"""
Returns the xfs collector settings
"""
config = super(XFSCollector, self).get_default_config()
config.update({
'path': 'xfs'
})
return config
def collect(self):
"""
Collect xfs stats.
For an explanation of the following metrics visit
http://xfs.org/index.php/Runtime_Stats
https://github.com/torvalds/linux/blob/master/fs/xfs/xfs_stats.h
"""
data_structure = {
'extent_alloc': (
'alloc_extent',
'alloc_block',
'free_extent',
'free_block'
),
'abt': (
'lookup',
'compare',
'insrec',
'delrec'
),
'blk_map': (
'read_ops',
'write_ops',
'unmap',
'add_exlist',
'del_exlist',
'look_exlist',
'cmp_exlist'
),
'bmbt': (
'lookup',
'compare',
'insrec',
'delrec'
),
'dir': (
'lookup',
'create',
'remove',
'getdents'
),
'trans': (
'sync',
'async',
'empty'
),
'ig': (
'ig_attempts',
'ig_found',
'ig_frecycle',
'ig_missed',
'ig_dup',
'ig_reclaims',
'ig_attrchg'
),
'log': (
'writes',
'blocks',
'noiclogs',
'force',
'force_sleep'
),
'push_ail': (
'try_logspace',
'sleep_logspace',
'pushes',
'success',
'pushbuf',
'pinned',
'locked',
'flushing',
'restarts',
'flush'
),
'xstrat': (
'quick',
'split'
),
'rw': (
'write_calls',
'read_calls'
),
'attr': (
'get',
'set',
'remove',
'list'
),
'icluster': (
'iflush_count',
'icluster_flushcnt',
'icluster_flushinode'
),
'vnodes': (
'vn_active',
'vn_alloc',
'vn_get',
'vn_hold',
'vn_rele',
'vn_reclaim',
'vn_remove',
'vn_free'
),
'buf': (
'xb_get',
'xb_create',
'xb_get_locked',
'xb_get_locked_waited',
'xb_busy_locked',
'xb_miss_locked',
'xb_page_retries',
'xb_page_found',
'xb_get_read'
),
'abtb2': (
'xs_abtb_2_lookup',
'xs_abtb_2_compare',
'xs_abtb_2_insrec',
'xs_abtb_2_delrec',
'xs_abtb_2_newroot',
'xs_abtb_2_killroot',
'xs_abtb_2_increment',
'xs_abtb_2_decrement',
'xs_abtb_2_lshift',
'xs_abtb_2_rshift',
'xs_abtb_2_split',
'xs_abtb_2_join',
'xs_abtb_2_alloc',
'xs_abtb_2_free',
'xs_abtb_2_moves'
),
'abtc2': (
'xs_abtc_2_lookup',
'xs_abtc_2_compare',
'xs_abtc_2_insrec',
'xs_abtc_2_delrec',
'xs_abtc_2_newroot',
'xs_abtc_2_killroot',
'xs_abtc_2_increment',
'xs_abtc_2_decrement',
'xs_abtc_2_lshift',
'xs_abtc_2_rshift',
'xs_abtc_2_split',
'xs_abtc_2_join',
'xs_abtc_2_alloc',
'xs_abtc_2_free',
'xs_abtc_2_moves'
),
'bmbt2': (
'xs_bmbt_2_lookup',
'xs_bmbt_2_compare',
'xs_bmbt_2_insrec',
'xs_bmbt_2_delrec',
'xs_bmbt_2_newroot',
'xs_bmbt_2_killroot',
'xs_bmbt_2_increment',
'xs_bmbt_2_decrement',
'xs_bmbt_2_lshift',
'xs_bmbt_2_rshift',
'xs_bmbt_2_split',
'xs_bmbt_2_join',
'xs_bmbt_2_alloc',
'xs_bmbt_2_free',
'xs_bmbt_2_moves'
),
'ibt2': (
'lookup',
'compare',
'insrec',
'delrec',
'newroot',
'killroot',
'increment',
'decrement',
'lshift',
'rshift',
'split',
'join',
'alloc',
'free',
'moves'
),
'fibt2': (
'lookup',
'compare',
'insrec',
'delrec',
'newroot',
'killroot',
'increment',
'decrement',
'lshift',
'rshift',
'split',
'join',
'alloc',
'free',
'moves'
),
'qm': (
'xs_qm_dquot',
'xs_qm_dquot_unused'
),
'xpc': (
'xs_xstrat_bytes',
'xs_write_bytes',
'xs_read_bytes'
),
'debug': (
'debug',
)
}
        with open(self.PROC) as f:
            new_stats = f.readlines()
stats = {}
for line in new_stats:
items = line.rstrip().split()
stats[items[0]] = [int(a) for a in items[1:]]
        for key, values in stats.items():
            for index, metric in enumerate(data_structure[key]):
                metric_name = '.'.join([key, metric])
                self.publish_counter(metric_name, values[index])
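        # Illustrative example (values are made up): a /proc/fs/xfs/stat line such as
        #   extent_alloc 4 16 2 8
        # would publish the counters
        #   extent_alloc.alloc_extent=4, extent_alloc.alloc_block=16,
        #   extent_alloc.free_extent=2, extent_alloc.free_block=8
        # under the collector's 'xfs' path.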
|
from __future__ import division
import numpy as np
import chainer
from chainer.backends import cuda
import chainer.functions as F
from chainer import initializers
import chainer.links as L
from chainercv.links.model.fpn.misc import argsort
from chainercv.links.model.fpn.misc import choice
from chainercv.links.model.fpn.misc import exp_clip
from chainercv.links.model.fpn.misc import smooth_l1
from chainercv import utils
class BboxHead(chainer.Chain):
"""Bounding box head network of Feature Pyramid Networks.
Args:
n_class (int): The number of classes including background.
scales (tuple of floats): The scales of feature maps.
"""
_canonical_level = 2
_canonical_scale = 224
_roi_size = 7
_roi_sample_ratio = 2
std = (0.1, 0.2)
def __init__(self, n_class, scales):
super(BboxHead, self).__init__()
fc_init = {
'initialW': Caffe2FCUniform(),
'initial_bias': Caffe2FCUniform(),
}
with self.init_scope():
self.fc1 = L.Linear(1024, **fc_init)
self.fc2 = L.Linear(1024, **fc_init)
self.loc = L.Linear(
n_class * 4, initialW=initializers.Normal(0.001))
self.conf = L.Linear(n_class, initialW=initializers.Normal(0.01))
self._n_class = n_class
self._scales = scales
def forward(self, hs, rois, roi_indices):
"""Calculates RoIs.
Args:
hs (iterable of array): An iterable of feature maps.
rois (list of arrays): A list of arrays of shape: math: `(R_l, 4)`,
where: math: `R_l` is the number of RoIs in the: math: `l`- th
feature map.
roi_indices (list of arrays): A list of arrays of
shape :math:`(R_l,)`.
Returns:
tuple of two arrays:
:obj:`locs` and :obj:`confs`.
            * **locs**: An array whose shape is \
                :math:`(R, n\_class, 4)`, where :math:`R` is the total number \
                of RoIs in the batch.
            * **confs**: An array whose shape is :math:`(R, n\_class)`.
"""
hs_ = []
for l, h in enumerate(hs):
if len(rois[l]) == 0:
continue
h = F.roi_average_align_2d(
h, rois[l], roi_indices[l], self._roi_size,
self._scales[l], self._roi_sample_ratio)
hs_.append(h)
hs = hs_
if len(hs) == 0:
locs = chainer.Variable(
self.xp.empty((0, self._n_class, 4), dtype=np.float32))
confs = chainer.Variable(
self.xp.empty((0, self._n_class), dtype=np.float32))
return locs, confs
h = F.concat(hs, axis=0)
h = F.reshape(h, (h.shape[0], -1))
h = F.relu(self.fc1(h))
h = F.relu(self.fc2(h))
locs = self.loc(h)
locs = F.reshape(locs, (locs.shape[0], -1, 4))
confs = self.conf(h)
return locs, confs
def distribute(self, rois, roi_indices):
"""Assigns Rois to feature maps according to their size.
Args:
rois (array): An array of shape :math:`(R, 4)`, \
where :math:`R` is the total number of RoIs in the given batch.
roi_indices (array): An array of shape :math:`(R,)`.
Returns:
tuple of two lists:
:obj:`rois` and :obj:`roi_indices`.
* **rois**: A list of arrays of shape :math:`(R_l, 4)`, \
where :math:`R_l` is the number of RoIs in the :math:`l`-th \
feature map.
* **roi_indices** : A list of arrays of shape :math:`(R_l,)`.
"""
size = self.xp.sqrt(self.xp.prod(rois[:, 2:] - rois[:, :2], axis=1))
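        # FPN-style level assignment: larger RoIs go to coarser feature maps,
        # via floor(log2(size / canonical_scale)) offset by _canonical_level
        # and clipped to the available pyramid levels.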
level = self.xp.floor(self.xp.log2(
size / self._canonical_scale + 1e-6)).astype(np.int32)
# skip last level
level = self.xp.clip(
level + self._canonical_level, 0, len(self._scales) - 2)
masks = [level == l for l in range(len(self._scales))]
rois = [rois[mask] for mask in masks]
roi_indices = [roi_indices[mask] for mask in masks]
return rois, roi_indices
def decode(self, rois, roi_indices, locs, confs,
scales, sizes, nms_thresh, score_thresh):
"""Decodes back to coordinates of RoIs.
This method decodes :obj:`locs` and :obj:`confs` returned
by a FPN network back to :obj:`bboxes`,
:obj:`labels` and :obj:`scores`.
Args:
rois (iterable of arrays): An iterable of arrays of
shape :math:`(R_l, 4)`, where :math:`R_l` is the number
of RoIs in the :math:`l`-th feature map.
roi_indices (iterable of arrays): An iterable of arrays of
shape :math:`(R_l,)`.
locs (array): An array whose shape is :math:`(R, n\_class, 4)`,
where :math:`R` is the total number of RoIs in the given batch.
confs (array): An array whose shape is :math:`(R, n\_class)`.
scales (list of floats): A list of floats returned
by :meth:`~chainercv.links.model.fpn.faster_rcnn.prepare`
sizes (list of tuples of two ints): A list of
:math:`(H_n, W_n)`, where :math:`H_n` and :math:`W_n`
are height and width of the :math:`n`-th image.
nms_thresh (float): The threshold value
for :func:`~chainercv.utils.non_maximum_suppression`.
score_thresh (float): The threshold value for confidence score.
Returns:
tuple of three list of arrays:
:obj:`bboxes`, :obj:`labels` and :obj:`scores`.
* **bboxes**: A list of float arrays of shape :math:`(R'_n, 4)`, \
where :math:`R'_n` is the number of bounding boxes in \
the :math:`n`-th image. \
Each bounding box is organized by \
:math:`(y_{min}, x_{min}, y_{max}, x_{max})` \
in the second axis.
* **labels** : A list of integer arrays of shape :math:`(R'_n,)`. \
Each value indicates the class of the bounding box. \
Values are in range :math:`[0, L - 1]`, where :math:`L` is the \
number of the foreground classes.
* **scores** : A list of float arrays of shape :math:`(R'_n,)`. \
Each value indicates how confident the prediction is.
"""
rois = self.xp.vstack(rois)
roi_indices = self.xp.hstack(roi_indices)
locs = locs.array
confs = confs.array
bboxes = []
labels = []
scores = []
for i in range(len(scales)):
mask = roi_indices == i
roi = rois[mask]
loc = locs[mask]
conf = confs[mask]
bbox = self.xp.broadcast_to(roi[:, None], loc.shape) / scales[i]
# tlbr -> yxhw
bbox[:, :, 2:] -= bbox[:, :, :2]
bbox[:, :, :2] += bbox[:, :, 2:] / 2
# offset
bbox[:, :, :2] += loc[:, :, :2] * bbox[:, :, 2:] * self.std[0]
bbox[:, :, 2:] *= self.xp.exp(
self.xp.minimum(loc[:, :, 2:] * self.std[1], exp_clip))
# yxhw -> tlbr
bbox[:, :, :2] -= bbox[:, :, 2:] / 2
bbox[:, :, 2:] += bbox[:, :, :2]
# clip
bbox[:, :, :2] = self.xp.maximum(bbox[:, :, :2], 0)
bbox[:, :, 2:] = self.xp.minimum(
bbox[:, :, 2:], self.xp.array(sizes[i]))
conf = self.xp.exp(conf)
score = conf / self.xp.sum(conf, axis=1, keepdims=True)
bbox, label, score = _suppress(
bbox, score, nms_thresh, score_thresh)
bboxes.append(bbox)
labels.append(label)
scores.append(score)
return bboxes, labels, scores
def bbox_head_loss_pre(rois, roi_indices, std, bboxes, labels):
"""Loss function for Head (pre).
This function processes RoIs for :func:`bbox_head_loss_post`.
Args:
rois (iterable of arrays): An iterable of arrays of
shape :math:`(R_l, 4)`, where :math:`R_l` is the number
of RoIs in the :math:`l`-th feature map.
roi_indices (iterable of arrays): An iterable of arrays of
shape :math:`(R_l,)`.
std (tuple of floats): Two coefficients used for encoding
bounding boxes.
bboxes (list of arrays): A list of arrays whose shape is
:math:`(R_n, 4)`, where :math:`R_n` is the number of
ground truth bounding boxes.
labels (list of arrays): A list of arrays whose shape is
:math:`(R_n,)`.
Returns:
tuple of four lists:
:obj:`rois`, :obj:`roi_indices`, :obj:`gt_locs`, and :obj:`gt_labels`.
* **rois**: A list of arrays of shape :math:`(R'_l, 4)`, \
where :math:`R'_l` is the number of RoIs in the :math:`l`-th \
feature map.
* **roi_indices**: A list of arrays of shape :math:`(R'_l,)`.
        * **gt_locs**: A list of arrays of shape :math:`(R'_l, 4)` \
            indicating the bounding boxes of ground truth.
        * **gt_labels**: A list of arrays of shape :math:`(R'_l,)` \
            indicating the classes of ground truth.
"""
thresh = 0.5
batchsize_per_image = 512
fg_ratio = 0.25
xp = cuda.get_array_module(*rois)
n_level = len(rois)
    roi_levels = xp.hstack(
        [xp.array((l,) * len(rois[l])) for l in range(n_level)]).astype(np.int32)
rois = xp.vstack(rois).astype(np.float32)
roi_indices = xp.hstack(roi_indices).astype(np.int32)
rois_yx = (rois[:, 2:] + rois[:, :2]) / 2
rois_hw = rois[:, 2:] - rois[:, :2]
indices = np.unique(cuda.to_cpu(roi_indices))
gt_locs = xp.empty_like(rois)
gt_labels = xp.empty_like(roi_indices)
for i in indices:
mask = roi_indices == i
if len(bboxes[i]) > 0:
iou = utils.bbox_iou(rois[mask], bboxes[i])
gt_index = iou.argmax(axis=1)
gt_loc = bboxes[i][gt_index].copy()
else:
gt_loc = xp.empty_like(rois[mask])
# tlbr -> yxhw
gt_loc[:, 2:] -= gt_loc[:, :2]
gt_loc[:, :2] += gt_loc[:, 2:] / 2
# offset
gt_loc[:, :2] = (gt_loc[:, :2] - rois_yx[mask]) / \
rois_hw[mask] / std[0]
gt_loc[:, 2:] = xp.log(gt_loc[:, 2:] / rois_hw[mask]) / std[1]
if len(bboxes[i]) > 0:
gt_label = labels[i][gt_index] + 1
gt_label[iou.max(axis=1) < thresh] = 0
else:
gt_label = xp.zeros(int(mask.sum()), dtype=np.int32)
fg_index = xp.where(gt_label > 0)[0]
n_fg = int(batchsize_per_image * fg_ratio)
if len(fg_index) > n_fg:
gt_label[choice(fg_index, size=len(fg_index) - n_fg)] = -1
bg_index = xp.where(gt_label == 0)[0]
n_bg = batchsize_per_image - int((gt_label > 0).sum())
if len(bg_index) > n_bg:
gt_label[choice(bg_index, size=len(bg_index) - n_bg)] = -1
gt_locs[mask] = gt_loc
gt_labels[mask] = gt_label
mask = gt_labels >= 0
rois = rois[mask]
roi_indices = roi_indices[mask]
roi_levels = roi_levels[mask]
gt_locs = gt_locs[mask]
gt_labels = gt_labels[mask]
masks = [roi_levels == l for l in range(n_level)]
rois = [rois[m] for m in masks]
roi_indices = [roi_indices[m] for m in masks]
gt_locs = [gt_locs[m] for m in masks]
gt_labels = [gt_labels[m] for m in masks]
return rois, roi_indices, gt_locs, gt_labels
def bbox_head_loss_post(
locs, confs, roi_indices, gt_locs, gt_labels, batchsize):
"""Loss function for Head (post).
Args:
locs (array): An array whose shape is :math:`(R, n\_class, 4)`,
where :math:`R` is the total number of RoIs in the given batch.
        confs (array): An array whose shape is
            :math:`(R, n\_class)`.
        roi_indices (list of arrays): A list of arrays returned by
            :func:`bbox_head_loss_pre`.
        gt_locs (list of arrays): A list of arrays returned by
            :func:`bbox_head_loss_pre`.
        gt_labels (list of arrays): A list of arrays returned by
            :func:`bbox_head_loss_pre`.
batchsize (int): The size of batch.
Returns:
tuple of two variables:
:obj:`loc_loss` and :obj:`conf_loss`.
"""
xp = cuda.get_array_module(locs.array, confs.array)
roi_indices = xp.hstack(roi_indices).astype(np.int32)
gt_locs = xp.vstack(gt_locs).astype(np.float32)
gt_labels = xp.hstack(gt_labels).astype(np.int32)
loc_loss = 0
conf_loss = 0
for i in np.unique(cuda.to_cpu(roi_indices)):
mask = roi_indices == i
gt_loc = gt_locs[mask]
gt_label = gt_labels[mask]
n_sample = mask.sum()
loc_loss += F.sum(smooth_l1(
locs[mask][xp.where(gt_label > 0)[0], gt_label[gt_label > 0]],
gt_loc[gt_label > 0], 1)) / n_sample
conf_loss += F.softmax_cross_entropy(confs[mask], gt_label)
loc_loss /= batchsize
conf_loss /= batchsize
return loc_loss, conf_loss
class Caffe2FCUniform(chainer.initializer.Initializer):
"""Initializer used in Caffe2.
"""
def __call__(self, array):
scale = 1 / np.sqrt(array.shape[-1])
initializers.Uniform(scale)(array)
def _suppress(raw_bbox, raw_score, nms_thresh, score_thresh):
xp = cuda.get_array_module(raw_bbox, raw_score)
bbox = []
label = []
score = []
for l in range(raw_score.shape[1] - 1):
bbox_l = raw_bbox[:, l + 1]
score_l = raw_score[:, l + 1]
mask = score_l >= score_thresh
bbox_l = bbox_l[mask]
score_l = score_l[mask]
order = argsort(-score_l)
bbox_l = bbox_l[order]
score_l = score_l[order]
indices = utils.non_maximum_suppression(bbox_l, nms_thresh)
bbox_l = bbox_l[indices]
score_l = score_l[indices]
bbox.append(bbox_l)
label.append(xp.array((l,) * len(bbox_l)))
score.append(score_l)
bbox = xp.vstack(bbox).astype(np.float32)
label = xp.hstack(label).astype(np.int32)
score = xp.hstack(score).astype(np.float32)
return bbox, label, score
|
import logging
from homeassistant.components.binary_sensor import BinarySensorEntity
from .const import BSH_DOOR_STATE, DOMAIN
from .entity import HomeConnectEntity
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Home Connect binary sensor."""
def get_entities():
entities = []
hc_api = hass.data[DOMAIN][config_entry.entry_id]
for device_dict in hc_api.devices:
entity_dicts = device_dict.get("entities", {}).get("binary_sensor", [])
entities += [HomeConnectBinarySensor(**d) for d in entity_dicts]
return entities
async_add_entities(await hass.async_add_executor_job(get_entities), True)
class HomeConnectBinarySensor(HomeConnectEntity, BinarySensorEntity):
"""Binary sensor for Home Connect."""
def __init__(self, device, desc, device_class):
"""Initialize the entity."""
super().__init__(device, desc)
self._device_class = device_class
self._state = None
@property
def is_on(self):
"""Return true if the binary sensor is on."""
return bool(self._state)
@property
def available(self):
"""Return true if the binary sensor is available."""
return self._state is not None
async def async_update(self):
"""Update the binary sensor's status."""
state = self.device.appliance.status.get(BSH_DOOR_STATE, {})
if not state:
self._state = None
elif state.get("value") in [
"BSH.Common.EnumType.DoorState.Closed",
"BSH.Common.EnumType.DoorState.Locked",
]:
self._state = False
elif state.get("value") == "BSH.Common.EnumType.DoorState.Open":
self._state = True
else:
_LOGGER.warning("Unexpected value for HomeConnect door state: %s", state)
self._state = None
_LOGGER.debug("Updated, new state: %s", self._state)
@property
def device_class(self):
"""Return the device class."""
return self._device_class
|
from unittest import expectedFailure
from stash.tests.stashtest import StashTestCase
class EchoTests(StashTestCase):
"""tests for the 'echo' command."""
def do_echo(self, s):
"""echo a string and return the echoed output."""
return self.run_command("echo " + s, exitcode=0)
def test_simple(self):
"""test 'echo test'"""
o = self.do_echo("test")
self.assertEqual(o, "test\n")
def test_multi(self):
"""test 'echo test1 test2 test:'"""
o = self.do_echo("test1 test2 test3")
self.assertEqual(o, "test1 test2 test3\n")
def test_help_ignore(self):
"""test that -h and --help will be ignored by echo."""
ho = self.do_echo("-h")
self.assertEqual(ho, "-h\n")
helpo = self.do_echo("--help")
self.assertEqual(helpo, "--help\n")
def test_empty(self):
"""test the behavior without arguments."""
output = self.run_command("echo", exitcode=0)
self.assertEqual(output, "\n")
@expectedFailure
def test_non_ascii(self):
"""test echo with non-ascii characters."""
output = self.do_echo(u"Non-Ascii: äöüß end")
self.assertEqual(output, u"Non-Ascii: äöüß end\n")
|
import os.path
import coverage
class Plugin(coverage.CoveragePlugin):
"""A file tracer plugin for testing."""
def file_tracer(self, filename):
if "render.py" in filename:
return RenderFileTracer()
return None
def file_reporter(self, filename):
return FileReporter(filename)
class RenderFileTracer(coverage.FileTracer):
"""A FileTracer using information from the caller."""
def has_dynamic_source_filename(self):
return True
def dynamic_source_filename(self, filename, frame):
if frame.f_code.co_name != "render":
return None
source_filename = os.path.abspath(frame.f_locals['filename'])
return source_filename
def line_number_range(self, frame):
lineno = frame.f_locals['linenum']
return lineno, lineno+1
class FileReporter(coverage.FileReporter):
"""A goofy file reporter."""
def lines(self):
# Goofy test arrangement: claim that the file has as many lines as the
# number in its name.
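        # e.g. a traced file named "foo_7.html" is reported as lines 1 through 7.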
num = os.path.basename(self.filename).split(".")[0].split("_")[1]
return set(range(1, int(num)+1))
def coverage_init(reg, options): # pylint: disable=unused-argument
"""Called by coverage to initialize the plugins here."""
reg.add_file_tracer(Plugin())
|
import os
import operator
from typing import cast, Any, List, Optional, Tuple, Union
from PyQt5.QtGui import QFont
from PyQt5.QtWebEngineWidgets import QWebEngineSettings, QWebEngineProfile
from qutebrowser.browser.webengine import spell, webenginequtescheme
from qutebrowser.config import config, websettings
from qutebrowser.config.websettings import AttributeInfo as Attr
from qutebrowser.utils import standarddir, qtutils, message, log, urlmatch, usertypes
# The default QWebEngineProfile
default_profile = cast(QWebEngineProfile, None)
# The QWebEngineProfile used for private (off-the-record) windows
private_profile: Optional[QWebEngineProfile] = None
# The global WebEngineSettings object
global_settings = cast('WebEngineSettings', None)
parsed_user_agent = None
class _SettingsWrapper:
"""Expose a QWebEngineSettings interface which acts on all profiles.
For read operations, the default profile value is always used.
"""
def _settings(self):
yield default_profile.settings()
if private_profile:
yield private_profile.settings()
def setAttribute(self, attribute, on):
for settings in self._settings():
settings.setAttribute(attribute, on)
def setFontFamily(self, which, family):
for settings in self._settings():
settings.setFontFamily(which, family)
def setFontSize(self, fonttype, size):
for settings in self._settings():
settings.setFontSize(fonttype, size)
def setDefaultTextEncoding(self, encoding):
for settings in self._settings():
settings.setDefaultTextEncoding(encoding)
def setUnknownUrlSchemePolicy(self, policy):
for settings in self._settings():
settings.setUnknownUrlSchemePolicy(policy)
def testAttribute(self, attribute):
return default_profile.settings().testAttribute(attribute)
def fontSize(self, fonttype):
return default_profile.settings().fontSize(fonttype)
def fontFamily(self, which):
return default_profile.settings().fontFamily(which)
def defaultTextEncoding(self):
return default_profile.settings().defaultTextEncoding()
def unknownUrlSchemePolicy(self):
return default_profile.settings().unknownUrlSchemePolicy()
class WebEngineSettings(websettings.AbstractSettings):
"""A wrapper for the config for QWebEngineSettings."""
_ATTRIBUTES = {
'content.xss_auditing':
Attr(QWebEngineSettings.XSSAuditingEnabled),
'content.images':
Attr(QWebEngineSettings.AutoLoadImages),
'content.javascript.enabled':
Attr(QWebEngineSettings.JavascriptEnabled),
'content.javascript.can_open_tabs_automatically':
Attr(QWebEngineSettings.JavascriptCanOpenWindows),
'content.javascript.can_access_clipboard':
Attr(QWebEngineSettings.JavascriptCanAccessClipboard),
'content.plugins':
Attr(QWebEngineSettings.PluginsEnabled),
'content.hyperlink_auditing':
Attr(QWebEngineSettings.HyperlinkAuditingEnabled),
'content.local_content_can_access_remote_urls':
Attr(QWebEngineSettings.LocalContentCanAccessRemoteUrls),
'content.local_content_can_access_file_urls':
Attr(QWebEngineSettings.LocalContentCanAccessFileUrls),
'content.webgl':
Attr(QWebEngineSettings.WebGLEnabled),
'content.local_storage':
Attr(QWebEngineSettings.LocalStorageEnabled),
'content.desktop_capture':
Attr(QWebEngineSettings.ScreenCaptureEnabled,
converter=lambda val: True if val == 'ask' else val),
# 'ask' is handled via the permission system
'input.spatial_navigation':
Attr(QWebEngineSettings.SpatialNavigationEnabled),
'input.links_included_in_focus_chain':
Attr(QWebEngineSettings.LinksIncludedInFocusChain),
'scrolling.smooth':
Attr(QWebEngineSettings.ScrollAnimatorEnabled),
'content.print_element_backgrounds':
Attr(QWebEngineSettings.PrintElementBackgrounds),
'content.autoplay':
Attr(QWebEngineSettings.PlaybackRequiresUserGesture,
converter=operator.not_),
'content.dns_prefetch':
Attr(QWebEngineSettings.DnsPrefetchEnabled),
}
_FONT_SIZES = {
'fonts.web.size.minimum':
QWebEngineSettings.MinimumFontSize,
'fonts.web.size.minimum_logical':
QWebEngineSettings.MinimumLogicalFontSize,
'fonts.web.size.default':
QWebEngineSettings.DefaultFontSize,
'fonts.web.size.default_fixed':
QWebEngineSettings.DefaultFixedFontSize,
}
_FONT_FAMILIES = {
'fonts.web.family.standard': QWebEngineSettings.StandardFont,
'fonts.web.family.fixed': QWebEngineSettings.FixedFont,
'fonts.web.family.serif': QWebEngineSettings.SerifFont,
'fonts.web.family.sans_serif': QWebEngineSettings.SansSerifFont,
'fonts.web.family.cursive': QWebEngineSettings.CursiveFont,
'fonts.web.family.fantasy': QWebEngineSettings.FantasyFont,
}
_UNKNOWN_URL_SCHEME_POLICY = {
'disallow':
QWebEngineSettings.DisallowUnknownUrlSchemes,
'allow-from-user-interaction':
QWebEngineSettings.AllowUnknownUrlSchemesFromUserInteraction,
'allow-all':
QWebEngineSettings.AllowAllUnknownUrlSchemes,
}
# Mapping from WebEngineSettings::initDefaults in
# qtwebengine/src/core/web_engine_settings.cpp
_FONT_TO_QFONT = {
QWebEngineSettings.StandardFont: QFont.Serif,
QWebEngineSettings.FixedFont: QFont.Monospace,
QWebEngineSettings.SerifFont: QFont.Serif,
QWebEngineSettings.SansSerifFont: QFont.SansSerif,
QWebEngineSettings.CursiveFont: QFont.Cursive,
QWebEngineSettings.FantasyFont: QFont.Fantasy,
}
def set_unknown_url_scheme_policy(
self, policy: Union[str, usertypes.Unset]) -> bool:
"""Set the UnknownUrlSchemePolicy to use.
Return:
True if there was a change, False otherwise.
"""
old_value = self._settings.unknownUrlSchemePolicy()
if isinstance(policy, usertypes.Unset):
self._settings.resetUnknownUrlSchemePolicy()
new_value = self._settings.unknownUrlSchemePolicy()
else:
new_value = self._UNKNOWN_URL_SCHEME_POLICY[policy]
self._settings.setUnknownUrlSchemePolicy(new_value)
return old_value != new_value
def _update_setting(self, setting, value):
if setting == 'content.unknown_url_scheme_policy':
return self.set_unknown_url_scheme_policy(value)
return super()._update_setting(setting, value)
def init_settings(self):
super().init_settings()
self.update_setting('content.unknown_url_scheme_policy')
class ProfileSetter:
"""Helper to set various settings on a profile."""
def __init__(self, profile):
self._profile = profile
def init_profile(self):
"""Initialize settings on the given profile."""
self.set_http_headers()
self.set_http_cache_size()
self._set_hardcoded_settings()
self.set_dictionary_language()
def _set_hardcoded_settings(self):
"""Set up settings with a fixed value."""
settings = self._profile.settings()
settings.setAttribute(
QWebEngineSettings.FullScreenSupportEnabled, True)
settings.setAttribute(
QWebEngineSettings.FocusOnNavigationEnabled, False)
try:
settings.setAttribute(QWebEngineSettings.PdfViewerEnabled, False)
except AttributeError:
# Added in Qt 5.13
pass
def set_http_headers(self):
"""Set the user agent and accept-language for the given profile.
We override those per request in the URL interceptor (to allow for
per-domain values), but this one still gets used for things like
window.navigator.userAgent/.languages in JS.
"""
user_agent = websettings.user_agent()
self._profile.setHttpUserAgent(user_agent)
accept_language = config.val.content.headers.accept_language
if accept_language is not None:
self._profile.setHttpAcceptLanguage(accept_language)
def set_http_cache_size(self):
"""Initialize the HTTP cache size for the given profile."""
size = config.val.content.cache.size
if size is None:
size = 0
else:
size = qtutils.check_overflow(size, 'int', fatal=False)
# 0: automatically managed by QtWebEngine
self._profile.setHttpCacheMaximumSize(size)
def set_persistent_cookie_policy(self):
"""Set the HTTP Cookie size for the given profile."""
assert not self._profile.isOffTheRecord()
if config.val.content.cookies.store:
value = QWebEngineProfile.AllowPersistentCookies
else:
value = QWebEngineProfile.NoPersistentCookies
self._profile.setPersistentCookiesPolicy(value)
def set_dictionary_language(self, warn=True):
"""Load the given dictionaries."""
filenames = []
for code in config.val.spellcheck.languages or []:
local_filename = spell.local_filename(code)
if not local_filename:
if warn:
message.warning("Language {} is not installed - see "
"scripts/dictcli.py in qutebrowser's "
"sources".format(code))
continue
filenames.append(os.path.splitext(local_filename)[0])
log.config.debug("Found dicts: {}".format(filenames))
self._profile.setSpellCheckLanguages(filenames)
self._profile.setSpellCheckEnabled(bool(filenames))
def _update_settings(option):
"""Update global settings when qwebsettings changed."""
global_settings.update_setting(option)
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-75884
# (note this isn't actually fixed properly before Qt 5.15)
header_bug_fixed = qtutils.version_check('5.15', compiled=False)
if option in ['content.headers.user_agent',
'content.headers.accept_language'] and header_bug_fixed:
default_profile.setter.set_http_headers()
if private_profile:
private_profile.setter.set_http_headers()
elif option == 'content.cache.size':
default_profile.setter.set_http_cache_size()
if private_profile:
private_profile.setter.set_http_cache_size()
elif option == 'content.cookies.store':
default_profile.setter.set_persistent_cookie_policy()
# We're not touching the private profile's cookie policy.
elif option == 'spellcheck.languages':
default_profile.setter.set_dictionary_language()
if private_profile:
private_profile.setter.set_dictionary_language(warn=False)
def _init_user_agent_str(ua):
global parsed_user_agent
parsed_user_agent = websettings.UserAgent.parse(ua)
def init_user_agent():
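    """Initialize the parsed user agent from the default profile's HTTP user agent."""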
_init_user_agent_str(QWebEngineProfile.defaultProfile().httpUserAgent())
def _init_default_profile():
"""Init the default QWebEngineProfile."""
global default_profile
default_profile = QWebEngineProfile.defaultProfile()
init_user_agent()
default_profile.setter = ProfileSetter( # type: ignore[attr-defined]
default_profile)
default_profile.setCachePath(
os.path.join(standarddir.cache(), 'webengine'))
default_profile.setPersistentStoragePath(
os.path.join(standarddir.data(), 'webengine'))
default_profile.setter.init_profile()
default_profile.setter.set_persistent_cookie_policy()
def init_private_profile():
"""Init the private QWebEngineProfile."""
global private_profile
if not qtutils.is_single_process():
private_profile = QWebEngineProfile()
private_profile.setter = ProfileSetter( # type: ignore[attr-defined]
private_profile)
assert private_profile.isOffTheRecord()
private_profile.setter.init_profile()
def _init_site_specific_quirks():
"""Add custom user-agent settings for problematic sites.
See https://github.com/qutebrowser/qutebrowser/issues/4810
"""
if not config.val.content.site_specific_quirks:
return
# Please leave this here as a template for new UAs.
# default_ua = ("Mozilla/5.0 ({os_info}) "
# "AppleWebKit/{webkit_version} (KHTML, like Gecko) "
# "{qt_key}/{qt_version} "
# "{upstream_browser_key}/{upstream_browser_version} "
# "Safari/{webkit_version}")
no_qtwe_ua = ("Mozilla/5.0 ({os_info}) "
"AppleWebKit/{webkit_version} (KHTML, like Gecko) "
"{upstream_browser_key}/{upstream_browser_version} "
"Safari/{webkit_version}")
new_chrome_ua = ("Mozilla/5.0 ({os_info}) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/99 "
"Safari/537.36")
edge_ua = ("Mozilla/5.0 ({os_info}) "
"AppleWebKit/{webkit_version} (KHTML, like Gecko) "
"{upstream_browser_key}/{upstream_browser_version} "
"Safari/{webkit_version} "
"Edg/{upstream_browser_version}")
user_agents = {
# Needed to avoid a ""WhatsApp works with Google Chrome 36+" error
# page which doesn't allow to use WhatsApp Web at all. Also see the
# additional JS quirk: qutebrowser/javascript/whatsapp_web_quirk.user.js
# https://github.com/qutebrowser/qutebrowser/issues/4445
'https://web.whatsapp.com/': no_qtwe_ua,
# Needed to avoid a "you're using a browser [...] that doesn't allow us
# to keep your account secure" error.
# https://github.com/qutebrowser/qutebrowser/issues/5182
'https://accounts.google.com/*': edge_ua,
        # Needed because Slack rather aggressively shows an error which
        # prevents using it, despite things actually working fine.
# September 2020: Qt 5.12 works, but Qt <= 5.11 shows the error.
# https://github.com/qutebrowser/qutebrowser/issues/4669
'https://*.slack.com/*': new_chrome_ua,
}
for pattern, ua in user_agents.items():
config.instance.set_obj('content.headers.user_agent', ua,
pattern=urlmatch.UrlPattern(pattern),
hide_userconfig=True)
def _init_devtools_settings():
"""Make sure the devtools always get images/JS permissions."""
settings: List[Tuple[str, Any]] = [
('content.javascript.enabled', True),
('content.images', True),
('content.cookies.accept', 'all'),
]
for setting, value in settings:
for pattern in ['chrome-devtools://*', 'devtools://*']:
config.instance.set_obj(setting, value,
pattern=urlmatch.UrlPattern(pattern),
hide_userconfig=True)
def init():
"""Initialize the global QWebSettings."""
webenginequtescheme.init()
spell.init()
_init_default_profile()
init_private_profile()
config.instance.changed.connect(_update_settings)
global global_settings
global_settings = WebEngineSettings(_SettingsWrapper())
global_settings.init_settings()
_init_site_specific_quirks()
_init_devtools_settings()
def shutdown():
pass
|
from lightwave.lightwave import LWLink
import voluptuous as vol
from homeassistant.components.climate import DOMAIN as CLIMATE_DOMAIN
from homeassistant.components.sensor import DOMAIN as SENSOR_DOMAIN
from homeassistant.const import CONF_HOST, CONF_LIGHTS, CONF_NAME, CONF_SWITCHES
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import async_load_platform
CONF_SERIAL = "serial"
CONF_PROXY_IP = "proxy_ip"
CONF_PROXY_PORT = "proxy_port"
CONF_TRV = "trv"
CONF_TRVS = "trvs"
DEFAULT_PROXY_PORT = 7878
DEFAULT_PROXY_IP = "127.0.0.1"
DOMAIN = "lightwave"
LIGHTWAVE_LINK = f"{DOMAIN}_link"
LIGHTWAVE_TRV_PROXY = f"{DOMAIN}_proxy"
LIGHTWAVE_TRV_PROXY_PORT = f"{DOMAIN}_proxy_port"
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.Schema(
vol.All(
cv.has_at_least_one_key(CONF_LIGHTS, CONF_SWITCHES, CONF_TRV),
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_LIGHTS, default={}): {
cv.string: vol.Schema({vol.Required(CONF_NAME): cv.string})
},
vol.Optional(CONF_SWITCHES, default={}): {
cv.string: vol.Schema({vol.Required(CONF_NAME): cv.string})
},
vol.Optional(CONF_TRV, default={}): {
vol.Optional(
CONF_PROXY_PORT, default=DEFAULT_PROXY_PORT
): cv.port,
vol.Optional(
CONF_PROXY_IP, default=DEFAULT_PROXY_IP
): cv.string,
vol.Required(CONF_TRVS, default={}): {
cv.string: vol.Schema(
{
vol.Required(CONF_NAME): cv.string,
vol.Required(CONF_SERIAL): cv.string,
}
)
},
},
},
)
)
},
extra=vol.ALLOW_EXTRA,
)
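# Illustrative configuration.yaml snippet accepted by the schema above
# (device ids and names are made-up examples):
#
#   lightwave:
#     host: 192.168.1.2
#     lights:
#       R1D1:
#         name: Bedroom Light
#     switches:
#       R1D2:
#         name: Kettle
#     trv:
#       trvs:
#         R1D3:
#           name: Radiator
#           serial: A1B2C3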
async def async_setup(hass, config):
"""Try to start embedded Lightwave broker."""
host = config[DOMAIN][CONF_HOST]
lwlink = LWLink(host)
hass.data[LIGHTWAVE_LINK] = lwlink
lights = config[DOMAIN][CONF_LIGHTS]
if lights:
hass.async_create_task(
async_load_platform(hass, "light", DOMAIN, lights, config)
)
switches = config[DOMAIN][CONF_SWITCHES]
if switches:
hass.async_create_task(
async_load_platform(hass, "switch", DOMAIN, switches, config)
)
trv = config[DOMAIN][CONF_TRV]
if trv:
trvs = trv[CONF_TRVS]
proxy_ip = trv[CONF_PROXY_IP]
proxy_port = trv[CONF_PROXY_PORT]
lwlink.set_trv_proxy(proxy_ip, proxy_port)
platforms = [CLIMATE_DOMAIN, SENSOR_DOMAIN]
for platform in platforms:
hass.async_create_task(
async_load_platform(hass, platform, DOMAIN, trvs, config)
)
return True
|
import asyncio
from haffmpeg.camera import CameraMjpeg
from haffmpeg.tools import IMAGE_JPEG, ImageFrame
from onvif.exceptions import ONVIFError
import voluptuous as vol
from homeassistant.components.camera import SUPPORT_STREAM, Camera
from homeassistant.components.ffmpeg import CONF_EXTRA_ARGUMENTS, DATA_FFMPEG
from homeassistant.const import HTTP_BASIC_AUTHENTICATION
from homeassistant.helpers import config_validation as cv, entity_platform
from homeassistant.helpers.aiohttp_client import async_aiohttp_proxy_stream
from .base import ONVIFBaseEntity
from .const import (
ABSOLUTE_MOVE,
ATTR_CONTINUOUS_DURATION,
ATTR_DISTANCE,
ATTR_MOVE_MODE,
ATTR_PAN,
ATTR_PRESET,
ATTR_SPEED,
ATTR_TILT,
ATTR_ZOOM,
CONF_RTSP_TRANSPORT,
CONF_SNAPSHOT_AUTH,
CONTINUOUS_MOVE,
DIR_DOWN,
DIR_LEFT,
DIR_RIGHT,
DIR_UP,
DOMAIN,
GOTOPRESET_MOVE,
LOGGER,
RELATIVE_MOVE,
SERVICE_PTZ,
ZOOM_IN,
ZOOM_OUT,
)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the ONVIF camera video stream."""
platform = entity_platform.current_platform.get()
# Create PTZ service
platform.async_register_entity_service(
SERVICE_PTZ,
{
vol.Optional(ATTR_PAN): vol.In([DIR_LEFT, DIR_RIGHT]),
vol.Optional(ATTR_TILT): vol.In([DIR_UP, DIR_DOWN]),
vol.Optional(ATTR_ZOOM): vol.In([ZOOM_OUT, ZOOM_IN]),
vol.Optional(ATTR_DISTANCE, default=0.1): cv.small_float,
vol.Optional(ATTR_SPEED, default=0.5): cv.small_float,
vol.Optional(ATTR_MOVE_MODE, default=RELATIVE_MOVE): vol.In(
[CONTINUOUS_MOVE, RELATIVE_MOVE, ABSOLUTE_MOVE, GOTOPRESET_MOVE]
),
vol.Optional(ATTR_CONTINUOUS_DURATION, default=0.5): cv.small_float,
vol.Optional(ATTR_PRESET, default="0"): cv.string,
},
"async_perform_ptz",
)
device = hass.data[DOMAIN][config_entry.unique_id]
async_add_entities(
[ONVIFCameraEntity(device, profile) for profile in device.profiles]
)
return True
class ONVIFCameraEntity(ONVIFBaseEntity, Camera):
"""Representation of an ONVIF camera."""
def __init__(self, device, profile):
"""Initialize ONVIF camera entity."""
ONVIFBaseEntity.__init__(self, device, profile)
Camera.__init__(self)
self.stream_options[CONF_RTSP_TRANSPORT] = device.config_entry.options.get(
CONF_RTSP_TRANSPORT
)
self._basic_auth = (
device.config_entry.data.get(CONF_SNAPSHOT_AUTH)
== HTTP_BASIC_AUTHENTICATION
)
self._stream_uri = None
@property
def supported_features(self) -> int:
"""Return supported features."""
return SUPPORT_STREAM
@property
def name(self) -> str:
"""Return the name of this camera."""
return f"{self.device.name} - {self.profile.name}"
@property
def unique_id(self) -> str:
"""Return a unique ID."""
if self.profile.index:
return f"{self.device.info.mac or self.device.info.serial_number}_{self.profile.index}"
return self.device.info.mac or self.device.info.serial_number
@property
def entity_registry_enabled_default(self) -> bool:
"""Return if the entity should be enabled when first added to the entity registry."""
return self.device.max_resolution == self.profile.video.resolution.width
async def stream_source(self):
"""Return the stream source."""
return self._stream_uri
async def async_camera_image(self):
"""Return a still image response from the camera."""
image = None
if self.device.capabilities.snapshot:
try:
image = await self.device.device.get_snapshot(
self.profile.token, self._basic_auth
)
except ONVIFError as err:
LOGGER.error(
"Fetch snapshot image failed from %s, falling back to FFmpeg; %s",
self.device.name,
err,
)
if image is None:
ffmpeg = ImageFrame(self.hass.data[DATA_FFMPEG].binary, loop=self.hass.loop)
image = await asyncio.shield(
ffmpeg.get_image(
self._stream_uri,
output_format=IMAGE_JPEG,
extra_cmd=self.device.config_entry.options.get(
CONF_EXTRA_ARGUMENTS
),
)
)
return image
async def handle_async_mjpeg_stream(self, request):
"""Generate an HTTP MJPEG stream from the camera."""
LOGGER.debug("Handling mjpeg stream from camera '%s'", self.device.name)
ffmpeg_manager = self.hass.data[DATA_FFMPEG]
stream = CameraMjpeg(ffmpeg_manager.binary, loop=self.hass.loop)
await stream.open_camera(
self._stream_uri,
extra_cmd=self.device.config_entry.options.get(CONF_EXTRA_ARGUMENTS),
)
try:
stream_reader = await stream.get_reader()
return await async_aiohttp_proxy_stream(
self.hass,
request,
stream_reader,
ffmpeg_manager.ffmpeg_stream_content_type,
)
finally:
await stream.close()
async def async_added_to_hass(self):
"""Run when entity about to be added to hass."""
uri_no_auth = await self.device.async_get_stream_uri(self.profile)
self._stream_uri = uri_no_auth.replace(
"rtsp://", f"rtsp://{self.device.username}:{self.device.password}@", 1
)
async def async_perform_ptz(
self,
distance,
speed,
move_mode,
continuous_duration,
preset,
pan=None,
tilt=None,
zoom=None,
) -> None:
"""Perform a PTZ action on the camera."""
await self.device.async_perform_ptz(
self.profile,
distance,
speed,
move_mode,
continuous_duration,
preset,
pan,
tilt,
zoom,
)
|
import asyncio
import io
import logging
import aiohttp
import hangups
from hangups import ChatMessageEvent, ChatMessageSegment, Client, get_auth, hangouts_pb2
from homeassistant.const import HTTP_OK
from homeassistant.core import callback
from homeassistant.helpers import dispatcher, intent
from homeassistant.helpers.aiohttp_client import async_get_clientsession
from .const import (
ATTR_DATA,
ATTR_MESSAGE,
ATTR_TARGET,
CONF_CONVERSATION_ID,
CONF_CONVERSATION_NAME,
CONF_CONVERSATIONS,
CONF_MATCHERS,
DOMAIN,
EVENT_HANGOUTS_CONNECTED,
EVENT_HANGOUTS_CONVERSATIONS_CHANGED,
EVENT_HANGOUTS_CONVERSATIONS_RESOLVED,
EVENT_HANGOUTS_DISCONNECTED,
EVENT_HANGOUTS_MESSAGE_RECEIVED,
INTENT_HELP,
)
from .hangups_utils import HangoutsCredentials, HangoutsRefreshToken
_LOGGER = logging.getLogger(__name__)
class HangoutsBot:
"""The Hangouts Bot."""
def __init__(
self, hass, refresh_token, intents, default_convs, error_suppressed_convs
):
"""Set up the client."""
self.hass = hass
self._connected = False
self._refresh_token = refresh_token
self._intents = intents
self._conversation_intents = None
self._client = None
self._user_list = None
self._conversation_list = None
self._default_convs = default_convs
self._default_conv_ids = None
self._error_suppressed_convs = error_suppressed_convs
self._error_suppressed_conv_ids = None
dispatcher.async_dispatcher_connect(
self.hass,
EVENT_HANGOUTS_MESSAGE_RECEIVED,
self._async_handle_conversation_message,
)
def _resolve_conversation_id(self, obj):
if CONF_CONVERSATION_ID in obj:
return obj[CONF_CONVERSATION_ID]
if CONF_CONVERSATION_NAME in obj:
conv = self._resolve_conversation_name(obj[CONF_CONVERSATION_NAME])
if conv is not None:
return conv.id_
return None
def _resolve_conversation_name(self, name):
for conv in self._conversation_list.get_all():
if conv.name == name:
return conv
return None
@callback
def async_update_conversation_commands(self):
"""Refresh the commands for every conversation."""
self._conversation_intents = {}
for intent_type, data in self._intents.items():
if data.get(CONF_CONVERSATIONS):
conversations = []
for conversation in data.get(CONF_CONVERSATIONS):
conv_id = self._resolve_conversation_id(conversation)
if conv_id is not None:
conversations.append(conv_id)
data[f"_{CONF_CONVERSATIONS}"] = conversations
elif self._default_conv_ids:
data[f"_{CONF_CONVERSATIONS}"] = self._default_conv_ids
else:
data[f"_{CONF_CONVERSATIONS}"] = [
conv.id_ for conv in self._conversation_list.get_all()
]
for conv_id in data[f"_{CONF_CONVERSATIONS}"]:
if conv_id not in self._conversation_intents:
self._conversation_intents[conv_id] = {}
self._conversation_intents[conv_id][intent_type] = data
try:
self._conversation_list.on_event.remove_observer(
self._async_handle_conversation_event
)
except ValueError:
pass
self._conversation_list.on_event.add_observer(
self._async_handle_conversation_event
)
@callback
def async_resolve_conversations(self, _):
"""Resolve the list of default and error suppressed conversations."""
self._default_conv_ids = []
self._error_suppressed_conv_ids = []
for conversation in self._default_convs:
conv_id = self._resolve_conversation_id(conversation)
if conv_id is not None:
self._default_conv_ids.append(conv_id)
for conversation in self._error_suppressed_convs:
conv_id = self._resolve_conversation_id(conversation)
if conv_id is not None:
self._error_suppressed_conv_ids.append(conv_id)
dispatcher.async_dispatcher_send(
self.hass, EVENT_HANGOUTS_CONVERSATIONS_RESOLVED
)
async def _async_handle_conversation_event(self, event):
if isinstance(event, ChatMessageEvent):
dispatcher.async_dispatcher_send(
self.hass,
EVENT_HANGOUTS_MESSAGE_RECEIVED,
event.conversation_id,
event.user_id,
event,
)
async def _async_handle_conversation_message(self, conv_id, user_id, event):
"""Handle a message sent to a conversation."""
user = self._user_list.get_user(user_id)
if user.is_self:
return
message = event.text
_LOGGER.debug("Handling message '%s' from %s", message, user.full_name)
intents = self._conversation_intents.get(conv_id)
if intents is not None:
is_error = False
try:
intent_result = await self._async_process(intents, message, conv_id)
except (intent.UnknownIntent, intent.IntentHandleError) as err:
is_error = True
intent_result = intent.IntentResponse()
intent_result.async_set_speech(str(err))
if intent_result is None:
is_error = True
intent_result = intent.IntentResponse()
intent_result.async_set_speech("Sorry, I didn't understand that")
message = (
intent_result.as_dict().get("speech", {}).get("plain", {}).get("speech")
)
if (message is not None) and not (
is_error and conv_id in self._error_suppressed_conv_ids
):
await self._async_send_message(
[{"text": message, "parse_str": True}],
[{CONF_CONVERSATION_ID: conv_id}],
None,
)
async def _async_process(self, intents, text, conv_id):
"""Detect a matching intent."""
for intent_type, data in intents.items():
for matcher in data.get(CONF_MATCHERS, []):
match = matcher.match(text)
if not match:
continue
if intent_type == INTENT_HELP:
return await self.hass.helpers.intent.async_handle(
DOMAIN, intent_type, {"conv_id": {"value": conv_id}}, text
)
return await self.hass.helpers.intent.async_handle(
DOMAIN,
intent_type,
{key: {"value": value} for key, value in match.groupdict().items()},
text,
)
async def async_connect(self):
"""Login to the Google Hangouts."""
session = await self.hass.async_add_executor_job(
get_auth,
HangoutsCredentials(None, None, None),
HangoutsRefreshToken(self._refresh_token),
)
self._client = Client(session)
self._client.on_connect.add_observer(self._on_connect)
self._client.on_disconnect.add_observer(self._on_disconnect)
self.hass.loop.create_task(self._client.connect())
def _on_connect(self):
_LOGGER.debug("Connected!")
self._connected = True
dispatcher.async_dispatcher_send(self.hass, EVENT_HANGOUTS_CONNECTED)
async def _on_disconnect(self):
"""Handle disconnecting."""
if self._connected:
_LOGGER.debug("Connection lost! Reconnect...")
await self.async_connect()
else:
dispatcher.async_dispatcher_send(self.hass, EVENT_HANGOUTS_DISCONNECTED)
async def async_disconnect(self):
"""Disconnect the client if it is connected."""
if self._connected:
self._connected = False
await self._client.disconnect()
async def async_handle_hass_stop(self, _):
"""Run once when Home Assistant stops."""
await self.async_disconnect()
async def _async_send_message(self, message, targets, data):
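        """Send message segments to the resolved target conversations.

        Expected shapes, inferred from the handling below (illustrative only):
          message: [{"text": "Hello", "parse_str": True}, ...]
          targets: [{CONF_CONVERSATION_ID: ...}] or [{CONF_CONVERSATION_NAME: ...}]
          data:    optional dict with "image_url" or "image_file".
        """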
conversations = []
for target in targets:
conversation = None
if CONF_CONVERSATION_ID in target:
conversation = self._conversation_list.get(target[CONF_CONVERSATION_ID])
elif CONF_CONVERSATION_NAME in target:
conversation = self._resolve_conversation_name(
target[CONF_CONVERSATION_NAME]
)
if conversation is not None:
conversations.append(conversation)
if not conversations:
return False
messages = []
for segment in message:
if messages:
messages.append(
ChatMessageSegment(
"", segment_type=hangouts_pb2.SEGMENT_TYPE_LINE_BREAK
)
)
if "parse_str" in segment and segment["parse_str"]:
messages.extend(ChatMessageSegment.from_str(segment["text"]))
else:
if "parse_str" in segment:
del segment["parse_str"]
messages.append(ChatMessageSegment(**segment))
image_file = None
if data:
if data.get("image_url"):
uri = data.get("image_url")
try:
websession = async_get_clientsession(self.hass)
async with websession.get(uri, timeout=5) as response:
if response.status != HTTP_OK:
_LOGGER.error(
"Fetch image failed, %s, %s", response.status, response
)
image_file = None
else:
image_data = await response.read()
image_file = io.BytesIO(image_data)
image_file.name = "image.png"
except (asyncio.TimeoutError, aiohttp.ClientError) as error:
_LOGGER.error("Failed to fetch image, %s", type(error))
image_file = None
elif data.get("image_file"):
uri = data.get("image_file")
if self.hass.config.is_allowed_path(uri):
try:
image_file = open(uri, "rb")
except OSError as error:
_LOGGER.error(
"Image file I/O error(%s): %s", error.errno, error.strerror
)
else:
_LOGGER.error('Path "%s" not allowed', uri)
if not messages:
return False
for conv in conversations:
await conv.send_message(messages, image_file)
async def _async_list_conversations(self):
(
self._user_list,
self._conversation_list,
) = await hangups.build_user_conversation_list(self._client)
conversations = {}
for i, conv in enumerate(self._conversation_list.get_all()):
users_in_conversation = []
for user in conv.users:
users_in_conversation.append(user.full_name)
conversations[str(i)] = {
CONF_CONVERSATION_ID: str(conv.id_),
CONF_CONVERSATION_NAME: conv.name,
"users": users_in_conversation,
}
self.hass.states.async_set(
f"{DOMAIN}.conversations",
len(self._conversation_list.get_all()),
attributes=conversations,
)
dispatcher.async_dispatcher_send(
self.hass, EVENT_HANGOUTS_CONVERSATIONS_CHANGED, conversations
)
async def async_handle_send_message(self, service):
"""Handle the send_message service."""
await self._async_send_message(
service.data[ATTR_MESSAGE],
service.data[ATTR_TARGET],
service.data.get(ATTR_DATA, {}),
)
async def async_handle_update_users_and_conversations(self, _=None):
"""Handle the update_users_and_conversations service."""
await self._async_list_conversations()
async def async_handle_reconnect(self, _=None):
"""Handle the reconnect service."""
await self.async_disconnect()
await self.async_connect()
def get_intents(self, conv_id):
"""Return the intents for a specific conversation."""
return self._conversation_intents.get(conv_id)
|
import pandas as pd
def get_p_vals(df, positive_category, term_significance):
'''
Parameters
----------
    df : pd.DataFrame
        A data frame from, e.g., get_term_freq_df.
    positive_category : str
        The positive category name.
    term_significance : TermSignificance
        A TermSignificance instance from which to extract p-values.

    Returns
    -------
    The p-values computed by term_significance.get_p_vals.
    '''
df_pos = df[[positive_category]]
df_pos.columns = ['pos']
df_neg = pd.DataFrame(df[[c for c in df.columns if
c != positive_category
and c.endswith(' freq')]].sum(axis=1))
df_neg.columns = ['neg']
X = df_pos.join(df_neg)[['pos','neg']].values
return term_significance.get_p_vals(X)
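# Illustrative usage (the names below are assumptions, not part of this module):
#
#   term_freq_df = corpus.get_term_freq_df()
#   p_vals = get_p_vals(term_freq_df, 'democrat freq', term_sig)
#
# where `term_sig` is some TermSignificance implementation.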
|
from unittest.mock import patch
from homeassistant import setup
from homeassistant.core import callback
from homeassistant.helpers import discovery
from tests.common import (
MockModule,
MockPlatform,
get_test_home_assistant,
mock_coro,
mock_entity_platform,
mock_integration,
)
class TestHelpersDiscovery:
"""Tests for discovery helper methods."""
def setup_method(self, method):
"""Set up things to be run when tests are started."""
self.hass = get_test_home_assistant()
def teardown_method(self, method):
"""Stop everything that was started."""
self.hass.stop()
@patch("homeassistant.setup.async_setup_component", return_value=mock_coro())
def test_listen(self, mock_setup_component):
"""Test discovery listen/discover combo."""
helpers = self.hass.helpers
calls_single = []
calls_multi = []
@callback
def callback_single(service, info):
"""Service discovered callback."""
calls_single.append((service, info))
@callback
def callback_multi(service, info):
"""Service discovered callback."""
calls_multi.append((service, info))
helpers.discovery.listen("test service", callback_single)
helpers.discovery.listen(["test service", "another service"], callback_multi)
helpers.discovery.discover(
"test service", "discovery info", "test_component", {}
)
self.hass.block_till_done()
assert mock_setup_component.called
assert mock_setup_component.call_args[0] == (self.hass, "test_component", {})
assert len(calls_single) == 1
assert calls_single[0] == ("test service", "discovery info")
helpers.discovery.discover(
"another service", "discovery info", "test_component", {}
)
self.hass.block_till_done()
assert len(calls_single) == 1
assert len(calls_multi) == 2
assert ["test service", "another service"] == [info[0] for info in calls_multi]
@patch("homeassistant.setup.async_setup_component", return_value=mock_coro(True))
def test_platform(self, mock_setup_component):
"""Test discover platform method."""
calls = []
@callback
def platform_callback(platform, info):
"""Platform callback method."""
calls.append((platform, info))
discovery.listen_platform(self.hass, "test_component", platform_callback)
discovery.load_platform(
self.hass,
"test_component",
"test_platform",
"discovery info",
{"test_component": {}},
)
self.hass.block_till_done()
assert mock_setup_component.called
assert mock_setup_component.call_args[0] == (
self.hass,
"test_component",
{"test_component": {}},
)
self.hass.block_till_done()
discovery.load_platform(
self.hass,
"test_component_2",
"test_platform",
"discovery info",
{"test_component": {}},
)
self.hass.block_till_done()
assert len(calls) == 1
assert calls[0] == ("test_platform", "discovery info")
self.hass.bus.fire(
discovery.EVENT_PLATFORM_DISCOVERED,
{
discovery.ATTR_SERVICE: discovery.EVENT_LOAD_PLATFORM.format(
"test_component"
)
},
)
self.hass.block_till_done()
assert len(calls) == 1
def test_circular_import(self):
"""Test we don't break doing circular import.
This test will have test_component discover the switch.test_circular
component while setting up.
The supplied config will load test_component and will load
switch.test_circular.
That means that after startup, we will have test_component and switch
        set up. The test_circular platform has been loaded twice.
"""
component_calls = []
platform_calls = []
def component_setup(hass, config):
"""Set up mock component."""
discovery.load_platform(hass, "switch", "test_circular", "disc", config)
component_calls.append(1)
return True
def setup_platform(hass, config, add_entities_callback, discovery_info=None):
"""Set up mock platform."""
platform_calls.append("disc" if discovery_info else "component")
mock_integration(self.hass, MockModule("test_component", setup=component_setup))
        # Dependencies are only set at the component level,
        # since we are using the manifest to hold them.
mock_integration(
self.hass, MockModule("test_circular", dependencies=["test_component"])
)
mock_entity_platform(
self.hass, "switch.test_circular", MockPlatform(setup_platform)
)
setup.setup_component(
self.hass,
"test_component",
{"test_component": None, "switch": [{"platform": "test_circular"}]},
)
self.hass.block_till_done()
        # test_component will only be set up once
assert len(component_calls) == 1
# The platform will be setup once via the config in `setup_component`
# and once via the discovery inside test_component.
assert len(platform_calls) == 2
assert "test_component" in self.hass.config.components
assert "switch" in self.hass.config.components
@patch("homeassistant.helpers.signal.async_register_signal_handling")
def test_1st_discovers_2nd_component(self, mock_signal):
"""Test that we don't break if one component discovers the other.
If the first component fires a discovery event to set up the
second component while the second component is about to be set up,
it should not set up the second component twice.
"""
component_calls = []
def component1_setup(hass, config):
"""Set up mock component."""
print("component1 setup")
discovery.discover(hass, "test_component2", {}, "test_component2", {})
return True
def component2_setup(hass, config):
"""Set up mock component."""
component_calls.append(1)
return True
mock_integration(
self.hass, MockModule("test_component1", setup=component1_setup)
)
mock_integration(
self.hass, MockModule("test_component2", setup=component2_setup)
)
@callback
def do_setup():
"""Set up 2 components."""
self.hass.async_add_job(
setup.async_setup_component(self.hass, "test_component1", {})
)
self.hass.async_add_job(
setup.async_setup_component(self.hass, "test_component2", {})
)
self.hass.add_job(do_setup)
self.hass.block_till_done()
        # test_component2 will only be set up once
assert len(component_calls) == 1
|
import struct
from plumbum.lib import six
LFANEW_OFFSET = 30 * 2  # offset of the e_lfanew field (0x3C) in the DOS header
FILE_HEADER_SIZE = 5 * 4  # size of the COFF file header in bytes
SUBSYSTEM_OFFSET = 17 * 4  # offset of the Subsystem field within the optional header
IMAGE_SUBSYSTEM_WINDOWS_GUI = 2
IMAGE_SUBSYSTEM_WINDOWS_CUI = 3
def get_pe_subsystem(filename):
with open(filename, "rb") as f:
if f.read(2) != six.b("MZ"):
return None
f.seek(LFANEW_OFFSET)
        lfanew = struct.unpack("<L", f.read(4))[0]  # "<L" forces a 4-byte read on all platforms
f.seek(lfanew)
if f.read(4) != six.b("PE\x00\x00"):
return None
f.seek(FILE_HEADER_SIZE + SUBSYSTEM_OFFSET, 1)
        subsystem = struct.unpack("<H", f.read(2))[0]  # 2-byte Subsystem value
return subsystem
# print(get_pe_subsystem("c:\\windows\\notepad.exe")) == 2
# print(get_pe_subsystem("c:\\python32\\python.exe")) == 3
# print(get_pe_subsystem("c:\\python32\\pythonw.exe")) == 2
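# Hedged usage sketch (not part of the original module): the subsystem value can
# be used to tell console executables from GUI ones. The helper below is an
# illustration only and is meaningful only for Windows PE files.
def _is_gui_executable(filename):
    # True for GUI-subsystem binaries, False for console binaries or non-PE files.
    return get_pe_subsystem(filename) == IMAGE_SUBSYSTEM_WINDOWS_GUI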
|
import asyncio
from contextlib import suppress
from datetime import timedelta
import logging
from aiohttp import web
from aiohttp.web_exceptions import HTTPNotFound
import async_timeout
from homeassistant.components.http import HomeAssistantView
from homeassistant.const import HTTP_INTERNAL_SERVER_ERROR
from homeassistant.core import callback
from homeassistant.exceptions import HomeAssistantError
from homeassistant.helpers import config_per_platform, discovery
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.setup import async_prepare_setup_platform
# mypy: allow-untyped-defs, no-check-untyped-defs
_LOGGER = logging.getLogger(__name__)
DOMAIN = "mailbox"
EVENT = "mailbox_updated"
CONTENT_TYPE_MPEG = "audio/mpeg"
CONTENT_TYPE_NONE = "none"
SCAN_INTERVAL = timedelta(seconds=30)
async def async_setup(hass, config):
"""Track states and offer events for mailboxes."""
mailboxes = []
hass.components.frontend.async_register_built_in_panel(
"mailbox", "mailbox", "mdi:mailbox"
)
hass.http.register_view(MailboxPlatformsView(mailboxes))
hass.http.register_view(MailboxMessageView(mailboxes))
hass.http.register_view(MailboxMediaView(mailboxes))
hass.http.register_view(MailboxDeleteView(mailboxes))
async def async_setup_platform(p_type, p_config=None, discovery_info=None):
"""Set up a mailbox platform."""
if p_config is None:
p_config = {}
if discovery_info is None:
discovery_info = {}
platform = await async_prepare_setup_platform(hass, config, DOMAIN, p_type)
if platform is None:
_LOGGER.error("Unknown mailbox platform specified")
return
_LOGGER.info("Setting up %s.%s", DOMAIN, p_type)
mailbox = None
try:
if hasattr(platform, "async_get_handler"):
mailbox = await platform.async_get_handler(
hass, p_config, discovery_info
)
elif hasattr(platform, "get_handler"):
mailbox = await hass.async_add_executor_job(
platform.get_handler, hass, p_config, discovery_info
)
else:
raise HomeAssistantError("Invalid mailbox platform.")
if mailbox is None:
_LOGGER.error("Failed to initialize mailbox platform %s", p_type)
return
except Exception: # pylint: disable=broad-except
_LOGGER.exception("Error setting up platform %s", p_type)
return
mailboxes.append(mailbox)
mailbox_entity = MailboxEntity(mailbox)
component = EntityComponent(
logging.getLogger(__name__), DOMAIN, hass, SCAN_INTERVAL
)
await component.async_add_entities([mailbox_entity])
setup_tasks = [
async_setup_platform(p_type, p_config)
for p_type, p_config in config_per_platform(config, DOMAIN)
]
if setup_tasks:
await asyncio.wait(setup_tasks)
async def async_platform_discovered(platform, info):
"""Handle for discovered platform."""
await async_setup_platform(platform, discovery_info=info)
discovery.async_listen_platform(hass, DOMAIN, async_platform_discovered)
return True
class MailboxEntity(Entity):
"""Entity for each mailbox platform to provide a badge display."""
def __init__(self, mailbox):
"""Initialize mailbox entity."""
self.mailbox = mailbox
self.message_count = 0
async def async_added_to_hass(self):
"""Complete entity initialization."""
@callback
def _mailbox_updated(event):
self.async_schedule_update_ha_state(True)
self.hass.bus.async_listen(EVENT, _mailbox_updated)
self.async_schedule_update_ha_state(True)
@property
def state(self):
"""Return the state of the binary sensor."""
return str(self.message_count)
@property
def name(self):
"""Return the name of the entity."""
return self.mailbox.name
async def async_update(self):
"""Retrieve messages from platform."""
messages = await self.mailbox.async_get_messages()
self.message_count = len(messages)
class Mailbox:
"""Represent a mailbox device."""
def __init__(self, hass, name):
"""Initialize mailbox object."""
self.hass = hass
self.name = name
@callback
def async_update(self):
"""Send event notification of updated mailbox."""
self.hass.bus.async_fire(EVENT)
@property
def media_type(self):
"""Return the supported media type."""
raise NotImplementedError()
@property
def can_delete(self):
"""Return if messages can be deleted."""
return False
@property
def has_media(self):
"""Return if messages have attached media files."""
return False
async def async_get_media(self, msgid):
"""Return the media blob for the msgid."""
raise NotImplementedError()
async def async_get_messages(self):
"""Return a list of the current messages."""
raise NotImplementedError()
async def async_delete(self, msgid):
"""Delete the specified messages."""
raise NotImplementedError()
class StreamError(Exception):
"""Media streaming exception."""
class MailboxView(HomeAssistantView):
"""Base mailbox view."""
def __init__(self, mailboxes):
"""Initialize a basic mailbox view."""
self.mailboxes = mailboxes
def get_mailbox(self, platform):
"""Retrieve the specified mailbox."""
for mailbox in self.mailboxes:
if mailbox.name == platform:
return mailbox
raise HTTPNotFound
class MailboxPlatformsView(MailboxView):
"""View to return the list of mailbox platforms."""
url = "/api/mailbox/platforms"
name = "api:mailbox:platforms"
async def get(self, request: web.Request) -> web.Response:
"""Retrieve list of platforms."""
platforms = []
for mailbox in self.mailboxes:
platforms.append(
{
"name": mailbox.name,
"has_media": mailbox.has_media,
"can_delete": mailbox.can_delete,
}
)
return self.json(platforms)
class MailboxMessageView(MailboxView):
"""View to return the list of messages."""
url = "/api/mailbox/messages/{platform}"
name = "api:mailbox:messages"
async def get(self, request, platform):
"""Retrieve messages."""
mailbox = self.get_mailbox(platform)
messages = await mailbox.async_get_messages()
return self.json(messages)
class MailboxDeleteView(MailboxView):
"""View to delete selected messages."""
url = "/api/mailbox/delete/{platform}/{msgid}"
name = "api:mailbox:delete"
async def delete(self, request, platform, msgid):
"""Delete items."""
mailbox = self.get_mailbox(platform)
await mailbox.async_delete(msgid)
class MailboxMediaView(MailboxView):
"""View to return a media file."""
url = r"/api/mailbox/media/{platform}/{msgid}"
name = "api:asteriskmbox:media"
async def get(self, request, platform, msgid):
"""Retrieve media."""
mailbox = self.get_mailbox(platform)
with suppress(asyncio.CancelledError, asyncio.TimeoutError):
with async_timeout.timeout(10):
try:
stream = await mailbox.async_get_media(msgid)
except StreamError as err:
error_msg = "Error getting media: %s" % (err)
_LOGGER.error(error_msg)
return web.Response(status=HTTP_INTERNAL_SERVER_ERROR)
if stream:
return web.Response(body=stream, content_type=mailbox.media_type)
return web.Response(status=HTTP_INTERNAL_SERVER_ERROR)
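# Hedged sketch (not part of the original module): a minimal text-only mailbox
# platform built on the Mailbox base class above. The DemoMailbox name, its
# canned message, and the message dict keys are assumptions for illustration.
class DemoMailbox(Mailbox):
    """Mailbox holding a single hard-coded text message."""
    @property
    def media_type(self):
        return CONTENT_TYPE_NONE
    async def async_get_media(self, msgid):
        raise StreamError("No media available")
    async def async_get_messages(self):
        return [{
            "info": {"origtime": 0, "callerid": "Demo", "duration": 0},
            "text": "Hello from the demo mailbox",
            "sha": "demo-message",
        }]
async def async_get_handler(hass, config, discovery_info=None):
    """Illustrative platform entry point returning the demo mailbox."""
    return DemoMailbox(hass, "DemoMailbox")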
|
import datetime
import json
import logging
from absl import flags
from perfkitbenchmarker import capacity_reservation
from perfkitbenchmarker import errors
from perfkitbenchmarker import os_types
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.providers import aws
from perfkitbenchmarker.providers.aws import util
FLAGS = flags.FLAGS
_INSUFFICIENT_CAPACITY = 'InsufficientInstanceCapacity'
class InvalidVmGroupSizeError(Exception):
pass
class UnsupportedOsTypeError(Exception):
pass
class CreationError(Exception):
pass
class AwsCapacityReservation(capacity_reservation.BaseCapacityReservation):
"""An object representing an AWS EC2 CapacityReservation."""
CLOUD = aws.CLOUD
def __init__(self, vm_group):
if not vm_group:
raise InvalidVmGroupSizeError(
'AwsCapacityReservation must be initialized with at least one '
'VM in the vm_group.')
super(AwsCapacityReservation, self).__init__(vm_group)
self.zone_or_region = vm_group[0].zone
self.region = util.GetRegionFromZone(self.zone_or_region)
self.machine_type = vm_group[0].machine_type
self.os_type = vm_group[0].OS_TYPE
self.vm_count = len(vm_group)
def _Create(self):
"""Creates the AWS CapacaityReservation.
A reservation will be created given the VM shape in self.vm_groups.
Count is determined by the number of VMs in said group. The reservation
will have a lifetime determined by the general PKB concept of
timeout_minutes. If the reservation exceeds this timeout, AWS will
cancel it automatically. The VMs in the reservation will not be deleted.
    Note that an empty capacity reservation will incur costs for the
    VM shape / count, even if no VMs are using it.
After the reservation is created, this method updates all the VMs
in self.vm_groups by setting the capacity_reservation_id, as well
as the zone attributes on the VM, and the VM's network instance.
Raises:
UnsupportedOsTypeError: If creating a capacity reservation for the
given os type is not supported.
CreationError: If a capacity reservation cannot be created in the
region (typically indicates a stockout).
"""
if self.os_type in os_types.LINUX_OS_TYPES:
instance_platform = 'Linux/UNIX'
elif self.os_type in os_types.WINDOWS_OS_TYPES:
instance_platform = 'Windows'
else:
raise UnsupportedOsTypeError(
'Unsupported os_type for AWS CapacityReservation: %s.'
% self.os_type)
# If the user did not specify an AZ, we need to try to create the
    # CapacityReservation in a specific AZ until it succeeds.
# Then update the zone attribute on all the VMs in the group,
# as well as the zone attribute on the VMs' network instance.
if util.IsRegion(self.zone_or_region):
zones_to_try = util.GetZonesInRegion(self.region)
else:
zones_to_try = [self.zone_or_region]
end_date = (
datetime.datetime.utcnow() +
datetime.timedelta(minutes=FLAGS.timeout_minutes))
for zone in zones_to_try:
cmd = util.AWS_PREFIX + [
'ec2',
'create-capacity-reservation',
'--instance-type=%s' % self.machine_type,
'--instance-platform=%s' % instance_platform,
'--availability-zone=%s' % zone,
'--instance-count=%s' % self.vm_count,
'--instance-match-criteria=targeted',
'--region=%s' % self.region,
'--end-date-type=limited',
'--end-date=%s' % end_date.isoformat(),
]
stdout, stderr, retcode = vm_util.IssueCommand(cmd,
raise_on_failure=False)
if retcode:
logging.info('Unable to create CapacityReservation in %s. '
'This may be retried. Details: %s', zone, stderr)
if _INSUFFICIENT_CAPACITY in stderr:
logging.error(util.STOCKOUT_MESSAGE)
raise errors.Benchmarks.InsufficientCapacityCloudFailure(
util.STOCKOUT_MESSAGE + ' CapacityReservation in ' + zone)
continue
json_output = json.loads(stdout)
self.capacity_reservation_id = (
json_output['CapacityReservation']['CapacityReservationId'])
self._UpdateVmsInGroup(self.capacity_reservation_id, zone)
return
raise CreationError('Unable to create CapacityReservation in any of the '
'following zones: %s.' % zones_to_try)
def _Delete(self):
"""Deletes the capacity reservation."""
cmd = util.AWS_PREFIX + [
'ec2',
'cancel-capacity-reservation',
'--capacity-reservation-id=%s' % self.capacity_reservation_id,
'--region=%s' % self.region,
]
vm_util.IssueCommand(cmd, raise_on_failure=False)
def _Exists(self):
"""Returns true if the underlying reservation exists and is active."""
cmd = util.AWS_PREFIX + [
'ec2',
'describe-capacity-reservations',
'--capacity-reservation-id=%s' % self.capacity_reservation_id,
'--region=%s' % self.region,
]
stdout, _, retcode = vm_util.IssueCommand(cmd, raise_on_failure=False)
if retcode != 0:
return False
json_output = json.loads(stdout)
return json_output['CapacityReservations'][0]['State'] == 'active'
def _UpdateVmsInGroup(self, capacity_reservation_id, zone):
"""Updates the VMs in a group with necessary reservation details.
AWS virtual machines need to reference the capacity reservation id
during creation, so it is set on all VMs in the group. Additionally,
this class may determine which zone to run in, so that needs to be
updated too (on the VM, and the VM's network instance).
Args:
capacity_reservation_id: ID of the reservation created by this instance.
zone: Zone chosen by this class, or if it was supplied, the zone
provided by the user. In the latter case, setting the zone is equivalent
to a no-op.
"""
for vm in self.vm_group:
vm.capacity_reservation_id = capacity_reservation_id
vm.zone = zone
vm.network.zone = zone
|
from app import SQLAlchemyDB as db
# Common helper methods, only suitable for standalone operations; for transactional
# operations you still need to write db.session code by hand.
class BaseMethod(object):
__table_args__ = {'mysql_engine': 'MyISAM', 'mysql_charset': 'utf8'}
# insert and update
def save(self):
db.session.add(self)
db.session.commit()
# delete
def delete(self):
db.session.delete(self)
db.session.commit()
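# Hedged usage sketch (not part of the original module): a hypothetical model
# mixing BaseMethod into a Flask-SQLAlchemy model. The User class and its
# columns are assumptions for illustration only.
class User(db.Model, BaseMethod):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(64))
# Usage:
#   user = User(name='alice')
#   user.save()    # INSERT (or UPDATE when the instance is already persistent)
#   user.delete()  # DELETE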
|
from collections import UserDict
from copy import deepcopy
from requests.cookies import RequestsCookieJar
import os.path
from httpobs.scanner.utils import parse_http_equiv_headers
def empty_requests(http_equiv_file=None) -> dict:
req = {
'hostname': 'http-observatory.security.mozilla.org',
'resources': {
'__path__': None,
'/': None,
'/clientaccesspolicy.xml': None,
'/contribute.json': None,
'/crossdomain.xml': None,
'/robots.txt': None,
},
'responses': {
'auto': UserDict(),
'cors': None,
'http': None,
'https': None,
},
'session': UserDict(),
}
    # Read the HTML file for content tests and http-equiv parsing, if requested
if http_equiv_file:
__dirname = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(__dirname, 'unittests', 'files', http_equiv_file), 'r') as f:
html = f.read()
# Load the HTML file into the object for content tests.
req['resources']['__path__'] = html
req['responses']['auto'].headers = {
'Content-Type': 'text/html',
}
req['responses']['auto'].history = []
req['responses']['auto'].request = UserDict()
req['responses']['auto'].request.headers = UserDict()
req['responses']['auto'].status_code = 200
req['responses']['auto'].url = 'https://http-observatory.security.mozilla.org/'
req['responses']['auto'].verified = True
req['session'].cookies = RequestsCookieJar()
req['responses']['cors'] = deepcopy(req['responses']['auto'])
req['responses']['http'] = deepcopy(req['responses']['auto'])
req['responses']['https'] = deepcopy(req['responses']['auto'])
# Parse the HTML file for its own headers, if requested
if http_equiv_file:
req['responses']['auto'].http_equiv = parse_http_equiv_headers(req['resources']['__path__'])
else:
req['responses']['auto'].http_equiv = {}
return req
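# Hedged usage sketch (not part of the original module): how a unit test might
# build the synthetic structure and override a response header before running
# checks. The header value is an assumption for illustration only.
if __name__ == '__main__':
    reqs = empty_requests()
    reqs['responses']['auto'].headers['Strict-Transport-Security'] = 'max-age=63072000'
    print(reqs['responses']['auto'].headers)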
|
import logging
from miio import DeviceException, WifiRepeater # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_TOKEN
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_TOKEN): vol.All(cv.string, vol.Length(min=32, max=32)),
}
)
def get_scanner(hass, config):
"""Return a Xiaomi MiIO device scanner."""
scanner = None
host = config[DOMAIN][CONF_HOST]
token = config[DOMAIN][CONF_TOKEN]
_LOGGER.info("Initializing with host %s (token %s...)", host, token[:5])
try:
device = WifiRepeater(host, token)
device_info = device.info()
_LOGGER.info(
"%s %s %s detected",
device_info.model,
device_info.firmware_version,
device_info.hardware_version,
)
scanner = XiaomiMiioDeviceScanner(device)
except DeviceException as ex:
_LOGGER.error("Device unavailable or token incorrect: %s", ex)
return scanner
class XiaomiMiioDeviceScanner(DeviceScanner):
"""This class queries a Xiaomi Mi WiFi Repeater."""
def __init__(self, device):
"""Initialize the scanner."""
self.device = device
async def async_scan_devices(self):
"""Scan for devices and return a list containing found device IDs."""
devices = []
try:
station_info = await self.hass.async_add_executor_job(self.device.status)
_LOGGER.debug("Got new station info: %s", station_info)
for device in station_info.associated_stations:
devices.append(device["mac"])
except DeviceException as ex:
_LOGGER.error("Unable to fetch the state: %s", ex)
return devices
async def async_get_device_name(self, device):
"""Return None.
The repeater doesn't provide the name of the associated device.
"""
return None
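# Hedged usage sketch (not part of the original module): the shape of the config
# dict that get_scanner expects. The host and token values are assumptions for
# illustration only; real tokens are 32 hexadecimal characters.
EXAMPLE_CONFIG = {
    DOMAIN: {
        CONF_HOST: "192.168.1.2",
        CONF_TOKEN: "0123456789abcdef0123456789abcdef",
    }
}
# scanner = get_scanner(hass, EXAMPLE_CONFIG)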
|
from datetime import timedelta
import logging
import voluptuous as vol
from homeassistant.const import (
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
STATE_ON,
)
from homeassistant.helpers.config_validation import ( # noqa: F401
PLATFORM_SCHEMA,
PLATFORM_SCHEMA_BASE,
)
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.entity_component import EntityComponent
from homeassistant.loader import bind_hass
# mypy: allow-untyped-defs, no-check-untyped-defs
DOMAIN = "switch"
SCAN_INTERVAL = timedelta(seconds=30)
ENTITY_ID_FORMAT = DOMAIN + ".{}"
ATTR_TODAY_ENERGY_KWH = "today_energy_kwh"
ATTR_CURRENT_POWER_W = "current_power_w"
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
PROP_TO_ATTR = {
"current_power_w": ATTR_CURRENT_POWER_W,
"today_energy_kwh": ATTR_TODAY_ENERGY_KWH,
}
DEVICE_CLASS_OUTLET = "outlet"
DEVICE_CLASS_SWITCH = "switch"
DEVICE_CLASSES = [DEVICE_CLASS_OUTLET, DEVICE_CLASS_SWITCH]
DEVICE_CLASSES_SCHEMA = vol.All(vol.Lower, vol.In(DEVICE_CLASSES))
_LOGGER = logging.getLogger(__name__)
@bind_hass
def is_on(hass, entity_id):
"""Return if the switch is on based on the statemachine.
Async friendly.
"""
return hass.states.is_state(entity_id, STATE_ON)
async def async_setup(hass, config):
"""Track states and offer events for switches."""
component = hass.data[DOMAIN] = EntityComponent(
_LOGGER, DOMAIN, hass, SCAN_INTERVAL
)
await component.async_setup(config)
component.async_register_entity_service(SERVICE_TURN_OFF, {}, "async_turn_off")
component.async_register_entity_service(SERVICE_TURN_ON, {}, "async_turn_on")
component.async_register_entity_service(SERVICE_TOGGLE, {}, "async_toggle")
return True
async def async_setup_entry(hass, entry):
"""Set up a config entry."""
return await hass.data[DOMAIN].async_setup_entry(entry)
async def async_unload_entry(hass, entry):
"""Unload a config entry."""
return await hass.data[DOMAIN].async_unload_entry(entry)
class SwitchEntity(ToggleEntity):
"""Representation of a switch."""
@property
def current_power_w(self):
"""Return the current power usage in W."""
return None
@property
def today_energy_kwh(self):
"""Return the today total energy usage in kWh."""
return None
@property
def is_standby(self):
"""Return true if device is in standby."""
return None
@property
def state_attributes(self):
"""Return the optional state attributes."""
data = {}
for prop, attr in PROP_TO_ATTR.items():
value = getattr(self, prop)
if value is not None:
data[attr] = value
return data
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
return None
class SwitchDevice(SwitchEntity):
"""Representation of a switch (for backwards compatibility)."""
def __init_subclass__(cls, **kwargs):
"""Print deprecation warning."""
super().__init_subclass__(**kwargs)
_LOGGER.warning(
"SwitchDevice is deprecated, modify %s to extend SwitchEntity",
cls.__name__,
)
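# Hedged sketch (not part of the original module): a minimal in-memory switch
# built on SwitchEntity, as a platform's add_entities call might create it.
# The DemoSwitch name is an assumption for illustration only.
class DemoSwitch(SwitchEntity):
    """Switch that only tracks its on/off state in memory."""
    def __init__(self, name):
        self._name = name
        self._is_on = False
    @property
    def name(self):
        return self._name
    @property
    def is_on(self):
        return self._is_on
    def turn_on(self, **kwargs):
        self._is_on = True
        self.schedule_update_ha_state()
    def turn_off(self, **kwargs):
        self._is_on = False
        self.schedule_update_ha_state()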
|
from babelfish import LanguageReverseConverter
from ..exceptions import ConfigurationError
class TheSubDBConverter(LanguageReverseConverter):
    """Language converter between babelfish alpha3 codes and TheSubDB's two-letter codes."""
    def __init__(self):
self.from_thesubdb = {'en': ('eng',), 'es': ('spa',), 'fr': ('fra',), 'it': ('ita',), 'nl': ('nld',),
'pl': ('pol',), 'pt': ('por', 'BR'), 'ro': ('ron',), 'sv': ('swe',), 'tr': ('tur',)}
self.to_thesubdb = {v: k for k, v in self.from_thesubdb.items()}
self.codes = set(self.from_thesubdb.keys())
def convert(self, alpha3, country=None, script=None):
if (alpha3, country) in self.to_thesubdb:
return self.to_thesubdb[(alpha3, country)]
if (alpha3,) in self.to_thesubdb:
return self.to_thesubdb[(alpha3,)]
raise ConfigurationError('Unsupported language for thesubdb: %s, %s, %s' % (alpha3, country, script))
def reverse(self, thesubdb):
if thesubdb in self.from_thesubdb:
return self.from_thesubdb[thesubdb]
raise ConfigurationError('Unsupported language code for thesubdb: %s' % thesubdb)
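# Hedged usage sketch (not part of the original module): round-tripping between
# babelfish alpha3 codes and TheSubDB codes. Run in package context so the
# relative import above resolves.
if __name__ == '__main__':
    converter = TheSubDBConverter()
    print(converter.convert('eng'))        # 'en'
    print(converter.convert('por', 'BR'))  # 'pt'
    print(converter.reverse('sv'))         # ('swe',)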
|
from gi.repository import Gdk, GObject, Gtk
class DiffGrid(Gtk.Grid):
__gtype_name__ = "DiffGrid"
column_count = 10
handle_columns = (2, 6)
def __init__(self):
super().__init__()
self._in_drag = False
self._drag_pos = -1
self._drag_handle = None
self._handle1 = HandleWindow()
self._handle2 = HandleWindow()
def do_realize(self):
Gtk.Grid.do_realize(self)
self._handle1.realize(self)
self._handle2.realize(self)
def do_unrealize(self):
self._handle1.unrealize()
self._handle2.unrealize()
Gtk.Grid.do_unrealize(self)
def do_map(self):
Gtk.Grid.do_map(self)
drag = self.get_child_at(2, 0)
self._handle1.set_visible(drag and drag.get_visible())
drag = self.get_child_at(6, 0)
self._handle2.set_visible(drag and drag.get_visible())
def do_unmap(self):
self._handle1.set_visible(False)
self._handle2.set_visible(False)
Gtk.Grid.do_unmap(self)
def _handle_set_prelight(self, window, flag):
if hasattr(window, "handle"):
window.handle.set_prelight(flag)
def do_enter_notify_event(self, event):
if hasattr(event.window, "handle"):
event.window.handle.set_prelight(True)
def do_leave_notify_event(self, event):
if self._in_drag:
return
if hasattr(event.window, "handle"):
event.window.handle.set_prelight(False)
def do_button_press_event(self, event):
        if event.button == Gdk.BUTTON_PRIMARY:
self._drag_pos = event.x
self._in_drag = True
return True
return False
def do_button_release_event(self, event):
        if event.button == Gdk.BUTTON_PRIMARY:
self._in_drag = False
return True
return False
def do_motion_notify_event(self, event):
if event.state & Gdk.ModifierType.BUTTON1_MASK:
if hasattr(event.window, "handle"):
x, y = event.window.get_position()
pos = round(x + event.x - self._drag_pos)
event.window.handle.set_position(pos)
self._drag_handle = event.window.handle
self.queue_resize_no_redraw()
return True
return False
def _calculate_positions(
self, xmin, xmax, pane_sep_width_1, pane_sep_width_2,
wpane1, wpane2, wpane3):
wremain = max(0, xmax - xmin - pane_sep_width_1 - pane_sep_width_2)
pos1 = self._handle1.get_position(wremain, xmin)
pos2 = self._handle2.get_position(wremain, xmin + pane_sep_width_1)
if not self._drag_handle:
npanes = 0
if wpane1 > 0:
npanes += 1
if wpane2 > 0:
npanes += 1
if wpane3 > 0:
npanes += 1
wpane = float(wremain) / max(1, npanes)
if wpane1 > 0:
wpane1 = wpane
if wpane2 > 0:
wpane2 = wpane
if wpane3 > 0:
wpane3 = wpane
xminlink1 = xmin + wpane1
xmaxlink2 = xmax - wpane3 - pane_sep_width_2
wlinkpane = pane_sep_width_1 + wpane2
if wpane1 == 0:
pos1 = xminlink1
if wpane3 == 0:
pos2 = xmaxlink2
if wpane2 == 0:
if wpane3 == 0:
pos1 = pos2 - pane_sep_width_2
else:
pos2 = pos1 + pane_sep_width_1
if self._drag_handle == self._handle2:
xminlink2 = xminlink1 + wlinkpane
pos2 = min(max(xminlink2, pos2), xmaxlink2)
xmaxlink1 = pos2 - wlinkpane
pos1 = min(max(xminlink1, pos1), xmaxlink1)
else:
xmaxlink1 = xmaxlink2 - wlinkpane
pos1 = min(max(xminlink1, pos1), xmaxlink1)
xminlink2 = pos1 + wlinkpane
pos2 = min(max(xminlink2, pos2), xmaxlink2)
self._handle1.set_position(pos1)
self._handle2.set_position(pos2)
return int(round(pos1)), int(round(pos2))
def do_size_allocate(self, allocation):
# We should be chaining up here to:
# Gtk.Grid.do_size_allocate(self, allocation)
# However, when we do this, we hit issues with doing multiple
# allocations in a single allocation cycle (see bgo#779883).
self.set_allocation(allocation)
wcols, hrows = self._get_min_sizes()
yrows = [allocation.y,
allocation.y + hrows[0],
# Roughly equivalent to hard-coding row 1 to expand=True
allocation.y + (allocation.height - hrows[2] - hrows[3]),
allocation.y + (allocation.height - hrows[3]),
allocation.y + allocation.height]
(wpane1, wgutter1, wlink1, wgutter2, wpane2, wgutter3, wlink2,
wgutter4, wpane3, wmap) = wcols
xmin = allocation.x
xmax = allocation.x + allocation.width - wmap
pane_sep_width_1 = wgutter1 + wlink1 + wgutter2
pane_sep_width_2 = wgutter3 + wlink2 + wgutter4
pos1, pos2 = self._calculate_positions(
xmin, xmax, pane_sep_width_1, pane_sep_width_2,
wpane1, wpane2, wpane3
)
wpane1 = pos1 - allocation.x
wpane2 = pos2 - (pos1 + pane_sep_width_1)
wpane3 = xmax - (pos2 + pane_sep_width_2)
wcols = (
allocation.x, wpane1, wgutter1, wlink1, wgutter2, wpane2,
wgutter3, wlink2, wgutter4, wpane3, wmap)
columns = [sum(wcols[:i + 1]) for i in range(len(wcols))]
def child_allocate(child):
if not child.get_visible():
return
left, top, width, height = self.child_get(
child, 'left-attach', 'top-attach', 'width', 'height')
# This is a copy, and we have to do this because there's no Python
# access to Gtk.Allocation.
child_alloc = self.get_allocation()
child_alloc.x = columns[left]
child_alloc.y = yrows[top]
child_alloc.width = columns[left + width] - columns[left]
child_alloc.height = yrows[top + height] - yrows[top]
if self.get_direction() == Gtk.TextDirection.RTL:
child_alloc.x = (
allocation.x + allocation.width -
(child_alloc.x - allocation.x) - child_alloc.width)
child.size_allocate(child_alloc)
for child in self.get_children():
child_allocate(child)
if self.get_realized():
mapped = self.get_mapped()
ydrag = yrows[0]
hdrag = yrows[1] - yrows[0]
self._handle1.set_visible(mapped and pane_sep_width_1 > 0)
self._handle1.move_resize(pos1, ydrag, pane_sep_width_1, hdrag)
self._handle2.set_visible(mapped and pane_sep_width_2 > 0)
self._handle2.move_resize(pos2, ydrag, pane_sep_width_2, hdrag)
def _get_min_sizes(self):
hrows = [0] * 4
wcols = [0] * self.column_count
for row in range(4):
for col in range(self.column_count):
child = self.get_child_at(col, row)
if child and child.get_visible():
msize, nsize = child.get_preferred_size()
# Ignore spanning columns in width calculations; we should
# do this properly, but it's difficult.
spanning = GObject.Value(int)
self.child_get_property(child, 'width', spanning)
spanning = spanning.get_int()
# We ignore natural size when calculating required
# width, but use it when doing required height. The
# logic here is that height-for-width means that
# minimum width requisitions mean more-than-minimum
# heights. This is all extremely dodgy, but works
# for now.
if spanning == 1:
wcols[col] = max(wcols[col], msize.width)
hrows[row] = max(hrows[row], msize.height, nsize.height)
return wcols, hrows
def do_draw(self, context):
Gtk.Grid.do_draw(self, context)
self._handle1.draw(context)
self._handle2.draw(context)
class HandleWindow():
# We restrict the handle width because render_handle doesn't pay
# attention to orientation.
handle_width = 10
def __init__(self):
self._widget = None
self._window = None
self._area_x = -1
self._area_y = -1
self._area_width = 1
self._area_height = 1
self._prelit = False
self._pos = 0.0
self._transform = (0, 0)
def get_position(self, width, xtrans):
self._transform = (width, xtrans)
return float(self._pos * width) + xtrans
def set_position(self, pos):
width, xtrans = self._transform
self._pos = float(pos - xtrans) / width
def realize(self, widget):
attr = Gdk.WindowAttr()
attr.window_type = Gdk.WindowType.CHILD
attr.x = self._area_x
attr.y = self._area_y
attr.width = self._area_width
attr.height = self._area_height
attr.wclass = Gdk.WindowWindowClass.INPUT_OUTPUT
attr.event_mask = (widget.get_events() |
Gdk.EventMask.BUTTON_PRESS_MASK |
Gdk.EventMask.BUTTON_RELEASE_MASK |
Gdk.EventMask.ENTER_NOTIFY_MASK |
Gdk.EventMask.LEAVE_NOTIFY_MASK |
Gdk.EventMask.POINTER_MOTION_MASK)
attr.cursor = Gdk.Cursor.new_for_display(widget.get_display(),
Gdk.CursorType.
SB_H_DOUBLE_ARROW)
attr_mask = (Gdk.WindowAttributesType.X |
Gdk.WindowAttributesType.Y |
Gdk.WindowAttributesType.CURSOR)
parent = widget.get_parent_window()
self._window = Gdk.Window(parent, attr, attr_mask)
self._window.handle = self
self._widget = widget
self._widget.register_window(self._window)
def unrealize(self):
self._widget.unregister_window(self._window)
def set_visible(self, visible):
if visible:
self._window.show()
else:
self._window.hide()
def move_resize(self, x, y, width, height):
self._window.move_resize(x, y, width, height)
self._area_x = x
self._area_y = y
self._area_width = width
self._area_height = height
def set_prelight(self, flag):
self._prelit = flag
self._widget.queue_draw_area(self._area_x, self._area_y,
self._area_width, self._area_height)
def draw(self, cairocontext):
alloc = self._widget.get_allocation()
padding = 5
x = self._area_x - alloc.x + padding
y = self._area_y - alloc.y + padding
width = max(0, self._area_width - 2 * padding)
height = max(0, self._area_height - 2 * padding)
if width == 0 or height == 0:
return
stylecontext = self._widget.get_style_context()
state = stylecontext.get_state()
if self._widget.is_focus():
state |= Gtk.StateFlags.SELECTED
if self._prelit:
state |= Gtk.StateFlags.PRELIGHT
if Gtk.cairo_should_draw_window(cairocontext, self._window):
stylecontext.save()
stylecontext.set_state(state)
stylecontext.add_class(Gtk.STYLE_CLASS_PANE_SEPARATOR)
stylecontext.add_class(Gtk.STYLE_CLASS_VERTICAL)
color = stylecontext.get_background_color(state)
if color.alpha > 0.0:
xcenter = x + width / 2.0 - self.handle_width / 2.0
Gtk.render_handle(
stylecontext, cairocontext,
xcenter, y, self.handle_width, height)
else:
xcenter = x + width / 2.0
Gtk.render_line(stylecontext, cairocontext,
xcenter, y, xcenter, y + height)
stylecontext.restore()
|
import json
from pushbullet import PushBullet
import pytest
import homeassistant.components.notify as notify
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import assert_setup_component, load_fixture
@pytest.fixture
def mock_pushbullet():
"""Mock pushbullet."""
with patch.object(
PushBullet,
"_get_data",
return_value=json.loads(load_fixture("pushbullet_devices.json")),
):
yield
async def test_pushbullet_config(hass, mock_pushbullet):
"""Test setup."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
async def test_pushbullet_config_bad(hass):
"""Test set up the platform with bad/missing configuration."""
config = {notify.DOMAIN: {"platform": "pushbullet"}}
with assert_setup_component(0) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert not handle_config[notify.DOMAIN]
async def test_pushbullet_push_default(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {"title": "Test Title", "message": "Test Message"}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 1
expected_body = {"body": "Test Message", "title": "Test Title", "type": "note"}
assert requests_mock.last_request.json() == expected_body
async def test_pushbullet_push_device(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 1
expected_body = {
"body": "Test Message",
"device_iden": "identity1",
"title": "Test Title",
"type": "note",
}
assert requests_mock.last_request.json() == expected_body
async def test_pushbullet_push_devices(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP", "device/My iPhone"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 2
assert len(requests_mock.request_history) == 2
expected_body = {
"body": "Test Message",
"device_iden": "identity1",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[0].json() == expected_body
expected_body = {
"body": "Test Message",
"device_iden": "identity2",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[1].json() == expected_body
async def test_pushbullet_push_email(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["email/[email protected]"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 1
assert len(requests_mock.request_history) == 1
expected_body = {
"body": "Test Message",
"email": "[email protected]",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[0].json() == expected_body
async def test_pushbullet_push_mixed(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP", "email/[email protected]"],
}
await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
assert requests_mock.called
assert requests_mock.call_count == 2
assert len(requests_mock.request_history) == 2
expected_body = {
"body": "Test Message",
"device_iden": "identity1",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[0].json() == expected_body
expected_body = {
"body": "Test Message",
"email": "[email protected]",
"title": "Test Title",
"type": "note",
}
assert requests_mock.request_history[1].json() == expected_body
async def test_pushbullet_push_no_file(hass, requests_mock, mock_pushbullet):
"""Test pushbullet push to default target."""
config = {
notify.DOMAIN: {
"name": "test",
"platform": "pushbullet",
"api_key": "MYFAKEKEY",
}
}
with assert_setup_component(1) as handle_config:
assert await async_setup_component(hass, notify.DOMAIN, config)
await hass.async_block_till_done()
assert handle_config[notify.DOMAIN]
requests_mock.register_uri(
"POST",
"https://api.pushbullet.com/v2/pushes",
status_code=200,
json={"mock_response": "Ok"},
)
data = {
"title": "Test Title",
"message": "Test Message",
"target": ["device/DESKTOP", "device/My iPhone"],
"data": {"file": "not_a_file"},
}
assert not await hass.services.async_call(notify.DOMAIN, "test", data)
await hass.async_block_till_done()
|
from perfkitbenchmarker import errors
RHEL_REPO = ('[nginx]\n'
'name=nginx repo\n'
'baseurl=https://nginx.org/packages/rhel/$releasever/$basearch/\n'
'gpgcheck=0\n'
'enabled=1')
def YumInstall(vm):
"""Installs nginx on the VM."""
vm.RemoteCommand('echo \'%s\' | '
'sudo tee /etc/yum.repos.d/nginx.repo' % RHEL_REPO)
try:
vm.InstallPackages('nginx')
except errors.VmUtil.SshConnectionError:
# Amazon Linux does not have a releasever configured.
vm.RemoteCommand('sudo sed -i -e "s/\\$releasever/6/" '
'/etc/yum.repos.d/nginx.repo')
vm.InstallPackages('nginx')
def AptInstall(vm):
"""Installs nginx on the VM."""
vm.InstallPackages('nginx')
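# Hedged usage sketch (not part of the original module): a stand-in VM exposing
# the two methods these installers rely on, so the flow can be traced without a
# real benchmark VM. FakeVm is an assumption for illustration only.
if __name__ == '__main__':
    class FakeVm(object):
        def RemoteCommand(self, cmd):
            print('remote: %s' % cmd)
        def InstallPackages(self, packages):
            print('install: %s' % packages)
    AptInstall(FakeVm())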
|
import unittest
import numpy as np
from chainer import testing
from chainercv.transforms import translate_bbox
from chainercv.utils.testing.generate_random_bbox import generate_random_bbox
class TestTranslateBbox(unittest.TestCase):
def test_translate_bbox(self):
size = (32, 24)
y_offset, x_offset = 5, 3
bbox = generate_random_bbox(10, size, 0, min(size))
out = translate_bbox(bbox, y_offset=y_offset, x_offset=x_offset)
bbox_expected = np.empty_like(bbox)
bbox_expected[:, 0] = bbox[:, 0] + y_offset
bbox_expected[:, 1] = bbox[:, 1] + x_offset
bbox_expected[:, 2] = bbox[:, 2] + y_offset
bbox_expected[:, 3] = bbox[:, 3] + x_offset
np.testing.assert_equal(out, bbox_expected)
testing.run_module(__name__, __file__)
|
from mne.io.utils import _check_orig_units
def test_check_orig_units():
"""Test the checking of original units."""
orig_units = dict(FC1='nV', Hfp3erz='n/a', Pz='uV', greekMu='μV',
microSign='µV')
orig_units = _check_orig_units(orig_units)
assert orig_units['FC1'] == 'nV'
assert orig_units['Hfp3erz'] == 'n/a'
assert orig_units['Pz'] == 'µV'
assert orig_units['greekMu'] == 'µV'
assert orig_units['microSign'] == 'µV'
|
from qstrader.asset.asset import Asset
class Equity(Asset):
"""
Stores meta data about an equity common stock or ETF.
Parameters
----------
name : `str`
The asset's name (e.g. the company name and/or
share class).
symbol : `str`
The asset's original ticker symbol.
TODO: This will require modification to handle proper
ticker mapping.
tax_exempt: `boolean`, optional
Is the share exempt from government taxation?
Necessary for taxation on share transactions, such
as UK stamp duty.
"""
def __init__(
self,
name,
symbol,
tax_exempt=True
):
self.cash_like = False
self.name = name
self.symbol = symbol
self.tax_exempt = tax_exempt
def __repr__(self):
"""
String representation of the Equity Asset.
"""
return (
"Equity(name='%s', symbol='%s', tax_exempt=%s)" % (
self.name, self.symbol, self.tax_exempt
)
)
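# Hedged usage sketch (not part of the original module): constructing an Equity
# and relying on its __repr__. The ticker shown is an assumption for
# illustration only.
if __name__ == '__main__':
    apple = Equity('Apple Inc.', 'AAPL', tax_exempt=False)
    print(apple)  # Equity(name='Apple Inc.', symbol='AAPL', tax_exempt=False)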
|
from weblate.billing.models import Billing
from weblate.billing.tasks import billing_notify
from weblate.utils.management.base import BaseCommand
class Command(BaseCommand):
"""Command for billing check."""
help = "checks billing limits"
def add_arguments(self, parser):
parser.add_argument("--valid", action="store_true", help="list valid ones")
parser.add_argument(
"--notify", action="store_true", help="send email notifications"
)
def handle(self, *args, **options):
if options["notify"]:
billing_notify()
return
Billing.objects.check_limits()
if options["valid"]:
for bill in Billing.objects.get_valid():
self.stdout.write(f" * {bill}")
return
limit = Billing.objects.get_out_of_limits()
due = Billing.objects.get_unpaid()
if limit:
self.stdout.write("Following billings are over limit:")
for bill in limit:
self.stdout.write(f" * {bill}")
if due:
self.stdout.write("Following billings are past due date:")
for bill in due:
self.stdout.write(f" * {bill}")
|
import os
from mimetypes import guess_type
from urllib.parse import urljoin
from bs4 import BeautifulSoup
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import get_object_or_404
from django.template.defaultfilters import slugify
from django.urls import NoReverseMatch
from django.urls import reverse
from django.utils.encoding import smart_str
from django.utils.feedgenerator import Atom1Feed
from django.utils.translation import gettext as _
import django_comments as comments
from tagging.models import Tag
from tagging.models import TaggedItem
from zinnia.models.author import Author
from zinnia.models.entry import Entry
from zinnia.settings import COPYRIGHT
from zinnia.settings import FEEDS_FORMAT
from zinnia.settings import FEEDS_MAX_ITEMS
from zinnia.settings import PROTOCOL
from zinnia.templatetags.zinnia import get_gravatar
from zinnia.views.categories import get_category_or_404
class ZinniaFeed(Feed):
"""
Base Feed class for the Zinnia application,
enriched for a more convenient usage.
"""
protocol = PROTOCOL
feed_copyright = COPYRIGHT
feed_format = FEEDS_FORMAT
limit = FEEDS_MAX_ITEMS
def __init__(self):
if self.feed_format == 'atom':
self.feed_type = Atom1Feed
self.subtitle = getattr(self, 'description', None)
def title(self, obj=None):
"""
Title of the feed prefixed with the site name.
"""
return '%s - %s' % (self.site.name, self.get_title(obj))
def get_title(self, obj):
raise NotImplementedError
@property
def site(self):
"""
Acquire the current site used.
"""
return Site.objects.get_current()
@property
def site_url(self):
"""
Return the URL of the current site.
"""
return '%s://%s' % (self.protocol, self.site.domain)
class EntryFeed(ZinniaFeed):
"""
Base Entry Feed.
"""
title_template = 'feeds/entry_title.html'
description_template = 'feeds/entry_description.html'
def item_pubdate(self, item):
"""
Publication date of an entry.
"""
return item.publication_date
def item_updateddate(self, item):
"""
Update date of an entry.
"""
return item.last_update
def item_categories(self, item):
"""
Entry's categories.
"""
return [category.title for category in item.categories.all()]
def item_author_name(self, item):
"""
Return the first author of an entry.
"""
if item.authors.count():
self.item_author = item.authors.all()[0]
return self.item_author.__str__()
def item_author_email(self, item):
"""
Return the first author's email.
Should not be called if self.item_author_name has returned None.
"""
return self.item_author.email
def item_author_link(self, item):
"""
Return the author's URL.
Should not be called if self.item_author_name has returned None.
"""
try:
author_url = self.item_author.get_absolute_url()
return self.site_url + author_url
except NoReverseMatch:
return self.site_url
def item_enclosure_url(self, item):
"""
Return an image for enclosure.
"""
try:
url = item.image.url
except (AttributeError, ValueError):
img = BeautifulSoup(item.html_content, 'html.parser').find('img')
url = img.get('src') if img else None
self.cached_enclosure_url = url
if url:
url = urljoin(self.site_url, url)
if self.feed_format == 'rss':
url = url.replace('https://', 'http://')
return url
def item_enclosure_length(self, item):
"""
        Try to obtain the size of the enclosure if it's present on the FS,
        otherwise return a hardcoded value.
Note: this method is only called if item_enclosure_url
has returned something.
"""
try:
return str(item.image.size)
except (AttributeError, ValueError, os.error):
pass
return '100000'
def item_enclosure_mime_type(self, item):
"""
Guess the enclosure's mimetype.
Note: this method is only called if item_enclosure_url
has returned something.
"""
mime_type, encoding = guess_type(self.cached_enclosure_url)
if mime_type:
return mime_type
return 'image/jpeg'
class LastEntries(EntryFeed):
"""
Feed for the last entries.
"""
def link(self):
"""
URL of last entries.
"""
return reverse('zinnia:entry_archive_index')
def items(self):
"""
Items are published entries.
"""
return Entry.published.all()[:self.limit]
def get_title(self, obj):
"""
        Title of the feed.
"""
return _('Last entries')
def description(self):
"""
Description of the feed.
"""
return _('The last entries on the site %(object)s') % {
'object': self.site.name}
class CategoryEntries(EntryFeed):
"""
Feed filtered by a category.
"""
def get_object(self, request, path):
"""
        Retrieve the category by its path.
"""
return get_category_or_404(path)
def items(self, obj):
"""
Items are the published entries of the category.
"""
return obj.entries_published()[:self.limit]
def link(self, obj):
"""
URL of the category.
"""
return obj.get_absolute_url()
def get_title(self, obj):
"""
Title of the feed.
"""
return _('Entries for the category %(object)s') % {'object': obj.title}
def description(self, obj):
"""
Description of the feed.
"""
return (obj.description or
_('The last entries categorized under %(object)s') % {
'object': obj.title})
class AuthorEntries(EntryFeed):
"""
Feed filtered by an author.
"""
def get_object(self, request, username):
"""
        Retrieve the author by their username.
"""
return get_object_or_404(Author, **{Author.USERNAME_FIELD: username})
def items(self, obj):
"""
Items are the published entries of the author.
"""
return obj.entries_published()[:self.limit]
def link(self, obj):
"""
URL of the author.
"""
return obj.get_absolute_url()
def get_title(self, obj):
"""
Title of the feed.
"""
return _('Entries for the author %(object)s') % {
'object': smart_str(obj.__str__())}
def description(self, obj):
"""
Description of the feed.
"""
return _('The last entries by %(object)s') % {
'object': smart_str(obj.__str__())}
class TagEntries(EntryFeed):
"""
Feed filtered by a tag.
"""
def get_object(self, request, tag):
"""
        Retrieve the tag by its name.
"""
return get_object_or_404(Tag, name=tag)
def items(self, obj):
"""
Items are the published entries of the tag.
"""
return TaggedItem.objects.get_by_model(
Entry.published.all(), obj)[:self.limit]
def link(self, obj):
"""
URL of the tag.
"""
return reverse('zinnia:tag_detail', args=[obj.name])
def get_title(self, obj):
"""
Title of the feed.
"""
return _('Entries for the tag %(object)s') % {'object': obj.name}
def description(self, obj):
"""
Description of the feed.
"""
return _('The last entries tagged with %(object)s') % {
'object': obj.name}
class SearchEntries(EntryFeed):
"""
Feed filtered by a search pattern.
"""
def get_object(self, request):
"""
The GET parameter 'pattern' is the object.
"""
pattern = request.GET.get('pattern', '')
if len(pattern) < 3:
raise ObjectDoesNotExist
return pattern
def items(self, obj):
"""
        Items are the published entries found.
"""
return Entry.published.search(obj)[:self.limit]
def link(self, obj):
"""
URL of the search request.
"""
return '%s?pattern=%s' % (reverse('zinnia:entry_search'), obj)
def get_title(self, obj):
"""
Title of the feed.
"""
return _("Search results for '%(pattern)s'") % {'pattern': obj}
def description(self, obj):
"""
Description of the feed.
"""
return _("The last entries containing the pattern '%(pattern)s'") % {
'pattern': obj}
class DiscussionFeed(ZinniaFeed):
"""
Base class for discussion Feed.
"""
title_template = 'feeds/discussion_title.html'
description_template = 'feeds/discussion_description.html'
def item_pubdate(self, item):
"""
Publication date of a discussion.
"""
return item.submit_date
def item_link(self, item):
"""
URL of the discussion item.
"""
return item.get_absolute_url()
def item_author_name(self, item):
"""
Author of the discussion item.
"""
return item.name
def item_author_email(self, item):
"""
Author's email of the discussion item.
"""
return item.email
def item_author_link(self, item):
"""
Author's URL of the discussion.
"""
return item.url
class LastDiscussions(DiscussionFeed):
"""
Feed for the last discussions.
"""
def items(self):
"""
Items are the discussions on the entries.
"""
content_type = ContentType.objects.get_for_model(Entry)
return comments.get_model().objects.filter(
content_type=content_type, is_public=True).order_by(
'-submit_date')[:self.limit]
def link(self):
"""
URL of last discussions.
"""
return reverse('zinnia:entry_archive_index')
def get_title(self, obj):
"""
Title of the feed.
"""
return _('Last discussions')
def description(self):
"""
Description of the feed.
"""
return _('The last discussions on the site %(object)s') % {
'object': self.site.name}
class EntryDiscussions(DiscussionFeed):
"""
Feed for discussions on an entry.
"""
def get_object(self, request, year, month, day, slug):
"""
        Retrieve the entry by its slug and publication date.
"""
return get_object_or_404(Entry, slug=slug,
publication_date__year=year,
publication_date__month=month,
publication_date__day=day)
def items(self, obj):
"""
Items are the discussions on the entry.
"""
return obj.discussions[:self.limit]
def link(self, obj):
"""
URL of the entry.
"""
return obj.get_absolute_url()
def get_title(self, obj):
"""
Title of the feed.
"""
return _('Discussions on %(object)s') % {'object': obj.title}
def description(self, obj):
"""
Description of the feed.
"""
return _('The last discussions on the entry %(object)s') % {
'object': obj.title}
class EntryComments(EntryDiscussions):
"""
Feed for comments on an entry.
"""
title_template = 'feeds/comment_title.html'
description_template = 'feeds/comment_description.html'
def items(self, obj):
"""
Items are the comments on the entry.
"""
return obj.comments[:self.limit]
def item_link(self, item):
"""
URL of the comment.
"""
return item.get_absolute_url('#comment-%(id)s-by-'
) + slugify(item.user_name)
def get_title(self, obj):
"""
Title of the feed.
"""
return _('Comments on %(object)s') % {'object': obj.title}
def description(self, obj):
"""
Description of the feed.
"""
return _('The last comments on the entry %(object)s') % {
'object': obj.title}
def item_enclosure_url(self, item):
"""
Return a gravatar image for enclosure.
"""
return get_gravatar(item.email)
def item_enclosure_length(self, item):
"""
Hardcoded enclosure length.
"""
return '100000'
def item_enclosure_mime_type(self, item):
"""
Hardcoded enclosure mimetype.
"""
return 'image/jpeg'
class EntryPingbacks(EntryDiscussions):
"""
Feed for pingbacks on an entry.
"""
title_template = 'feeds/pingback_title.html'
description_template = 'feeds/pingback_description.html'
def items(self, obj):
"""
Items are the pingbacks on the entry.
"""
return obj.pingbacks[:self.limit]
def item_link(self, item):
"""
URL of the pingback.
"""
return item.get_absolute_url('#pingback-%(id)s')
def get_title(self, obj):
"""
Title of the feed.
"""
return _('Pingbacks on %(object)s') % {'object': obj.title}
def description(self, obj):
"""
Description of the feed.
"""
return _('The last pingbacks on the entry %(object)s') % {
'object': obj.title}
class EntryTrackbacks(EntryDiscussions):
"""
Feed for trackbacks on an entry.
"""
title_template = 'feeds/trackback_title.html'
description_template = 'feeds/trackback_description.html'
def items(self, obj):
"""
Items are the trackbacks on the entry.
"""
return obj.trackbacks[:self.limit]
def item_link(self, item):
"""
URL of the trackback.
"""
return item.get_absolute_url('#trackback-%(id)s')
def get_title(self, obj):
"""
Title of the feed.
"""
return _('Trackbacks on %(object)s') % {'object': obj.title}
def description(self, obj):
"""
Description of the feed.
"""
return _('The last trackbacks on the entry %(object)s') % {
'object': obj.title}
|
from unittest.mock import Mock, patch
from homeassistant.components import unifi
from homeassistant.components.unifi.const import DOMAIN as UNIFI_DOMAIN
from homeassistant.setup import async_setup_component
from .test_controller import setup_unifi_integration
from tests.async_mock import AsyncMock
from tests.common import MockConfigEntry, mock_coro
async def test_setup_with_no_config(hass):
"""Test that we do not discover anything or try to set up a bridge."""
assert await async_setup_component(hass, UNIFI_DOMAIN, {}) is True
assert UNIFI_DOMAIN not in hass.data
async def test_successful_config_entry(hass):
"""Test that configured options for a host are loaded via config entry."""
await setup_unifi_integration(hass)
assert hass.data[UNIFI_DOMAIN]
async def test_controller_fail_setup(hass):
"""Test that a failed setup still stores controller."""
with patch("homeassistant.components.unifi.UniFiController") as mock_controller:
mock_controller.return_value.async_setup = AsyncMock(return_value=False)
await setup_unifi_integration(hass)
assert hass.data[UNIFI_DOMAIN] == {}
async def test_controller_no_mac(hass):
"""Test that configured options for a host are loaded via config entry."""
entry = MockConfigEntry(
domain=UNIFI_DOMAIN,
data={
"controller": {
"host": "0.0.0.0",
"username": "user",
"password": "pass",
"port": 80,
"site": "default",
"verify_ssl": True,
},
"poe_control": True,
},
)
entry.add_to_hass(hass)
mock_registry = Mock()
with patch(
"homeassistant.components.unifi.UniFiController"
) as mock_controller, patch(
"homeassistant.helpers.device_registry.async_get_registry",
return_value=mock_coro(mock_registry),
):
mock_controller.return_value.async_setup = AsyncMock(return_value=True)
mock_controller.return_value.mac = None
assert await unifi.async_setup_entry(hass, entry) is True
assert len(mock_controller.mock_calls) == 2
assert len(mock_registry.mock_calls) == 0
async def test_unload_entry(hass):
"""Test being able to unload an entry."""
controller = await setup_unifi_integration(hass)
assert hass.data[UNIFI_DOMAIN]
assert await unifi.async_unload_entry(hass, controller.config_entry)
assert not hass.data[UNIFI_DOMAIN]
|
from logilab.common.testlib import TestCase, unittest_main
from logilab.common.tree import *
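# Test fixture: the tree is described as nested (node_id, (child, ...)) tuples
# and turned into linked Node objects by make_tree() below.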
tree = ('root', (
('child_1_1', (
('child_2_1', ()), ('child_2_2', (
('child_3_1', ()),
)))),
('child_1_2', (('child_2_3', ()),))))
def make_tree(tuple):
n = Node(tuple[0])
for child in tuple[1]:
n.append(make_tree(child))
return n
class Node_ClassTest(TestCase):
""" a basic tree node, caracterised by an id"""
def setUp(self):
""" called before each test from this class """
self.o = make_tree(tree)
def test_flatten(self):
result = [r.id for r in self.o.flatten()]
expected = ['root', 'child_1_1', 'child_2_1', 'child_2_2', 'child_3_1', 'child_1_2', 'child_2_3']
self.assertListEqual(result, expected)
def test_flatten_with_outlist(self):
resultnodes = []
self.o.flatten(resultnodes)
result = [r.id for r in resultnodes]
expected = ['root', 'child_1_1', 'child_2_1', 'child_2_2', 'child_3_1', 'child_1_2', 'child_2_3']
self.assertListEqual(result, expected)
def test_known_values_remove(self):
"""
remove a child node
"""
self.o.remove(self.o.get_node_by_id('child_1_1'))
self.assertRaises(NodeNotFound, self.o.get_node_by_id, 'child_1_1')
def test_known_values_replace(self):
"""
replace a child node with another
"""
self.o.replace(self.o.get_node_by_id('child_1_1'), Node('hoho'))
self.assertRaises(NodeNotFound, self.o.get_node_by_id, 'child_1_1')
self.assertEqual(self.o.get_node_by_id('hoho'), self.o.children[0])
def test_known_values_get_sibling(self):
"""
return the sibling node that has given id
"""
self.assertEqual(self.o.children[0].get_sibling('child_1_2'), self.o.children[1], None)
def test_raise_get_sibling_NodeNotFound(self):
self.assertRaises(NodeNotFound, self.o.children[0].get_sibling, 'houhou')
def test_known_values_get_node_by_id(self):
"""
return node in whole hierarchy that has given id
"""
self.assertEqual(self.o.get_node_by_id('child_1_1'), self.o.children[0])
def test_raise_get_node_by_id_NodeNotFound(self):
self.assertRaises(NodeNotFound, self.o.get_node_by_id, 'houhou')
def test_known_values_get_child_by_id(self):
"""
return child of given id
"""
self.assertEqual(self.o.get_child_by_id('child_2_1', recurse=1), self.o.children[0].children[0])
def test_raise_get_child_by_id_NodeNotFound(self):
self.assertRaises(NodeNotFound, self.o.get_child_by_id, nid='child_2_1')
self.assertRaises(NodeNotFound, self.o.get_child_by_id, 'houhou')
def test_known_values_get_child_by_path(self):
"""
return child of given path (path is a list of ids)
"""
self.assertEqual(self.o.get_child_by_path(['root', 'child_1_1', 'child_2_1']), self.o.children[0].children[0])
def test_raise_get_child_by_path_NodeNotFound(self):
self.assertRaises(NodeNotFound, self.o.get_child_by_path, ['child_1_1', 'child_2_11'])
def test_known_values_depth_down(self):
"""
        return the depth of the tree below this node
"""
self.assertEqual(self.o.depth_down(), 4)
self.assertEqual(self.o.get_child_by_id('child_2_1', True).depth_down(), 1)
def test_known_values_depth(self):
"""
return depth of this node in the tree
"""
self.assertEqual(self.o.depth(), 0)
self.assertEqual(self.o.get_child_by_id('child_2_1', True).depth(), 2)
def test_known_values_width(self):
"""
        return the width of the tree
"""
self.assertEqual(self.o.width(), 3)
self.assertEqual(self.o.get_child_by_id('child_2_1', True).width(), 1)
def test_known_values_root(self):
"""
return the root node of the tree
"""
self.assertEqual(self.o.get_child_by_id('child_2_1', True).root(), self.o)
def test_known_values_leaves(self):
"""
        return a list with all the leaf nodes descendant from this node
"""
self.assertEqual(self.o.leaves(), [self.o.get_child_by_id('child_2_1', True),
self.o.get_child_by_id('child_3_1', True),
self.o.get_child_by_id('child_2_3', True)])
def test_known_values_lineage(self):
c31 = self.o.get_child_by_id('child_3_1', True)
self.assertEqual(c31.lineage(), [self.o.get_child_by_id('child_3_1', True),
self.o.get_child_by_id('child_2_2', True),
self.o.get_child_by_id('child_1_1', True),
self.o])
class post_order_list_FunctionTest(TestCase):
""""""
def setUp(self):
""" called before each test from this class """
self.o = make_tree(tree)
def test_known_values_post_order_list(self):
"""
create a list with tree nodes for which the <filter> function returned true
        in a post order fashion
"""
L = ['child_2_1', 'child_3_1', 'child_2_2', 'child_1_1', 'child_2_3', 'child_1_2', 'root']
l = [n.id for n in post_order_list(self.o)]
self.assertEqual(l, L, l)
def test_known_values_post_order_list2(self):
"""
create a list with tree nodes for which the <filter> function returned true
        in a post order fashion
"""
def filter(node):
if node.id == 'child_2_2':
return 0
return 1
L = ['child_2_1', 'child_1_1', 'child_2_3', 'child_1_2', 'root']
l = [n.id for n in post_order_list(self.o, filter)]
self.assertEqual(l, L, l)
class PostfixedDepthFirstIterator_ClassTest(TestCase):
""""""
def setUp(self):
""" called before each test from this class """
self.o = make_tree(tree)
def test_known_values_next(self):
L = ['child_2_1', 'child_3_1', 'child_2_2', 'child_1_1', 'child_2_3', 'child_1_2', 'root']
iter = PostfixedDepthFirstIterator(self.o)
o = next(iter)
i = 0
while o:
self.assertEqual(o.id, L[i])
o = next(iter)
i += 1
class pre_order_list_FunctionTest(TestCase):
""""""
def setUp(self):
""" called before each test from this class """
self.o = make_tree(tree)
def test_known_values_pre_order_list(self):
"""
create a list with tree nodes for which the <filter> function returned true
in a pre order fashion
"""
L = ['root', 'child_1_1', 'child_2_1', 'child_2_2', 'child_3_1', 'child_1_2', 'child_2_3']
l = [n.id for n in pre_order_list(self.o)]
self.assertEqual(l, L, l)
def test_known_values_pre_order_list2(self):
"""
create a list with tree nodes for which the <filter> function returned true
in a pre order fashion
"""
def filter(node):
if node.id == 'child_2_2':
return 0
return 1
L = ['root', 'child_1_1', 'child_2_1', 'child_1_2', 'child_2_3']
l = [n.id for n in pre_order_list(self.o, filter)]
self.assertEqual(l, L, l)
class PrefixedDepthFirstIterator_ClassTest(TestCase):
""""""
def setUp(self):
""" called before each test from this class """
self.o = make_tree(tree)
def test_known_values_next(self):
L = ['root', 'child_1_1', 'child_2_1', 'child_2_2', 'child_3_1', 'child_1_2', 'child_2_3']
iter = PrefixedDepthFirstIterator(self.o)
o = next(iter)
i = 0
while o:
self.assertEqual(o.id, L[i])
o = next(iter)
i += 1
if __name__ == '__main__':
unittest_main()
|
from homeassistant.helpers import discovery
from homeassistant.setup import async_setup_component
from .common import NUMATO_CFG, mockup_raise
MOCKUP_ENTITY_IDS = {
"binary_sensor.numato_binary_sensor_mock_port2",
"binary_sensor.numato_binary_sensor_mock_port3",
"binary_sensor.numato_binary_sensor_mock_port4",
}
async def test_failing_setups_no_entities(hass, numato_fixture, monkeypatch):
"""When port setup fails, no entity shall be created."""
monkeypatch.setattr(numato_fixture.NumatoDeviceMock, "setup", mockup_raise)
assert await async_setup_component(hass, "numato", NUMATO_CFG)
await hass.async_block_till_done()
for entity_id in MOCKUP_ENTITY_IDS:
assert entity_id not in hass.states.async_entity_ids()
async def test_setup_callbacks(hass, numato_fixture, monkeypatch):
"""During setup a callback shall be registered."""
numato_fixture.discover()
def mock_add_event_detect(self, port, callback, direction):
assert self == numato_fixture.devices[0]
assert port == 1
        assert callable(callback)
assert direction == numato_fixture.BOTH
monkeypatch.setattr(
numato_fixture.devices[0], "add_event_detect", mock_add_event_detect
)
assert await async_setup_component(hass, "numato", NUMATO_CFG)
async def test_hass_binary_sensor_notification(hass, numato_fixture):
"""Test regular operations from within Home Assistant."""
assert await async_setup_component(hass, "numato", NUMATO_CFG)
await hass.async_block_till_done() # wait until services are registered
assert (
hass.states.get("binary_sensor.numato_binary_sensor_mock_port2").state == "on"
)
await hass.async_add_executor_job(numato_fixture.devices[0].callbacks[2], 2, False)
await hass.async_block_till_done()
assert (
hass.states.get("binary_sensor.numato_binary_sensor_mock_port2").state == "off"
)
async def test_binary_sensor_setup_without_discovery_info(hass, config, numato_fixture):
"""Test handling of empty discovery_info."""
numato_fixture.discover()
await discovery.async_load_platform(hass, "binary_sensor", "numato", None, config)
for entity_id in MOCKUP_ENTITY_IDS:
assert entity_id not in hass.states.async_entity_ids()
await hass.async_block_till_done() # wait for numato platform to be loaded
for entity_id in MOCKUP_ENTITY_IDS:
assert entity_id in hass.states.async_entity_ids()
|
from homeassistant.components.cover import (
    ATTR_POSITION,
    ATTR_TILT_POSITION,
SUPPORT_CLOSE,
SUPPORT_CLOSE_TILT,
SUPPORT_OPEN,
SUPPORT_OPEN_TILT,
SUPPORT_SET_POSITION,
SUPPORT_SET_TILT_POSITION,
SUPPORT_STOP,
SUPPORT_STOP_TILT,
CoverEntity,
)
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .base import AcmedaBase
from .const import ACMEDA_HUB_UPDATE, DOMAIN
from .helpers import async_add_acmeda_entities
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up the Acmeda Rollers from a config entry."""
hub = hass.data[DOMAIN][config_entry.entry_id]
current = set()
@callback
def async_add_acmeda_covers():
async_add_acmeda_entities(
hass, AcmedaCover, config_entry, current, async_add_entities
)
hub.cleanup_callbacks.append(
async_dispatcher_connect(
hass,
ACMEDA_HUB_UPDATE.format(config_entry.entry_id),
async_add_acmeda_covers,
)
)
class AcmedaCover(AcmedaBase, CoverEntity):
"""Representation of a Acmeda cover device."""
@property
def current_cover_position(self):
"""Return the current position of the roller blind.
None is unknown, 0 is closed, 100 is fully open.
"""
position = None
if self.roller.type != 7:
position = 100 - self.roller.closed_percent
return position
@property
def current_cover_tilt_position(self):
"""Return the current tilt of the roller blind.
None is unknown, 0 is closed, 100 is fully open.
"""
position = None
if self.roller.type in [7, 10]:
position = 100 - self.roller.closed_percent
return position
@property
def supported_features(self):
"""Flag supported features."""
supported_features = 0
if self.current_cover_position is not None:
supported_features |= (
SUPPORT_OPEN | SUPPORT_CLOSE | SUPPORT_STOP | SUPPORT_SET_POSITION
)
if self.current_cover_tilt_position is not None:
supported_features |= (
SUPPORT_OPEN_TILT
| SUPPORT_CLOSE_TILT
| SUPPORT_STOP_TILT
| SUPPORT_SET_TILT_POSITION
)
return supported_features
@property
def is_closed(self):
"""Return if the cover is closed."""
return self.roller.closed_percent == 100
async def async_close_cover(self, **kwargs):
"""Close the roller."""
await self.roller.move_down()
async def async_open_cover(self, **kwargs):
"""Open the roller."""
await self.roller.move_up()
async def async_stop_cover(self, **kwargs):
"""Stop the roller."""
await self.roller.move_stop()
async def async_set_cover_position(self, **kwargs):
"""Move the roller shutter to a specific position."""
await self.roller.move_to(100 - kwargs[ATTR_POSITION])
async def async_close_cover_tilt(self, **kwargs):
"""Close the roller."""
await self.roller.move_down()
async def async_open_cover_tilt(self, **kwargs):
"""Open the roller."""
await self.roller.move_up()
async def async_stop_cover_tilt(self, **kwargs):
"""Stop the roller."""
await self.roller.move_stop()
    async def async_set_cover_tilt_position(self, **kwargs):
        """Tilt the roller shutter to a specific position."""
        await self.roller.move_to(100 - kwargs[ATTR_TILT_POSITION])
|
import logging
import synology_srm
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_USERNAME,
CONF_VERIFY_SSL,
)
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_USERNAME = "admin"
DEFAULT_PORT = 8001
DEFAULT_SSL = True
DEFAULT_VERIFY_SSL = False
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Required(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_SSL, default=DEFAULT_SSL): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=DEFAULT_VERIFY_SSL): cv.boolean,
}
)
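# Maps raw attribute names reported by the SRM API to the names exposed as
# device_tracker extra attributes; a value of None keeps the original name
# (see get_extra_attributes below).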
ATTRIBUTE_ALIAS = {
"band": None,
"connection": None,
"current_rate": None,
"dev_type": None,
"hostname": None,
"ip6_addr": None,
"ip_addr": None,
"is_baned": "is_banned",
"is_beamforming_on": None,
"is_guest": None,
"is_high_qos": None,
"is_low_qos": None,
"is_manual_dev_type": None,
"is_manual_hostname": None,
"is_online": None,
"is_parental_controled": "is_parental_controlled",
"is_qos": None,
"is_wireless": None,
"mac": None,
"max_rate": None,
"mesh_node_id": None,
"rate_quality": None,
"signalstrength": "signal_strength",
"transferRXRate": "transfer_rx_rate",
"transferTXRate": "transfer_tx_rate",
}
def get_scanner(hass, config):
"""Validate the configuration and return Synology SRM scanner."""
scanner = SynologySrmDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class SynologySrmDeviceScanner(DeviceScanner):
"""This class scans for devices connected to a Synology SRM router."""
def __init__(self, config):
"""Initialize the scanner."""
self.client = synology_srm.Client(
host=config[CONF_HOST],
port=config[CONF_PORT],
username=config[CONF_USERNAME],
password=config[CONF_PASSWORD],
https=config[CONF_SSL],
)
if not config[CONF_VERIFY_SSL]:
self.client.http.disable_https_verify()
self.devices = []
self.success_init = self._update_info()
_LOGGER.info("Synology SRM scanner initialized")
def scan_devices(self):
"""Scan for new devices and return a list with found device IDs."""
self._update_info()
return [device["mac"] for device in self.devices]
def get_extra_attributes(self, device) -> dict:
"""Get the extra attributes of a device."""
device = next(
(result for result in self.devices if result["mac"] == device), None
)
filtered_attributes = {}
if not device:
return filtered_attributes
for attribute, alias in ATTRIBUTE_ALIAS.items():
value = device.get(attribute)
if value is None:
continue
attr = alias or attribute
filtered_attributes[attr] = value
return filtered_attributes
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
filter_named = [
result["hostname"] for result in self.devices if result["mac"] == device
]
if filter_named:
return filter_named[0]
return None
def _update_info(self):
"""Check the router for connected devices."""
_LOGGER.debug("Scanning for connected devices")
try:
self.devices = self.client.core.get_network_nsm_device({"is_online": True})
except synology_srm.http.SynologyException as ex:
_LOGGER.error("Error with the Synology SRM: %s", ex)
return False
_LOGGER.debug("Found %d device(s) connected to the router", len(self.devices))
return True
|
from datetime import timedelta
import logging
import Adafruit_DHT # pylint: disable=import-error
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_MONITORED_CONDITIONS,
CONF_NAME,
PERCENTAGE,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
from homeassistant.util.temperature import celsius_to_fahrenheit
_LOGGER = logging.getLogger(__name__)
CONF_PIN = "pin"
CONF_SENSOR = "sensor"
CONF_HUMIDITY_OFFSET = "humidity_offset"
CONF_TEMPERATURE_OFFSET = "temperature_offset"
DEFAULT_NAME = "DHT Sensor"
# DHT11 is able to deliver data once per second, DHT22 once every two seconds
MIN_TIME_BETWEEN_UPDATES = timedelta(seconds=30)
SENSOR_TEMPERATURE = "temperature"
SENSOR_HUMIDITY = "humidity"
SENSOR_TYPES = {
SENSOR_TEMPERATURE: ["Temperature", None],
SENSOR_HUMIDITY: ["Humidity", PERCENTAGE],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_SENSOR): cv.string,
vol.Required(CONF_PIN): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS, default=[]): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_TEMPERATURE_OFFSET, default=0): vol.All(
vol.Coerce(float), vol.Range(min=-100, max=100)
),
vol.Optional(CONF_HUMIDITY_OFFSET, default=0): vol.All(
vol.Coerce(float), vol.Range(min=-100, max=100)
),
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the DHT sensor."""
SENSOR_TYPES[SENSOR_TEMPERATURE][1] = hass.config.units.temperature_unit
available_sensors = {
"AM2302": Adafruit_DHT.AM2302,
"DHT11": Adafruit_DHT.DHT11,
"DHT22": Adafruit_DHT.DHT22,
}
sensor = available_sensors.get(config[CONF_SENSOR])
pin = config[CONF_PIN]
temperature_offset = config[CONF_TEMPERATURE_OFFSET]
humidity_offset = config[CONF_HUMIDITY_OFFSET]
if not sensor:
_LOGGER.error("DHT sensor type is not supported")
return False
data = DHTClient(Adafruit_DHT, sensor, pin)
dev = []
name = config[CONF_NAME]
try:
for variable in config[CONF_MONITORED_CONDITIONS]:
dev.append(
DHTSensor(
data,
variable,
SENSOR_TYPES[variable][1],
name,
temperature_offset,
humidity_offset,
)
)
except KeyError:
pass
add_entities(dev, True)
class DHTSensor(Entity):
"""Implementation of the DHT sensor."""
def __init__(
self,
dht_client,
sensor_type,
temp_unit,
name,
temperature_offset,
humidity_offset,
):
"""Initialize the sensor."""
self.client_name = name
self._name = SENSOR_TYPES[sensor_type][0]
self.dht_client = dht_client
self.temp_unit = temp_unit
self.type = sensor_type
self.temperature_offset = temperature_offset
self.humidity_offset = humidity_offset
self._state = None
self._unit_of_measurement = SENSOR_TYPES[sensor_type][1]
@property
def name(self):
"""Return the name of the sensor."""
return f"{self.client_name} {self._name}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement of this entity, if any."""
return self._unit_of_measurement
def update(self):
"""Get the latest data from the DHT and updates the states."""
self.dht_client.update()
temperature_offset = self.temperature_offset
humidity_offset = self.humidity_offset
data = self.dht_client.data
if self.type == SENSOR_TEMPERATURE and SENSOR_TEMPERATURE in data:
temperature = data[SENSOR_TEMPERATURE]
_LOGGER.debug(
"Temperature %.1f \u00b0C + offset %.1f",
temperature,
temperature_offset,
)
if -20 <= temperature < 80:
self._state = round(temperature + temperature_offset, 1)
if self.temp_unit == TEMP_FAHRENHEIT:
                    self._state = round(celsius_to_fahrenheit(temperature + temperature_offset), 1)
elif self.type == SENSOR_HUMIDITY and SENSOR_HUMIDITY in data:
humidity = data[SENSOR_HUMIDITY]
_LOGGER.debug("Humidity %.1f%% + offset %.1f", humidity, humidity_offset)
if 0 <= humidity <= 100:
self._state = round(humidity + humidity_offset, 1)
class DHTClient:
"""Get the latest data from the DHT sensor."""
def __init__(self, adafruit_dht, sensor, pin):
"""Initialize the sensor."""
self.adafruit_dht = adafruit_dht
self.sensor = sensor
self.pin = pin
self.data = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Get the latest data the DHT sensor."""
humidity, temperature = self.adafruit_dht.read_retry(self.sensor, self.pin)
if temperature:
self.data[SENSOR_TEMPERATURE] = temperature
if humidity:
self.data[SENSOR_HUMIDITY] = humidity
|
from __future__ import unicode_literals
from builtins import object
import copy
import deepdish as dd
import numpy as np
from .tools.normalize import normalize as normalizer
from .tools.reduce import reduce as reducer
from .tools.align import align as aligner
from .tools.format_data import format_data
from ._shared.helpers import convert_text, get_dtype
from .config import __version__
class DataGeometry(object):
"""
Hypertools data object class
A DataGeometry object contains the data, figure handles and transform
functions used to create a plot. Note: this class should not be called
directly, but is used by the `hyp.plot` function to create a plot object.
Parameters
----------
fig : matplotlib.Figure
The matplotlib figure handle for the plot
ax : matplotlib.Axes
The matplotlib axes handle for the plot
line_ani : matplotlib.animation.FuncAnimation
The matplotlib animation handle (if the plot is an animation)
data : list
A list of numpy arrays representing the raw data
xform_data : list
A list of numpy arrays representing the transformed data
reduce : dict
A dictionary containing the reduction model and parameters
align : dict
A dictionary containing align model and parameters
normalize : str
A string representing the kind of normalization
kwargs : dict
A dictionary containing all kwargs passed to the plot function
version : str
The version of the software used to create the class instance
"""
def __init__(self, fig=None, ax=None, line_ani=None, data=None, xform_data=None,
reduce=None, align=None, normalize=None, semantic=None,
vectorizer=None, corpus=None, kwargs=None, version=__version__,
dtype=None):
# matplotlib figure handle
self.fig = fig
# matplotlib axis handle
self.ax = ax
# matplotlib line_ani handle (if its an animation)
self.line_ani = line_ani
# convert to numpy array if text
if isinstance(data, list):
data = list(map(convert_text, data))
self.data = data
self.dtype = get_dtype(data)
# the transformed data
self.xform_data = xform_data
# dictionary of model and model_params
self.reduce = reduce
# 'hyper', 'SRM' or None
self.align = align
# 'within', 'across', 'row' or False
self.normalize = normalize
# text params
self.semantic = semantic
self.vectorizer = vectorizer
self.corpus = corpus
# dictionary of kwargs
self.kwargs = kwargs
# hypertools version
self.version = version
def get_data(self):
"""Return a copy of the data"""
return copy.copy(self.data)
def get_formatted_data(self):
"""Return a formatted copy of the data"""
return format_data(self.data)
# a function to transform new data
def transform(self, data=None):
"""
Return transformed data, or transform new data using the same model
parameters
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to transform. If no data is passed, the xform_data from
the DataGeometry object will be returned.
Returns
----------
xformed_data : list of numpy arrays
The transformed data
"""
# if no new data passed,
if data is None:
return self.xform_data
else:
formatted = format_data(
data,
semantic=self.semantic,
vectorizer=self.vectorizer,
corpus=self.corpus,
ppca=True)
norm = normalizer(formatted, normalize=self.normalize)
reduction = reducer(
norm,
reduce=self.reduce,
ndims=self.reduce['params']['n_components'])
return aligner(reduction, align=self.align)
# a function to plot the data
def plot(self, data=None, **kwargs):
"""
Plot the data
Parameters
----------
data : numpy array, pandas dataframe or list of arrays/dfs
The data to plot. If no data is passed, the xform_data from
            the DataGeometry object will be plotted.
kwargs : keyword arguments
Any keyword arguments supported by `hypertools.plot` are also supported
by this method
Returns
----------
geo : hypertools.DataGeometry
A new data geometry object
"""
# import plot here to avoid circular imports
from .plot.plot import plot as plotter
if data is None:
d = copy.copy(self.data)
transform = copy.copy(self.xform_data)
if any([k in kwargs for k in ['reduce', 'align', 'normalize',
'semantic', 'vectorizer', 'corpus']]):
d = copy.copy(self.data)
transform = None
else:
d = data
transform = None
# get kwargs and update with new kwargs
new_kwargs = copy.copy(self.kwargs)
update_kwargs = dict(transform=transform, reduce=self.reduce,
align=self.align, normalize=self.normalize,
semantic=self.semantic, vectorizer=self.vectorizer,
corpus=self.corpus)
new_kwargs.update(update_kwargs)
for key in kwargs:
new_kwargs.update({key : kwargs[key]})
return plotter(d, **new_kwargs)
def save(self, fname, compression='blosc'):
"""
Save method for the data geometry object
The data will be saved as a 'geo' file, which is a dictionary containing
the elements of a data geometry object saved in the hd5 format using
`deepdish`.
Parameters
----------
fname : str
A name for the file. If the file extension (.geo) is not specified,
it will be appended.
compression : str
The kind of compression to use. See the deepdish documentation for
options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
"""
if hasattr(self, 'dtype'):
if 'list' in self.dtype:
data = np.array(self.data)
elif 'df' in self.dtype:
data = {k: np.array(v).astype('str') for k, v in self.data.to_dict('list').items()}
            else:
                data = self.data
        else:
            data = self.data
# put geo vars into a dict
geo = {
'data' : data,
'xform_data' : np.array(self.xform_data),
'reduce' : self.reduce,
'align' : self.align,
'normalize' : self.normalize,
'semantic' : self.semantic,
'corpus' : np.array(self.corpus) if isinstance(self.corpus, list) else self.corpus,
'kwargs' : self.kwargs,
'version' : self.version,
'dtype' : self.dtype
}
# if extension wasn't included, add it
        if fname[-4:] != '.geo':
            fname += '.geo'
# save
dd.io.save(fname, geo, compression=compression)
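# Hedged usage sketch (illustrative only; `hyp` refers to the top-level
# hypertools package, whose plot function returns a DataGeometry instance):
# geo = hyp.plot(data)
# xformed = geo.transform(new_data)   # re-applies the stored normalize/reduce/align steps
# geo.save('my_analysis')             # writes 'my_analysis.geo' via deepdish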
|
import numpy as np
import pandas as pd
import xarray as xr
from . import parameterized, randn, requires_dask
nx = 3000
long_nx = 30000000
ny = 2000
nt = 1000
window = 20
randn_xy = randn((nx, ny), frac_nan=0.1)
randn_xt = randn((nx, nt))
randn_t = randn((nt,))
randn_long = randn((long_nx,), frac_nan=0.1)
new_x_short = np.linspace(0.3 * nx, 0.7 * nx, 100)
new_x_long = np.linspace(0.3 * nx, 0.7 * nx, 1000)
new_y_long = np.linspace(0.1, 0.9, 1000)
class Interpolation:
def setup(self, *args, **kwargs):
self.ds = xr.Dataset(
{
"var1": (("x", "y"), randn_xy),
"var2": (("x", "t"), randn_xt),
"var3": (("t",), randn_t),
},
coords={
"x": np.arange(nx),
"y": np.linspace(0, 1, ny),
"t": pd.date_range("1970-01-01", periods=nt, freq="D"),
"x_coords": ("x", np.linspace(1.1, 2.1, nx)),
},
)
@parameterized(["method", "is_short"], (["linear", "cubic"], [True, False]))
def time_interpolation(self, method, is_short):
new_x = new_x_short if is_short else new_x_long
self.ds.interp(x=new_x, method=method).load()
@parameterized(["method"], (["linear", "nearest"]))
def time_interpolation_2d(self, method):
self.ds.interp(x=new_x_long, y=new_y_long, method=method).load()
class InterpolationDask(Interpolation):
def setup(self, *args, **kwargs):
requires_dask()
super().setup(**kwargs)
self.ds = self.ds.chunk({"t": 50})
|
import imp
import os
import base
from nose import tools
from docker_registry.lib import checksums
from docker_registry.lib import xtarfile
# setting like this in test, due to flake8 H302
tarfile = xtarfile.tarfile
# To test whether the UnicodeDecodeError still exists
# (it's still present in python 3.4.0)
# ((loading this way, since we've monkey patched currently loaded tarfile))
tarfile_vanilla = imp.load_module('test_failing', *imp.find_module('tarfile'))
class TestTarfile(base.TestCase):
@tools.raises(UnicodeDecodeError)
def test_vanilla_tarfile(self):
layer_fh = open(os.path.join(base.data_dir, "xattr/layer.tar"))
tar = tarfile_vanilla.open(mode='r|*', fileobj=layer_fh)
assert tar
def test_headers(self):
expected = {
"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457/layer.tar": { # noqa
"dev": {
"headers": {"size": 0, "mode": 0o40755, "type": "5"},
"pax": {},
},
"dev/core": {
"headers": {"size": 0, "mode": 0o120777, "type": "2"},
"pax": {},
},
"dev/stderr": {
"headers": {"size": 0, "mode": 0o120777, "type": "2"},
"pax": {},
},
"dev/stdout": {
"headers": {"size": 0, "mode": 0o120777, "type": "2"},
"pax": {},
},
"dev/fd": {
"headers": {"size": 0, "mode": 0o120777, "type": "2"},
"pax": {},
},
"dev/ptmx": {
"headers": {"size": 0, "mode": 0o120777, "type": "2"},
"pax": {},
},
"dev/stdin": {
"headers": {"size": 0, "mode": 0o120777, "type": "2"},
"pax": {},
},
"etc": {
"headers": {"size": 0, "mode": 0o40755, "type": "5"},
"pax": {},
},
"etc/sudoers": {
"headers": {"size": 3348, "mode": 0o100440, "type": "0"},
"pax": {},
},
},
"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158/layer.tar": { # noqa
".": {
"headers": {"size": 0, "mode": 0o40755, "type": "5"},
"pax": {},
},
},
"xattr/layer.tar": {
"file": {
"headers": {"size": 0, "mode": 0o100644, "type": "0"},
"pax": {u"SCHILY.xattr.security.capability": "\x01\x00\x00\x02\xc0\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"}, # noqa
},
},
}
for file in expected.keys():
layer_fh = open(os.path.join(base.data_dir, file))
tar = tarfile.open(mode='r|*', fileobj=layer_fh)
member_count = 0
for member in tar:
member_count += 1
# check that we know the file names
msg = "in %s, did not find file %s" % (file, member.path)
                l = len([x for x in expected[file].keys()
                         if member.path in x])
assert (l > 0), msg
e = expected[file][member.path]
for attr in e["headers"].keys():
msg = "in %s:%s, expected %s of %s, but got %s" % (
file, member.path, attr, e["headers"][attr],
getattr(member, attr))
assert e["headers"][attr] == getattr(member, attr), msg
for attr in e["pax"].keys():
msg = b"in %s:%s, expected %s of %s, but got %s".format(
file, member.path, attr, e["pax"][attr],
member.pax_headers[attr])
assert e["pax"][attr] == member.pax_headers[attr], msg
assert member_count == len(expected[file])
layer_fh.close()
def test_tarsum(self):
expected = {
"46af0962ab5afeb5ce6740d4d91652e69206fc991fd5328c1a94d364ad00e457": "tarsum+sha256:e58fcf7418d4390dec8e8fb69d88c06ec07039d651fedd3aa72af9972e7d046b", # noqa
"511136ea3c5a64f264b78b5433614aec563103b4d4702f3ba7d4d2698e22c158": "tarsum+sha256:ac672ee85da9ab7f9667ae3c32841d3e42f33cc52c273c23341dabba1c8b0c8b", # noqa
"xattr": "tarsum+sha256:e86f81a4d552f13039b1396ed03ca968ea9717581f9577ef1876ea6ff9b38c98", # noqa
}
for layer in expected.keys():
layer_fh = open(os.path.join(base.data_dir, layer, "layer.tar"))
json_fh = open(os.path.join(base.data_dir, layer, "json"))
tarsum = checksums.TarSum(json_fh.read())
tar = tarfile.open(mode='r|*', fileobj=layer_fh)
for member in tar:
tarsum.append(member, tar)
sum = tarsum.compute()
msg = "layer %s, expected [%s] but got [%s]" % (
layer, expected[layer], sum)
assert expected[layer] == sum, msg
layer_fh.close()
json_fh.close()
|
import unittest
import numpy as np
import chainer
from chainer.initializers import Zero
from chainer.testing import attr
from chainer import Variable
from chainercv.links import VGG16
from chainercv.utils import testing
@testing.parameterize(
{'pick': 'prob', 'shapes': (1, 200), 'n_class': 200},
{'pick': 'pool5', 'shapes': (1, 512, 7, 7), 'n_class': None},
{'pick': ['conv5_3', 'conv4_2'],
'shapes': ((1, 512, 14, 14), (1, 512, 28, 28)), 'n_class': None},
)
class TestVGG16Call(unittest.TestCase):
def setUp(self):
self.link = VGG16(
n_class=self.n_class, pretrained_model=None,
initialW=Zero())
self.link.pick = self.pick
def check_call(self):
xp = self.link.xp
x1 = Variable(xp.asarray(np.random.uniform(
-1, 1, (1, 3, 224, 224)).astype(np.float32)))
with chainer.no_backprop_mode():
features = self.link(x1)
if isinstance(features, tuple):
for activation, shape in zip(features, self.shapes):
self.assertEqual(activation.shape, shape)
else:
self.assertEqual(features.shape, self.shapes)
self.assertEqual(features.dtype, np.float32)
@attr.slow
def test_call_cpu(self):
self.check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self.check_call()
@testing.parameterize(*testing.product({
'n_class': [None, 500, 1000],
'pretrained_model': ['imagenet'],
    'mean': [None, np.random.uniform(size=(3, 1, 1)).astype(np.float32)],
}))
class TestVGG16Pretrained(unittest.TestCase):
@attr.slow
def test_pretrained(self):
kwargs = {
'n_class': self.n_class,
'pretrained_model': self.pretrained_model,
'mean': self.mean,
}
if self.pretrained_model == 'imagenet':
valid = self.n_class in {None, 1000}
if valid:
VGG16(**kwargs)
else:
with self.assertRaises(ValueError):
VGG16(**kwargs)
testing.run_module(__name__, __file__)
|
from homeassistant.components.light import (
ATTR_BRIGHTNESS,
ATTR_BRIGHTNESS_PCT,
ATTR_COLOR_NAME,
ATTR_COLOR_TEMP,
ATTR_EFFECT,
ATTR_FLASH,
ATTR_HS_COLOR,
ATTR_KELVIN,
ATTR_PROFILE,
ATTR_RGB_COLOR,
ATTR_TRANSITION,
ATTR_WHITE_VALUE,
ATTR_XY_COLOR,
DOMAIN,
)
from homeassistant.const import (
ATTR_ENTITY_ID,
ENTITY_MATCH_ALL,
SERVICE_TOGGLE,
SERVICE_TURN_OFF,
SERVICE_TURN_ON,
)
from homeassistant.loader import bind_hass
@bind_hass
def turn_on(
hass,
entity_id=ENTITY_MATCH_ALL,
transition=None,
brightness=None,
brightness_pct=None,
rgb_color=None,
xy_color=None,
hs_color=None,
color_temp=None,
kelvin=None,
white_value=None,
profile=None,
flash=None,
effect=None,
color_name=None,
):
"""Turn all or specified light on."""
hass.add_job(
async_turn_on,
hass,
entity_id,
transition,
brightness,
brightness_pct,
rgb_color,
xy_color,
hs_color,
color_temp,
kelvin,
white_value,
profile,
flash,
effect,
color_name,
)
async def async_turn_on(
hass,
entity_id=ENTITY_MATCH_ALL,
transition=None,
brightness=None,
brightness_pct=None,
rgb_color=None,
xy_color=None,
hs_color=None,
color_temp=None,
kelvin=None,
white_value=None,
profile=None,
flash=None,
effect=None,
color_name=None,
):
"""Turn all or specified light on."""
data = {
key: value
for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_PROFILE, profile),
(ATTR_TRANSITION, transition),
(ATTR_BRIGHTNESS, brightness),
(ATTR_BRIGHTNESS_PCT, brightness_pct),
(ATTR_RGB_COLOR, rgb_color),
(ATTR_XY_COLOR, xy_color),
(ATTR_HS_COLOR, hs_color),
(ATTR_COLOR_TEMP, color_temp),
(ATTR_KELVIN, kelvin),
(ATTR_WHITE_VALUE, white_value),
(ATTR_FLASH, flash),
(ATTR_EFFECT, effect),
(ATTR_COLOR_NAME, color_name),
]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_TURN_ON, data, blocking=True)
@bind_hass
def turn_off(hass, entity_id=ENTITY_MATCH_ALL, transition=None):
"""Turn all or specified light off."""
hass.add_job(async_turn_off, hass, entity_id, transition)
async def async_turn_off(hass, entity_id=ENTITY_MATCH_ALL, transition=None):
"""Turn all or specified light off."""
data = {
key: value
for key, value in [(ATTR_ENTITY_ID, entity_id), (ATTR_TRANSITION, transition)]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_TURN_OFF, data, blocking=True)
@bind_hass
def toggle(
hass,
entity_id=ENTITY_MATCH_ALL,
transition=None,
brightness=None,
brightness_pct=None,
rgb_color=None,
xy_color=None,
hs_color=None,
color_temp=None,
kelvin=None,
white_value=None,
profile=None,
flash=None,
effect=None,
color_name=None,
):
"""Toggle all or specified light."""
hass.add_job(
async_toggle,
hass,
entity_id,
transition,
brightness,
brightness_pct,
rgb_color,
xy_color,
hs_color,
color_temp,
kelvin,
white_value,
profile,
flash,
effect,
color_name,
)
async def async_toggle(
hass,
entity_id=ENTITY_MATCH_ALL,
transition=None,
brightness=None,
brightness_pct=None,
rgb_color=None,
xy_color=None,
hs_color=None,
color_temp=None,
kelvin=None,
white_value=None,
profile=None,
flash=None,
effect=None,
color_name=None,
):
"""Turn all or specified light on."""
data = {
key: value
for key, value in [
(ATTR_ENTITY_ID, entity_id),
(ATTR_PROFILE, profile),
(ATTR_TRANSITION, transition),
(ATTR_BRIGHTNESS, brightness),
(ATTR_BRIGHTNESS_PCT, brightness_pct),
(ATTR_RGB_COLOR, rgb_color),
(ATTR_XY_COLOR, xy_color),
(ATTR_HS_COLOR, hs_color),
(ATTR_COLOR_TEMP, color_temp),
(ATTR_KELVIN, kelvin),
(ATTR_WHITE_VALUE, white_value),
(ATTR_FLASH, flash),
(ATTR_EFFECT, effect),
(ATTR_COLOR_NAME, color_name),
]
if value is not None
}
await hass.services.async_call(DOMAIN, SERVICE_TOGGLE, data, blocking=True)
|
from docker_registry.core import compat
from docker_registry.core import lru
# In case you want to mock (and that doesn't work well)
# import mock
# import mockredis
# @mock.patch('docker_registry.core.lru.redis.StrictRedis',
# mockredis.mock_strict_redis_client)
# def boot():
# lru.init()
# boot()
lru.init()
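# Dumb is a minimal in-memory backing store wrapped with the lru cache
# decorators; the tests below exercise it to check how values round-trip
# through the cache (note that cached values come back as bytes).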
class Dumb(object):
value = {}
@lru.get
def get(self, key):
if key not in self.value:
return None
return self.value[key]
@lru.set
def set(self, key, value):
self.value[key] = value
@lru.remove
def remove(self, key):
if key not in self.value:
return
del self.value[key]
class TestLru(object):
def setUp(self):
self._dumb = Dumb()
def testNonExistentGet(self):
assert not self._dumb.get('nonexistent')
assert not self._dumb.get('nonexistent')
def testSetSimple1(self):
content = 'bar'
result = b'bar'
self._dumb.set('foo', content)
assert self._dumb.get('foo') == result
assert self._dumb.get('foo') == result
def testSetBytes1(self):
content = b'foo'
result = b'foo'
self._dumb.set('foo', content)
assert self._dumb.get('foo') == result
def testSetBytes2(self):
content = b'\xc3\x9f'
result = b'\xc3\x9f'
self._dumb.set('foo', content)
assert self._dumb.get('foo') == result
def testSetUnicode1(self):
content = u'foo'
result = b'foo'
self._dumb.set('foo', content)
assert self._dumb.get('foo') == result
def testSetUnicode2(self):
content = u'ß'
result = b'\xc3\x9f'
self._dumb.set('foo', content)
assert self._dumb.get('foo') == result
def testSetUnicode3(self):
content = u'ß'.encode('utf8')
result = b'\xc3\x9f'
self._dumb.set('foo', content)
assert self._dumb.get('foo') == result
def testSetUnicode4(self):
content = 'ß'
if compat.is_py2:
content = content.decode('utf8')
content = content.encode('utf8')
result = b'\xc3\x9f'
self._dumb.set('foo', content)
assert self._dumb.get('foo') == result
def testRemove(self):
self._dumb.set('foo', 'bar')
assert self._dumb.get('foo')
self._dumb.remove('foo')
assert not self._dumb.get('foo')
assert not self._dumb.get('foo')
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import patch
from diamond.collector import Collector
from example import ExampleCollector
##########################################################################
class TestExampleCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('ExampleCollector', {
'interval': 10
})
self.collector = ExampleCollector(config, None)
def test_import(self):
self.assertTrue(ExampleCollector)
@patch.object(Collector, 'publish')
def test(self, publish_mock):
self.collector.collect()
metrics = {
'my.example.metric': 42
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
##########################################################################
if __name__ == "__main__":
unittest.main()
|
from collections import deque
from functools import wraps
import logging
from typing import Any
from homeassistant.helpers.typing import HomeAssistantType
from .const import ATTR_DISCOVERY_PAYLOAD, ATTR_DISCOVERY_TOPIC
from .models import MessageCallbackType
_LOGGER = logging.getLogger(__name__)
DATA_MQTT_DEBUG_INFO = "mqtt_debug_info"
STORED_MESSAGES = 10
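# Layout of the debug data kept in hass.data[DATA_MQTT_DEBUG_INFO]
# (built up by the helpers below):
# {
#     "entities": {
#         entity_id: {
#             "subscriptions": {topic: {"count": int, "messages": deque}},
#             "discovery_data": {...},
#         }
#     },
#     "triggers": {discovery_hash: {"device_id": ..., "discovery_data": {...}}},
# }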
def log_messages(hass: HomeAssistantType, entity_id: str) -> MessageCallbackType:
"""Wrap an MQTT message callback to support message logging."""
def _log_message(msg):
"""Log message."""
debug_info = hass.data[DATA_MQTT_DEBUG_INFO]
messages = debug_info["entities"][entity_id]["subscriptions"][
msg.subscribed_topic
]["messages"]
if msg not in messages:
messages.append(msg)
def _decorator(msg_callback: MessageCallbackType):
@wraps(msg_callback)
def wrapper(msg: Any) -> None:
"""Log message."""
_log_message(msg)
msg_callback(msg)
setattr(wrapper, "__entity_id", entity_id)
return wrapper
return _decorator
def add_subscription(hass, message_callback, subscription):
"""Prepare debug data for subscription."""
entity_id = getattr(message_callback, "__entity_id", None)
if entity_id:
debug_info = hass.data.setdefault(
DATA_MQTT_DEBUG_INFO, {"entities": {}, "triggers": {}}
)
entity_info = debug_info["entities"].setdefault(
entity_id, {"subscriptions": {}, "discovery_data": {}}
)
if subscription not in entity_info["subscriptions"]:
entity_info["subscriptions"][subscription] = {
"count": 0,
"messages": deque([], STORED_MESSAGES),
}
entity_info["subscriptions"][subscription]["count"] += 1
def remove_subscription(hass, message_callback, subscription):
"""Remove debug data for subscription if it exists."""
entity_id = getattr(message_callback, "__entity_id", None)
if entity_id and entity_id in hass.data[DATA_MQTT_DEBUG_INFO]["entities"]:
hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]["subscriptions"][
subscription
]["count"] -= 1
if not hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]["subscriptions"][
subscription
]["count"]:
hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]["subscriptions"].pop(
subscription
)
def add_entity_discovery_data(hass, discovery_data, entity_id):
"""Add discovery data."""
debug_info = hass.data.setdefault(
DATA_MQTT_DEBUG_INFO, {"entities": {}, "triggers": {}}
)
entity_info = debug_info["entities"].setdefault(
entity_id, {"subscriptions": {}, "discovery_data": {}}
)
entity_info["discovery_data"] = discovery_data
def update_entity_discovery_data(hass, discovery_payload, entity_id):
"""Update discovery data."""
entity_info = hass.data[DATA_MQTT_DEBUG_INFO]["entities"][entity_id]
entity_info["discovery_data"][ATTR_DISCOVERY_PAYLOAD] = discovery_payload
def remove_entity_data(hass, entity_id):
"""Remove discovery data."""
hass.data[DATA_MQTT_DEBUG_INFO]["entities"].pop(entity_id)
def add_trigger_discovery_data(hass, discovery_hash, discovery_data, device_id):
"""Add discovery data."""
debug_info = hass.data.setdefault(
DATA_MQTT_DEBUG_INFO, {"entities": {}, "triggers": {}}
)
debug_info["triggers"][discovery_hash] = {
"device_id": device_id,
"discovery_data": discovery_data,
}
def update_trigger_discovery_data(hass, discovery_hash, discovery_payload):
"""Update discovery data."""
trigger_info = hass.data[DATA_MQTT_DEBUG_INFO]["triggers"][discovery_hash]
trigger_info["discovery_data"][ATTR_DISCOVERY_PAYLOAD] = discovery_payload
def remove_trigger_discovery_data(hass, discovery_hash):
"""Remove discovery data."""
hass.data[DATA_MQTT_DEBUG_INFO]["triggers"][discovery_hash]["discovery_data"] = None
async def info_for_device(hass, device_id):
"""Get debug info for a device."""
mqtt_info = {"entities": [], "triggers": []}
entity_registry = await hass.helpers.entity_registry.async_get_registry()
entries = hass.helpers.entity_registry.async_entries_for_device(
entity_registry, device_id
)
mqtt_debug_info = hass.data.setdefault(
DATA_MQTT_DEBUG_INFO, {"entities": {}, "triggers": {}}
)
for entry in entries:
if entry.entity_id not in mqtt_debug_info["entities"]:
continue
entity_info = mqtt_debug_info["entities"][entry.entity_id]
subscriptions = [
{
"topic": topic,
"messages": [
{
"payload": msg.payload,
"qos": msg.qos,
"retain": msg.retain,
"time": msg.timestamp,
"topic": msg.topic,
}
for msg in list(subscription["messages"])
],
}
for topic, subscription in entity_info["subscriptions"].items()
]
discovery_data = {
"topic": entity_info["discovery_data"].get(ATTR_DISCOVERY_TOPIC, ""),
"payload": entity_info["discovery_data"].get(ATTR_DISCOVERY_PAYLOAD, ""),
}
mqtt_info["entities"].append(
{
"entity_id": entry.entity_id,
"subscriptions": subscriptions,
"discovery_data": discovery_data,
}
)
for trigger in mqtt_debug_info["triggers"].values():
if trigger["device_id"] != device_id:
continue
discovery_data = {
"topic": trigger["discovery_data"][ATTR_DISCOVERY_TOPIC],
"payload": trigger["discovery_data"][ATTR_DISCOVERY_PAYLOAD],
}
mqtt_info["triggers"].append({"discovery_data": discovery_data})
return mqtt_info
|
import os
from os import path as op
title = 'mne-python flow diagram'
font_face = 'Arial'
node_size = 12
node_small_size = 9
edge_size = 9
sensor_color = '#7bbeca'
source_color = '#ff6347'
legend = """
<<FONT POINT-SIZE="%s">
<TABLE BORDER="0" CELLBORDER="0" CELLSPACING="4" CELLPADDING="4">
<TR><TD BGCOLOR="%s"> </TD><TD ALIGN="left">
Sensor (M/EEG) space</TD></TR>
<TR><TD BGCOLOR="%s"> </TD><TD ALIGN="left">
Source (brain) space</TD></TR>
</TABLE></FONT>>""" % (edge_size, sensor_color, source_color)
legend = ''.join(legend.split('\n'))
nodes = dict(
T1='T1',
flashes='Flash5/30',
trans='Head-MRI trans',
recon='Freesurfer surfaces',
bem='BEM',
src='Source space\nmne.SourceSpaces',
cov='Noise covariance\nmne.Covariance',
fwd='Forward solution\nmne.forward.Forward',
inv='Inverse operator\nmne.minimum_norm.InverseOperator',
stc='Source estimate\nmne.SourceEstimate',
raw='Raw data\nmne.io.Raw',
epo='Epoched data\nmne.Epochs',
evo='Averaged data\nmne.Evoked',
pre='Preprocessed data\nmne.io.Raw',
legend=legend,
)
sensor_space = ('raw', 'pre', 'epo', 'evo', 'cov')
source_space = ('src', 'stc', 'bem', 'flashes', 'recon', 'T1')
edges = (
('T1', 'recon'),
('flashes', 'bem'),
('recon', 'bem'),
('recon', 'src', 'mne.setup_source_space'),
('src', 'fwd'),
('bem', 'fwd'),
('trans', 'fwd', 'mne.make_forward_solution'),
('fwd', 'inv'),
('cov', 'inv', 'mne.make_inverse_operator'),
('inv', 'stc'),
('evo', 'stc', 'mne.minimum_norm.apply_inverse'),
('raw', 'pre', 'raw.filter\n'
'mne.preprocessing.ICA\n'
'mne.preprocessing.compute_proj_eog\n'
'mne.preprocessing.compute_proj_ecg\n'
'...'),
('pre', 'epo', 'mne.Epochs'),
('epo', 'evo', 'epochs.average'),
('epo', 'cov', 'mne.compute_covariance'),
)
subgraphs = (
[('T1', 'flashes', 'recon', 'bem', 'src'),
('<Structural information<BR/><FONT POINT-SIZE="%s"><I>'
'Freesurfer / MNE-C</I></FONT>>' % node_small_size)],
)
def setup(app):
app.connect('builder-inited', generate_flow_diagram)
app.add_config_value('make_flow_diagram', True, 'html')
def setup_module():
# HACK: Stop nosetests running setup() above
pass
def generate_flow_diagram(app):
out_dir = op.join(app.builder.outdir, '_static')
if not op.isdir(out_dir):
os.makedirs(out_dir)
out_fname = op.join(out_dir, 'mne-python_flow.svg')
make_flow_diagram = app is None or \
bool(app.builder.config.make_flow_diagram)
if not make_flow_diagram:
print('Skipping flow diagram, webpage will have a missing image')
return
import pygraphviz as pgv
g = pgv.AGraph(name=title, directed=True)
for key, label in nodes.items():
label = label.split('\n')
if len(label) > 1:
label[0] = ('<<FONT POINT-SIZE="%s">' % node_size
+ label[0] + '</FONT>')
for li in range(1, len(label)):
label[li] = ('<FONT POINT-SIZE="%s"><I>' % node_small_size
+ label[li] + '</I></FONT>')
label[-1] = label[-1] + '>'
label = '<BR/>'.join(label)
else:
label = label[0]
g.add_node(key, shape='plaintext', label=label)
# Create and customize nodes and edges
for edge in edges:
g.add_edge(*edge[:2])
e = g.get_edge(*edge[:2])
if len(edge) > 2:
e.attr['label'] = ('<<I>' +
'<BR ALIGN="LEFT"/>'.join(edge[2].split('\n')) +
'<BR ALIGN="LEFT"/></I>>')
e.attr['fontsize'] = edge_size
# Change colors
for these_nodes, color in zip((sensor_space, source_space),
(sensor_color, source_color)):
for node in these_nodes:
g.get_node(node).attr['fillcolor'] = color
g.get_node(node).attr['style'] = 'filled'
# Create subgraphs
for si, subgraph in enumerate(subgraphs):
g.add_subgraph(subgraph[0], 'cluster%s' % si,
label=subgraph[1], color='black')
# Format (sub)graphs
for gr in g.subgraphs() + [g]:
for x in [gr.node_attr, gr.edge_attr]:
x['fontname'] = font_face
g.node_attr['shape'] = 'box'
# A couple of special ones
for ni, node in enumerate(('fwd', 'inv', 'trans')):
node = g.get_node(node)
node.attr['gradientangle'] = 270
colors = (source_color, sensor_color)
colors = colors if ni == 0 else colors[::-1]
node.attr['fillcolor'] = ':'.join(colors)
node.attr['style'] = 'filled'
del node
g.get_node('legend').attr.update(shape='plaintext', margin=0, rank='sink')
# put legend in same rank/level as inverse
leg = g.add_subgraph(['legend', 'inv'], name='legendy')
leg.graph_attr['rank'] = 'same'
g.layout('dot')
g.draw(out_fname, format='svg')
return g
# This is useful for testing/iterating to see what the result looks like
if __name__ == '__main__':
from mne.io.constants import Bunch
out_dir = op.abspath(op.join(op.dirname(__file__), '..', '_build', 'html'))
app = Bunch(builder=Bunch(outdir=out_dir,
config=Bunch(make_flow_diagram=True)))
g = generate_flow_diagram(app)
|
from vine import promise, transform
from kombu.asynchronous.aws.ext import AWSRequest, get_response
from kombu.asynchronous.http import Headers, Request, get_client
import io
try: # pragma: no cover
from email import message_from_bytes
from email.mime.message import MIMEMessage
# py3
def message_from_headers(hdr): # noqa
bs = "\r\n".join("{}: {}".format(*h) for h in hdr)
return message_from_bytes(bs.encode())
except ImportError: # pragma: no cover
from mimetools import Message as MIMEMessage # noqa
# py2
def message_from_headers(hdr): # noqa
return io.BytesIO(b'\r\n'.join(
b'{}: {}'.format(*h) for h in hdr
))
__all__ = (
'AsyncHTTPSConnection', 'AsyncConnection',
)
class AsyncHTTPResponse:
"""Async HTTP Response."""
def __init__(self, response):
self.response = response
self._msg = None
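        # 10 == HTTP/1.0, matching the http.client HTTPResponse.version convention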
self.version = 10
def read(self, *args, **kwargs):
return self.response.body
def getheader(self, name, default=None):
return self.response.headers.get(name, default)
def getheaders(self):
return list(self.response.headers.items())
@property
def msg(self):
if self._msg is None:
self._msg = MIMEMessage(message_from_headers(self.getheaders()))
return self._msg
@property
def status(self):
return self.response.code
@property
def reason(self):
if self.response.error:
return self.response.error.message
return ''
def __repr__(self):
return repr(self.response)
class AsyncHTTPSConnection:
"""Async HTTP Connection."""
Request = Request
Response = AsyncHTTPResponse
method = 'GET'
path = '/'
body = None
default_ports = {'http': 80, 'https': 443}
def __init__(self, strict=None, timeout=20.0, http_client=None):
self.headers = []
self.timeout = timeout
self.strict = strict
self.http_client = http_client or get_client()
def request(self, method, path, body=None, headers=None):
self.path = path
self.method = method
if body is not None:
try:
read = body.read
except AttributeError:
self.body = body
else:
self.body = read()
if headers is not None:
self.headers.extend(list(headers.items()))
def getrequest(self):
headers = Headers(self.headers)
return self.Request(self.path, method=self.method, headers=headers,
body=self.body, connect_timeout=self.timeout,
request_timeout=self.timeout, validate_cert=False)
def getresponse(self, callback=None):
request = self.getrequest()
request.then(transform(self.Response, callback))
return self.http_client.add_request(request)
def set_debuglevel(self, level):
pass
def connect(self):
pass
def close(self):
pass
def putrequest(self, method, path):
self.method = method
self.path = path
def putheader(self, header, value):
self.headers.append((header, value))
def endheaders(self):
pass
def send(self, data):
if self.body:
self.body += data
else:
self.body = data
def __repr__(self):
return f'<AsyncHTTPConnection: {self.getrequest()!r}>'
class AsyncConnection:
"""Async AWS Connection."""
def __init__(self, sqs_connection, http_client=None, **kwargs): # noqa
self.sqs_connection = sqs_connection
self._httpclient = http_client or get_client()
def get_http_connection(self):
return AsyncHTTPSConnection(http_client=self._httpclient)
def _mexe(self, request, sender=None, callback=None):
callback = callback or promise()
conn = self.get_http_connection()
if callable(sender):
sender(conn, request.method, request.path, request.body,
request.headers, callback)
else:
conn.request(request.method, request.url,
request.body, request.headers)
conn.getresponse(callback=callback)
return callback
class AsyncAWSQueryConnection(AsyncConnection):
"""Async AWS Query Connection."""
STATUS_CODE_OK = 200
STATUS_CODE_REQUEST_TIMEOUT = 408
STATUS_CODE_NETWORK_CONNECT_TIMEOUT_ERROR = 599
STATUS_CODE_INTERNAL_ERROR = 500
STATUS_CODE_BAD_GATEWAY = 502
STATUS_CODE_SERVICE_UNAVAILABLE_ERROR = 503
STATUS_CODE_GATEWAY_TIMEOUT = 504
STATUS_CODES_SERVER_ERRORS = (
STATUS_CODE_INTERNAL_ERROR,
STATUS_CODE_BAD_GATEWAY,
STATUS_CODE_SERVICE_UNAVAILABLE_ERROR
)
STATUS_CODES_TIMEOUT = (
STATUS_CODE_REQUEST_TIMEOUT,
STATUS_CODE_NETWORK_CONNECT_TIMEOUT_ERROR,
STATUS_CODE_GATEWAY_TIMEOUT
)
def __init__(self, sqs_connection, http_client=None,
http_client_params=None, **kwargs):
if not http_client_params:
http_client_params = {}
AsyncConnection.__init__(self, sqs_connection, http_client,
**http_client_params)
def make_request(self, operation, params_, path, verb, callback=None): # noqa
params = params_.copy()
if operation:
params['Action'] = operation
signer = self.sqs_connection._request_signer # noqa
# defaults for non-get
signing_type = 'standard'
param_payload = {'data': params}
if verb.lower() == 'get':
# query-based opts
signing_type = 'presignurl'
param_payload = {'params': params}
request = AWSRequest(method=verb, url=path, **param_payload)
signer.sign(operation, request, signing_type=signing_type)
prepared_request = request.prepare()
return self._mexe(prepared_request, callback=callback)
def get_list(self, operation, params, markers, path='/', parent=None, verb='POST', callback=None): # noqa
return self.make_request(
operation, params, path, verb,
callback=transform(
self._on_list_ready, callback, parent or self, markers,
operation
),
)
def get_object(self, operation, params, path='/', parent=None, verb='GET', callback=None): # noqa
return self.make_request(
operation, params, path, verb,
callback=transform(
self._on_obj_ready, callback, parent or self, operation
),
)
def get_status(self, operation, params, path='/', parent=None, verb='GET', callback=None): # noqa
return self.make_request(
operation, params, path, verb,
callback=transform(
self._on_status_ready, callback, parent or self, operation
),
)
def _on_list_ready(self, parent, markers, operation, response): # noqa
service_model = self.sqs_connection.meta.service_model
if response.status == self.STATUS_CODE_OK:
_, parsed = get_response(
service_model.operation_model(operation), response.response
)
return parsed
elif (
response.status in self.STATUS_CODES_TIMEOUT or
response.status in self.STATUS_CODES_SERVER_ERRORS
):
# When the server returns a timeout or 50X server error,
# the response is interpreted as an empty list.
# This prevents hanging the Celery worker.
return []
else:
raise self._for_status(response, response.read())
def _on_obj_ready(self, parent, operation, response): # noqa
service_model = self.sqs_connection.meta.service_model
if response.status == self.STATUS_CODE_OK:
_, parsed = get_response(
service_model.operation_model(operation), response.response
)
return parsed
else:
raise self._for_status(response, response.read())
def _on_status_ready(self, parent, operation, response): # noqa
service_model = self.sqs_connection.meta.service_model
if response.status == self.STATUS_CODE_OK:
httpres, _ = get_response(
service_model.operation_model(operation), response.response
)
return httpres.code
else:
raise self._for_status(response, response.read())
def _for_status(self, response, body):
context = 'Empty body' if not body else 'HTTP Error'
return Exception("Request {} HTTP {} {} ({})".format(
context, response.status, response.reason, body
))
|
import numpy as np
import unittest
from chainer import testing
from chainer.testing import attr
from chainercv.datasets import ade20k_semantic_segmentation_label_names
from chainercv.datasets import ADE20KSemanticSegmentationDataset
from chainercv.datasets import ADE20KTestImageDataset
from chainercv.utils import assert_is_semantic_segmentation_dataset
from chainercv.utils.testing.assertions.assert_is_image import assert_is_image
@testing.parameterize(
{'split': 'train'},
{'split': 'val'},
)
class TestADE20KSemanticSegmentationDataset(unittest.TestCase):
def setUp(self):
self.dataset = ADE20KSemanticSegmentationDataset(split=self.split)
@attr.slow
def test_ade20k_dataset(self):
assert_is_semantic_segmentation_dataset(
self.dataset, len(ade20k_semantic_segmentation_label_names),
n_example=10)
class TestADE20KTestImageDataset(unittest.TestCase):
def setUp(self):
self.dataset = ADE20KTestImageDataset()
@attr.slow
def test_ade20k_dataset(self):
indices = np.random.permutation(np.arange(len(self.dataset)))
for i in indices[:10]:
img = self.dataset[i]
assert_is_image(img, color=True)
testing.run_module(__name__, __file__)
|
from django.test import TestCase
from weblate.auth.models import User
from weblate.utils.markdown import get_mention_users, render_markdown
class MarkdownTestCase(TestCase):
def test_link(self):
self.assertEqual(
'<p><a rel="ugc" href="https://weblate.org/">link</a></p>\n',
render_markdown("[link](https://weblate.org/)"),
)
def test_js(self):
self.assertEqual(
"<p>link</p>\n", render_markdown('<a href="javascript:alert()">link</a>')
)
def test_intra_emphasis(self):
self.assertEqual(
"<p>foo<strong>bar</strong>baz</p>\n", render_markdown("foo**bar**baz")
)
class MarkdownMentionTestCase(TestCase):
def test_mention(self):
User.objects.create(username="testuser", full_name="Full Name")
self.assertEqual(
'<p><strong><a rel="ugc" href="/user/testuser/" '
'title="Full Name">@testuser</a></strong> really?</p>\n',
render_markdown("@testuser really?"),
)
def test_get_mentions(self):
user = User.objects.create(username="testuser", full_name="Full Name")
self.assertEqual(
{user.pk},
set(
get_mention_users("@testuser, @invalid, @testuser").values_list(
"pk", flat=True
)
),
)
    def test_get_mentions_case_insensitive(self):
user = User.objects.create(username="testuser", full_name="Full Name")
self.assertEqual(
{user.pk},
set(
get_mention_users("@testUser, @invalid, @Testuser").values_list(
"pk", flat=True
)
),
)
|
import sys
from contextlib import contextmanager
try:
import urlparse
except ImportError:
# Python 3
import urllib.parse as urlparse
@contextmanager
def webserver(app, port=0, host=None):
"""Context manager entry point for the 'with' statement.
Pass 0 as port number to dynamically allocate a free port.
Usage:
with webserver(wsgi_app_function, 8080) as host_url:
do_ws_calls(host_url)
"""
server = build_web_server(app, port, host or '127.0.0.1')
host, port = server.socket.getsockname()
import threading
thread = threading.Thread(target=server.serve_forever,
kwargs={'poll_interval': 0.5})
    thread.daemon = True
thread.start()
try:
yield 'http://%s:%s/' % (host, port) # yield control to 'with' body
finally:
server.shutdown()
server.server_close()
thread.join(timeout=1)
try:
from SocketServer import ThreadingMixIn
except ImportError:
# Python 3
from socketserver import ThreadingMixIn
import wsgiref.simple_server as wsgiserver
# ThreadingMixIn must come before the server class so its process_request()
# overrides the base server's and each request really runs in its own thread.
class WebServer(ThreadingMixIn, wsgiserver.WSGIServer):
"""A web server that starts a new thread for each request.
"""
class _RequestHandler(wsgiserver.WSGIRequestHandler):
def get_stderr(self):
# don't write to stderr
return sys.stdout
def log_message(self, format, *args):
# message = "wsmock(%s) %s" % (self.address_string(), format % args)
pass # don't log messages
def build_web_server(app, port, host=None):
server = wsgiserver.make_server(
host or '', port, app,
server_class=WebServer,
handler_class=_RequestHandler)
return server
class HTTPRequestCollector(object):
def __init__(self, response_data, response_code=200, headers=()):
self.requests = []
self.response_code = response_code
self.response_data = response_data
self.headers = list(headers or ())
def __call__(self, environ, start_response):
self.requests.append((
environ.get('PATH_INFO'),
urlparse.parse_qsl(environ.get('QUERY_STRING'))))
start_response('%s OK' % self.response_code, self.headers)
return [self.response_data]
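# Illustrative usage sketch (not part of the original helpers): serve an
# HTTPRequestCollector through the threaded test server and issue one request
# against it. The URL path and query values below are assumptions chosen only
# for demonstration.
def _demo_collector_roundtrip():
    try:
        from urllib2 import urlopen  # Python 2
    except ImportError:
        from urllib.request import urlopen  # Python 3
    collector = HTTPRequestCollector(b'hello', headers=[('Content-Type', 'text/plain')])
    with webserver(collector) as host_url:
        body = urlopen(host_url + 'ping?x=1').read()
    # The collector records a (path, parsed query) tuple per request served.
    assert body == b'hello'
    assert collector.requests == [('/ping', [('x', '1')])]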
|
import html
import functools
import io
import os
import re
import sys
import uuid
import email.policy
import email.generator
import email.encoders
import email.mime.multipart
import email.message
import quopri
from typing import MutableMapping, Set, Tuple
import attr
from PyQt5.QtCore import QUrl
from qutebrowser.browser import downloads
from qutebrowser.browser.webkit import webkitelem
from qutebrowser.utils import log, objreg, message, usertypes, utils, urlutils
from qutebrowser.extensions import interceptors
@attr.s
class _File:
content = attr.ib()
content_type = attr.ib()
content_location = attr.ib()
transfer_encoding = attr.ib()
_CSS_URL_PATTERNS = [re.compile(x) for x in [
r"@import\s+'(?P<url>[^']+)'",
r'@import\s+"(?P<url>[^"]+)"',
r'''url\((?P<url>[^'"][^)]*)\)''',
r'url\("(?P<url>[^"]+)"\)',
r"url\('(?P<url>[^']+)'\)",
]]
def _get_css_imports_regex(data):
"""Return all assets that are referenced in the given CSS document.
The returned URLs are relative to the stylesheet's URL.
Args:
data: The content of the stylesheet to scan as string.
"""
urls = []
for pattern in _CSS_URL_PATTERNS:
for match in pattern.finditer(data):
url = match.group("url")
if url:
urls.append(url)
return urls
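# Illustrative sketch (not part of the original module): what the regex-based
# fallback extracts from a small stylesheet. The sample CSS below is an
# assumption used purely for demonstration.
def _demo_css_imports_regex():
    sample = '@import "base.css"; body { background: url(images/bg.png); }'
    # Returns the referenced URLs, still relative to the stylesheet's location.
    return _get_css_imports_regex(sample)  # ['base.css', 'images/bg.png']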
def _get_css_imports_cssutils(data, inline=False):
"""Return all assets that are referenced in the given CSS document.
The returned URLs are relative to the stylesheet's URL.
Args:
data: The content of the stylesheet to scan as string.
inline: True if the argument is an inline HTML style attribute.
"""
try:
import cssutils
except ImportError:
return None
# We don't care about invalid CSS data, this will only litter the log
# output with CSS errors
parser = cssutils.CSSParser(loglevel=100,
fetcher=lambda url: (None, ""), validate=False)
if not inline:
sheet = parser.parseString(data)
return list(cssutils.getUrls(sheet))
else:
urls = []
declaration = parser.parseStyle(data)
# prop = background, color, margin, ...
for prop in declaration:
# value = red, 10px, url(foobar), ...
for value in prop.propertyValue:
if isinstance(value, cssutils.css.URIValue):
if value.uri:
urls.append(value.uri)
return urls
def _get_css_imports(data, inline=False):
"""Return all assets that are referenced in the given CSS document.
The returned URLs are relative to the stylesheet's URL.
Args:
data: The content of the stylesheet to scan as string.
inline: True if the argument is an inline HTML style attribute.
"""
imports = _get_css_imports_cssutils(data, inline)
if imports is None:
imports = _get_css_imports_regex(data)
return imports
def _check_rel(element):
"""Return true if the element's rel attribute fits our criteria.
rel has to contain 'stylesheet' or 'icon'. Also returns True if the rel
attribute is unset.
Args:
element: The WebElementWrapper which should be checked.
"""
if 'rel' not in element:
return True
must_have = {'stylesheet', 'icon'}
rels = [rel.lower() for rel in element['rel'].split(' ')]
return any(rel in rels for rel in must_have)
def _encode_quopri_mhtml(msg):
"""Encode the message's payload in quoted-printable.
Substitute for quopri's default 'encode_quopri' method, which needlessly
    encodes all spaces and tabs, instead of only those at the end of the
    line.
Args:
msg: Email message to quote.
"""
orig = msg.get_payload(decode=True)
encdata = quopri.encodestring(orig, quotetabs=False)
msg.set_payload(encdata)
msg['Content-Transfer-Encoding'] = 'quoted-printable'
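# Illustrative sketch (assumption, not part of the original module): the custom
# encoder keeps inner spaces and tabs literal and only escapes whitespace at
# the end of a line, unlike the stock quoted-printable encoder.
def _demo_quopri_encoding():
    msg = email.message.Message()
    msg.set_payload(b"tab\there, trailing space \n")
    _encode_quopri_mhtml(msg)
    return msg.get_payload()  # 'tab\там...' -> 'tab\there, trailing space=20\n'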
MHTMLPolicy = email.policy.default.clone(linesep='\r\n', max_line_length=0)
# Encode the file using base64 encoding.
E_BASE64 = email.encoders.encode_base64
# Encode the file using MIME quoted-printable encoding.
E_QUOPRI = _encode_quopri_mhtml
class MHTMLWriter:
"""A class for outputting multiple files to an MHTML document.
Attributes:
root_content: The root content as bytes.
content_location: The url of the page as str.
content_type: The MIME-type of the root content as str.
_files: Mapping of location->_File object.
"""
def __init__(self, root_content, content_location, content_type):
self.root_content = root_content
self.content_location = content_location
self.content_type = content_type
self._files: MutableMapping[QUrl, _File] = {}
def add_file(self, location, content, content_type=None,
transfer_encoding=E_QUOPRI):
"""Add a file to the given MHTML collection.
Args:
location: The original location (URL) of the file.
content: The binary content of the file.
content_type: The MIME-type of the content (if available)
transfer_encoding: The transfer encoding to use for this file.
"""
self._files[location] = _File(
content=content, content_type=content_type,
content_location=location, transfer_encoding=transfer_encoding,
)
def write_to(self, fp):
"""Output the MHTML file to the given file-like object.
Args:
fp: The file-object, opened in "wb" mode.
"""
msg = email.mime.multipart.MIMEMultipart(
'related', '---=_qute-{}'.format(uuid.uuid4()))
root = self._create_root_file()
msg.attach(root)
for _, file_data in sorted(self._files.items()):
msg.attach(self._create_file(file_data))
gen = email.generator.BytesGenerator(fp, policy=MHTMLPolicy)
gen.flatten(msg)
def _create_root_file(self):
"""Return the root document as MIMEMultipart."""
root_file = _File(
content=self.root_content, content_type=self.content_type,
content_location=self.content_location, transfer_encoding=E_QUOPRI,
)
return self._create_file(root_file)
def _create_file(self, f):
"""Return the single given file as email.message.Message."""
msg = email.message.Message()
msg['MIME-Version'] = '1.0'
msg['Content-Location'] = f.content_location
if f.content_type:
msg.set_type(f.content_type)
msg.set_payload(f.content)
f.transfer_encoding(msg)
return msg
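# Illustrative usage sketch (not part of the original module): assemble a tiny
# MHTML archive from in-memory data. The URLs and contents below are
# assumptions used purely for demonstration.
def _demo_mhtml_writer():
    writer = MHTMLWriter(
        root_content=b'<html><body>hi</body></html>',
        content_location='http://example.com/',
        content_type='text/html; charset="UTF-8"',
    )
    writer.add_file('http://example.com/style.css', b'body { color: red }',
                    content_type='text/css')
    buf = io.BytesIO()
    # Writes the root document plus every added file as one multipart message.
    writer.write_to(buf)
    return buf.getvalue()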
_PendingDownloadType = Set[Tuple[QUrl, downloads.AbstractDownloadItem]]
class _Downloader:
"""A class to download whole websites.
Attributes:
tab: The AbstractTab which contains the website that will be saved.
target: DownloadTarget where the file should be downloaded to.
writer: The MHTMLWriter object which is used to save the page.
loaded_urls: A set of QUrls of finished asset downloads.
pending_downloads: A set of unfinished (url, DownloadItem) tuples.
_finished_file: A flag indicating if the file has already been
written.
_used: A flag indicating if the downloader has already been used.
"""
def __init__(self, tab, target):
self.tab = tab
self.target = target
self.writer = None
self.loaded_urls = {tab.url()}
self.pending_downloads: _PendingDownloadType = set()
self._finished_file = False
self._used = False
def run(self):
"""Download and save the page.
        The object must not be reused; create a new one if you want to
        download another page.
"""
if self._used:
raise ValueError("Downloader already used")
self._used = True
web_url = self.tab.url()
# FIXME:qtwebengine have a proper API for this
page = self.tab._widget.page() # pylint: disable=protected-access
web_frame = page.mainFrame()
self.writer = MHTMLWriter(
web_frame.toHtml().encode('utf-8'),
content_location=urlutils.encoded_url(web_url),
# I've found no way of getting the content type of a QWebView, but
# since we're using .toHtml, it's probably safe to say that the
# content-type is HTML
content_type='text/html; charset="UTF-8"',
)
# Currently only downloading <link> (stylesheets), <script>
# (javascript) and <img> (image) elements.
elements = web_frame.findAllElements('link, script, img')
for element in elements:
element = webkitelem.WebKitElement(element, tab=self.tab)
# Websites are free to set whatever rel=... attribute they want.
# We just care about stylesheets and icons.
if not _check_rel(element):
continue
if 'src' in element:
element_url = element['src']
elif 'href' in element:
element_url = element['href']
else:
# Might be a local <script> tag or something else
continue
absolute_url = web_url.resolved(QUrl(element_url))
self._fetch_url(absolute_url)
styles = web_frame.findAllElements('style')
for style in styles:
style = webkitelem.WebKitElement(style, tab=self.tab)
# The Mozilla Developer Network says:
# > type: This attribute defines the styling language as a MIME
# > type (charset should not be specified). This attribute is
            # > optional and defaults to text/css if it's missing.
# https://developer.mozilla.org/en/docs/Web/HTML/Element/style
if 'type' in style and style['type'] != 'text/css':
continue
for element_url in _get_css_imports(str(style)):
self._fetch_url(web_url.resolved(QUrl(element_url)))
# Search for references in inline styles
for element in web_frame.findAllElements('[style]'):
element = webkitelem.WebKitElement(element, tab=self.tab)
style = element['style']
for element_url in _get_css_imports(style, inline=True):
self._fetch_url(web_url.resolved(QUrl(element_url)))
# Shortcut if no assets need to be downloaded, otherwise the file would
# never be saved. Also might happen if the downloads are fast enough to
# complete before connecting their finished signal.
self._collect_zombies()
if not self.pending_downloads and not self._finished_file:
self._finish_file()
def _fetch_url(self, url):
"""Download the given url and add the file to the collection.
Args:
url: The file to download as QUrl.
"""
assert self.writer is not None
if url.scheme() not in ['http', 'https']:
return
# Prevent loading an asset twice
if url in self.loaded_urls:
return
self.loaded_urls.add(url)
log.downloads.debug("loading asset at {}".format(url))
# Using the download manager to download host-blocked urls might crash
# qute, see the comments/discussion on
# https://github.com/qutebrowser/qutebrowser/pull/962#discussion_r40256987
# and https://github.com/qutebrowser/qutebrowser/issues/1053
request = interceptors.Request(first_party_url=None, request_url=url)
interceptors.run(request)
if request.is_blocked:
log.downloads.debug("Skipping {}, host-blocked".format(url))
# We still need an empty file in the output, QWebView can be pretty
# picky about displaying a file correctly when not all assets are
# at least referenced in the mhtml file.
self.writer.add_file(urlutils.encoded_url(url), b'')
return
download_manager = objreg.get('qtnetwork-download-manager')
target = downloads.FileObjDownloadTarget(_NoCloseBytesIO())
item = download_manager.get(url, target=target,
auto_remove=True)
self.pending_downloads.add((url, item))
item.finished.connect(functools.partial(self._finished, url, item))
item.error.connect(functools.partial(self._error, url, item))
item.cancelled.connect(functools.partial(self._cancelled, url, item))
def _finished(self, url, item):
"""Callback when a single asset is downloaded.
Args:
url: The original url of the asset as QUrl.
item: The DownloadItem given by the DownloadManager
"""
assert self.writer is not None
self.pending_downloads.remove((url, item))
mime = item.raw_headers.get(b'Content-Type', b'')
# Note that this decoding always works and doesn't produce errors
# RFC 7230 (https://tools.ietf.org/html/rfc7230) states:
# Historically, HTTP has allowed field content with text in the
# ISO-8859-1 charset [ISO-8859-1], supporting other charsets only
# through use of [RFC2047] encoding. In practice, most HTTP header
# field values use only a subset of the US-ASCII charset [USASCII].
# Newly defined header fields SHOULD limit their field values to
# US-ASCII octets. A recipient SHOULD treat other octets in field
# content (obs-text) as opaque data.
mime = mime.decode('iso-8859-1')
if mime.lower() == 'text/css' or url.fileName().endswith('.css'):
# We can't always assume that CSS files are UTF-8, but CSS files
# shouldn't contain many non-ASCII characters anyway (in most
# cases). Using "ignore" lets us decode the file even if it's
# invalid UTF-8 data.
# The file written to the MHTML file won't be modified by this
# decoding, since there we're taking the original bytestream.
try:
css_string = item.fileobj.getvalue().decode('utf-8')
except UnicodeDecodeError:
log.downloads.warning("Invalid UTF-8 data in {}".format(url))
css_string = item.fileobj.getvalue().decode('utf-8', 'ignore')
import_urls = _get_css_imports(css_string)
for import_url in import_urls:
absolute_url = url.resolved(QUrl(import_url))
self._fetch_url(absolute_url)
encode = E_QUOPRI if mime.startswith('text/') else E_BASE64
# Our MHTML handler refuses non-ASCII headers. This will replace every
# non-ASCII char with '?'. This is probably okay, as official Content-
# Type headers contain ASCII only anyway. Anything else is madness.
mime = utils.force_encoding(mime, 'ascii')
self.writer.add_file(urlutils.encoded_url(url),
item.fileobj.getvalue(), mime, encode)
item.fileobj.actual_close()
if self.pending_downloads:
return
self._finish_file()
def _error(self, url, item, *_args):
"""Callback when a download error occurred.
Args:
url: The original url of the asset as QUrl.
item: The DownloadItem given by the DownloadManager.
"""
assert self.writer is not None
try:
self.pending_downloads.remove((url, item))
except KeyError:
            # This can happen if _collect_zombies() already called _finished(),
            # so the error handler runs after the item was removed.
log.downloads.debug("Oops! Download already gone: {}".format(item))
return
item.fileobj.actual_close()
        # Add a stub file, see the comment in _fetch_url() for more information
self.writer.add_file(urlutils.encoded_url(url), b'')
if self.pending_downloads:
return
self._finish_file()
def _cancelled(self, url, item):
"""Callback when a download is cancelled by the user.
Args:
url: The original url of the asset as QUrl.
item: The DownloadItem given by the DownloadManager.
"""
# This callback is called before _finished, so there's no need to
# remove the item or close the fileobject.
log.downloads.debug("MHTML download cancelled by user: {}".format(url))
# Write an empty file instead
item.fileobj.seek(0)
item.fileobj.truncate()
def _finish_file(self):
"""Save the file to the filename given in __init__."""
assert self.writer is not None
if self._finished_file:
log.downloads.debug("finish_file called twice, ignored!")
return
self._finished_file = True
log.downloads.debug("All assets downloaded, ready to finish off!")
if isinstance(self.target, downloads.FileDownloadTarget):
fobj = open(self.target.filename, 'wb')
elif isinstance(self.target, downloads.FileObjDownloadTarget):
fobj = self.target.fileobj
elif isinstance(self.target, downloads.OpenFileDownloadTarget):
try:
fobj = downloads.temp_download_manager.get_tmpfile(
self.tab.title() + '.mhtml')
except OSError as exc:
msg = "Download error: {}".format(exc)
message.error(msg)
return
else:
raise ValueError("Invalid DownloadTarget given: {!r}"
.format(self.target))
try:
with fobj:
self.writer.write_to(fobj)
except OSError as error:
message.error("Could not save file: {}".format(error))
return
log.downloads.debug("File successfully written.")
message.info("Page saved as {}".format(self.target))
if isinstance(self.target, downloads.OpenFileDownloadTarget):
utils.open_file(fobj.name, self.target.cmdline)
def _collect_zombies(self):
"""Collect done downloads and add their data to the MHTML file.
This is needed if a download finishes before attaching its
finished signal.
"""
items = {(url, item) for url, item in self.pending_downloads
if item.done}
log.downloads.debug("Zombie downloads: {}".format(items))
for url, item in items:
self._finished(url, item)
class _NoCloseBytesIO(io.BytesIO):
"""BytesIO that can't be .closed().
This is needed to prevent the DownloadManager from closing the stream, thus
discarding the data.
"""
def close(self):
"""Do nothing."""
def actual_close(self):
"""Close the stream."""
super().close()
def _start_download(target, tab):
"""Start downloading the current page and all assets to an MHTML file.
    This will overwrite the target file if it already exists.
Args:
target: The DownloadTarget where the resulting file should be saved.
tab: Specify the tab whose page should be loaded.
"""
loader = _Downloader(tab, target)
loader.run()
def start_download_checked(target, tab):
"""First check if dest is already a file, then start the download.
Args:
target: The DownloadTarget where the resulting file should be saved.
tab: Specify the tab whose page should be loaded.
"""
if not isinstance(target, downloads.FileDownloadTarget):
_start_download(target, tab)
return
# The default name is 'page title.mhtml'
title = tab.title()
default_name = utils.sanitize_filename(title + '.mhtml', shorten=True)
# Remove characters which cannot be expressed in the file system encoding
encoding = sys.getfilesystemencoding()
default_name = utils.force_encoding(default_name, encoding)
dest = utils.force_encoding(target.filename, encoding)
dest = os.path.expanduser(dest)
# See if we already have an absolute path
path = downloads.create_full_filename(default_name, dest)
if path is None:
# We still only have a relative path, prepend download_dir and
# try again.
path = downloads.create_full_filename(
default_name, os.path.join(downloads.download_dir(), dest))
downloads.last_used_directory = os.path.dirname(path)
# Avoid downloading files if we can't save the output anyway...
# Yes, this is prone to race conditions, but we're checking again before
# saving the file anyway.
if not os.path.isdir(os.path.dirname(path)):
folder = os.path.dirname(path)
message.error("Directory {} does not exist.".format(folder))
return
target = downloads.FileDownloadTarget(path)
if not os.path.isfile(path):
_start_download(target, tab=tab)
return
q = usertypes.Question()
q.mode = usertypes.PromptMode.yesno
q.title = "Overwrite existing file?"
q.text = "<b>{}</b> already exists. Overwrite?".format(
html.escape(path))
q.completed.connect(q.deleteLater)
q.answered_yes.connect(functools.partial(
_start_download, target, tab=tab))
message.global_bridge.ask(q, blocking=False)
|
import copy
import json
from typing import Any
from typing import Dict
from typing import Tuple
from typing import Type
from typing import TypeVar
from typing import Union
from kazoo.client import KazooClient
from kazoo.exceptions import BadVersionError
from kazoo.exceptions import NodeExistsError
from kazoo.exceptions import NoNodeError
from kazoo.protocol.states import ZnodeStat
from paasta_tools.utils import _log
class MesosTaskParametersIsImmutableError(Exception):
pass
_SelfT = TypeVar("_SelfT", bound="MesosTaskParameters")
class MesosTaskParameters:
health: Any
mesos_task_state: str
is_draining: bool
is_healthy: bool
offer: Any
resources: Any
def __init__(
self,
health=None,
mesos_task_state=None,
is_draining=None,
is_healthy=None,
offer=None,
resources=None,
):
self.__dict__["health"] = health
self.__dict__["mesos_task_state"] = mesos_task_state
self.__dict__["is_draining"] = is_draining
self.__dict__["is_healthy"] = is_healthy
self.__dict__["offer"] = offer
self.__dict__["resources"] = resources
def __eq__(self, other):
return self.__dict__ == other.__dict__
def __repr__(self):
return "{}(\n {})".format(
type(self).__name__,
",\n ".join(["%s=%r" % kv for kv in self.__dict__.items()]),
)
def __setattr__(self, name, value):
raise MesosTaskParametersIsImmutableError()
def __delattr__(self, name):
raise MesosTaskParametersIsImmutableError()
def merge(self: _SelfT, **kwargs) -> "MesosTaskParameters":
"""Return a merged MesosTaskParameters object, where attributes in other take precedence over self."""
new_dict = copy.deepcopy(self.__dict__)
new_dict.update(kwargs)
return MesosTaskParameters(**new_dict)
@classmethod
def deserialize(cls: Type[_SelfT], serialized_params: Union[str, bytes]) -> _SelfT:
return cls(**json.loads(serialized_params))
def serialize(self):
return json.dumps(self.__dict__).encode("utf-8")
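# Illustrative sketch (not part of the original module): the immutable-update
# pattern above in action. merge() returns a new object with the given keyword
# arguments layered over the existing attributes, and serialize()/deserialize()
# round-trip through JSON. The sample values are assumptions for demonstration.
def _demo_mesos_task_parameters():
    params = MesosTaskParameters(mesos_task_state="TASK_RUNNING", is_healthy=True)
    updated = params.merge(is_draining=True)
    # The original object is untouched; only the copy carries the new value.
    assert params.is_draining is None and updated.is_draining is True
    assert MesosTaskParameters.deserialize(updated.serialize()) == updated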
class TaskStore:
def __init__(self, service_name, instance_name, framework_id, system_paasta_config):
self.service_name = service_name
self.instance_name = instance_name
self.framework_id = framework_id
self.system_paasta_config = system_paasta_config
def get_task(self, task_id: str) -> MesosTaskParameters:
"""Get task data for task_id. If we don't know about task_id, return None"""
raise NotImplementedError()
def get_all_tasks(self) -> Dict[str, MesosTaskParameters]:
"""Returns a dictionary of task_id -> MesosTaskParameters for all known tasks."""
raise NotImplementedError()
def overwrite_task(self, task_id: str, params: MesosTaskParameters) -> None:
raise NotImplementedError()
def add_task_if_doesnt_exist(self, task_id: str, **kwargs) -> None:
"""Add a task if it does not already exist. If it already exists, do nothing."""
if self.get_task(task_id) is not None:
return
else:
self.overwrite_task(task_id, MesosTaskParameters(**kwargs))
def update_task(self, task_id: str, **kwargs) -> MesosTaskParameters:
existing_task = self.get_task(task_id)
if existing_task:
merged_params = existing_task.merge(**kwargs)
else:
merged_params = MesosTaskParameters(**kwargs)
self.overwrite_task(task_id, merged_params)
return merged_params
def garbage_collect_old_tasks(self, max_dead_task_age: float) -> None:
# TODO: call me.
# TODO: implement in base class.
raise NotImplementedError()
def close(self):
pass
class DictTaskStore(TaskStore):
def __init__(self, service_name, instance_name, framework_id, system_paasta_config):
self.tasks: Dict[str, MesosTaskParameters] = {}
super().__init__(
service_name, instance_name, framework_id, system_paasta_config
)
def get_task(self, task_id: str) -> MesosTaskParameters:
return self.tasks.get(task_id)
def get_all_tasks(self) -> Dict[str, MesosTaskParameters]:
"""Returns a dictionary of task_id -> MesosTaskParameters for all known tasks."""
return dict(self.tasks)
def overwrite_task(self, task_id: str, params: MesosTaskParameters) -> None:
# serialize/deserialize to make sure the returned values are the same format as ZKTaskStore.
self.tasks[task_id] = MesosTaskParameters.deserialize(params.serialize())
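# Illustrative sketch (not part of the original module): how the in-memory
# store implements the TaskStore contract. The task id and field values below
# are assumptions used purely for demonstration.
def _demo_dict_task_store():
    store = DictTaskStore(
        service_name="example_service", instance_name="main",
        framework_id="framework-1", system_paasta_config=None,
    )
    store.add_task_if_doesnt_exist("task-1", mesos_task_state="TASK_STAGING")
    # A second call for an existing task id is a no-op...
    store.add_task_if_doesnt_exist("task-1", mesos_task_state="TASK_RUNNING")
    assert store.get_task("task-1").mesos_task_state == "TASK_STAGING"
    # ...while update_task merges new fields into the stored parameters.
    store.update_task("task-1", is_healthy=True)
    assert store.get_task("task-1").is_healthy is True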
class ZKTaskStore(TaskStore):
def __init__(self, service_name, instance_name, framework_id, system_paasta_config):
super().__init__(
service_name, instance_name, framework_id, system_paasta_config
)
self.zk_hosts = system_paasta_config.get_zk_hosts()
# For some reason, I could not get the code suggested by this SO post to work to ensure_path on the chroot.
# https://stackoverflow.com/a/32785625/25327
# Plus, it just felt dirty to modify instance attributes of a running connection, especially given that
# KazooClient.set_hosts() doesn't allow you to change the chroot. Must be for a good reason.
chroot = f"task_store/{service_name}/{instance_name}/{framework_id}"
temp_zk_client = KazooClient(hosts=self.zk_hosts)
temp_zk_client.start()
temp_zk_client.ensure_path(chroot)
temp_zk_client.stop()
temp_zk_client.close()
self.zk_client = KazooClient(hosts=f"{self.zk_hosts}/{chroot}")
self.zk_client.start()
self.zk_client.ensure_path("/")
def close(self):
self.zk_client.stop()
self.zk_client.close()
def get_task(self, task_id: str) -> MesosTaskParameters:
params, stat = self._get_task(task_id)
return params
def _get_task(self, task_id: str) -> Tuple[MesosTaskParameters, ZnodeStat]:
"""Like get_task, but also returns the ZnodeStat that self.zk_client.get() returns """
try:
data, stat = self.zk_client.get("/%s" % task_id)
return MesosTaskParameters.deserialize(data), stat
except NoNodeError:
return None, None
except json.decoder.JSONDecodeError:
_log(
service=self.service_name,
instance=self.instance_name,
level="debug",
component="deploy",
line=f"Warning: found non-json-decodable value in zookeeper for task {task_id}: {data}",
)
return None, None
def get_all_tasks(self):
all_tasks = {}
for child_path in self.zk_client.get_children("/"):
task_id = self._task_id_from_zk_path(child_path)
params = self.get_task(task_id)
# sometimes there are bogus child ZK nodes. Ignore them.
if params is not None:
all_tasks[task_id] = params
return all_tasks
def update_task(self, task_id: str, **kwargs):
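        # Optimistic concurrency: read the node with its version, merge the new
        # fields, then write back with that version. If another writer got in
        # between (BadVersionError) or created the node first (NodeExistsError),
        # retry the whole read-merge-write cycle.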
retry = True
while retry:
retry = False
existing_task, stat = self._get_task(task_id)
zk_path = self._zk_path_from_task_id(task_id)
if existing_task:
merged_params = existing_task.merge(**kwargs)
try:
self.zk_client.set(
zk_path, merged_params.serialize(), version=stat.version
)
except BadVersionError:
retry = True
else:
merged_params = MesosTaskParameters(**kwargs)
try:
self.zk_client.create(zk_path, merged_params.serialize())
except NodeExistsError:
retry = True
return merged_params
def overwrite_task(
self, task_id: str, params: MesosTaskParameters, version=-1
) -> None:
try:
self.zk_client.set(
self._zk_path_from_task_id(task_id), params.serialize(), version=version
)
except NoNodeError:
self.zk_client.create(
self._zk_path_from_task_id(task_id), params.serialize()
)
def _zk_path_from_task_id(self, task_id: str) -> str:
return "/%s" % task_id
def _task_id_from_zk_path(self, zk_path: str) -> str:
return zk_path.lstrip("/")
|
from __future__ import absolute_import
import numpy as np
from six.moves import zip
def add_params(param_list_left, param_list_right):
"""Add two lists of parameters one by one
:param param_list_left: list of numpy arrays
:param param_list_right: list of numpy arrays
:return: list of numpy arrays
"""
res = []
for x, y in zip(param_list_left, param_list_right):
res.append(x + y)
return res
def subtract_params(param_list_left, param_list_right):
"""Subtract two lists of parameters
:param param_list_left: list of numpy arrays
:param param_list_right: list of numpy arrays
:return: list of numpy arrays
"""
res = []
for x, y in zip(param_list_left, param_list_right):
res.append(x - y)
return res
def get_neutral(array_list):
"""Get list of zero-valued numpy arrays for
specified list of numpy arrays
:param array_list: list of numpy arrays
:return: list of zeros of same shape as input
"""
res = []
for x in array_list:
res.append(np.zeros_like(x))
return res
def divide_by(array_list, num_workers):
"""Divide a list of parameters by an integer num_workers.
:param array_list:
:param num_workers:
:return:
"""
for i, x in enumerate(array_list):
array_list[i] /= num_workers
return array_list
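# Illustrative sketch (not part of the original module): one round of simple
# parameter averaging built from the helpers above. The shapes and worker
# count below are assumptions used purely for demonstration.
def _demo_average_updates():
    weights = [np.ones((2, 2)), np.zeros(3)]
    worker_a = [np.full((2, 2), 0.5), np.ones(3)]
    worker_b = [np.full((2, 2), 1.5), np.ones(3)]
    # Accumulate both workers' deltas starting from a zero-valued accumulator.
    total = get_neutral(weights)
    for update in (worker_a, worker_b):
        total = add_params(total, subtract_params(update, weights))
    # Average the accumulated delta and apply it to the original weights.
    return add_params(weights, divide_by(total, num_workers=2))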
|
import logging
import pytest
from PyQt5.QtCore import QProcess
from qutebrowser.misc import guiprocess
from qutebrowser.utils import usertypes
from qutebrowser.browser import qutescheme
@pytest.fixture()
def proc(qtbot, caplog):
"""A fixture providing a GUIProcess and cleaning it up after the test."""
p = guiprocess.GUIProcess('testprocess')
yield p
if p._proc.state() == QProcess.Running:
with caplog.at_level(logging.ERROR):
with qtbot.waitSignal(p.finished, timeout=10000,
raising=False) as blocker:
p._proc.terminate()
if not blocker.signal_triggered:
p._proc.kill()
p._proc.waitForFinished()
@pytest.fixture()
def fake_proc(monkeypatch, stubs):
"""A fixture providing a GUIProcess with a mocked QProcess."""
p = guiprocess.GUIProcess('testprocess')
monkeypatch.setattr(p, '_proc', stubs.fake_qprocess())
return p
def test_start(proc, qtbot, message_mock, py_proc):
"""Test simply starting a process."""
with qtbot.waitSignal(proc.started, timeout=10000), \
qtbot.waitSignal(proc.finished, timeout=10000):
argv = py_proc("import sys; print('test'); sys.exit(0)")
proc.start(*argv)
expected = proc._spawn_format(exitinfo="Testprocess exited successfully.",
stdout="test", stderr="")
assert not message_mock.messages
assert qutescheme.spawn_output == expected
assert proc.exit_status() == QProcess.NormalExit
def test_start_verbose(proc, qtbot, message_mock, py_proc):
"""Test starting a process verbosely."""
proc.verbose = True
with qtbot.waitSignal(proc.started, timeout=10000), \
qtbot.waitSignal(proc.finished, timeout=10000):
argv = py_proc("import sys; print('test'); sys.exit(0)")
proc.start(*argv)
expected = proc._spawn_format(exitinfo="Testprocess exited successfully.",
stdout="test", stderr="")
msgs = message_mock.messages
assert msgs[0].level == usertypes.MessageLevel.info
assert msgs[1].level == usertypes.MessageLevel.info
assert msgs[0].text.startswith("Executing:")
assert msgs[1].text == "Testprocess exited successfully."
assert qutescheme.spawn_output == expected
@pytest.mark.parametrize('stdout', [True, False])
@pytest.mark.parametrize('stderr', [True, False])
def test_start_output_message(proc, qtbot, caplog, message_mock, py_proc,
stdout, stderr):
proc._output_messages = True
code = ['import sys']
if stdout:
code.append('print("stdout text")')
if stderr:
code.append(r'sys.stderr.write("stderr text\n")')
code.append("sys.exit(0)")
with caplog.at_level(logging.ERROR, 'message'):
with qtbot.waitSignal(proc.started, timeout=10000), \
qtbot.waitSignal(proc.finished, timeout=10000):
argv = py_proc(';'.join(code))
proc.start(*argv)
if stdout and stderr:
stdout_msg = message_mock.messages[0]
stderr_msg = message_mock.messages[1]
msg_count = 2
elif stdout:
stdout_msg = message_mock.messages[0]
stderr_msg = None
msg_count = 1
elif stderr:
stdout_msg = None
stderr_msg = message_mock.messages[0]
msg_count = 1
else:
stdout_msg = None
stderr_msg = None
msg_count = 0
assert len(message_mock.messages) == msg_count
if stdout_msg is not None:
assert stdout_msg.level == usertypes.MessageLevel.info
assert stdout_msg.text == 'stdout text'
if stderr_msg is not None:
assert stderr_msg.level == usertypes.MessageLevel.error
assert stderr_msg.text == 'stderr text'
def test_start_env(monkeypatch, qtbot, py_proc):
monkeypatch.setenv('QUTEBROWSER_TEST_1', '1')
env = {'QUTEBROWSER_TEST_2': '2'}
proc = guiprocess.GUIProcess('testprocess', additional_env=env)
argv = py_proc("""
import os
import json
import sys
env = dict(os.environ)
print(json.dumps(env))
sys.exit(0)
""")
with qtbot.waitSignal(proc.started, timeout=10000), \
qtbot.waitSignal(proc.finished, timeout=10000):
proc.start(*argv)
data = qutescheme.spawn_output
assert 'QUTEBROWSER_TEST_1' in data
assert 'QUTEBROWSER_TEST_2' in data
def test_start_detached(fake_proc):
"""Test starting a detached process."""
argv = ['foo', 'bar']
fake_proc._proc.startDetached.return_value = (True, 0)
fake_proc.start_detached(*argv)
fake_proc._proc.startDetached.assert_called_with(*list(argv) + [None])
def test_start_detached_error(fake_proc, message_mock, caplog):
"""Test starting a detached process with ok=False."""
argv = ['foo', 'bar']
fake_proc._proc.startDetached.return_value = (False, 0)
with caplog.at_level(logging.ERROR):
fake_proc.start_detached(*argv)
msg = message_mock.getmsg(usertypes.MessageLevel.error)
expected = "Error while spawning testprocess"
assert msg.text == expected
def test_double_start(qtbot, proc, py_proc):
"""Test starting a GUIProcess twice."""
with qtbot.waitSignal(proc.started, timeout=10000):
argv = py_proc("import time; time.sleep(10)")
proc.start(*argv)
with pytest.raises(ValueError):
proc.start('', [])
def test_double_start_finished(qtbot, proc, py_proc):
"""Test starting a GUIProcess twice (with the first call finished)."""
with qtbot.waitSignal(proc.started, timeout=10000), \
qtbot.waitSignal(proc.finished, timeout=10000):
argv = py_proc("import sys; sys.exit(0)")
proc.start(*argv)
with qtbot.waitSignal(proc.started, timeout=10000), \
qtbot.waitSignal(proc.finished, timeout=10000):
argv = py_proc("import sys; sys.exit(0)")
proc.start(*argv)
def test_cmd_args(fake_proc):
"""Test the cmd and args attributes."""
cmd = 'does_not_exist'
args = ['arg1', 'arg2']
fake_proc.start(cmd, args)
assert (fake_proc.cmd, fake_proc.args) == (cmd, args)
def test_start_logging(fake_proc, caplog):
"""Make sure that starting logs the executed commandline."""
cmd = 'does_not_exist'
args = ['arg', 'arg with spaces']
with caplog.at_level(logging.DEBUG):
fake_proc.start(cmd, args)
assert caplog.messages == [
"Starting process.",
"Executing: does_not_exist arg 'arg with spaces'"
]
def test_error(qtbot, proc, caplog, message_mock):
"""Test the process emitting an error."""
with caplog.at_level(logging.ERROR, 'message'):
with qtbot.waitSignal(proc.error, timeout=5000):
proc.start('this_does_not_exist_either', [])
msg = message_mock.getmsg(usertypes.MessageLevel.error)
assert msg.text.startswith("Error while spawning testprocess:")
def test_exit_unsuccessful(qtbot, proc, message_mock, py_proc, caplog):
with caplog.at_level(logging.ERROR):
with qtbot.waitSignal(proc.finished, timeout=10000):
proc.start(*py_proc('import sys; sys.exit(1)'))
msg = message_mock.getmsg(usertypes.MessageLevel.error)
expected = "Testprocess exited with status 1, see :messages for details."
assert msg.text == expected
@pytest.mark.parametrize('stream', ['stdout', 'stderr'])
def test_exit_unsuccessful_output(qtbot, proc, caplog, py_proc, stream):
"""When a process fails, its output should be logged."""
with caplog.at_level(logging.ERROR):
with qtbot.waitSignal(proc.finished, timeout=10000):
proc.start(*py_proc("""
import sys
print("test", file=sys.{})
sys.exit(1)
""".format(stream)))
assert caplog.messages[-1] == 'Process {}:\ntest'.format(stream)
@pytest.mark.parametrize('stream', ['stdout', 'stderr'])
def test_exit_successful_output(qtbot, proc, py_proc, stream):
"""When a process succeeds, no output should be logged.
The test doesn't actually check the log as it'd fail because of the error
logging.
"""
with qtbot.waitSignal(proc.finished, timeout=10000):
proc.start(*py_proc("""
import sys
print("test", file=sys.{})
sys.exit(0)
""".format(stream)))
def test_stdout_not_decodable(proc, qtbot, message_mock, py_proc):
"""Test handling malformed utf-8 in stdout."""
with qtbot.waitSignal(proc.started, timeout=10000), \
qtbot.waitSignal(proc.finished, timeout=10000):
argv = py_proc(r"""
import sys
# Using \x81 because it's invalid in UTF-8 and CP1252
sys.stdout.buffer.write(b"A\x81B")
sys.exit(0)
""")
proc.start(*argv)
expected = proc._spawn_format(exitinfo="Testprocess exited successfully.",
stdout="A\ufffdB", stderr="")
assert not message_mock.messages
assert qutescheme.spawn_output == expected
|
from homeassistant.auth.permissions.merge import merge_policies
def test_merging_permissions_true_rules_dict():
"""Test merging policy with two entities."""
policy1 = {
"something_else": True,
"entities": {"entity_ids": {"light.kitchen": True}},
}
policy2 = {"entities": {"entity_ids": True}}
assert merge_policies([policy1, policy2]) == {
"something_else": True,
"entities": {"entity_ids": True},
}
def test_merging_permissions_multiple_subcategories():
"""Test merging policy with two entities."""
policy1 = {"entities": None}
policy2 = {"entities": {"entity_ids": True}}
policy3 = {"entities": True}
assert merge_policies([policy1, policy2]) == policy2
assert merge_policies([policy1, policy3]) == policy3
assert merge_policies([policy2, policy3]) == policy3
|