import voluptuous as vol
from homeassistant.components import rpi_gpio
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import DEVICE_DEFAULT_NAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
from homeassistant.helpers.reload import setup_reload_service
from . import DOMAIN, PLATFORMS
CONF_PULL_MODE = "pull_mode"
CONF_PORTS = "ports"
CONF_INVERT_LOGIC = "invert_logic"
DEFAULT_INVERT_LOGIC = False
_SWITCHES_SCHEMA = vol.Schema({cv.positive_int: cv.string})
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_PORTS): _SWITCHES_SCHEMA,
vol.Optional(CONF_INVERT_LOGIC, default=DEFAULT_INVERT_LOGIC): cv.boolean,
}
)
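# Illustrative configuration.yaml snippet matching the schema above
# (the port numbers and names are placeholders):
#
#   switch:
#     - platform: rpi_gpio
#       invert_logic: true
#       ports:
#         11: Fan Office
#         12: Light Desk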
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Raspberry PI GPIO devices."""
setup_reload_service(hass, DOMAIN, PLATFORMS)
invert_logic = config.get(CONF_INVERT_LOGIC)
switches = []
ports = config.get(CONF_PORTS)
for port, name in ports.items():
switches.append(RPiGPIOSwitch(name, port, invert_logic))
add_entities(switches)
class RPiGPIOSwitch(ToggleEntity):
"""Representation of a Raspberry Pi GPIO."""
def __init__(self, name, port, invert_logic):
"""Initialize the pin."""
self._name = name or DEVICE_DEFAULT_NAME
self._port = port
self._invert_logic = invert_logic
self._state = False
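        # Configure the pin and drive it to the logical "off" level;
        # with invert_logic enabled, a raw 1 represents "off".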
rpi_gpio.setup_output(self._port)
rpi_gpio.write_output(self._port, 1 if self._invert_logic else 0)
@property
def name(self):
"""Return the name of the switch."""
return self._name
@property
def should_poll(self):
"""No polling needed."""
return False
@property
def is_on(self):
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs):
"""Turn the device on."""
rpi_gpio.write_output(self._port, 0 if self._invert_logic else 1)
self._state = True
self.schedule_update_ha_state()
def turn_off(self, **kwargs):
"""Turn the device off."""
rpi_gpio.write_output(self._port, 1 if self._invert_logic else 0)
self._state = False
self.schedule_update_ha_state()
|
import logging
import yelp_meteorite
from paasta_tools.cli.utils import get_instance_config
from paasta_tools.utils import get_services_for_cluster
from paasta_tools.utils import load_system_paasta_config
log = logging.getLogger(__name__)
def emit_metrics_for_type(instance_type):
cluster = load_system_paasta_config().get_cluster()
instances = get_services_for_cluster(cluster=cluster, instance_type=instance_type)
for service, instance in instances:
service_instance_config = get_instance_config(
service=service, instance=instance, cluster=cluster
)
dimensions = {
"paasta_service": service_instance_config.service,
"paasta_cluster": service_instance_config.cluster,
"paasta_instance": service_instance_config.instance,
"paasta_pool": service_instance_config.get_pool(),
}
log.info(f"Emitting paasta.service.* with dimensions {dimensions}")
gauge = yelp_meteorite.create_gauge("paasta.service.cpus", dimensions)
gauge.set(service_instance_config.get_cpus())
gauge = yelp_meteorite.create_gauge("paasta.service.mem", dimensions)
gauge.set(service_instance_config.get_mem())
gauge = yelp_meteorite.create_gauge("paasta.service.disk", dimensions)
gauge.set(service_instance_config.get_disk())
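        # Only emit an instance-count gauge for configs that define one
        # and are not autoscaled (i.e. max_instances is unset).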
if hasattr(service_instance_config, "get_instances"):
if service_instance_config.get_max_instances() is None:
gauge = yelp_meteorite.create_gauge(
"paasta.service.instances", dimensions
)
gauge.set(service_instance_config.get_instances())
def main():
logging.basicConfig(level=logging.INFO)
for thing in ["marathon", "adhoc"]:
emit_metrics_for_type(thing)
if __name__ == "__main__":
main()
|
from datetime import timedelta
import time
from pythinkingcleaner import Discovery, ThinkingCleaner
import voluptuous as vol
from homeassistant import util
from homeassistant.components.switch import PLATFORM_SCHEMA
from homeassistant.const import CONF_HOST, STATE_OFF, STATE_ON
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import ToggleEntity
MIN_TIME_BETWEEN_SCANS = timedelta(seconds=10)
MIN_TIME_BETWEEN_FORCED_SCANS = timedelta(milliseconds=100)
MIN_TIME_TO_WAIT = timedelta(seconds=5)
MIN_TIME_TO_LOCK_UPDATE = 5
SWITCH_TYPES = {
"clean": ["Clean", None, None],
"dock": ["Dock", None, None],
"find": ["Find", None, None],
}
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({vol.Optional(CONF_HOST): cv.string})
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the ThinkingCleaner platform."""
host = config.get(CONF_HOST)
if host:
devices = [ThinkingCleaner(host, "unknown")]
else:
discovery = Discovery()
devices = discovery.discover()
@util.Throttle(MIN_TIME_BETWEEN_SCANS, MIN_TIME_BETWEEN_FORCED_SCANS)
def update_devices():
"""Update all devices."""
for device_object in devices:
device_object.update()
dev = []
for device in devices:
for type_name in SWITCH_TYPES:
dev.append(ThinkingCleanerSwitch(device, type_name, update_devices))
add_entities(dev)
class ThinkingCleanerSwitch(ToggleEntity):
"""ThinkingCleaner Switch (dock, clean, find me)."""
def __init__(self, tc_object, switch_type, update_devices):
"""Initialize the ThinkingCleaner."""
self.type = switch_type
self._update_devices = update_devices
self._tc_object = tc_object
self._state = self._tc_object.is_cleaning if switch_type == "clean" else False
self.lock = False
self.last_lock_time = None
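        # graceful_state optimistically mirrors the cleaning state while
        # the device itself is still catching up (see lock_update).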
self.graceful_state = False
def lock_update(self):
"""Lock the update since TC clean takes some time to update."""
if self.is_update_locked():
return
self.lock = True
self.last_lock_time = time.time()
def reset_update_lock(self):
"""Reset the update lock."""
self.lock = False
self.last_lock_time = None
def set_graceful_lock(self, state):
"""Set the graceful state."""
self.graceful_state = state
self.reset_update_lock()
self.lock_update()
def is_update_locked(self):
"""Check if the update method is locked."""
if self.last_lock_time is None:
return False
if time.time() - self.last_lock_time >= MIN_TIME_TO_LOCK_UPDATE:
self.last_lock_time = None
return False
return True
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._tc_object.name} {SWITCH_TYPES[self.type][0]}"
@property
def is_on(self):
"""Return true if device is on."""
if self.type == "clean":
return (
self.graceful_state
if self.is_update_locked()
else self._tc_object.is_cleaning
)
return False
def turn_on(self, **kwargs):
"""Turn the device on."""
if self.type == "clean":
self.set_graceful_lock(True)
self._tc_object.start_cleaning()
elif self.type == "dock":
self._tc_object.dock()
elif self.type == "find":
self._tc_object.find_me()
def turn_off(self, **kwargs):
"""Turn the device off."""
if self.type == "clean":
self.set_graceful_lock(False)
self._tc_object.stop_cleaning()
def update(self):
"""Update the switch state (Only for clean)."""
if self.type == "clean" and not self.is_update_locked():
self._tc_object.update()
self._state = STATE_ON if self._tc_object.is_cleaning else STATE_OFF
|
from django.test import TestCase
from django.utils.encoding import smart_str
from zinnia.admin.fields import MPTTModelChoiceIterator
from zinnia.admin.fields import MPTTModelMultipleChoiceField
from zinnia.models import Category
class MPTTModelChoiceIteratorTestCase(TestCase):
def test_choice(self):
category_1 = Category.objects.create(
title='Category 1', slug='cat-1')
category_2 = Category.objects.create(
title='Category 2', slug='cat-2',
parent=category_1)
class FakeField(object):
queryset = Category.objects.all()
def prepare_value(self, value):
return value.pk
def label_from_instance(self, obj):
return smart_str(obj)
field = FakeField()
iterator = MPTTModelChoiceIterator(field)
self.assertEqual(iterator.choice(category_1),
(category_1.pk, 'Category 1', (1, 1)))
self.assertEqual(iterator.choice(category_2),
(category_2.pk, 'Category 2', (1, 2)))
class MPTTModelMultipleChoiceFieldTestCase(TestCase):
def setUp(self):
self.category_1 = Category.objects.create(
title='Category 1', slug='cat-1')
self.category_2 = Category.objects.create(
title='Category 2', slug='cat-2',
parent=self.category_1)
def test_label_from_instance(self):
queryset = Category.objects.all()
field = MPTTModelMultipleChoiceField(
queryset=queryset)
self.assertEqual(field.label_from_instance(self.category_1),
'Category 1')
self.assertEqual(field.label_from_instance(self.category_2),
'|-- Category 2')
field = MPTTModelMultipleChoiceField(
level_indicator='-->', queryset=queryset)
self.assertEqual(field.label_from_instance(self.category_2),
'--> Category 2')
def test_get_choices(self):
queryset = Category.objects.all()
field = MPTTModelMultipleChoiceField(
queryset=queryset)
self.assertEqual(list(field.choices),
[(self.category_1.pk, 'Category 1', (1, 1)),
(self.category_2.pk, '|-- Category 2', (1, 2))])
field = MPTTModelMultipleChoiceField(
level_indicator='-->', queryset=queryset)
self.assertEqual(list(field.choices),
[(self.category_1.pk, 'Category 1', (1, 1)),
(self.category_2.pk, '--> Category 2', (1, 2))])
|
import pytest
from homeassistant.components import geo_location
from homeassistant.components.demo.geo_location import (
DEFAULT_UPDATE_INTERVAL,
NUMBER_OF_DEMO_DEVICES,
)
from homeassistant.const import (
ATTR_LATITUDE,
ATTR_LONGITUDE,
ATTR_UNIT_OF_MEASUREMENT,
LENGTH_KILOMETERS,
)
from homeassistant.setup import async_setup_component
import homeassistant.util.dt as dt_util
from tests.async_mock import patch
from tests.common import assert_setup_component, async_fire_time_changed
CONFIG = {geo_location.DOMAIN: [{"platform": "demo"}]}
@pytest.fixture(autouse=True)
def mock_legacy_time(legacy_patchable_time):
"""Make time patchable for all the tests."""
yield
async def test_setup_platform(hass):
"""Test setup of demo platform via configuration."""
utcnow = dt_util.utcnow()
# Patching 'utcnow' to gain more control over the timed update.
with patch("homeassistant.util.dt.utcnow", return_value=utcnow):
with assert_setup_component(1, geo_location.DOMAIN):
assert await async_setup_component(hass, geo_location.DOMAIN, CONFIG)
await hass.async_block_till_done()
        # In this test, one home zone and the demo geolocation entities
        # have been generated.
all_states = [
hass.states.get(entity_id)
for entity_id in hass.states.async_entity_ids(geo_location.DOMAIN)
]
assert len(all_states) == NUMBER_OF_DEMO_DEVICES
for state in all_states:
# Check a single device's attributes.
if state.domain != geo_location.DOMAIN:
# ignore home zone state
continue
assert abs(state.attributes[ATTR_LATITUDE] - hass.config.latitude) < 1.0
assert abs(state.attributes[ATTR_LONGITUDE] - hass.config.longitude) < 1.0
assert state.attributes[ATTR_UNIT_OF_MEASUREMENT] == LENGTH_KILOMETERS
# Update (replaces 1 device).
async_fire_time_changed(hass, utcnow + DEFAULT_UPDATE_INTERVAL)
await hass.async_block_till_done()
# Get all states again, ensure that the number of states is still
# the same, but the lists are different.
all_states_updated = [
hass.states.get(entity_id)
for entity_id in hass.states.async_entity_ids(geo_location.DOMAIN)
]
assert len(all_states_updated) == NUMBER_OF_DEMO_DEVICES
assert all_states != all_states_updated
|
import asyncio
from datetime import datetime, timedelta
import logging
from typing import Any, Callable, Dict, Hashable, Optional
from homeassistant.const import MAX_TIME_TRACKING_ERROR
from homeassistant.core import HomeAssistant, callback
import homeassistant.util.dt as dt_util
_LOGGER = logging.getLogger(__name__)
class KeyedRateLimit:
"""Class to track rate limits."""
def __init__(
self,
hass: HomeAssistant,
):
"""Initialize ratelimit tracker."""
self.hass = hass
self._last_triggered: Dict[Hashable, datetime] = {}
self._rate_limit_timers: Dict[Hashable, asyncio.TimerHandle] = {}
@callback
def async_has_timer(self, key: Hashable) -> bool:
"""Check if a rate limit timer is running."""
if not self._rate_limit_timers:
return False
return key in self._rate_limit_timers
@callback
def async_triggered(self, key: Hashable, now: Optional[datetime] = None) -> None:
"""Call when the action we are tracking was triggered."""
self.async_cancel_timer(key)
self._last_triggered[key] = now or dt_util.utcnow()
@callback
def async_cancel_timer(self, key: Hashable) -> None:
"""Cancel a rate limit time that will call the action."""
if not self._rate_limit_timers or not self.async_has_timer(key):
return
self._rate_limit_timers.pop(key).cancel()
@callback
def async_remove(self) -> None:
"""Remove all timers."""
for timer in self._rate_limit_timers.values():
timer.cancel()
self._rate_limit_timers.clear()
@callback
def async_schedule_action(
self,
key: Hashable,
rate_limit: Optional[timedelta],
now: datetime,
action: Callable,
*args: Any,
) -> Optional[datetime]:
"""Check rate limits and schedule an action if we hit the limit.
If the rate limit is hit:
Schedules the action for when the rate limit expires
if there are no pending timers. The action must
be called in async.
Returns the time the rate limit will expire
If the rate limit is not hit:
Return None
"""
if rate_limit is None:
return None
last_triggered = self._last_triggered.get(key)
if not last_triggered:
return None
next_call_time = last_triggered + rate_limit
if next_call_time <= now:
self.async_cancel_timer(key)
return None
_LOGGER.debug(
"Reached rate limit of %s for %s and deferred action until %s",
rate_limit,
key,
next_call_time,
)
if key not in self._rate_limit_timers:
self._rate_limit_timers[key] = self.hass.loop.call_later(
(next_call_time - now).total_seconds() + MAX_TIME_TRACKING_ERROR,
action,
*args,
)
return next_call_time
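# Usage sketch (illustrative, not part of the original module): `hass` is
# assumed to be a running HomeAssistant instance and `action` a synchronous
# callback; both are placeholders.
#
#   limiter = KeyedRateLimit(hass)
#   now = dt_util.utcnow()
#   expires = limiter.async_schedule_action(
#       "my_key", timedelta(seconds=30), now, action, "deferred-arg"
#   )
#   if expires is None:
#       action("immediate-arg")  # not rate limited; run now
#       limiter.async_triggered("my_key", now)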
|
from __future__ import unicode_literals
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import shop.models.fields
from shop.models.notification import Notify
def forwards(apps, schema_editor):
if schema_editor.connection.alias != 'default':
return
Notification = apps.get_model('shop', 'Notification')
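    # Legacy encoding in `mail_to`: NULL meant "notify nobody" and the
    # sentinel 0 meant "notify the customer"; both map onto the new
    # `notify` enum below.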
for row in Notification.objects.all():
if row.mail_to is None:
row.notify = Notify.NOBODY
if row.mail_to == 0:
row.mail_to = None
row.notify = Notify.CUSTOMER
row.save(update_fields=['mail_to', 'notify'])
def backwards(apps, schema_editor):
if schema_editor.connection.alias != 'default':
return
Notification = apps.get_model('shop', 'Notification')
for row in Notification.objects.all():
if row.notify is Notify.NOBODY:
row.mail_to = None
if row.notify is Notify.CUSTOMER:
row.mail_to = 0
row.save(update_fields=['mail_to'])
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
('shop', '0006_rename_to_catalog'),
]
operations = [
migrations.AddField(
model_name='notification',
name='notify',
field=shop.models.fields.ChoiceEnumField(verbose_name='Whom to notify', default=Notify.NOBODY),
),
migrations.RunPython(forwards, reverse_code=backwards),
migrations.RenameField(
model_name='notification',
old_name='mail_to',
new_name='recipient',
),
migrations.AlterField(
model_name='notification',
name='recipient',
field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, related_name='notifications', to=settings.AUTH_USER_MODEL, verbose_name='Recipient'),
),
migrations.AlterModelOptions(
name='notification',
options={'ordering': ['transition_target', 'recipient_id'], 'verbose_name': 'Notification',
'verbose_name_plural': 'Notifications'},
),
]
|
import unittest
from trashcli.trash import TrashDirs
from mock import Mock
from mock import MagicMock
from trashcli.trash import TopTrashDirRules
class TestTrashDirs_listing(unittest.TestCase):
def test_the_method_2_is_always_in(self):
self.uid = 123
self.volumes = ['/usb']
assert '/usb/.Trash-123' in self.trashdirs()
def test_the_method_1_is_in_if_it_is_a_sticky_dir(self):
self.uid = 123
self.volumes = ['/usb']
self.having_sticky_Trash_dir()
assert '/usb/.Trash/123' in self.trashdirs()
def test_the_method_1_is_not_considered_if_not_sticky_dir(self):
self.uid = 123
self.volumes = ['/usb']
self.having_non_sticky_Trash_dir()
assert '/usb/.Trash/123' not in self.trashdirs()
def test_should_return_home_trashcan_when_XDG_DATA_HOME_is_defined(self):
self.environ['XDG_DATA_HOME'] = '~/.local/share'
assert '~/.local/share/Trash' in self.trashdirs()
def trashdirs(self):
result = []
def append(trash_dir, volume):
result.append(trash_dir)
class FileReader:
def is_sticky_dir(_, path):
return self.Trash_dir_is_sticky
def exists(_, path):
return True
def is_symlink(_, path):
return False
class FakeTopTrashDirRules:
def valid_to_be_read(_, path, out):
if self.Trash_dir_is_sticky:
out.is_valid()
else:
out.not_valid_parent_should_be_sticky()
trash_dirs = TrashDirs(
environ=self.environ,
getuid=lambda:self.uid,
top_trashdir_rules = FakeTopTrashDirRules(),
list_volumes = lambda: self.volumes,
)
trash_dirs.on_trash_dir_found = append
trash_dirs.list_trashdirs()
return result
def setUp(self):
self.uid = -1
self.volumes = ()
self.Trash_dir_is_sticky = not_important_for_now()
self.environ = {}
def having_sticky_Trash_dir(self): self.Trash_dir_is_sticky = True
def having_non_sticky_Trash_dir(self): self.Trash_dir_is_sticky = False
def not_important_for_now(): None
class TestDescribe_AvailableTrashDirs_when_parent_is_unsticky(unittest.TestCase):
def setUp(self):
self.fs = MagicMock()
self.dirs = TrashDirs(environ = {},
getuid = lambda:123,
top_trashdir_rules = TopTrashDirRules(self.fs),
list_volumes = lambda: ['/topdir'],
)
self.dirs.on_trashdir_skipped_because_parent_not_sticky = Mock()
self.dirs.on_trashdir_skipped_because_parent_is_symlink = Mock()
self.fs.is_sticky_dir.side_effect = (
lambda path: {'/topdir/.Trash':False}[path])
def test_it_should_report_skipped_dir_non_sticky(self):
self.fs.exists.side_effect = (
lambda path: {'/topdir/.Trash/123':True}[path])
self.dirs.list_trashdirs()
(self.dirs.on_trashdir_skipped_because_parent_not_sticky.
assert_called_with('/topdir/.Trash/123'))
def test_it_shouldnot_care_about_non_existent(self):
self.fs.exists.side_effect = (
lambda path: {'/topdir/.Trash/123':False}[path])
self.dirs.list_trashdirs()
assert [] == self.dirs.on_trashdir_skipped_because_parent_not_sticky.mock_calls
class TestDescribe_AvailableTrashDirs_when_parent_is_symlink(unittest.TestCase):
def setUp(self):
self.fs = MagicMock()
self.dirs = TrashDirs(environ = {},
getuid = lambda:123,
top_trashdir_rules = TopTrashDirRules(self.fs),
list_volumes = lambda: ['/topdir'])
self.fs.exists.side_effect = (lambda path: {'/topdir/.Trash/123':True}[path])
self.symlink_error = Mock()
self.dirs.on_trashdir_skipped_because_parent_is_symlink = self.symlink_error
def test_it_should_skip_symlink(self):
self.fs.is_sticky_dir.return_value = True
self.fs.is_symlink.return_value = True
self.dirs.list_trashdirs()
self.symlink_error.assert_called_with('/topdir/.Trash/123')
|
import logging
from quantum_gateway import QuantumGatewayScanner
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant.components.device_tracker import (
DOMAIN,
PLATFORM_SCHEMA,
DeviceScanner,
)
from homeassistant.const import CONF_HOST, CONF_PASSWORD, CONF_SSL
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DEFAULT_HOST = "myfiosgateway.com"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST, default=DEFAULT_HOST): cv.string,
vol.Optional(CONF_SSL, default=True): cv.boolean,
vol.Required(CONF_PASSWORD): cv.string,
}
)
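# Illustrative configuration.yaml snippet matching the schema above
# (the password value is a placeholder):
#
#   device_tracker:
#     - platform: quantum_gateway
#       host: myfiosgateway.com
#       password: YOUR_ROUTER_PASSWORD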
def get_scanner(hass, config):
"""Validate the configuration and return a Quantum Gateway scanner."""
scanner = QuantumGatewayDeviceScanner(config[DOMAIN])
return scanner if scanner.success_init else None
class QuantumGatewayDeviceScanner(DeviceScanner):
"""This class queries a Quantum Gateway."""
def __init__(self, config):
"""Initialize the scanner."""
self.host = config[CONF_HOST]
self.password = config[CONF_PASSWORD]
self.use_https = config[CONF_SSL]
_LOGGER.debug("Initializing")
try:
self.quantum = QuantumGatewayScanner(
self.host, self.password, self.use_https
)
self.success_init = self.quantum.success_init
except RequestException:
self.success_init = False
_LOGGER.error("Unable to connect to gateway. Check host")
if not self.success_init:
_LOGGER.error("Unable to login to gateway. Check password and host")
def scan_devices(self):
"""Scan for new devices and return a list of found MACs."""
connected_devices = []
try:
connected_devices = self.quantum.scan_devices()
except RequestException:
_LOGGER.error("Unable to scan devices. Check connection to router")
return connected_devices
def get_device_name(self, device):
"""Return the name of the given device or None if we don't know."""
return self.quantum.get_device_name(device)
|
import json
from aiohttp.hdrs import AUTHORIZATION
import pytest
from homeassistant import const, core, setup
from homeassistant.components import (
alarm_control_panel,
cover,
fan,
google_assistant as ga,
light,
lock,
media_player,
switch,
)
from homeassistant.components.climate import const as climate
from homeassistant.components.humidifier import const as humidifier
from homeassistant.const import CLOUD_NEVER_EXPOSED_ENTITIES
from . import DEMO_DEVICES
API_PASSWORD = "test1234"
PROJECT_ID = "hasstest-1234"
CLIENT_ID = "helloworld"
ACCESS_TOKEN = "superdoublesecret"
@pytest.fixture
def auth_header(hass_access_token):
"""Generate an HTTP header with bearer token authorization."""
return {AUTHORIZATION: f"Bearer {hass_access_token}"}
@pytest.fixture
def assistant_client(loop, hass, aiohttp_client):
"""Create web client for the Google Assistant API."""
loop.run_until_complete(
setup.async_setup_component(
hass,
"google_assistant",
{
"google_assistant": {
"project_id": PROJECT_ID,
"entity_config": {
"light.ceiling_lights": {
"aliases": ["top lights", "ceiling lights"],
"name": "Roof Lights",
}
},
}
},
)
)
return loop.run_until_complete(aiohttp_client(hass.http.app))
@pytest.fixture
def hass_fixture(loop, hass):
"""Set up a Home Assistant instance for these tests."""
# We need to do this to get access to homeassistant/turn_(on,off)
loop.run_until_complete(setup.async_setup_component(hass, core.DOMAIN, {}))
loop.run_until_complete(
setup.async_setup_component(
hass, light.DOMAIN, {"light": [{"platform": "demo"}]}
)
)
loop.run_until_complete(
setup.async_setup_component(
hass, switch.DOMAIN, {"switch": [{"platform": "demo"}]}
)
)
loop.run_until_complete(
setup.async_setup_component(
hass, cover.DOMAIN, {"cover": [{"platform": "demo"}]}
)
)
loop.run_until_complete(
setup.async_setup_component(
hass, media_player.DOMAIN, {"media_player": [{"platform": "demo"}]}
)
)
loop.run_until_complete(
setup.async_setup_component(hass, fan.DOMAIN, {"fan": [{"platform": "demo"}]})
)
loop.run_until_complete(
setup.async_setup_component(
hass, climate.DOMAIN, {"climate": [{"platform": "demo"}]}
)
)
loop.run_until_complete(
setup.async_setup_component(
hass, humidifier.DOMAIN, {"humidifier": [{"platform": "demo"}]}
)
)
loop.run_until_complete(
setup.async_setup_component(hass, lock.DOMAIN, {"lock": [{"platform": "demo"}]})
)
loop.run_until_complete(
setup.async_setup_component(
hass,
alarm_control_panel.DOMAIN,
{"alarm_control_panel": [{"platform": "demo"}]},
)
)
return hass
# pylint: disable=redefined-outer-name
async def test_sync_request(hass_fixture, assistant_client, auth_header):
"""Test a sync request."""
reqid = "5711642932632160983"
data = {"requestId": reqid, "inputs": [{"intent": "action.devices.SYNC"}]}
result = await assistant_client.post(
ga.const.GOOGLE_ASSISTANT_API_ENDPOINT,
data=json.dumps(data),
headers=auth_header,
)
assert result.status == 200
body = await result.json()
assert body.get("requestId") == reqid
devices = body["payload"]["devices"]
assert sorted([dev["id"] for dev in devices]) == sorted(
[dev["id"] for dev in DEMO_DEVICES]
)
for dev in devices:
assert dev["id"] not in CLOUD_NEVER_EXPOSED_ENTITIES
for dev, demo in zip(
sorted(devices, key=lambda d: d["id"]),
sorted(DEMO_DEVICES, key=lambda d: d["id"]),
):
assert dev["name"] == demo["name"]
assert set(dev["traits"]) == set(demo["traits"])
assert dev["type"] == demo["type"]
async def test_query_request(hass_fixture, assistant_client, auth_header):
"""Test a query request."""
reqid = "5711642932632160984"
data = {
"requestId": reqid,
"inputs": [
{
"intent": "action.devices.QUERY",
"payload": {
"devices": [
{"id": "light.ceiling_lights"},
{"id": "light.bed_light"},
{"id": "light.kitchen_lights"},
{"id": "media_player.lounge_room"},
]
},
}
],
}
result = await assistant_client.post(
ga.const.GOOGLE_ASSISTANT_API_ENDPOINT,
data=json.dumps(data),
headers=auth_header,
)
assert result.status == 200
body = await result.json()
assert body.get("requestId") == reqid
devices = body["payload"]["devices"]
assert len(devices) == 4
assert devices["light.bed_light"]["on"] is False
assert devices["light.ceiling_lights"]["on"] is True
assert devices["light.ceiling_lights"]["brightness"] == 70
assert devices["light.ceiling_lights"]["color"]["temperatureK"] == 2631
assert devices["light.kitchen_lights"]["color"]["spectrumHsv"] == {
"hue": 345,
"saturation": 0.75,
"value": 0.7058823529411765,
}
assert devices["media_player.lounge_room"]["on"] is True
async def test_query_climate_request(hass_fixture, assistant_client, auth_header):
"""Test a query request."""
reqid = "5711642932632160984"
data = {
"requestId": reqid,
"inputs": [
{
"intent": "action.devices.QUERY",
"payload": {
"devices": [
{"id": "climate.hvac"},
{"id": "climate.heatpump"},
{"id": "climate.ecobee"},
]
},
}
],
}
result = await assistant_client.post(
ga.const.GOOGLE_ASSISTANT_API_ENDPOINT,
data=json.dumps(data),
headers=auth_header,
)
assert result.status == 200
body = await result.json()
assert body.get("requestId") == reqid
devices = body["payload"]["devices"]
assert len(devices) == 3
assert devices["climate.heatpump"] == {
"online": True,
"thermostatTemperatureSetpoint": 20.0,
"thermostatTemperatureAmbient": 25.0,
"thermostatMode": "heat",
}
assert devices["climate.ecobee"] == {
"online": True,
"thermostatTemperatureSetpointHigh": 24,
"thermostatTemperatureAmbient": 23,
"thermostatMode": "heatcool",
"thermostatTemperatureSetpointLow": 21,
"currentFanSpeedSetting": "Auto Low",
}
assert devices["climate.hvac"] == {
"online": True,
"thermostatTemperatureSetpoint": 21,
"thermostatTemperatureAmbient": 22,
"thermostatMode": "cool",
"thermostatHumidityAmbient": 54,
"currentFanSpeedSetting": "On High",
}
async def test_query_climate_request_f(hass_fixture, assistant_client, auth_header):
"""Test a query request."""
# Mock demo devices as fahrenheit to see if we convert to celsius
hass_fixture.config.units.temperature_unit = const.TEMP_FAHRENHEIT
for entity_id in ("climate.hvac", "climate.heatpump", "climate.ecobee"):
state = hass_fixture.states.get(entity_id)
attr = dict(state.attributes)
hass_fixture.states.async_set(entity_id, state.state, attr)
reqid = "5711642932632160984"
data = {
"requestId": reqid,
"inputs": [
{
"intent": "action.devices.QUERY",
"payload": {
"devices": [
{"id": "climate.hvac"},
{"id": "climate.heatpump"},
{"id": "climate.ecobee"},
]
},
}
],
}
result = await assistant_client.post(
ga.const.GOOGLE_ASSISTANT_API_ENDPOINT,
data=json.dumps(data),
headers=auth_header,
)
assert result.status == 200
body = await result.json()
assert body.get("requestId") == reqid
devices = body["payload"]["devices"]
assert len(devices) == 3
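    # The demo setpoints are now interpreted as °F and reported back in °C,
    # e.g. the heat pump's 20 -> (20 - 32) * 5 / 9 ≈ -6.7.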
assert devices["climate.heatpump"] == {
"online": True,
"thermostatTemperatureSetpoint": -6.7,
"thermostatTemperatureAmbient": -3.9,
"thermostatMode": "heat",
}
assert devices["climate.ecobee"] == {
"online": True,
"thermostatTemperatureSetpointHigh": -4.4,
"thermostatTemperatureAmbient": -5,
"thermostatMode": "heatcool",
"thermostatTemperatureSetpointLow": -6.1,
"currentFanSpeedSetting": "Auto Low",
}
assert devices["climate.hvac"] == {
"online": True,
"thermostatTemperatureSetpoint": -6.1,
"thermostatTemperatureAmbient": -5.6,
"thermostatMode": "cool",
"thermostatHumidityAmbient": 54,
"currentFanSpeedSetting": "On High",
}
hass_fixture.config.units.temperature_unit = const.TEMP_CELSIUS
async def test_query_humidifier_request(hass_fixture, assistant_client, auth_header):
"""Test a query request."""
reqid = "5711642932632160984"
data = {
"requestId": reqid,
"inputs": [
{
"intent": "action.devices.QUERY",
"payload": {
"devices": [
{"id": "humidifier.humidifier"},
{"id": "humidifier.dehumidifier"},
{"id": "humidifier.hygrostat"},
]
},
}
],
}
result = await assistant_client.post(
ga.const.GOOGLE_ASSISTANT_API_ENDPOINT,
data=json.dumps(data),
headers=auth_header,
)
assert result.status == 200
body = await result.json()
assert body.get("requestId") == reqid
devices = body["payload"]["devices"]
assert len(devices) == 3
assert devices["humidifier.humidifier"] == {
"on": True,
"online": True,
"humiditySetpointPercent": 68,
}
assert devices["humidifier.dehumidifier"] == {
"on": True,
"online": True,
"humiditySetpointPercent": 54,
}
assert devices["humidifier.hygrostat"] == {
"on": True,
"online": True,
"humiditySetpointPercent": 50,
"currentModeSettings": {"mode": "home"},
}
async def test_execute_request(hass_fixture, assistant_client, auth_header):
"""Test an execute request."""
reqid = "5711642932632160985"
data = {
"requestId": reqid,
"inputs": [
{
"intent": "action.devices.EXECUTE",
"payload": {
"commands": [
{
"devices": [
{"id": "light.ceiling_lights"},
{"id": "switch.decorative_lights"},
{"id": "media_player.lounge_room"},
],
"execution": [
{
"command": "action.devices.commands.OnOff",
"params": {"on": False},
}
],
},
{
"devices": [{"id": "media_player.walkman"}],
"execution": [
{
"command": "action.devices.commands.setVolume",
"params": {"volumeLevel": 70},
}
],
},
{
"devices": [{"id": "light.kitchen_lights"}],
"execution": [
{
"command": "action.devices.commands.ColorAbsolute",
"params": {"color": {"spectrumRGB": 16711680}},
}
],
},
{
"devices": [{"id": "light.bed_light"}],
"execution": [
{
"command": "action.devices.commands.ColorAbsolute",
"params": {"color": {"spectrumRGB": 65280}},
},
{
"command": "action.devices.commands.ColorAbsolute",
"params": {"color": {"temperature": 4700}},
},
],
},
{
"devices": [{"id": "humidifier.humidifier"}],
"execution": [
{
"command": "action.devices.commands.OnOff",
"params": {"on": False},
}
],
},
{
"devices": [{"id": "humidifier.dehumidifier"}],
"execution": [
{
"command": "action.devices.commands.SetHumidity",
"params": {"humidity": 45},
}
],
},
{
"devices": [{"id": "humidifier.hygrostat"}],
"execution": [
{
"command": "action.devices.commands.SetModes",
"params": {"updateModeSettings": {"mode": "eco"}},
}
],
},
]
},
}
],
}
result = await assistant_client.post(
ga.const.GOOGLE_ASSISTANT_API_ENDPOINT,
data=json.dumps(data),
headers=auth_header,
)
assert result.status == 200
body = await result.json()
assert body.get("requestId") == reqid
commands = body["payload"]["commands"]
assert len(commands) == 9
assert not any(result["status"] == "ERROR" for result in commands)
ceiling = hass_fixture.states.get("light.ceiling_lights")
assert ceiling.state == "off"
kitchen = hass_fixture.states.get("light.kitchen_lights")
assert kitchen.attributes.get(light.ATTR_RGB_COLOR) == (255, 0, 0)
bed = hass_fixture.states.get("light.bed_light")
assert bed.attributes.get(light.ATTR_COLOR_TEMP) == 212
assert hass_fixture.states.get("switch.decorative_lights").state == "off"
walkman = hass_fixture.states.get("media_player.walkman")
assert walkman.state == "playing"
assert walkman.attributes.get(media_player.ATTR_MEDIA_VOLUME_LEVEL) == 0.7
lounge = hass_fixture.states.get("media_player.lounge_room")
assert lounge.state == "off"
humidifier_state = hass_fixture.states.get("humidifier.humidifier")
assert humidifier_state.state == "off"
dehumidifier = hass_fixture.states.get("humidifier.dehumidifier")
assert dehumidifier.attributes.get(humidifier.ATTR_HUMIDITY) == 45
hygrostat = hass_fixture.states.get("humidifier.hygrostat")
assert hygrostat.attributes.get(humidifier.ATTR_MODE) == "eco"
|
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links.model.ssd import Multibox
@testing.parameterize(*testing.product({
'n_class': [1, 5],
'aspect_ratios': [((2,),), ((2,), (3, 4), (5,))],
'batchsize': [1, 2],
}))
class TestMultibox(unittest.TestCase):
def setUp(self):
self.link = Multibox(self.n_class, self.aspect_ratios)
xs = []
n_bbox = 0
for ar in self.aspect_ratios:
C, H, W = np.random.randint(1, 10, size=3)
xs.append(
np.random.uniform(size=(self.batchsize, C, H, W))
.astype(np.float32))
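            # Each feature-map cell yields (len(ar) + 1) * 2 default boxes:
            # aspect ratio 1 at two scales plus an (ar, 1/ar) pair per ratio.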
n_bbox += H * W * (len(ar) + 1) * 2
self.xs = xs
self.n_bbox = n_bbox
def _check_forward(self, xs):
mb_locs, mb_confs = self.link(xs)
self.assertIsInstance(mb_locs, chainer.Variable)
self.assertIsInstance(mb_locs.array, type(xs[0]))
self.assertEqual(mb_locs.shape, (self.batchsize, self.n_bbox, 4))
self.assertEqual(mb_locs.dtype, xs[0].dtype)
self.assertIsInstance(mb_confs, chainer.Variable)
self.assertIsInstance(mb_confs.array, type(xs[0]))
self.assertEqual(
mb_confs.shape, (self.batchsize, self.n_bbox, self.n_class))
self.assertEqual(mb_confs.dtype, xs[0].dtype)
def test_forward_cpu(self):
self._check_forward(self.xs)
@attr.gpu
def test_forward_gpu(self):
self.link.to_gpu()
self._check_forward(list(map(chainer.backends.cuda.to_gpu, self.xs)))
testing.run_module(__name__, __file__)
|
import asyncio
from collections import defaultdict
from enum import Enum
from typing import Any
from typing import DefaultDict
from typing import Dict
from typing import Iterable
from typing import List
from typing import Mapping
from typing import MutableMapping
from typing import Sequence
import a_sync
import pytz
from kubernetes.client import V1Pod
from kubernetes.client import V1ReplicaSet
from kubernetes.client.rest import ApiException
from mypy_extensions import TypedDict
from paasta_tools import cassandracluster_tools
from paasta_tools import envoy_tools
from paasta_tools import flink_tools
from paasta_tools import kafkacluster_tools
from paasta_tools import kubernetes_tools
from paasta_tools import marathon_tools
from paasta_tools import nrtsearchservice_tools
from paasta_tools import smartstack_tools
from paasta_tools.cli.utils import LONG_RUNNING_INSTANCE_TYPE_HANDLERS
from paasta_tools.instance.hpa_metrics_parser import HPAMetricsDict
from paasta_tools.instance.hpa_metrics_parser import HPAMetricsParser
from paasta_tools.kubernetes_tools import get_pod_event_messages
from paasta_tools.kubernetes_tools import get_tail_lines_for_kubernetes_container
from paasta_tools.kubernetes_tools import KubernetesDeploymentConfig
from paasta_tools.long_running_service_tools import LongRunningServiceConfig
from paasta_tools.long_running_service_tools import ServiceNamespaceConfig
from paasta_tools.smartstack_tools import KubeSmartstackEnvoyReplicationChecker
from paasta_tools.smartstack_tools import match_backends_and_pods
from paasta_tools.utils import calculate_tail_lines
INSTANCE_TYPES_CR = {"flink", "cassandracluster", "kafkacluster"}
INSTANCE_TYPES_K8S = {"kubernetes", "cassandracluster"}
INSTANCE_TYPES = INSTANCE_TYPES_K8S.union(INSTANCE_TYPES_CR)
INSTANCE_TYPES_WITH_SET_STATE = {"flink"}
INSTANCE_TYPE_CR_ID = dict(
flink=flink_tools.cr_id,
cassandracluster=cassandracluster_tools.cr_id,
kafkacluster=kafkacluster_tools.cr_id,
nrtsearchservice=nrtsearchservice_tools.cr_id,
)
class ServiceMesh(Enum):
SMARTSTACK = "smartstack"
ENVOY = "envoy"
class KubernetesAutoscalingStatusDict(TypedDict):
min_instances: int
max_instances: int
metrics: List
desired_replicas: int
last_scale_time: str
def cr_id(service: str, instance: str, instance_type: str) -> Mapping[str, str]:
cr_id_fn = INSTANCE_TYPE_CR_ID.get(instance_type)
if not cr_id_fn:
raise RuntimeError(f"Unknown instance type {instance_type}")
return cr_id_fn(service, instance)
def can_handle(instance_type: str) -> bool:
return instance_type in INSTANCE_TYPES
def can_set_state(instance_type: str) -> bool:
return instance_type in INSTANCE_TYPES_WITH_SET_STATE
def set_cr_desired_state(
kube_client: kubernetes_tools.KubeClient,
service: str,
instance: str,
instance_type: str,
desired_state: str,
):
try:
kubernetes_tools.set_cr_desired_state(
kube_client=kube_client,
cr_id=cr_id(service, instance, instance_type),
desired_state=desired_state,
)
except ApiException as e:
error_message = (
f"Error while setting state {desired_state} of "
f"{service}.{instance}: {e}"
)
raise RuntimeError(error_message)
def autoscaling_status(
kube_client: kubernetes_tools.KubeClient,
job_config: LongRunningServiceConfig,
namespace: str,
) -> KubernetesAutoscalingStatusDict:
try:
hpa = kube_client.autoscaling.read_namespaced_horizontal_pod_autoscaler(
name=job_config.get_sanitised_deployment_name(), namespace=namespace
)
except ApiException as e:
if e.status == 404:
return KubernetesAutoscalingStatusDict(
min_instances=-1,
max_instances=-1,
metrics=[],
desired_replicas=-1,
last_scale_time="unknown (could not find HPA object)",
)
else:
raise
# Parse metrics sources, based on
# https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V2beta2ExternalMetricSource.md#v2beta2externalmetricsource
parser = HPAMetricsParser(hpa)
# https://github.com/python/mypy/issues/7217
metrics_by_name: DefaultDict[str, HPAMetricsDict] = defaultdict(
lambda: HPAMetricsDict()
)
if hpa.spec.metrics is not None:
for metric_spec in hpa.spec.metrics:
parsed = parser.parse_target(metric_spec)
metrics_by_name[parsed["name"]].update(parsed)
if hpa.status.current_metrics is not None:
for metric_spec in hpa.status.current_metrics:
parsed = parser.parse_current(metric_spec)
metrics_by_name[parsed["name"]].update(parsed)
metric_stats = list(metrics_by_name.values())
    last_scale_time = (
        hpa.status.last_scale_time.replace(tzinfo=pytz.UTC).isoformat()
        if hpa.status.last_scale_time
        else "N/A"
    )
return KubernetesAutoscalingStatusDict(
min_instances=hpa.spec.min_replicas,
max_instances=hpa.spec.max_replicas,
metrics=metric_stats,
desired_replicas=hpa.status.desired_replicas,
last_scale_time=last_scale_time,
)
async def pod_info(
pod: V1Pod, client: kubernetes_tools.KubeClient, num_tail_lines: int,
):
container_statuses = pod.status.container_statuses or []
pod_event_messages = await get_pod_event_messages(client, pod)
containers = [
dict(
name=container.name,
tail_lines=await get_tail_lines_for_kubernetes_container(
client, pod, container, num_tail_lines,
),
)
for container in container_statuses
]
return {
"name": pod.metadata.name,
"host": kubernetes_tools.get_pod_hostname(client, pod),
"deployed_timestamp": pod.metadata.creation_timestamp.timestamp(),
"phase": pod.status.phase,
"ready": kubernetes_tools.is_pod_ready(pod),
"containers": containers,
"reason": pod.status.reason,
"message": pod.status.message,
"events": pod_event_messages,
"git_sha": pod.metadata.labels.get("paasta.yelp.com/git_sha"),
"config_sha": pod.metadata.labels.get("paasta.yelp.com/config_sha"),
}
@a_sync.to_blocking
async def job_status(
kstatus: MutableMapping[str, Any],
client: kubernetes_tools.KubeClient,
job_config: LongRunningServiceConfig,
pod_list: Sequence[V1Pod],
replicaset_list: Sequence[V1ReplicaSet],
verbose: int,
namespace: str,
) -> None:
app_id = job_config.get_sanitised_deployment_name()
kstatus["app_id"] = app_id
kstatus["pods"] = []
kstatus["replicasets"] = []
if verbose > 0:
num_tail_lines = calculate_tail_lines(verbose)
kstatus["pods"] = await asyncio.gather(
*[pod_info(pod, client, num_tail_lines) for pod in pod_list]
)
for replicaset in replicaset_list:
try:
ready_replicas = replicaset.status.ready_replicas
if ready_replicas is None:
ready_replicas = 0
except AttributeError:
ready_replicas = 0
kstatus["replicasets"].append(
{
"name": replicaset.metadata.name,
"replicas": replicaset.spec.replicas,
"ready_replicas": ready_replicas,
"create_timestamp": replicaset.metadata.creation_timestamp.timestamp(),
"git_sha": replicaset.metadata.labels.get(
"paasta.yelp.com/git_sha"
),
"config_sha": replicaset.metadata.labels.get(
"paasta.yelp.com/config_sha"
),
}
)
kstatus["expected_instance_count"] = job_config.get_instances()
app = kubernetes_tools.get_kubernetes_app_by_name(
name=app_id, kube_client=client, namespace=namespace
)
desired_instances = (
job_config.get_instances() if job_config.get_desired_state() != "stop" else 0
)
deploy_status, message = await kubernetes_tools.get_kubernetes_app_deploy_status(
app=app, kube_client=client, desired_instances=desired_instances,
)
kstatus["deploy_status"] = kubernetes_tools.KubernetesDeployStatus.tostring(
deploy_status
)
kstatus["deploy_status_message"] = message
kstatus["running_instance_count"] = (
app.status.ready_replicas if app.status.ready_replicas else 0
)
kstatus["create_timestamp"] = app.metadata.creation_timestamp.timestamp()
kstatus["namespace"] = app.metadata.namespace
def mesh_status(
service: str,
service_mesh: ServiceMesh,
instance: str,
job_config: LongRunningServiceConfig,
service_namespace_config: ServiceNamespaceConfig,
pods: Sequence[V1Pod],
settings: Any,
should_return_individual_backends: bool = False,
) -> Mapping[str, Any]:
registration = job_config.get_registrations()[0]
instance_pool = job_config.get_pool()
replication_checker = KubeSmartstackEnvoyReplicationChecker(
nodes=kubernetes_tools.get_all_nodes(settings.kubernetes_client),
system_paasta_config=settings.system_paasta_config,
)
node_hostname_by_location = replication_checker.get_allowed_locations_and_hosts(
job_config
)
expected_smartstack_count = marathon_tools.get_expected_instance_count_for_namespace(
service=service,
namespace=job_config.get_nerve_namespace(),
cluster=settings.cluster,
instance_type_class=KubernetesDeploymentConfig,
)
expected_count_per_location = int(
expected_smartstack_count / len(node_hostname_by_location)
)
mesh_status: MutableMapping[str, Any] = {
"registration": registration,
"expected_backends_per_location": expected_count_per_location,
"locations": [],
}
for location, hosts in node_hostname_by_location.items():
host = replication_checker.get_first_host_in_pool(hosts, instance_pool)
if service_mesh == ServiceMesh.SMARTSTACK:
mesh_status["locations"].append(
_build_smartstack_location_dict(
synapse_host=host,
synapse_port=settings.system_paasta_config.get_synapse_port(),
synapse_haproxy_url_format=settings.system_paasta_config.get_synapse_haproxy_url_format(),
registration=registration,
pods=pods,
location=location,
should_return_individual_backends=should_return_individual_backends,
)
)
elif service_mesh == ServiceMesh.ENVOY:
mesh_status["locations"].append(
_build_envoy_location_dict(
envoy_host=host,
envoy_admin_port=settings.system_paasta_config.get_envoy_admin_port(),
envoy_admin_endpoint_format=settings.system_paasta_config.get_envoy_admin_endpoint_format(),
registration=registration,
pods=pods,
location=location,
should_return_individual_backends=should_return_individual_backends,
)
)
return mesh_status
def _build_envoy_location_dict(
envoy_host: str,
envoy_admin_port: int,
envoy_admin_endpoint_format: str,
registration: str,
pods: Iterable[V1Pod],
location: str,
should_return_individual_backends: bool,
) -> MutableMapping[str, Any]:
backends = envoy_tools.get_backends(
registration,
envoy_host=envoy_host,
envoy_admin_port=envoy_admin_port,
envoy_admin_endpoint_format=envoy_admin_endpoint_format,
)
sorted_envoy_backends = sorted(
[
backend[0]
for _, service_backends in backends.items()
for backend in service_backends
],
key=lambda backend: backend["eds_health_status"],
)
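    # Each Envoy backend entry is a (backend, is_casper_proxied) pair;
    # collect the addresses of backends fronted by Casper.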
casper_proxied_backends = {
(backend["address"], backend["port_value"])
for _, service_backends in backends.items()
for backend, is_casper_proxied_backend in service_backends
if is_casper_proxied_backend
}
matched_envoy_backends_and_pods = envoy_tools.match_backends_and_pods(
sorted_envoy_backends, pods,
)
return envoy_tools.build_envoy_location_dict(
location,
matched_envoy_backends_and_pods,
should_return_individual_backends,
casper_proxied_backends,
)
def _build_smartstack_location_dict(
synapse_host: str,
synapse_port: int,
synapse_haproxy_url_format: str,
registration: str,
pods: Iterable[V1Pod],
location: str,
should_return_individual_backends: bool,
) -> MutableMapping[str, Any]:
sorted_backends = sorted(
smartstack_tools.get_backends(
registration,
synapse_host=synapse_host,
synapse_port=synapse_port,
synapse_haproxy_url_format=synapse_haproxy_url_format,
),
key=lambda backend: backend["status"],
reverse=True, # put 'UP' backends above 'MAINT' backends
)
matched_backends_and_pods = match_backends_and_pods(sorted_backends, pods)
location_dict = smartstack_tools.build_smartstack_location_dict(
location, matched_backends_and_pods, should_return_individual_backends
)
return location_dict
def cr_status(
service: str, instance: str, verbose: int, instance_type: str, kube_client: Any,
) -> Mapping[str, Any]:
status: MutableMapping[str, Any] = {}
cr = (
kubernetes_tools.get_cr(
kube_client=kube_client, cr_id=cr_id(service, instance, instance_type)
)
or {}
)
crstatus = cr.get("status")
metadata = cr.get("metadata")
if crstatus is not None:
status["status"] = crstatus
if metadata is not None:
status["metadata"] = metadata
return status
def kubernetes_status(
service: str,
instance: str,
verbose: int,
include_smartstack: bool,
include_envoy: bool,
instance_type: str,
settings: Any,
) -> Mapping[str, Any]:
kstatus: Dict[str, Any] = {}
config_loader = LONG_RUNNING_INSTANCE_TYPE_HANDLERS[instance_type].loader
job_config = config_loader(
service=service,
instance=instance,
cluster=settings.cluster,
soa_dir=settings.soa_dir,
load_deployments=True,
)
kube_client = settings.kubernetes_client
if kube_client is None:
return kstatus
app = kubernetes_tools.get_kubernetes_app_by_name(
name=job_config.get_sanitised_deployment_name(),
kube_client=kube_client,
namespace=job_config.get_kubernetes_namespace(),
)
# bouncing status can be inferred from app_count, ref get_bouncing_status
pod_list = kubernetes_tools.pods_for_service_instance(
service=job_config.service,
instance=job_config.instance,
kube_client=kube_client,
namespace=job_config.get_kubernetes_namespace(),
)
replicaset_list = kubernetes_tools.replicasets_for_service_instance(
service=job_config.service,
instance=job_config.instance,
kube_client=kube_client,
namespace=job_config.get_kubernetes_namespace(),
)
active_shas = kubernetes_tools.get_active_shas_for_service(
[app, *pod_list, *replicaset_list]
)
kstatus["app_count"] = len(active_shas)
kstatus["desired_state"] = job_config.get_desired_state()
kstatus["bounce_method"] = job_config.get_bounce_method()
kstatus["active_shas"] = list(active_shas)
job_status(
kstatus=kstatus,
client=kube_client,
namespace=job_config.get_kubernetes_namespace(),
job_config=job_config,
verbose=verbose,
pod_list=pod_list,
replicaset_list=replicaset_list,
)
if (
job_config.is_autoscaling_enabled() is True
and job_config.get_autoscaling_params().get("decision_policy", "") != "bespoke" # type: ignore
):
try:
kstatus["autoscaling_status"] = autoscaling_status(
kube_client, job_config, job_config.get_kubernetes_namespace()
)
except Exception as e:
kstatus[
"error_message"
] = f"Unknown error happened. Please contact #compute-infra for help: {e}"
evicted_count = 0
for pod in pod_list:
if pod.status.reason == "Evicted":
evicted_count += 1
kstatus["evicted_count"] = evicted_count
if include_smartstack or include_envoy:
service_namespace_config = kubernetes_tools.load_service_namespace_config(
service=service,
namespace=job_config.get_nerve_namespace(),
soa_dir=settings.soa_dir,
)
if "proxy_port" in service_namespace_config:
if include_smartstack:
kstatus["smartstack"] = mesh_status(
service=service,
service_mesh=ServiceMesh.SMARTSTACK,
instance=job_config.get_nerve_namespace(),
job_config=job_config,
service_namespace_config=service_namespace_config,
pods=pod_list,
should_return_individual_backends=verbose > 0,
settings=settings,
)
if include_envoy:
kstatus["envoy"] = mesh_status(
service=service,
service_mesh=ServiceMesh.ENVOY,
instance=job_config.get_nerve_namespace(),
job_config=job_config,
service_namespace_config=service_namespace_config,
pods=pod_list,
should_return_individual_backends=verbose > 0,
settings=settings,
)
return kstatus
def instance_status(
service: str,
instance: str,
verbose: int,
include_smartstack: bool,
include_envoy: bool,
instance_type: str,
settings: Any,
) -> Mapping[str, Any]:
status = {}
if not can_handle(instance_type):
raise RuntimeError(
f"Unknown instance type: {instance_type!r}, "
f"can handle: {INSTANCE_TYPES}"
)
if instance_type in INSTANCE_TYPES_CR:
status[instance_type] = cr_status(
service=service,
instance=instance,
instance_type=instance_type,
verbose=verbose,
kube_client=settings.kubernetes_client,
)
if instance_type in INSTANCE_TYPES_K8S:
status["kubernetes"] = kubernetes_status(
service=service,
instance=instance,
instance_type=instance_type,
verbose=verbose,
include_smartstack=include_smartstack,
include_envoy=include_envoy,
settings=settings,
)
return status
|
from homeassistant.components import sensor
from homeassistant.setup import async_setup_component
from tests.async_mock import patch
from tests.common import assert_setup_component
VALID_CONFIG = {
"platform": "nsw_fuel_station",
"station_id": 350,
"fuel_types": ["E10", "P95"],
}
class MockPrice:
"""Mock Price implementation."""
def __init__(self, price, fuel_type, last_updated, price_unit, station_code):
"""Initialize a mock price instance."""
self.price = price
self.fuel_type = fuel_type
self.last_updated = last_updated
self.price_unit = price_unit
self.station_code = station_code
class MockStation:
"""Mock Station implementation."""
def __init__(self, name, code):
"""Initialize a mock Station instance."""
self.name = name
self.code = code
class MockGetReferenceDataResponse:
"""Mock GetReferenceDataResponse implementation."""
def __init__(self, stations):
"""Initialize a mock GetReferenceDataResponse instance."""
self.stations = stations
class FuelCheckClientMock:
"""Mock FuelCheckClient implementation."""
def get_fuel_prices_for_station(self, station):
"""Return a fake fuel prices response."""
return [
MockPrice(
price=150.0,
fuel_type="P95",
last_updated=None,
price_unit=None,
station_code=350,
),
MockPrice(
price=140.0,
fuel_type="E10",
last_updated=None,
price_unit=None,
station_code=350,
),
]
def get_reference_data(self):
"""Return a fake reference data response."""
return MockGetReferenceDataResponse(
stations=[MockStation(code=350, name="My Fake Station")]
)
@patch(
"homeassistant.components.nsw_fuel_station.sensor.FuelCheckClient",
new=FuelCheckClientMock,
)
async def test_setup(hass):
"""Test the setup with custom settings."""
with assert_setup_component(1, sensor.DOMAIN):
assert await async_setup_component(
hass, sensor.DOMAIN, {"sensor": VALID_CONFIG}
)
await hass.async_block_till_done()
fake_entities = ["my_fake_station_p95", "my_fake_station_e10"]
for entity_id in fake_entities:
state = hass.states.get(f"sensor.{entity_id}")
assert state is not None
@patch(
"homeassistant.components.nsw_fuel_station.sensor.FuelCheckClient",
new=FuelCheckClientMock,
)
async def test_sensor_values(hass):
"""Test retrieval of sensor values."""
assert await async_setup_component(hass, sensor.DOMAIN, {"sensor": VALID_CONFIG})
await hass.async_block_till_done()
assert "140.0" == hass.states.get("sensor.my_fake_station_e10").state
assert "150.0" == hass.states.get("sensor.my_fake_station_p95").state
|
from pytest import mark
from cerberus import errors
from cerberus.tests import assert_fail, assert_success
@mark.parametrize('value', ('', (), {}, []))
def test_empty(value):
field = 'test'
document = {field: value}
assert_success(schema={field: {}}, document=document)
assert_success(schema={field: {"empty": True}}, document=document)
assert_fail(
schema={field: {"empty": False}},
document=document,
error=(field, (field, 'empty'), errors.EMPTY, False),
)
def test_empty_skips_regex(validator):
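    # With empty: True an empty value is accepted outright, so remaining
    # rules such as regex are skipped rather than failing.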
assert validator(
document={'foo': ''}, schema={'foo': {'empty': True, 'regex': r'\d?\d\.\d\d'}}
)
|
import numpy as np
import unittest
from pgmpy.factors.discrete import DiscreteFactor
from pgmpy.models import FactorGraph
from pgmpy.models import MarkovModel
from pgmpy.models import JunctionTree
from pgmpy.tests import help_functions as hf
class TestFactorGraphCreation(unittest.TestCase):
def setUp(self):
self.graph = FactorGraph()
def test_class_init_without_data(self):
self.assertIsInstance(self.graph, FactorGraph)
def test_class_init_data_string(self):
self.graph = FactorGraph([("a", "phi1"), ("b", "phi1")])
self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "phi1"])
self.assertListEqual(
hf.recursive_sorted(self.graph.edges()), [["a", "phi1"], ["b", "phi1"]]
)
def test_add_single_node(self):
self.graph.add_node("phi1")
self.assertEqual(list(self.graph.nodes()), ["phi1"])
def test_add_multiple_nodes(self):
self.graph.add_nodes_from(["a", "b", "phi1"])
self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "phi1"])
def test_add_single_edge(self):
self.graph.add_edge("a", "phi1")
self.assertListEqual(sorted(self.graph.nodes()), ["a", "phi1"])
self.assertListEqual(hf.recursive_sorted(self.graph.edges()), [["a", "phi1"]])
def test_add_multiple_edges(self):
self.graph.add_edges_from([("a", "phi1"), ("b", "phi1")])
self.assertListEqual(sorted(self.graph.nodes()), ["a", "b", "phi1"])
self.assertListEqual(
hf.recursive_sorted(self.graph.edges()), [["a", "phi1"], ["b", "phi1"]]
)
def test_add_self_loop_raises_error(self):
self.assertRaises(ValueError, self.graph.add_edge, "a", "a")
def tearDown(self):
del self.graph
class TestFactorGraphFactorOperations(unittest.TestCase):
def setUp(self):
self.graph = FactorGraph()
def test_add_single_factor(self):
self.graph.add_edges_from([("a", "phi1"), ("b", "phi1")])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
self.graph.add_factors(phi1)
self.assertCountEqual(self.graph.factors, [phi1])
def test_add_multiple_factors(self):
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
self.assertCountEqual(self.graph.factors, [phi1, phi2])
def test_get_factors(self):
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.assertCountEqual(self.graph.get_factors(), [])
self.graph.add_factors(phi1, phi2)
self.assertEqual(self.graph.get_factors(node=phi1), phi1)
self.assertEqual(self.graph.get_factors(node=phi2), phi2)
self.assertCountEqual(self.graph.get_factors(), [phi1, phi2])
self.graph.remove_factors(phi1)
self.assertRaises(ValueError, self.graph.get_factors, node=phi1)
def test_remove_factors(self):
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
self.graph.remove_factors(phi1)
self.assertEqual(set(self.graph.factors), set([phi2]))
self.assertTrue(
(("c", phi2) in self.graph.edges()) or ((phi2, "c") in self.graph.edges())
)
self.assertTrue(
(("b", phi2) in self.graph.edges()) or ((phi2, "b") in self.graph.edges())
)
self.assertEqual(set(self.graph.nodes()), set(["a", "b", "c", phi2]))
def test_get_partition_function(self):
phi1 = DiscreteFactor(["a", "b"], [2, 2], range(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], range(4))
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
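        # Z = sum_b (sum_a phi1(a,b)) * (sum_c phi2(b,c))
        #   = (0+2)*(0+1) + (1+3)*(2+3) = 2 + 20 = 22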
self.assertEqual(self.graph.get_partition_function(), 22.0)
def tearDown(self):
del self.graph
class TestFactorGraphMethods(unittest.TestCase):
def setUp(self):
self.graph = FactorGraph()
def test_get_cardinality(self):
self.graph.add_edges_from(
[
("a", "phi1"),
("b", "phi1"),
("c", "phi2"),
("d", "phi2"),
("a", "phi3"),
("d", "phi3"),
]
)
self.assertDictEqual(self.graph.get_cardinality(), {})
phi1 = DiscreteFactor(["a", "b"], [1, 2], np.random.rand(2))
self.graph.add_factors(phi1)
self.assertDictEqual(self.graph.get_cardinality(), {"a": 1, "b": 2})
self.graph.remove_factors(phi1)
self.assertDictEqual(self.graph.get_cardinality(), {})
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["c", "d"], [1, 2], np.random.rand(2))
self.graph.add_factors(phi1, phi2)
self.assertDictEqual(
self.graph.get_cardinality(), {"d": 2, "a": 2, "b": 2, "c": 1}
)
phi3 = DiscreteFactor(["d", "a"], [1, 2], np.random.rand(2))
self.graph.add_factors(phi3)
self.assertDictEqual(
self.graph.get_cardinality(), {"d": 1, "c": 1, "b": 2, "a": 2}
)
self.graph.remove_factors(phi1, phi2, phi3)
self.assertDictEqual(self.graph.get_cardinality(), {})
def test_get_cardinality_with_node(self):
self.graph.add_nodes_from(["a", "b", "c"])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
self.assertEqual(self.graph.get_cardinality("a"), 2)
self.assertEqual(self.graph.get_cardinality("b"), 2)
self.assertEqual(self.graph.get_cardinality("c"), 2)
def test_get_factor_nodes(self):
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
self.assertCountEqual(self.graph.get_factor_nodes(), [phi1, phi2])
def test_get_variable_nodes(self):
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
self.assertCountEqual(self.graph.get_variable_nodes(), ["a", "b", "c"])
def test_get_variable_nodes_raises_error(self):
self.graph.add_edges_from(
[("a", "phi1"), ("b", "phi1"), ("b", "phi2"), ("c", "phi2")]
)
self.assertRaises(ValueError, self.graph.get_variable_nodes)
def test_to_markov_model(self):
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
mm = self.graph.to_markov_model()
self.assertIsInstance(mm, MarkovModel)
self.assertListEqual(sorted(mm.nodes()), ["a", "b", "c"])
self.assertListEqual(hf.recursive_sorted(mm.edges()), [["a", "b"], ["b", "c"]])
self.assertListEqual(
sorted(mm.get_factors(), key=lambda x: x.scope()), [phi1, phi2]
)
def test_to_junction_tree(self):
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
jt = self.graph.to_junction_tree()
self.assertIsInstance(jt, JunctionTree)
self.assertListEqual(hf.recursive_sorted(jt.nodes()), [["a", "b"], ["b", "c"]])
self.assertEqual(len(jt.edges()), 1)
def test_check_model(self):
self.graph.add_nodes_from(["a", "b", "c"])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
self.assertTrue(self.graph.check_model())
phi1 = DiscreteFactor(["a", "b"], [4, 2], np.random.rand(8))
self.graph.add_factors(phi1, replace=True)
self.assertTrue(self.graph.check_model())
def test_check_model1(self):
self.graph.add_nodes_from(["a", "b", "c", "d"])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
self.assertRaises(ValueError, self.graph.check_model)
self.graph.remove_node("d")
self.assertTrue(self.graph.check_model())
def test_check_model2(self):
self.graph.add_nodes_from(["a", "b", "c"])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
self.graph.add_edges_from([("a", "b")])
self.assertRaises(ValueError, self.graph.check_model)
self.graph.add_edges_from([(phi1, phi2)])
self.assertRaises(ValueError, self.graph.check_model)
self.graph.remove_edges_from([("a", "b"), (phi1, phi2)])
self.assertTrue(self.graph.check_model())
def test_check_model3(self):
self.graph.add_nodes_from(["a", "b", "c"])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
phi3 = DiscreteFactor(["a", "c"], [2, 2], np.random.rand(4))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2, phi3)
self.assertRaises(ValueError, self.graph.check_model)
self.graph.remove_factors(phi3)
self.assertTrue(self.graph.check_model())
def test_check_model4(self):
self.graph.add_nodes_from(["a", "b", "c"])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [3, 2], np.random.rand(6))
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
self.graph.add_factors(phi1, phi2)
self.assertRaises(ValueError, self.graph.check_model)
phi3 = DiscreteFactor(["c", "a"], [4, 4], np.random.rand(16))
self.graph.add_factors(phi3, replace=True)
self.assertRaises(ValueError, self.graph.check_model)
def test_copy(self):
self.graph.add_nodes_from(["a", "b", "c"])
phi1 = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
phi2 = DiscreteFactor(["b", "c"], [2, 2], np.random.rand(4))
self.graph.add_factors(phi1, phi2)
self.graph.add_nodes_from([phi1, phi2])
self.graph.add_edges_from([("a", phi1), ("b", phi1), ("b", phi2), ("c", phi2)])
graph_copy = self.graph.copy()
self.assertIsInstance(graph_copy, FactorGraph)
self.assertTrue(graph_copy.check_model())
self.assertEqual(self.graph.get_factors(), graph_copy.get_factors())
self.graph.remove_factors(phi1, phi2)
self.assertTrue(
phi1 not in self.graph.factors and phi2 not in self.graph.factors
)
self.assertTrue(phi1 in graph_copy.factors and phi2 in graph_copy.factors)
self.graph.add_factors(phi1, phi2)
self.graph.factors[0] = DiscreteFactor(["a", "b"], [2, 2], np.random.rand(4))
self.assertNotEqual(self.graph.get_factors()[0], graph_copy.get_factors()[0])
self.assertNotEqual(self.graph.factors, graph_copy.factors)
def tearDown(self):
del self.graph
|
import numpy as np
import unittest
import chainer
from chainer import testing
from chainer.testing import attr
from chainercv.links import YOLOv3
@testing.parameterize(*testing.product({
'n_fg_class': [1, 5, 20],
}))
class TestYOLOv3(unittest.TestCase):
def setUp(self):
self.link = YOLOv3(n_fg_class=self.n_fg_class)
self.insize = 416
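        # At a 416px input, YOLOv3 predicts on three feature maps
        # (13x13, 26x26 and 52x52 grids) with 3 anchor boxes per cell,
        # which is where the bounding-box count below comes from.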
self.n_bbox = (13 * 13 + 26 * 26 + 52 * 52) * 3
def _check_call(self):
x = self.link.xp.array(
np.random.uniform(-1, 1, size=(1, 3, self.insize, self.insize)),
dtype=np.float32)
locs, objs, confs = self.link(x)
self.assertIsInstance(locs, chainer.Variable)
self.assertIsInstance(locs.array, self.link.xp.ndarray)
self.assertEqual(locs.shape, (1, self.n_bbox, 4))
self.assertIsInstance(objs, chainer.Variable)
self.assertIsInstance(objs.array, self.link.xp.ndarray)
self.assertEqual(objs.shape, (1, self.n_bbox))
self.assertIsInstance(confs, chainer.Variable)
self.assertIsInstance(confs.array, self.link.xp.ndarray)
self.assertEqual(confs.shape, (1, self.n_bbox, self.n_fg_class))
@attr.slow
def test_call_cpu(self):
self._check_call()
@attr.gpu
@attr.slow
def test_call_gpu(self):
self.link.to_gpu()
self._check_call()
@testing.parameterize(*testing.product({
'n_fg_class': [None, 10, 20],
'pretrained_model': ['voc0712'],
}))
class TestYOLOv3Pretrained(unittest.TestCase):
@attr.slow
def test_pretrained(self):
kwargs = {
'n_fg_class': self.n_fg_class,
'pretrained_model': self.pretrained_model,
}
if self.pretrained_model == 'voc0712':
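            # the voc0712 weights are trained on the 20 Pascal VOC classes,
            # so only n_fg_class in {None, 20} can load them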
valid = self.n_fg_class in {None, 20}
if valid:
YOLOv3(**kwargs)
else:
with self.assertRaises(ValueError):
YOLOv3(**kwargs)
testing.run_module(__name__, __file__)
|
from aiopvapi.resources.shade import factory as PvShade
from homeassistant.const import DEVICE_CLASS_BATTERY, PERCENTAGE
from homeassistant.core import callback
from .const import (
COORDINATOR,
DEVICE_INFO,
DOMAIN,
PV_API,
PV_SHADE_DATA,
SHADE_BATTERY_LEVEL,
SHADE_BATTERY_LEVEL_MAX,
)
from .entity import ShadeEntity
async def async_setup_entry(hass, entry, async_add_entities):
"""Set up the hunter douglas shades sensors."""
pv_data = hass.data[DOMAIN][entry.entry_id]
shade_data = pv_data[PV_SHADE_DATA]
pv_request = pv_data[PV_API]
coordinator = pv_data[COORDINATOR]
device_info = pv_data[DEVICE_INFO]
entities = []
for raw_shade in shade_data.values():
shade = PvShade(raw_shade, pv_request)
if SHADE_BATTERY_LEVEL not in shade.raw_data:
continue
name_before_refresh = shade.name
entities.append(
PowerViewShadeBatterySensor(
coordinator, device_info, shade, name_before_refresh
)
)
async_add_entities(entities)
class PowerViewShadeBatterySensor(ShadeEntity):
"""Representation of an shade battery charge sensor."""
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return PERCENTAGE
@property
def name(self):
"""Name of the shade battery."""
return f"{self._shade_name} Battery"
@property
def device_class(self):
"""Shade battery Class."""
return DEVICE_CLASS_BATTERY
@property
def unique_id(self):
"""Shade battery Uniqueid."""
return f"{self._unique_id}_charge"
@property
def state(self):
"""Get the current value in percentage."""
return round(
self._shade.raw_data[SHADE_BATTERY_LEVEL] / SHADE_BATTERY_LEVEL_MAX * 100
)
async def async_added_to_hass(self):
"""When entity is added to hass."""
self.async_on_remove(
self.coordinator.async_add_listener(self._async_update_shade_from_group)
)
@callback
def _async_update_shade_from_group(self):
"""Update with new data from the coordinator."""
self._shade.raw_data = self.coordinator.data[self._shade.id]
self.async_write_ha_state()
|
import unittest
import numpy as np
import pandas as pd
import networkx as nx
from pgmpy.models.BayesianModel import BayesianModel
from pgmpy.estimators import TreeSearch
from pgmpy.factors.discrete import TabularCPD
from pgmpy.sampling import BayesianModelSampling
class TestTreeSearch(unittest.TestCase):
def setUp(self):
# test data for chow-liu
self.data11 = pd.DataFrame(
data=[[1, 1, 0], [1, 1, 1], [1, 1, 0]], columns=["B", "C", "D"]
)
# test data for chow-liu
self.data12 = pd.DataFrame(
np.random.randint(low=0, high=2, size=(100, 5)),
columns=["A", "B", "C", "D", "E"],
)
# test data for chow-liu
model = BayesianModel(
[("A", "B"), ("A", "C"), ("B", "D"), ("B", "E"), ("C", "F")]
)
cpd_a = TabularCPD("A", 2, [[0.4], [0.6]])
cpd_b = TabularCPD(
"B",
3,
[[0.6, 0.2], [0.3, 0.5], [0.1, 0.3]],
evidence=["A"],
evidence_card=[2],
)
cpd_c = TabularCPD(
"C", 2, [[0.3, 0.4], [0.7, 0.6]], evidence=["A"], evidence_card=[2]
)
cpd_d = TabularCPD(
"D",
3,
[[0.5, 0.3, 0.1], [0.4, 0.4, 0.8], [0.1, 0.3, 0.1]],
evidence=["B"],
evidence_card=[3],
)
cpd_e = TabularCPD(
"E",
2,
[[0.3, 0.5, 0.2], [0.7, 0.5, 0.8]],
evidence=["B"],
evidence_card=[3],
)
cpd_f = TabularCPD(
"F",
3,
[[0.3, 0.6], [0.5, 0.2], [0.2, 0.2]],
evidence=["C"],
evidence_card=[2],
)
model.add_cpds(cpd_a, cpd_b, cpd_c, cpd_d, cpd_e, cpd_f)
inference = BayesianModelSampling(model)
self.data13 = inference.forward_sample(size=10000, return_type="dataframe")
# test data for TAN
self.data21 = pd.DataFrame(
data=[[0, 1, 1, 0], [1, 1, 1, 1], [0, 1, 1, 0]],
columns=["A", "B", "C", "D"],
)
# test data for TAN
model = BayesianModel(
[
("A", "R"),
("A", "B"),
("A", "C"),
("A", "D"),
("A", "E"),
("R", "B"),
("R", "C"),
("R", "D"),
("R", "E"),
]
)
cpd_a = TabularCPD("A", 2, [[0.7], [0.3]])
cpd_r = TabularCPD(
"R",
3,
[[0.6, 0.2], [0.3, 0.5], [0.1, 0.3]],
evidence=["A"],
evidence_card=[2],
)
cpd_b = TabularCPD(
"B",
3,
[
[0.1, 0.1, 0.2, 0.2, 0.7, 0.1],
[0.1, 0.3, 0.1, 0.2, 0.1, 0.2],
[0.8, 0.6, 0.7, 0.6, 0.2, 0.7],
],
evidence=["A", "R"],
evidence_card=[2, 3],
)
cpd_c = TabularCPD(
"C",
2,
[[0.7, 0.2, 0.2, 0.5, 0.1, 0.3], [0.3, 0.8, 0.8, 0.5, 0.9, 0.7]],
evidence=["A", "R"],
evidence_card=[2, 3],
)
cpd_d = TabularCPD(
"D",
3,
[
[0.3, 0.8, 0.2, 0.8, 0.4, 0.7],
[0.4, 0.1, 0.4, 0.1, 0.1, 0.1],
[0.3, 0.1, 0.4, 0.1, 0.5, 0.2],
],
evidence=["A", "R"],
evidence_card=[2, 3],
)
cpd_e = TabularCPD(
"E",
2,
[[0.5, 0.6, 0.6, 0.5, 0.5, 0.4], [0.5, 0.4, 0.4, 0.5, 0.5, 0.6]],
evidence=["A", "R"],
evidence_card=[2, 3],
)
model.add_cpds(cpd_a, cpd_r, cpd_b, cpd_c, cpd_d, cpd_e)
inference = BayesianModelSampling(model)
self.data22 = inference.forward_sample(size=10000, return_type="dataframe")
def test_estimate_chow_liu(self):
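        # Chow-Liu builds a maximum-weight spanning tree over pairwise
        # mutual information and orients the edges away from the root node.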
# learn tree structure using D as root node
for n_jobs in [-1, 1]:
est = TreeSearch(self.data11, root_node="D", n_jobs=n_jobs)
dag = est.estimate(estimator_type="chow-liu")
# check number of nodes and edges are as expected
self.assertCountEqual(dag.nodes(), ["B", "C", "D"])
self.assertCountEqual(dag.edges(), [("D", "B"), ("D", "C")])
# check tree structure exists
self.assertTrue(dag.has_edge("D", "B"))
self.assertTrue(dag.has_edge("D", "C"))
# learn tree structure using B as root node
est = TreeSearch(self.data11, root_node="B", n_jobs=n_jobs)
dag = est.estimate(estimator_type="chow-liu")
# check number of nodes and edges are as expected
self.assertCountEqual(dag.nodes(), ["B", "C", "D"])
self.assertCountEqual(dag.edges(), [("B", "D"), ("D", "C")])
# check tree structure exists
self.assertTrue(dag.has_edge("B", "D"))
self.assertTrue(dag.has_edge("D", "C"))
# check invalid root node
with self.assertRaises(ValueError):
est = TreeSearch(self.data11, root_node="A", n_jobs=n_jobs)
# learn graph structure
est = TreeSearch(self.data12, root_node="A", n_jobs=n_jobs)
dag = est.estimate(estimator_type="chow-liu")
# check number of nodes and edges are as expected
self.assertCountEqual(dag.nodes(), ["A", "B", "C", "D", "E"])
self.assertTrue(nx.is_tree(dag))
# learn tree structure using A as root node
est = TreeSearch(self.data13, root_node="A", n_jobs=n_jobs)
dag = est.estimate(estimator_type="chow-liu")
# check number of nodes and edges are as expected
self.assertCountEqual(dag.nodes(), ["A", "B", "C", "D", "E", "F"])
self.assertCountEqual(
dag.edges(),
[("A", "B"), ("A", "C"), ("B", "D"), ("B", "E"), ("C", "F")],
)
# check tree structure exists
self.assertTrue(dag.has_edge("A", "B"))
self.assertTrue(dag.has_edge("A", "C"))
self.assertTrue(dag.has_edge("B", "D"))
self.assertTrue(dag.has_edge("B", "E"))
self.assertTrue(dag.has_edge("C", "F"))
def test_estimate_tan(self):
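        # TAN (tree-augmented naive Bayes) connects the class node to every
        # other variable and additionally learns a tree over those variables.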
for n_jobs in [-1, 1]:
# learn graph structure
est = TreeSearch(self.data21, root_node="D", n_jobs=n_jobs)
dag = est.estimate(estimator_type="tan", class_node="A")
# check number of nodes and edges are as expected
self.assertCountEqual(dag.nodes(), ["A", "B", "C", "D"])
self.assertCountEqual(
dag.edges(),
[("A", "B"), ("A", "C"), ("A", "D"), ("D", "B"), ("D", "C")],
)
# check directed edge between dependent and independent variables
self.assertTrue(dag.has_edge("A", "B"))
self.assertTrue(dag.has_edge("A", "C"))
self.assertTrue(dag.has_edge("A", "D"))
# check tree structure exists over independent variables
self.assertTrue(dag.has_edge("D", "B"))
self.assertTrue(dag.has_edge("D", "C"))
# check invalid root node
with self.assertRaises(ValueError):
est = TreeSearch(self.data21, root_node="X", n_jobs=n_jobs)
# check invalid class node
est = TreeSearch(self.data21, root_node="D", n_jobs=n_jobs)
with self.assertRaises(ValueError):
est.estimate(estimator_type="tan", class_node="X")
est = TreeSearch(self.data21, root_node="D", n_jobs=n_jobs)
with self.assertRaises(ValueError):
est.estimate(estimator_type="tan", class_node="D")
# learn graph structure
est = TreeSearch(self.data22, root_node="R", n_jobs=n_jobs)
dag = est.estimate(estimator_type="tan", class_node="A")
# check number of nodes and edges are as expected
self.assertCountEqual(dag.nodes(), ["A", "B", "C", "D", "E", "R"])
self.assertCountEqual(
dag.edges(),
[
("A", "B"),
("A", "C"),
("A", "D"),
("A", "E"),
("A", "R"),
("R", "B"),
("R", "C"),
("R", "D"),
("R", "E"),
],
)
# check directed edge between class and independent variables
self.assertTrue(dag.has_edge("A", "B"))
self.assertTrue(dag.has_edge("A", "C"))
self.assertTrue(dag.has_edge("A", "D"))
self.assertTrue(dag.has_edge("A", "E"))
# check tree structure exists over independent variables
self.assertTrue(dag.has_edge("R", "B"))
self.assertTrue(dag.has_edge("R", "C"))
self.assertTrue(dag.has_edge("R", "D"))
self.assertTrue(dag.has_edge("R", "E"))
def tearDown(self):
del self.data11
del self.data12
del self.data21
del self.data22
|
from collections import OrderedDict
import uuid
from homeassistant.components.automation import DOMAIN, PLATFORM_SCHEMA
from homeassistant.components.automation.config import async_validate_config_item
from homeassistant.config import AUTOMATION_CONFIG_PATH
from homeassistant.const import CONF_ID, SERVICE_RELOAD
from homeassistant.helpers import config_validation as cv, entity_registry
from . import ACTION_DELETE, EditIdBasedConfigView
async def async_setup(hass):
"""Set up the Automation config API."""
async def hook(action, config_key):
"""post_write_hook for Config View that reloads automations."""
await hass.services.async_call(DOMAIN, SERVICE_RELOAD)
if action != ACTION_DELETE:
return
ent_reg = await entity_registry.async_get_registry(hass)
entity_id = ent_reg.async_get_entity_id(DOMAIN, DOMAIN, config_key)
if entity_id is None:
return
ent_reg.async_remove(entity_id)
hass.http.register_view(
EditAutomationConfigView(
DOMAIN,
"config",
AUTOMATION_CONFIG_PATH,
cv.string,
PLATFORM_SCHEMA,
post_write_hook=hook,
data_validator=async_validate_config_item,
)
)
return True
class EditAutomationConfigView(EditIdBasedConfigView):
"""Edit automation config."""
def _write_value(self, hass, data, config_key, new_value):
"""Set value."""
index = None
for index, cur_value in enumerate(data):
# When people copy paste their automations to the config file,
# they sometimes forget to add IDs. Fix it here.
if CONF_ID not in cur_value:
cur_value[CONF_ID] = uuid.uuid4().hex
elif cur_value[CONF_ID] == config_key:
break
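        # This for/else falls through to the else only when no existing
        # automation matched config_key, so a new entry is created below.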
else:
cur_value = OrderedDict()
cur_value[CONF_ID] = config_key
index = len(data)
data.append(cur_value)
# Iterate through some keys that we want to have ordered in the output
updated_value = OrderedDict()
for key in ("id", "alias", "description", "trigger", "condition", "action"):
if key in cur_value:
updated_value[key] = cur_value[key]
if key in new_value:
updated_value[key] = new_value[key]
# We cover all current fields above, but just in case we start
# supporting more fields in the future.
updated_value.update(cur_value)
updated_value.update(new_value)
data[index] = updated_value
|
from typing import Optional, Tuple
import attr
from pychromecast.const import CAST_MANUFACTURERS
from .const import DEFAULT_PORT
@attr.s(slots=True, frozen=True)
class ChromecastInfo:
"""Class to hold all data about a chromecast for creating connections.
    This also has the same attributes as the mDNS fields reported by zeroconf.
"""
services: Optional[set] = attr.ib()
host: Optional[str] = attr.ib(default=None)
port: Optional[int] = attr.ib(default=0)
uuid: Optional[str] = attr.ib(
converter=attr.converters.optional(str), default=None
) # always convert UUID to string if not None
model_name: str = attr.ib(default="")
friendly_name: Optional[str] = attr.ib(default=None)
@property
def is_audio_group(self) -> bool:
"""Return if this is an audio group."""
return self.port != DEFAULT_PORT
@property
def host_port(self) -> Tuple[str, int]:
"""Return the host+port tuple."""
return self.host, self.port
@property
    def manufacturer(self) -> Optional[str]:
"""Return the manufacturer."""
if not self.model_name:
return None
return CAST_MANUFACTURERS.get(self.model_name.lower(), "Google Inc.")
class ChromeCastZeroconf:
"""Class to hold a zeroconf instance."""
__zconf = None
@classmethod
def set_zeroconf(cls, zconf):
"""Set zeroconf."""
cls.__zconf = zconf
@classmethod
def get_zeroconf(cls):
"""Get zeroconf."""
return cls.__zconf
class CastStatusListener:
"""Helper class to handle pychromecast status callbacks.
Necessary because a CastDevice entity can create a new socket client
and therefore callbacks from multiple chromecast connections can
potentially arrive. This class allows invalidating past chromecast objects.
"""
def __init__(self, cast_device, chromecast, mz_mgr):
"""Initialize the status listener."""
self._cast_device = cast_device
self._uuid = chromecast.uuid
self._valid = True
self._mz_mgr = mz_mgr
chromecast.register_status_listener(self)
chromecast.socket_client.media_controller.register_status_listener(self)
chromecast.register_connection_listener(self)
if cast_device._cast_info.is_audio_group:
self._mz_mgr.add_multizone(chromecast)
else:
self._mz_mgr.register_listener(chromecast.uuid, self)
def new_cast_status(self, cast_status):
"""Handle reception of a new CastStatus."""
if self._valid:
self._cast_device.new_cast_status(cast_status)
def new_media_status(self, media_status):
"""Handle reception of a new MediaStatus."""
if self._valid:
self._cast_device.new_media_status(media_status)
def new_connection_status(self, connection_status):
"""Handle reception of a new ConnectionStatus."""
if self._valid:
self._cast_device.new_connection_status(connection_status)
@staticmethod
def added_to_multizone(group_uuid):
"""Handle the cast added to a group."""
def removed_from_multizone(self, group_uuid):
"""Handle the cast removed from a group."""
if self._valid:
self._cast_device.multizone_new_media_status(group_uuid, None)
def multizone_new_cast_status(self, group_uuid, cast_status):
"""Handle reception of a new CastStatus for a group."""
def multizone_new_media_status(self, group_uuid, media_status):
"""Handle reception of a new MediaStatus for a group."""
if self._valid:
self._cast_device.multizone_new_media_status(group_uuid, media_status)
def invalidate(self):
"""Invalidate this status listener.
        All subsequent callbacks will be ignored.
"""
# pylint: disable=protected-access
if self._cast_device._cast_info.is_audio_group:
self._mz_mgr.remove_multizone(self._uuid)
else:
self._mz_mgr.deregister_listener(self._uuid, self)
self._valid = False
|
import os.path as op
import numpy as np
from numpy.testing import (assert_array_almost_equal, assert_array_equal,
assert_equal)
import pytest
from mne.datasets import testing
from mne import (read_label, read_forward_solution, pick_types_forward,
convert_forward_solution)
from mne.label import Label
from mne.simulation.source import simulate_stc, simulate_sparse_stc
from mne.simulation.source import SourceSimulator
from mne.utils import run_tests_if_main, check_version
data_path = testing.data_path(download=False)
fname_fwd = op.join(data_path, 'MEG', 'sample',
'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
label_names = ['Aud-lh', 'Aud-rh', 'Vis-rh']
subjects_dir = op.join(data_path, 'subjects')
@pytest.fixture(scope="module", params=[testing._pytest_param()])
def _get_fwd_labels():
fwd = read_forward_solution(fname_fwd)
fwd = convert_forward_solution(fwd, force_fixed=True, use_cps=True)
fwd = pick_types_forward(fwd, meg=True, eeg=False)
labels = [read_label(op.join(data_path, 'MEG', 'sample', 'labels',
'%s.label' % label)) for label in label_names]
return fwd, labels
def _get_idx_label_stc(label, stc):
hemi_idx_mapping = dict(lh=0, rh=1)
hemi_idx = hemi_idx_mapping[label.hemi]
idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
idx = np.searchsorted(stc.vertices[hemi_idx], idx)
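    # Right-hemisphere rows are stacked after the left-hemisphere rows in
    # stc.data, so offset the indices by the number of lh vertices.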
if hemi_idx == 1:
idx += len(stc.vertices[0])
return idx
def test_simulate_stc(_get_fwd_labels):
"""Test generation of source estimate."""
fwd, labels = _get_fwd_labels
mylabels = []
for i, label in enumerate(labels):
new_label = Label(vertices=label.vertices,
pos=label.pos,
values=2 * i * np.ones(len(label.values)),
hemi=label.hemi,
comment=label.comment)
mylabels.append(new_label)
n_times = 10
tmin = 0
tstep = 1e-3
stc_data = np.ones((len(labels), n_times))
stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
assert_equal(stc.subject, 'sample')
for label in labels:
idx = _get_idx_label_stc(label, stc)
assert (np.all(stc.data[idx] == 1.0))
assert (stc.data[idx].shape[1] == n_times)
# test with function
def fun(x):
return x ** 2
stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
# the first label has value 0, the second value 2, the third value 6
for i, label in enumerate(labels):
idx = _get_idx_label_stc(label, stc)
res = ((2. * i) ** 2.) * np.ones((len(idx), n_times))
assert_array_almost_equal(stc.data[idx], res)
# degenerate conditions
label_subset = mylabels[:2]
data_subset = stc_data[:2]
stc = simulate_stc(fwd['src'], label_subset, data_subset, tmin, tstep, fun)
pytest.raises(ValueError, simulate_stc, fwd['src'],
label_subset, data_subset[:-1], tmin, tstep, fun)
pytest.raises(RuntimeError, simulate_stc, fwd['src'], label_subset * 2,
np.concatenate([data_subset] * 2, axis=0), tmin, tstep, fun)
i = np.where(fwd['src'][0]['inuse'] == 0)[0][0]
label_single_vert = Label(vertices=[i],
pos=fwd['src'][0]['rr'][i:i + 1, :],
hemi='lh')
stc = simulate_stc(fwd['src'], [label_single_vert], stc_data[:1], tmin,
tstep)
assert_equal(len(stc.lh_vertno), 1)
def test_simulate_sparse_stc(_get_fwd_labels):
"""Test generation of sparse source estimate."""
fwd, labels = _get_fwd_labels
n_times = 10
tmin = 0
tstep = 1e-3
times = np.arange(n_times, dtype=np.float64) * tstep + tmin
pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(labels),
times, labels=labels, location='center', subject='sample',
subjects_dir=subjects_dir) # no non-zero values
mylabels = []
for label in labels:
this_label = label.copy()
this_label.values.fill(1.)
mylabels.append(this_label)
for location in ('random', 'center'):
random_state = 0 if location == 'random' else None
stc_1 = simulate_sparse_stc(fwd['src'], len(mylabels), times,
labels=mylabels, random_state=random_state,
location=location,
subjects_dir=subjects_dir)
assert_equal(stc_1.subject, 'sample')
assert (stc_1.data.shape[0] == len(mylabels))
assert (stc_1.data.shape[1] == n_times)
# make sure we get the same result when using the same seed
stc_2 = simulate_sparse_stc(fwd['src'], len(mylabels), times,
labels=mylabels, random_state=random_state,
location=location,
subjects_dir=subjects_dir)
assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
# Degenerate cases
pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(mylabels),
times, labels=mylabels, location='center', subject='foo',
subjects_dir=subjects_dir) # wrong subject
del fwd['src'][0]['subject_his_id'] # remove subject
pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(mylabels),
times, labels=mylabels, location='center',
subjects_dir=subjects_dir) # no subject
fwd['src'][0]['subject_his_id'] = 'sample' # put back subject
pytest.raises(ValueError, simulate_sparse_stc, fwd['src'], len(mylabels),
times, labels=mylabels, location='foo') # bad location
err_str = 'Number of labels'
with pytest.raises(ValueError, match=err_str):
simulate_sparse_stc(
fwd['src'], len(mylabels) + 1, times, labels=mylabels,
random_state=random_state, location=location,
subjects_dir=subjects_dir)
def test_generate_stc_single_hemi(_get_fwd_labels):
"""Test generation of source estimate, single hemi."""
fwd, labels = _get_fwd_labels
labels_single_hemi = labels[1:] # keep only labels in one hemisphere
mylabels = []
for i, label in enumerate(labels_single_hemi):
new_label = Label(vertices=label.vertices,
pos=label.pos,
values=2 * i * np.ones(len(label.values)),
hemi=label.hemi,
comment=label.comment)
mylabels.append(new_label)
n_times = 10
tmin = 0
tstep = 1e-3
stc_data = np.ones((len(labels_single_hemi), n_times))
stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep)
for label in labels_single_hemi:
idx = _get_idx_label_stc(label, stc)
assert (np.all(stc.data[idx] == 1.0))
assert (stc.data[idx].shape[1] == n_times)
# test with function
def fun(x):
return x ** 2
stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep, fun)
# the first label has value 0, the second value 2, the third value 6
for i, label in enumerate(labels_single_hemi):
if label.hemi == 'lh':
hemi_idx = 0
else:
hemi_idx = 1
idx = np.intersect1d(stc.vertices[hemi_idx], label.vertices)
idx = np.searchsorted(stc.vertices[hemi_idx], idx)
if hemi_idx == 1:
idx += len(stc.vertices[0])
res = ((2. * i) ** 2.) * np.ones((len(idx), n_times))
assert_array_almost_equal(stc.data[idx], res)
def test_simulate_sparse_stc_single_hemi(_get_fwd_labels):
"""Test generation of sparse source estimate."""
fwd, labels = _get_fwd_labels
labels_single_hemi = labels[1:] # keep only labels in one hemisphere
n_times = 10
tmin = 0
tstep = 1e-3
times = np.arange(n_times, dtype=np.float64) * tstep + tmin
stc_1 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times,
labels=labels_single_hemi, random_state=0)
assert (stc_1.data.shape[0] == len(labels_single_hemi))
assert (stc_1.data.shape[1] == n_times)
# make sure we get the same result when using the same seed
stc_2 = simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times,
labels=labels_single_hemi, random_state=0)
assert_array_equal(stc_1.lh_vertno, stc_2.lh_vertno)
assert_array_equal(stc_1.rh_vertno, stc_2.rh_vertno)
# smoke test for new API
if check_version('numpy', '1.17'):
simulate_sparse_stc(fwd['src'], len(labels_single_hemi), times,
labels=labels_single_hemi,
random_state=np.random.default_rng(0))
@testing.requires_testing_data
def test_simulate_stc_labels_overlap(_get_fwd_labels):
"""Test generation of source estimate, overlapping labels."""
fwd, labels = _get_fwd_labels
mylabels = []
for i, label in enumerate(labels):
new_label = Label(vertices=label.vertices,
pos=label.pos,
values=2 * i * np.ones(len(label.values)),
hemi=label.hemi,
comment=label.comment)
mylabels.append(new_label)
# Adding the last label twice
mylabels.append(new_label)
n_times = 10
tmin = 0
tstep = 1e-3
stc_data = np.ones((len(mylabels), n_times))
# Test false
with pytest.raises(RuntimeError, match='must be non-overlapping'):
simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep,
allow_overlap=False)
# test True
stc = simulate_stc(fwd['src'], mylabels, stc_data, tmin, tstep,
allow_overlap=True)
assert_equal(stc.subject, 'sample')
assert (stc.data.shape[1] == n_times)
# Some of the elements should be equal to 2 since we have duplicate labels
assert (2 in stc.data)
def test_source_simulator(_get_fwd_labels):
"""Test Source Simulator."""
fwd, _ = _get_fwd_labels
src = fwd['src']
hemi_to_ind = {'lh': 0, 'rh': 1}
tstep = 1. / 6.
label_vertices = [[], [], []]
label_vertices[0] = np.arange(1000)
label_vertices[1] = np.arange(500, 1500)
label_vertices[2] = np.arange(1000)
hemis = ['lh', 'lh', 'rh']
mylabels = []
src_vertices = []
for i, vert in enumerate(label_vertices):
new_label = Label(vertices=vert, hemi=hemis[i])
mylabels.append(new_label)
src_vertices.append(np.intersect1d(
src[hemi_to_ind[hemis[i]]]['vertno'],
new_label.vertices))
wfs = [[], [], []]
wfs[0] = np.array([0, 1., 0]) # 1d array
wfs[1] = [np.array([0, 1., 0]), # list
np.array([0, 1.5, 0])]
wfs[2] = np.array([[1, 1, 1.]]) # 2d array
events = [[], [], []]
events[0] = np.array([[0, 0, 1], [3, 0, 1]])
events[1] = np.array([[0, 0, 1], [3, 0, 1]])
events[2] = np.array([[0, 0, 1], [2, 0, 1]])
verts_lh = np.intersect1d(range(1500), src[0]['vertno'])
verts_rh = np.intersect1d(range(1000), src[1]['vertno'])
diff_01 = len(np.setdiff1d(src_vertices[0], src_vertices[1]))
diff_10 = len(np.setdiff1d(src_vertices[1], src_vertices[0]))
inter_10 = len(np.intersect1d(src_vertices[1], src_vertices[0]))
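    # Vertices covered by both lh labels receive the sum of both waveforms;
    # the expected data below is built block-wise from that overlap.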
output_data_lh = np.zeros([len(verts_lh), 6])
tmp = np.array([0, 1., 0, 0, 1, 0])
output_data_lh[:diff_01, :] = np.tile(tmp, (diff_01, 1))
tmp = np.array([0, 2, 0, 0, 2.5, 0])
output_data_lh[diff_01:diff_01 + inter_10, :] = np.tile(tmp, (inter_10, 1))
tmp = np.array([0, 1, 0, 0, 1.5, 0])
output_data_lh[diff_01 + inter_10:, :] = np.tile(tmp, (diff_10, 1))
data_rh_wf = np.array([1., 1, 2, 1, 1, 0])
output_data_rh = np.tile(data_rh_wf, (len(src_vertices[2]), 1))
output_data = np.vstack([output_data_lh, output_data_rh])
ss = SourceSimulator(src, tstep)
for i in range(3):
ss.add_data(mylabels[i], wfs[i], events[i])
stc = ss.get_stc()
stim_channel = ss.get_stim_channel()
# Stim channel data must have the same size as stc time samples
assert len(stim_channel) == stc.data.shape[1]
stim_channel = ss.get_stim_channel(0, 0)
assert len(stim_channel) == 0
assert np.all(stc.vertices[0] == verts_lh)
assert np.all(stc.vertices[1] == verts_rh)
assert_array_almost_equal(stc.lh_data, output_data_lh)
assert_array_almost_equal(stc.rh_data, output_data_rh)
assert_array_almost_equal(stc.data, output_data)
counter = 0
for stc, stim in ss:
assert stc.data.shape[1] == 6
counter += 1
assert counter == 1
half_ss = SourceSimulator(src, tstep, duration=0.5)
for i in range(3):
half_ss.add_data(mylabels[i], wfs[i], events[i])
half_stc = half_ss.get_stc()
assert_array_almost_equal(stc.data[:, :3], half_stc.data)
ss = SourceSimulator(src)
with pytest.raises(ValueError, match='No simulation parameters'):
ss.get_stc()
with pytest.raises(ValueError, match='label must be a Label'):
ss.add_data(1, wfs, events)
with pytest.raises(ValueError, match='Number of waveforms and events '
'should match'):
ss.add_data(mylabels[0], wfs[:2], events)
# Verify that the chunks have the correct length.
source_simulator = SourceSimulator(src, tstep=tstep, duration=10 * tstep)
source_simulator.add_data(mylabels[0], np.array([1, 1, 1]), [[0, 0, 0]])
source_simulator._chk_duration = 6 # Quick hack to get short chunks.
stcs = [stc for stc, _ in source_simulator]
assert len(stcs) == 2
assert stcs[0].data.shape[1] == 6
assert stcs[1].data.shape[1] == 4
run_tests_if_main()
|
import requests
import pandas as pd
import deepdish as dd
import os
import pickle
import warnings
from .analyze import analyze
from ..datageometry import DataGeometry
BASE_URL = 'https://docs.google.com/uc?export=download'
homedir = os.path.expanduser('~/')
datadir = os.path.join(homedir, 'hypertools_data')
datadict = {
'weights' : '1-zzaUMHuXHSzFcGT4vNlqcV8tMY4q7jS',
'weights_avg' : '1v_IrU6n72nTOHwD3AnT2LKgtKfHINyXt',
'weights_sample' : '1CiVSP-8sjdQN_cdn3uCrBH5lNOkvgJp1',
'spiral' : '1JB4RIgNfzGaTFWRBCzi8CQ2syTE-BnWg',
'mushrooms' : '1wRXObmwLjSHPAUWC8QvUl37iY2qRObg8',
'wiki' : '1e5lCi17bLbOXuRjiGO2eqkEWVpeCuRvM',
'sotus' : '1D2dsrLAXkC3eUUaw2VV_mldzxX5ufmkm',
'nips' : '1Vva4Xcc5kUX78R0BKkLtdCWQx9GI-FG2',
'wiki_model' : '1OrN1F39GkMPjrB2bOTgNRT1pNBmsCQsN',
'nips_model' : '1orgxWJdWYzBlU3EF2u7EDsZrp3jTNNLG',
'sotus_model' : '1g2F18WLxfFosIqhiLs79G0MpiG72mWQr'
}
def load(dataset, reduce=None, ndims=None, align=None, normalize=None):
"""
Load a .geo file or example data
Parameters
----------
dataset : string
The name of the example dataset. Can be a `.geo` file, or one of a
number of example datasets listed below.
    `weights` is a list of 2 numpy arrays, each containing average brain
activity (fMRI) from 18 subjects listening to the same story, fit using
Hierarchical Topographic Factor Analysis (HTFA) with 100 nodes. The rows
are fMRI measurements and the columns are parameters of the model.
`weights_sample` is a sample of 3 subjects from that dataset.
`weights_avg` is the dataset split in half and averaged into two groups.
    `spiral` is a numpy array containing data for a 3D spiral, used to
highlight the `procrustes` function.
    `mushrooms` is a numpy array of features (columns) describing a
    collection of 8,124 mushroom samples (rows).
`sotus` is a collection of State of the Union speeches from 1989-2018.
`wiki` is a collection of wikipedia pages used to fit wiki-model.
    `wiki_model` is a sklearn Pipeline (CountVectorizer->LatentDirichletAllocation)
trained on a sample of wikipedia articles. It can be used to transform
text to topic vectors.
normalize : str or False or None
If set to 'across', the columns of the input data will be z-scored
        across lists (default). That is, the z-scores will be computed
        with respect to column n across all arrays passed in the list. If set
to 'within', the columns will be z-scored within each list that is
passed. If set to 'row', each row of the input data will be z-scored.
If set to False, the input data will be returned with no z-scoring.
reduce : str or dict
Decomposition/manifold learning model to use. Models supported: PCA,
IncrementalPCA, SparsePCA, MiniBatchSparsePCA, KernelPCA, FastICA,
FactorAnalysis, TruncatedSVD, DictionaryLearning, MiniBatchDictionaryLearning,
TSNE, Isomap, SpectralEmbedding, LocallyLinearEmbedding, and MDS. Can be
passed as a string, but for finer control of the model parameters, pass
as a dictionary, e.g. reduce={'model' : 'PCA', 'params' : {'whiten' : True}}.
See scikit-learn specific model docs for details on parameters supported
for each model.
ndims : int
Number of dimensions to reduce
align : str or dict
If str, either 'hyper' or 'SRM'. If 'hyper', alignment algorithm will be
hyperalignment. If 'SRM', alignment algorithm will be shared response
model. You can also pass a dictionary for finer control, where the 'model'
key is a string that specifies the model and the params key is a dictionary
of parameter values (default : 'hyper').
Returns
----------
    data : Numpy Array or DataGeometry
        Example data
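
    Examples
    ----------
    A minimal sketch (assumes the sample data can be downloaded):

    >>> geo = load('weights_sample')  # returns a DataGeometry
    >>> arrays = geo.get_data()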
"""
    if dataset.endswith('.geo'):
geo = dd.io.load(dataset)
if 'dtype' in geo:
if 'list' in geo['dtype']:
geo['data'] = list(geo['data'])
elif 'df' in geo['dtype']:
geo['data'] = pd.DataFrame(geo['data'])
geo['xform_data'] = list(geo['xform_data'])
data = DataGeometry(**geo)
elif dataset in datadict.keys():
data = _load_data(dataset, datadict[dataset])
else:
        raise RuntimeError('No data loaded. Please specify a .geo file or '
                           'one of the following sample datasets: weights, '
                           'weights_avg, weights_sample, spiral, mushrooms, '
                           'wiki, nips, sotus, wiki_model, nips_model or '
                           'sotus_model.')
if data is not None:
if dataset in ('wiki_model', 'nips_model', 'sotus_model'):
return data
if isinstance(data, DataGeometry):
if any([reduce, ndims, align, normalize]):
from ..plot.plot import plot
if ndims:
if reduce is None:
reduce='IncrementalPCA'
d = analyze(data.get_data(), reduce=reduce, ndims=ndims, align=align, normalize=normalize)
return plot(d, show=False)
else:
return data
else:
return analyze(data, reduce=reduce, ndims=ndims, align=align, normalize=normalize)
def _load_data(dataset, fileid):
fullpath = os.path.join(homedir, 'hypertools_data', dataset)
if not os.path.exists(datadir):
os.makedirs(datadir)
if not os.path.exists(fullpath):
try:
_download(dataset, _load_stream(fileid))
data = _load_from_disk(dataset)
        except Exception:
raise ValueError('Download failed.')
else:
try:
data = _load_from_disk(dataset)
        except Exception:
try:
_download(dataset, _load_stream(fileid))
data = _load_from_disk(dataset)
            except Exception:
                raise ValueError('Download failed. Try deleting cached data '
                                 'in ~/hypertools_data.')
return data
def _load_stream(fileid):
def _get_confirm_token(response):
for key, value in response.cookies.items():
if key.startswith('download_warning'):
return value
return None
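    # Google Drive serves an interstitial "virus scan" page for large files;
    # it sets a download_warning cookie whose value must be echoed back as a
    # 'confirm' parameter to receive the actual file content.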
    session = requests.Session()
    response = session.get(BASE_URL, params={'id': fileid}, stream=True)
token = _get_confirm_token(response)
if token:
        params = {'id': fileid, 'confirm': token}
        response = session.get(BASE_URL, params=params, stream=True)
return response
def _download(dataset, data):
fullpath = os.path.join(homedir, 'hypertools_data', dataset)
with open(fullpath, 'wb') as f:
f.write(data.content)
def _load_from_disk(dataset):
fullpath = os.path.join(homedir, 'hypertools_data', dataset)
if dataset in ('wiki_model', 'nips_model', 'sotus_model',):
try:
with open(fullpath, 'rb') as f:
return pickle.load(f)
except ValueError as e:
print(e)
else:
with warnings.catch_warnings():
warnings.filterwarnings("ignore")
geo = dd.io.load(fullpath)
if 'dtype' in geo:
if 'list' in geo['dtype']:
geo['data'] = list(geo['data'])
elif 'df' in geo['dtype']:
geo['data'] = pd.DataFrame(geo['data'])
geo['xform_data'] = list(geo['xform_data'])
return DataGeometry(**geo)
|
import argparse
import asyncio
import logging
import sys
from typing import Optional
import discord
from discord import __version__ as discord_version
def confirm(text: str, default: Optional[bool] = None) -> bool:
if default is None:
options = "y/n"
elif default is True:
options = "Y/n"
elif default is False:
options = "y/N"
else:
raise TypeError(f"expected bool, not {type(default)}")
while True:
try:
value = input(f"{text}: [{options}] ").lower().strip()
except (KeyboardInterrupt, EOFError):
print("\nAborted!")
sys.exit(1)
if value in ("y", "yes"):
return True
if value in ("n", "no"):
return False
if value == "":
if default is not None:
return default
print("Error: invalid input")
async def interactive_config(red, token_set, prefix_set, *, print_header=True):
token = None
if print_header:
print("Red - Discord Bot | Configuration process\n")
if not token_set:
print(
"Please enter a valid token.\n"
"You can find out how to obtain a token with this guide:\n"
"https://docs.discord.red/en/stable/bot_application_guide.html#creating-a-bot-account"
)
while not token:
token = input("> ")
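            # real bot tokens are well over 50 characters, so anything
            # shorter is rejected as an obvious paste error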
            if len(token) < 50:
print("That doesn't look like a valid token.")
token = None
if token:
await red._config.token.set(token)
if not prefix_set:
prefix = ""
print(
"\nPick a prefix. A prefix is what you type before a "
"command. Example:\n"
"!help\n^ The exclamation mark is the prefix in this case.\n"
"The prefix can be multiple characters. You will be able to change it "
"later and add more of them.\nChoose your prefix:\n"
)
while not prefix:
prefix = input("Prefix> ")
if len(prefix) > 10:
if not confirm("Your prefix seems overly long. Are you sure that it's correct?"):
prefix = ""
if prefix:
await red._config.prefix.set([prefix])
return token
def positive_int(arg: str) -> int:
try:
x = int(arg)
except ValueError:
raise argparse.ArgumentTypeError("Message cache size has to be a number.")
if x < 1000:
raise argparse.ArgumentTypeError(
"Message cache size has to be greater than or equal to 1000."
)
if x > sys.maxsize:
raise argparse.ArgumentTypeError(
f"Message cache size has to be lower than or equal to {sys.maxsize}."
)
return x
def parse_cli_flags(args):
parser = argparse.ArgumentParser(
description="Red - Discord Bot", usage="redbot <instance_name> [arguments]"
)
parser.add_argument("--version", "-V", action="store_true", help="Show Red's current version")
parser.add_argument("--debuginfo", action="store_true", help="Show debug information.")
parser.add_argument(
"--list-instances",
action="store_true",
help="List all instance names setup with 'redbot-setup'",
)
parser.add_argument(
"--edit",
action="store_true",
help="Edit the instance. This can be done without console interaction "
"by passing --no-prompt and arguments that you want to change (available arguments: "
"--edit-instance-name, --edit-data-path, --copy-data, --owner, --token, --prefix).",
)
parser.add_argument(
"--edit-instance-name",
type=str,
help="New name for the instance. This argument only works with --edit argument passed.",
)
parser.add_argument(
"--overwrite-existing-instance",
action="store_true",
help="Confirm overwriting of existing instance when changing name."
" This argument only works with --edit argument passed.",
)
parser.add_argument(
"--edit-data-path",
type=str,
help=(
"New data path for the instance. This argument only works with --edit argument passed."
),
)
parser.add_argument(
"--copy-data",
action="store_true",
help="Copy data from old location. This argument only works "
"with --edit and --edit-data-path arguments passed.",
)
parser.add_argument(
"--owner",
type=int,
help="ID of the owner. Only who hosts "
"Red should be owner, this has "
"serious security implications if misused.",
)
parser.add_argument(
"--co-owner",
type=int,
default=[],
nargs="+",
help="ID of a co-owner. Only people who have access "
"to the system that is hosting Red should be "
"co-owners, as this gives them complete access "
"to the system's data. This has serious "
"security implications if misused. Can be "
"multiple.",
)
parser.add_argument(
"--prefix", "-p", action="append", help="Global prefix. Can be multiple", default=[]
)
parser.add_argument(
"--no-prompt",
action="store_true",
help="Disables console inputs. Features requiring "
"console interaction could be disabled as a "
"result",
)
parser.add_argument(
"--no-cogs", action="store_true", help="Starts Red with no cogs loaded, only core"
)
parser.add_argument(
"--load-cogs",
type=str,
nargs="+",
help="Force loading specified cogs from the installed packages. "
"Can be used with the --no-cogs flag to load these cogs exclusively.",
)
parser.add_argument(
"--dry-run",
action="store_true",
help="Makes Red quit with code 0 just before the "
"login. This is useful for testing the boot "
"process.",
)
parser.add_argument(
"--debug",
action="store_const",
dest="logging_level",
const=logging.DEBUG,
default=logging.INFO,
help="Sets the loggers level as debug",
)
parser.add_argument("--dev", action="store_true", help="Enables developer mode")
parser.add_argument(
"--mentionable",
action="store_true",
help="Allows mentioning the bot as an alternative to using the bot prefix",
)
parser.add_argument(
"--rpc",
action="store_true",
help="Enables the built-in RPC server. Please read the docs prior to enabling this!",
)
parser.add_argument(
"--rpc-port",
type=int,
default=6133,
help="The port of the built-in RPC server to use. Default to 6133.",
)
parser.add_argument("--token", type=str, help="Run Red with the given token.")
parser.add_argument(
"--no-instance",
action="store_true",
help=(
"Run Red without any existing instance. "
"The data will be saved under a temporary folder "
"and deleted on next system restart."
),
)
parser.add_argument(
"instance_name", nargs="?", help="Name of the bot instance created during `redbot-setup`."
)
parser.add_argument(
"--team-members-are-owners",
action="store_true",
dest="use_team_features",
default=False,
help=(
"Treat application team members as owners. "
"This is off by default. Owners can load and run arbitrary code. "
"Do not enable if you would not trust all of your team members with "
"all of the data on the host machine."
),
)
parser.add_argument(
"--message-cache-size",
type=positive_int,
default=1000,
help="Set the maximum number of messages to store in the internal message cache.",
)
parser.add_argument(
"--no-message-cache", action="store_true", help="Disable the internal message cache."
)
parser.add_argument(
"--disable-intent",
action="append",
choices=list(discord.Intents.VALID_FLAGS), # DEP-WARN
default=[],
help="Unsupported flag that allows disabling the given intent."
" Currently NOT SUPPORTED (and not covered by our version guarantees)"
" as Red is not prepared to work without all intents.\n"
f"Go to https://discordpy.readthedocs.io/en/v{discord_version}/api.html#discord.Intents"
" to see what each intent does.\n"
"This flag can be used multiple times to specify multiple intents.",
)
parser.add_argument(
"--force-rich-logging",
action="store_true",
dest="rich_logging",
default=None,
help="Forcefully enables the Rich logging handlers. This is normally enabled for supported active terminals.",
)
parser.add_argument(
"--force-disable-rich-logging",
action="store_false",
dest="rich_logging",
default=None,
help="Forcefully disables the Rich logging handlers.",
)
args = parser.parse_args(args)
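    # Sort prefixes in reverse lexicographic order so that a prefix which
    # extends another (e.g. "!!") is matched before the shorter one ("!").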
if args.prefix:
args.prefix = sorted(args.prefix, reverse=True)
else:
args.prefix = []
return args
|
import mock
from paasta_tools import remote_git
from paasta_tools.cli.cli import parse_args
from paasta_tools.cli.cmds import start_stop_restart
from paasta_tools.marathon_tools import MarathonServiceConfig
def test_format_tag():
expected = "refs/tags/paasta-BRANCHNAME-TIMESTAMP-stop"
actual = start_stop_restart.format_tag(
branch="BRANCHNAME", force_bounce="TIMESTAMP", desired_state="stop"
)
assert actual == expected
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.trigger_deploys", autospec=True)
@mock.patch("paasta_tools.utils.get_git_url", autospec=True)
@mock.patch("dulwich.client.get_transport_and_path", autospec=True)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.log_event", autospec=True)
def test_issue_state_change_for_service(
mock_log_event, get_transport_and_path, get_git_url, mock_trigger_deploys,
):
fake_git_url = "BLOORGRGRGRGR.yelpcorp.com"
fake_path = "somepath"
get_git_url.return_value = fake_git_url
mock_git_client = mock.Mock()
get_transport_and_path.return_value = (mock_git_client, fake_path)
start_stop_restart.issue_state_change_for_service(
MarathonServiceConfig(
cluster="fake_cluster",
instance="fake_instance",
service="fake_service",
config_dict={},
branch_dict=None,
),
"0",
"stop",
)
get_transport_and_path.assert_called_once_with(fake_git_url)
mock_git_client.send_pack.assert_called_once_with(fake_path, mock.ANY, mock.ANY)
assert mock_log_event.call_count == 1
mock_trigger_deploys.assert_called_once_with("fake_service")
def test_make_mutate_refs_func():
mutate_refs = start_stop_restart.make_mutate_refs_func(
service_config=MarathonServiceConfig(
cluster="fake_cluster",
instance="fake_instance",
service="fake_service",
config_dict={"deploy_group": "a"},
branch_dict=None,
),
force_bounce="FORCEBOUNCE",
desired_state="stop",
)
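    # The mutate function should add the new state-change tag (derived from
    # deploy group "a"'s latest -deploy ref) while leaving old refs intact.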
old_refs = {
"refs/tags/paasta-a-20160308T053933-deploy": "hash_for_a",
"refs/tags/paasta-b-20160308T053933-deploy": "hash_for_b",
"refs/tags/paasta-c-20160308T053933-deploy": "hash_for_c",
"refs/tags/paasta-d-20160308T053933-deploy": "hash_for_d",
}
expected = dict(old_refs)
expected.update(
{"refs/tags/paasta-fake_cluster.fake_instance-FORCEBOUNCE-stop": "hash_for_a"}
)
actual = mutate_refs(old_refs)
assert actual == expected
def test_log_event():
with mock.patch(
"paasta_tools.utils.get_username", autospec=True, return_value="fake_user"
), mock.patch(
"paasta_tools.utils.get_hostname", autospec=True, return_value="fake_fqdn"
), mock.patch(
"socket.getfqdn", autospec=True, return_value="fake_fqdn"
), mock.patch(
"paasta_tools.utils._log", autospec=True
) as mock_log, mock.patch(
"paasta_tools.utils._log_audit", autospec=True
) as mock_log_audit:
service_config = MarathonServiceConfig(
cluster="fake_cluster",
instance="fake_instance",
service="fake_service",
config_dict={"deploy_group": "fake_deploy_group"},
branch_dict=None,
)
start_stop_restart.log_event(service_config, "stop")
mock_log.assert_called_once_with(
instance="fake_instance",
service="fake_service",
level="event",
component="deploy",
cluster="fake_cluster",
line=(
"Issued request to change state of fake_instance (an instance of "
"fake_service) to 'stop' by fake_user@fake_fqdn"
),
)
mock_log_audit.assert_called_once_with(
action="stop",
instance="fake_instance",
service="fake_service",
cluster="fake_cluster",
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.confirm_to_continue", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.apply_args_filters", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.issue_state_change_for_service",
autospec=True,
)
@mock.patch("paasta_tools.utils.format_timestamp", autospec=True)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_latest_deployment_tag", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.get_remote_refs", autospec=True)
@mock.patch("paasta_tools.utils.InstanceConfig", autospec=True)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_instance_config", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.utils.get_git_url", autospec=True)
@mock.patch("paasta_tools.cli.cmds.status.list_clusters", autospec=True)
def test_paasta_start_or_stop(
mock_list_clusters,
mock_get_git_url,
mock_get_instance_config,
mock_instance_config,
get_remote_refs,
mock_get_latest_deployment_tag,
mock_format_timestamp,
mock_issue_state_change_for_service,
mock_apply_args_filters,
mock_confirm_to_continue,
):
args, _ = parse_args(
[
"start",
"-s",
"fake_service",
"-i",
"main1,canary",
"-c",
"cluster1,cluster2",
"-d",
"/soa/dir",
]
)
mock_list_clusters.return_value = ["cluster1", "cluster2"]
mock_get_git_url.return_value = "fake_git_url"
mock_get_instance_config.return_value = mock_instance_config
mock_instance_config.get_deploy_group.return_value = "some_group"
get_remote_refs.return_value = ["not_a_real_tag", "fake_tag"]
mock_get_latest_deployment_tag.return_value = ("not_a_real_tag", None)
mock_format_timestamp.return_value = "not_a_real_timestamp"
mock_apply_args_filters.return_value = {
"cluster1": {"fake_service": {"main1": None, "canary": None}},
"cluster2": {"fake_service": {"main1": None, "canary": None}},
}
mock_confirm_to_continue.return_value = True
ret = args.command(args)
c1_get_instance_config_call = mock.call(
service="fake_service",
cluster="cluster1",
instance="main1",
soa_dir="/soa/dir",
load_deployments=False,
)
c2_get_instance_config_call = mock.call(
service="fake_service",
cluster="cluster1",
instance="canary",
soa_dir="/soa/dir",
load_deployments=False,
)
c3_get_instance_config_call = mock.call(
service="fake_service",
cluster="cluster2",
instance="main1",
soa_dir="/soa/dir",
load_deployments=False,
)
c4_get_instance_config_call = mock.call(
service="fake_service",
cluster="cluster2",
instance="canary",
soa_dir="/soa/dir",
load_deployments=False,
)
mock_get_instance_config.assert_has_calls(
[
c1_get_instance_config_call,
c2_get_instance_config_call,
c3_get_instance_config_call,
c4_get_instance_config_call,
],
any_order=True,
)
mock_get_latest_deployment_tag.assert_called_with(
["not_a_real_tag", "fake_tag"], "some_group"
)
mock_issue_state_change_for_service.assert_called_with(
service_config=mock_instance_config,
force_bounce="not_a_real_timestamp",
desired_state="start",
)
assert mock_issue_state_change_for_service.call_count == 4
assert ret == 0
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.confirm_to_continue", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.apply_args_filters", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.issue_state_change_for_service",
autospec=True,
)
@mock.patch("paasta_tools.utils.format_timestamp", autospec=True)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_latest_deployment_tag", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.get_remote_refs", autospec=True)
@mock.patch("paasta_tools.utils.InstanceConfig", autospec=True)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_instance_config", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.utils.get_git_url", autospec=True)
@mock.patch("paasta_tools.cli.cmds.status.list_clusters", autospec=True)
def test_paasta_start_or_stop_with_deploy_group(
mock_list_clusters,
mock_get_git_url,
mock_get_instance_config,
mock_instance_config,
mock_get_remote_refs,
mock_get_latest_deployment_tag,
mock_format_timestamp,
mock_issue_state_change_for_service,
mock_apply_args_filters,
mock_confirm_to_continue,
):
args, _ = parse_args(
[
"start",
"-s",
"fake_service",
"-c",
"cluster1",
"-l",
"fake_group",
"-d",
"/soa/dir",
]
)
mock_list_clusters.return_value = ["cluster1", "cluster2"]
mock_get_git_url.return_value = "fake_git_url"
mock_get_instance_config.return_value = mock_instance_config
mock_instance_config.get_deploy_group.return_value = args.deploy_group
mock_get_remote_refs.return_value = ["not_a_real_tag", "fake_tag"]
mock_get_latest_deployment_tag.return_value = ("not_a_real_tag", None)
mock_format_timestamp.return_value = "not_a_real_timestamp"
mock_apply_args_filters.return_value = {
"cluster1": {"fake_service": {"instance1": None}}
}
mock_confirm_to_continue.return_value = True
ret = args.command(args)
mock_get_instance_config.assert_called_once_with(
service="fake_service",
cluster="cluster1",
instance="instance1",
soa_dir="/soa/dir",
load_deployments=False,
)
mock_get_latest_deployment_tag.assert_called_with(
["not_a_real_tag", "fake_tag"], args.deploy_group
)
mock_issue_state_change_for_service.assert_called_once_with(
service_config=mock_instance_config,
force_bounce="not_a_real_timestamp",
desired_state="start",
)
assert ret == 0
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.confirm_to_continue", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.apply_args_filters", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.issue_state_change_for_service",
autospec=True,
)
@mock.patch("paasta_tools.utils.format_timestamp", autospec=True)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_latest_deployment_tag", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.get_remote_refs", autospec=True)
@mock.patch("paasta_tools.utils.InstanceConfig", autospec=True)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_instance_config", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.utils.get_git_url", autospec=True)
@mock.patch("paasta_tools.cli.cmds.status.list_clusters", autospec=True)
def test_stop_or_start_figures_out_correct_instances(
mock_list_clusters,
mock_get_git_url,
mock_get_instance_config,
mock_instance_config,
mock_get_remote_refs,
mock_get_latest_deployment_tag,
mock_format_timestamp,
mock_issue_state_change_for_service,
mock_apply_args_filters,
mock_confirm_to_continue,
):
args, _ = parse_args(
[
"start",
"-s",
"fake_service",
"-i",
"main1,canary",
"-c",
"cluster1,cluster2",
"-d",
"/soa/dir",
]
)
mock_list_clusters.return_value = ["cluster1", "cluster2"]
mock_get_git_url.return_value = "fake_git_url"
mock_get_instance_config.return_value = mock_instance_config
mock_instance_config.get_deploy_group.return_value = "some_group"
mock_get_remote_refs.return_value = ["not_a_real_tag", "fake_tag"]
mock_get_latest_deployment_tag.return_value = ("not_a_real_tag", None)
mock_format_timestamp.return_value = "not_a_real_timestamp"
mock_apply_args_filters.return_value = {
"cluster1": {"fake_service": {"main1": None}},
"cluster2": {"fake_service": {"main1": None, "canary": None}},
}
mock_confirm_to_continue.return_value = True
ret = args.command(args)
c1_get_instance_config_call = mock.call(
service="fake_service",
cluster="cluster1",
instance="main1",
soa_dir="/soa/dir",
load_deployments=False,
)
c2_get_instance_config_call = mock.call(
service="fake_service",
cluster="cluster2",
instance="main1",
soa_dir="/soa/dir",
load_deployments=False,
)
c3_get_instance_config_call = mock.call(
service="fake_service",
cluster="cluster2",
instance="canary",
soa_dir="/soa/dir",
load_deployments=False,
)
mock_get_instance_config.assert_has_calls(
[
c1_get_instance_config_call,
c2_get_instance_config_call,
c3_get_instance_config_call,
],
any_order=True,
)
mock_get_latest_deployment_tag.assert_called_with(
["not_a_real_tag", "fake_tag"], "some_group"
)
mock_issue_state_change_for_service.assert_called_with(
service_config=mock_instance_config,
force_bounce="not_a_real_timestamp",
desired_state="start",
)
assert mock_issue_state_change_for_service.call_count == 3
assert ret == 0
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.confirm_to_continue", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.apply_args_filters", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.get_remote_refs", autospec=True)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_instance_config", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.utils.get_git_url", autospec=True)
@mock.patch("paasta_tools.cli.cmds.status.list_clusters", autospec=True)
def test_stop_or_start_handle_ls_remote_failures(
mock_list_clusters,
mock_get_git_url,
mock_get_instance_config,
mock_get_remote_refs,
mock_apply_args_filters,
mock_confirm_to_continue,
capfd,
):
args, _ = parse_args(
["restart", "-s", "fake_service", "-c", "cluster1", "-d", "/soa/dir"]
)
mock_list_clusters.return_value = ["cluster1"]
mock_get_git_url.return_value = "fake_git_url"
mock_get_instance_config.return_value = None
mock_get_remote_refs.side_effect = remote_git.LSRemoteException
mock_apply_args_filters.return_value = {
"cluster1": {"fake_service": {"instance1": mock.Mock()}}
}
mock_confirm_to_continue.return_value = True
assert args.command(args) == 1
assert "may be down" in capfd.readouterr()[0]
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.confirm_to_continue", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.apply_args_filters", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_instance_config", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.get_remote_refs", autospec=True)
@mock.patch("paasta_tools.cli.cmds.status.list_clusters", autospec=True)
def test_start_or_stop_bad_refs(
mock_list_clusters,
mock_get_remote_refs,
mock_get_instance_config,
mock_apply_args_filters,
mock_confirm_to_continue,
capfd,
):
args, _ = parse_args(
[
"restart",
"-s",
"fake_service",
"-i",
"fake_instance",
"-c",
"fake_cluster1,fake_cluster2",
"-d",
"/fake/soa/dir",
]
)
mock_list_clusters.return_value = ["fake_cluster1", "fake_cluster2"]
mock_get_instance_config.return_value = MarathonServiceConfig(
cluster="fake_cluster1",
instance="fake_instance",
service="fake_service",
config_dict={},
branch_dict=None,
)
mock_get_remote_refs.return_value = {
"refs/tags/paasta-deliberatelyinvalidref-20160304T053919-deploy": "70f7245ccf039d778c7e527af04eac00d261d783"
}
mock_apply_args_filters.return_value = {
"fake_cluster1": {"fake_service": {"fake_instance": None}},
"fake_cluster2": {"fake_service": {"fake_instance": None}},
}
mock_confirm_to_continue.return_value = True
assert args.command(args) == 1
assert "deployed there yet?" in capfd.readouterr()[0]
def test_cluster_list_defaults_to_all():
return True
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.apply_args_filters", autospec=True
)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.issue_state_change_for_service",
autospec=True,
)
@mock.patch("paasta_tools.utils.format_timestamp", autospec=True)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_latest_deployment_tag", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.get_remote_refs", autospec=True)
@mock.patch("paasta_tools.utils.InstanceConfig", autospec=True)
@mock.patch(
"paasta_tools.cli.cmds.start_stop_restart.get_instance_config", autospec=True
)
@mock.patch("paasta_tools.cli.cmds.start_stop_restart.utils.get_git_url", autospec=True)
@mock.patch("paasta_tools.cli.cmds.status.list_clusters", autospec=True)
def test_stop_or_start_warn_on_multi_instance(
mock_list_clusters,
mock_get_git_url,
mock_get_instance_config,
mock_instance_config,
get_remote_refs,
mock_get_latest_deployment_tag,
mock_format_timestamp,
mock_issue_state_change_for_service,
mock_apply_args_filters,
capfd,
):
args, _ = parse_args(
[
"start",
"-s",
"fake_service,other_service",
"-c",
"cluster1,cluster2",
"-d",
"/soa/dir",
]
)
mock_list_clusters.return_value = ["cluster1", "cluster2"]
mock_get_git_url.return_value = "fake_git_url"
mock_get_instance_config.return_value = mock_instance_config
mock_instance_config.get_deploy_group.return_value = "some_group"
get_remote_refs.return_value = ["not_a_real_tag", "fake_tag"]
mock_get_latest_deployment_tag.return_value = ("not_a_real_tag", None)
mock_format_timestamp.return_value = "not_a_real_timestamp"
mock_apply_args_filters.return_value = {
"cluster1": {"fake_service": {"main1": None}, "other_service": {"main1": None}},
"cluster2": {"fake_service": {"main1": None, "canary": None}},
}
ret = args.command(args)
out, err = capfd.readouterr()
assert ret == 1
assert "Warning: trying to start/stop/restart multiple services" in out
|
import pytest
from homeassistant import data_entry_flow
from homeassistant.components.tradfri import config_flow
from tests.async_mock import patch
from tests.common import MockConfigEntry
@pytest.fixture
def mock_auth():
"""Mock authenticate."""
with patch(
"homeassistant.components.tradfri.config_flow.authenticate"
) as mock_auth:
yield mock_auth
async def test_user_connection_successful(hass, mock_auth, mock_entry_setup):
"""Test a successful connection."""
mock_auth.side_effect = lambda hass, host, code: {"host": host, "gateway_id": "bla"}
flow = await hass.config_entries.flow.async_init(
"tradfri", context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"], {"host": "123.123.123.123", "security_code": "abcd"}
)
assert len(mock_entry_setup.mock_calls) == 1
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].data == {
"host": "123.123.123.123",
"gateway_id": "bla",
"import_groups": False,
}
async def test_user_connection_timeout(hass, mock_auth, mock_entry_setup):
"""Test a connection timeout."""
mock_auth.side_effect = config_flow.AuthError("timeout")
flow = await hass.config_entries.flow.async_init(
"tradfri", context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"], {"host": "127.0.0.1", "security_code": "abcd"}
)
assert len(mock_entry_setup.mock_calls) == 0
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"base": "timeout"}
async def test_user_connection_bad_key(hass, mock_auth, mock_entry_setup):
"""Test a connection with bad key."""
mock_auth.side_effect = config_flow.AuthError("invalid_security_code")
flow = await hass.config_entries.flow.async_init(
"tradfri", context={"source": "user"}
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"], {"host": "127.0.0.1", "security_code": "abcd"}
)
assert len(mock_entry_setup.mock_calls) == 0
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
assert result["errors"] == {"security_code": "invalid_security_code"}
async def test_discovery_connection(hass, mock_auth, mock_entry_setup):
"""Test a connection via discovery."""
mock_auth.side_effect = lambda hass, host, code: {"host": host, "gateway_id": "bla"}
flow = await hass.config_entries.flow.async_init(
"tradfri",
context={"source": "homekit"},
data={"host": "123.123.123.123", "properties": {"id": "homekit-id"}},
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"], {"security_code": "abcd"}
)
assert len(mock_entry_setup.mock_calls) == 1
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].unique_id == "homekit-id"
assert result["result"].data == {
"host": "123.123.123.123",
"gateway_id": "bla",
"import_groups": False,
}
async def test_import_connection(hass, mock_auth, mock_entry_setup):
"""Test a connection via import."""
mock_auth.side_effect = lambda hass, host, code: {
"host": host,
"gateway_id": "bla",
"identity": "mock-iden",
"key": "mock-key",
}
flow = await hass.config_entries.flow.async_init(
"tradfri",
context={"source": "import"},
data={"host": "123.123.123.123", "import_groups": True},
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"], {"security_code": "abcd"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].data == {
"host": "123.123.123.123",
"gateway_id": "bla",
"identity": "mock-iden",
"key": "mock-key",
"import_groups": True,
}
assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_no_groups(hass, mock_auth, mock_entry_setup):
"""Test a connection via import and no groups allowed."""
mock_auth.side_effect = lambda hass, host, code: {
"host": host,
"gateway_id": "bla",
"identity": "mock-iden",
"key": "mock-key",
}
flow = await hass.config_entries.flow.async_init(
"tradfri",
context={"source": "import"},
data={"host": "123.123.123.123", "import_groups": False},
)
result = await hass.config_entries.flow.async_configure(
flow["flow_id"], {"security_code": "abcd"}
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].data == {
"host": "123.123.123.123",
"gateway_id": "bla",
"identity": "mock-iden",
"key": "mock-key",
"import_groups": False,
}
assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_legacy(hass, mock_gateway_info, mock_entry_setup):
"""Test a connection via import."""
mock_gateway_info.side_effect = lambda hass, host, identity, key: {
"host": host,
"identity": identity,
"key": key,
"gateway_id": "mock-gateway",
}
result = await hass.config_entries.flow.async_init(
"tradfri",
context={"source": "import"},
data={"host": "123.123.123.123", "key": "mock-key", "import_groups": True},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].data == {
"host": "123.123.123.123",
"gateway_id": "mock-gateway",
"identity": "homeassistant",
"key": "mock-key",
"import_groups": True,
}
assert len(mock_gateway_info.mock_calls) == 1
assert len(mock_entry_setup.mock_calls) == 1
async def test_import_connection_legacy_no_groups(
hass, mock_gateway_info, mock_entry_setup
):
"""Test a connection via legacy import and no groups allowed."""
mock_gateway_info.side_effect = lambda hass, host, identity, key: {
"host": host,
"identity": identity,
"key": key,
"gateway_id": "mock-gateway",
}
result = await hass.config_entries.flow.async_init(
"tradfri",
context={"source": "import"},
data={"host": "123.123.123.123", "key": "mock-key", "import_groups": False},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_CREATE_ENTRY
assert result["result"].data == {
"host": "123.123.123.123",
"gateway_id": "mock-gateway",
"identity": "homeassistant",
"key": "mock-key",
"import_groups": False,
}
assert len(mock_gateway_info.mock_calls) == 1
assert len(mock_entry_setup.mock_calls) == 1
async def test_discovery_duplicate_aborted(hass):
"""Test a duplicate discovery host aborts and updates existing entry."""
entry = MockConfigEntry(
domain="tradfri", data={"host": "some-host"}, unique_id="homekit-id"
)
entry.add_to_hass(hass)
flow = await hass.config_entries.flow.async_init(
"tradfri",
context={"source": "homekit"},
data={"host": "new-host", "properties": {"id": "homekit-id"}},
)
assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert flow["reason"] == "already_configured"
assert entry.data["host"] == "new-host"
async def test_import_duplicate_aborted(hass):
"""Test a duplicate import host is ignored."""
MockConfigEntry(domain="tradfri", data={"host": "some-host"}).add_to_hass(hass)
flow = await hass.config_entries.flow.async_init(
"tradfri", context={"source": "import"}, data={"host": "some-host"}
)
assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert flow["reason"] == "already_configured"
async def test_duplicate_discovery(hass, mock_auth, mock_entry_setup):
"""Test a duplicate discovery in progress is ignored."""
result = await hass.config_entries.flow.async_init(
"tradfri",
context={"source": "homekit"},
data={"host": "123.123.123.123", "properties": {"id": "homekit-id"}},
)
assert result["type"] == data_entry_flow.RESULT_TYPE_FORM
result2 = await hass.config_entries.flow.async_init(
"tradfri",
context={"source": "homekit"},
data={"host": "123.123.123.123", "properties": {"id": "homekit-id"}},
)
assert result2["type"] == data_entry_flow.RESULT_TYPE_ABORT
async def test_discovery_updates_unique_id(hass):
"""Test a duplicate discovery host aborts and updates existing entry."""
entry = MockConfigEntry(
domain="tradfri",
data={"host": "some-host"},
)
entry.add_to_hass(hass)
flow = await hass.config_entries.flow.async_init(
"tradfri",
context={"source": "homekit"},
data={"host": "some-host", "properties": {"id": "homekit-id"}},
)
assert flow["type"] == data_entry_flow.RESULT_TYPE_ABORT
assert flow["reason"] == "already_configured"
assert entry.unique_id == "homekit-id"
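# Hedged sketch of the mock_entry_setup fixture relied on above (its real
# definition lives in the integration's conftest; the patch target is an
# assumption):
#
#     @pytest.fixture
#     def mock_entry_setup():
#         """Mock entry setup."""
#         with patch(
#             "homeassistant.components.tradfri.async_setup_entry",
#             return_value=True,
#         ) as mock_setup:
#             yield mock_setup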
|
from django_filters import filters
from djng.forms import fields
class Filter(filters.Filter):
field_class = fields.Field
class CharFilter(filters.CharFilter):
field_class = fields.CharField
class BooleanFilter(filters.BooleanFilter):
field_class = fields.NullBooleanField
class ChoiceFilter(filters.ChoiceFilter):
field_class = fields.ChoiceField
class TypedChoiceFilter(filters.TypedChoiceFilter):
field_class = fields.TypedChoiceField
class UUIDFilter(filters.UUIDFilter):
field_class = fields.UUIDField
class MultipleChoiceFilter(filters.MultipleChoiceFilter):
field_class = fields.MultipleChoiceField
class TypedMultipleChoiceFilter(filters.TypedMultipleChoiceFilter):
field_class = fields.TypedMultipleChoiceField
class DateFilter(filters.DateFilter):
field_class = fields.DateField
class DateTimeFilter(filters.DateTimeFilter):
field_class = fields.DateTimeField
class TimeFilter(filters.TimeFilter):
field_class = fields.TimeField
class DurationFilter(filters.DurationFilter):
field_class = fields.DurationField
class ModelChoiceFilter(filters.ModelChoiceFilter):
field_class = fields.ModelChoiceField
class ModelMultipleChoiceFilter(filters.ModelMultipleChoiceFilter):
field_class = fields.ModelMultipleChoiceField
class NumberFilter(filters.NumberFilter):
field_class = fields.DecimalField
class NumericRangeFilter(filters.NumericRangeFilter):
"""
TODO: we first must redeclare the RangeField
"""
|
import os
import warnings
import numpy as np
from ..core import indexing
from ..core.dataarray import DataArray
from ..core.utils import is_scalar
from .common import BackendArray
from .file_manager import CachingFileManager
from .locks import SerializableLock
# TODO: should this be GDAL_LOCK instead?
RASTERIO_LOCK = SerializableLock()
_ERROR_MSG = (
"The kind of indexing operation you are trying to do is not "
"valid on rasterio files. Try to load your data with ds.load()"
"first."
)
class RasterioArrayWrapper(BackendArray):
"""A wrapper around rasterio dataset objects"""
def __init__(self, manager, lock, vrt_params=None):
from rasterio.vrt import WarpedVRT
self.manager = manager
self.lock = lock
# cannot save riods as an attribute: this would break pickleability
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
self.vrt_params = vrt_params
self._shape = (riods.count, riods.height, riods.width)
dtypes = riods.dtypes
if not np.all(np.asarray(dtypes) == dtypes[0]):
raise ValueError("All bands should have the same dtype")
self._dtype = np.dtype(dtypes[0])
@property
def dtype(self):
return self._dtype
@property
def shape(self):
return self._shape
def _get_indexer(self, key):
"""Get indexer for rasterio array.
        Parameters
        ----------
        key : tuple of int
Returns
-------
band_key: an indexer for the 1st dimension
window: two tuples. Each consists of (start, stop).
squeeze_axis: axes to be squeezed
np_ind: indexer for loaded numpy array
See also
--------
indexing.decompose_indexer
"""
assert len(key) == 3, "rasterio datasets should always be 3D"
# bands cannot be windowed but they can be listed
band_key = key[0]
np_inds = []
# bands (axis=0) cannot be windowed but they can be listed
if isinstance(band_key, slice):
start, stop, step = band_key.indices(self.shape[0])
band_key = np.arange(start, stop, step)
# be sure we give out a list
band_key = (np.asarray(band_key) + 1).tolist()
if isinstance(band_key, list): # if band_key is not a scalar
np_inds.append(slice(None))
# but other dims can only be windowed
window = []
squeeze_axis = []
for i, (k, n) in enumerate(zip(key[1:], self.shape[1:])):
if isinstance(k, slice):
# step is always positive. see indexing.decompose_indexer
start, stop, step = k.indices(n)
np_inds.append(slice(None, None, step))
elif is_scalar(k):
# windowed operations will always return an array
# we will have to squeeze it later
squeeze_axis.append(-(2 - i))
start = k
stop = k + 1
else:
start, stop = np.min(k), np.max(k) + 1
np_inds.append(k - start)
window.append((start, stop))
if isinstance(key[1], np.ndarray) and isinstance(key[2], np.ndarray):
# do outer-style indexing
np_inds[-2:] = np.ix_(*np_inds[-2:])
return band_key, tuple(window), tuple(squeeze_axis), tuple(np_inds)
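    # Hedged illustration of the decomposition above (assumes a 3-band,
    # 10 x 10 raster): a key of (0, slice(2, 6), 3) yields
    #   band_key = 1                       (bands are 1-based for rasterio)
    #   window = ((2, 6), (3, 4))          (the scalar 3 becomes window 3..4)
    #   squeeze_axis = (-1,)               (the scalar x index is squeezed)
    #   np_inds = (slice(None, None, 1),)  (applied to the loaded array)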
def _getitem(self, key):
from rasterio.vrt import WarpedVRT
band_key, window, squeeze_axis, np_inds = self._get_indexer(key)
if not band_key or any(start == stop for (start, stop) in window):
# no need to do IO
shape = (len(band_key),) + tuple(stop - start for (start, stop) in window)
out = np.zeros(shape, dtype=self.dtype)
else:
with self.lock:
riods = self.manager.acquire(needs_lock=False)
if self.vrt_params is not None:
riods = WarpedVRT(riods, **self.vrt_params)
out = riods.read(band_key, window=window)
if squeeze_axis:
out = np.squeeze(out, axis=squeeze_axis)
return out[np_inds]
def __getitem__(self, key):
return indexing.explicit_indexing_adapter(
key, self.shape, indexing.IndexingSupport.OUTER, self._getitem
)
def _parse_envi(meta):
"""Parse ENVI metadata into Python data structures.
See the link for information on the ENVI header file format:
http://www.harrisgeospatial.com/docs/enviheaderfiles.html
Parameters
----------
meta : dict
Dictionary of keys and str values to parse, as returned by the rasterio
tags(ns='ENVI') call.
Returns
-------
parsed_meta : dict
Dictionary containing the original keys and the parsed values
"""
def parsevec(s):
return np.fromstring(s.strip("{}"), dtype="float", sep=",")
def default(s):
return s.strip("{}")
parse = {"wavelength": parsevec, "fwhm": parsevec}
parsed_meta = {k: parse.get(k, default)(v) for k, v in meta.items()}
return parsed_meta
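# Hedged example of the parsing above (metadata values invented for
# illustration):
#     _parse_envi({"wavelength": "{ 0.43, 0.55 }", "description": "{demo}"})
# returns {"wavelength": array([0.43, 0.55]), "description": "demo"}.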
def open_rasterio(filename, parse_coordinates=None, chunks=None, cache=None, lock=None):
"""Open a file with rasterio (experimental).
This should work with any file that rasterio can open (most often:
geoTIFF). The x and y coordinates are generated automatically from the
file's geoinformation, shifted to the center of each pixel (see
`"PixelIsArea" Raster Space
<http://web.archive.org/web/20160326194152/http://remotesensing.org/geotiff/spec/geotiff2.5.html#2.5.2>`_
for more information).
You can generate 2D coordinates from the file's attributes with::
from affine import Affine
da = xr.open_rasterio('path_to_file.tif')
transform = Affine.from_gdal(*da.attrs['transform'])
nx, ny = da.sizes['x'], da.sizes['y']
x, y = np.meshgrid(np.arange(nx)+0.5, np.arange(ny)+0.5) * transform
Parameters
----------
filename : str, rasterio.DatasetReader, or rasterio.WarpedVRT
Path to the file to open. Or already open rasterio dataset.
parse_coordinates : bool, optional
Whether to parse the x and y coordinates out of the file's
``transform`` attribute or not. The default is to automatically
parse the coordinates only if they are rectilinear (1D).
It can be useful to set ``parse_coordinates=False``
if your files are very large or if you don't need the coordinates.
chunks : int, tuple or dict, optional
Chunk sizes along each dimension, e.g., ``5``, ``(5, 5)`` or
        ``{'x': 5, 'y': 5}``. If chunks is provided, it is used to load the new
DataArray into a dask array.
cache : bool, optional
If True, cache data loaded from the underlying datastore in memory as
NumPy arrays when accessed to avoid reading from the underlying data-
store multiple times. Defaults to True unless you specify the `chunks`
argument to use dask, in which case it defaults to False.
lock : False, True or threading.Lock, optional
If chunks is provided, this argument is passed on to
:py:func:`dask.array.from_array`. By default, a global lock is
used to avoid issues with concurrent access to the same file when using
dask's multithreaded backend.
Returns
-------
data : DataArray
The newly created DataArray.
"""
import rasterio
from rasterio.vrt import WarpedVRT
vrt_params = None
if isinstance(filename, rasterio.io.DatasetReader):
filename = filename.name
elif isinstance(filename, rasterio.vrt.WarpedVRT):
vrt = filename
filename = vrt.src_dataset.name
vrt_params = dict(
src_crs=vrt.src_crs.to_string(),
crs=vrt.crs.to_string(),
resampling=vrt.resampling,
tolerance=vrt.tolerance,
src_nodata=vrt.src_nodata,
nodata=vrt.nodata,
width=vrt.width,
height=vrt.height,
src_transform=vrt.src_transform,
transform=vrt.transform,
dtype=vrt.working_dtype,
warp_extras=vrt.warp_extras,
)
if lock is None:
lock = RASTERIO_LOCK
manager = CachingFileManager(rasterio.open, filename, lock=lock, mode="r")
riods = manager.acquire()
if vrt_params is not None:
riods = WarpedVRT(riods, **vrt_params)
if cache is None:
cache = chunks is None
coords = {}
# Get bands
if riods.count < 1:
raise ValueError("Unknown dims")
coords["band"] = np.asarray(riods.indexes)
# Get coordinates
if riods.transform.is_rectilinear:
# 1d coordinates
parse = True if parse_coordinates is None else parse_coordinates
if parse:
nx, ny = riods.width, riods.height
# xarray coordinates are pixel centered
x, _ = riods.transform * (np.arange(nx) + 0.5, np.zeros(nx) + 0.5)
_, y = riods.transform * (np.zeros(ny) + 0.5, np.arange(ny) + 0.5)
coords["y"] = y
coords["x"] = x
else:
# 2d coordinates
parse = False if (parse_coordinates is None) else parse_coordinates
if parse:
warnings.warn(
"The file coordinates' transformation isn't "
"rectilinear: xarray won't parse the coordinates "
"in this case. Set `parse_coordinates=False` to "
"suppress this warning.",
RuntimeWarning,
stacklevel=3,
)
# Attributes
attrs = {}
# Affine transformation matrix (always available)
# This describes coefficients mapping pixel coordinates to CRS
# For serialization store as tuple of 6 floats, the last row being
# always (0, 0, 1) per definition (see
# https://github.com/sgillies/affine)
attrs["transform"] = tuple(riods.transform)[:6]
if hasattr(riods, "crs") and riods.crs:
# CRS is a dict-like object specific to rasterio
# If CRS is not None, we convert it back to a PROJ4 string using
# rasterio itself
try:
attrs["crs"] = riods.crs.to_proj4()
except AttributeError:
attrs["crs"] = riods.crs.to_string()
if hasattr(riods, "res"):
# (width, height) tuple of pixels in units of CRS
attrs["res"] = riods.res
if hasattr(riods, "is_tiled"):
# Is the TIF tiled? (bool)
# We cast it to an int for netCDF compatibility
attrs["is_tiled"] = np.uint8(riods.is_tiled)
if hasattr(riods, "nodatavals"):
# The nodata values for the raster bands
attrs["nodatavals"] = tuple(
np.nan if nodataval is None else nodataval for nodataval in riods.nodatavals
)
if hasattr(riods, "scales"):
# The scale values for the raster bands
attrs["scales"] = riods.scales
if hasattr(riods, "offsets"):
# The offset values for the raster bands
attrs["offsets"] = riods.offsets
if hasattr(riods, "descriptions") and any(riods.descriptions):
# Descriptions for each dataset band
attrs["descriptions"] = riods.descriptions
if hasattr(riods, "units") and any(riods.units):
# A list of units string for each dataset band
attrs["units"] = riods.units
# Parse extra metadata from tags, if supported
parsers = {"ENVI": _parse_envi, "GTiff": lambda m: m}
driver = riods.driver
if driver in parsers:
if driver == "GTiff":
meta = parsers[driver](riods.tags())
else:
meta = parsers[driver](riods.tags(ns=driver))
for k, v in meta.items():
# Add values as coordinates if they match the band count,
# as attributes otherwise
if isinstance(v, (list, np.ndarray)) and len(v) == riods.count:
coords[k] = ("band", np.asarray(v))
else:
attrs[k] = v
data = indexing.LazilyOuterIndexedArray(
RasterioArrayWrapper(manager, lock, vrt_params)
)
# this lets you write arrays loaded with rasterio
data = indexing.CopyOnWriteArray(data)
if cache and chunks is None:
data = indexing.MemoryCachedArray(data)
result = DataArray(data=data, dims=("band", "y", "x"), coords=coords, attrs=attrs)
if chunks is not None:
from dask.base import tokenize
# augment the token with the file modification time
try:
mtime = os.path.getmtime(filename)
except OSError:
# the filename is probably an s3 bucket rather than a regular file
mtime = None
token = tokenize(filename, mtime, chunks)
name_prefix = "open_rasterio-%s" % token
result = result.chunk(chunks, name_prefix=name_prefix, token=token)
# Make the file closeable
result._file_obj = manager
return result
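# Hedged usage sketch (assumes a GeoTIFF at the hypothetical path below;
# chunking requires dask to be installed):
if __name__ == "__main__":
    da = open_rasterio("example.tif", chunks={"x": 512, "y": 512})
    print(da.dims, dict(da.sizes), da.attrs.get("crs"))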
|
import numpy as np
import pandas as pd
DEFAULT_GRAPH_PARAMS = {
'charset': "latin1",
'outputorder': "edgesfirst",
'overlap': "prism"
}
DEFAULT_NODE_PARAMS = {
'fontname': "'IBM Plex Sans'",
'fontsize': 10
}
class ComponentDiGraph(object):
def __init__(self, orig_edge_df, id_node_df, node_df, edge_df, components, graph_params=None, node_params=None):
self.edge_df = edge_df
self.orig_edge_df = orig_edge_df
self.id_node_df = id_node_df
self.node_df = node_df
self.components = components
self.graph_params = DEFAULT_GRAPH_PARAMS
if graph_params is not None:
for k, v in graph_params.items():
self.graph_params[k] = v
self.node_params = DEFAULT_NODE_PARAMS
if node_params is not None:
for k, v in node_params.items():
self.node_params[k] = v
def get_dot(self, component):
# graph = '''digraph {\n node [fixedsize="true", fontname="'IBM Plex Sans'", height="0.0001", label="\n", margin="0", shape="plaintext", width="0.0001"];\n'''
# graph = '''digraph \n{\n graph [bb="0,0,1297.2,881.5", charset="latin1", outputorder="edgesfirst", overlap="prism"]\n node [fontname="'IBM Plex Sans'", fontsize=10] ;\n'''
graph = '''digraph \n{\n graph [%s]\n node [%s] ;\n''' % (
            self._format_graphviz_parameters(self.graph_params),
            self._format_graphviz_parameters(self.node_params)
)
mynode_df = self.id_node_df[self.id_node_df['Component'] == component]
nodes = '\n'.join(mynode_df
.reset_index()
.apply(lambda x: '''"%s" [label="%s"] ;''' % (x['index'], x['name']), axis=1)
.values)
edges = '\n'.join(self.edge_df[self.edge_df.Component == component].
apply(lambda x: '"%s" -> "%s" ;' % (x.source_id, x.target_id), axis=1).values)
return graph + nodes + '\n\n' + edges + '\n}'
def get_components_at_least_size(self, min_size):
component_sizes = (pd.DataFrame({'component': self.components})
.reset_index()
.groupby('component')[['index']]
.apply(len)
.where(lambda x: x >= min_size).dropna())
return np.array(component_sizes.sort_values(ascending=False).index)
def get_node_to_component_dict(self):
return self.id_node_df.set_index('name')['Component'].to_dict()
def component_to_node_list_dict(self):
return self.id_node_df.groupby('Component')['name'].apply(list).to_dict()
    def _format_graphviz_parameters(self, params):
        return ', '.join([k + '=' + ('"' if isinstance(v, str) else '') + str(v) + ('"' if isinstance(v, str) else '')
                          for k, v in params.items()])
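# Hedged usage sketch (assumes pre-built pandas DataFrames carrying the
# columns referenced above: 'Component', 'name', 'source_id', 'target_id'):
#
#     digraph = ComponentDiGraph(orig_edge_df, id_node_df, node_df,
#                                edge_df, components)
#     for component in digraph.get_components_at_least_size(3):
#         dot = digraph.get_dot(component)  # e.g. feed to graphviz.Source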
|
from numato_gpio import NumatoGpioError
import pytest
from homeassistant.components import numato
from homeassistant.setup import async_setup_component
from .common import NUMATO_CFG, mockup_raise, mockup_return
async def test_setup_no_devices(hass, numato_fixture, monkeypatch):
"""Test handling of an 'empty' discovery.
Platform setups are expected to return after handling errors locally
without raising.
"""
monkeypatch.setattr(numato_fixture, "discover", mockup_return)
assert await async_setup_component(hass, "numato", NUMATO_CFG)
assert len(numato_fixture.devices) == 0
async def test_fail_setup_raising_discovery(hass, numato_fixture, caplog, monkeypatch):
"""Test handling of an exception during discovery.
Setup shall return False.
"""
monkeypatch.setattr(numato_fixture, "discover", mockup_raise)
assert not await async_setup_component(hass, "numato", NUMATO_CFG)
await hass.async_block_till_done()
async def test_hass_numato_api_wrong_port_directions(hass, numato_fixture):
"""Test handling of wrong port directions.
This won't happen in the current platform implementation but would raise
in case of an introduced bug in the platforms.
"""
numato_fixture.discover()
api = numato.NumatoAPI()
api.setup_output(0, 5)
api.setup_input(0, 2)
api.setup_input(0, 6)
    with pytest.raises(NumatoGpioError):
        api.read_adc_input(0, 5)  # adc_read from output
    with pytest.raises(NumatoGpioError):
        api.read_input(0, 5)  # read from output
    with pytest.raises(NumatoGpioError):
        api.write_output(0, 2, 1)  # write to input
async def test_hass_numato_api_errors(hass, numato_fixture, monkeypatch):
"""Test whether Home Assistant numato API (re-)raises errors."""
numato_fixture.discover()
monkeypatch.setattr(numato_fixture.devices[0], "setup", mockup_raise)
monkeypatch.setattr(numato_fixture.devices[0], "adc_read", mockup_raise)
monkeypatch.setattr(numato_fixture.devices[0], "read", mockup_raise)
monkeypatch.setattr(numato_fixture.devices[0], "write", mockup_raise)
api = numato.NumatoAPI()
    with pytest.raises(NumatoGpioError):
        api.setup_input(0, 5)
    with pytest.raises(NumatoGpioError):
        api.read_adc_input(0, 1)
    with pytest.raises(NumatoGpioError):
        api.read_input(0, 2)
    with pytest.raises(NumatoGpioError):
        api.write_output(0, 2, 1)
async def test_invalid_port_number(hass, numato_fixture, config):
"""Test validation of ADC port number type."""
sensorports_cfg = config["numato"]["devices"][0]["sensors"]["ports"]
port1_config = sensorports_cfg["1"]
sensorports_cfg["one"] = port1_config
del sensorports_cfg["1"]
assert not await async_setup_component(hass, "numato", config)
await hass.async_block_till_done()
assert not numato_fixture.devices
async def test_too_low_adc_port_number(hass, numato_fixture, config):
"""Test handling of failing component setup.
Tries setting up an ADC on a port below (0) the allowed range.
"""
sensorports_cfg = config["numato"]["devices"][0]["sensors"]["ports"]
sensorports_cfg.update({0: {"name": "toolow"}})
assert not await async_setup_component(hass, "numato", config)
assert not numato_fixture.devices
async def test_too_high_adc_port_number(hass, numato_fixture, config):
"""Test handling of failing component setup.
Tries setting up an ADC on a port above (8) the allowed range.
"""
sensorports_cfg = config["numato"]["devices"][0]["sensors"]["ports"]
sensorports_cfg.update({8: {"name": "toohigh"}})
assert not await async_setup_component(hass, "numato", config)
assert not numato_fixture.devices
async def test_invalid_adc_range_value_type(hass, numato_fixture, config):
"""Test validation of ADC range config's types.
Replaces the source range beginning by a string.
"""
sensorports_cfg = config["numato"]["devices"][0]["sensors"]["ports"]
sensorports_cfg["1"]["source_range"][0] = "zero"
assert not await async_setup_component(hass, "numato", config)
assert not numato_fixture.devices
async def test_invalid_adc_source_range_length(hass, numato_fixture, config):
"""Test validation of ADC range config's length.
Adds an element to the source range.
"""
sensorports_cfg = config["numato"]["devices"][0]["sensors"]["ports"]
sensorports_cfg["1"]["source_range"].append(42)
assert not await async_setup_component(hass, "numato", config)
assert not numato_fixture.devices
async def test_invalid_adc_source_range_order(hass, numato_fixture, config):
"""Test validation of ADC range config's order.
Sets the source range to a decreasing [2, 1].
"""
sensorports_cfg = config["numato"]["devices"][0]["sensors"]["ports"]
sensorports_cfg["1"]["source_range"] = [2, 1]
assert not await async_setup_component(hass, "numato", config)
assert not numato_fixture.devices
async def test_invalid_adc_destination_range_value_type(hass, numato_fixture, config):
"""Test validation of ADC range .
Replaces the destination range beginning by a string.
"""
sensorports_cfg = config["numato"]["devices"][0]["sensors"]["ports"]
sensorports_cfg["1"]["destination_range"][0] = "zero"
assert not await async_setup_component(hass, "numato", config)
assert not numato_fixture.devices
async def test_invalid_adc_destination_range_length(hass, numato_fixture, config):
"""Test validation of ADC range config's length.
Adds an element to the destination range.
"""
sensorports_cfg = config["numato"]["devices"][0]["sensors"]["ports"]
sensorports_cfg["1"]["destination_range"].append(42)
assert not await async_setup_component(hass, "numato", config)
assert not numato_fixture.devices
async def test_invalid_adc_destination_range_order(hass, numato_fixture, config):
"""Test validation of ADC range config's order.
Sets the destination range to a decreasing [2, 1].
"""
sensorports_cfg = config["numato"]["devices"][0]["sensors"]["ports"]
sensorports_cfg["1"]["destination_range"] = [2, 1]
assert not await async_setup_component(hass, "numato", config)
assert not numato_fixture.devices
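# Hedged sketch of the helpers imported from .common above (the real
# definitions live in the test package's common module; bodies are
# assumptions):
#
#     def mockup_raise(*args, **kwargs):
#         """Stand-in that injects a NumatoGpioError."""
#         raise NumatoGpioError("Error mockup")
#
#     def mockup_return(*args, **kwargs):
#         """Stand-in discovery that finds no devices."""
#         return False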
|
import os
import unittest
from perfkitbenchmarker import sample
from perfkitbenchmarker import test_util
from perfkitbenchmarker.windows_packages import hammerdb
class HammerDBBenchmarkTestCase(unittest.TestCase, test_util.SamplesTestMixin):
def getDataContents(self, file_name):
path = os.path.join(os.path.dirname(__file__), '..', 'data', file_name)
with open(path) as fp:
contents = fp.read()
return contents
def setUp(self):
self.result_xml = self.getDataContents('hammerdb_output_log.txt')
def testHammerDBTpccParsing(self):
samples = hammerdb.ParseHammerDBResultTPCC(self.result_xml, {}, [1])
expected_metadata = {
'hammerdb_tpcc_virtual_user': 1
}
expected_samples = [
sample.Sample('TPM', 60301, 'times/minutes', expected_metadata)
]
self.assertSampleListsEqualUpToTimestamp(expected_samples, samples)
def testHammerDBTpchParsing(self):
samples = hammerdb.ParseHammerDBResultTPCH(self.result_xml, {}, 1)
query_time_list = [68.5, 10, 6.9, 6.3, 20.5, 5.1, 28.1,
19.3, 75.9, 17.2, 22.8, 29, 34.2, 2.4, 15.4,
12.2, 33.3, 94.1, 34, 15.5, 124, 15.8]
rf_time_list = [61.4, 26.8]
power = hammerdb._CalculateTPCHPower(query_time_list, rf_time_list, 300)
assert int(power) == 49755
expected_metadata = {}
expected_samples = [
sample.Sample('qphh', 1421.1785988465313, 'N/A', expected_metadata)
]
self.assertSampleListsEqualUpToTimestamp(expected_samples, samples)
if __name__ == '__main__':
unittest.main()
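# Hedged note on the math exercised above (standard TPC-H definitions,
# which hammerdb's helpers are assumed to follow): with 22 query times and
# 2 refresh times, power is a geometric-mean metric,
#     power = 3600 * scale_factor / geomean(query_times + rf_times)
# and the composite metric is qphh = sqrt(power * throughput).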
|
from __future__ import division, print_function, unicode_literals
import argparse
import os
import sys
def main(args):
p = argparse.ArgumentParser(description=__doc__)
p.add_argument("variables", action="store", nargs="*", help="variables to be printed")
ns = p.parse_args(args)
if ns.variables:
vardict = {k: v for k, v in os.environ.items() if k in ns.variables}
else:
vardict = os.environ
vardict = {k: v for k, v in vardict.items() if k[0] not in "$@?!#*0123456789"}
for k, v in vardict.items():
print("{}={}".format(k, v))
sys.exit(0)
if __name__ == "__main__":
main(sys.argv[1:])
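# Hedged usage example (output depends on the caller's environment):
#
#     $ python printenv.py HOME SHELL
#     HOME=/home/user
#     SHELL=/bin/bash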
|
import numpy as np
import unittest
from chainer.dataset import DatasetMixin
from chainercv.utils import assert_is_point_dataset
from chainercv.utils import testing
class PointDataset(DatasetMixin):
H = 48
W = 64
def __init__(self, n_point_candidates,
return_visible, *options):
self.n_point_candidates = n_point_candidates
self.return_visible = return_visible
self.options = options
def __len__(self):
return 10
def get_example(self, i):
n_inst = 2
img = np.random.randint(0, 256, size=(3, self.H, self.W))
n_point = np.random.choice(self.n_point_candidates)
point_y = np.random.uniform(0, self.H, size=(n_inst, n_point))
point_x = np.random.uniform(0, self.W, size=(n_inst, n_point))
point = np.stack((point_y, point_x), axis=2).astype(np.float32)
if self.return_visible:
            visible = np.random.randint(
                0, 2, size=(n_inst, n_point)).astype(bool)
return (img, point, visible) + self.options
else:
return (img, point) + self.options
class InvalidSampleSizeDataset(PointDataset):
def get_example(self, i):
img = super(
InvalidSampleSizeDataset, self).get_example(i)[0]
return img
class InvalidImageDataset(PointDataset):
def get_example(self, i):
img = super(
InvalidImageDataset, self).get_example(i)[0]
rest = super(
InvalidImageDataset, self).get_example(i)[1:]
return (img[0],) + rest
class InvalidPointDataset(PointDataset):
def get_example(self, i):
img, point = super(InvalidPointDataset, self).get_example(i)[:2]
rest = super(InvalidPointDataset, self).get_example(i)[2:]
point += 1000
return (img, point) + rest
@testing.parameterize(
# No optional Values
{'dataset': PointDataset([10, 15], True), 'valid': True, 'n_point': None},
{'dataset': PointDataset([10, 15], False), 'valid': True, 'n_point': None},
{'dataset': PointDataset([15], True), 'valid': True, 'n_point': 15},
{'dataset': PointDataset([15], False), 'valid': True, 'n_point': 15},
# Invalid n_point
{'dataset': PointDataset([15], True), 'valid': False, 'n_point': 10},
{'dataset': PointDataset([15], False), 'valid': False, 'n_point': 10},
# Return optional values
{'dataset': PointDataset([10, 15], True, 'option'),
'valid': True, 'n_point': None},
{'dataset': PointDataset([10, 15], False, 'option'),
'valid': True, 'n_point': None, 'no_visible': True},
{'dataset': PointDataset([15], True, 'option'),
'valid': True, 'n_point': 15},
{'dataset': PointDataset([15], False, 'option'),
'valid': True, 'n_point': 15, 'no_visible': True},
# Invalid datasets
{'dataset': InvalidSampleSizeDataset([10], True),
'valid': False, 'n_point': None},
{'dataset': InvalidImageDataset([10], True),
'valid': False, 'n_point': None},
{'dataset': InvalidPointDataset([10], True),
'valid': False, 'n_point': None},
)
class TestAssertIsPointDataset(unittest.TestCase):
def setUp(self):
if not hasattr(self, 'no_visible'):
self.no_visible = False
def test_assert_is_point_dataset(self):
if self.valid:
assert_is_point_dataset(
self.dataset, self.n_point, 20, self.no_visible)
else:
with self.assertRaises(AssertionError):
assert_is_point_dataset(
self.dataset, self.n_point, 20, self.no_visible)
testing.run_module(__name__, __file__)
|
import logging
from typing import Callable
from homeassistant.config_entries import ConfigEntry
from homeassistant.const import TEMP_CELSIUS
from homeassistant.core import HomeAssistant, callback
from homeassistant.helpers.update_coordinator import DataUpdateCoordinator
from . import NotionEntity
from .const import DATA_COORDINATOR, DOMAIN, SENSOR_TEMPERATURE
_LOGGER = logging.getLogger(__name__)
SENSOR_TYPES = {SENSOR_TEMPERATURE: ("Temperature", "temperature", TEMP_CELSIUS)}
async def async_setup_entry(
hass: HomeAssistant, entry: ConfigEntry, async_add_entities: Callable
):
"""Set up Notion sensors based on a config entry."""
coordinator = hass.data[DOMAIN][DATA_COORDINATOR][entry.entry_id]
sensor_list = []
for task_id, task in coordinator.data["tasks"].items():
if task["task_type"] not in SENSOR_TYPES:
continue
name, device_class, unit = SENSOR_TYPES[task["task_type"]]
sensor = coordinator.data["sensors"][task["sensor_id"]]
sensor_list.append(
NotionSensor(
coordinator,
task_id,
sensor["id"],
sensor["bridge"]["id"],
sensor["system_id"],
name,
device_class,
unit,
)
)
async_add_entities(sensor_list)
class NotionSensor(NotionEntity):
"""Define a Notion sensor."""
def __init__(
self,
coordinator: DataUpdateCoordinator,
task_id: str,
sensor_id: str,
bridge_id: str,
system_id: str,
name: str,
device_class: str,
unit: str,
):
"""Initialize the entity."""
super().__init__(
coordinator, task_id, sensor_id, bridge_id, system_id, name, device_class
)
self._unit = unit
@property
def state(self) -> str:
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self) -> str:
"""Return the unit of measurement."""
return self._unit
@callback
def _async_update_from_latest_data(self) -> None:
"""Fetch new state data for the sensor."""
task = self.coordinator.data["tasks"][self._task_id]
if task["task_type"] == SENSOR_TEMPERATURE:
self._state = round(float(task["status"]["value"]), 1)
else:
_LOGGER.error(
"Unknown task type: %s: %s",
self.coordinator.data["sensors"][self._sensor_id],
task["task_type"],
)
|
import os
import subprocess
from kalliope.core.TTS.TTSModule import TTSModule, MissingTTSParameter
import logging
import sys
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Pico2wave(TTSModule):
def __init__(self, **kwargs):
super(Pico2wave, self).__init__(**kwargs)
self.path = kwargs.get('path', None)
self._check_parameters()
def _check_parameters(self):
"""
        Check parameters are ok, raise a MissingTTSParameter exception otherwise.
        :return: true if parameters are ok, raise an exception otherwise
        .. raises:: MissingTTSParameter
"""
if self.language == "default" or self.language is None:
            raise MissingTTSParameter("[pico2wave] Missing parameters, check documentation!")
return True
def say(self, words):
"""
:param words: The sentence to say
"""
self.generate_and_play(words, self._generate_audio_file)
def _generate_audio_file(self):
"""
Generic method used as a Callback in TTSModule
        - must provide the audio file and write it to disk
.. raises:: FailToLoadSoundFile
"""
if self.path is None:
# we try to get the path from the env
self.path = self._get_pico_path()
# if still None, we set a default value
if self.path is None:
self.path = "/usr/bin/pico2wave"
        # pico2wave requires a file path that ends with .wav
tmp_path = self.file_path+".wav"
pico2wave_options = ["-l=%s" % self.language, "-w=%s" % tmp_path]
final_command = list()
final_command.extend([self.path])
final_command.extend(pico2wave_options)
final_command.append(self.words)
logger.debug("[Pico2wave] command: %s" % final_command)
# generate the file with pico2wav
subprocess.call(final_command)
# remove the extension .wav
os.rename(tmp_path, self.file_path)
@staticmethod
def _get_pico_path():
prog = "pico2wave"
for path in os.environ["PATH"].split(os.pathsep):
path = path.strip('"')
exe_file = os.path.join(path, prog)
if os.path.isfile(exe_file):
return exe_file
return None
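# Hedged illustration of the command assembled above (assumes language
# "en-US" and a generated file path of /tmp/kalliope/tts):
#
#     ['/usr/bin/pico2wave', '-l=en-US', '-w=/tmp/kalliope/tts.wav',
#      'Hello world']
#
# after which os.rename strips the temporary ".wav" suffix again.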
|
from django.apps import AppConfig
from django.core.checks import register
from weblate.gitexport.utils import find_git_http_backend
from weblate.utils.checks import weblate_check
class GitExportConfig(AppConfig):
name = "weblate.gitexport"
label = "gitexport"
verbose_name = "Git Exporter"
def ready(self):
super().ready()
register(check_git_backend)
def check_git_backend(app_configs, **kwargs):
if find_git_http_backend() is None:
return [
weblate_check(
"weblate.E022",
"Failed to find git-http-backend, " "the git exporter will not work.",
)
]
return []
|
import numpy as np
from scipy import linalg
from .defaults import _handle_default
from .io.meas_info import _simplify_info
from .io.pick import (_picks_by_type, pick_info, pick_channels_cov,
_picks_to_idx)
from .io.proj import make_projector
from .utils import (logger, _compute_row_norms, _pl, _validate_type,
_apply_scaling_cov, _undo_scaling_cov,
_scaled_array, warn, _check_rank, verbose)
@verbose
def estimate_rank(data, tol='auto', return_singular=False, norm=True,
tol_kind='absolute', verbose=None):
"""Estimate the rank of data.
This function will normalize the rows of the data (typically
channels or vertices) such that non-zero singular values
should be close to one.
Parameters
----------
data : array
Data to estimate the rank of (should be 2-dimensional).
%(rank_tol)s
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
norm : bool
If True, data will be scaled by their estimated row-wise norm.
Else data are assumed to be scaled. Defaults to True.
%(rank_tol_kind)s
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
if norm:
data = data.copy() # operate on a copy
norms = _compute_row_norms(data)
data /= norms[:, np.newaxis]
s = linalg.svdvals(data)
rank = _estimate_rank_from_s(s, tol, tol_kind)
if return_singular is True:
return rank, s
else:
return rank
def _estimate_rank_from_s(s, tol='auto', tol_kind='absolute'):
"""Estimate the rank of a matrix from its singular values.
Parameters
----------
s : ndarray, shape (..., ndim)
The singular values of the matrix.
tol : float | 'auto'
Tolerance for singular values to consider non-zero in calculating the
rank. Can be 'auto' to use the same thresholding as
``scipy.linalg.orth`` (assuming np.float64 datatype) adjusted
by a factor of 2.
tol_kind : str
Can be "absolute" or "relative".
Returns
-------
rank : ndarray, shape (...)
The estimated rank.
"""
s = np.array(s, float)
max_s = np.amax(s, axis=-1)
if isinstance(tol, str):
if tol not in ('auto', 'float32'):
raise ValueError('tol must be "auto" or float, got %r' % (tol,))
# XXX this should be float32 probably due to how we save and
# load data, but it breaks test_make_inverse_operator (!)
# The factor of 2 gets test_compute_covariance_auto_reg[None]
# to pass without breaking minimum norm tests. :(
# Passing 'float32' is a hack workaround for test_maxfilter_get_rank :(
if tol == 'float32':
eps = np.finfo(np.float32).eps
else:
eps = np.finfo(np.float64).eps
tol = s.shape[-1] * max_s * eps
if s.ndim == 1: # typical
logger.info(' Using tolerance %0.2g (%0.2g eps * %d dim * %0.2g'
' max singular value)' % (tol, eps, len(s), max_s))
elif not (isinstance(tol, np.ndarray) and tol.dtype.kind == 'f'):
tol = float(tol)
if tol_kind == 'relative':
tol = tol * max_s
rank = np.sum(s > tol, axis=-1)
return rank
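# Hedged numeric illustration of the 'auto' thresholding above: for
# s = [10, 1, 1e-17], tol = 3 * 10 * eps(float64) ~= 6.7e-15, so the
# estimated rank is 2 (the 1e-17 singular value falls below tol).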
def _estimate_rank_raw(raw, picks=None, tol=1e-4, scalings='norm',
with_ref_meg=False, tol_kind='absolute'):
"""Aid the deprecation of raw.estimate_rank."""
if picks is None:
picks = _picks_to_idx(raw.info, picks, with_ref_meg=with_ref_meg)
    # convenience wrapper to expose the expert "tol" option + scalings options
return _estimate_rank_meeg_signals(
raw[picks][0], pick_info(raw.info, picks), scalings,
tol, False, tol_kind)
def _estimate_rank_meeg_signals(data, info, scalings, tol='auto',
return_singular=False, tol_kind='absolute'):
"""Estimate rank for M/EEG data.
Parameters
----------
    data : np.ndarray of float, shape (n_channels, n_samples)
The M/EEG signals.
info : Info
The measurement info.
scalings : dict | 'norm' | np.ndarray | None
The rescaling method to be applied. If dict, it will override the
following default dict:
dict(mag=1e15, grad=1e13, eeg=1e6)
If 'norm' data will be scaled by channel-wise norms. If array,
pre-specified norms will be used. If None, no scaling will be applied.
tol : float | str
Tolerance. See ``estimate_rank``.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
tol_kind : str
Tolerance kind. See ``estimate_rank``.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
picks_list = _picks_by_type(info)
    if data.shape[1] < data.shape[0]:
        warn("You've got fewer samples than channels, your "
             "rank estimate might be inaccurate.")
with _scaled_array(data, picks_list, scalings):
out = estimate_rank(data, tol=tol, norm=False,
return_singular=return_singular,
tol_kind=tol_kind)
rank = out[0] if isinstance(out, tuple) else out
ch_type = ' + '.join(list(zip(*picks_list))[0])
logger.info(' Estimated rank (%s): %d' % (ch_type, rank))
return out
def _estimate_rank_meeg_cov(data, info, scalings, tol='auto',
return_singular=False):
"""Estimate rank of M/EEG covariance data, given the covariance.
Parameters
----------
data : np.ndarray of float, shape (n_channels, n_channels)
The M/EEG covariance.
info : Info
The measurement info.
scalings : dict | 'norm' | np.ndarray | None
The rescaling method to be applied. If dict, it will override the
following default dict:
dict(mag=1e12, grad=1e11, eeg=1e5)
If 'norm' data will be scaled by channel-wise norms. If array,
pre-specified norms will be used. If None, no scaling will be applied.
tol : float | str
Tolerance. See ``estimate_rank``.
return_singular : bool
If True, also return the singular values that were used
to determine the rank.
Returns
-------
rank : int
Estimated rank of the data.
s : array
If return_singular is True, the singular values that were
thresholded to determine the rank are also returned.
"""
picks_list = _picks_by_type(info)
scalings = _handle_default('scalings_cov_rank', scalings)
_apply_scaling_cov(data, picks_list, scalings)
    if data.shape[1] < data.shape[0]:
        warn("You've got fewer samples than channels, your "
             "rank estimate might be inaccurate.")
out = estimate_rank(data, tol=tol, norm=False,
return_singular=return_singular)
rank = out[0] if isinstance(out, tuple) else out
ch_type = ' + '.join(list(zip(*picks_list))[0])
logger.info(' Estimated rank (%s): %d' % (ch_type, rank))
_undo_scaling_cov(data, picks_list, scalings)
return out
@verbose
def _get_rank_sss(inst, msg='You should use data-based rank estimate instead',
verbose=None):
"""Look up rank from SSS data.
.. note::
Throws an error if SSS has not been applied.
Parameters
----------
inst : instance of Raw, Epochs or Evoked, or Info
Any MNE object with an .info attribute
Returns
-------
rank : int
The numerical rank as predicted by the number of SSS
components.
"""
# XXX this is too basic for movement compensated data
# https://github.com/mne-tools/mne-python/issues/4676
from .io.meas_info import Info
info = inst if isinstance(inst, Info) else inst.info
del inst
proc_info = info.get('proc_history', [])
if len(proc_info) > 1:
logger.info('Found multiple SSS records. Using the first.')
if len(proc_info) == 0 or 'max_info' not in proc_info[0] or \
'in_order' not in proc_info[0]['max_info']['sss_info']:
raise ValueError('Could not find Maxfilter information in '
'info["proc_history"]. %s' % msg)
proc_info = proc_info[0]
max_info = proc_info['max_info']
inside = max_info['sss_info']['in_order']
nfree = (inside + 1) ** 2 - 1
nfree -= (len(max_info['sss_info']['components'][:nfree]) -
max_info['sss_info']['components'][:nfree].sum())
return nfree
def _info_rank(info, ch_type, picks, rank):
if ch_type == 'meg' and rank != 'full':
try:
return _get_rank_sss(info)
except ValueError:
pass
return len(picks)
def _compute_rank_int(inst, *args, **kwargs):
"""Wrap compute_rank but yield an int."""
# XXX eventually we should unify how channel types are handled
# so that we don't need to do this, or we do it everywhere.
# Using pca=True in compute_whitener might help.
return sum(compute_rank(inst, *args, **kwargs).values())
@verbose
def compute_rank(inst, rank=None, scalings=None, info=None, tol='auto',
proj=True, tol_kind='absolute', verbose=None):
"""Compute the rank of data or noise covariance.
This function will normalize the rows of the data (typically
channels or vertices) such that non-zero singular values
should be close to one.
Parameters
----------
inst : instance of Raw, Epochs, or Covariance
Raw measurements to compute the rank from or the covariance.
%(rank_None)s
scalings : dict | None (default None)
Defaults to ``dict(mag=1e15, grad=1e13, eeg=1e6)``.
These defaults will scale different channel types
to comparable values.
info : instance of Info | None
The measurement info used to compute the covariance. It is
only necessary if inst is a Covariance object (since this does
not provide ``inst.info``).
%(rank_tol)s
proj : bool
If True, all projs in ``inst`` and ``info`` will be applied or
considered when ``rank=None`` or ``rank='info'``.
%(rank_tol_kind)s
%(verbose)s
Returns
-------
rank : dict
Estimated rank of the data for each channel type.
To get the total rank, you can use ``sum(rank.values())``.
Notes
-----
The ``rank`` parameter can be:
:data:`python:None` (default)
Rank will be estimated from the data after proper scaling of
different channel types.
``'info'``
Rank is inferred from ``info``. If data have been processed
with Maxwell filtering, the Maxwell filtering header is used.
Otherwise, the channel counts themselves are used.
In both cases, the number of projectors is subtracted from
the (effective) number of channels in the data.
        For example, if Maxwell filtering reduces the rank to 68, with
        two projectors the returned value will be 66.
``'full'``
Rank is assumed to be full, i.e. equal to the
number of good channels. If a `Covariance` is passed, this can make
sense if it has been (possibly improperly) regularized without taking
into account the true data rank.
.. versionadded:: 0.18
"""
from .io.base import BaseRaw
from .epochs import BaseEpochs
from . import Covariance
rank = _check_rank(rank)
scalings = _handle_default('scalings_cov_rank', scalings)
if isinstance(inst, Covariance):
inst_type = 'covariance'
if info is None:
raise ValueError('info cannot be None if inst is a Covariance.')
inst = pick_channels_cov(
inst, set(inst['names']) & set(info['ch_names']))
if info['ch_names'] != inst['names']:
info = pick_info(info, [info['ch_names'].index(name)
for name in inst['names']])
else:
info = inst.info
inst_type = 'data'
logger.info('Computing rank from %s with rank=%r' % (inst_type, rank))
_validate_type(rank, (str, dict, None), 'rank')
if isinstance(rank, str): # string, either 'info' or 'full'
rank_type = 'info'
info_type = rank
rank = dict()
else: # None or dict
rank_type = 'estimated'
if rank is None:
rank = dict()
simple_info = _simplify_info(info)
picks_list = _picks_by_type(info, meg_combined=True, ref_meg=False,
exclude='bads')
for ch_type, picks in picks_list:
if ch_type in rank:
continue
ch_names = [info['ch_names'][pick] for pick in picks]
n_chan = len(ch_names)
if proj:
proj_op, n_proj, _ = make_projector(info['projs'], ch_names)
else:
proj_op, n_proj = None, 0
if rank_type == 'info':
# use info
rank[ch_type] = _info_rank(info, ch_type, picks, info_type)
if info_type != 'full':
rank[ch_type] -= n_proj
logger.info(' %s: rank %d after %d projector%s applied to '
'%d channel%s'
% (ch_type.upper(), rank[ch_type],
n_proj, _pl(n_proj), n_chan, _pl(n_chan)))
else:
logger.info(' %s: rank %d from info'
% (ch_type.upper(), rank[ch_type]))
else:
# Use empirical estimation
assert rank_type == 'estimated'
if isinstance(inst, (BaseRaw, BaseEpochs)):
if isinstance(inst, BaseRaw):
data = inst.get_data(picks, None, None,
reject_by_annotation='omit')
else: # isinstance(inst, BaseEpochs):
data = inst.get_data()[:, picks, :]
data = np.concatenate(data, axis=1)
if proj:
data = np.dot(proj_op, data)
rank[ch_type] = _estimate_rank_meeg_signals(
data, pick_info(simple_info, picks), scalings, tol, False,
tol_kind)
else:
assert isinstance(inst, Covariance)
if inst['diag']:
rank[ch_type] = (inst['data'][picks] > 0).sum() - n_proj
else:
data = inst['data'][picks][:, picks]
if proj:
data = np.dot(np.dot(proj_op, data), proj_op.T)
rank[ch_type] = _estimate_rank_meeg_cov(
data, pick_info(simple_info, picks), scalings, tol)
this_info_rank = _info_rank(info, ch_type, picks, 'info')
logger.info(' %s: rank %d computed from %d data channel%s '
'with %d projector%s'
% (ch_type.upper(), rank[ch_type], n_chan, _pl(n_chan),
n_proj, _pl(n_proj)))
if rank[ch_type] > this_info_rank:
warn('Something went wrong in the data-driven estimation of '
'the data rank as it exceeds the theoretical rank from '
'the info (%d > %d). Consider setting rank to "auto" or '
'setting it explicitly as an integer.' %
(rank[ch_type], this_info_rank))
return rank
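# Hedged usage sketch (assumes an existing recording at the illustrative
# path below):
if __name__ == '__main__':
    from mne.io import read_raw_fif
    raw = read_raw_fif('sample_raw.fif')  # hypothetical file path
    # rank=None requests a data-driven estimate per channel type
    print(compute_rank(raw, rank=None))  # e.g. {'meg': 302, 'eeg': 59}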
|
from molecule.driver import base
from molecule import util
class Linode(base.Base):
"""
The class responsible for managing `Linode`_ instances. `Linode`_
is `not` the default driver used in Molecule.
Molecule leverages Ansible's `linode_module`_, by mapping variables
from ``molecule.yml`` into ``create.yml`` and ``destroy.yml``.
.. important::
        Please note: the Ansible Linode module currently uses the deprecated
        Linode API, and the module has a number of outstanding usability
        issues. Work is ongoing to migrate to the new API (v4), and this
        driver will be migrated when that happens. In the meantime, this
        driver should be considered alpha quality.
.. _`linode_module`: https://docs.ansible.com/ansible/latest/modules/linode_module.html
.. code-block:: yaml
driver:
name: linode
platforms:
- name: instance
plan: 1
datacenter: 7
distribution: 129
.. code-block:: bash
$ pip install 'molecule[linode]'
Change the options passed to the ssh client.
.. code-block:: yaml
driver:
name: linode
ssh_connection_options:
-o ControlPath=~/.ansible/cp/%r@%h-%p
.. important::
        Molecule does not merge lists; when overriding, the developer must
        provide all options.
Provide a list of files Molecule will preserve, relative to the scenario
ephemeral directory, after any ``destroy`` subcommand execution.
.. code-block:: yaml
driver:
name: linode
safe_files:
- foo
.. _`Linode`: https://www.linode.com/
""" # noqa
def __init__(self, config):
super(Linode, self).__init__(config)
self._name = 'linode'
@property
def name(self):
return self._name
@name.setter
def name(self, value):
self._name = value
@property
def login_cmd_template(self):
connection_options = ' '.join(self.ssh_connection_options)
return ('ssh {{address}} '
'-l {{user}} '
'-p {{port}} '
'-i {{identity_file}} '
'{}').format(connection_options)
@property
def default_safe_files(self):
return [
self.instance_config,
]
@property
def default_ssh_connection_options(self):
return self._get_ssh_connection_options()
def login_options(self, instance_name):
d = {'instance': instance_name}
return util.merge_dicts(d, self._get_instance_config(instance_name))
def ansible_connection_options(self, instance_name):
try:
d = self._get_instance_config(instance_name)
return {
'ansible_user': d['user'],
'ansible_host': d['address'],
'ansible_port': d['port'],
'ansible_ssh_pass': d['ssh_pass'],
'ansible_private_key_file': d['identity_file'],
'connection': 'ssh',
'ansible_ssh_common_args':
' '.join(self.ssh_connection_options),
}
except StopIteration:
return {}
except IOError:
            # Instance has yet to be provisioned, therefore the
            # instance_config is not on disk.
return {}
def _get_instance_config(self, instance_name):
instance_config_dict = util.safe_load_file(
self._config.driver.instance_config)
return next(
item for item in instance_config_dict if any((
# NOTE(lwm): Handle both because of transitioning label logic
# https://github.com/ansible/ansible/pull/44719
item['instance'] == '{}_{}'.format(item['linode_id'],
instance_name),
item['instance'] == '{}-{}'.format(item['linode_id'],
instance_name))))
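    # Illustrative (assumed) entries matched above, for instance_name
    # 'instance' and linode_id 12345:
    #     {'instance': '12345_instance', ...}  # older underscore label
    #     {'instance': '12345-instance', ...}  # newer dash label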
def sanity_checks(self):
# FIXME(decentral1se): Implement sanity checks
pass
|
from datetime import timedelta
import logging
from qnapstats import QNAPStats
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
ATTR_NAME,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_PASSWORD,
CONF_PORT,
CONF_SSL,
CONF_TIMEOUT,
CONF_USERNAME,
CONF_VERIFY_SSL,
DATA_GIBIBYTES,
DATA_RATE_MEBIBYTES_PER_SECOND,
PERCENTAGE,
TEMP_CELSIUS,
)
from homeassistant.exceptions import PlatformNotReady
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.util import Throttle
_LOGGER = logging.getLogger(__name__)
ATTR_DRIVE = "Drive"
ATTR_DRIVE_SIZE = "Drive Size"
ATTR_IP = "IP Address"
ATTR_MAC = "MAC Address"
ATTR_MASK = "Mask"
ATTR_MAX_SPEED = "Max Speed"
ATTR_MEMORY_SIZE = "Memory Size"
ATTR_MODEL = "Model"
ATTR_PACKETS_TX = "Packets (TX)"
ATTR_PACKETS_RX = "Packets (RX)"
ATTR_PACKETS_ERR = "Packets (Err)"
ATTR_SERIAL = "Serial #"
ATTR_TYPE = "Type"
ATTR_UPTIME = "Uptime"
ATTR_VOLUME_SIZE = "Volume Size"
CONF_DRIVES = "drives"
CONF_NICS = "nics"
CONF_VOLUMES = "volumes"
DEFAULT_NAME = "QNAP"
DEFAULT_PORT = 8080
DEFAULT_TIMEOUT = 5
MIN_TIME_BETWEEN_UPDATES = timedelta(minutes=1)
NOTIFICATION_ID = "qnap_notification"
NOTIFICATION_TITLE = "QNAP Sensor Setup"
_SYSTEM_MON_COND = {
"status": ["Status", None, "mdi:checkbox-marked-circle-outline"],
"system_temp": ["System Temperature", TEMP_CELSIUS, "mdi:thermometer"],
}
_CPU_MON_COND = {
"cpu_temp": ["CPU Temperature", TEMP_CELSIUS, "mdi:thermometer"],
"cpu_usage": ["CPU Usage", PERCENTAGE, "mdi:chip"],
}
_MEMORY_MON_COND = {
"memory_free": ["Memory Available", DATA_GIBIBYTES, "mdi:memory"],
"memory_used": ["Memory Used", DATA_GIBIBYTES, "mdi:memory"],
"memory_percent_used": ["Memory Usage", PERCENTAGE, "mdi:memory"],
}
_NETWORK_MON_COND = {
"network_link_status": ["Network Link", None, "mdi:checkbox-marked-circle-outline"],
"network_tx": ["Network Up", DATA_RATE_MEBIBYTES_PER_SECOND, "mdi:upload"],
"network_rx": ["Network Down", DATA_RATE_MEBIBYTES_PER_SECOND, "mdi:download"],
}
_DRIVE_MON_COND = {
"drive_smart_status": ["SMART Status", None, "mdi:checkbox-marked-circle-outline"],
"drive_temp": ["Temperature", TEMP_CELSIUS, "mdi:thermometer"],
}
_VOLUME_MON_COND = {
"volume_size_used": ["Used Space", DATA_GIBIBYTES, "mdi:chart-pie"],
"volume_size_free": ["Free Space", DATA_GIBIBYTES, "mdi:chart-pie"],
"volume_percentage_used": ["Volume Used", PERCENTAGE, "mdi:chart-pie"],
}
_MONITORED_CONDITIONS = (
list(_SYSTEM_MON_COND)
+ list(_CPU_MON_COND)
+ list(_MEMORY_MON_COND)
+ list(_NETWORK_MON_COND)
+ list(_DRIVE_MON_COND)
+ list(_VOLUME_MON_COND)
)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_TIMEOUT, default=DEFAULT_TIMEOUT): cv.positive_int,
vol.Required(CONF_USERNAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_MONITORED_CONDITIONS): vol.All(
cv.ensure_list, [vol.In(_MONITORED_CONDITIONS)]
),
vol.Optional(CONF_NICS): cv.ensure_list,
vol.Optional(CONF_DRIVES): cv.ensure_list,
vol.Optional(CONF_VOLUMES): cv.ensure_list,
}
)
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the QNAP NAS sensor."""
api = QNAPStatsAPI(config)
api.update()
# QNAP is not available
if not api.data:
raise PlatformNotReady
sensors = []
# Basic sensors
for variable in config[CONF_MONITORED_CONDITIONS]:
if variable in _SYSTEM_MON_COND:
sensors.append(QNAPSystemSensor(api, variable, _SYSTEM_MON_COND[variable]))
if variable in _CPU_MON_COND:
sensors.append(QNAPCPUSensor(api, variable, _CPU_MON_COND[variable]))
if variable in _MEMORY_MON_COND:
sensors.append(QNAPMemorySensor(api, variable, _MEMORY_MON_COND[variable]))
# Network sensors
for nic in config.get(CONF_NICS, api.data["system_stats"]["nics"]):
sensors += [
QNAPNetworkSensor(api, variable, _NETWORK_MON_COND[variable], nic)
for variable in config[CONF_MONITORED_CONDITIONS]
if variable in _NETWORK_MON_COND
]
# Drive sensors
for drive in config.get(CONF_DRIVES, api.data["smart_drive_health"]):
sensors += [
QNAPDriveSensor(api, variable, _DRIVE_MON_COND[variable], drive)
for variable in config[CONF_MONITORED_CONDITIONS]
if variable in _DRIVE_MON_COND
]
# Volume sensors
for volume in config.get(CONF_VOLUMES, api.data["volumes"]):
sensors += [
QNAPVolumeSensor(api, variable, _VOLUME_MON_COND[variable], volume)
for variable in config[CONF_MONITORED_CONDITIONS]
if variable in _VOLUME_MON_COND
]
add_entities(sensors)
def round_nicely(number):
"""Round a number based on its size (so it looks nice)."""
if number < 10:
return round(number, 2)
if number < 100:
return round(number, 1)
return round(number)
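# Illustrative behavior (values follow from the rules above):
#     round_nicely(3.14159) -> 3.14   # two decimals below 10
#     round_nicely(42.76)   -> 42.8   # one decimal below 100
#     round_nicely(123.4)   -> 123    # whole number from 100 up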
class QNAPStatsAPI:
"""Class to interface with the API."""
def __init__(self, config):
"""Initialize the API wrapper."""
protocol = "https" if config[CONF_SSL] else "http"
self._api = QNAPStats(
f"{protocol}://{config.get(CONF_HOST)}",
config.get(CONF_PORT),
config.get(CONF_USERNAME),
config.get(CONF_PASSWORD),
verify_ssl=config.get(CONF_VERIFY_SSL),
timeout=config.get(CONF_TIMEOUT),
)
self.data = {}
@Throttle(MIN_TIME_BETWEEN_UPDATES)
def update(self):
"""Update API information and store locally."""
try:
self.data["system_stats"] = self._api.get_system_stats()
self.data["system_health"] = self._api.get_system_health()
self.data["smart_drive_health"] = self._api.get_smart_disk_health()
self.data["volumes"] = self._api.get_volumes()
self.data["bandwidth"] = self._api.get_bandwidth()
except: # noqa: E722 pylint: disable=bare-except
_LOGGER.exception("Failed to fetch QNAP stats from the NAS")
class QNAPSensor(Entity):
"""Base class for a QNAP sensor."""
def __init__(self, api, variable, variable_info, monitor_device=None):
"""Initialize the sensor."""
self.var_id = variable
self.var_name = variable_info[0]
self.var_units = variable_info[1]
self.var_icon = variable_info[2]
self.monitor_device = monitor_device
self._api = api
@property
def name(self):
"""Return the name of the sensor, if any."""
server_name = self._api.data["system_stats"]["system"]["name"]
if self.monitor_device is not None:
return f"{server_name} {self.var_name} ({self.monitor_device})"
return f"{server_name} {self.var_name}"
@property
def icon(self):
"""Return the icon to use in the frontend, if any."""
return self.var_icon
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self.var_units
def update(self):
"""Get the latest data for the states."""
self._api.update()
class QNAPCPUSensor(QNAPSensor):
"""A QNAP sensor that monitors CPU stats."""
@property
def state(self):
"""Return the state of the sensor."""
if self.var_id == "cpu_temp":
return self._api.data["system_stats"]["cpu"]["temp_c"]
if self.var_id == "cpu_usage":
return self._api.data["system_stats"]["cpu"]["usage_percent"]
class QNAPMemorySensor(QNAPSensor):
"""A QNAP sensor that monitors memory stats."""
@property
def state(self):
"""Return the state of the sensor."""
free = float(self._api.data["system_stats"]["memory"]["free"]) / 1024
if self.var_id == "memory_free":
return round_nicely(free)
total = float(self._api.data["system_stats"]["memory"]["total"]) / 1024
used = total - free
if self.var_id == "memory_used":
return round_nicely(used)
if self.var_id == "memory_percent_used":
return round(used / total * 100)
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._api.data:
data = self._api.data["system_stats"]["memory"]
size = round_nicely(float(data["total"]) / 1024)
return {ATTR_MEMORY_SIZE: f"{size} {DATA_GIBIBYTES}"}
class QNAPNetworkSensor(QNAPSensor):
"""A QNAP sensor that monitors network stats."""
@property
def state(self):
"""Return the state of the sensor."""
if self.var_id == "network_link_status":
nic = self._api.data["system_stats"]["nics"][self.monitor_device]
return nic["link_status"]
data = self._api.data["bandwidth"][self.monitor_device]
if self.var_id == "network_tx":
return round_nicely(data["tx"] / 1024 / 1024)
if self.var_id == "network_rx":
return round_nicely(data["rx"] / 1024 / 1024)
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._api.data:
data = self._api.data["system_stats"]["nics"][self.monitor_device]
return {
ATTR_IP: data["ip"],
ATTR_MASK: data["mask"],
ATTR_MAC: data["mac"],
ATTR_MAX_SPEED: data["max_speed"],
ATTR_PACKETS_TX: data["tx_packets"],
ATTR_PACKETS_RX: data["rx_packets"],
ATTR_PACKETS_ERR: data["err_packets"],
}
class QNAPSystemSensor(QNAPSensor):
"""A QNAP sensor that monitors overall system health."""
@property
def state(self):
"""Return the state of the sensor."""
if self.var_id == "status":
return self._api.data["system_health"]
if self.var_id == "system_temp":
return int(self._api.data["system_stats"]["system"]["temp_c"])
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._api.data:
data = self._api.data["system_stats"]
days = int(data["uptime"]["days"])
hours = int(data["uptime"]["hours"])
minutes = int(data["uptime"]["minutes"])
return {
ATTR_NAME: data["system"]["name"],
ATTR_MODEL: data["system"]["model"],
ATTR_SERIAL: data["system"]["serial_number"],
ATTR_UPTIME: f"{days:0>2d}d {hours:0>2d}h {minutes:0>2d}m",
}
class QNAPDriveSensor(QNAPSensor):
"""A QNAP sensor that monitors HDD/SSD drive stats."""
@property
def state(self):
"""Return the state of the sensor."""
data = self._api.data["smart_drive_health"][self.monitor_device]
if self.var_id == "drive_smart_status":
return data["health"]
if self.var_id == "drive_temp":
return int(data["temp_c"]) if data["temp_c"] is not None else 0
@property
def name(self):
"""Return the name of the sensor, if any."""
server_name = self._api.data["system_stats"]["system"]["name"]
return f"{server_name} {self.var_name} (Drive {self.monitor_device})"
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._api.data:
data = self._api.data["smart_drive_health"][self.monitor_device]
return {
ATTR_DRIVE: data["drive_number"],
ATTR_MODEL: data["model"],
ATTR_SERIAL: data["serial"],
ATTR_TYPE: data["type"],
}
class QNAPVolumeSensor(QNAPSensor):
"""A QNAP sensor that monitors storage volume stats."""
@property
def state(self):
"""Return the state of the sensor."""
data = self._api.data["volumes"][self.monitor_device]
free_gb = int(data["free_size"]) / 1024 / 1024 / 1024
if self.var_id == "volume_size_free":
return round_nicely(free_gb)
total_gb = int(data["total_size"]) / 1024 / 1024 / 1024
used_gb = total_gb - free_gb
if self.var_id == "volume_size_used":
return round_nicely(used_gb)
if self.var_id == "volume_percentage_used":
return round(used_gb / total_gb * 100)
@property
def device_state_attributes(self):
"""Return the state attributes."""
if self._api.data:
data = self._api.data["volumes"][self.monitor_device]
total_gb = int(data["total_size"]) / 1024 / 1024 / 1024
return {ATTR_VOLUME_SIZE: f"{round_nicely(total_gb)} {DATA_GIBIBYTES}"}
|
import os
import unittest
import mock
from perfkitbenchmarker.linux_benchmarks import gpu_pcie_bandwidth_benchmark
class GpuBandwidthTestCase(unittest.TestCase):
def setUp(self):
p = mock.patch(gpu_pcie_bandwidth_benchmark.__name__ + '.FLAGS')
p.start()
self.addCleanup(p.stop)
path = os.path.join(os.path.dirname(__file__), '../data',
'cuda_bandwidth_test_results.txt')
with open(path) as fp:
self.test_output = fp.read()
path = os.path.join(os.path.dirname(__file__), '../data',
'cuda_bandwidth_test_range_results.txt')
with open(path) as fp:
self.range_test_output = fp.read()
def testParseDeviceMetadata(self):
actual = gpu_pcie_bandwidth_benchmark.\
_ParseDeviceInfo(self.test_output)
expected = {'0': 'Tesla K80', '1': 'Tesla K80'}
self.assertEqual(expected, actual)
def testParseCudaBandwidthTestResults(self):
results = gpu_pcie_bandwidth_benchmark.\
_ParseOutputFromSingleIteration(self.test_output)
self.assertEqual(3, len(results))
self.assertAlmostEqual(9254.7, results['Host to device bandwidth'])
self.assertAlmostEqual(9686.1, results['Device to host bandwidth'])
self.assertAlmostEqual(155985.8, results['Device to device bandwidth'])
def testParseCudaBandwidthTestRangeResults(self):
results = gpu_pcie_bandwidth_benchmark.\
_ParseOutputFromSingleIteration(self.range_test_output)
self.assertEqual(3, len(results))
self.assertAlmostEqual(8063.3666667,
results['Host to device bandwidth'])
self.assertAlmostEqual(10518.7666667,
results['Device to host bandwidth'])
self.assertAlmostEqual(157524.4333333,
results['Device to device bandwidth'])
def testCalculateMetrics(self):
raw_results = [{
'Host to device bandwidth': 9250,
'Device to host bandwidth': 9000,
'Device to device bandwidth': 155000
}, {
'Host to device bandwidth': 8000,
'Device to host bandwidth': 8500,
'Device to device bandwidth': 152000
}]
samples = gpu_pcie_bandwidth_benchmark.\
_CalculateMetricsOverAllIterations(raw_results)
metrics = {i[0]: i[1] for i in samples}
sample = next(x for x in samples if x.metadata == {'iteration': 0}
and x.metric == 'Host to device bandwidth')
self.assertAlmostEqual(9250, sample.value)
sample = next(x for x in samples if x.metadata == {'iteration': 0}
and x.metric == 'Device to host bandwidth')
self.assertAlmostEqual(9000, sample.value)
sample = next(x for x in samples if x.metadata == {'iteration': 0}
and x.metric == 'Device to device bandwidth')
self.assertAlmostEqual(155000, sample.value)
sample = next(x for x in samples if x.metadata == {'iteration': 1}
and x.metric == 'Host to device bandwidth')
self.assertAlmostEqual(8000, sample.value)
sample = next(x for x in samples if x.metadata == {'iteration': 1}
and x.metric == 'Device to host bandwidth')
self.assertAlmostEqual(8500, sample.value)
sample = next(x for x in samples if x.metadata == {'iteration': 1}
and x.metric == 'Device to device bandwidth')
self.assertAlmostEqual(152000, sample.value)
self.assertAlmostEqual(8000, metrics['Host to device bandwidth, min'])
self.assertAlmostEqual(9250, metrics['Host to device bandwidth, max'])
self.assertAlmostEqual(8625.0, metrics['Host to device bandwidth, mean'])
self.assertAlmostEqual(625.0, metrics['Host to device bandwidth, stddev'])
self.assertAlmostEqual(8500, metrics['Device to host bandwidth, min'])
self.assertAlmostEqual(9000, metrics['Device to host bandwidth, max'])
self.assertAlmostEqual(8750.0, metrics['Device to host bandwidth, mean'])
self.assertAlmostEqual(250.0, metrics['Device to host bandwidth, stddev'])
self.assertAlmostEqual(152000, metrics['Device to device bandwidth, min'])
self.assertAlmostEqual(155000, metrics['Device to device bandwidth, max'])
self.assertAlmostEqual(153500, metrics['Device to device bandwidth, mean'])
self.assertAlmostEqual(1500, metrics['Device to device bandwidth, stddev'])
if __name__ == '__main__':
unittest.main()
|
from hashlib import sha1
import pytest
from homeassistant.bootstrap import async_setup_component
import homeassistant.components.mailbox as mailbox
from homeassistant.const import HTTP_INTERNAL_SERVER_ERROR, HTTP_NOT_FOUND
@pytest.fixture
def mock_http_client(hass, hass_client):
"""Start the Home Assistant HTTP component."""
config = {mailbox.DOMAIN: {"platform": "demo"}}
hass.loop.run_until_complete(async_setup_component(hass, mailbox.DOMAIN, config))
return hass.loop.run_until_complete(hass_client())
async def test_get_platforms_from_mailbox(mock_http_client):
"""Get platforms from mailbox."""
url = "/api/mailbox/platforms"
req = await mock_http_client.get(url)
assert req.status == 200
result = await req.json()
assert len(result) == 1 and "DemoMailbox" == result[0].get("name", None)
async def test_get_messages_from_mailbox(mock_http_client):
"""Get messages from mailbox."""
url = "/api/mailbox/messages/DemoMailbox"
req = await mock_http_client.get(url)
assert req.status == 200
result = await req.json()
assert len(result) == 10
async def test_get_media_from_mailbox(mock_http_client):
"""Get audio from mailbox."""
mp3sha = "3f67c4ea33b37d1710f772a26dd3fb43bb159d50"
msgtxt = "Message 1. Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
msgsha = sha1(msgtxt.encode("utf-8")).hexdigest()
url = "/api/mailbox/media/DemoMailbox/%s" % (msgsha)
req = await mock_http_client.get(url)
assert req.status == 200
data = await req.read()
assert sha1(data).hexdigest() == mp3sha
async def test_delete_from_mailbox(mock_http_client):
"""Get audio from mailbox."""
msgtxt1 = "Message 1. Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
msgtxt2 = "Message 3. Lorem ipsum dolor sit amet, consectetur adipiscing elit. "
msgsha1 = sha1(msgtxt1.encode("utf-8")).hexdigest()
msgsha2 = sha1(msgtxt2.encode("utf-8")).hexdigest()
for msg in [msgsha1, msgsha2]:
url = "/api/mailbox/delete/DemoMailbox/%s" % (msg)
req = await mock_http_client.delete(url)
assert req.status == 200
url = "/api/mailbox/messages/DemoMailbox"
req = await mock_http_client.get(url)
assert req.status == 200
result = await req.json()
assert len(result) == 8
async def test_get_messages_from_invalid_mailbox(mock_http_client):
"""Get messages from mailbox."""
url = "/api/mailbox/messages/mailbox.invalid_mailbox"
req = await mock_http_client.get(url)
assert req.status == HTTP_NOT_FOUND
async def test_get_media_from_invalid_mailbox(mock_http_client):
"""Get messages from mailbox."""
msgsha = "0000000000000000000000000000000000000000"
url = "/api/mailbox/media/mailbox.invalid_mailbox/%s" % (msgsha)
req = await mock_http_client.get(url)
assert req.status == HTTP_NOT_FOUND
async def test_get_media_from_invalid_msgid(mock_http_client):
"""Get messages from mailbox."""
msgsha = "0000000000000000000000000000000000000000"
url = "/api/mailbox/media/DemoMailbox/%s" % (msgsha)
req = await mock_http_client.get(url)
assert req.status == HTTP_INTERNAL_SERVER_ERROR
async def test_delete_from_invalid_mailbox(mock_http_client):
"""Get audio from mailbox."""
msgsha = "0000000000000000000000000000000000000000"
url = "/api/mailbox/delete/mailbox.invalid_mailbox/%s" % (msgsha)
req = await mock_http_client.delete(url)
assert req.status == HTTP_NOT_FOUND
|
import string
from queue import Empty
from kombu.utils.encoding import bytes_to_str, safe_str
from kombu.utils.json import loads, dumps
from kombu.utils.objects import cached_property
from . import virtual
try:
# azure-servicebus version <= 0.21.1
from azure.servicebus import ServiceBusService, Message, Queue
except ImportError:
try:
# azure-servicebus version >= 0.50.0
from azure.servicebus.control_client import \
ServiceBusService, Message, Queue
except ImportError:
ServiceBusService = Message = Queue = None
# All punctuation except underscore is replaced by an underscore (0x5f).
CHARS_REPLACE_TABLE = {
ord(c): 0x5f for c in string.punctuation if c not in '_'
}
class Channel(virtual.Channel):
"""Azure Service Bus channel."""
default_visibility_timeout = 1800 # 30 minutes.
default_wait_time_seconds = 5 # in seconds
default_peek_lock = False
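    # peek_lock=False means destructive receive-and-delete reads; True would
    # lock each message for later settlement (standard Service Bus semantics).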
domain_format = 'kombu%(vhost)s'
_queue_service = None
_queue_cache = {}
def __init__(self, *args, **kwargs):
if ServiceBusService is None:
raise ImportError('Azure Service Bus transport requires the '
'azure-servicebus library')
super().__init__(*args, **kwargs)
for queue in self.queue_service.list_queues():
self._queue_cache[queue] = queue
def entity_name(self, name, table=CHARS_REPLACE_TABLE):
"""Format AMQP queue name into a valid ServiceBus queue name."""
return str(safe_str(name)).translate(table)
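    # Illustrative: with the table above, every punctuation character except
    # underscore maps to '_', e.g. entity_name('celery.tasks@v1') returns
    # 'celery_tasks_v1'.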
def _new_queue(self, queue, **kwargs):
"""Ensure a queue exists in ServiceBus."""
queue = self.entity_name(self.queue_name_prefix + queue)
try:
return self._queue_cache[queue]
except KeyError:
self.queue_service.create_queue(queue, fail_on_exist=False)
q = self._queue_cache[queue] = self.queue_service.get_queue(queue)
return q
def _delete(self, queue, *args, **kwargs):
"""Delete queue by name."""
queue_name = self.entity_name(queue)
self._queue_cache.pop(queue_name, None)
self.queue_service.delete_queue(queue_name)
super()._delete(queue_name)
def _put(self, queue, message, **kwargs):
"""Put message onto queue."""
msg = Message(dumps(message))
self.queue_service.send_queue_message(self.entity_name(queue), msg)
def _get(self, queue, timeout=None):
"""Try to retrieve a single message off ``queue``."""
message = self.queue_service.receive_queue_message(
self.entity_name(queue),
timeout=timeout or self.wait_time_seconds,
peek_lock=self.peek_lock
)
if message.body is None:
raise Empty()
return loads(bytes_to_str(message.body))
def _size(self, queue):
"""Return the number of messages in a queue."""
return self._new_queue(queue).message_count
def _purge(self, queue):
"""Delete all current messages in a queue."""
n = 0
while True:
message = self.queue_service.read_delete_queue_message(
self.entity_name(queue), timeout=0.1)
if not message.body:
break
else:
n += 1
return n
@property
def queue_service(self):
if self._queue_service is None:
self._queue_service = ServiceBusService(
service_namespace=self.conninfo.hostname,
shared_access_key_name=self.conninfo.userid,
shared_access_key_value=self.conninfo.password)
return self._queue_service
@property
def conninfo(self):
return self.connection.client
@property
def transport_options(self):
return self.connection.client.transport_options
@cached_property
def visibility_timeout(self):
return (self.transport_options.get('visibility_timeout') or
self.default_visibility_timeout)
@cached_property
def queue_name_prefix(self):
return self.transport_options.get('queue_name_prefix', '')
@cached_property
def wait_time_seconds(self):
return self.transport_options.get('wait_time_seconds',
self.default_wait_time_seconds)
@cached_property
def peek_lock(self):
return self.transport_options.get('peek_lock',
self.default_peek_lock)
class Transport(virtual.Transport):
"""Azure Service Bus transport."""
Channel = Channel
polling_interval = 1
default_port = None
|
import hashlib
import logging
import os
logger = logging.getLogger(__name__)
class IncrementalHasher(object):
__slots__ = ('hasher',)
def __init__(self):
self.hasher = hashlib.md5()
def update(self, chunk):
self.hasher.update(chunk)
def get_result(self) -> str:
return self.hasher.hexdigest()
def hash_file_obj(fo) -> str:
hasher = hashlib.md5()
fo.seek(0)
for chunk in iter(lambda: fo.read(1024 ** 2), b''):
hasher.update(chunk)
return hasher.hexdigest()
def hash_file(file_name: str) -> str:
with open(file_name, 'rb') as f:
md5 = hash_file_obj(f)
logger.debug('MD5 of "%s" is %s' % (os.path.basename(file_name), md5))
return md5
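# Illustrative usage sketch (the file name is hypothetical):
#     h = IncrementalHasher()
#     h.update(b'first chunk')
#     h.update(b'second chunk')
#     digest = h.get_result()                  # hex MD5 over both chunks
#     digest2 = hash_file('/tmp/example.bin')  # streams in 1 MiB chunks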
|
import unittest
from argparse import Namespace
from trashcli.list_mount_points import os_mount_points
from trashcli.restore import RestoreCmd, make_trash_directories, \
TrashDirectory, TrashedFiles, Command
from .myStringIO import StringIO
from mock import call
from trashcli import restore
import datetime
from mock import Mock
from integration_tests.files import make_file, require_empty_dir
from trashcli.fs import remove_file
from trashcli.fs import contents_of
from trashcli.restore import TrashedFile
import os
class Test_parse_args(unittest.TestCase):
def test_default_path(self):
args = restore.parse_args([''], "curdir")
self.assertEqual((Command.RunRestore,
{'path': 'curdir',
'sort': 'date',
'trash_dir': None}),
args)
def test_path_specified_relative_path(self):
args = restore.parse_args(['', 'path'], "curdir")
self.assertEqual((Command.RunRestore,
{'path': 'path',
'sort': 'date',
'trash_dir': None}),
args)
def test_path_specified_fullpath(self):
args = restore.parse_args(['', '/a/path'], "ignored")
self.assertEqual((Command.RunRestore,
{'path': '/a/path',
'sort': 'date',
'trash_dir': None}),
args)
def test_show_version(self):
args = restore.parse_args(['', '--version'], "ignored")
self.assertEqual((Command.PrintVersion, None), args)
class TestListingInRestoreCmd(unittest.TestCase):
def setUp(self):
trash_directories = make_trash_directories()
trashed_files = TrashedFiles(trash_directories, None, contents_of)
self.cmd = RestoreCmd(None, None,
exit=None,
input=None,
curdir=lambda: "dir",
trashed_files=trashed_files,
mount_points=os_mount_points,
fs=restore.FileSystem())
self.cmd.handle_trashed_files = self.capture_trashed_files
self.trashed_files = Mock(spec=['all_trashed_files'])
self.cmd.trashed_files = self.trashed_files
def test_with_no_args_and_files_in_trashcan(self):
self.trashed_files.all_trashed_files.return_value = [
FakeTrashedFile('<date>', 'dir/location'),
FakeTrashedFile('<date>', 'dir/location'),
FakeTrashedFile('<date>', 'anotherdir/location')
]
self.cmd.run(['trash-restore'])
        assert [
            'dir/location',
            'dir/location',
        ] == self.original_locations
def test_with_no_args_and_files_in_trashcan_2(self):
self.trashed_files.all_trashed_files.return_value = [
FakeTrashedFile('<date>', 'dir/location'),
FakeTrashedFile('<date>', 'dir/location'),
FakeTrashedFile('<date>', 'specific/path'),
]
self.cmd.run(['trash-restore', 'specific/path'])
        assert [
            'specific/path',
        ] == self.original_locations
    def capture_trashed_files(self, arg):
self.original_locations = []
for trashed_file in arg:
self.original_locations.append(trashed_file.original_location)
class FakeTrashedFile(object):
def __init__(self, deletion_date, original_location):
self.deletion_date = deletion_date
self.original_location = original_location
def __repr__(self):
return ('FakeTrashedFile(\'%s\', ' % self.deletion_date +
'\'%s\')' % self.original_location)
class TestTrashRestoreCmd(unittest.TestCase):
def setUp(self):
self.stdout = StringIO()
self.stderr = StringIO()
trash_directories = make_trash_directories()
trashed_files = TrashedFiles(trash_directories, TrashDirectory(),
contents_of)
self.fs = Mock(spec=restore.FileSystem)
self.cmd = RestoreCmd(stdout=self.stdout,
stderr=self.stderr,
                              exit=self.capture_exit_status,
                              input=lambda x: self.user_reply,
version=None,
trashed_files=trashed_files,
mount_points=os_mount_points,
fs=self.fs)
def capture_exit_status(self, exit_status):
self.exit_status = exit_status
def test_should_print_version(self):
self.cmd.version = '1.2.3'
self.cmd.run(['trash-restore', '--version'])
assert 'trash-restore 1.2.3\n' == self.stdout.getvalue()
def test_with_no_args_and_no_files_in_trashcan(self):
self.cmd.curdir = lambda: "cwd"
self.cmd.run(['trash-restore'])
assert ("No files trashed from current dir ('cwd')\n" ==
self.stdout.getvalue())
def test_until_the_restore_unit(self):
self.fs.path_exists.return_value = False
trashed_file = TrashedFile(
'parent/path',
None,
'info_file',
'orig_file')
self.user_reply = '0'
self.cmd.restore_asking_the_user([trashed_file])
assert '' == self.stdout.getvalue()
assert '' == self.stderr.getvalue()
assert [call.path_exists('parent/path'),
call.mkdirs('parent'),
call.move('orig_file', 'parent/path'),
call.remove_file('info_file')] == self.fs.mock_calls
def test_when_user_reply_with_empty_string(self):
self.user_reply = ''
self.cmd.restore_asking_the_user([])
assert 'Exiting\n' == self.stdout.getvalue()
def test_when_user_reply_with_not_number(self):
self.user_reply = 'non numeric'
self.cmd.restore_asking_the_user([])
assert 'Invalid entry: not an index: non numeric\n' == \
self.stderr.getvalue()
assert '' == self.stdout.getvalue()
assert 1 == self.exit_status
class TestTrashedFileRestoreIntegration(unittest.TestCase):
def setUp(self):
remove_file_if_exists('parent/path')
remove_dir_if_exists('parent')
trash_directories = make_trash_directories()
trashed_files = TrashedFiles(trash_directories, TrashDirectory(),
contents_of)
self.cmd = RestoreCmd(None,
None,
exit=None,
input=None,
trashed_files=trashed_files,
mount_points=os_mount_points,
fs=restore.FileSystem())
def test_restore(self):
trashed_file = TrashedFile('parent/path',
None,
'info_file',
'orig')
open('orig','w').close()
open('info_file','w').close()
self.cmd.restore(trashed_file)
assert os.path.exists('parent/path')
assert not os.path.exists('info_file')
def test_restore_over_existing_file(self):
trashed_file = TrashedFile('path',None,None,None)
open('path','w').close()
self.assertRaises(IOError, lambda:self.cmd.restore(trashed_file))
def tearDown(self):
remove_file_if_exists('path')
remove_file_if_exists('parent/path')
remove_dir_if_exists('parent')
def test_restore_create_needed_directories(self):
require_empty_dir('sandbox')
make_file('sandbox/TrashDir/files/bar')
instance = TrashedFile('sandbox/foo/bar',
'deletion_date', 'info_file',
'sandbox/TrashDir/files/bar')
self.cmd.restore(instance)
assert os.path.exists("sandbox/foo/bar")
class TestTrashedFiles(unittest.TestCase):
def setUp(self):
self.trash_directories = Mock(spec=['trash_directories_or_user'])
self.trash_directory = Mock(spec=['all_info_files'])
self.contents_of = Mock()
self.trashed_files = TrashedFiles(self.trash_directories,
self.trash_directory,
self.contents_of)
def test_something(self):
self.trash_directories.trash_directories_or_user.return_value = \
[("path", "/volume")]
        self.contents_of.return_value = 'Path=name\nDeletionDate=2001-01-01T10:10:10'
self.trash_directory.all_info_files.return_value = \
[('trashinfo', 'info/info_path.trashinfo')]
trashed_files = list(self.trashed_files.all_trashed_files([], None))
trashed_file = trashed_files[0]
assert '/volume/name' == trashed_file.original_location
assert (datetime.datetime(2001, 1, 1, 10, 10, 10) ==
trashed_file.deletion_date)
assert 'info/info_path.trashinfo' == trashed_file.info_file
assert 'files/info_path' == trashed_file.original_file
assert ([call.trash_directories_or_user([], None)] ==
self.trash_directories.mock_calls)
class TestTrashedFilesIntegration(unittest.TestCase):
def setUp(self):
self.trash_directories = Mock(spec=['trash_directories_or_user'])
self.trash_directory = Mock(spec=['all_info_files'])
self.trashed_files = TrashedFiles(self.trash_directories,
self.trash_directory,
contents_of)
def test_something(self):
require_empty_dir('info')
self.trash_directories.trash_directories_or_user.return_value = \
[("path", "/volume")]
open('info/info_path.trashinfo', 'w').write(
'Path=name\nDeletionDate=2001-01-01T10:10:10')
self.trash_directory.all_info_files = Mock([], return_value=[
('trashinfo', 'info/info_path.trashinfo')])
trashed_files = list(self.trashed_files.all_trashed_files([], None))
trashed_file = trashed_files[0]
assert '/volume/name' == trashed_file.original_location
assert (datetime.datetime(2001, 1, 1, 10, 10, 10) ==
trashed_file.deletion_date)
assert 'info/info_path.trashinfo' == trashed_file.info_file
assert 'files/info_path' == trashed_file.original_file
def tearDown(self):
remove_file('info/info_path.trashinfo')
remove_dir_if_exists('info')
def remove_dir_if_exists(dir):
if os.path.exists(dir):
os.rmdir(dir)
def remove_file_if_exists(path):
if os.path.lexists(path):
os.unlink(path)
|
from datetime import timedelta
import logging
import pysma
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_HOST,
CONF_PASSWORD,
CONF_PATH,
CONF_SCAN_INTERVAL,
CONF_SSL,
CONF_VERIFY_SSL,
EVENT_HOMEASSISTANT_STOP,
)
from homeassistant.core import callback
from homeassistant.helpers.aiohttp_client import async_get_clientsession
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.event import async_track_time_interval
_LOGGER = logging.getLogger(__name__)
CONF_CUSTOM = "custom"
CONF_FACTOR = "factor"
CONF_GROUP = "group"
CONF_KEY = "key"
CONF_SENSORS = "sensors"
CONF_UNIT = "unit"
GROUPS = ["user", "installer"]
def _check_sensor_schema(conf):
"""Check sensors and attributes are valid."""
try:
valid = [s.name for s in pysma.Sensors()]
except (ImportError, AttributeError):
return conf
customs = list(conf[CONF_CUSTOM])
for sensor in conf[CONF_SENSORS]:
if sensor in customs:
_LOGGER.warning(
"All custom sensors will be added automatically, no need to include them in sensors: %s",
sensor,
)
elif sensor not in valid:
raise vol.Invalid(f"{sensor} does not exist")
return conf
CUSTOM_SCHEMA = vol.Any(
{
vol.Required(CONF_KEY): vol.All(cv.string, vol.Length(min=13, max=15)),
vol.Required(CONF_UNIT): cv.string,
vol.Optional(CONF_FACTOR, default=1): vol.Coerce(float),
vol.Optional(CONF_PATH): vol.All(cv.ensure_list, [cv.string]),
}
)
PLATFORM_SCHEMA = vol.All(
PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_SSL, default=False): cv.boolean,
vol.Optional(CONF_VERIFY_SSL, default=True): cv.boolean,
vol.Required(CONF_PASSWORD): cv.string,
vol.Optional(CONF_GROUP, default=GROUPS[0]): vol.In(GROUPS),
vol.Optional(CONF_SENSORS, default=[]): vol.Any(
cv.schema_with_slug_keys(cv.ensure_list), # will be deprecated
vol.All(cv.ensure_list, [str]),
),
vol.Optional(CONF_CUSTOM, default={}): cv.schema_with_slug_keys(
CUSTOM_SCHEMA
),
},
extra=vol.PREVENT_EXTRA,
),
_check_sensor_schema,
)
async def async_setup_platform(hass, config, async_add_entities, discovery_info=None):
"""Set up SMA WebConnect sensor."""
# Check config again during load - dependency available
config = _check_sensor_schema(config)
# Init all default sensors
sensor_def = pysma.Sensors()
# Sensor from the custom config
sensor_def.add(
[
pysma.Sensor(o[CONF_KEY], n, o[CONF_UNIT], o[CONF_FACTOR], o.get(CONF_PATH))
for n, o in config[CONF_CUSTOM].items()
]
)
# Use all sensors by default
config_sensors = config[CONF_SENSORS]
hass_sensors = []
used_sensors = []
    if isinstance(config_sensors, dict):  # will be removed in 0.99
if not config_sensors: # Use all sensors by default
config_sensors = {s.name: [] for s in sensor_def}
# Prepare all Home Assistant sensor entities
for name, attr in config_sensors.items():
sub_sensors = [sensor_def[s] for s in attr]
hass_sensors.append(SMAsensor(sensor_def[name], sub_sensors))
used_sensors.append(name)
used_sensors.extend(attr)
if isinstance(config_sensors, list):
if not config_sensors: # Use all sensors by default
config_sensors = [s.name for s in sensor_def]
used_sensors = list(set(config_sensors + list(config[CONF_CUSTOM])))
for sensor in used_sensors:
hass_sensors.append(SMAsensor(sensor_def[sensor], []))
used_sensors = [sensor_def[s] for s in set(used_sensors)]
async_add_entities(hass_sensors)
# Init the SMA interface
session = async_get_clientsession(hass, verify_ssl=config[CONF_VERIFY_SSL])
grp = config[CONF_GROUP]
protocol = "https" if config[CONF_SSL] else "http"
url = f"{protocol}://{config[CONF_HOST]}"
sma = pysma.SMA(session, url, config[CONF_PASSWORD], group=grp)
# Ensure we logout on shutdown
async def async_close_session(event):
"""Close the session."""
await sma.close_session()
hass.bus.async_listen(EVENT_HOMEASSISTANT_STOP, async_close_session)
backoff = 0
backoff_step = 0
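    # backoff counts update cycles to skip after empty reads; backoff_step
    # indexes the escalating skip schedule [1, 1, 1, 6, 30] below, after
    # which every retry waits 60 cycles.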
async def async_sma(event):
"""Update all the SMA sensors."""
nonlocal backoff, backoff_step
if backoff > 1:
backoff -= 1
return
values = await sma.read(used_sensors)
if not values:
try:
backoff = [1, 1, 1, 6, 30][backoff_step]
backoff_step += 1
except IndexError:
backoff = 60
return
backoff_step = 0
for sensor in hass_sensors:
sensor.async_update_values()
interval = config.get(CONF_SCAN_INTERVAL) or timedelta(seconds=5)
async_track_time_interval(hass, async_sma, interval)
class SMAsensor(Entity):
"""Representation of a SMA sensor."""
def __init__(self, pysma_sensor, sub_sensors):
"""Initialize the sensor."""
self._sensor = pysma_sensor
        self._sub_sensors = sub_sensors  # Can be removed in 0.99
self._attr = {s.name: "" for s in sub_sensors}
self._state = self._sensor.value
@property
def name(self):
"""Return the name of the sensor."""
return self._sensor.name
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit the value is expressed in."""
return self._sensor.unit
@property
    def device_state_attributes(self):  # Can be removed in 0.99
"""Return the state attributes of the sensor."""
return self._attr
@property
def poll(self):
"""SMA sensors are updated & don't poll."""
return False
@callback
def async_update_values(self):
"""Update this sensor."""
update = False
        for sens in self._sub_sensors:  # Can be removed in 0.99
newval = f"{sens.value} {sens.unit}"
if self._attr[sens.name] != newval:
update = True
self._attr[sens.name] = newval
if self._sensor.value != self._state:
update = True
self._state = self._sensor.value
if update:
self.async_write_ha_state()
@property
def unique_id(self):
"""Return a unique identifier for this sensor."""
return f"sma-{self._sensor.key}-{self._sensor.name}"
|
from typing import Callable, List, Optional
from gogogate2_api.common import (
AbstractDoor,
DoorStatus,
get_configured_doors,
get_door_by_id,
)
import voluptuous as vol
from homeassistant.components.cover import (
DEVICE_CLASS_GARAGE,
DEVICE_CLASS_GATE,
SUPPORT_CLOSE,
SUPPORT_OPEN,
CoverEntity,
)
from homeassistant.config_entries import SOURCE_IMPORT, ConfigEntry
from homeassistant.const import (
CONF_DEVICE,
CONF_IP_ADDRESS,
CONF_PASSWORD,
CONF_USERNAME,
)
from homeassistant.core import HomeAssistant
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.update_coordinator import CoordinatorEntity
from .common import (
DeviceDataUpdateCoordinator,
cover_unique_id,
get_data_update_coordinator,
)
from .const import DEVICE_TYPE_GOGOGATE2, DEVICE_TYPE_ISMARTGATE, DOMAIN, MANUFACTURER
COVER_SCHEMA = vol.Schema(
{
vol.Required(CONF_IP_ADDRESS): cv.string,
vol.Required(CONF_DEVICE, default=DEVICE_TYPE_GOGOGATE2): vol.In(
(DEVICE_TYPE_GOGOGATE2, DEVICE_TYPE_ISMARTGATE)
),
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_USERNAME): cv.string,
}
)
async def async_setup_platform(
hass: HomeAssistant, config: dict, add_entities: Callable, discovery_info=None
) -> None:
"""Convert old style file configs to new style configs."""
hass.async_create_task(
hass.config_entries.flow.async_init(
DOMAIN, context={"source": SOURCE_IMPORT}, data=config
)
)
async def async_setup_entry(
hass: HomeAssistant,
config_entry: ConfigEntry,
async_add_entities: Callable[[List[Entity], Optional[bool]], None],
) -> None:
"""Set up the config entry."""
data_update_coordinator = get_data_update_coordinator(hass, config_entry)
async_add_entities(
[
DeviceCover(config_entry, data_update_coordinator, door)
for door in get_configured_doors(data_update_coordinator.data)
]
)
class DeviceCover(CoordinatorEntity, CoverEntity):
"""Cover entity for goggate2."""
def __init__(
self,
config_entry: ConfigEntry,
data_update_coordinator: DeviceDataUpdateCoordinator,
door: AbstractDoor,
) -> None:
"""Initialize the object."""
super().__init__(data_update_coordinator)
self._config_entry = config_entry
self._door = door
self._api = data_update_coordinator.api
self._unique_id = cover_unique_id(config_entry, door)
self._is_available = True
@property
def unique_id(self) -> Optional[str]:
"""Return a unique ID."""
return self._unique_id
@property
def name(self):
"""Return the name of the door."""
return self._get_door().name
@property
def is_closed(self):
"""Return true if cover is closed, else False."""
door = self._get_door()
if door.status == DoorStatus.OPENED:
return False
if door.status == DoorStatus.CLOSED:
return True
return None
@property
def device_class(self):
"""Return the class of this device, from component DEVICE_CLASSES."""
door = self._get_door()
if door.gate:
return DEVICE_CLASS_GATE
return DEVICE_CLASS_GARAGE
@property
def supported_features(self):
"""Flag supported features."""
return SUPPORT_OPEN | SUPPORT_CLOSE
async def async_open_cover(self, **kwargs):
"""Open the door."""
await self.hass.async_add_executor_job(
self._api.open_door, self._get_door().door_id
)
async def async_close_cover(self, **kwargs):
"""Close the door."""
await self.hass.async_add_executor_job(
self._api.close_door, self._get_door().door_id
)
@property
def state_attributes(self):
"""Return the state attributes."""
attrs = super().state_attributes
attrs["door_id"] = self._get_door().door_id
return attrs
def _get_door(self) -> AbstractDoor:
door = get_door_by_id(self._door.door_id, self.coordinator.data)
self._door = door or self._door
return self._door
@property
def device_info(self):
"""Device info for the controller."""
data = self.coordinator.data
return {
"identifiers": {(DOMAIN, self._config_entry.unique_id)},
"name": self._config_entry.title,
"manufacturer": MANUFACTURER,
"model": data.model,
"sw_version": data.firmwareversion,
}
|
import xml.etree.ElementTree as ElementTree
import numpy as np
from ..utils import _check_fname, Bunch, warn
# XXX: to be split into a _parse helper, like the BVCT reader below
def _read_dig_montage_egi(
fname,
_scaling,
_all_data_kwargs_are_none,
):
if not _all_data_kwargs_are_none:
raise ValueError('hsp, hpi, elp, point_names, fif must all be '
'None if egi is not None')
_check_fname(fname, overwrite='read', must_exist=True)
root = ElementTree.parse(fname).getroot()
ns = root.tag[root.tag.index('{'):root.tag.index('}') + 1]
sensors = root.find('%ssensorLayout/%ssensors' % (ns, ns))
fids = dict()
dig_ch_pos = dict()
fid_name_map = {'Nasion': 'nasion',
'Right periauricular point': 'rpa',
'Left periauricular point': 'lpa'}
for s in sensors:
name, number, kind = s[0].text, int(s[1].text), int(s[2].text)
coordinates = np.array([float(s[3].text), float(s[4].text),
float(s[5].text)])
coordinates *= _scaling
# EEG Channels
if kind == 0:
dig_ch_pos['EEG %03d' % number] = coordinates
# Reference
elif kind == 1:
dig_ch_pos['EEG %03d' %
(len(dig_ch_pos.keys()) + 1)] = coordinates
            # XXX: The EGI reader needs to be fixed with this code here.
            # As a reference channel it should be called EEG000 or
            # REF to follow the conventions. It should be:
            # dig_ch_pos['REF'] = coordinates
# Fiducials
elif kind == 2:
fid_name = fid_name_map[name]
fids[fid_name] = coordinates
# Unknown
else:
            warn('Unknown sensor type %s detected. Skipping sensor... '
                 'Proceed with caution!' % kind)
return Bunch(
# EGI stuff
nasion=fids['nasion'], lpa=fids['lpa'], rpa=fids['rpa'],
dig_ch_pos=dig_ch_pos, coord_frame='unknown',
# not EGI stuff
hsp=None, hpi=None, elp=None, point_names=None,
)
def _parse_brainvision_dig_montage(fname, scale):
FID_NAME_MAP = {'Nasion': 'nasion', 'RPA': 'rpa', 'LPA': 'lpa'}
root = ElementTree.parse(fname).getroot()
sensors = root.find('CapTrakElectrodeList')
fids, dig_ch_pos = dict(), dict()
for s in sensors:
name = s.find('Name').text
is_fid = name in FID_NAME_MAP
coordinates = scale * np.array([float(s.find('X').text),
float(s.find('Y').text),
float(s.find('Z').text)])
# Fiducials
if is_fid:
fids[FID_NAME_MAP[name]] = coordinates
# EEG Channels
else:
dig_ch_pos[name] = coordinates
return dict(
# BVCT stuff
nasion=fids['nasion'], lpa=fids['lpa'], rpa=fids['rpa'],
ch_pos=dig_ch_pos, coord_frame='unknown'
)
|
import sys
import functools
import collections
import validators
from schema import Schema, SchemaError, And, Use, Optional, Regex
from flask import make_response, request, abort
from app.utils.ResponseUtil import standard_response
PY3 = sys.version_info.major > 2
if PY3:
text_type = str
else:
text_type = unicode # noqa
class Validator(object):
def __init__(self):
self.optional = Optional
self.regex = Regex
self.bool = lambda: bool
for name in ['email', 'ipv4', 'ipv6', 'domain',
'url', 'mac_address', 'uuid', 'iban']:
setattr(self, name, self._make_validator(name))
def _make_validator(self, name):
validate = getattr(validators, name)
return lambda: And(validate)
def int(self, min=-sys.maxsize, max=sys.maxsize):
return And(Use(int), lambda x: min <= x <= max)
def float(self, min=-sys.float_info.max, max=sys.float_info.max):
return And(Use(float), lambda x: min <= x <= max)
def str(self, minlen=0, maxlen=1024 * 1024):
return And(text_type, lambda x: minlen <= len(x) <= maxlen)
def enum(self, *items):
return And(text_type, lambda x: x in items)
def register(self, name):
"""A decorator for register validator"""
def decorator(f):
setattr(self, name, f)
return f
return decorator
def get_data(self):
"""
Get request data based on request.method and request.mimetype
Returns:
A regular dict which can be modified(scheme will modify data
on validating)
"""
if request.method in ['GET', 'DELETE']:
return request.args.to_dict()
else:
if request.mimetype == 'application/json':
data = request.get_json()
if not isinstance(data, collections.Mapping):
self.handle_error('JSON content must be object')
return data
else:
return request.form.to_dict()
def handle_error(self, message):
"""Abort with suitable error response"""
message = standard_response(0, message)
abort(400, response=make_response(message, 200))
def param(self, schema):
"""A decorator for validate request data"""
if not isinstance(schema, collections.Mapping):
raise TypeError('schema must be Mapping')
# add error message
schema = {k: And(v, error='%s invalid' % k)
for k, v in schema.items()}
validate = Schema(schema).validate
def decorator(f):
@functools.wraps(f)
def wrapper(*args, **kwargs):
data = self.get_data()
try:
data = validate(data)
except SchemaError as ex:
self.handle_error(str(ex))
kwargs.update(data)
return f(*args, **kwargs)
return wrapper
return decorator
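# Illustrative usage sketch (``app`` and the field names are hypothetical):
#     validator = Validator()
#
#     @app.route('/users', methods=['POST'])
#     @validator.param({'email': validator.email(),
#                       'age': validator.int(min=0, max=150)})
#     def create_user(email, age):
#         # validated values are injected as keyword arguments
#         return standard_response(1, {'email': email, 'age': age})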
|
from typing import Optional
from homeassistant.components.switch import SwitchEntity
from . import AVAILABLE_PUMPS, DATA_ECOAL_BOILER
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up switches based on ecoal interface."""
if discovery_info is None:
return
ecoal_contr = hass.data[DATA_ECOAL_BOILER]
switches = []
for pump_id in discovery_info:
name = AVAILABLE_PUMPS[pump_id]
switches.append(EcoalSwitch(ecoal_contr, name, pump_id))
add_entities(switches, True)
class EcoalSwitch(SwitchEntity):
"""Representation of Ecoal switch."""
def __init__(self, ecoal_contr, name, state_attr):
"""
Initialize switch.
Sets HA switch to state as read from controller.
"""
self._ecoal_contr = ecoal_contr
self._name = name
self._state_attr = state_attr
        # The ecoal controller follows the convention that the same postfix
        # is used both for the setter method:
        #     set_<attr>()
        # and for the attribute name on the status instance:
        #     status.<attr>
self._contr_set_fun = getattr(self._ecoal_contr, f"set_{state_attr}")
# No value set, will be read from controller instead
self._state = None
@property
def name(self) -> Optional[str]:
"""Return the name of the switch."""
return self._name
def update(self):
"""Fetch new state data for the sensor.
This is the only method that should fetch new data for Home Assistant.
"""
status = self._ecoal_contr.get_cached_status()
self._state = getattr(status, self._state_attr)
def invalidate_ecoal_cache(self):
"""Invalidate ecoal interface cache.
        Forces the next read from the ecoal interface to bypass the cache.
"""
self._ecoal_contr.status = None
@property
def is_on(self) -> bool:
"""Return true if device is on."""
return self._state
def turn_on(self, **kwargs) -> None:
"""Turn the device on."""
self._contr_set_fun(1)
self.invalidate_ecoal_cache()
def turn_off(self, **kwargs) -> None:
"""Turn the device off."""
self._contr_set_fun(0)
self.invalidate_ecoal_cache()
|
import time
from absl import flags
from perfkitbenchmarker import configs
from perfkitbenchmarker import errors
from perfkitbenchmarker import sample
from perfkitbenchmarker import vm_util
from perfkitbenchmarker.linux_packages import build_tools
from perfkitbenchmarker.linux_packages import speccpu
from perfkitbenchmarker.linux_packages import speccpu2017
INT_SUITE = ['perlbench', 'gcc', 'mcf', 'omnetpp',
'xalancbmk', 'x264', 'deepsjeng', 'leela',
'exchange2', 'xz']
INTSPEED_SUITE = [benchmark + '_s' for benchmark in INT_SUITE]
INTRATE_SUITE = [benchmark + '_r' for benchmark in INT_SUITE]
COMMON_FP_SUITE = ['bwaves', 'cactuBSSN', 'lbm', 'wrf', 'cam4', 'imagick',
'nab', 'fotonik3d', 'roms']
FPSPEED_SUITE = [benchmark + '_s' for benchmark in COMMON_FP_SUITE] + ['pop2_s']
FPRATE_SUITE = [benchmark + '_r' for benchmark in COMMON_FP_SUITE] + [
'namd_r', 'parest_r', 'povray_r', 'blender_r']
FLAGS = flags.FLAGS
flags.DEFINE_boolean('spec17_build_only', False,
'Compile benchmarks only, but don\'t run benchmarks. '
'Defaults to False. The benchmark fails if the build '
'fails.')
flags.DEFINE_boolean('spec17_rebuild', True,
'Rebuild spec binaries, defaults to True. Set to False '
'when using run_stage_iterations > 1 to avoid recompiling')
BENCHMARK_NAME = 'speccpu2017'
BENCHMARK_CONFIG = """
speccpu2017:
description: Runs SPEC CPU2017
vm_groups:
default:
vm_spec: *default_single_core
disk_spec: *default_500_gb
os_type: ubuntu1604
"""
KB_TO_GB_MULTIPLIER = 1000000
LOG_FILENAME = {
'fprate': 'CPU2017.001.fprate.refrate.txt',
'fpspeed': 'CPU2017.001.fpspeed.refspeed.txt',
'intrate': 'CPU2017.001.intrate.refrate.txt',
'intspeed': 'CPU2017.001.intspeed.refspeed.txt',
}
def GetConfig(user_config):
return configs.LoadConfig(BENCHMARK_CONFIG, user_config, BENCHMARK_NAME)
def CheckPrerequisites(benchmark_config):
"""FDO is only allow with peak measurement.
Args:
benchmark_config: benchmark_config
Raises:
errors.Config.InvalidValue: On invalid flag setting.
"""
del benchmark_config # unused
if FLAGS.spec17_fdo and FLAGS.spec_runmode == 'base':
raise errors.Config.InvalidValue(
'Feedback Directed Optimization is not allowed with base report.')
def CheckVmPrerequisites(vm):
"""Checks the system memory on this vm.
Rate runs require 2 GB minimum system memory.
Speed runs require 16 GB minimum system memory.
Taken from https://www.spec.org/cpu2017/Docs/system-requirements.html
Args:
vm: virtual machine to run spec on.
Raises:
errors.Config.InvalidValue: On insufficient vm memory.
"""
available_memory = vm.total_free_memory_kb
if 'intspeed' in FLAGS.spec17_subset or 'fpspeed' in FLAGS.spec17_subset:
# AWS machines that advertise 16 GB have slightly less than that
if available_memory < 15.6 * KB_TO_GB_MULTIPLIER:
raise errors.Config.InvalidValue(
'Available memory of %s GB is insufficient for spec17 speed runs.'
% (available_memory / KB_TO_GB_MULTIPLIER))
if 'intrate' in FLAGS.spec17_subset or 'fprate' in FLAGS.spec17_subset:
if available_memory < 2 * KB_TO_GB_MULTIPLIER:
raise errors.Config.InvalidValue(
'Available memory of %s GB is insufficient for spec17 rate runs.'
% (available_memory / KB_TO_GB_MULTIPLIER))
def Prepare(benchmark_spec):
"""Installs SPEC CPU2017 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm_util.RunThreaded(_Prepare, vms)
def _Prepare(vm):
CheckVmPrerequisites(vm)
vm.Install('speccpu2017')
# Set attribute outside of the install function, so benchmark will work
# even with --install_packages=False.
config = speccpu2017.GetSpecInstallConfig(vm.GetScratchDir())
setattr(vm, speccpu.VM_STATE_ATTR, config)
def Run(benchmark_spec):
"""Runs SPEC CPU2017 on the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
Returns:
A list of lists of sample.Sample objects.
"""
vms = benchmark_spec.vms
samples = []
grouped_samples = vm_util.RunThreaded(_Run, vms)
for samples_list in grouped_samples:
samples.extend(samples_list)
return samples
def _Run(vm):
"""See base method.
Args:
vm: The vm to run the benchmark on.
Returns:
A list of sample.Sample objects.
"""
# swap only if necessary; free local node memory and avoid remote memory;
# reset caches; set stack size to unlimited
# Also consider setting enable_transparent_hugepages flag to true
cmd = ('echo 1 | sudo tee /proc/sys/vm/swappiness && '
'echo 1 | sudo tee /proc/sys/vm/zone_reclaim_mode && '
'sync ; echo 3 | sudo tee /proc/sys/vm/drop_caches && '
'ulimit -s unlimited && ')
cmd += 'runcpu '
if FLAGS.spec17_build_only:
cmd += '--action build '
if FLAGS.spec17_rebuild:
cmd += '--rebuild '
version_specific_parameters = []
# rate runs require 2 GB minimum system main memory per copy,
# not including os overhead. Refer to:
# https://www.spec.org/cpu2017/Docs/system-requirements.html#memory
copies = min(vm.NumCpusForBenchmark(),
vm.total_free_memory_kb // (2 * KB_TO_GB_MULTIPLIER))
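  # Illustrative: a VM with 16 vCPUs and 64,000,000 KB of free memory gets
  # min(16, 64000000 // 2000000) = min(16, 32) = 16 copies.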
version_specific_parameters.append(' --copies=%s ' %
(FLAGS.spec17_copies or copies))
version_specific_parameters.append(
' --threads=%s ' % (FLAGS.spec17_threads or vm.NumCpusForBenchmark()))
if FLAGS.spec17_fdo:
version_specific_parameters.append('--feedback ')
vm.RemoteCommand('cd /scratch/cpu2017; mkdir fdo_profiles')
start_time = time.time()
stdout, _ = speccpu.Run(vm, cmd, ' '.join(FLAGS.spec17_subset),
version_specific_parameters)
if FLAGS.spec17_build_only:
if 'Error' in stdout and 'Please review this file' in stdout:
raise errors.Benchmarks.RunError('Error during SPEC compilation.')
return [
sample.Sample(
'compilation_time',
time.time() - start_time, 's', {
'spec17_subset': FLAGS.spec17_subset,
'gcc_version': build_tools.GetVersion(vm, 'gcc')
})
]
partial_results = True
# Do not allow partial results if any benchmark subset is a full suite.
for benchmark_subset in FLAGS.benchmark_subset:
if benchmark_subset in ['intspeed', 'fpspeed', 'intrate', 'fprate']:
partial_results = False
log_files = set()
for test in FLAGS.spec17_subset:
if test in LOG_FILENAME:
log_files.add(LOG_FILENAME[test])
else:
if test in INTSPEED_SUITE:
log_files.add(LOG_FILENAME['intspeed'])
elif test in INTRATE_SUITE:
log_files.add(LOG_FILENAME['intrate'])
elif test in FPSPEED_SUITE:
log_files.add(LOG_FILENAME['fpspeed'])
elif test in FPRATE_SUITE:
log_files.add(LOG_FILENAME['fprate'])
samples = speccpu.ParseOutput(vm, log_files, partial_results, None)
for item in samples:
item.metadata['vm_name'] = vm.name
return samples
def Cleanup(benchmark_spec):
"""Cleans up SPEC CPU2017 from the target vm.
Args:
benchmark_spec: The benchmark specification. Contains all data that is
required to run the benchmark.
"""
vms = benchmark_spec.vms
vm_util.RunThreaded(speccpu.Uninstall, vms)
|
import alsaaudio
import logging
import wave
from kalliope.core.PlayerModule import PlayerModule
logging.basicConfig()
logger = logging.getLogger("kalliope")
CHUNK = 1024
ALSAAUDIO_BIT_MAPPING = {8: alsaaudio.PCM_FORMAT_S8,
16: alsaaudio.PCM_FORMAT_S16_LE,
24: alsaaudio.PCM_FORMAT_S24_LE,
32: alsaaudio.PCM_FORMAT_S32_LE}
STANDARD_SAMPLE_RATES = (
8000, 9600, 11025, 12000, 16000, 22050, 24000, 32000, 44100, 48000, 88200,
96000, 192000
)
DEVICE_TYPE_ALL = 'all'
DEVICE_TYPE_INPUT = 'input'
DEVICE_TYPE_OUTPUT = 'output'
def bits_to_samplefmt(bits):
    if bits in ALSAAUDIO_BIT_MAPPING:
return ALSAAUDIO_BIT_MAPPING[bits]
else:
raise ValueError('Unsupported format')
class Pyalsaaudio(PlayerModule):
"""
    This class represents the Player object used to play all the sounds of the system.
"""
def __init__(self, **kwargs):
super(Pyalsaaudio, self).__init__(**kwargs)
# List devices
logger.debug("[pyalsaaudio.__init__] instance")
logger.debug("[pyalsaaudio.__init__] devices : %s " % (str(self.get_devices(DEVICE_TYPE_OUTPUT))))
logger.debug("[pyalsaaudio.__init__] args : %s " % str(kwargs))
self.device = kwargs.get('device', 'default')
@staticmethod
def get_devices(device_type=DEVICE_TYPE_ALL):
devices = set()
if device_type in (DEVICE_TYPE_ALL,
DEVICE_TYPE_OUTPUT):
devices.update(set(alsaaudio.pcms(alsaaudio.PCM_PLAYBACK)))
if device_type in (DEVICE_TYPE_ALL,
DEVICE_TYPE_INPUT):
devices.update(set(alsaaudio.pcms(alsaaudio.PCM_CAPTURE)))
device_names = sorted(list(devices))
num_devices = len(device_names)
logger.debug('Found %d ALSA devices', num_devices)
return device_names
def play(self, file_path):
if self.convert:
self.convert_mp3_to_wav(file_path_mp3=file_path)
f = wave.open(file_path, 'rb')
pcm_type = alsaaudio.PCM_PLAYBACK
stream = alsaaudio.PCM(type=pcm_type,
mode=alsaaudio.PCM_NORMAL,
device=self.device)
# Set attributes
stream.setchannels(f.getnchannels())
stream.setrate(f.getframerate())
bits = f.getsampwidth()*8
stream.setformat(bits_to_samplefmt(bits))
stream.setperiodsize(CHUNK)
logger.debug("[PyAlsaAudioPlayer] %d channels, %d sampling rate, %d bit" % (f.getnchannels(),
f.getframerate(),
bits))
data = f.readframes(CHUNK)
while data:
            # Write the audio frames to the ALSA stream
stream.write(data)
data = f.readframes(CHUNK)
f.close()
stream.close()
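# Hypothetical usage sketch (not part of this module): the player is normally
# instantiated by Kalliope from settings.yml, but standalone it would look like:
#   player = Pyalsaaudio(device='default')
#   player.play('/path/to/sound.wav')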
|
import time
import pytest
import ecdsa
import tinychain as t
from tinychain import Block, TxIn, TxOut, Transaction
from client import make_txin
def test_merkle_trees():
root = t.get_merkle_root('foo', 'bar')
fooh = t.sha256d('foo')
barh = t.sha256d('bar')
assert root
assert root.val == t.sha256d(fooh + barh)
assert root.children[0].val == fooh
assert root.children[1].val == barh
root = t.get_merkle_root('foo', 'bar', 'baz')
bazh = t.sha256d('baz')
assert root
assert len(root.children) == 2
assert root.children[0].val == t.sha256d(fooh + barh)
assert root.children[1].val == t.sha256d(bazh + bazh)
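    # With an odd number of leaves, the last leaf hash is paired with itself
    # (hence sha256d(bazh + bazh) above), mirroring Bitcoin-style merkle trees.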
def test_serialization():
op1 = t.OutPoint(txid='c0ffee', txout_idx=0)
op2 = t.OutPoint(txid='c0ffee', txout_idx=1)
txin1 = t.TxIn(
to_spend=op1, unlock_sig=b'oursig', unlock_pk=b'foo', sequence=1)
txin2 = t.TxIn(
to_spend=op2, unlock_sig=b'oursig', unlock_pk=b'foo', sequence=2)
txout = t.TxOut(value=101, to_address='1zxoijw')
txn1 = t.Transaction(txins=[txin1], txouts=[txout], locktime=0)
txn2 = t.Transaction(txins=[txin2], txouts=[txout], locktime=0)
block = t.Block(
1, 'deadbeef', 'c0ffee', int(time.time()), 100, 100, [txn1, txn2])
utxo = t.UnspentTxOut(
*txout, txid=txn1.id, txout_idx=0, is_coinbase=False, height=0)
utxo_set = [utxo.outpoint, utxo]
for obj in (
op1, op2, txin1, txin2, txout, txn1, txn2, block, utxo, utxo_set):
assert t.deserialize(t.serialize(obj)) == obj
def test_build_spend_message():
txout = t.TxOut(value=101, to_address='1zz8w9')
txin = t.TxIn(
to_spend=t.OutPoint('c0ffee', 0),
unlock_sig=b'oursig', unlock_pk=b'foo', sequence=1)
txn = t.Transaction(txins=[txin], txouts=[txout], locktime=0)
spend_msg = t.build_spend_message(
txin.to_spend, txin.unlock_pk, txin.sequence, txn.txouts)
assert spend_msg == (
b'677c2d8f9843d1cc456e7bfbc507c0f6d07d19c69e6bca0cbaa7bfaea4dd840a')
# Adding a new output to the txn creates a new spend message.
txn.txouts.append(t.TxOut(value=1, to_address='1zz'))
assert t.build_spend_message(
txin.to_spend, txin.unlock_pk, txin.sequence, txn.txouts) != spend_msg
def test_get_median_time_past():
t.active_chain = []
assert t.get_median_time_past(10) == 0
timestamps = [1, 30, 60, 90, 400]
    t.active_chain = [_dummy_block(timestamp=ts) for ts in timestamps]
assert t.get_median_time_past(1) == 400
assert t.get_median_time_past(3) == 90
assert t.get_median_time_past(2) == 90
assert t.get_median_time_past(5) == 60
def test_dependent_txns_in_single_block():
t.active_chain = []
t.mempool = {}
assert t.connect_block(chain1[0]) == t.ACTIVE_CHAIN_IDX
assert t.connect_block(chain1[1]) == t.ACTIVE_CHAIN_IDX
assert len(t.active_chain) == 2
assert len(t.utxo_set) == 2
utxo1 = t.utxo_set[list(t.utxo_set.keys())[0]]
txout1 = TxOut(value=901, to_address=utxo1.to_address)
txin1 = make_txin(signing_key, utxo1.outpoint, txout1)
txn1 = t.Transaction(txins=[txin1], txouts=[txout1], locktime=0)
# Create a transaction that is dependent on the yet-unconfirmed transaction
# above.
txout2 = TxOut(value=9001, to_address=txout1.to_address)
txin2 = make_txin(signing_key, t.OutPoint(txn1.id, 0), txout2)
txn2 = t.Transaction(txins=[txin2], txouts=[txout2], locktime=0)
# Assert that we don't accept this txn -- too early to spend the coinbase.
with pytest.raises(t.TxnValidationError) as excinfo:
t.validate_txn(txn2)
assert 'UTXO not ready' in str(excinfo.value)
t.connect_block(chain1[2])
# Now the coinbase has matured to spending.
t.add_txn_to_mempool(txn1)
assert txn1.id in t.mempool
# In txn2, we're attempting to spend more than is available (9001 vs. 901).
assert not t.add_txn_to_mempool(txn2)
with pytest.raises(t.TxnValidationError) as excinfo:
t.validate_txn(txn2)
assert 'Spend value is more than available' in str(excinfo.value)
# Recreate the transaction with an acceptable value.
txout2 = TxOut(value=901, to_address=txout1.to_address)
txin2 = make_txin(signing_key, t.OutPoint(txn1.id, 0), txout2)
txn2 = t.Transaction(txins=[txin2], txouts=[txout2], locktime=0)
t.add_txn_to_mempool(txn2)
assert txn2.id in t.mempool
block = t.assemble_and_solve_block(t.pubkey_to_address(
signing_key.get_verifying_key().to_string()))
assert t.connect_block(block) == t.ACTIVE_CHAIN_IDX
assert t.active_chain[-1] == block
assert block.txns[1:] == [txn1, txn2]
assert txn1.id not in t.mempool
assert txn2.id not in t.mempool
assert t.OutPoint(txn1.id, 0) not in t.utxo_set # Spent by txn2.
assert t.OutPoint(txn2.id, 0) in t.utxo_set
def test_pubkey_to_address():
assert t.pubkey_to_address(
b'k\xd4\xd8M3\xc8\xf7h*\xd2\x16O\xe39a\xc9]\x18i\x08\xf1\xac\xb8\x0f'
b'\x9af\xdd\xd1\'\xe2\xc2v\x8eCo\xd3\xc4\xff\x0e\xfc\x9eBzS\\=\x7f'
b'\x7f\x1a}\xeen"\x9f\x9c\x17E\xeaMH\x88\xec\xf5F') == (
'18kZswtcPRKCcf9GQsJLNFEMUE8V9tCJr')
def test_reorg():
t.active_chain = []
for block in chain1:
assert t.connect_block(block) == t.ACTIVE_CHAIN_IDX
t.side_branches = []
t.mempool = {}
t.utxo_set = {}
_add_to_utxo_for_chain(t.active_chain)
def assert_no_change():
assert t.active_chain == chain1
assert t.mempool == {}
assert [k.txid[:6] for k in t.utxo_set] == [
'8b7bfc', 'b8a642', '6708b9']
assert len(t.utxo_set) == 3
# No reorg necessary when side branches are empty.
assert not t.reorg_if_necessary()
# No reorg necessary when side branch is shorter than the main chain.
for block in chain2[1:2]:
assert t.connect_block(block) == 1
assert not t.reorg_if_necessary()
assert t.side_branches == [chain2[1:2]]
assert_no_change()
# No reorg necessary when side branch is as long as the main chain.
assert t.connect_block(chain2[2]) == 1
assert not t.reorg_if_necessary()
assert t.side_branches == [chain2[1:3]]
assert_no_change()
# No reorg necessary when side branch is a longer but invalid chain.
# Block doesn't connect to anything because it's invalid.
assert t.connect_block(chain3_faulty[3]) is None
assert not t.reorg_if_necessary()
# No change in side branches for an invalid block.
assert t.side_branches == [chain2[1:3]]
assert_no_change()
# Reorg necessary when a side branch is longer than the main chain.
assert t.connect_block(chain2[3]) == 1
assert t.connect_block(chain2[4]) == 1
# Chain1 was reorged into side_branches.
assert [len(c) for c in t.side_branches] == [2]
assert [b.id for b in t.side_branches[0]] == [b.id for b in chain1[1:]]
assert t.side_branches == [chain1[1:]]
assert t.mempool == {}
assert [k.txid[:6] for k in t.utxo_set] == [
'8b7bfc', 'b8a642', '6708b9', '543683', '53f3c1']
def _add_to_utxo_for_chain(chain):
for block in chain:
for tx in block.txns:
for i, txout in enumerate(tx.txouts):
t.add_to_utxo(txout, tx, i, tx.is_coinbase, len(chain))
signing_key = ecdsa.SigningKey.from_string(
b'\xf1\xad2y\xbf\xa2x\xabn\xfbO\x98\xf7\xa7\xb4\xc0\xf4fOzX\xbf\xf6\\\xd2\xcb-\x1d:0 \xa7',
curve=ecdsa.SECP256k1)
chain1 = [
# Block id: 000000154275885a72c004d02aaa9524fc0c4896aef0b0f3bcde2de38f9be561
Block(version=0, prev_block_hash=None, merkle_hash='7118894203235a955a908c0abfc6d8fe6edec47b0a04ce1bf7263da3b4366d22', timestamp=1501821412, bits=24, nonce=10126761, txns=[Transaction(txins=[TxIn(to_spend=None, unlock_sig=b'0', unlock_pk=None, sequence=0)], txouts=[TxOut(value=5000000000, to_address='143UVyz7ooiAv1pMqbwPPpnH4BV9ifJGFF')], locktime=None)]),
# Block id: 00000095f785bc8fbd6007b36c2f1c414d66db930e2e7354076c035c8f92700b
Block(version=0, prev_block_hash='000000154275885a72c004d02aaa9524fc0c4896aef0b0f3bcde2de38f9be561', merkle_hash='27661bd9b23552832becf6c18cb6035a3d77b4e66b5520505221a93922eb82f2', timestamp=1501826444, bits=24, nonce=22488415, txns=[Transaction(txins=[TxIn(to_spend=None, unlock_sig=b'1', unlock_pk=None, sequence=0)], txouts=[TxOut(value=5000000000, to_address='1Piq91dFUqSb7tdddCWvuGX5UgdzXeoAwA')], locktime=None)]),
# Block id: 000000f9b679482f24902297fc59c745e759436ac95e93d2c1eff4d5dbd39e33
Block(version=0, prev_block_hash='00000095f785bc8fbd6007b36c2f1c414d66db930e2e7354076c035c8f92700b', merkle_hash='031f45ad7b5ddf198f7dfa88f53c0262fb14c850c5c1faf506258b9dcad32aef', timestamp=1501826556, bits=24, nonce=30715680, txns=[Transaction(txins=[TxIn(to_spend=None, unlock_sig=b'2', unlock_pk=None, sequence=0)], txouts=[TxOut(value=5000000000, to_address='1Piq91dFUqSb7tdddCWvuGX5UgdzXeoAwA')], locktime=None)])
]
chain2 = [
# Block id: 000000154275885a72c004d02aaa9524fc0c4896aef0b0f3bcde2de38f9be561
Block(version=0, prev_block_hash=None, merkle_hash='7118894203235a955a908c0abfc6d8fe6edec47b0a04ce1bf7263da3b4366d22', timestamp=1501821412, bits=24, nonce=10126761, txns=[Transaction(txins=[TxIn(to_spend=None, unlock_sig=b'0', unlock_pk=None, sequence=0)], txouts=[TxOut(value=5000000000, to_address='143UVyz7ooiAv1pMqbwPPpnH4BV9ifJGFF')], locktime=None)]),
# Block id: 000000e4785f0f384d13e24caaddcf6723ee008d6a179428ce9246e1b32e3b2c
Block(version=0, prev_block_hash='000000154275885a72c004d02aaa9524fc0c4896aef0b0f3bcde2de38f9be561', merkle_hash='27661bd9b23552832becf6c18cb6035a3d77b4e66b5520505221a93922eb82f2', timestamp=1501826757, bits=24, nonce=25773772, txns=[Transaction(txins=[TxIn(to_spend=None, unlock_sig=b'1', unlock_pk=None, sequence=0)], txouts=[TxOut(value=5000000000, to_address='1Piq91dFUqSb7tdddCWvuGX5UgdzXeoAwA')], locktime=None)]),
# Block id: 000000a1698495a3b125d9cd08837cdabffa192639588cdda8018ed8f5af3f8c
Block(version=0, prev_block_hash='000000e4785f0f384d13e24caaddcf6723ee008d6a179428ce9246e1b32e3b2c', merkle_hash='031f45ad7b5ddf198f7dfa88f53c0262fb14c850c5c1faf506258b9dcad32aef', timestamp=1501826872, bits=24, nonce=16925076, txns=[Transaction(txins=[TxIn(to_spend=None, unlock_sig=b'2', unlock_pk=None, sequence=0)], txouts=[TxOut(value=5000000000, to_address='1Piq91dFUqSb7tdddCWvuGX5UgdzXeoAwA')], locktime=None)]),
# Up until this point, we're same length as chain1.
# This block is where chain3_faulty goes bad.
# Block id: 000000ef44dd5a56c89a43b9cff28e51e5fd91624be3a2de722d864ae4f6a853
Block(version=0, prev_block_hash='000000a1698495a3b125d9cd08837cdabffa192639588cdda8018ed8f5af3f8c', merkle_hash='dbf593cf959b3a03ea97bbeb7a44ee3f4841b338d5ceaa5705b637c853c956ef', timestamp=1501826949, bits=24, nonce=12052237, txns=[Transaction(txins=[TxIn(to_spend=None, unlock_sig=b'3', unlock_pk=None, sequence=0)], txouts=[TxOut(value=5000000000, to_address='1Piq91dFUqSb7tdddCWvuGX5UgdzXeoAwA')], locktime=None)]),
# Block id:
Block(version=0, prev_block_hash='000000ef44dd5a56c89a43b9cff28e51e5fd91624be3a2de722d864ae4f6a853', merkle_hash='a3a55fe5e9f9e5e3282333ac4d149fd186f157a3c1d2b2e04af78c20a519f6b9', timestamp=1501827000, bits=24, nonce=752898, txns=[Transaction(txins=[TxIn(to_spend=None, unlock_sig=b'4', unlock_pk=None, sequence=0)], txouts=[TxOut(value=5000000000, to_address='1Piq91dFUqSb7tdddCWvuGX5UgdzXeoAwA')], locktime=None)])
]
# Make this chain invalid.
chain3_faulty = list(chain2)
chain3_faulty[-2] = chain3_faulty[-2]._replace(nonce=1)
def _dummy_block(**kwargs):
defaults = dict(
version=1, prev_block_hash='c0ffee', merkle_hash='deadbeef',
timestamp=1, bits=1, nonce=1, txns=[])
return t.Block(**{**defaults, **kwargs})
|
import math
import numpy as np
from ..cov import compute_whitener
from ..io.pick import pick_info
from ..forward import apply_forward
from ..utils import (logger, verbose, check_random_state, _check_preload,
_validate_type)
@verbose
def simulate_evoked(fwd, stc, info, cov, nave=30, iir_filter=None,
random_state=None, use_cps=True, verbose=None):
"""Generate noisy evoked data.
.. note:: No projections from ``info`` will be present in the
output ``evoked``. You can use e.g.
:func:`evoked.add_proj <mne.Evoked.add_proj>` or
:func:`evoked.set_eeg_reference <mne.Evoked.set_eeg_reference>`
to add them afterward as necessary.
Parameters
----------
fwd : instance of Forward
A forward solution.
stc : SourceEstimate object
The source time courses.
info : dict
Measurement info to generate the evoked.
cov : Covariance object
The noise covariance.
nave : int
Number of averaged epochs (defaults to 30).
.. versionadded:: 0.15.0
iir_filter : None | array
IIR filter coefficients (denominator) e.g. [1, -1, 0.2].
%(random_state)s
%(use_cps)s
.. versionadded:: 0.15
%(verbose)s
Returns
-------
evoked : Evoked object
The simulated evoked data.
See Also
--------
simulate_raw
simulate_stc
simulate_sparse_stc
Notes
-----
To make the equivalence between snr and nave, when the snr is given
instead of nave::
        nave = (1 / 10 ** ((actual_snr - snr) / 20)) ** 2
    where actual_snr is the snr of the generated noise before scaling.
.. versionadded:: 0.10.0
"""
evoked = apply_forward(fwd, stc, info, use_cps=use_cps)
if nave < np.inf:
noise = _simulate_noise_evoked(evoked, cov, iir_filter, random_state)
evoked.data += noise.data / math.sqrt(nave)
evoked.nave = np.int64(nave)
if cov is not None and cov.get('projs', None):
evoked.add_proj(cov['projs']).apply_proj()
return evoked
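# Hypothetical usage sketch (file names and the `stc` variable are
# placeholders, not shipped data):
#   fwd = mne.read_forward_solution('sample-fwd.fif')
#   info = mne.io.read_info('sample-ave.fif')
#   cov = mne.read_cov('sample-cov.fif')
#   evoked = simulate_evoked(fwd, stc, info, cov, nave=30)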
def _simulate_noise_evoked(evoked, cov, iir_filter, random_state):
noise = evoked.copy()
noise.data[:] = 0
return _add_noise(noise, cov, iir_filter, random_state,
allow_subselection=False)
@verbose
def add_noise(inst, cov, iir_filter=None, random_state=None,
verbose=None):
"""Create noise as a multivariate Gaussian.
The spatial covariance of the noise is given from the cov matrix.
Parameters
----------
inst : instance of Evoked, Epochs, or Raw
Instance to which to add noise.
cov : instance of Covariance
The noise covariance.
iir_filter : None | array-like
IIR filter coefficients (denominator).
%(random_state)s
%(verbose)s
Returns
-------
inst : instance of Evoked, Epochs, or Raw
The instance, modified to have additional noise.
Notes
-----
Only channels in both ``inst.info['ch_names']`` and
``cov['names']`` will have noise added to them.
This function operates inplace on ``inst``.
.. versionadded:: 0.18.0
"""
# We always allow subselection here
return _add_noise(inst, cov, iir_filter, random_state)
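# Hypothetical usage sketch (instance and covariance come from the caller;
# the IIR coefficients are illustrative):
#   raw = mne.io.read_raw_fif('sample_raw.fif', preload=True)
#   cov = mne.read_cov('sample-cov.fif')
#   add_noise(raw, cov, iir_filter=[0.2, -0.2, 0.04], random_state=42)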
def _add_noise(inst, cov, iir_filter, random_state, allow_subselection=True):
"""Add noise, possibly with channel subselection."""
from ..cov import Covariance
from ..io import BaseRaw
from ..epochs import BaseEpochs
from ..evoked import Evoked
_validate_type(cov, Covariance, 'cov')
_validate_type(inst, (BaseRaw, BaseEpochs, Evoked),
'inst', 'Raw, Epochs, or Evoked')
_check_preload(inst, 'Adding noise')
data = inst._data
assert data.ndim in (2, 3)
if data.ndim == 2:
data = data[np.newaxis]
# Subselect if necessary
info = inst.info
info._check_consistency()
picks = gen_picks = slice(None)
if allow_subselection:
use_chs = list(set(info['ch_names']) & set(cov['names']))
picks = np.where(np.in1d(info['ch_names'], use_chs))[0]
logger.info('Adding noise to %d/%d channels (%d channels in cov)'
% (len(picks), len(info['chs']), len(cov['names'])))
info = pick_info(inst.info, picks)
info._check_consistency()
gen_picks = np.arange(info['nchan'])
for epoch in data:
epoch[picks] += _generate_noise(info, cov, iir_filter, random_state,
epoch.shape[1], picks=gen_picks)[0]
return inst
def _generate_noise(info, cov, iir_filter, random_state, n_samples, zi=None,
picks=None):
"""Create spatially colored and temporally IIR-filtered noise."""
from scipy.signal import lfilter
rng = check_random_state(random_state)
_, _, colorer = compute_whitener(cov, info, pca=True, return_colorer=True,
picks=picks, verbose=False)
noise = np.dot(colorer, rng.standard_normal((colorer.shape[1], n_samples)))
if iir_filter is not None:
if zi is None:
zi = np.zeros((len(colorer), len(iir_filter) - 1))
noise, zf = lfilter([1], iir_filter, noise, axis=-1, zi=zi)
else:
zf = None
return noise, zf
|
"""
Curses-based UI implementation
"""
from __future__ import division, print_function
import curses
import _curses
from urwid import escape
from urwid.display_common import BaseScreen, RealTerminal, AttrSpec, \
UNPRINTABLE_TRANS_TABLE
from urwid.compat import bytes, PYTHON3, text_type, xrange, ord2
KEY_RESIZE = 410 # curses.KEY_RESIZE (sometimes not defined)
KEY_MOUSE = 409 # curses.KEY_MOUSE
_curses_colours = {
'default': (-1, 0),
'black': (curses.COLOR_BLACK, 0),
'dark red': (curses.COLOR_RED, 0),
'dark green': (curses.COLOR_GREEN, 0),
'brown': (curses.COLOR_YELLOW, 0),
'dark blue': (curses.COLOR_BLUE, 0),
'dark magenta': (curses.COLOR_MAGENTA, 0),
'dark cyan': (curses.COLOR_CYAN, 0),
'light gray': (curses.COLOR_WHITE, 0),
'dark gray': (curses.COLOR_BLACK, 1),
'light red': (curses.COLOR_RED, 1),
'light green': (curses.COLOR_GREEN, 1),
'yellow': (curses.COLOR_YELLOW, 1),
'light blue': (curses.COLOR_BLUE, 1),
'light magenta': (curses.COLOR_MAGENTA, 1),
'light cyan': (curses.COLOR_CYAN, 1),
'white': (curses.COLOR_WHITE, 1),
}
class Screen(BaseScreen, RealTerminal):
def __init__(self):
super(Screen,self).__init__()
self.curses_pairs = [
(None,None), # Can't be sure what pair 0 will default to
]
self.palette = {}
self.has_color = False
self.s = None
self.cursor_state = None
self._keyqueue = []
self.prev_input_resize = 0
self.set_input_timeouts()
self.last_bstate = 0
self._mouse_tracking_enabled = False
self.register_palette_entry(None, 'default','default')
def set_mouse_tracking(self, enable=True):
"""
Enable mouse tracking.
After calling this function get_input will include mouse
click events along with keystrokes.
"""
enable = bool(enable)
if enable == self._mouse_tracking_enabled:
return
if enable:
curses.mousemask(0
| curses.BUTTON1_PRESSED | curses.BUTTON1_RELEASED
| curses.BUTTON2_PRESSED | curses.BUTTON2_RELEASED
| curses.BUTTON3_PRESSED | curses.BUTTON3_RELEASED
| curses.BUTTON4_PRESSED | curses.BUTTON4_RELEASED
| curses.BUTTON1_DOUBLE_CLICKED | curses.BUTTON1_TRIPLE_CLICKED
| curses.BUTTON2_DOUBLE_CLICKED | curses.BUTTON2_TRIPLE_CLICKED
| curses.BUTTON3_DOUBLE_CLICKED | curses.BUTTON3_TRIPLE_CLICKED
| curses.BUTTON4_DOUBLE_CLICKED | curses.BUTTON4_TRIPLE_CLICKED
| curses.BUTTON_SHIFT | curses.BUTTON_ALT
| curses.BUTTON_CTRL)
else:
raise NotImplementedError()
self._mouse_tracking_enabled = enable
def _start(self):
"""
Initialize the screen and input mode.
"""
self.s = curses.initscr()
self.has_color = curses.has_colors()
if self.has_color:
curses.start_color()
if curses.COLORS < 8:
# not colourful enough
self.has_color = False
if self.has_color:
try:
curses.use_default_colors()
self.has_default_colors=True
except _curses.error:
self.has_default_colors=False
self._setup_colour_pairs()
curses.noecho()
curses.meta(1)
curses.halfdelay(10) # use set_input_timeouts to adjust
self.s.keypad(0)
if not self._signal_keys_set:
self._old_signal_keys = self.tty_signal_keys()
super(Screen, self)._start()
def _stop(self):
"""
Restore the screen.
"""
curses.echo()
self._curs_set(1)
try:
curses.endwin()
except _curses.error:
pass # don't block original error with curses error
if self._old_signal_keys:
self.tty_signal_keys(*self._old_signal_keys)
super(Screen, self)._stop()
def _setup_colour_pairs(self):
"""
Initialize all 63 color pairs based on the term:
bg * 8 + 7 - fg
So to get a color, we just need to use that term and get the right color
pair number.
"""
if not self.has_color:
return
for fg in xrange(8):
for bg in xrange(8):
# leave out white on black
if fg == curses.COLOR_WHITE and \
bg == curses.COLOR_BLACK:
continue
curses.init_pair(bg * 8 + 7 - fg, fg, bg)
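        # Worked example: dark red text (fg=COLOR_RED=1) on a dark blue
        # background (bg=COLOR_BLUE=4) uses pair 4 * 8 + 7 - 1 == 38.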
def _curs_set(self,x):
if self.cursor_state== "fixed" or x == self.cursor_state:
return
try:
curses.curs_set(x)
self.cursor_state = x
except _curses.error:
self.cursor_state = "fixed"
def _clear(self):
self.s.clear()
self.s.refresh()
def _getch(self, wait_tenths):
if wait_tenths==0:
return self._getch_nodelay()
if wait_tenths is None:
curses.cbreak()
else:
curses.halfdelay(wait_tenths)
self.s.nodelay(0)
return self.s.getch()
def _getch_nodelay(self):
self.s.nodelay(1)
while 1:
# this call fails sometimes, but seems to work when I try again
try:
curses.cbreak()
break
except _curses.error:
pass
return self.s.getch()
def set_input_timeouts(self, max_wait=None, complete_wait=0.1,
resize_wait=0.1):
"""
Set the get_input timeout values. All values have a granularity
of 0.1s, ie. any value between 0.15 and 0.05 will be treated as
0.1 and any value less than 0.05 will be treated as 0. The
maximum timeout value for this module is 25.5 seconds.
max_wait -- amount of time in seconds to wait for input when
there is no input pending, wait forever if None
complete_wait -- amount of time in seconds to wait when
get_input detects an incomplete escape sequence at the
end of the available input
resize_wait -- amount of time in seconds to wait for more input
after receiving two screen resize requests in a row to
stop urwid from consuming 100% cpu during a gradual
window resize operation
"""
def convert_to_tenths( s ):
if s is None:
return None
return int( (s+0.05)*10 )
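        # Worked example: convert_to_tenths(0.1) -> int(1.5) == 1 tenth,
        # while convert_to_tenths(0.04) -> int(0.9) == 0 (no wait).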
self.max_tenths = convert_to_tenths(max_wait)
self.complete_tenths = convert_to_tenths(complete_wait)
self.resize_tenths = convert_to_tenths(resize_wait)
def get_input(self, raw_keys=False):
"""Return pending input as a list.
raw_keys -- return raw keycodes as well as translated versions
This function will immediately return all the input since the
last time it was called. If there is no input pending it will
wait before returning an empty list. The wait time may be
configured with the set_input_timeouts function.
If raw_keys is False (default) this function will return a list
of keys pressed. If raw_keys is True this function will return
a ( keys pressed, raw keycodes ) tuple instead.
Examples of keys returned:
* ASCII printable characters: " ", "a", "0", "A", "-", "/"
* ASCII control characters: "tab", "enter"
* Escape sequences: "up", "page up", "home", "insert", "f1"
* Key combinations: "shift f1", "meta a", "ctrl b"
* Window events: "window resize"
When a narrow encoding is not enabled:
* "Extended ASCII" characters: "\\xa1", "\\xb2", "\\xfe"
When a wide encoding is enabled:
* Double-byte characters: "\\xa1\\xea", "\\xb2\\xd4"
When utf8 encoding is enabled:
        * Unicode characters: u"\\u00a5", u"\\u253c"
Examples of mouse events returned:
* Mouse button press: ('mouse press', 1, 15, 13),
('meta mouse press', 2, 17, 23)
* Mouse button release: ('mouse release', 0, 18, 13),
('ctrl mouse release', 0, 17, 23)
"""
assert self._started
keys, raw = self._get_input( self.max_tenths )
# Avoid pegging CPU at 100% when slowly resizing, and work
# around a bug with some braindead curses implementations that
# return "no key" between "window resize" commands
if keys==['window resize'] and self.prev_input_resize:
while True:
keys, raw2 = self._get_input(self.resize_tenths)
raw += raw2
if not keys:
keys, raw2 = self._get_input(
self.resize_tenths)
raw += raw2
if keys!=['window resize']:
break
if keys[-1:]!=['window resize']:
keys.append('window resize')
if keys==['window resize']:
self.prev_input_resize = 2
elif self.prev_input_resize == 2 and not keys:
self.prev_input_resize = 1
else:
self.prev_input_resize = 0
if raw_keys:
return keys, raw
return keys
def _get_input(self, wait_tenths):
# this works around a strange curses bug with window resizing
# not being reported correctly with repeated calls to this
# function without a doupdate call in between
curses.doupdate()
key = self._getch(wait_tenths)
resize = False
raw = []
keys = []
while key >= 0:
raw.append(key)
if key==KEY_RESIZE:
resize = True
elif key==KEY_MOUSE:
keys += self._encode_mouse_event()
else:
keys.append(key)
key = self._getch_nodelay()
processed = []
try:
while keys:
run, keys = escape.process_keyqueue(keys, True)
processed += run
except escape.MoreInputRequired:
key = self._getch(self.complete_tenths)
while key >= 0:
raw.append(key)
if key==KEY_RESIZE:
resize = True
elif key==KEY_MOUSE:
keys += self._encode_mouse_event()
else:
keys.append(key)
key = self._getch_nodelay()
while keys:
run, keys = escape.process_keyqueue(keys, False)
processed += run
if resize:
processed.append('window resize')
return processed, raw
def _encode_mouse_event(self):
# convert to escape sequence
last = next = self.last_bstate
(id,x,y,z,bstate) = curses.getmouse()
mod = 0
if bstate & curses.BUTTON_SHIFT: mod |= 4
if bstate & curses.BUTTON_ALT: mod |= 8
if bstate & curses.BUTTON_CTRL: mod |= 16
l = []
def append_button( b ):
b |= mod
l.extend([ 27, ord2('['), ord2('M'), b+32, x+33, y+33 ])
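        # append_button emits an xterm-style mouse report: ESC [ M followed by
        # button+32 and 1-based coordinates offset by 32 (hence the +33 on the
        # 0-based x and y returned by curses.getmouse()).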
if bstate & curses.BUTTON1_PRESSED and last & 1 == 0:
append_button( 0 )
next |= 1
if bstate & curses.BUTTON2_PRESSED and last & 2 == 0:
append_button( 1 )
next |= 2
if bstate & curses.BUTTON3_PRESSED and last & 4 == 0:
append_button( 2 )
next |= 4
if bstate & curses.BUTTON4_PRESSED and last & 8 == 0:
append_button( 64 )
next |= 8
if bstate & curses.BUTTON1_RELEASED and last & 1:
append_button( 0 + escape.MOUSE_RELEASE_FLAG )
next &= ~ 1
if bstate & curses.BUTTON2_RELEASED and last & 2:
append_button( 1 + escape.MOUSE_RELEASE_FLAG )
next &= ~ 2
if bstate & curses.BUTTON3_RELEASED and last & 4:
append_button( 2 + escape.MOUSE_RELEASE_FLAG )
next &= ~ 4
if bstate & curses.BUTTON4_RELEASED and last & 8:
append_button( 64 + escape.MOUSE_RELEASE_FLAG )
next &= ~ 8
if bstate & curses.BUTTON1_DOUBLE_CLICKED:
append_button( 0 + escape.MOUSE_MULTIPLE_CLICK_FLAG )
if bstate & curses.BUTTON2_DOUBLE_CLICKED:
append_button( 1 + escape.MOUSE_MULTIPLE_CLICK_FLAG )
if bstate & curses.BUTTON3_DOUBLE_CLICKED:
append_button( 2 + escape.MOUSE_MULTIPLE_CLICK_FLAG )
if bstate & curses.BUTTON4_DOUBLE_CLICKED:
append_button( 64 + escape.MOUSE_MULTIPLE_CLICK_FLAG )
if bstate & curses.BUTTON1_TRIPLE_CLICKED:
append_button( 0 + escape.MOUSE_MULTIPLE_CLICK_FLAG*2 )
if bstate & curses.BUTTON2_TRIPLE_CLICKED:
append_button( 1 + escape.MOUSE_MULTIPLE_CLICK_FLAG*2 )
if bstate & curses.BUTTON3_TRIPLE_CLICKED:
append_button( 2 + escape.MOUSE_MULTIPLE_CLICK_FLAG*2 )
if bstate & curses.BUTTON4_TRIPLE_CLICKED:
append_button( 64 + escape.MOUSE_MULTIPLE_CLICK_FLAG*2 )
self.last_bstate = next
return l
def _dbg_instr(self): # messy input string (intended for debugging)
curses.echo()
self.s.nodelay(0)
curses.halfdelay(100)
str = self.s.getstr()
curses.noecho()
return str
def _dbg_out(self,str): # messy output function (intended for debugging)
self.s.clrtoeol()
self.s.addstr(str)
self.s.refresh()
self._curs_set(1)
def _dbg_query(self,question): # messy query (intended for debugging)
self._dbg_out(question)
return self._dbg_instr()
def _dbg_refresh(self):
self.s.refresh()
def get_cols_rows(self):
"""Return the terminal dimensions (num columns, num rows)."""
rows,cols = self.s.getmaxyx()
return cols,rows
def _setattr(self, a):
if a is None:
self.s.attrset(0)
return
elif not isinstance(a, AttrSpec):
p = self._palette.get(a, (AttrSpec('default', 'default'),))
a = p[0]
if self.has_color:
if a.foreground_basic:
if a.foreground_number >= 8:
fg = a.foreground_number - 8
else:
fg = a.foreground_number
else:
fg = 7
if a.background_basic:
bg = a.background_number
else:
bg = 0
attr = curses.color_pair(bg * 8 + 7 - fg)
else:
attr = 0
if a.bold:
attr |= curses.A_BOLD
if a.standout:
attr |= curses.A_STANDOUT
if a.underline:
attr |= curses.A_UNDERLINE
if a.blink:
attr |= curses.A_BLINK
self.s.attrset(attr)
def draw_screen(self, size, r ):
"""Paint screen with rendered canvas."""
assert self._started
cols, rows = size
assert r.rows() == rows, "canvas size and passed size don't match"
y = -1
for row in r.content():
y += 1
try:
self.s.move( y, 0 )
except _curses.error:
# terminal shrunk?
# move failed so stop rendering.
return
first = True
lasta = None
nr = 0
for a, cs, seg in row:
if cs != 'U':
seg = seg.translate(UNPRINTABLE_TRANS_TABLE)
assert isinstance(seg, bytes)
if first or lasta != a:
self._setattr(a)
lasta = a
try:
if cs in ("0", "U"):
for i in range(len(seg)):
self.s.addch( 0x400000 + ord2(seg[i]) )
else:
assert cs is None
if PYTHON3:
assert isinstance(seg, bytes)
self.s.addstr(seg.decode('utf-8'))
else:
self.s.addstr(seg)
except _curses.error:
# it's ok to get out of the
# screen on the lower right
if (y == rows-1 and nr == len(row)-1):
pass
else:
# perhaps screen size changed
# quietly abort.
return
nr += 1
if r.cursor is not None:
x,y = r.cursor
self._curs_set(1)
try:
self.s.move(y,x)
except _curses.error:
pass
else:
self._curs_set(0)
self.s.move(0,0)
self.s.refresh()
self.keep_cache_alive_link = r
def clear(self):
"""
Force the screen to be completely repainted on the next
call to draw_screen().
"""
self.s.clear()
class _test:
def __init__(self):
self.ui = Screen()
self.l = list(_curses_colours.keys())
self.l.sort()
for c in self.l:
self.ui.register_palette( [
(c+" on black", c, 'black', 'underline'),
(c+" on dark blue",c, 'dark blue', 'bold'),
(c+" on light gray",c,'light gray', 'standout'),
])
self.ui.run_wrapper(self.run)
def run(self):
class FakeRender: pass
r = FakeRender()
text = [" has_color = "+repr(self.ui.has_color),""]
attr = [[],[]]
r.coords = {}
r.cursor = None
for c in self.l:
t = ""
a = []
for p in c+" on black",c+" on dark blue",c+" on light gray":
a.append((p,27))
t=t+ (p+27*" ")[:27]
text.append( t )
attr.append( a )
text += ["","return values from get_input(): (q exits)", ""]
attr += [[],[],[]]
cols,rows = self.ui.get_cols_rows()
keys = None
while keys!=['q']:
r.text=([t.ljust(cols) for t in text]+[""]*rows)[:rows]
r.attr=(attr+[[]]*rows) [:rows]
self.ui.draw_screen((cols,rows),r)
keys, raw = self.ui.get_input( raw_keys = True )
if 'window resize' in keys:
cols, rows = self.ui.get_cols_rows()
if not keys:
continue
t = ""
a = []
for k in keys:
if type(k) == text_type: k = k.encode("utf-8")
t += "'"+k + "' "
a += [(None,1), ('yellow on dark blue',len(k)),
(None,2)]
text.append(t + ": "+ repr(raw))
attr.append(a)
text = text[-rows:]
attr = attr[-rows:]
if '__main__'==__name__:
_test()
|
from asyncio import coroutines, ensure_future, get_running_loop
from asyncio.events import AbstractEventLoop
import concurrent.futures
import functools
import logging
import threading
from traceback import extract_stack
from typing import Any, Callable, Coroutine, TypeVar
_LOGGER = logging.getLogger(__name__)
T = TypeVar("T")
def fire_coroutine_threadsafe(coro: Coroutine, loop: AbstractEventLoop) -> None:
"""Submit a coroutine object to a given event loop.
    This method does not provide a way to retrieve the result and
    is intended for fire-and-forget use. This reduces the
    overhead of firing the coroutine on the loop.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError("Cannot be called from within the event loop")
if not coroutines.iscoroutine(coro):
raise TypeError("A coroutine object is required: %s" % coro)
def callback() -> None:
"""Handle the firing of a coroutine."""
ensure_future(coro, loop=loop)
loop.call_soon_threadsafe(callback)
def run_callback_threadsafe(
loop: AbstractEventLoop, callback: Callable[..., T], *args: Any
) -> "concurrent.futures.Future[T]":
"""Submit a callback object to a given event loop.
Return a concurrent.futures.Future to access the result.
"""
ident = loop.__dict__.get("_thread_ident")
if ident is not None and ident == threading.get_ident():
raise RuntimeError("Cannot be called from within the event loop")
future: concurrent.futures.Future = concurrent.futures.Future()
def run_callback() -> None:
"""Run callback and store result."""
try:
future.set_result(callback(*args))
except Exception as exc: # pylint: disable=broad-except
if future.set_running_or_notify_cancel():
future.set_exception(exc)
else:
_LOGGER.warning("Exception on lost future: ", exc_info=True)
loop.call_soon_threadsafe(run_callback)
return future
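# Hypothetical usage sketch (caller-side; `loop` is the running event loop of
# another thread, and the caller must import time itself):
#   future = run_callback_threadsafe(loop, time.monotonic)
#   result = future.result(timeout=10)  # blocks the calling thread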
def check_loop() -> None:
"""Warn if called inside the event loop."""
try:
get_running_loop()
in_loop = True
except RuntimeError:
in_loop = False
if not in_loop:
return
found_frame = None
for frame in reversed(extract_stack()):
for path in ("custom_components/", "homeassistant/components/"):
try:
index = frame.filename.index(path)
found_frame = frame
break
except ValueError:
continue
if found_frame is not None:
break
# Did not source from integration? Hard error.
if found_frame is None:
raise RuntimeError(
"Detected I/O inside the event loop. This is causing stability issues. Please report issue"
)
start = index + len(path)
end = found_frame.filename.index("/", start)
integration = found_frame.filename[start:end]
if path == "custom_components/":
extra = " to the custom component author"
else:
extra = ""
_LOGGER.warning(
"Detected I/O inside the event loop. This is causing stability issues. Please report issue%s for %s doing I/O at %s, line %s: %s",
extra,
integration,
found_frame.filename[index:],
found_frame.lineno,
found_frame.line.strip(),
)
def protect_loop(func: Callable) -> Callable:
"""Protect function from running in event loop."""
@functools.wraps(func)
def protected_loop_func(*args, **kwargs): # type: ignore
check_loop()
return func(*args, **kwargs)
return protected_loop_func
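# Hypothetical usage sketch (not part of this module): wrap a blocking call so
# accidental use inside the event loop is logged.
#   import time
#   protected_sleep = protect_loop(time.sleep)
#   protected_sleep(0.1)  # warns if invoked from within the running loop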
|
import argparse
import asyncio
import os
import homeassistant.config as config_util
from homeassistant.core import HomeAssistant
# mypy: allow-untyped-calls, allow-untyped-defs
def run(args):
"""Handle ensure config commandline script."""
parser = argparse.ArgumentParser(
description=("Ensure a Home Assistant config exists, creates one if necessary.")
)
parser.add_argument(
"-c",
"--config",
metavar="path_to_config_dir",
default=config_util.get_default_config_dir(),
help="Directory that contains the Home Assistant configuration",
)
parser.add_argument("--script", choices=["ensure_config"])
args = parser.parse_args()
config_dir = os.path.join(os.getcwd(), args.config)
# Test if configuration directory exists
if not os.path.isdir(config_dir):
print("Creating directory", config_dir)
os.makedirs(config_dir)
config_path = asyncio.run(async_run(config_dir))
print("Configuration file:", config_path)
return 0
async def async_run(config_dir):
"""Make sure config exists."""
hass = HomeAssistant()
hass.config.config_dir = config_dir
path = await config_util.async_ensure_config_exists(hass)
await hass.async_stop(force=True)
return path
|
import json
import logging
from aiohttp.hdrs import CONTENT_TYPE
import requests
import voluptuous as vol
from homeassistant.components.notify import (
ATTR_DATA,
ATTR_TARGET,
PLATFORM_SCHEMA,
BaseNotificationService,
)
from homeassistant.const import CONTENT_TYPE_JSON, HTTP_OK
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
CONF_PAGE_ACCESS_TOKEN = "page_access_token"
BASE_URL = "https://graph.facebook.com/v2.6/me/messages"
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_PAGE_ACCESS_TOKEN): cv.string}
)
def get_service(hass, config, discovery_info=None):
"""Get the Facebook notification service."""
return FacebookNotificationService(config[CONF_PAGE_ACCESS_TOKEN])
class FacebookNotificationService(BaseNotificationService):
"""Implementation of a notification service for the Facebook service."""
def __init__(self, access_token):
"""Initialize the service."""
self.page_access_token = access_token
def send_message(self, message="", **kwargs):
"""Send some message."""
payload = {"access_token": self.page_access_token}
targets = kwargs.get(ATTR_TARGET)
data = kwargs.get(ATTR_DATA)
body_message = {"text": message}
if data is not None:
body_message.update(data)
# Only one of text or attachment can be specified
if "attachment" in body_message:
body_message.pop("text")
if not targets:
_LOGGER.error("At least 1 target is required")
return
for target in targets:
# If the target starts with a "+", it's a phone number,
# otherwise it's a user id.
if target.startswith("+"):
recipient = {"phone_number": target}
else:
recipient = {"id": target}
body = {
"recipient": recipient,
"message": body_message,
"messaging_type": "MESSAGE_TAG",
"tag": "ACCOUNT_UPDATE",
}
resp = requests.post(
BASE_URL,
data=json.dumps(body),
params=payload,
headers={CONTENT_TYPE: CONTENT_TYPE_JSON},
timeout=10,
)
if resp.status_code != HTTP_OK:
log_error(resp)
def log_error(response):
"""Log error message."""
obj = response.json()
error_message = obj["error"]["message"]
error_code = obj["error"]["code"]
_LOGGER.error(
"Error %s : %s (Code %s)", response.status_code, error_message, error_code
)
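# Illustrative request body for a phone-number target (values are made up):
#   {"recipient": {"phone_number": "+15551234567"},
#    "message": {"text": "hello"},
#    "messaging_type": "MESSAGE_TAG",
#    "tag": "ACCOUNT_UPDATE"}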
|
from unittest import TestCase
import pandas as pd
from scattertext import CorpusFromPandas, CSRMatrixFactory, IndexStore
from scattertext import whitespace_nlp
from scattertext.CategoryColorAssigner import CategoryColorAssigner
from scattertext.test.test_corpusFromPandas import get_docs_categories
class TestCategoryColorAssigner(TestCase):
def test_main(self):
categories, documents = get_docs_categories()
df = pd.DataFrame({'category': categories, 'text': documents})
corpus = CorpusFromPandas(df, 'category', 'text', nlp=whitespace_nlp).build()
self.assertEqual(CategoryColorAssigner(corpus).get_category_colors().to_dict(),
{'???': [255, 127, 14],
'hamlet': [174, 199, 232],
'jay-z/r. kelly': [31, 119, 180]})
term_colors = CategoryColorAssigner(corpus).get_term_colors()
self.assertEqual(term_colors['this time'], 'aec7e8')
self.assertEqual(term_colors['sire'], '1f77b4')
self.assertEqual(len(term_colors), corpus.get_num_terms())
mfact = CSRMatrixFactory()
mis = IndexStore()
for i, c in enumerate(df['category']):
mfact[i, mis.getidx(c)] = 1
corpus = corpus.add_metadata(mfact.get_csr_matrix(), mis)
meta_colors = CategoryColorAssigner(corpus, use_non_text_features=True).get_term_colors()
self.assertEqual(meta_colors, {'hamlet': 'aec7e8', 'jay-z/r. kelly': '1f77b4', '???': 'ff7f0e'})
self.assertNotEqual(CategoryColorAssigner(corpus).get_term_colors(), meta_colors)
|
from PyQt5.QtCore import pyqtSignal, pyqtSlot, Qt, QSize
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtWidgets import QSizePolicy, QWidget
from qutebrowser.keyinput import modeman, modeparsers
from qutebrowser.api import cmdutils
from qutebrowser.misc import cmdhistory, editor
from qutebrowser.misc import miscwidgets as misc
from qutebrowser.utils import usertypes, log, objreg, message, utils
from qutebrowser.config import config
class Command(misc.MinimalLineEditMixin, misc.CommandLineEdit):
"""The commandline part of the statusbar.
Attributes:
_win_id: The window ID this widget is associated with.
Signals:
got_cmd: Emitted when a command is triggered by the user.
arg: The command string and also potentially the count.
got_search: Emitted when a search should happen.
clear_completion_selection: Emitted before the completion widget is
hidden.
hide_completion: Emitted when the completion widget should be hidden.
update_completion: Emitted when the completion should be shown/updated.
show_cmd: Emitted when command input should be shown.
hide_cmd: Emitted when command input can be hidden.
"""
got_cmd = pyqtSignal([str], [str, int])
got_search = pyqtSignal(str, bool) # text, reverse
clear_completion_selection = pyqtSignal()
hide_completion = pyqtSignal()
update_completion = pyqtSignal()
show_cmd = pyqtSignal()
hide_cmd = pyqtSignal()
def __init__(self, *, win_id: int,
private: bool,
parent: QWidget = None) -> None:
misc.CommandLineEdit.__init__(self, parent=parent)
misc.MinimalLineEditMixin.__init__(self)
self._win_id = win_id
if not private:
command_history = objreg.get('command-history')
self.history.history = command_history.data
self.history.changed.connect(command_history.changed)
self.setSizePolicy(QSizePolicy.MinimumExpanding, QSizePolicy.Ignored)
self.cursorPositionChanged.connect(self.update_completion)
self.textChanged.connect(self.update_completion)
self.textChanged.connect(self.updateGeometry)
self.textChanged.connect(self._incremental_search)
def _handle_search(self) -> bool:
"""Check if the currently entered text is a search, and if so, run it.
Return:
True if a search was executed, False otherwise.
"""
if self.prefix() == '/':
self.got_search.emit(self.text()[1:], False)
return True
elif self.prefix() == '?':
self.got_search.emit(self.text()[1:], True)
return True
else:
return False
def prefix(self) -> str:
"""Get the currently entered command prefix."""
text = self.text()
if not text:
return ''
elif text[0] in modeparsers.STARTCHARS:
return text[0]
else:
return ''
def set_cmd_text(self, text: str) -> None:
"""Preset the statusbar to some text.
Args:
text: The text to set as string.
"""
self.setText(text)
log.modes.debug("Setting command text, focusing {!r}".format(self))
modeman.enter(self._win_id, usertypes.KeyMode.command, 'cmd focus')
self.setFocus()
self.show_cmd.emit()
@cmdutils.register(instance='status-command', name='set-cmd-text',
scope='window', maxsplit=0)
@cmdutils.argument('count', value=cmdutils.Value.count)
def set_cmd_text_command(self, text: str,
count: int = None,
space: bool = False,
append: bool = False,
run_on_count: bool = False) -> None:
"""Preset the statusbar to some text.
//
Wrapper for set_cmd_text to check the arguments and allow multiple
strings which will get joined.
Args:
text: The commandline to set.
count: The count if given.
space: If given, a space is added to the end.
append: If given, the text is appended to the current text.
run_on_count: If given with a count, the command is run with the
given count rather than setting the command text.
"""
if space:
text += ' '
if append:
if not self.text():
raise cmdutils.CommandError("No current text!")
text = self.text() + text
if not text or text[0] not in modeparsers.STARTCHARS:
raise cmdutils.CommandError(
"Invalid command text '{}'.".format(text))
if run_on_count and count is not None:
self.got_cmd[str, int].emit(text, count)
else:
self.set_cmd_text(text)
@cmdutils.register(instance='status-command',
modes=[usertypes.KeyMode.command], scope='window')
def command_history_prev(self) -> None:
"""Go back in the commandline history."""
try:
if not self.history.is_browsing():
item = self.history.start(self.text().strip())
else:
item = self.history.previtem()
except (cmdhistory.HistoryEmptyError,
cmdhistory.HistoryEndReachedError):
return
if item:
self.set_cmd_text(item)
@cmdutils.register(instance='status-command',
modes=[usertypes.KeyMode.command], scope='window')
def command_history_next(self) -> None:
"""Go forward in the commandline history."""
if not self.history.is_browsing():
return
try:
item = self.history.nextitem()
except cmdhistory.HistoryEndReachedError:
return
if item:
self.set_cmd_text(item)
@cmdutils.register(instance='status-command',
modes=[usertypes.KeyMode.command], scope='window')
def command_accept(self, rapid: bool = False) -> None:
"""Execute the command currently in the commandline.
Args:
rapid: Run the command without closing or clearing the command bar.
"""
text = self.text()
self.history.append(text)
was_search = self._handle_search()
if not rapid:
modeman.leave(self._win_id, usertypes.KeyMode.command,
'cmd accept')
if not was_search:
self.got_cmd[str].emit(text[1:])
@cmdutils.register(instance='status-command', scope='window')
def edit_command(self, run: bool = False) -> None:
"""Open an editor to modify the current command.
Args:
run: Run the command if the editor exits successfully.
"""
ed = editor.ExternalEditor(parent=self)
def callback(text: str) -> None:
"""Set the commandline to the edited text."""
if not text or text[0] not in modeparsers.STARTCHARS:
message.error('command must start with one of {}'
.format(modeparsers.STARTCHARS))
return
self.set_cmd_text(text)
if run:
self.command_accept()
ed.file_updated.connect(callback)
ed.edit(self.text())
@pyqtSlot(usertypes.KeyMode)
def on_mode_left(self, mode: usertypes.KeyMode) -> None:
"""Clear up when command mode was left.
- Clear the statusbar text if it's explicitly unfocused.
- Clear completion selection
- Hide completion
Args:
mode: The mode which was left.
"""
if mode == usertypes.KeyMode.command:
self.setText('')
self.history.stop()
self.hide_cmd.emit()
self.clear_completion_selection.emit()
self.hide_completion.emit()
def setText(self, text: str) -> None:
"""Extend setText to set prefix and make sure the prompt is ok."""
if not text:
pass
elif text[0] in modeparsers.STARTCHARS:
super().set_prompt(text[0])
else:
raise utils.Unreachable("setText got called with invalid text "
"'{}'!".format(text))
super().setText(text)
def keyPressEvent(self, e: QKeyEvent) -> None:
"""Override keyPressEvent to ignore Return key presses.
If this widget is focused, we are in passthrough key mode, and
        Enter/Shift+Enter/etc. will cause QLineEdit to think it's finished
        without command_accept being called.
"""
text = self.text()
if text in modeparsers.STARTCHARS and e.key() == Qt.Key_Backspace:
e.accept()
modeman.leave(self._win_id, usertypes.KeyMode.command,
'prefix deleted')
return
if e.key() == Qt.Key_Return:
e.ignore()
return
else:
super().keyPressEvent(e)
def sizeHint(self) -> QSize:
"""Dynamically calculate the needed size."""
height = super().sizeHint().height()
text = self.text()
if not text:
text = 'x'
width = self.fontMetrics().width(text)
return QSize(width, height)
@pyqtSlot()
def _incremental_search(self) -> None:
if not config.val.search.incremental:
return
self._handle_search()
|
from sqlalchemy import Column, Integer, String
from sqlalchemy_utils import JSONType
from lemur.database import db
from lemur.plugins.base import plugins
class Authorization(db.Model):
__tablename__ = "pending_dns_authorizations"
id = Column(Integer, primary_key=True, autoincrement=True)
account_number = Column(String(128))
domains = Column(JSONType)
dns_provider_type = Column(String(128))
options = Column(JSONType)
@property
def plugin(self):
return plugins.get(self.plugin_name)
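    # NOTE: `plugin_name` is not a mapped column on this model; it is assumed
    # to be provided elsewhere (e.g. set on the instance by the caller).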
def __repr__(self):
return "Authorization(id={id})".format(id=self.id)
def __init__(self, account_number, domains, dns_provider_type, options=None):
self.account_number = account_number
self.domains = domains
self.dns_provider_type = dns_provider_type
self.options = options
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import call, patch
from diamond.collector import Collector
from solr import SolrCollector
##########################################################################
class TestSolrCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('SolrCollector', {})
self.collector = SolrCollector(config, None)
def test_import(self):
self.assertTrue(SolrCollector)
@patch('urllib2.urlopen')
@patch.object(Collector, 'publish')
def test_should_work_with_real_data(self, publish_mock, urlopen_mock):
returns = [self.getFixture('cores'),
self.getFixture('ping'),
self.getFixture('stats'),
self.getFixture('system')]
urlopen_mock.side_effect = lambda *args: returns.pop(0)
self.collector.collect()
metrics = {
'response.QueryTime': 5,
'response.Status': 0,
"core.maxDoc": 321,
"core.numDocs": 184,
"core.warmupTime": 0,
"queryhandler.standard.requests": 3,
"queryhandler.standard.errors": 0,
"queryhandler.standard.timeouts": 0,
"queryhandler.standard.totalTime": 270,
"queryhandler.standard.avgTimePerRequest": 90,
"queryhandler.standard.avgRequestsPerSecond": 0.00016776958,
"queryhandler.update.requests": 0,
"queryhandler.update.errors": 0,
"queryhandler.update.timeouts": 0,
"queryhandler.update.totalTime": 0,
"queryhandler.update.avgRequestsPerSecond": 0,
"updatehandler.commits": 0,
"updatehandler.autocommits": 0,
"updatehandler.optimizes": 0,
"updatehandler.rollbacks": 0,
"updatehandler.docsPending": 0,
"updatehandler.adds": 0,
"updatehandler.errors": 0,
"updatehandler.cumulative_adds": 0,
"updatehandler.cumulative_errors": 0,
'cache.fieldValueCache.lookups': 0,
'cache.fieldValueCache.hits': 0,
'cache.fieldValueCache.hitratio': 0.0,
'cache.fieldValueCache.inserts': 0,
'cache.fieldValueCache.evictions': 0,
'cache.fieldValueCache.size': 0,
'cache.fieldValueCache.warmupTime': 0,
'cache.fieldValueCache.cumulative_lookups': 0,
'cache.fieldValueCache.cumulative_hits': 0,
'cache.fieldValueCache.cumulative_hitratio': 0.0,
'cache.fieldValueCache.cumulative_inserts': 0,
'cache.fieldValueCache.cumulative_evictions': 0,
'cache.filterCache.lookups': 0,
'cache.filterCache.hits': 0,
'cache.filterCache.hitratio': 0.0,
'cache.filterCache.inserts': 0,
'cache.filterCache.evictions': 0,
'cache.filterCache.size': 0,
'cache.filterCache.warmupTime': 0,
'cache.filterCache.cumulative_lookups': 0,
'cache.filterCache.cumulative_hits': 0,
'cache.filterCache.cumulative_hitratio': 0.0,
'cache.filterCache.cumulative_inserts': 0,
'cache.filterCache.cumulative_evictions': 0,
'cache.documentCache.lookups': 0,
'cache.documentCache.hits': 0,
'cache.documentCache.hitratio': 0.0,
'cache.documentCache.inserts': 0,
'cache.documentCache.evictions': 0,
'cache.documentCache.size': 0,
'cache.documentCache.warmupTime': 0,
'cache.documentCache.cumulative_lookups': 0,
'cache.documentCache.cumulative_hits': 0,
'cache.documentCache.cumulative_hitratio': 0.0,
'cache.documentCache.cumulative_inserts': 0,
'cache.documentCache.cumulative_evictions': 0,
'cache.queryResultCache.lookups': 3,
'cache.queryResultCache.hits': 2,
'cache.queryResultCache.hitratio': 0.66,
'cache.queryResultCache.inserts': 1,
'cache.queryResultCache.evictions': 0,
'cache.queryResultCache.size': 1,
'cache.queryResultCache.warmupTime': 0,
'cache.queryResultCache.cumulative_lookups': 3,
'cache.queryResultCache.cumulative_hits': 2,
'cache.queryResultCache.cumulative_hitratio': 0.66,
'cache.queryResultCache.cumulative_inserts': 1,
'cache.queryResultCache.cumulative_evictions': 0,
'jvm.mem.free': 42.7,
'jvm.mem.total': 61.9,
'jvm.mem.max': 185.6,
'jvm.mem.used': 19.2,
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics)
self.assertPublishedMany(publish_mock, metrics)
urlopen_mock.assert_has_calls([
call(
'http://localhost:8983/solr/admin/cores?action=STATUS&wt=json'),
call('http://localhost:8983/solr/admin/ping?wt=json'),
call('http://localhost:8983/solr/admin/mbeans?stats=true&wt=json'),
call('http://localhost:8983/solr/admin/system?stats=true&wt=json')
])
@patch('urllib2.urlopen')
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock, urlopen_mock):
urlopen_mock.return_value = self.getFixture('stats_blank')
self.collector.collect()
self.assertPublishedMany(publish_mock, {})
urlopen_mock.assert_called_once_with(
'http://localhost:8983/solr/admin/cores?action=STATUS&wt=json')
##########################################################################
if __name__ == "__main__":
unittest.main()
|
import os
import re
from collections import defaultdict
from io import StringIO
from unittest import mock
import dateutil.tz
import pytest
from lxml import etree
from nikola.nikola import Nikola, Post
from nikola.utils import LocaleBorg, TranslatableSetting
def test_feed_is_valid(rss_feed_content, rss_schema):
"""
A testcase to check if the generated feed is valid.
Validation can be tested with W3 FEED Validator that can be found
at http://feedvalidator.org
"""
document = etree.parse(StringIO(rss_feed_content))
assert rss_schema.validate(document)
@pytest.fixture
def rss_schema(rss_schema_filename):
with open(rss_schema_filename, "r") as rss_schema_file:
xmlschema_doc = etree.parse(rss_schema_file)
return etree.XMLSchema(xmlschema_doc)
@pytest.fixture
def rss_schema_filename(test_dir):
return os.path.join(test_dir, "data", "rss-2_0.xsd")
@pytest.mark.parametrize("element", ["guid", "link"])
def test_feed_items_have_valid_URLs(rss_feed_content, blog_url, element):
"""
The items in the feed need to have valid urls in link and guid.
As stated by W3 FEED Validator:
* "link must be a full and valid URL"
* "guid must be a full URL, unless isPermaLink attribute is false: /weblog/posts/the-minimal-server.html"
"""
# This validation regex is taken from django.core.validators
url_validation_regex = re.compile(
r"^(?:http|ftp)s?://" # http:// or https://
# domain...
r"(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|"
r"localhost|" # localhost...
# ...or ipv4
r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|"
# ...or ipv6
r"\[?[A-F0-9]*:[A-F0-9:]+\]?)" r"(?::\d+)?" r"(?:/?|[/?]\S+)$", # optional port
re.IGNORECASE,
)
def is_valid_URL(url):
return url_validation_regex.match(url) is not None
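    # e.g. is_valid_URL("http://some.blog/posts/x.html") is True, while a
    # relative path like "/weblog/posts/x.html" (no scheme) is rejected.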
et = etree.parse(StringIO(rss_feed_content))
channel = et.find("channel")
item = channel.find("item")
element = item.find(element)
assert is_valid_URL(element.text)
assert blog_url in element.text
@pytest.fixture(autouse=True)
def localeborg(default_locale):
"""
LocaleBorg with default settings
"""
LocaleBorg.reset()
LocaleBorg.initialize({}, default_locale)
try:
yield
finally:
LocaleBorg.reset()
@pytest.fixture
def rss_feed_content(blog_url, config, default_locale):
default_post = {
"title": "post title",
"slug": "awesome_article",
"date": "2012-10-01 22:41",
"author": None,
"tags": "tags",
"link": "link",
"description": "description",
"enclosure": "http://www.example.org/foo.mp3",
"enclosure_length": "5",
}
meta_mock = mock.Mock(return_value=(defaultdict(str, default_post), None))
with mock.patch("nikola.post.get_meta", meta_mock):
with \
mock.patch(
"nikola.nikola.utils.os.path.isdir", mock.Mock(return_value=True)), \
mock.patch(
"nikola.nikola.Post.text", mock.Mock(return_value="some long text")
):
with mock.patch(
"nikola.post.os.path.isfile", mock.Mock(return_value=True)):
example_post = Post(
"source.file",
config,
"blog_folder",
True,
{"en": ""},
"post.tmpl",
FakeCompiler(),
)
filename = "testfeed.rss"
opener_mock = mock.mock_open()
with mock.patch("nikola.nikola.io.open", opener_mock, create=True):
Nikola().generic_rss_renderer(
default_locale,
"blog_title",
blog_url,
"blog_description",
[example_post, ],
filename,
True,
False,
)
opener_mock.assert_called_once_with(filename, "w+", encoding="utf-8")
# Python 3 / unicode strings workaround
# lxml will complain if the encoding is specified in the
# xml when running with unicode strings.
# We do not include this in our content.
file_content = [call[1][0] for call in opener_mock.mock_calls[2:-1]][0]
splitted_content = file_content.split("\n")
# encoding_declaration = splitted_content[0]
content_without_encoding_declaration = splitted_content[1:]
yield "\n".join(content_without_encoding_declaration)
@pytest.fixture
def config(blog_url, default_locale):
fake_conf = defaultdict(str)
fake_conf["TIMEZONE"] = "UTC"
fake_conf["__tzinfo__"] = dateutil.tz.tzutc()
fake_conf["DEFAULT_LANG"] = default_locale
fake_conf["TRANSLATIONS"] = {default_locale: ""}
fake_conf["BASE_URL"] = blog_url
fake_conf["BLOG_AUTHOR"] = TranslatableSetting(
"BLOG_AUTHOR", "Nikola Tesla", [default_locale]
)
fake_conf["TRANSLATIONS_PATTERN"] = "{path}.{lang}.{ext}"
return fake_conf
@pytest.fixture
def blog_url():
return "http://some.blog"
class FakeCompiler:
demote_headers = False
compile = None
def extension(self):
return ".html"
def read_metadata(*args, **kwargs):
return {}
def register_extra_dependencies(self, post):
pass
|
try:
import cPickle as pickle
except ImportError:
import pickle
import pytest
import six
from mock import patch, MagicMock, sentinel, create_autospec, Mock, call
from pymongo.errors import OperationFailure, AutoReconnect
from pymongo.mongo_client import MongoClient
from arctic.arctic import Arctic, ArcticLibraryBinding, \
register_library_type, LIBRARY_TYPES
from arctic.auth import Credential
from arctic.exceptions import LibraryNotFoundException, \
ArcticException, QuotaExceededException
from arctic._cache import Cache
def test_arctic_lazy_init():
with patch('pymongo.MongoClient', return_value=MagicMock(), autospec=True) as mc, \
patch('arctic.arctic.mongo_retry', side_effect=lambda x: x, autospec=True), \
patch('arctic._cache.Cache._is_not_expired', return_value=True), \
patch('arctic.arctic.get_auth', autospec=True) as ga:
store = Arctic('cluster')
assert not mc.called
# do something to trigger lazy arctic init
store.list_libraries()
assert mc.called
def test_arctic_lazy_init_ssl_true():
with patch('pymongo.MongoClient', return_value=MagicMock(), autospec=True) as mc, \
patch('arctic.arctic.mongo_retry', side_effect=lambda x: x, autospec=True), \
patch('arctic._cache.Cache._is_not_expired', return_value=True), \
patch('arctic.arctic.get_auth', autospec=True) as ga:
store = Arctic('cluster', ssl=True)
assert not mc.called
# do something to trigger lazy arctic init
store.list_libraries()
assert mc.called
assert len(mc.mock_calls) == 1
assert mc.mock_calls[0] == call(connectTimeoutMS=2000,
host='cluster',
maxPoolSize=4,
serverSelectionTimeoutMS=30000,
socketTimeoutMS=600000,
ssl=True)
def test_connection_passed_warning_raised():
with patch('pymongo.MongoClient', return_value=MagicMock(), autospec=True), \
patch('arctic.arctic.mongo_retry', side_effect=lambda x: x, autospec=True), \
patch('arctic._cache.Cache._is_not_expired', return_value=True), \
patch('arctic.arctic.get_auth', autospec=True), \
patch('arctic.arctic.logger') as lg:
magic_mock = MagicMock(nodes={("host", "port")})
store = Arctic(magic_mock, ssl=True)
# Increment _pid to simulate forking the process
store._pid += 1
_ = store._conn
assert lg.mock_calls[0] == call.warn('Forking process. Arctic was passed a pymongo connection during init, '
'the new pymongo connection may have different parameters.')
def test_arctic_auth():
with patch('pymongo.MongoClient', return_value=MagicMock(), autospec=True), \
patch('arctic.arctic.mongo_retry', autospec=True), \
patch('arctic._cache.Cache._is_not_expired', return_value=True), \
patch('arctic.arctic.get_auth', autospec=True) as ga:
ga.return_value = Credential('db', 'admin_user', 'admin_pass')
store = Arctic('cluster')
# do something to trigger lazy arctic init
store.list_libraries()
ga.assert_called_once_with('cluster', 'arctic', 'admin')
store._adminDB.authenticate.assert_called_once_with('admin_user', 'admin_pass')
ga.reset_mock()
# Get a 'missing' library
with pytest.raises(LibraryNotFoundException):
with patch('arctic.arctic.ArcticLibraryBinding.get_library_type', return_value=None, autospec=True):
ga.return_value = Credential('db', 'user', 'pass')
store._conn['arctic_jblackburn'].name = 'arctic_jblackburn'
store['jblackburn.library']
# Creating the library will have attempted to auth against it
ga.assert_called_once_with('cluster', 'arctic', 'arctic_jblackburn')
store._conn['arctic_jblackburn'].authenticate.assert_called_once_with('user', 'pass')
def test_arctic_auth_custom_app_name():
with patch('pymongo.MongoClient', return_value=MagicMock(), autospec=True), \
patch('arctic.arctic.mongo_retry', autospec=True), \
patch('arctic._cache.Cache._is_not_expired', return_value=True), \
patch('arctic.arctic.get_auth', autospec=True) as ga:
ga.return_value = Credential('db', 'admin_user', 'admin_pass')
store = Arctic('cluster', app_name=sentinel.app_name)
# do something to trigger lazy arctic init
store.list_libraries()
assert ga.call_args_list == [call('cluster', sentinel.app_name, 'admin')]
ga.reset_mock()
# Get a 'missing' library
with pytest.raises(LibraryNotFoundException):
with patch('arctic.arctic.ArcticLibraryBinding.get_library_type', return_value=None, autospec=True):
ga.return_value = Credential('db', 'user', 'pass')
store._conn['arctic_jblackburn'].name = 'arctic_jblackburn'
store['jblackburn.library']
# Creating the library will have attempted to auth against it
assert ga.call_args_list == [call('cluster', sentinel.app_name, 'arctic_jblackburn')]
def test_arctic_connect_hostname():
with patch('pymongo.MongoClient', return_value=MagicMock(), autospec=True) as mc, \
patch('arctic.arctic.mongo_retry', autospec=True) as ar, \
patch('arctic._cache.Cache._is_not_expired', return_value=True), \
patch('arctic.arctic.get_mongodb_uri', autospec=True) as gmu:
store = Arctic('hostname', socketTimeoutMS=sentinel.socket_timeout,
connectTimeoutMS=sentinel.connect_timeout,
serverSelectionTimeoutMS=sentinel.select_timeout)
# do something to trigger lazy arctic init
store.list_libraries()
mc.assert_called_once_with(host=gmu('hostname'), maxPoolSize=4,
socketTimeoutMS=sentinel.socket_timeout,
connectTimeoutMS=sentinel.connect_timeout,
serverSelectionTimeoutMS=sentinel.select_timeout)
def test_arctic_connect_with_environment_name():
with patch('pymongo.MongoClient', return_value=MagicMock(), autospec=True) as mc, \
patch('arctic.arctic.mongo_retry', autospec=True) as ar, \
patch('arctic.arctic.get_auth', autospec=True), \
patch('arctic._cache.Cache._is_not_expired', return_value=True), \
patch('arctic.arctic.get_mongodb_uri') as gmfe:
store = Arctic('live', socketTimeoutMS=sentinel.socket_timeout,
connectTimeoutMS=sentinel.connect_timeout,
serverSelectionTimeoutMS=sentinel.select_timeout)
# do something to trigger lazy arctic init
store.list_libraries()
assert gmfe.call_args_list == [call('live')]
assert mc.call_args_list == [call(host=gmfe.return_value, maxPoolSize=4,
socketTimeoutMS=sentinel.socket_timeout,
connectTimeoutMS=sentinel.connect_timeout,
serverSelectionTimeoutMS=sentinel.select_timeout)]
@pytest.mark.parametrize(
["library", "expected_library", "expected_database"], [
('library', 'library', 'arctic'),
('user.library', 'library', 'arctic_user'),
])
def test_database_library_specifier(library, expected_library, expected_database):
mongo = MagicMock()
with patch('arctic.arctic.ArcticLibraryBinding._auth'):
ml = ArcticLibraryBinding(mongo, library)
assert ml.library == expected_library
mongo._conn.__getitem__.assert_called_with(expected_database)
def test_arctic_repr():
with patch('pymongo.MongoClient', return_value=MagicMock(), autospec=True):
with patch('arctic.arctic.mongo_retry', autospec=True):
with patch('arctic.arctic.get_auth', autospec=True) as ga:
ga.return_value = Credential('db', 'admin_user', 'admin_pass')
store = Arctic('cluster')
assert str(store) == repr(store)
def test_lib_repr():
mongo = MagicMock()
with patch('arctic.arctic.ArcticLibraryBinding._auth'):
ml = ArcticLibraryBinding(mongo, 'asdf')
assert str(ml) == repr(ml)
def test_register_library_type():
class DummyType(object):
pass
register_library_type("new_dummy_type", DummyType)
assert LIBRARY_TYPES['new_dummy_type'] == DummyType
with pytest.raises(ArcticException) as e:
register_library_type("new_dummy_type", DummyType)
assert "Library new_dummy_type already registered" in str(e.value)
def test_set_quota():
m = Mock(spec=ArcticLibraryBinding)
ArcticLibraryBinding.set_quota(m, 10000)
m.set_library_metadata.assert_called_once_with('QUOTA', 10000)
assert m.quota_countdown == 0
assert m.quota == 10000
def test_get_quota():
m = Mock(spec=ArcticLibraryBinding)
m.get_library_metadata.return_value = 42
assert ArcticLibraryBinding.get_quota(m) == 42
m.get_library_metadata.assert_called_once_with('QUOTA')
def test_check_quota_Zero():
self = create_autospec(ArcticLibraryBinding)
self.get_library_metadata.return_value = 0
self.quota_countdown = 0
ArcticLibraryBinding.check_quota(self)
def test_check_quota_None():
m = Mock(spec=ArcticLibraryBinding)
m.quota = None
m.quota_countdown = 0
m.get_library_metadata.return_value = None
ArcticLibraryBinding.check_quota(m)
m.get_library_metadata.assert_called_once_with('QUOTA')
assert m.quota == 0
def test_check_quota_Zero2():
m = Mock(spec=ArcticLibraryBinding)
m.quota = None
m.quota_countdown = 0
m.get_library_metadata.return_value = 0
ArcticLibraryBinding.check_quota(m)
m.get_library_metadata.assert_called_once_with('QUOTA')
assert m.quota == 0
def test_check_quota_countdown():
self = create_autospec(ArcticLibraryBinding)
self.get_library_metadata.return_value = 10
self.quota_countdown = 10
ArcticLibraryBinding.check_quota(self)
assert self.quota_countdown == 9
def test_check_quota():
self = create_autospec(ArcticLibraryBinding, database_name='arctic_db',
library='lib')
self.arctic = create_autospec(Arctic)
self.get_library_metadata.return_value = 1024 * 1024 * 1024
self.quota_countdown = 0
self.arctic.__getitem__.return_value = Mock(stats=Mock(return_value={'totals':
{'size': 900 * 1024 * 1024,
'count': 100,
}
}))
with patch('arctic.arctic.logger.warning') as warn:
ArcticLibraryBinding.check_quota(self)
self.arctic.__getitem__.assert_called_once_with(self.get_name.return_value)
warn.assert_called_once_with('Mongo Quota: arctic_db.lib 0.879 / 1 GB used')
assert self.quota_countdown == 6
def test_check_quota_90_percent():
self = create_autospec(ArcticLibraryBinding, database_name='arctic_db',
library='lib')
self.arctic = create_autospec(Arctic)
self.get_library_metadata.return_value = 1024 * 1024 * 1024
self.quota_countdown = 0
self.arctic.__getitem__.return_value = Mock(stats=Mock(return_value={'totals':
{'size': 0.91 * 1024 * 1024 * 1024,
'count': 1000000,
}
}))
with patch('arctic.arctic.logger.warning') as warn:
ArcticLibraryBinding.check_quota(self)
self.arctic.__getitem__.assert_called_once_with(self.get_name.return_value)
warn.assert_called_once_with('Mongo Quota: arctic_db.lib 0.910 / 1 GB used')
def test_check_quota_info():
self = create_autospec(ArcticLibraryBinding, database_name='arctic_db',
library='lib')
self.arctic = create_autospec(Arctic)
self.get_library_metadata.return_value = 1024 * 1024 * 1024
self.quota_countdown = 0
self.arctic.__getitem__.return_value = Mock(stats=Mock(return_value={'totals':
{'size': 1 * 1024 * 1024,
'count': 100,
}
}))
with patch('arctic.arctic.logger.info') as info:
ArcticLibraryBinding.check_quota(self)
self.arctic.__getitem__.assert_called_once_with(self.get_name.return_value)
info.assert_called_once_with('Mongo Quota: arctic_db.lib 0.001 / 1 GB used')
assert self.quota_countdown == 51153
def test_check_quota_exceeded():
self = create_autospec(ArcticLibraryBinding, database_name='arctic_db',
library='lib')
self.arctic = create_autospec(Arctic)
self.get_library_metadata.return_value = 1024 * 1024 * 1024
self.quota_countdown = 0
self.arctic.__getitem__.return_value = Mock(stats=Mock(return_value={'totals':
{'size': 1024 * 1024 * 1024,
'count': 100,
}
}))
with pytest.raises(QuotaExceededException) as e:
ArcticLibraryBinding.check_quota(self)
assert "Quota Exceeded: arctic_db.lib 1.000 / 1 GB used" in str(e.value)
def test_initialize_library():
self = create_autospec(Arctic)
self._conn = create_autospec(MongoClient)
self._cache = create_autospec(Cache)
lib = create_autospec(ArcticLibraryBinding)
lib.database_name = sentinel.db_name
lib.get_quota.return_value = None
lib_type = Mock()
with patch.dict('arctic.arctic.LIBRARY_TYPES', {sentinel.lib_type: lib_type}), \
patch('arctic.arctic.ArcticLibraryBinding', return_value=lib, autospec=True) as ML:
Arctic.initialize_library(self, sentinel.lib_name, sentinel.lib_type, thing=sentinel.thing)
assert ML.call_args_list == [call(self, sentinel.lib_name)]
assert ML.return_value.set_library_type.call_args_list == [call(sentinel.lib_type)]
assert ML.return_value.set_quota.call_args_list == [call(10 * 1024 * 1024 * 1024)]
assert lib_type.initialize_library.call_args_list == [call(ML.return_value, thing=sentinel.thing)]
def test_initialize_library_too_many_ns():
self = create_autospec(Arctic)
self._conn = create_autospec(MongoClient)
self._cache = create_autospec(Cache)
lib = create_autospec(ArcticLibraryBinding)
lib.database_name = sentinel.db_name
self._conn.__getitem__.return_value.list_collection_names.return_value = [x for x in six.moves.xrange(5001)]
lib_type = Mock()
with pytest.raises(ArcticException) as e:
with patch.dict('arctic.arctic.LIBRARY_TYPES', {sentinel.lib_type: lib_type}), \
patch('arctic.arctic.ArcticLibraryBinding', return_value=lib, autospec=True) as ML:
Arctic.initialize_library(self, sentinel.lib_name, sentinel.lib_type, thing=sentinel.thing)
assert self._conn.__getitem__.call_args_list == [call(sentinel.db_name),
call(sentinel.db_name)]
assert lib_type.initialize_library.call_count == 0
assert 'Too many namespaces 5001, not creating: sentinel.lib_name' in str(e.value)
def test_initialize_library_with_list_coll_names():
self = create_autospec(Arctic)
self._conn = create_autospec(MongoClient)
self._cache = create_autospec(Cache)
lib = create_autospec(ArcticLibraryBinding)
lib.database_name = sentinel.db_name
lib.get_quota.return_value = None
self._conn.__getitem__.return_value.list_collection_names.return_value = [x for x in six.moves.xrange(5001)]
lib_type = Mock()
with patch.dict('arctic.arctic.LIBRARY_TYPES', {sentinel.lib_type: lib_type}), \
patch('arctic.arctic.ArcticLibraryBinding', return_value=lib, autospec=True) as ML:
Arctic.initialize_library(self, sentinel.lib_name, sentinel.lib_type, thing=sentinel.thing, check_library_count=False)
assert ML.call_args_list == [call(self, sentinel.lib_name)]
assert ML.return_value.set_library_type.call_args_list == [call(sentinel.lib_type)]
assert ML.return_value.set_quota.call_args_list == [call(10 * 1024 * 1024 * 1024)]
assert lib_type.initialize_library.call_args_list == [call(ML.return_value, thing=sentinel.thing)]
def test_library_exists():
self = create_autospec(Arctic)
self.get_library.return_value = 'not an exception'
assert Arctic.library_exists(self, 'mylib')
def test_library_doesnt_exist():
self = create_autospec(Arctic)
self.get_library.side_effect = LibraryNotFoundException('not found')
assert not Arctic.library_exists(self, 'mylib')
def test_get_library():
self = create_autospec(Arctic)
self._library_cache = {}
library_type = Mock()
register_library_type(sentinel.lib_type, library_type)
with patch('arctic.arctic.ArcticLibraryBinding', autospec=True) as ML:
ML.return_value.get_library_type.return_value = sentinel.lib_type
library = Arctic.get_library(self, sentinel.lib_name)
del LIBRARY_TYPES[sentinel.lib_type]
assert ML.call_args_list == [call(self, sentinel.lib_name)]
assert library_type.call_args_list == [call(ML.return_value)]
assert library == library_type.return_value
def test_get_library_not_initialized():
self = create_autospec(Arctic,
mongo_host=sentinel.host)
self._library_cache = {}
with pytest.raises(LibraryNotFoundException) as e, \
patch('arctic.arctic.ArcticLibraryBinding', autospec=True) as ML:
ML.return_value.get_library_type.return_value = None
Arctic.get_library(self, sentinel.lib_name)
assert "Library %s was not correctly initialized in %s." % (sentinel.lib_name, self) in str(e.value)
def test_get_library_auth_issue():
self = create_autospec(Arctic, mongo_host=sentinel.host)
self._library_cache = {}
with pytest.raises(LibraryNotFoundException) as e, \
patch('arctic.arctic.ArcticLibraryBinding', autospec=True) as ML:
ML.return_value.get_library_type.side_effect = OperationFailure('database error: not authorized for query on arctic_marketdata.index.ARCTIC')
Arctic.get_library(self, sentinel.lib_name)
assert "Library %s was not correctly initialized in %s." % (sentinel.lib_name, self) in str(e.value)
def test_get_library_not_registered():
self = create_autospec(Arctic)
self._library_cache = {}
with pytest.raises(LibraryNotFoundException) as e, \
patch('arctic.arctic.ArcticLibraryBinding', autospec=True) as ML:
ML.return_value.get_library_type.return_value = sentinel.lib_type
Arctic.get_library(self, sentinel.lib_name)
assert ("Couldn't load LibraryType '%s' for '%s' (has the class been registered?)" %
(sentinel.lib_type, sentinel.lib_name)
)in str(e.value)
def test_mongo_host_get_set():
sentinel.mongo_host = Mock(nodes={("host", "port")})
with patch('arctic._cache.Cache.__init__', autospec=True, return_value=None):
arctic = Arctic(sentinel.mongo_host)
assert arctic.mongo_host == "host:port"
def test_arctic_set_get_state():
sentinel.mongo_host = Mock(nodes={("host", "port")})
with patch('arctic._cache.Cache.__init__', autospec=True, return_value=None):
store = Arctic(sentinel.mongo_host, allow_secondary="allow_secondary", app_name="app_name",
socketTimeoutMS=1234, connectTimeoutMS=2345, serverSelectionTimeoutMS=3456)
buff = pickle.dumps(store)
mnew = pickle.loads(buff)
assert mnew.mongo_host == "host:port"
assert mnew._allow_secondary == "allow_secondary"
assert mnew._application_name == "app_name"
assert mnew._socket_timeout == 1234
assert mnew._connect_timeout == 2345
assert mnew._server_selection_timeout == 3456
def test__conn_auth_issue():
auth_timeout = [0]
a = Arctic("host:12345")
sentinel.creds = Mock()
def flaky_auth(*args, **kwargs):
if not auth_timeout[0]:
auth_timeout[0] = 1
raise AutoReconnect()
with patch('arctic.arctic.authenticate', flaky_auth), \
patch('arctic.arctic.get_auth', return_value=sentinel.creds), \
patch('arctic._cache.Cache.__init__', autospec=True, return_value=None), \
patch('arctic.decorators._handle_error') as he:
a._conn
assert he.call_count == 1
assert auth_timeout[0]
def test_reset():
c = MagicMock()
with patch('pymongo.MongoClient', return_value=c, autospec=True) as mc, \
patch('arctic._cache.Cache._is_not_expired', return_value=True):
store = Arctic('hostname')
# do something to trigger lazy arctic init
store.list_libraries()
store.reset()
# Doesn't matter how many times we call it:
store.reset()
c.close.assert_called_once()
def test_ArcticLibraryBinding_db():
arctic = create_autospec(Arctic)
arctic._conn = create_autospec(MongoClient)
alb = ArcticLibraryBinding(arctic, "sentinel.library")
with patch.object(alb, '_auth') as _auth:
# connection is cached during __init__
alb._db
assert _auth.call_count == 0
# Change the arctic connection
arctic._conn = create_autospec(MongoClient)
alb._db
assert _auth.call_count == 1
# connection is still cached
alb._db
assert _auth.call_count == 1
|
import argparse
import collections
import contextlib
import difflib
import itertools
import json
import logging
import os
import pprint
import shlex
import shutil
import subprocess
import tempfile
import jinja2
DEFAULT_FLAGS = ('--cloud=GCP', '--machine_type=n1-standard-4',
'--benchmarks=netperf')
# Keys in the sample JSON we expect to vary between runs.
# These will be removed prior to diffing samples.
VARYING_KEYS = 'run_uri', 'sample_uri', 'timestamp', 'value'
# Template name, in same directory as this file.
TEMPLATE = 'side_by_side.html.j2'
# Thresholds for highlighting results
SMALL_CHANGE_THRESHOLD = 5
MEDIUM_CHANGE_THRESHOLD = 10
LARGE_CHANGE_THRESHOLD = 25
PerfKitBenchmarkerResult = collections.namedtuple(
'PerfKitBenchmarkerResult',
['name', 'description', 'sha1', 'samples', 'flags'])
@contextlib.contextmanager
def TempDir(delete=True, **kwargs):
"""Directory equivalent of tempfile.NamedTemporaryFile.
When used as a context manager, yields a temporary directory which by default
is removed when the context manager goes out of scope.
Example usage:
>>> with TempDir(prefix='perfkit') as td:
... shutil.copy('test.txt', td)
Args:
delete: Delete the directory on exit?
**kwargs: Passed to tempfile.mkdtemp.
Yields:
String. Path to the temporary directory.
"""
td = tempfile.mkdtemp(**kwargs)
logging.info('Created %s', td)
try:
yield td
finally:
if delete:
logging.info('Removing %s', td)
shutil.rmtree(td)
def _GitCommandPrefix():
"""Prefix for all git commands.
Returns:
list of strings; 'git' with an appropriate '--git-dir' flag.
"""
git_dir = os.path.join(os.path.dirname(__file__), '..', '..', '.git')
return ['git', '--git-dir', git_dir]
def _GitRevParse(revision):
"""Returns the output of 'git rev-parse' for 'revision'."""
output = subprocess.check_output(_GitCommandPrefix() +
['rev-parse', revision])
return output.rstrip()
def _GitDescribe(revision):
"""Returns the output of 'git describe' for 'revision'."""
output = subprocess.check_output(_GitCommandPrefix() +
['describe', '--always', revision])
return output.rstrip()
@contextlib.contextmanager
def PerfKitBenchmarkerCheckout(revision):
"""Yields a directory with PerfKitBenchmarker checked out to 'revision'."""
archive_cmd = _GitCommandPrefix() + ['archive', revision]
logging.info('Running: %s', archive_cmd)
p_archive = subprocess.Popen(archive_cmd, stdout=subprocess.PIPE)
with TempDir(prefix='pkb-test-') as td:
tar_cmd = ['tar', 'xf', '-']
logging.info('Running %s in %s', tar_cmd, td)
p_tar = subprocess.Popen(tar_cmd, stdin=p_archive.stdout, cwd=td)
archive_status = p_archive.wait()
tar_status = p_tar.wait()
if archive_status:
raise subprocess.CalledProcessError(archive_status, archive_cmd)
if tar_status:
raise subprocess.CalledProcessError(tar_status, tar_cmd)
yield td
def RunPerfKitBenchmarker(revision, flags):
"""Runs perfkitbenchmarker, returning the results as parsed JSON.
Args:
revision: string. git commit identifier. Version of PerfKitBenchmarker to
run.
flags: list of strings. Arguments to pass to `pkb.py`.
Returns:
PerfKitBenchmarkerResult. Its 'samples' attribute holds the deserialized
JSON output of running PerfKitBenchmarker with `--json_path`.
"""
sha1 = _GitRevParse(revision)
description = _GitDescribe(revision)
with PerfKitBenchmarkerCheckout(revision) as td:
with tempfile.NamedTemporaryFile(suffix='.json') as tf:
flags = flags + ['--json_path=' + tf.name]
cmd = ['./pkb.py'] + flags
logging.info('Running %s in %s', cmd, td)
subprocess.check_call(cmd, cwd=td)
samples = [json.loads(line) for line in tf]
return PerfKitBenchmarkerResult(name=revision, sha1=sha1, flags=flags,
samples=samples, description=description)
def _SplitLabels(labels):
"""Parse the 'labels' key from a PerfKitBenchmarker record.
Labels are recorded in '|key:value|,|key:value|' form.
This function transforms them to a dict.
Args:
labels: string. labels to parse.
Returns:
dict. Parsed 'labels'.
"""
result = {}
for item in labels.strip('|').split('|,|'):
k, v = item.split(':', 1)
result[k] = v
return result
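# A minimal illustration of the label format described above (hypothetical
# values):
#   >>> _SplitLabels('|cloud:GCP|,|machine_type:n1-standard-4|')
#   {'cloud': 'GCP', 'machine_type': 'n1-standard-4'}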
def _CompareSamples(a, b, context=True, numlines=1):
"""Generate an HTML table showing differences between 'a' and 'b'.
Args:
a: dict, as output by PerfKitBenchmarker.
b: dict, as output by PerfKitBenchmarker.
context: boolean. Show context in diff? If False, all lines are output, even
those which are equal.
numlines: int. Passed to difflib.HtmlDiff.make_table.
Returns:
string or None. An HTML table, or None if there are no differences.
"""
a = a.copy()
b = b.copy()
a['metadata'] = _SplitLabels(a.pop('labels', ''))
b['metadata'] = _SplitLabels(b.pop('labels', ''))
# Prune the keys in VARYING_KEYS prior to comparison to make the diff more
# informative.
for d in (a, b):
for key in VARYING_KEYS:
d.pop(key, None)
astr = pprint.pformat(a).splitlines()
bstr = pprint.pformat(b).splitlines()
if astr == bstr and context:
return None
differ = difflib.HtmlDiff()
return differ.make_table(astr, bstr, context=context, numlines=numlines)
def _MatchSamples(base_samples, head_samples):
"""Match items from base_samples with items from head_samples.
Rows are matched using 'test', 'metric', and 'unit' fields.
Args:
base_samples: List of dicts.
head_samples: List of dicts.
Returns:
List of pairs, each item of the pair containing either a dict or None.
"""
def ExtractKeys(samples):
return [(i['test'], i['metric'], i['unit']) for i in samples]
base_keys = ExtractKeys(base_samples)
head_keys = ExtractKeys(head_samples)
sm = difflib.SequenceMatcher(None, base_keys, head_keys)
result = []
for opcode, base_begin, base_end, head_begin, head_end in sm.get_opcodes():
if opcode == 'equal':
result.extend(zip(base_samples[base_begin:base_end],
head_samples[head_begin:head_end]))
elif opcode == 'replace':
result.extend(zip(base_samples[base_begin:base_end],
[None] * (base_end - base_begin)))
result.extend(zip([None] * (head_end - head_begin),
head_samples[head_begin:head_end]))
elif opcode == 'delete':
result.extend(zip(base_samples[base_begin:base_end],
[None] * (base_end - base_begin)))
elif opcode == 'insert':
result.extend(zip([None] * (head_end - head_begin),
head_samples[head_begin:head_end]))
else:
raise AssertionError('Unknown op: ' + opcode)
return result
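# Matching illustration (hypothetical rows; only test/metric/unit are compared):
#   base = [{'test': 'netperf', 'metric': 'TCP_RR', 'unit': 'us'}]
#   head = [{'test': 'netperf', 'metric': 'TCP_RR', 'unit': 'us'}]
#   _MatchSamples(base, head) -> [(base[0], head[0])]
# Rows present on only one side are paired with None instead.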
def RenderResults(base_result, head_result, template_name=TEMPLATE,
**kwargs):
"""Render the results of a comparison as an HTML page.
Args:
base_result: PerfKitBenchmarkerResult. Result of running against base
revision.
head_result: PerfKitBenchmarkerResult. Result of running against head
revision.
template_name: string. The filename of the template.
kwargs: Additional arguments to Template.render.
Returns:
String. The rendered HTML page.
"""
def _ClassForPercentDifference(percent_diff):
"""Crude highlighting of differences between runs.
Samples varying by more than 25% get the 'large' class, 10-25% 'medium',
and 5-10% 'small'; changes of 5% or less get no highlighting class.
Args:
percent_diff: float. percent difference between values.
"""
if percent_diff < 0:
direction = 'decrease'
else:
direction = 'increase'
percent_diff = abs(percent_diff)
if percent_diff > LARGE_CHANGE_THRESHOLD:
size = 'large'
elif percent_diff > MEDIUM_CHANGE_THRESHOLD:
size = 'medium'
elif percent_diff > SMALL_CHANGE_THRESHOLD:
size = 'small'
else:
return ''
return 'value-{0}-{1}'.format(direction, size)
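# e.g. +30% -> 'value-increase-large', -7% -> 'value-decrease-small',
# +3% -> '' (at or below SMALL_CHANGE_THRESHOLD, no highlighting class).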
env = jinja2.Environment(
loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
undefined=jinja2.StrictUndefined)
env.globals['class_for_percent_diff'] = _ClassForPercentDifference
env.globals['izip_longest'] = itertools.izip_longest
template = env.get_template(template_name)
matched = _MatchSamples(base_result.samples,
head_result.samples)
# Generate sample diffs
sample_context_diffs = []
sample_diffs = []
for base_sample, head_sample in matched:
if not base_sample or not head_sample:
# Sample inserted or deleted.
continue
sample_context_diffs.append(
_CompareSamples(base_sample, head_sample))
sample_diffs.append(
_CompareSamples(base_sample, head_sample, context=False))
# Generate flag diffs
flag_diffs = difflib.HtmlDiff().make_table(
base_result.flags, head_result.flags, context=False)
# Used for generating a chart with differences.
matched_json = json.dumps(matched)\
.replace(u'<', u'\\u003c') \
.replace(u'>', u'\\u003e') \
.replace(u'&', u'\\u0026') \
.replace(u"'", u'\\u0027')
return template.render(base=base_result,
head=head_result,
matched_samples=matched,
matched_samples_json=matched_json,
sample_diffs=sample_diffs,
sample_context_diffs=sample_context_diffs,
flag_diffs=flag_diffs,
infinity=float('inf'),
**kwargs)
def main():
p = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=__doc__)
p.add_argument('-t', '--title', default='PerfKitBenchmarker Comparison',
help="""HTML report title""")
p.add_argument('--base', default='master', help="""Base revision.""")
p.add_argument('--head', default='dev', help="""Head revision.""")
p.add_argument('--base-flags', default=None, help="""Flags for run against
'--base' revision. Will be combined with --flags.""",
type=shlex.split)
p.add_argument('--head-flags', default=None, help="""Flags for run against
'--head' revision. Will be combined with --flags.""",
type=shlex.split)
p.add_argument('-f', '--flags', type=shlex.split,
help="""Command line flags (Default: {0})""".format(
' '.join(DEFAULT_FLAGS)))
p.add_argument('-p', '--parallel', default=False, action='store_true',
help="""Run concurrently""")
p.add_argument('--rerender', help="""Re-render the HTML report from a JSON
file [for developers].""", action='store_true')
p.add_argument('json_output', help="""JSON output path.""")
p.add_argument('html_output', help="""HTML output path.""")
a = p.parse_args()
if (a.base_flags or a.head_flags):
if not (a.base_flags and a.head_flags):
p.error('--base-flags and --head-flags must be specified together.\n'
'\tbase flags={0}\n\thead flags={1}'.format(
a.base_flags, a.head_flags))
a.base_flags = a.base_flags + (a.flags or [])
a.head_flags = a.head_flags + (a.flags or [])
else:
# Just --flags
assert not a.base_flags, a.base_flags
assert not a.head_flags, a.head_flags
a.base_flags = a.flags or list(DEFAULT_FLAGS)
a.head_flags = a.flags or list(DEFAULT_FLAGS)
if not a.rerender:
if a.parallel:
from concurrent import futures
with futures.ThreadPoolExecutor(max_workers=2) as executor:
base_res_fut = executor.submit(RunPerfKitBenchmarker, a.base,
a.base_flags)
head_res_fut = executor.submit(RunPerfKitBenchmarker, a.head,
a.head_flags)
base_res = base_res_fut.result()
head_res = head_res_fut.result()
else:
base_res = RunPerfKitBenchmarker(a.base, a.base_flags)
head_res = RunPerfKitBenchmarker(a.head, a.head_flags)
logging.info('Base result: %s', base_res)
logging.info('Head result: %s', head_res)
with argparse.FileType('w')(a.json_output) as json_fp:
logging.info('Writing JSON to %s', a.json_output)
json.dump({'head': head_res._asdict(),
'base': base_res._asdict()},
json_fp,
indent=2)
json_fp.write('\n')
else:
logging.info('Loading results from %s', a.json_output)
with argparse.FileType('r')(a.json_output) as json_fp:
d = json.load(json_fp)
base_res = PerfKitBenchmarkerResult(**d['base'])
head_res = PerfKitBenchmarkerResult(**d['head'])
with argparse.FileType('w')(a.html_output) as html_fp:
logging.info('Writing HTML to %s', a.html_output)
html_fp.write(RenderResults(base_result=base_res,
head_result=head_res,
varying_keys=VARYING_KEYS,
title=a.title))
if __name__ == '__main__':
logging.basicConfig(level=logging.INFO)
main()
|
import numpy as np
from ...io import BaseRaw
from ...io.constants import FIFF
from ...utils import _validate_type, warn
from ...io.pick import _picks_to_idx
def optical_density(raw):
r"""Convert NIRS raw data to optical density.
Parameters
----------
raw : instance of Raw
The raw data.
Returns
-------
raw : instance of Raw
The modified raw instance.
"""
raw = raw.copy().load_data()
_validate_type(raw, BaseRaw, 'raw')
picks = _picks_to_idx(raw.info, 'fnirs_cw_amplitude')
data_means = np.mean(raw.get_data(), axis=1)
# The devices measure light intensity. Negative light intensities should
# not occur. If they do it is likely due to hardware or movement issues.
# Set all negative values to abs(x), this also has the benefit of ensuring
# that the means are all greater than zero for the division below.
if np.any(raw._data[picks] <= 0):
warn("Negative intensities encountered. Setting to abs(x)")
raw._data[picks] = np.abs(raw._data[picks])
for ii in picks:
raw._data[ii] /= data_means[ii]
np.log(raw._data[ii], out=raw._data[ii])
raw._data[ii] *= -1
raw.info['chs'][ii]['coil_type'] = FIFF.FIFFV_COIL_FNIRS_OD
return raw
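# Worked example of the per-channel transform above (illustrative numbers):
#   intensities I = [2.0, 1.0], channel mean = 1.5
#   OD = -log(I / mean) = [-log(1.33), -log(0.67)] ~= [-0.288, 0.405]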
|
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
class DmozSpider(CrawlSpider):
"""Follow categories and extract links."""
name = 'dmoz'
allowed_domains = ['dmoz-odp.org']
start_urls = ['http://www.dmoz-odp.org/']
rules = [
Rule(LinkExtractor(
restrict_css=('.top-cat', '.sub-cat', '.cat-item')
), callback='parse_directory', follow=True),
]
def parse_directory(self, response):
for div in response.css('.title-and-desc'):
yield {
'name': div.css('.site-title::text').extract_first(),
'description': div.css('.site-descr::text').extract_first().strip(),
'link': div.css('a::attr(href)').extract_first(),
}
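# Each yielded item has this shape (illustrative values):
#   {'name': 'Example Site',
#    'description': 'A short description of the site.',
#    'link': 'http://example.com/'}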
|
import requests
from kalliope.core import FileManager
from kalliope.core.TTS.TTSModule import TTSModule, FailToLoadSoundFile, MissingTTSParameter
import logging
logging.basicConfig()
logger = logging.getLogger("kalliope")
class Marytts(TTSModule):
def __init__(self, **kwargs):
super(Marytts, self).__init__(**kwargs)
self.host = kwargs.get('host', 'localhost')
self.port = kwargs.get('port', '59125')
self.locale = kwargs.get('locale', None)
self.voice = kwargs.get('voice', None)
self._check_parameters()
def say(self, words):
"""
:param words: The sentence to say
"""
self.generate_and_play(words, self._generate_audio_file)
def _check_parameters(self):
"""
Check that parameters are ok, raise a MissingTTSParameter exception otherwise.
:return: True if parameters are ok, raise an exception otherwise
.. raises:: MissingTTSParameter
"""
if self.locale is None or self.voice is None:
raise MissingTTSParameter("[MaryTTS] Missing parameters, check documentation !")
return True
def _generate_audio_file(self):
"""
Generic method used as a Callback in TTSModule
- must provide the audio file and write it to disk
.. raises:: FailToLoadSoundFile
"""
# Prepare payload
payload = self.get_payload()
# getting the audio
r = requests.get('http://' + self.host + ':' + self.port + "/process?", payload)
if r.status_code != 200:
raise FailToLoadSoundFile("MaryTTS : Fail while trying to remotely access the audio file")
# OK we get the audio we can write the sound file
FileManager.write_in_file(self.file_path, r.content)
def get_payload(self):
"""
Generic method used to load the payload used to access the remote API
:return: Payload to use to access the remote API
"""
return {"INPUT_TEXT":self.words,
"INPUT_TYPE":"TEXT",
"LOCALE": self.locale,
"VOICE":self.voice,
"OUTPUT_TYPE":"AUDIO",
"AUDIO":"WAVE",
}
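# With the defaults above, the GET request built in _generate_audio_file looks
# like (a sketch; parameter order may differ):
#   http://localhost:59125/process?INPUT_TEXT=<words>&INPUT_TYPE=TEXT
#       &LOCALE=<locale>&VOICE=<voice>&OUTPUT_TYPE=AUDIO&AUDIO=WAVE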
|
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from files import FilesCollector
###############################################################################
class TestFilesCollector(CollectorTestCase):
def setUp(self):
config = get_collector_config('FilesCollector', {
})
self.collector = FilesCollector(config, None)
def test_import(self):
self.assertTrue(FilesCollector)
###############################################################################
if __name__ == "__main__":
unittest.main()
|
SUPPORTED = {
'CURRENCIES': [
'USD', 'GBP', 'EUR'
],
'FEE_MODEL': {
'ZeroFeeModel': 'qstrader.broker.fee_model.zero_fee_model'
}
}
LOGGING = {
'DATE_FORMAT': '%Y-%m-%d %H:%M:%S'
}
PRINT_EVENTS = True
def set_print_events(print_events=True):
global PRINT_EVENTS
PRINT_EVENTS = print_events
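# Typical usage (a sketch; the import path assumes this module is qstrader's
# settings module):
#   from qstrader import settings
#   settings.set_print_events(False)  # silence per-event printing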
|
from datetime import timedelta
import logging
import pyrepetier
import voluptuous as vol
from homeassistant.const import (
CONF_API_KEY,
CONF_HOST,
CONF_MONITORED_CONDITIONS,
CONF_NAME,
CONF_PORT,
CONF_SENSORS,
PERCENTAGE,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.discovery import load_platform
from homeassistant.helpers.dispatcher import dispatcher_send
from homeassistant.helpers.event import track_time_interval
from homeassistant.util import slugify as util_slugify
_LOGGER = logging.getLogger(__name__)
DEFAULT_NAME = "RepetierServer"
DOMAIN = "repetier"
REPETIER_API = "repetier_api"
SCAN_INTERVAL = timedelta(seconds=10)
UPDATE_SIGNAL = "repetier_update_signal"
TEMP_DATA = {"tempset": "temp_set", "tempread": "state", "output": "output"}
API_PRINTER_METHODS = {
"bed_temperature": {
"offline": {"heatedbeds": None, "state": "off"},
"state": {"heatedbeds": "temp_data"},
"temp_data": TEMP_DATA,
"attribute": "heatedbeds",
},
"extruder_temperature": {
"offline": {"extruder": None, "state": "off"},
"state": {"extruder": "temp_data"},
"temp_data": TEMP_DATA,
"attribute": "extruder",
},
"chamber_temperature": {
"offline": {"heatedchambers": None, "state": "off"},
"state": {"heatedchambers": "temp_data"},
"temp_data": TEMP_DATA,
"attribute": "heatedchambers",
},
"current_state": {
"offline": {"state": None},
"state": {
"state": "state",
"activeextruder": "active_extruder",
"hasxhome": "x_homed",
"hasyhome": "y_homed",
"haszhome": "z_homed",
"firmware": "firmware",
"firmwareurl": "firmware_url",
},
},
"current_job": {
"offline": {"job": None, "state": "off"},
"state": {
"done": "state",
"job": "job_name",
"jobid": "job_id",
"totallines": "total_lines",
"linessent": "lines_sent",
"oflayer": "total_layers",
"layer": "current_layer",
"speedmultiply": "feed_rate",
"flowmultiply": "flow",
"x": "x",
"y": "y",
"z": "z",
},
},
"job_end": {
"offline": {"job": None, "state": "off", "start": None, "printtime": None},
"state": {
"job": "job_name",
"start": "start",
"printtime": "print_time",
"printedtimecomp": "from_start",
},
},
"job_start": {
"offline": {
"job": None,
"state": "off",
"start": None,
"printedtimecomp": None,
},
"state": {"job": "job_name", "start": "start", "printedtimecomp": "from_start"},
},
}
def has_all_unique_names(value):
"""Validate that printers have an unique name."""
names = [util_slugify(printer[CONF_NAME]) for printer in value]
vol.Schema(vol.Unique())(names)
return value
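# e.g. two printers named "My Printer" and "my printer" both slugify to
# "my_printer", so vol.Unique() raises Invalid here (a sketch).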
SENSOR_TYPES = {
# Type, Unit, Icon, post
"bed_temperature": ["temperature", TEMP_CELSIUS, "mdi:thermometer", "_bed_"],
"extruder_temperature": [
"temperature",
TEMP_CELSIUS,
"mdi:thermometer",
"_extruder_",
],
"chamber_temperature": [
"temperature",
TEMP_CELSIUS,
"mdi:thermometer",
"_chamber_",
],
"current_state": ["state", None, "mdi:printer-3d", ""],
"current_job": ["progress", PERCENTAGE, "mdi:file-percent", "_current_job"],
"job_end": ["progress", None, "mdi:clock-end", "_job_end"],
"job_start": ["progress", None, "mdi:clock-start", "_job_start"],
}
SENSOR_SCHEMA = vol.Schema(
{
vol.Optional(CONF_MONITORED_CONDITIONS, default=list(SENSOR_TYPES)): vol.All(
cv.ensure_list, [vol.In(SENSOR_TYPES)]
),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
}
)
CONFIG_SCHEMA = vol.Schema(
{
DOMAIN: vol.All(
cv.ensure_list,
[
vol.Schema(
{
vol.Required(CONF_API_KEY): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_PORT, default=3344): cv.port,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_SENSORS, default={}): SENSOR_SCHEMA,
}
)
],
has_all_unique_names,
)
},
extra=vol.ALLOW_EXTRA,
)
def setup(hass, config):
"""Set up the Repetier Server component."""
hass.data[REPETIER_API] = {}
for repetier in config[DOMAIN]:
_LOGGER.debug("Repetier server config %s", repetier[CONF_HOST])
url = "http://{}".format(repetier[CONF_HOST])
port = repetier[CONF_PORT]
api_key = repetier[CONF_API_KEY]
client = pyrepetier.Repetier(url=url, port=port, apikey=api_key)
printers = client.getprinters()
if not printers:
return False
sensors = repetier[CONF_SENSORS][CONF_MONITORED_CONDITIONS]
api = PrinterAPI(hass, client, printers, sensors, repetier[CONF_NAME], config)
api.update()
track_time_interval(hass, api.update, SCAN_INTERVAL)
hass.data[REPETIER_API][repetier[CONF_NAME]] = api
return True
class PrinterAPI:
"""Handle the printer API."""
def __init__(self, hass, client, printers, sensors, conf_name, config):
"""Set up instance."""
self._hass = hass
self._client = client
self.printers = printers
self.sensors = sensors
self.conf_name = conf_name
self.config = config
self._known_entities = set()
def get_data(self, printer_id, sensor_type, temp_id):
"""Get data from the state cache."""
printer = self.printers[printer_id]
methods = API_PRINTER_METHODS[sensor_type]
for prop, offline in methods["offline"].items():
state = getattr(printer, prop)
if state == offline:
# if state matches offline, sensor is offline
return None
data = {}
for prop, attr in methods["state"].items():
prop_data = getattr(printer, prop)
if attr == "temp_data":
temp_methods = methods["temp_data"]
for temp_prop, temp_attr in temp_methods.items():
data[temp_attr] = getattr(prop_data[temp_id], temp_prop)
else:
data[attr] = prop_data
return data
def update(self, now=None):
"""Update the state cache from the printer API."""
for printer in self.printers:
printer.get_data()
self._load_entities()
dispatcher_send(self._hass, UPDATE_SIGNAL)
def _load_entities(self):
sensor_info = []
for pidx, printer in enumerate(self.printers):
for sensor_type in self.sensors:
info = {}
info["sensor_type"] = sensor_type
info["printer_id"] = pidx
info["name"] = printer.slug
info["printer_name"] = self.conf_name
known = f"{printer.slug}-{sensor_type}"
if known in self._known_entities:
continue
methods = API_PRINTER_METHODS[sensor_type]
if "temp_data" in methods["state"].values():
prop_data = getattr(printer, methods["attribute"])
if prop_data is None:
continue
for idx, _ in enumerate(prop_data):
prop_info = info.copy()
prop_info["temp_id"] = idx
sensor_info.append(prop_info)
else:
info["temp_id"] = None
sensor_info.append(info)
self._known_entities.add(known)
if not sensor_info:
return
load_platform(self._hass, "sensor", DOMAIN, sensor_info, self.config)
|
import re
import numpy as np
import pandas as pd
from scipy import sparse
from scipy.stats import pearsonr
from scattertext.representations import Word2VecFromParsedCorpus
class EmbeddingsResolver:
def __init__(self, corpus):
self.corpus_ = corpus
self.embeddings_ = None
self.word2vec_model_ = None
def set_embeddings(self, embeddings):
'''
Specifies a fixed set of embeddings
:param embeddings: array-like, sparse or dense, shape should be (embedding size, # terms)
:return: EmbeddingsResolver
'''
if self.embeddings_ is not None:
raise Exception("You have already set embeddings by running set_embeddings or set_embeddings_model.")
assert embeddings.shape[1] == self.corpus_.get_num_terms()
self.embeddings_ = embeddings.T
self.vocab_ = self.corpus_.get_terms()
return self
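# Shape illustration (a sketch): for a corpus with 3 terms and 2-d embeddings,
# pass a (2, 3) array; it is stored transposed as (3, 2), one row per term.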
def set_embeddings_model(self, model=None, term_acceptance_re=re.compile('[a-z]{3,}')):
'''
:param model: gensim word2vec.Word2Vec model
:param term_acceptance_re : SRE_Pattern, Regular expression to identify
valid terms, default re.compile('[a-z]{3,}')
:return: EmbeddingsResolver
'''
if self.embeddings_ is not None:
raise Exception("You have already set embeddings by running set_embeddings or set_embeddings_model.")
self.word2vec_model_ = model
if term_acceptance_re is not None:
acceptable_terms = set([t for t in self.corpus_.get_terms() if term_acceptance_re.match(t)])
else:
acceptable_terms = set(self.corpus_.get_terms())
model = Word2VecFromParsedCorpus(self.corpus_, model).train()
self.corpus_ = self.corpus_.remove_terms(set(self.corpus_.get_terms()) - acceptable_terms)
weight_list = [model[word] for word in model.wv.vocab]
self.embeddings_ = np.stack(weight_list)
self.vocab_ = model.wv.vocab
return self
def project_embeddings(self, projection_model=None, x_dim=0, y_dim=1):
'''
:param projection_model: sklearn-style unsupervised model (e.g., PCA); by
default the recommended model is umap.UMAP, which requires umap-learn to
be installed
:param x_dim: int, default 0, dimension of transformation matrix for x-axis
:param y_dim: int, default 1, dimension of transformation matrix for y-axis
:return:
'''
axes = self.project(projection_model)
word_axes = (pd.DataFrame({'term': [w for w in self.vocab_],
'x': axes.T[x_dim],
'y': axes.T[y_dim]})
.set_index('term')
.reindex(pd.Series(self.corpus_.get_terms()))
.dropna())
self.corpus_ = self.corpus_.remove_terms(set(self.corpus_.get_terms()) - set(word_axes.index))
word_axes = word_axes.reindex(self.corpus_.get_terms()).dropna()
return self.corpus_, word_axes
'''
def get_svd(self, num_dims, category):
U, s, V = sparse.linalg.svds(self.corpus_._X.astype('d'), k=num_dims)
Y = self.corpus_.get_category_ids() == category
[pearsonr(U.T[i], ) for i in range(num_dims)]
'''
def project(self, projection_model=None):
'''
:param projection_model: sklearn-style unsupervised model (e.g., PCA); by
default the recommended model is umap.UMAP, which requires umap-learn to
be installed
:return: array, shape (vocab size, num dimensions)
'''
if self.embeddings_ is None:
raise Exception("Run set_embeddings_model or set_embeddings to get embeddings")
if projection_model is None:
try:
import umap
except ImportError:
raise Exception("Please install umap (pip install umap-learn) to use the default projection_model.")
projection_model = umap.UMAP(min_dist=0.5, metric='cosine')
axes = projection_model.fit_transform(self.embeddings_)
return axes
|
import typing
import numpy as np
import matchzoo as mz
from matchzoo.engine.base_task import BaseTask
from matchzoo.engine.base_model import BaseModel
from matchzoo.engine.base_preprocessor import BasePreprocessor
from matchzoo.data_generator import DataGeneratorBuilder
class Preparer(object):
"""
Unified setup processes of all MatchZoo models.
`config` is used to control specific behaviors. The default `config`
will be updated accordingly if a `config` dictionary is passed, e.g., to
override the default `bin_size`, pass `config={'bin_size': 15}`.
See `tutorials/automation.ipynb` for a detailed walkthrough on usage.
Default `config`:
{
# pair generator builder kwargs
'num_dup': 1,
# histogram unit of DRMM
'bin_size': 30,
'hist_mode': 'LCH',
# dynamic Pooling of MatchPyramid
'compress_ratio_left': 1.0,
'compress_ratio_right': 1.0,
# if no `matchzoo.Embedding` is passed to `tune`
'embedding_output_dim': 100
}
:param task: Task.
:param config: Configuration of specific behaviors.
Example:
>>> import matchzoo as mz
>>> task = mz.tasks.Ranking(loss=mz.losses.RankCrossEntropyLoss())
>>> preparer = mz.auto.Preparer(task)
>>> model_class = mz.models.DenseBaseline
>>> train_raw = mz.datasets.toy.load_data('train', 'ranking')
>>> model, prpr, gen_builder, matrix = preparer.prepare(model_class,
... train_raw)
>>> model.params.completed()
True
"""
def __init__(
self,
task: BaseTask,
config: typing.Optional[dict] = None
):
"""Init."""
self._task = task
self._config = self.get_default_config()
if config:
self._config.update(config)
self._infer_num_neg()
def prepare(
self,
model_class: typing.Type[BaseModel],
data_pack: mz.DataPack,
preprocessor: typing.Optional[BasePreprocessor] = None,
embedding: typing.Optional['mz.Embedding'] = None,
) -> typing.Tuple[
BaseModel,
BasePreprocessor,
DataGeneratorBuilder,
np.ndarray
]:
"""
Prepare.
:param model_class: Model class.
:param data_pack: DataPack used to fit the preprocessor.
:param preprocessor: Preprocessor used to fit the `data_pack`.
(default: the default preprocessor of `model_class`)
:param embedding: Embedding to build an embedding matrix. If not set,
then a correctly shaped randomized matrix will be built.
:return: A tuple of `(model, preprocessor, data_generator_builder,
embedding_matrix)`.
"""
if not preprocessor:
preprocessor = model_class.get_default_preprocessor()
if issubclass(model_class, (mz.models.DSSM, mz.models.CDSSM)):
preprocessor.with_word_hashing = False
preprocessor.fit(data_pack, verbose=0)
model, embedding_matrix = self._build_model(
model_class,
preprocessor,
embedding
)
data_gen_builder = self._build_data_gen_builder(
model,
embedding_matrix,
preprocessor
)
return (
model,
preprocessor,
data_gen_builder,
embedding_matrix
)
def _build_model(
self,
model_class,
preprocessor,
embedding
) -> typing.Tuple[BaseModel, np.ndarray]:
model = model_class()
model.params['task'] = self._task
model.params.update(preprocessor.context)
if 'with_embedding' in model.params:
embedding_matrix = self._build_matrix(preprocessor, embedding)
model.params['embedding_input_dim'] = embedding_matrix.shape[0]
model.params['embedding_output_dim'] = embedding_matrix.shape[1]
else:
embedding_matrix = None
self._handle_match_pyramid_dpool_size(model)
self._handle_drmm_input_shapes(model)
assert model.params.completed()
model.build()
model.compile()
if 'with_embedding' in model.params:
model.load_embedding_matrix(embedding_matrix)
return model, embedding_matrix
def _handle_match_pyramid_dpool_size(self, model):
if isinstance(model, mz.models.MatchPyramid):
suggestion = mz.layers.DynamicPoolingLayer.get_size_suggestion(
msize1=model.params['input_shapes'][0][0],
msize2=model.params['input_shapes'][1][0],
psize1=model.params['dpool_size'][0],
psize2=model.params['dpool_size'][1],
)
model.params['dpool_size'] = suggestion
def _handle_drmm_input_shapes(self, model):
if isinstance(model, mz.models.DRMM):
left = model.params['input_shapes'][0]
right = left + (self._config['bin_size'],)
model.params['input_shapes'] = (left, right)
def _build_matrix(self, preprocessor, embedding):
if embedding:
vocab_unit = preprocessor.context['vocab_unit']
term_index = vocab_unit.state['term_index']
return embedding.build_matrix(term_index)
else:
matrix_shape = (
preprocessor.context['vocab_size'],
self._config['embedding_output_dim']
)
return np.random.uniform(-0.2, 0.2, matrix_shape)
def _build_data_gen_builder(self, model, embedding_matrix, preprocessor):
builder_kwargs = dict(callbacks=[])
if isinstance(self._task.loss, (mz.losses.RankHingeLoss,
mz.losses.RankCrossEntropyLoss)):
builder_kwargs.update(dict(
mode='pair',
num_dup=self._config['num_dup'],
num_neg=self._config['num_neg']
))
if isinstance(model, mz.models.DRMM):
histo_callback = mz.data_generator.callbacks.Histogram(
embedding_matrix=embedding_matrix,
bin_size=self._config['bin_size'],
hist_mode=self._config['hist_mode']
)
builder_kwargs['callbacks'].append(histo_callback)
if isinstance(model, mz.models.MatchPyramid):
dpool_callback = mz.data_generator.callbacks.DynamicPooling(
fixed_length_left=model.params['input_shapes'][0][0],
fixed_length_right=model.params['input_shapes'][1][0],
compress_ratio_left=self._config['compress_ratio_left'],
compress_ratio_right=self._config['compress_ratio_right']
)
builder_kwargs['callbacks'].append(dpool_callback)
if isinstance(model, (mz.models.DSSM, mz.models.CDSSM)):
term_index = preprocessor.context['vocab_unit'].state['term_index']
hashing_unit = mz.preprocessors.units.WordHashing(term_index)
hashing_callback = mz.data_generator.callbacks.LambdaCallback(
on_batch_data_pack=lambda data_pack:
data_pack.apply_on_text(
func=hashing_unit.transform,
inplace=True,
verbose=0
)
)
builder_kwargs['callbacks'].append(hashing_callback)
return DataGeneratorBuilder(**builder_kwargs)
def _infer_num_neg(self):
if isinstance(self._task.loss, (mz.losses.RankHingeLoss,
mz.losses.RankCrossEntropyLoss)):
self._config['num_neg'] = self._task.loss.num_neg
@classmethod
def get_default_config(cls) -> dict:
"""Default config getter."""
return {
# pair generator builder kwargs
'num_dup': 1,
# histogram unit of DRMM
'bin_size': 30,
'hist_mode': 'LCH',
# dynamic Pooling of MatchPyramid
'compress_ratio_left': 1.0,
'compress_ratio_right': 1.0,
# if no `matchzoo.Embedding` is passed to `tune`
'embedding_output_dim': 100
}
|
from collections import defaultdict, deque
import logging
import voluptuous as vol
from homeassistant.components import automation, group, script, websocket_api
from homeassistant.components.homeassistant import scene
from homeassistant.core import HomeAssistant, callback, split_entity_id
from homeassistant.helpers import device_registry, entity_registry
DOMAIN = "search"
_LOGGER = logging.getLogger(__name__)
async def async_setup(hass: HomeAssistant, config: dict):
"""Set up the Search component."""
websocket_api.async_register_command(hass, websocket_search_related)
return True
@websocket_api.async_response
@websocket_api.websocket_command(
{
vol.Required("type"): "search/related",
vol.Required("item_type"): vol.In(
(
"area",
"automation",
"config_entry",
"device",
"entity",
"group",
"scene",
"script",
)
),
vol.Required("item_id"): str,
}
)
async def websocket_search_related(hass, connection, msg):
"""Handle search."""
searcher = Searcher(
hass,
await device_registry.async_get_registry(hass),
await entity_registry.async_get_registry(hass),
)
connection.send_result(
msg["id"], searcher.async_search(msg["item_type"], msg["item_id"])
)
class Searcher:
"""Find related things.
A few rules:
Scenes, scripts, automations and config entries will only be expanded if they are
the entry point. They won't be expanded if we process them. This is because they
turn the results into garbage.
"""
# These types won't be further explored. Config entries + Output types.
DONT_RESOLVE = {"scene", "automation", "script", "group", "config_entry", "area"}
# These types exist as an entity and so need cleanup in results
EXIST_AS_ENTITY = {"script", "scene", "automation", "group"}
def __init__(
self,
hass: HomeAssistant,
device_reg: device_registry.DeviceRegistry,
entity_reg: entity_registry.EntityRegistry,
):
"""Search results."""
self.hass = hass
self._device_reg = device_reg
self._entity_reg = entity_reg
self.results = defaultdict(set)
self._to_resolve = deque()
@callback
def async_search(self, item_type, item_id):
"""Find results."""
_LOGGER.debug("Searching for %s/%s", item_type, item_id)
self.results[item_type].add(item_id)
self._to_resolve.append((item_type, item_id))
while self._to_resolve:
search_type, search_id = self._to_resolve.popleft()
getattr(self, f"_resolve_{search_type}")(search_id)
# Clean up entity_id items, from the general "entity" type result,
# that are also found in the specific entity domain type.
for result_type in self.EXIST_AS_ENTITY:
self.results["entity"] -= self.results[result_type]
# Remove entry into graph from search results.
to_remove_item_type = item_type
if item_type == "entity":
domain = split_entity_id(item_id)[0]
if domain in self.EXIST_AS_ENTITY:
to_remove_item_type = domain
self.results[to_remove_item_type].remove(item_id)
# Filter out empty sets.
return {key: val for key, val in self.results.items() if val}
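# Example result shape (illustrative): searching ("device", "abc123") might
# return {"entity": {"light.desk"}, "config_entry": {"entry1"}, "area": {"office"}},
# where each value is a set of related item ids.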
@callback
def _add_or_resolve(self, item_type, item_id):
"""Add an item to explore."""
if item_id in self.results[item_type]:
return
self.results[item_type].add(item_id)
if item_type not in self.DONT_RESOLVE:
self._to_resolve.append((item_type, item_id))
@callback
def _resolve_area(self, area_id) -> None:
"""Resolve an area."""
for device in device_registry.async_entries_for_area(self._device_reg, area_id):
self._add_or_resolve("device", device.id)
for entity_entry in entity_registry.async_entries_for_area(
self._entity_reg, area_id
):
self._add_or_resolve("entity", entity_entry.entity_id)
@callback
def _resolve_device(self, device_id) -> None:
"""Resolve a device."""
device_entry = self._device_reg.async_get(device_id)
# Unlikely entry doesn't exist, but let's guard for bad data.
if device_entry is not None:
if device_entry.area_id:
self._add_or_resolve("area", device_entry.area_id)
for config_entry_id in device_entry.config_entries:
self._add_or_resolve("config_entry", config_entry_id)
# We do not resolve device_entry.via_device_id because that
# device is not related data-wise inside HA.
for entity_entry in entity_registry.async_entries_for_device(
self._entity_reg, device_id
):
self._add_or_resolve("entity", entity_entry.entity_id)
for entity_id in script.scripts_with_device(self.hass, device_id):
self._add_or_resolve("entity", entity_id)
for entity_id in automation.automations_with_device(self.hass, device_id):
self._add_or_resolve("entity", entity_id)
@callback
def _resolve_entity(self, entity_id) -> None:
"""Resolve an entity."""
# Extra: Find automations and scripts that reference this entity.
for entity in scene.scenes_with_entity(self.hass, entity_id):
self._add_or_resolve("entity", entity)
for entity in group.groups_with_entity(self.hass, entity_id):
self._add_or_resolve("entity", entity)
for entity in automation.automations_with_entity(self.hass, entity_id):
self._add_or_resolve("entity", entity)
for entity in script.scripts_with_entity(self.hass, entity_id):
self._add_or_resolve("entity", entity)
# Find devices
entity_entry = self._entity_reg.async_get(entity_id)
if entity_entry is not None:
if entity_entry.device_id:
self._add_or_resolve("device", entity_entry.device_id)
if entity_entry.config_entry_id is not None:
self._add_or_resolve("config_entry", entity_entry.config_entry_id)
domain = split_entity_id(entity_id)[0]
if domain in self.EXIST_AS_ENTITY:
self._add_or_resolve(domain, entity_id)
@callback
def _resolve_automation(self, automation_entity_id) -> None:
"""Resolve an automation.
Will only be called if automation is an entry point.
"""
for entity in automation.entities_in_automation(
self.hass, automation_entity_id
):
self._add_or_resolve("entity", entity)
for device in automation.devices_in_automation(self.hass, automation_entity_id):
self._add_or_resolve("device", device)
@callback
def _resolve_script(self, script_entity_id) -> None:
"""Resolve a script.
Will only be called if script is an entry point.
"""
for entity in script.entities_in_script(self.hass, script_entity_id):
self._add_or_resolve("entity", entity)
for device in script.devices_in_script(self.hass, script_entity_id):
self._add_or_resolve("device", device)
@callback
def _resolve_group(self, group_entity_id) -> None:
"""Resolve a group.
Will only be called if group is an entry point.
"""
for entity_id in group.get_entity_ids(self.hass, group_entity_id):
self._add_or_resolve("entity", entity_id)
@callback
def _resolve_scene(self, scene_entity_id) -> None:
"""Resolve a scene.
Will only be called if scene is an entry point.
"""
for entity in scene.entities_in_scene(self.hass, scene_entity_id):
self._add_or_resolve("entity", entity)
@callback
def _resolve_config_entry(self, config_entry_id) -> None:
"""Resolve a config entry.
Will only be called if config entry is an entry point.
"""
for device_entry in device_registry.async_entries_for_config_entry(
self._device_reg, config_entry_id
):
self._add_or_resolve("device", device_entry.id)
for entity_entry in entity_registry.async_entries_for_config_entry(
self._entity_reg, config_entry_id
):
self._add_or_resolve("entity", entity_entry.entity_id)
|
import logging
from pytrackr.api import trackrApiInterface
import voluptuous as vol
from homeassistant.components.device_tracker import PLATFORM_SCHEMA
from homeassistant.const import CONF_PASSWORD, CONF_USERNAME
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.event import track_utc_time_change
from homeassistant.util import slugify
_LOGGER = logging.getLogger(__name__)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{vol.Required(CONF_USERNAME): cv.string, vol.Required(CONF_PASSWORD): cv.string}
)
def setup_scanner(hass, config: dict, see, discovery_info=None):
"""Validate the configuration and return a TrackR scanner."""
TrackRDeviceScanner(hass, config, see)
return True
class TrackRDeviceScanner:
"""A class representing a TrackR device."""
def __init__(self, hass, config: dict, see) -> None:
"""Initialize the TrackR device scanner."""
self.hass = hass
self.api = trackrApiInterface(
config.get(CONF_USERNAME), config.get(CONF_PASSWORD)
)
self.see = see
self.devices = self.api.get_trackrs()
self._update_info()
track_utc_time_change(self.hass, self._update_info, second=range(0, 60, 30))
def _update_info(self, now=None) -> None:
"""Update the device info."""
_LOGGER.debug("Updating devices %s", now)
# Update self.devices to pick up any new devices added
# to the user's account.
self.devices = self.api.get_trackrs()
for trackr in self.devices:
trackr.update_state()
trackr_id = trackr.tracker_id()
trackr_device_id = trackr.id()
lost = trackr.lost()
dev_id = slugify(trackr.name())
# slugify returns a string, so guard against an empty result
# rather than None
if not dev_id:
    dev_id = trackr_id
location = trackr.last_known_location()
lat = location["latitude"]
lon = location["longitude"]
attrs = {
"last_updated": trackr.last_updated(),
"last_seen": trackr.last_time_seen(),
"trackr_id": trackr_id,
"id": trackr_device_id,
"lost": lost,
"battery_level": trackr.battery_level(),
}
self.see(dev_id=dev_id, gps=(lat, lon), attributes=attrs)
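# Illustrative note: second=range(0, 60, 30) above schedules _update_info
# twice a minute, at :00 and :30 - equivalent to passing second=[0, 30].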
|
import diamond.collector
import os
class IPCollector(diamond.collector.Collector):
PROC = [
'/proc/net/snmp',
]
GAUGES = [
'Forwarding',
'DefaultTTL',
]
def process_config(self):
super(IPCollector, self).process_config()
if self.config['allowed_names'] is None:
self.config['allowed_names'] = []
def get_default_config_help(self):
config_help = super(IPCollector, self).get_default_config_help()
config_help.update({
'allowed_names': 'list of entries to collect, empty to collect all'
})
return config_help
def get_default_config(self):
""" Returns the default collector settings
"""
config = super(IPCollector, self).get_default_config()
config.update({
'path': 'ip',
'allowed_names': 'InAddrErrors, InDelivers, InDiscards, ' +
'InHdrErrors, InReceives, InUnknownProtos, OutDiscards, ' +
'OutNoRoutes, OutRequests'
})
return config
def collect(self):
metrics = {}
for filepath in self.PROC:
if not os.access(filepath, os.R_OK):
self.log.error('Permission to access %s denied', filepath)
continue
header = ''
data = ''
# Scan the file for the lines that start with Ip.
# open() raises IOError on failure instead of returning a falsy
# value, so catch the exception rather than testing the object.
try:
    file = open(filepath)
except IOError:
    self.log.error('Failed to open %s', filepath)
    continue
while True:
line = file.readline()
# Reached EOF?
if len(line) == 0:
break
# Line has metrics?
if line.startswith('Ip'):
header = line
data = file.readline()
break
file.close()
# No data from the file?
if header == '' or data == '':
self.log.error('%s has no lines starting with Ip', filepath)
continue
header = header.split()
data = data.split()
# Zip up the keys and values
for i in range(1, len(header)):
metrics[header[i]] = data[i]
for metric_name in metrics.keys():
if ((len(self.config['allowed_names']) > 0 and
metric_name not in self.config['allowed_names'])):
continue
value = int(metrics[metric_name])
# Publish the metric
if metric_name in self.GAUGES:
self.publish_gauge(metric_name, value, 0)
else:
self.publish_counter(metric_name, value, 0)
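# Illustrative only: the "Ip" section of /proc/net/snmp parsed by collect()
# is a header line followed by a data line (values here are made up).
# Zipping the two, minus the leading "Ip:" token, yields the metrics dict:
def _parse_ip_lines_example():
    header = 'Ip: Forwarding DefaultTTL InReceives InDelivers'.split()
    data = 'Ip: 1 64 1234567 1234500'.split()
    return dict(zip(header[1:], data[1:]))  # {'Forwarding': '1', ...}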
|
import glob
import os.path
from coverage.misc import CoverageException, file_be_gone
from coverage.sqldata import CoverageData
def line_counts(data, fullpath=False):
"""Return a dict summarizing the line coverage data.
Keys are based on the file names, and values are the number of executed
lines. If `fullpath` is true, then the keys are the full pathnames of
the files, otherwise they are the basenames of the files.
Returns a dict mapping file names to counts of lines.
"""
summ = {}
if fullpath:
filename_fn = lambda f: f
else:
filename_fn = os.path.basename
for filename in data.measured_files():
summ[filename_fn(filename)] = len(data.lines(filename))
return summ
def add_data_to_hash(data, filename, hasher):
"""Contribute `filename`'s data to the `hasher`.
`hasher` is a `coverage.misc.Hasher` instance to be updated with
the file's data. It should only get the results data, not the run
data.
"""
if data.has_arcs():
hasher.update(sorted(data.arcs(filename) or []))
else:
hasher.update(sorted(data.lines(filename) or []))
hasher.update(data.file_tracer(filename))
def combine_parallel_data(data, aliases=None, data_paths=None, strict=False):
"""Combine a number of data files together.
Treat `data.filename` as a file prefix, and combine the data from all
of the data files starting with that prefix plus a dot.
If `aliases` is provided, it's a `PathAliases` object that is used to
re-map paths to match the local machine's.
If `data_paths` is provided, it is a list of directories or files to
combine. Directories are searched for files that start with
`data.filename` plus dot as a prefix, and those files are combined.
If `data_paths` is not provided, then the directory portion of
`data.filename` is used as the directory to search for data files.
Every data file found and combined is then deleted from disk. If a file
cannot be read, a warning will be issued, and the file will not be
deleted.
If `strict` is true, and no files are found to combine, an error is
raised.
"""
# Because of the os.path.abspath in the constructor, data_dir will
# never be an empty string.
data_dir, local = os.path.split(data.base_filename())
localdot = local + '.*'
data_paths = data_paths or [data_dir]
files_to_combine = []
for p in data_paths:
if os.path.isfile(p):
files_to_combine.append(os.path.abspath(p))
elif os.path.isdir(p):
pattern = os.path.join(os.path.abspath(p), localdot)
files_to_combine.extend(glob.glob(pattern))
else:
raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
if strict and not files_to_combine:
raise CoverageException("No data to combine")
files_combined = 0
for f in files_to_combine:
if f == data.data_filename():
# Sometimes we are combining into a file which is one of the
# parallel files. Skip that file.
if data._debug.should('dataio'):
data._debug.write("Skipping combining ourself: %r" % (f,))
continue
if data._debug.should('dataio'):
data._debug.write("Combining data file %r" % (f,))
try:
new_data = CoverageData(f, debug=data._debug)
new_data.read()
except CoverageException as exc:
if data._warn:
# The CoverageException has the file name in it, so just
# use the message as the warning.
data._warn(str(exc))
else:
data.update(new_data, aliases=aliases)
files_combined += 1
if data._debug.should('dataio'):
data._debug.write("Deleting combined data file %r" % (f,))
file_be_gone(f)
if strict and not files_combined:
raise CoverageException("No usable data files")
|
from homeassistant.const import PERCENTAGE, TEMP_CELSIUS
from .util import async_init_integration
async def test_create_sensors(hass):
"""Test creation of sensors."""
await async_init_integration(hass)
state = hass.states.get("sensor.nick_office_temperature")
assert state.state == "23"
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"device_class": "temperature",
"friendly_name": "Nick Office Temperature",
"unit_of_measurement": TEMP_CELSIUS,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("sensor.nick_office_zone_setpoint_status")
assert state.state == "Permanent Hold"
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"friendly_name": "Nick Office Zone Setpoint Status",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("sensor.nick_office_zone_status")
assert state.state == "Relieving Air"
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"friendly_name": "Nick Office Zone Status",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("sensor.master_suite_air_cleaner_mode")
assert state.state == "auto"
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"friendly_name": "Master Suite Air Cleaner Mode",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("sensor.master_suite_current_compressor_speed")
assert state.state == "69.0"
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"friendly_name": "Master Suite Current Compressor Speed",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("sensor.master_suite_outdoor_temperature")
assert state.state == "30.6"
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"device_class": "temperature",
"friendly_name": "Master Suite Outdoor Temperature",
"unit_of_measurement": TEMP_CELSIUS,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("sensor.master_suite_relative_humidity")
assert state.state == "52.0"
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"device_class": "humidity",
"friendly_name": "Master Suite Relative Humidity",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("sensor.master_suite_requested_compressor_speed")
assert state.state == "69.0"
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"friendly_name": "Master Suite Requested Compressor Speed",
"unit_of_measurement": PERCENTAGE,
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
state = hass.states.get("sensor.master_suite_system_status")
assert state.state == "Cooling"
expected_attributes = {
"attribution": "Data provided by mynexia.com",
"friendly_name": "Master Suite System Status",
}
# Only test for a subset of attributes in case
# HA changes the implementation and a new one appears
assert all(
state.attributes[key] == expected_attributes[key] for key in expected_attributes
)
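# Illustrative only: the subset-of-attributes assertion repeated above could
# be factored into a helper along these lines (hypothetical name, not used
# by the tests as written):
def _assert_attributes_subset(state, expected_attributes):
    assert all(
        state.attributes[key] == expected_attributes[key]
        for key in expected_attributes
    )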
|
import datetime
from itertools import count
import os
import threading
import time
import urllib.parse
import pytest
import cherrypy
from cherrypy.lib import httputil
from cherrypy.test import helper
curdir = os.path.join(os.getcwd(), os.path.dirname(__file__))
gif_bytes = (
b'GIF89a\x01\x00\x01\x00\x82\x00\x01\x99"\x1e\x00\x00\x00\x00\x00'
b'\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00'
b'\x00,\x00\x00\x00\x00\x01\x00\x01\x00\x02\x03\x02\x08\t\x00;'
)
class CacheTest(helper.CPWebCase):
@staticmethod
def setup_server():
@cherrypy.config(**{'tools.caching.on': True})
class Root:
def __init__(self):
self.counter = 0
self.control_counter = 0
self.longlock = threading.Lock()
@cherrypy.expose
def index(self):
self.counter += 1
msg = 'visit #%s' % self.counter
return msg
@cherrypy.expose
def control(self):
self.control_counter += 1
return 'visit #%s' % self.control_counter
@cherrypy.expose
def a_gif(self):
cherrypy.response.headers[
'Last-Modified'] = httputil.HTTPDate()
return gif_bytes
@cherrypy.expose
def long_process(self, seconds='1'):
# acquire/release via a context manager; equivalent but idiomatic
with self.longlock:
    time.sleep(float(seconds))
return 'success!'
@cherrypy.expose
def clear_cache(self, path):
cherrypy._cache.store[cherrypy.request.base + path].clear()
@cherrypy.config(**{
'tools.caching.on': True,
'tools.response_headers.on': True,
'tools.response_headers.headers': [
('Vary', 'Our-Varying-Header')
],
})
class VaryHeaderCachingServer(object):
def __init__(self):
self.counter = count(1)
@cherrypy.expose
def index(self):
return 'visit #%s' % next(self.counter)
@cherrypy.config(**{
'tools.expires.on': True,
'tools.expires.secs': 60,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir,
})
class UnCached(object):
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': 0})
def force(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
self._cp_config['tools.expires.force'] = True
self._cp_config['tools.expires.secs'] = 0
return 'being forceful'
@cherrypy.expose
def dynamic(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
cherrypy.response.headers['Cache-Control'] = 'private'
return 'D-d-d-dynamic!'
@cherrypy.expose
def cacheable(self):
cherrypy.response.headers['Etag'] = 'bibbitybobbityboo'
return "Hi, I'm cacheable."
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': 86400})
def specific(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return 'I am being specific'
class Foo(object):
pass
@cherrypy.expose
@cherrypy.config(**{'tools.expires.secs': Foo()})
def wrongtype(self):
cherrypy.response.headers[
'Etag'] = 'need_this_to_make_me_cacheable'
return 'Woops'
@cherrypy.config(**{
'tools.gzip.mime_types': ['text/*', 'image/*'],
'tools.caching.on': True,
'tools.staticdir.on': True,
'tools.staticdir.dir': 'static',
'tools.staticdir.root': curdir
})
class GzipStaticCache(object):
pass
cherrypy.tree.mount(Root())
cherrypy.tree.mount(UnCached(), '/expires')
cherrypy.tree.mount(VaryHeaderCachingServer(), '/varying_headers')
cherrypy.tree.mount(GzipStaticCache(), '/gzip_static_cache')
cherrypy.config.update({'tools.gzip.on': True})
def testCaching(self):
elapsed = 0.0
for trial in range(10):
self.getPage('/')
# The response should be the same every time,
# except for the Age response header.
self.assertBody('visit #1')
if trial != 0:
age = int(self.assertHeader('Age'))
assert age >= elapsed
elapsed = age
# POST, PUT, DELETE should not be cached.
self.getPage('/', method='POST')
self.assertBody('visit #2')
# Because gzip is turned on, the response should always vary on
# Accept-Encoding.
self.assertHeader('Vary', 'Accept-Encoding')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage('/', method='GET')
self.assertBody('visit #3')
# ...but this request should get the cached copy.
self.getPage('/', method='GET')
self.assertBody('visit #3')
self.getPage('/', method='DELETE')
self.assertBody('visit #4')
# The previous request should have invalidated the cache,
# so this request will recalc the response.
self.getPage('/', method='GET', headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertHeader('Vary')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), b'visit #5')
# Now check that a second request gets the gzip header and gzipped body
# This also tests a bug in 3.0 to 3.0.2 whereby the cached, gzipped
# response body was being gzipped a second time.
self.getPage('/', method='GET', headers=[('Accept-Encoding', 'gzip')])
self.assertHeader('Content-Encoding', 'gzip')
self.assertEqual(
cherrypy.lib.encoding.decompress(self.body), b'visit #5')
# Now check that a third request that doesn't accept gzip
# skips the cache (because the 'Vary' header denies it).
self.getPage('/', method='GET')
self.assertNoHeader('Content-Encoding')
self.assertBody('visit #6')
def testVaryHeader(self):
self.getPage('/varying_headers/')
self.assertStatus('200 OK')
self.assertHeaderItemValue('Vary', 'Our-Varying-Header')
self.assertBody('visit #1')
# Now check that different 'Vary'-fields don't evict each other.
# This test creates 2 requests with different 'Our-Varying-Header'
# and then tests if the first one still exists.
self.getPage('/varying_headers/',
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus('200 OK')
self.assertBody('visit #2')
self.getPage('/varying_headers/',
headers=[('Our-Varying-Header', 'request 2')])
self.assertStatus('200 OK')
self.assertBody('visit #2')
self.getPage('/varying_headers/')
self.assertStatus('200 OK')
self.assertBody('visit #1')
def testExpiresTool(self):
# test setting an expires header
self.getPage('/expires/specific')
self.assertStatus('200 OK')
self.assertHeader('Expires')
# test exceptions for bad time values
self.getPage('/expires/wrongtype')
self.assertStatus(500)
self.assertInBody('TypeError')
# static content should not have "cache prevention" headers
self.getPage('/expires/index.html')
self.assertStatus('200 OK')
self.assertNoHeader('Pragma')
self.assertNoHeader('Cache-Control')
self.assertHeader('Expires')
# dynamic content that sets indicators should not have
# "cache prevention" headers
self.getPage('/expires/cacheable')
self.assertStatus('200 OK')
self.assertNoHeader('Pragma')
self.assertNoHeader('Cache-Control')
self.assertHeader('Expires')
self.getPage('/expires/dynamic')
self.assertBody('D-d-d-dynamic!')
# the Cache-Control header should be untouched
self.assertHeader('Cache-Control', 'private')
self.assertHeader('Expires')
# configure the tool to ignore indicators and replace existing headers
self.getPage('/expires/force')
self.assertStatus('200 OK')
# This also gives us a chance to test 0 expiry with no other headers
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
# static content should now have "cache prevention" headers
self.getPage('/expires/index.html')
self.assertStatus('200 OK')
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
# the cacheable handler should now have "cache prevention" headers
self.getPage('/expires/cacheable')
self.assertStatus('200 OK')
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
self.getPage('/expires/dynamic')
self.assertBody('D-d-d-dynamic!')
# dynamic sets Cache-Control to private but it should be
# overwritten here ...
self.assertHeader('Pragma', 'no-cache')
if cherrypy.server.protocol_version == 'HTTP/1.1':
self.assertHeader('Cache-Control', 'no-cache, must-revalidate')
self.assertHeader('Expires', 'Sun, 28 Jan 2007 00:00:00 GMT')
def _assert_resp_len_and_enc_for_gzip(self, uri):
"""
Test that after querying gzipped content it's remains valid in
cache and available non-gzipped as well.
"""
ACCEPT_GZIP_HEADERS = [('Accept-Encoding', 'gzip')]
content_len = None
for _ in range(3):
self.getPage(uri, method='GET', headers=ACCEPT_GZIP_HEADERS)
if content_len is not None:
# all requests should get the same length
self.assertHeader('Content-Length', content_len)
self.assertHeader('Content-Encoding', 'gzip')
content_len = dict(self.headers)['Content-Length']
# check that we can still get non-gzipped version
self.getPage(uri, method='GET')
self.assertNoHeader('Content-Encoding')
# non-gzipped version should have a different content length
self.assertNoHeaderItemValue('Content-Length', content_len)
def testGzipStaticCache(self):
"""Test that cache and gzip tools play well together when both enabled.
Ref GitHub issue #1190.
"""
GZIP_STATIC_CACHE_TMPL = '/gzip_static_cache/{}'
resource_files = ('index.html', 'dirback.jpg')
for f in resource_files:
uri = GZIP_STATIC_CACHE_TMPL.format(f)
self._assert_resp_len_and_enc_for_gzip(uri)
def testLastModified(self):
self.getPage('/a.gif')
self.assertStatus(200)
self.assertBody(gif_bytes)
lm1 = self.assertHeader('Last-Modified')
# this request should get the cached copy.
self.getPage('/a.gif')
self.assertStatus(200)
self.assertBody(gif_bytes)
self.assertHeader('Age')
lm2 = self.assertHeader('Last-Modified')
self.assertEqual(lm1, lm2)
# this request should match the cached copy, but raise 304.
self.getPage('/a.gif', [('If-Modified-Since', lm1)])
self.assertStatus(304)
self.assertNoHeader('Last-Modified')
if not getattr(cherrypy.server, 'using_apache', False):
self.assertHeader('Age')
@pytest.mark.xfail(reason='#1536')
def test_antistampede(self):
SECONDS = 4
slow_url = '/long_process?seconds={SECONDS}'.format(**locals())
# We MUST make an initial synchronous request in order to create the
# AntiStampedeCache object, and populate its selecting_headers,
# before the actual stampede.
self.getPage(slow_url)
self.assertBody('success!')
path = urllib.parse.quote(slow_url, safe='')
self.getPage('/clear_cache?path=' + path)
self.assertStatus(200)
start = datetime.datetime.now()
def run():
self.getPage(slow_url)
# The response should be the same every time
self.assertBody('success!')
ts = [threading.Thread(target=run) for i in range(100)]
for t in ts:
t.start()
for t in ts:
t.join()
finish = datetime.datetime.now()
# Allow for overhead, two seconds for slow hosts
allowance = SECONDS + 2
self.assertEqualDates(start, finish, seconds=allowance)
def test_cache_control(self):
self.getPage('/control')
self.assertBody('visit #1')
self.getPage('/control')
self.assertBody('visit #1')
self.getPage('/control', headers=[('Cache-Control', 'no-cache')])
self.assertBody('visit #2')
self.getPage('/control')
self.assertBody('visit #2')
self.getPage('/control', headers=[('Pragma', 'no-cache')])
self.assertBody('visit #3')
self.getPage('/control')
self.assertBody('visit #3')
time.sleep(1)
self.getPage('/control', headers=[('Cache-Control', 'max-age=0')])
self.assertBody('visit #4')
self.getPage('/control')
self.assertBody('visit #4')
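# Illustrative summary of the request directives exercised by
# test_cache_control above: "Cache-Control: no-cache" and "Pragma: no-cache"
# both force a recomputed response, while "Cache-Control: max-age=0" rejects
# any cached copy older than zero seconds (hence the sleep(1) beforehand).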
|
import pandas as pd
from qstrader.asset.equity import Equity
from qstrader.broker.transaction.transaction import Transaction
def test_transaction_representation():
"""
Tests that the Transaction representation
correctly recreates the object.
"""
dt = pd.Timestamp('2015-05-06')
asset = Equity('Apple, Inc.', 'AAPL')
transaction = Transaction(
asset, quantity=168, dt=dt, price=56.18, order_id=153
)
exp_repr = (
"Transaction(asset=Equity(name='Apple, Inc.', symbol='AAPL', tax_exempt=True), "
"quantity=168, dt=2015-05-06 00:00:00, price=56.18, order_id=153)"
)
assert repr(transaction) == exp_repr
|
from __future__ import absolute_import
import re
import sys
import unittest
import threading
from .common_imports import etree, HelperTestCase, BytesIO, _bytes
try:
from Queue import Queue
except ImportError:
from queue import Queue # Py3
class ThreadingTestCase(HelperTestCase):
"""Threading tests"""
etree = etree
def _run_thread(self, func):
thread = threading.Thread(target=func)
thread.start()
thread.join()
def _run_threads(self, count, func, main_func=None):
sync = threading.Event()
lock = threading.Lock()
counter = dict(started=0, finished=0, failed=0)
def sync_start(func):
with lock:
started = counter['started'] + 1
counter['started'] = started
if started < count + (main_func is not None):
sync.wait(4) # wait until the other threads have started up
assert sync.is_set()
sync.set() # all waiting => go!
try:
func()
except:
with lock:
counter['failed'] += 1
raise
else:
with lock:
counter['finished'] += 1
threads = [threading.Thread(target=sync_start, args=(func,)) for _ in range(count)]
for thread in threads:
thread.start()
if main_func is not None:
sync_start(main_func)
for thread in threads:
thread.join()
self.assertEqual(0, counter['failed'])
self.assertEqual(counter['finished'], counter['started'])
def test_subtree_copy_thread(self):
tostring = self.etree.tostring
XML = self.etree.XML
xml = _bytes("<root><threadtag/></root>")
main_root = XML(_bytes("<root/>"))
def run_thread():
thread_root = XML(xml)
main_root.append(thread_root[0])
del thread_root
self._run_thread(run_thread)
self.assertEqual(xml, tostring(main_root))
def test_main_xslt_in_thread(self):
XML = self.etree.XML
style = XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<foo><xsl:copy><xsl:value-of select="/a/b/text()" /></xsl:copy></foo>
</xsl:template>
</xsl:stylesheet>'''))
st = etree.XSLT(style)
result = []
def run_thread():
root = XML(_bytes('<a><b>B</b><c>C</c></a>'))
result.append( st(root) )
self._run_thread(run_thread)
self.assertEqual('''\
<?xml version="1.0"?>
<foo><a>B</a></foo>
''',
str(result[0]))
def test_thread_xslt(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(_bytes('<a><b>B</b><c>C</c></a>'))
def run_thread():
style = XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<foo><xsl:copy><xsl:value-of select="/a/b/text()" /></xsl:copy></foo>
</xsl:template>
</xsl:stylesheet>'''))
st = etree.XSLT(style)
root.append( st(root).getroot() )
self._run_thread(run_thread)
self.assertEqual(_bytes('<a><b>B</b><c>C</c><foo><a>B</a></foo></a>'),
tostring(root))
def test_thread_xslt_parsing_error_log(self):
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="tag" />
<!-- extend time for parsing + transform -->
''' + '\n'.join('<xsl:template match="tag%x" />' % i for i in range(200)) + '''
<xsl:UnExpectedElement />
</xsl:stylesheet>''')
self.assertRaises(etree.XSLTParseError,
etree.XSLT, style)
error_logs = []
def run_thread():
try:
etree.XSLT(style)
except etree.XSLTParseError as e:
error_logs.append(e.error_log)
else:
self.assertFalse(True, "XSLT parsing should have failed but didn't")
self._run_threads(16, run_thread)
self.assertEqual(16, len(error_logs))
last_log = None
for log in error_logs:
self.assertTrue(len(log))
if last_log is not None:
self.assertEqual(len(last_log), len(log))
self.assertTrue(len(log) >= 2, len(log))
for error in log:
self.assertTrue(':ERROR:XSLT:' in str(error), str(error))
self.assertTrue(any('UnExpectedElement' in str(error) for error in log), log)
last_log = log
def test_thread_xslt_apply_error_log(self):
tree = self.parse('<tagFF/>')
style = self.parse('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template name="tag0">
<xsl:message terminate="yes">FAIL</xsl:message>
</xsl:template>
<!-- extend time for parsing + transform -->
''' + '\n'.join('<xsl:template match="tag%X" name="tag%x"> <xsl:call-template name="tag%x" /> </xsl:template>' % (i, i, i-1)
for i in range(1, 256)) + '''
</xsl:stylesheet>''')
self.assertRaises(etree.XSLTApplyError,
etree.XSLT(style), tree)
error_logs = []
def run_thread():
transform = etree.XSLT(style)
try:
transform(tree)
except etree.XSLTApplyError:
error_logs.append(transform.error_log)
else:
self.assertFalse(True, "XSLT parsing should have failed but didn't")
self._run_threads(16, run_thread)
self.assertEqual(16, len(error_logs))
last_log = None
for log in error_logs:
self.assertTrue(len(log))
if last_log is not None:
self.assertEqual(len(last_log), len(log))
self.assertEqual(1, len(log))
for error in log:
self.assertTrue(':ERROR:XSLT:' in str(error))
last_log = log
def test_thread_xslt_attr_replace(self):
# this is the only case in XSLT where the result tree can be
# modified in-place
XML = self.etree.XML
tostring = self.etree.tostring
style = self.etree.XSLT(XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<root class="abc">
<xsl:copy-of select="@class" />
<xsl:attribute name="class">xyz</xsl:attribute>
</root>
</xsl:template>
</xsl:stylesheet>''')))
result = []
def run_thread():
root = XML(_bytes('<ROOT class="ABC" />'))
result.append( style(root).getroot() )
self._run_thread(run_thread)
self.assertEqual(_bytes('<root class="xyz"/>'),
tostring(result[0]))
def test_thread_create_xslt(self):
XML = self.etree.XML
tostring = self.etree.tostring
root = XML(_bytes('<a><b>B</b><c>C</c></a>'))
stylesheets = []
def run_thread():
style = XML(_bytes('''\
<xsl:stylesheet
xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
version="1.0">
<xsl:output method="xml" />
<xsl:template match="/">
<div id="test">
<xsl:apply-templates/>
</div>
</xsl:template>
</xsl:stylesheet>'''))
stylesheets.append( etree.XSLT(style) )
self._run_thread(run_thread)
st = stylesheets[0]
result = tostring( st(root) )
self.assertEqual(_bytes('<div id="test">BC</div>'),
result)
def test_thread_error_log(self):
XML = self.etree.XML
expected_error = [self.etree.ErrorTypes.ERR_TAG_NAME_MISMATCH]
children = "<a>test</a>" * 100
def parse_error_test(thread_no):
tag = "tag%d" % thread_no
xml = "<%s>%s</%s>" % (tag, children, tag.upper())
parser = self.etree.XMLParser()
for _ in range(10):
errors = None
try:
XML(xml, parser)
except self.etree.ParseError:
e = sys.exc_info()[1]
errors = e.error_log.filter_types(expected_error)
self.assertTrue(errors, "Expected error not found")
for error in errors:
self.assertTrue(
tag in error.message and tag.upper() in error.message,
"%s and %s not found in '%s'" % (
tag, tag.upper(), error.message))
self.etree.clear_error_log()
threads = []
for thread_no in range(1, 10):
t = threading.Thread(target=parse_error_test,
args=(thread_no,))
threads.append(t)
t.start()
parse_error_test(0)
for t in threads:
t.join()
def test_thread_mix(self):
XML = self.etree.XML
Element = self.etree.Element
SubElement = self.etree.SubElement
tostring = self.etree.tostring
xml = _bytes('<a><b>B</b><c xmlns="test">C</c></a>')
root = XML(xml)
fragment = XML(_bytes("<other><tags/></other>"))
result = self.etree.Element("{myns}root", att = "someval")
def run_XML():
thread_root = XML(xml)
result.append(thread_root[0])
result.append(thread_root[-1])
def run_parse():
thread_root = self.etree.parse(BytesIO(xml)).getroot()
result.append(thread_root[0])
result.append(thread_root[-1])
def run_move_main():
result.append(fragment[0])
def run_build():
result.append(
Element("{myns}foo", attrib={'{test}attr':'val'}))
SubElement(result, "{otherns}tasty")
def run_xslt():
style = XML(_bytes('''\
<xsl:stylesheet version="1.0"
xmlns:xsl="http://www.w3.org/1999/XSL/Transform">
<xsl:template match="*">
<xsl:copy><foo><xsl:value-of select="/a/b/text()" /></foo></xsl:copy>
</xsl:template>
</xsl:stylesheet>'''))
st = etree.XSLT(style)
result.append( st(root).getroot() )
for test in (run_XML, run_parse, run_move_main, run_xslt, run_build):
tostring(result)
self._run_thread(test)
self.assertEqual(
_bytes('<ns0:root xmlns:ns0="myns" att="someval"><b>B</b>'
'<c xmlns="test">C</c><b>B</b><c xmlns="test">C</c><tags/>'
'<a><foo>B</foo></a>'
'<ns0:foo xmlns:ns1="test" ns1:attr="val"/>'
'<ns1:tasty xmlns:ns1="otherns"/></ns0:root>'),
tostring(result))
def strip_first():
root = Element("newroot")
root.append(result[0])
while len(result):
self._run_thread(strip_first)
self.assertEqual(
_bytes('<ns0:root xmlns:ns0="myns" att="someval"/>'),
tostring(result))
def test_concurrent_attribute_names_in_dicts(self):
SubElement = self.etree.SubElement
names = list('abcdefghijklmnop')
runs_per_name = range(50)
result_matches = re.compile(
br'<thread_root>'
br'(?:<[a-p]{5} thread_attr_[a-p]="value" thread_attr2_[a-p]="value2"\s?/>)+'
br'</thread_root>').match
def testrun():
for _ in range(3):
root = self.etree.Element('thread_root')
for name in names:
tag_name = name * 5
new = []
for _ in runs_per_name:
el = SubElement(root, tag_name, {'thread_attr_' + name: 'value'})
new.append(el)
for el in new:
el.set('thread_attr2_' + name, 'value2')
s = etree.tostring(root)
self.assertTrue(result_matches(s))
# first, run only in sub-threads
self._run_threads(10, testrun)
# then, additionally include the main thread (and its parent dict)
self._run_threads(10, testrun, main_func=testrun)
def test_concurrent_proxies(self):
XML = self.etree.XML
root = XML(_bytes('<root><a>A</a><b xmlns="test">B</b><c/></root>'))
child_count = len(root)
def testrun():
for i in range(10000):
el = root[i%child_count]
del el
self._run_threads(10, testrun)
def test_concurrent_class_lookup(self):
XML = self.etree.XML
class TestElement(etree.ElementBase):
pass
class MyLookup(etree.CustomElementClassLookup):
repeat = range(100)
def lookup(self, t, d, ns, name):
count = 0
for i in self.repeat:
# allow other threads to run
count += 1
return TestElement
parser = self.etree.XMLParser()
parser.set_element_class_lookup(MyLookup())
root = XML(_bytes('<root><a>A</a><b xmlns="test">B</b><c/></root>'),
parser)
child_count = len(root)
def testrun():
for i in range(1000):
el = root[i%child_count]
del el
self._run_threads(10, testrun)
class ThreadPipelineTestCase(HelperTestCase):
"""Threading tests based on a thread worker pipeline.
"""
etree = etree
item_count = 40
class Worker(threading.Thread):
def __init__(self, in_queue, in_count, **kwargs):
threading.Thread.__init__(self)
self.in_queue = in_queue
self.in_count = in_count
self.out_queue = Queue(in_count)
self.__dict__.update(kwargs)
def run(self):
get, put = self.in_queue.get, self.out_queue.put
handle = self.handle
for _ in range(self.in_count):
put(handle(get()))
def handle(self, data):
raise NotImplementedError()
class ParseWorker(Worker):
def handle(self, xml, _fromstring=etree.fromstring):
return _fromstring(xml)
class RotateWorker(Worker):
def handle(self, element):
first = element[0]
element[:] = element[1:]
element.append(first)
return element
class ReverseWorker(Worker):
def handle(self, element):
element[:] = element[::-1]
return element
class ParseAndExtendWorker(Worker):
def handle(self, element, _fromstring=etree.fromstring):
element.extend(_fromstring(self.xml))
return element
class ParseAndInjectWorker(Worker):
def handle(self, element, _fromstring=etree.fromstring):
root = _fromstring(self.xml)
root.extend(element)
return root
class Validate(Worker):
def handle(self, element):
element.getroottree().docinfo.internalDTD.assertValid(element)
return element
class SerialiseWorker(Worker):
def handle(self, element):
return etree.tostring(element)
xml = (b'''\
<!DOCTYPE threadtest [
<!ELEMENT threadtest (thread-tag1,thread-tag2)+>
<!ATTLIST threadtest
version CDATA "1.0"
>
<!ELEMENT thread-tag1 EMPTY>
<!ELEMENT thread-tag2 (div)>
<!ELEMENT div (threaded)>
<!ATTLIST div
huhu CDATA #IMPLIED
>
<!ELEMENT threaded EMPTY>
<!ATTLIST threaded
host CDATA #REQUIRED
>
]>
<threadtest version="123">
''' + (b'''
<thread-tag1 />
<thread-tag2>
<div huhu="true">
<threaded host="here" />
</div>
</thread-tag2>
''') * 20 + b'''
</threadtest>''')
def _build_pipeline(self, item_count, *classes, **kwargs):
in_queue = Queue(item_count)
start = last = classes[0](in_queue, item_count, **kwargs)
start.daemon = True
for worker_class in classes[1:]:
last = worker_class(last.out_queue, item_count, **kwargs)
last.daemon = True
last.start()
return in_queue, start, last
def test_thread_pipeline_thread_parse(self):
item_count = self.item_count
xml = self.xml.replace(b'thread', b'THREAD') # use fresh tag names
# build and start the pipeline
in_queue, start, last = self._build_pipeline(
item_count,
self.ParseWorker,
self.RotateWorker,
self.ReverseWorker,
self.ParseAndExtendWorker,
self.Validate,
self.ParseAndInjectWorker,
self.SerialiseWorker,
xml=xml)
# fill the queue
put = start.in_queue.put
for _ in range(item_count):
put(xml)
# start the first thread and thus everything
start.start()
# make sure the last thread has terminated
last.join(60) # time out after 60 seconds
self.assertEqual(item_count, last.out_queue.qsize())
# read the results
get = last.out_queue.get
results = [get() for _ in range(item_count)]
comparison = results[0]
for i, result in enumerate(results[1:]):
self.assertEqual(comparison, result)
def test_thread_pipeline_global_parse(self):
item_count = self.item_count
xml = self.xml.replace(b'thread', b'GLOBAL') # use fresh tag names
XML = self.etree.XML
# build and start the pipeline
in_queue, start, last = self._build_pipeline(
item_count,
self.RotateWorker,
self.ReverseWorker,
self.ParseAndExtendWorker,
self.Validate,
self.SerialiseWorker,
xml=xml)
# fill the queue
put = start.in_queue.put
for _ in range(item_count):
put(XML(xml))
# start the first thread and thus everything
start.start()
# make sure the last thread has terminated
last.join(60) # time out after 60 seconds
self.assertEqual(item_count, last.out_queue.qsize())
# read the results
get = last.out_queue.get
results = [get() for _ in range(item_count)]
comparison = results[0]
for i, result in enumerate(results[1:]):
self.assertEqual(comparison, result)
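# Illustrative only: the chain built by _build_pipeline for the thread-parse
# test above - each worker consumes its predecessor's out_queue:
#
#   in_queue -> ParseWorker -> RotateWorker -> ReverseWorker
#            -> ParseAndExtendWorker -> Validate -> ParseAndInjectWorker
#            -> SerialiseWorker -> last.out_queue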
def test_suite():
suite = unittest.TestSuite()
suite.addTests([unittest.makeSuite(ThreadingTestCase)])
suite.addTests([unittest.makeSuite(ThreadPipelineTestCase)])
return suite
if __name__ == '__main__':
print('to test use test.py %s' % __file__)
|
revision = "5770674184de"
down_revision = "ce547319f7be"
from flask_sqlalchemy import SQLAlchemy
from lemur.models import certificate_notification_associations
db = SQLAlchemy()
session = db.session()
def upgrade():
print("Querying for all entries in certificate_notification_associations.")
# Query for all entries in table
results = session.query(certificate_notification_associations).with_entities(
certificate_notification_associations.c.certificate_id,
certificate_notification_associations.c.notification_id,
certificate_notification_associations.c.id,
)
seen = {}
# Iterate through all entries and mark as seen for each certificate_id and notification_id pair
for x in results:
# If we've seen a pair already, delete the duplicates
if seen.get("{}-{}".format(x.certificate_id, x.notification_id)):
print("Deleting duplicate: {}".format(x))
d = session.query(certificate_notification_associations).filter(
certificate_notification_associations.c.id == x.id
)
d.delete(synchronize_session=False)
seen["{}-{}".format(x.certificate_id, x.notification_id)] = True
db.session.commit()
db.session.flush()
def downgrade():
# No way to downgrade this
pass
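# Illustrative only: a self-contained sketch of the seen-pair dedupe pattern
# used in upgrade() above, with hypothetical rows. Not executed by Alembic.
def _dedupe_pairs_example():
    rows = [(1, 10, "a"), (1, 10, "b"), (2, 20, "c")]  # (cert_id, notif_id, id)
    seen = set()
    duplicate_ids = []
    for cert_id, notif_id, row_id in rows:
        if (cert_id, notif_id) in seen:
            duplicate_ids.append(row_id)  # these rows would be deleted
        else:
            seen.add((cert_id, notif_id))
    return duplicate_ids  # -> ["b"]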
|
import asyncio
from html.parser import HTMLParser
from ipaddress import ip_address
import logging
from urllib.parse import urljoin, urlparse
import aiohttp
from homeassistant.util.network import is_local
_LOGGER = logging.getLogger(__name__)
async def verify_redirect_uri(hass, client_id, redirect_uri):
"""Verify that the client and redirect uri match."""
try:
client_id_parts = _parse_client_id(client_id)
except ValueError:
return False
redirect_parts = _parse_url(redirect_uri)
# Verify redirect url and client url have same scheme and domain.
is_valid = (
client_id_parts.scheme == redirect_parts.scheme
and client_id_parts.netloc == redirect_parts.netloc
)
if is_valid:
return True
# Whitelist the iOS and Android callbacks so that people can link apps
# without being connected to the internet.
if redirect_uri == "homeassistant://auth-callback" and client_id in (
"https://home-assistant.io/android",
"https://home-assistant.io/iOS",
):
return True
# IndieAuth 4.2.2 allows for redirect_uri to be on different domain
# but needs to be specified in link tag when fetching `client_id`.
redirect_uris = await fetch_redirect_uris(hass, client_id)
return redirect_uri in redirect_uris
class LinkTagParser(HTMLParser):
"""Parser to find link tags."""
def __init__(self, rel):
"""Initialize a link tag parser."""
super().__init__()
self.rel = rel
self.found = []
def handle_starttag(self, tag, attrs):
"""Handle finding a start tag."""
if tag != "link":
return
attrs = dict(attrs)
if attrs.get("rel") == self.rel:
self.found.append(attrs.get("href"))
async def fetch_redirect_uris(hass, url):
"""Find link tag with redirect_uri values.
IndieAuth 4.2.2
The client SHOULD publish one or more <link> tags or Link HTTP headers with
a rel attribute of redirect_uri at the client_id URL.
We limit to the first 10kB of the page.
We do not implement extracting redirect uris from headers.
"""
parser = LinkTagParser("redirect_uri")
chunks = 0
try:
async with aiohttp.ClientSession() as session:
async with session.get(url, timeout=5) as resp:
async for data in resp.content.iter_chunked(1024):
parser.feed(data.decode())
chunks += 1
if chunks == 10:
break
except asyncio.TimeoutError:
_LOGGER.error("Timeout while looking up redirect_uri %s", url)
except aiohttp.client_exceptions.ClientSSLError:
_LOGGER.error("SSL error while looking up redirect_uri %s", url)
except aiohttp.client_exceptions.ClientOSError as ex:
_LOGGER.error("OS error while looking up redirect_uri %s: %s", url, ex.strerror)
except aiohttp.client_exceptions.ClientConnectionError:
_LOGGER.error(
"Low level connection error while looking up redirect_uri %s", url
)
except aiohttp.client_exceptions.ClientError:
_LOGGER.error("Unknown error while looking up redirect_uri %s", url)
# Authorization endpoints verifying that a redirect_uri is allowed for use
# by a client MUST look for an exact match of the given redirect_uri in the
# request against the list of redirect_uris discovered after resolving any
# relative URLs.
return [urljoin(url, found) for found in parser.found]
def verify_client_id(client_id):
"""Verify that the client id is valid."""
try:
_parse_client_id(client_id)
return True
except ValueError:
return False
def _parse_url(url):
"""Parse a url in parts and canonicalize according to IndieAuth."""
parts = urlparse(url)
# Canonicalize a url according to IndieAuth 3.2.
# SHOULD convert the hostname to lowercase
parts = parts._replace(netloc=parts.netloc.lower())
# If a URL with no path component is ever encountered,
# it MUST be treated as if it had the path /.
if parts.path == "":
parts = parts._replace(path="/")
return parts
def _parse_client_id(client_id):
"""Test if client id is a valid URL according to IndieAuth section 3.2.
https://indieauth.spec.indieweb.org/#client-identifier
"""
parts = _parse_url(client_id)
# Client identifier URLs
# MUST have either an https or http scheme
if parts.scheme not in ("http", "https"):
raise ValueError()
# MUST contain a path component
# Handled by url canonicalization.
# MUST NOT contain single-dot or double-dot path segments
if any(segment in (".", "..") for segment in parts.path.split("/")):
raise ValueError(
"Client ID cannot contain single-dot or double-dot path segments"
)
# MUST NOT contain a fragment component
if parts.fragment != "":
raise ValueError("Client ID cannot contain a fragment")
# MUST NOT contain a username or password component
if parts.username is not None:
raise ValueError("Client ID cannot contain username")
if parts.password is not None:
raise ValueError("Client ID cannot contain password")
# MAY contain a port
try:
# parts raises ValueError when port cannot be parsed as int
parts.port
except ValueError as ex:
raise ValueError("Client ID contains invalid port") from ex
# Additionally, hostnames
# MUST be domain names or a loopback interface and
# MUST NOT be IPv4 or IPv6 addresses except for IPv4 127.0.0.1
# or IPv6 [::1]
# We are not going to follow the spec here. We are going to allow
# any internal network IP to be used inside a client id.
address = None
try:
netloc = parts.netloc
# Strip the [, ] from ipv6 addresses before parsing
if netloc[0] == "[" and netloc[-1] == "]":
netloc = netloc[1:-1]
address = ip_address(netloc)
except ValueError:
# Not an ip address
pass
if address is None or is_local(address):
return parts
raise ValueError("Hostname should be a domain name or local IP address")
|
from unittest import mock
import aiohomekit
from aiohomekit.model import Accessories, Accessory
from aiohomekit.model.characteristics import CharacteristicsTypes
from aiohomekit.model.services import ServicesTypes
import pytest
from homeassistant.components.homekit_controller import config_flow
from homeassistant.helpers import device_registry
import tests.async_mock
from tests.async_mock import patch
from tests.common import MockConfigEntry, mock_device_registry
PAIRING_START_FORM_ERRORS = [
(KeyError, "pairing_failed"),
]
PAIRING_START_ABORT_ERRORS = [
(aiohomekit.AccessoryNotFoundError, "accessory_not_found_error"),
(aiohomekit.UnavailableError, "already_paired"),
]
PAIRING_TRY_LATER_ERRORS = [
(aiohomekit.BusyError, "busy_error"),
(aiohomekit.MaxTriesError, "max_tries_error"),
(IndexError, "protocol_error"),
]
PAIRING_FINISH_FORM_ERRORS = [
(aiohomekit.exceptions.MalformedPinError, "authentication_error"),
(aiohomekit.MaxPeersError, "max_peers_error"),
(aiohomekit.AuthenticationError, "authentication_error"),
(aiohomekit.UnknownError, "unknown_error"),
(KeyError, "pairing_failed"),
]
PAIRING_FINISH_ABORT_ERRORS = [
(aiohomekit.AccessoryNotFoundError, "accessory_not_found_error")
]
INVALID_PAIRING_CODES = [
"aaa-aa-aaa",
"aaa-11-aaa",
"111-aa-aaa",
"aaa-aa-111",
"1111-1-111",
"a111-11-111",
" 111-11-111",
"111-11-111 ",
"111-11-111a",
"1111111",
"22222222",
]
VALID_PAIRING_CODES = [
"114-11-111",
"123-45-679",
"123-45-679 ",
"11121111",
"98765432",
" 98765432 ",
]
def _setup_flow_handler(hass, pairing=None):
flow = config_flow.HomekitControllerFlowHandler()
flow.hass = hass
flow.context = {}
finish_pairing = tests.async_mock.AsyncMock(return_value=pairing)
discovery = mock.Mock()
discovery.device_id = "00:00:00:00:00:00"
discovery.start_pairing = tests.async_mock.AsyncMock(return_value=finish_pairing)
flow.controller = mock.Mock()
flow.controller.pairings = {}
flow.controller.find_ip_by_device_id = tests.async_mock.AsyncMock(
return_value=discovery
)
return flow
@pytest.mark.parametrize("pairing_code", INVALID_PAIRING_CODES)
def test_invalid_pairing_codes(pairing_code):
"""Test ensure_pin_format raises for an invalid pin code."""
with pytest.raises(aiohomekit.exceptions.MalformedPinError):
config_flow.ensure_pin_format(pairing_code)
@pytest.mark.parametrize("pairing_code", VALID_PAIRING_CODES)
def test_valid_pairing_codes(pairing_code):
"""Test ensure_pin_format corrects format for a valid pin in an alternative format."""
valid_pin = config_flow.ensure_pin_format(pairing_code).split("-")
assert len(valid_pin) == 3
assert len(valid_pin[0]) == 3
assert len(valid_pin[1]) == 2
assert len(valid_pin[2]) == 3
def get_flow_context(hass, result):
"""Get the flow context from the result of async_init or async_configure."""
flow = next(
flow
for flow in hass.config_entries.flow.async_progress()
if flow["flow_id"] == result["flow_id"]
)
return flow["context"]
def get_device_discovery_info(device, upper_case_props=False, missing_csharp=False):
"""Turn a aiohomekit format zeroconf entry into a homeassistant one."""
record = device.info
result = {
"host": record["address"],
"port": record["port"],
"hostname": record["name"],
"type": "_hap._tcp.local.",
"name": record["name"],
"properties": {
"md": record["md"],
"pv": record["pv"],
"id": device.device_id,
"c#": record["c#"],
"s#": record["s#"],
"ff": record["ff"],
"ci": record["ci"],
"sf": 0x01, # record["sf"],
"sh": "",
},
}
if missing_csharp:
del result["properties"]["c#"]
if upper_case_props:
result["properties"] = {
key.upper(): val for (key, val) in result["properties"].items()
}
return result
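# Illustrative note: the "sf" (status flags) property above is what several
# tests below toggle - 0x01 advertises the accessory as unpaired, while 0x00
# marks it as already paired (see test_pair_already_paired_1 and
# test_discovery_already_configured).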
def setup_mock_accessory(controller):
"""Add a bridge accessory to a test controller."""
bridge = Accessories()
accessory = Accessory.create_with_info(
name="Koogeek-LS1-20833F",
manufacturer="Koogeek",
model="LS1",
serial_number="12345",
firmware_revision="1.1",
)
service = accessory.add_service(ServicesTypes.LIGHTBULB)
on_char = service.add_char(CharacteristicsTypes.ON)
on_char.value = 0
bridge.add_accessory(accessory)
return controller.add_device(bridge)
@pytest.mark.parametrize("upper_case_props", [True, False])
@pytest.mark.parametrize("missing_csharp", [True, False])
async def test_discovery_works(hass, controller, upper_case_props, missing_csharp):
"""Test a device being discovered."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device, upper_case_props, missing_csharp)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"source": "zeroconf",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
}
# User initiates pairing - device enters pairing mode and displays code
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "pair"
# Pairing doesn't error and results in a config entry
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
assert result["data"] == {}
async def test_abort_duplicate_flow(hass, controller):
"""Already paired."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "form"
assert result["step_id"] == "pair"
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "abort"
assert result["reason"] == "already_in_progress"
async def test_pair_already_paired_1(hass, controller):
"""Already paired."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Flag device as already paired
discovery_info["properties"]["sf"] = 0x0
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "abort"
assert result["reason"] == "already_paired"
async def test_id_missing(hass, controller):
"""Test id is missing."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Remove id from device
del discovery_info["properties"]["id"]
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "abort"
assert result["reason"] == "invalid_properties"
async def test_discovery_ignored_model(hass, controller):
"""Already paired."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
config_entry = MockConfigEntry(domain=config_flow.HOMEKIT_BRIDGE_DOMAIN, data={})
formatted_mac = device_registry.format_mac("AA:BB:CC:DD:EE:FF")
dev_reg = mock_device_registry(hass)
dev_reg.async_get_or_create(
config_entry_id=config_entry.entry_id,
identifiers={
(
config_flow.HOMEKIT_BRIDGE_DOMAIN,
config_entry.entry_id,
config_flow.HOMEKIT_BRIDGE_SERIAL_NUMBER,
)
},
connections={(device_registry.CONNECTION_NETWORK_MAC, formatted_mac)},
model=config_flow.HOMEKIT_BRIDGE_MODEL,
)
discovery_info["properties"]["id"] = "AA:BB:CC:DD:EE:FF"
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "abort"
assert result["reason"] == "ignored_model"
async def test_discovery_invalid_config_entry(hass, controller):
"""There is already a config entry for the pairing id but it's invalid."""
MockConfigEntry(
domain="homekit_controller",
data={"AccessoryPairingID": "00:00:00:00:00:00"},
unique_id="00:00:00:00:00:00",
).add_to_hass(hass)
# We just added a mock config entry so it must be visible in hass
assert len(hass.config_entries.async_entries()) == 1
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
# Discovery of a HKID that is in a pairable state but for which there is
# already a config entry - in that case the stale config entry is
# automatically removed.
config_entry_count = len(hass.config_entries.async_entries())
assert config_entry_count == 0
# And new config flow should continue allowing user to set up a new pairing
assert result["type"] == "form"
async def test_discovery_already_configured(hass, controller):
"""Already configured."""
MockConfigEntry(
domain="homekit_controller",
data={"AccessoryPairingID": "00:00:00:00:00:00"},
unique_id="00:00:00:00:00:00",
).add_to_hass(hass)
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Set device as already paired
discovery_info["properties"]["sf"] = 0x00
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert result["type"] == "abort"
assert result["reason"] == "already_configured"
@pytest.mark.parametrize("exception,expected", PAIRING_START_ABORT_ERRORS)
async def test_pair_abort_errors_on_start(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
# User initiates pairing - device refuses to enter pairing mode
test_exc = exception("error")
with patch.object(device, "start_pairing", side_effect=test_exc):
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "abort"
assert result["reason"] == expected
@pytest.mark.parametrize("exception,expected", PAIRING_TRY_LATER_ERRORS)
async def test_pair_try_later_errors_on_start(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
# User initiates pairing - the device is busy right now, but pairing may succeed once it is put into pairing mode or rebooted
test_exc = exception("error")
with patch.object(device, "start_pairing", side_effect=test_exc):
result2 = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result2["step_id"] == expected
assert result2["type"] == "form"
# Device is rebooted or placed into pairing mode as they have been instructed
# We start pairing again
result3 = await hass.config_entries.flow.async_configure(
result2["flow_id"], user_input={"any": "key"}
)
# .. and successfully complete pair
result4 = await hass.config_entries.flow.async_configure(
result3["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result4["type"] == "create_entry"
assert result4["title"] == "Koogeek-LS1-20833F"
@pytest.mark.parametrize("exception,expected", PAIRING_START_FORM_ERRORS)
async def test_pair_form_errors_on_start(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
"source": "zeroconf",
}
# User initiates pairing - device refuses to enter pairing mode
test_exc = exception("error")
with patch.object(device, "start_pairing", side_effect=test_exc):
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "form"
assert result["errors"]["pairing_code"] == expected
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
"source": "zeroconf",
}
# User gets back the form
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["errors"] == {}
# User re-tries entering pairing code
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_ABORT_ERRORS)
async def test_pair_abort_errors_on_finish(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
"source": "zeroconf",
}
# User initiates pairing - this triggers the device to show a pairing code
# and then HA to show a pairing form
finish_pairing = tests.async_mock.AsyncMock(side_effect=exception("error"))
with patch.object(device, "start_pairing", return_value=finish_pairing):
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
"source": "zeroconf",
}
# User enters pairing code
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "abort"
assert result["reason"] == expected
@pytest.mark.parametrize("exception,expected", PAIRING_FINISH_FORM_ERRORS)
async def test_pair_form_errors_on_finish(hass, controller, exception, expected):
"""Test various pairing errors."""
device = setup_mock_accessory(controller)
discovery_info = get_device_discovery_info(device)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "zeroconf"}, data=discovery_info
)
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
"source": "zeroconf",
}
# User initiates pairing - this triggers the device to show a pairing code
# and then HA to show a pairing form
finish_pairing = tests.async_mock.AsyncMock(side_effect=exception("error"))
with patch.object(device, "start_pairing", return_value=finish_pairing):
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
"source": "zeroconf",
}
# User enters pairing code
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "form"
assert result["errors"]["pairing_code"] == expected
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
"source": "zeroconf",
}
async def test_user_works(hass, controller):
"""Test user initiated disovers devices."""
setup_mock_accessory(controller)
# Device is discovered
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "user"}
)
assert result["type"] == "form"
assert result["step_id"] == "user"
assert get_flow_context(hass, result) == {
"source": "user",
}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"device": "TestDevice"}
)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert get_flow_context(hass, result) == {
"source": "user",
"unique_id": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
}
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
async def test_user_no_devices(hass, controller):
"""Test user initiated pairing where no devices discovered."""
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "user"}
)
assert result["type"] == "abort"
assert result["reason"] == "no_devices"
async def test_user_no_unpaired_devices(hass, controller):
"""Test user initiated pairing where no unpaired devices discovered."""
device = setup_mock_accessory(controller)
# Pair the mock device so that it shows as paired in discovery
finish_pairing = await device.start_pairing(device.device_id)
await finish_pairing(device.pairing_code)
# Device discovery is requested
result = await hass.config_entries.flow.async_init(
"homekit_controller", context={"source": "user"}
)
assert result["type"] == "abort"
assert result["reason"] == "no_devices"
async def test_unignore_works(hass, controller):
"""Test rediscovery triggered disovers work."""
device = setup_mock_accessory(controller)
# Device is unignored
result = await hass.config_entries.flow.async_init(
"homekit_controller",
context={"source": "unignore"},
data={"unique_id": device.device_id},
)
assert result["type"] == "form"
assert result["step_id"] == "pair"
assert get_flow_context(hass, result) == {
"hkid": "00:00:00:00:00:00",
"title_placeholders": {"name": "TestDevice"},
"unique_id": "00:00:00:00:00:00",
"source": "unignore",
}
# User initiates pairing by clicking on 'configure' - device enters pairing mode and displays code
result = await hass.config_entries.flow.async_configure(result["flow_id"])
assert result["type"] == "form"
assert result["step_id"] == "pair"
# Pairing finalized
result = await hass.config_entries.flow.async_configure(
result["flow_id"], user_input={"pairing_code": "111-22-333"}
)
assert result["type"] == "create_entry"
assert result["title"] == "Koogeek-LS1-20833F"
async def test_unignore_ignores_missing_devices(hass, controller):
"""Test rediscovery triggered disovers handle devices that have gone away."""
setup_mock_accessory(controller)
# Device is unignored
result = await hass.config_entries.flow.async_init(
"homekit_controller",
context={"source": "unignore"},
data={"unique_id": "00:00:00:00:00:01"},
)
assert result["type"] == "abort"
assert result["reason"] == "no_devices"
|
import argparse
import logging
import sys
from paasta_tools.kubernetes_tools import delete_deployment
from paasta_tools.kubernetes_tools import ensure_namespace
from paasta_tools.kubernetes_tools import get_kubernetes_app_name
from paasta_tools.kubernetes_tools import KubeClient
from paasta_tools.utils import decompose_job_id
from paasta_tools.utils import InvalidJobNameError
from paasta_tools.utils import SPACER
log = logging.getLogger(__name__)
def parse_args() -> argparse.Namespace:
parser = argparse.ArgumentParser(description="Deletes list of deployments.")
parser.add_argument(
"service_instance_list",
nargs="+",
help="The list of service instances to delete",
metavar=f"SERVICE{SPACER}INSTANCE",
)
args = parser.parse_args()
return args
def get_deployment_names_from_list(service_instance_list):
app_names = []
for service_instance in service_instance_list:
try:
service, instance, _, __ = decompose_job_id(service_instance)
app_name = get_kubernetes_app_name(service, instance)
app_names.append(app_name)
except InvalidJobNameError:
log.error(
f"Invalid service instance specified. Format is service{SPACER}instance."
)
sys.exit(1)
return app_names
def main() -> None:
args = parse_args()
service_instance_list = args.service_instance_list
deployment_names = get_deployment_names_from_list(service_instance_list)
log.debug(f"Deleting deployments: {deployment_names}")
kube_client = KubeClient()
ensure_namespace(kube_client=kube_client, namespace="paasta")
for deployment_name in deployment_names:
try:
log.debug(f"Deleting {deployment_name}")
delete_deployment(kube_client=kube_client, deployment_name=deployment_name)
except Exception as err:
log.error(f"Unable to delete {deployment_name}: {err}")
sys.exit(1)
sys.exit(0)
if __name__ == "__main__":
main()
|
import time
from queue import Empty
from pytest import fixture
from pytest import raises
from zake.fake_client import FakeClient
from paasta_tools.deployd.common import ServiceInstance
from paasta_tools.deployd.queue import ZKDelayDeadlineQueue
def make_si(wait_until, bounce_by):
"""Just using mock.Mock(wait_until=wait_until, bounce_by=bounce_by) mostly works, but our PriorityQueues
occasionally will compare two ServiceInstances directly, and Mocks aren't comparable unless you define an __eq__."""
return ServiceInstance(
service="service",
instance="instance",
bounce_by=bounce_by,
wait_until=wait_until,
watcher="watcher",
failures=0,
processed_count=0,
bounce_start_time=1.0,
enqueue_time=2.0,
)
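# A minimal sketch (not part of the original tests) of why bare Mocks break:
# when two queue entries tie on priority, heapq falls back to comparing the
# payloads themselves, and Mock defines no comparison methods:
#
#     import heapq
#     from unittest import mock
#     heap = []
#     heapq.heappush(heap, (1.0, mock.Mock()))
#     heapq.heappush(heap, (1.0, mock.Mock()))  # TypeError: '<' not supported
#
# ServiceInstance is a plain record type, so the equivalent push compares its
# fields and succeeds.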
class TestZKDelayDeadlineQueue:
@fixture
def queue(self):
client = FakeClient()
client.start()
yield ZKDelayDeadlineQueue(client, "/")
@fixture
def multiple_queues(self):
client = FakeClient()
client.start()
yield [ZKDelayDeadlineQueue(client, "/") for _ in range(5)]
def test_put_then_get_single_threaded(self, queue):
si = make_si(wait_until=time.time() - 0.01, bounce_by=time.time())
queue.put(si)
        # block=False or a really short timeout would fail here, as we have to wait for queue's watchers to be notified
# by ZK that something has changed.
with queue.get(timeout=1.0) as result:
assert result == si
# Non-blocking get should return results immediately if we force _update_local_state.
queue.put(si)
queue._update_local_state(None)
with queue.get(block=False) as result:
assert result == si
def test_put_then_get_different_instances(self, multiple_queues):
queue1 = multiple_queues[0]
queue2 = multiple_queues[1]
si = make_si(wait_until=time.time() - 0.01, bounce_by=time.time())
queue1.put(si)
        # block=False or a really short timeout would fail here, as we have to wait for queue2's watchers to be notified
# by ZK that something has changed.
with queue2.get(timeout=1.0) as result:
assert result == si
# Non-blocking get should return results immediately if we force _update_local_state.
queue1.put(si)
queue2._update_local_state(None)
with queue2.get(block=False) as result:
assert result == si
def test_dont_block_indefinitely_when_wait_until_is_in_future(self, queue):
"""Regression test for a specific bug in the first implementation of DelayDeadlineQueue"""
# First, put an item with a distant wait_until
queue.put(make_si(wait_until=time.time() + 100, bounce_by=time.time() + 100))
# an immediate get should fail.
with raises(Empty):
with queue.get(block=False) as result:
print(f"Should have raised, got {result}")
# a get with a short timeout should fail.
with raises(Empty):
with queue.get(timeout=0.0001) as result:
print(f"Should have raised, got {result}")
wait_until = time.time() + 0.01
queue.put(make_si(wait_until=wait_until, bounce_by=wait_until))
# but if we wait a short while it should return.
with queue.get(
timeout=1.0
) as result: # This timeout is only there so that if this test fails it doesn't take forever.
pass
assert (
time.time() + 0.001 > wait_until
) # queue rounds to millisecond, so we might be slightly under.
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import errno
import os
import pdb
import sys
import traceback
from absl import command_name
from absl import flags
from absl import logging
try:
import faulthandler
except ImportError:
faulthandler = None
FLAGS = flags.FLAGS
flags.DEFINE_boolean('run_with_pdb', False, 'Set to true for PDB debug mode')
flags.DEFINE_boolean('pdb_post_mortem', False,
'Set to true to handle uncaught exceptions with PDB '
'post mortem.')
flags.DEFINE_alias('pdb', 'pdb_post_mortem')
flags.DEFINE_boolean('run_with_profiling', False,
'Set to true for profiling the script. '
'Execution will be slower, and the output format might '
'change over time.')
flags.DEFINE_string('profile_file', None,
'Dump profile information to a file (for python -m '
'pstats). Implies --run_with_profiling.')
flags.DEFINE_boolean('use_cprofile_for_profiling', True,
'Use cProfile instead of the profile module for '
'profiling. This has no effect unless '
'--run_with_profiling is set.')
flags.DEFINE_boolean('only_check_args', False,
'Set to true to validate args and exit.',
allow_hide_cpp=True)
# If main() exits via an abnormal exception, call into these
# handlers before exiting.
EXCEPTION_HANDLERS = []
class Error(Exception):
pass
class UsageError(Error):
"""Exception raised when the arguments supplied by the user are invalid.
Raise this when the arguments supplied are invalid from the point of
view of the application. For example when two mutually exclusive
flags have been supplied or when there are not enough non-flag
arguments. It is distinct from flags.Error which covers the lower
level of parsing and validating individual flags.
"""
def __init__(self, message, exitcode=1):
super(UsageError, self).__init__(message)
self.exitcode = exitcode
class HelpFlag(flags.BooleanFlag):
"""Special boolean flag that displays usage and raises SystemExit."""
NAME = 'help'
SHORT_NAME = '?'
def __init__(self):
super(HelpFlag, self).__init__(
self.NAME, False, 'show this help',
short_name=self.SHORT_NAME, allow_hide_cpp=True)
def parse(self, arg):
if self._parse(arg):
usage(shorthelp=True, writeto_stdout=True)
# Advertise --helpfull on stdout, since usage() was on stdout.
print()
print('Try --helpfull to get a list of all flags.')
sys.exit(1)
class HelpshortFlag(HelpFlag):
"""--helpshort is an alias for --help."""
NAME = 'helpshort'
SHORT_NAME = None
class HelpfullFlag(flags.BooleanFlag):
"""Display help for flags in the main module and all dependent modules."""
def __init__(self):
super(HelpfullFlag, self).__init__(
'helpfull', False, 'show full help', allow_hide_cpp=True)
def parse(self, arg):
if self._parse(arg):
usage(writeto_stdout=True)
sys.exit(1)
class HelpXMLFlag(flags.BooleanFlag):
"""Similar to HelpfullFlag, but generates output in XML format."""
def __init__(self):
super(HelpXMLFlag, self).__init__(
'helpxml', False, 'like --helpfull, but generates XML output',
allow_hide_cpp=True)
def parse(self, arg):
if self._parse(arg):
flags.FLAGS.write_help_in_xml_format(sys.stdout)
sys.exit(1)
def parse_flags_with_usage(args):
"""Tries to parse the flags, print usage, and exit if unparseable.
Args:
args: [str], a non-empty list of the command line arguments including
program name.
Returns:
[str], a non-empty list of remaining command line arguments after parsing
flags, including program name.
"""
try:
return FLAGS(args)
except flags.Error as error:
sys.stderr.write('FATAL Flags parsing error: %s\n' % error)
sys.stderr.write('Pass --helpshort or --helpfull to see help on flags.\n')
sys.exit(1)
_define_help_flags_called = False
def define_help_flags():
"""Registers help flags. Idempotent."""
# Use a global to ensure idempotence.
global _define_help_flags_called
if not _define_help_flags_called:
flags.DEFINE_flag(HelpFlag())
flags.DEFINE_flag(HelpshortFlag()) # alias for --help
flags.DEFINE_flag(HelpfullFlag())
flags.DEFINE_flag(HelpXMLFlag())
_define_help_flags_called = True
def _register_and_parse_flags_with_usage(
argv=None,
flags_parser=parse_flags_with_usage,
):
"""Registers help flags, parses arguments and shows usage if appropriate.
This also calls sys.exit(0) if flag --only_check_args is True.
Args:
argv: [str], a non-empty list of the command line arguments including
program name, sys.argv is used if None.
flags_parser: Callable[[List[Text]], Any], the function used to parse flags.
The return value of this function is passed to `main` untouched.
It must guarantee FLAGS is parsed after this function is called.
Returns:
The return value of `flags_parser`. When using the default `flags_parser`,
it returns the following:
[str], a non-empty list of remaining command line arguments after parsing
flags, including program name.
Raises:
Error: Raised when flags_parser is called, but FLAGS is not parsed.
SystemError: Raised when it's called more than once.
"""
if _register_and_parse_flags_with_usage.done:
raise SystemError('Flag registration can be done only once.')
define_help_flags()
original_argv = sys.argv if argv is None else argv
args_to_main = flags_parser(original_argv)
if not FLAGS.is_parsed():
raise Error('FLAGS must be parsed after flags_parser is called.')
# Exit when told so.
if FLAGS.only_check_args:
sys.exit(0)
# Immediately after flags are parsed, bump verbosity to INFO if the flag has
# not been set.
if FLAGS['verbosity'].using_default_value:
FLAGS.verbosity = 0
_register_and_parse_flags_with_usage.done = True
return args_to_main
_register_and_parse_flags_with_usage.done = False
def _run_main(main, argv):
"""Calls main, optionally with pdb or profiler."""
if FLAGS.run_with_pdb:
sys.exit(pdb.runcall(main, argv))
elif FLAGS.run_with_profiling or FLAGS.profile_file:
# Avoid import overhead since most apps (including performance-sensitive
# ones) won't be run with profiling.
import atexit
if FLAGS.use_cprofile_for_profiling:
import cProfile as profile
else:
import profile
profiler = profile.Profile()
if FLAGS.profile_file:
atexit.register(profiler.dump_stats, FLAGS.profile_file)
else:
atexit.register(profiler.print_stats)
retval = profiler.runcall(main, argv)
sys.exit(retval)
else:
sys.exit(main(argv))
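# Illustrative invocation (script name is hypothetical): dump cProfile stats
# to a file for later inspection with `python -m pstats`, as the flag help
# above describes; --profile_file implies --run_with_profiling:
#
#     python my_script.py --profile_file=/tmp/prof.out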
def _call_exception_handlers(exception):
"""Calls any installed exception handlers."""
for handler in EXCEPTION_HANDLERS:
try:
if handler.wants(exception):
handler.handle(exception)
except: # pylint: disable=bare-except
try:
# We don't want to stop for exceptions in the exception handlers but
# we shouldn't hide them either.
logging.error(traceback.format_exc())
except: # pylint: disable=bare-except
# In case even the logging statement fails, ignore.
pass
def run(
main,
argv=None,
flags_parser=parse_flags_with_usage,
):
"""Begins executing the program.
Args:
main: The main function to execute. It takes an single argument "argv",
which is a list of command line arguments with parsed flags removed.
The return value is passed to `sys.exit`, and so for example
a return value of 0 or None results in a successful termination, whereas
a return value of 1 results in abnormal termination.
For more details, see https://docs.python.org/3/library/sys#sys.exit
argv: A non-empty list of the command line arguments including program name,
sys.argv is used if None.
flags_parser: Callable[[List[Text]], Any], the function used to parse flags.
The return value of this function is passed to `main` untouched.
It must guarantee FLAGS is parsed after this function is called.
- Parses command line flags with the flag module.
- If there are any errors, prints usage().
- Calls main() with the remaining arguments.
- If main() raises a UsageError, prints usage and the error message.
"""
try:
args = _run_init(
sys.argv if argv is None else argv,
flags_parser,
)
while _init_callbacks:
callback = _init_callbacks.popleft()
callback()
try:
_run_main(main, args)
except UsageError as error:
usage(shorthelp=True, detailed_error=error, exitcode=error.exitcode)
except:
exc = sys.exc_info()[1]
# Don't try to post-mortem debug successful SystemExits, since those
# mean there wasn't actually an error. In particular, the test framework
# raises SystemExit(False) even if all tests passed.
if isinstance(exc, SystemExit) and not exc.code:
raise
      # Check the tty so that we don't hang waiting for input in a
# non-interactive scenario.
if FLAGS.pdb_post_mortem and sys.stdout.isatty():
traceback.print_exc()
print()
print(' *** Entering post-mortem debugging ***')
print()
pdb.post_mortem()
raise
except Exception as e:
_call_exception_handlers(e)
raise
# Callbacks which have been deferred until after _run_init has been called.
_init_callbacks = collections.deque()
def call_after_init(callback):
"""Calls the given callback only once ABSL has finished initialization.
If ABSL has already finished initialization when `call_after_init` is
called then the callback is executed immediately, otherwise `callback` is
  stored to be executed after `app.run` has finished initializing (i.e. just
before the main function is called).
If called after `app.run`, this is equivalent to calling `callback()` in the
caller thread. If called before `app.run`, callbacks are run sequentially (in
an undefined order) in the same thread as `app.run`.
Args:
callback: a callable to be called once ABSL has finished initialization.
This may be immediate if initialization has already finished. It
takes no arguments and returns nothing.
"""
if _run_init.done:
callback()
else:
_init_callbacks.append(callback)
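# A hedged usage sketch (flag and callback names are illustrative, not part of
# this module): defer work that needs parsed flags until just before main().
#
#     from absl import app, flags
#
#     flags.DEFINE_string('name', 'world', 'Who to greet.')
#
#     def _announce():
#       # Flags are parsed by the time callbacks run.
#       print('flags ready, name=%s' % flags.FLAGS.name)
#
#     app.call_after_init(_announce)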
def _run_init(
argv,
flags_parser,
):
"""Does one-time initialization and re-parses flags on rerun."""
if _run_init.done:
return flags_parser(argv)
command_name.make_process_name_useful()
# Set up absl logging handler.
logging.use_absl_handler()
args = _register_and_parse_flags_with_usage(
argv=argv,
flags_parser=flags_parser,
)
if faulthandler:
try:
faulthandler.enable()
except Exception: # pylint: disable=broad-except
# Some tests verify stderr output very closely, so don't print anything.
# Disabled faulthandler is a low-impact error.
pass
_run_init.done = True
return args
_run_init.done = False
def usage(shorthelp=False, writeto_stdout=False, detailed_error=None,
exitcode=None):
"""Writes __main__'s docstring to stderr with some help text.
Args:
shorthelp: bool, if True, prints only flags from the main module,
rather than all flags.
writeto_stdout: bool, if True, writes help message to stdout,
rather than to stderr.
detailed_error: str, additional detail about why usage info was presented.
exitcode: optional integer, if set, exits with this status code after
writing help.
"""
if writeto_stdout:
stdfile = sys.stdout
else:
stdfile = sys.stderr
doc = sys.modules['__main__'].__doc__
if not doc:
doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
doc = flags.text_wrap(doc, indent=' ', firstline_indent='')
else:
# Replace all '%s' with sys.argv[0], and all '%%' with '%'.
num_specifiers = doc.count('%') - 2 * doc.count('%%')
try:
doc %= (sys.argv[0],) * num_specifiers
except (OverflowError, TypeError, ValueError):
# Just display the docstring as-is.
pass
if shorthelp:
flag_str = FLAGS.main_module_help()
else:
flag_str = FLAGS.get_help()
try:
stdfile.write(doc)
if flag_str:
stdfile.write('\nflags:\n')
stdfile.write(flag_str)
stdfile.write('\n')
if detailed_error is not None:
stdfile.write('\n%s\n' % detailed_error)
except IOError as e:
# We avoid printing a huge backtrace if we get EPIPE, because
# "foo.par --help | less" is a frequent use case.
if e.errno != errno.EPIPE:
raise
if exitcode is not None:
sys.exit(exitcode)
class ExceptionHandler(object):
"""Base exception handler from which other may inherit."""
def wants(self, exc):
"""Returns whether this handler wants to handle the exception or not.
    This base class returns True for all exceptions by default. Override in a
    subclass to be more selective.
Args:
exc: Exception, the current exception.
"""
del exc # Unused.
return True
def handle(self, exc):
"""Do something with the current exception.
Args:
exc: Exception, the current exception
This method must be overridden.
"""
raise NotImplementedError()
def install_exception_handler(handler):
"""Installs an exception handler.
Args:
handler: ExceptionHandler, the exception handler to install.
Raises:
TypeError: Raised when the handler was not of the correct type.
All installed exception handlers will be called if main() exits via
an abnormal exception, i.e. not one of SystemExit, KeyboardInterrupt,
FlagsError or UsageError.
"""
if not isinstance(handler, ExceptionHandler):
raise TypeError('handler of type %s does not inherit from ExceptionHandler'
% type(handler))
EXCEPTION_HANDLERS.append(handler)
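# Example handler (illustrative only), built from the API defined above: log
# the type of any abnormal exception escaping main(), accepting everything via
# the default wants().
#
#     class _LogTypeHandler(ExceptionHandler):
#       def handle(self, exc):
#         logging.error('unhandled exception: %s', type(exc).__name__)
#
#     install_exception_handler(_LogTypeHandler())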
|
from collections import defaultdict
from django.utils.translation import gettext_lazy as _
from weblate.addons.base import BaseAddon
from weblate.addons.events import EVENT_POST_COMMIT
from weblate.addons.forms import GitSquashForm
from weblate.utils.errors import report_error
from weblate.vcs.base import RepositoryException
class GitSquashAddon(BaseAddon):
name = "weblate.git.squash"
verbose = _("Squash Git commits")
description = _("Squash Git commits prior to pushing changes.")
settings_form = GitSquashForm
compat = {
"vcs": {
"git",
"gerrit",
"subversion",
"github",
"pagure",
"gitlab",
"git-force-push",
}
}
events = (EVENT_POST_COMMIT,)
icon = "compress.svg"
repo_scope = True
def squash_all(self, component, repository, base=None, author=None):
remote = base if base else repository.get_remote_branch_name()
message = self.get_squash_commit_message(repository, "%B", remote)
repository.execute(["reset", "--mixed", remote])
        # Can happen for added and removed translations
if repository.needs_commit():
repository.commit(message, author)
def get_filenames(self, component):
languages = defaultdict(list)
for origin in [component] + list(component.linked_childs):
for translation in origin.translation_set.prefetch_related("language"):
code = translation.language.code
if not translation.filename:
continue
languages[code].extend(translation.filenames)
return languages
def get_squash_commit_message(self, repository, log_format, remote, filenames=None):
commit_message = self.instance.configuration.get("commit_message")
if not commit_message:
command = [
"log",
f"--format={log_format}",
f"{remote}..HEAD",
]
if filenames:
command += ["--"] + filenames
commit_message = repository.execute(command)
if self.instance.configuration.get("append_trailers", True):
command = [
"log",
"--format=%(trailers)%nCo-authored-by: %an <%ae>",
f"{remote}..HEAD",
]
if filenames:
command += ["--"] + filenames
trailer_lines = {
trailer
for trailer in repository.execute(command).split("\n")
if trailer.strip()
}
commit_message_lines_with_trailers_removed = [
line for line in commit_message.split("\n") if line not in trailer_lines
]
commit_message = "\n\n".join(
[
"\n".join(commit_message_lines_with_trailers_removed),
"\n".join(sorted(trailer_lines)),
]
).strip("\n")
return commit_message
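    # A sketch of the intended behaviour (example data is illustrative): for
    # two pending commits whose bodies end in Co-authored-by trailers, the
    # squash message concatenates the commit bodies, strips every line that
    # also appears in the collected trailer set, and appends one sorted,
    # de-duplicated trailer block such as:
    #
    #     Co-authored-by: Alice <alice@example.com>
    #     Co-authored-by: Bob <bob@example.com>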
def squash_language(self, component, repository):
remote = repository.get_remote_branch_name()
languages = self.get_filenames(component)
messages = {}
for code, filenames in languages.items():
if not filenames:
continue
messages[code] = self.get_squash_commit_message(
repository, "%B", remote, filenames
)
repository.execute(["reset", "--mixed", remote])
for code, message in messages.items():
if not message:
continue
repository.commit(message, files=languages[code])
def squash_file(self, component, repository):
remote = repository.get_remote_branch_name()
languages = self.get_filenames(component)
messages = {}
for filenames in languages.values():
for filename in filenames:
messages[filename] = self.get_squash_commit_message(
repository, "%B", remote, [filename]
)
repository.execute(["reset", "--mixed", remote])
for filename, message in messages.items():
if not message:
continue
repository.commit(message, files=[filename])
def squash_author(self, component, repository):
remote = repository.get_remote_branch_name()
# Get list of pending commits with authors
commits = [
x.split(None, 1)
for x in reversed(
repository.execute(
["log", "--format=%H %aE", f"{remote}..HEAD"]
).splitlines()
)
]
gpg_sign = repository.get_gpg_sign_args()
tmp = "weblate-squash-tmp"
repository.delete_branch(tmp)
try:
# Create local branch for upstream
repository.execute(["branch", tmp, remote])
# Checkout upstream branch
repository.execute(["checkout", tmp])
while commits:
commit, author = commits.pop(0)
# Remember current revision for final squash
base = repository.get_last_revision()
# Cherry pick current commit (this should work
# unless something is messed up)
repository.execute(["cherry-pick", commit] + gpg_sign)
handled = []
# Pick other commits by same author
for i, other in enumerate(commits):
if other[1] != author:
continue
try:
repository.execute(["cherry-pick", other[0]] + gpg_sign)
handled.append(i)
except RepositoryException:
                        # If this fails, move on to the next author; we will
                        # pick this commit up later (it depends on another commit)
repository.execute(["cherry-pick", "--abort"])
break
# Remove processed commits from list
for i in reversed(handled):
del commits[i]
# Squash all current commits from one author
self.squash_all(component, repository, base, author)
# Update working copy with squashed commits
repository.execute(["checkout", repository.branch])
repository.execute(["reset", "--hard", tmp])
repository.delete_branch(tmp)
except RepositoryException:
report_error(cause="Failed squash")
# Revert to original branch without any changes
repository.execute(["reset", "--hard"])
repository.execute(["checkout", repository.branch])
repository.delete_branch(tmp)
def post_commit(self, component):
repository = component.repository
with repository.lock:
# Ensure repository is rebased on current remote prior to squash, otherwise
# we might be squashing upstream changes as well due to reset.
if component.repo_needs_merge() and not component.update_branch(
method="rebase", skip_push=True
):
return
if not repository.needs_push():
return
method = getattr(
self, "squash_{}".format(self.instance.configuration["squash"])
)
method(component, repository)
            # Commit any remaining files; these were most likely generated
            # by the addon and do not exactly match the patterns above
component.commit_files(
template=component.addon_message,
extra_context={"addon_name": self.verbose},
signals=False,
skip_push=True,
)
|
import os
import time
from multiprocessing import Process
from random import random
import pytest
from arctic import Arctic, VERSION_STORE
from arctic.exceptions import LibraryNotFoundException
from arctic.hooks import register_get_auth_hook
from arctic.store.version_store import VersionStore
MY_ARCTIC = None # module-level Arctic singleton
AUTH_COUNT = 0
def f(library_name, total_writes, do_reset):
my_pid = os.getpid()
data = [str(my_pid)] * 100
if do_reset:
global AUTH_COUNT
AUTH_COUNT = 0
MY_ARCTIC.reset()
assert AUTH_COUNT > 0
while True:
try:
vstore = MY_ARCTIC[library_name] # wait for parent to initialize
break
except LibraryNotFoundException:
pass
time.sleep(random() * 0.2)
for i in range(total_writes):
        if i % 20 == 0:  # add some randomisation to make sure that processes are multiplexed across time
time.sleep(random())
key = "{}_{}".format(my_pid, i)
vstore.write(key, data + [key])
for i in range(total_writes):
key = "{}_{}".format(my_pid, i)
assert vstore.read(key).data == data + [key]
def my_auth_hook(host, app_name, database_name):
global AUTH_COUNT
AUTH_COUNT += 1
@pytest.mark.timeout(600)
def test_multiprocessing_safety(mongo_host, library_name):
    # Create/initialize the library in the parent process, then spawn children, and start them aligned in time
total_processes = 64
total_writes_per_child = 100
register_get_auth_hook(my_auth_hook)
global MY_ARCTIC
MY_ARCTIC = Arctic(mongo_host=mongo_host)
MY_ARCTIC.initialize_library(library_name, VERSION_STORE)
assert isinstance(MY_ARCTIC.get_library(library_name), VersionStore)
processes = [Process(target=f, args=(library_name, total_writes_per_child, True)) for _ in range(total_processes)]
for p in processes:
p.start()
for p in processes:
p.join()
for p in processes:
assert p.exitcode == 0
assert isinstance(MY_ARCTIC.get_library(library_name), VersionStore)
@pytest.mark.timeout(600)
def test_multiprocessing_safety_parent_children_race(mongo_host, library_name):
# Create Arctic and directly fork/start children (no wait)
total_iterations = 12
total_processes = 6
total_writes_per_child = 20
global MY_ARCTIC
for i in range(total_iterations):
processes = list()
MY_ARCTIC = Arctic(mongo_host=mongo_host)
for j in range(total_processes):
p = Process(target=f, args=(library_name, total_writes_per_child, False))
            p.start()  # start immediately; don't wait until all child processes have been created
processes.append(p)
MY_ARCTIC.initialize_library(library_name, VERSION_STORE) # this will unblock spinning children
for p in processes:
p.join()
for p in processes:
assert p.exitcode == 0
MY_ARCTIC.reset()
assert isinstance(MY_ARCTIC.get_library(library_name), VersionStore)
|
import tests
from vcr.stubs import VCRHTTPSConnection
from vcr import config
from pyVmomi import SoapAdapter
from pyVmomi import SoapStubAdapter
from pyVmomi import vim
from pyVmomi.VmomiSupport import GetRequestContext
class SerializerTests(tests.VCRTestBase):
def test_serialize_object(self):
val = vim.vm.device.VirtualDeviceSpec.FileOperation()
# This line should not raise an exception, especially on Python 3.
SoapAdapter.Serialize(val)
def test_serialize_integer(self):
lp = vim.LongPolicy()
lp.inherited = False
lp.value = 100
SoapAdapter.Serialize(lp, version='vim.version.version10')
def test_serialize_float(self):
pc = vim.host.VsanInternalSystem.PolicyCost()
pc.diskSpaceToAddressSpaceRatio = 1.0
SoapAdapter.Serialize(pc, version='vim.version.version10')
def test_serialize_unicode(self):
self.assertEqual(SoapAdapter.SerializeToUnicode('Ḃ'),
u'<object xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns="urn:vim25" xsi:type="xsd:string">\u1e02</object>')
self.assertEqual(SoapAdapter.SerializeToUnicode(u'Ḃ'),
u'<object xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns="urn:vim25" xsi:type="xsd:string">\u1e02</object>')
self.assertEqual(SoapAdapter.SerializeToUnicode(u'\u1e02'),
u'<object xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:xsd="http://www.w3.org/2001/XMLSchema" xmlns="urn:vim25" xsi:type="xsd:string">\u1e02</object>')
def _base_serialize_test(self, soap_creator, request_matcher):
my_vcr = config.VCR(
custom_patches=(
(SoapAdapter, '_HTTPSConnection', VCRHTTPSConnection),))
my_vcr.register_matcher('request_matcher', request_matcher)
with my_vcr.use_cassette(
'test_simple_request_serializer.yaml',
cassette_library_dir=tests.fixtures_path,
record_mode='none',
match_on=['request_matcher']) as cass:
stub = soap_creator()
si = vim.ServiceInstance("ServiceInstance", stub)
content = si.RetrieveContent()
self.assertTrue(content is not None)
self.assertTrue(
'<_this type="ServiceInstance">ServiceInstance</_this>'
in cass.requests[0].body.decode("utf-8"))
def _body_request_matcher(self, r1, r2):
soap_msg = ('<soapenv:Body>'
'<RetrieveServiceContent xmlns="urn:vim25">'
'<_this type="ServiceInstance">'
'ServiceInstance'
'</_this>'
'</RetrieveServiceContent>'
'</soapenv:Body>')
if soap_msg in r1.body.decode("utf-8"):
return True
raise SystemError('serialization error occurred')
def _request_context_request_matcher(self, r1, r2):
request_context = ('<soapenv:Header><vcSessionCookie>123456789</vcSessionCookie></soapenv:Header>')
if request_context in r1.body.decode("utf-8"):
return True
raise SystemError('serialization error occurred')
def test_simple_request_serializer(self):
def soap_creator():
return SoapStubAdapter('vcsa', 443)
self._base_serialize_test(soap_creator, self._body_request_matcher)
def test_request_context_serializer_instance(self):
def request_matcher(r1, r2):
return self._request_context_request_matcher(r1, r2) and self._body_request_matcher(r1, r2)
def soap_creator():
return SoapStubAdapter('vcsa', 443, requestContext={'vcSessionCookie': '123456789'})
self._base_serialize_test(soap_creator, request_matcher)
def test_request_context_serializer_global(self):
def request_matcher(r1, r2):
return self._request_context_request_matcher(r1, r2) and self._body_request_matcher(r1, r2)
def soap_creator():
return SoapStubAdapter('vcsa', 443)
GetRequestContext()['vcSessionCookie'] = '123456789'
try:
self._base_serialize_test(soap_creator, request_matcher)
finally:
GetRequestContext().pop("vcSessionCookie")
|
import numpy as np
from matchzoo.engine import base_metric
from . import Precision
class AveragePrecision(base_metric.BaseMetric):
"""Average precision metric."""
ALIAS = ['average_precision', 'ap']
def __init__(self, threshold: float = 0.):
"""
:class:`AveragePrecision` constructor.
:param threshold: The label threshold of relevance degree.
"""
self._threshold = threshold
def __repr__(self) -> str:
""":return: Formated string representation of the metric."""
return f"{self.ALIAS[0]}({self._threshold})"
def __call__(self, y_true: np.array, y_pred: np.array) -> float:
"""
Calculate average precision (area under PR curve).
Example:
>>> y_true = [0, 1]
>>> y_pred = [0.1, 0.6]
>>> round(AveragePrecision()(y_true, y_pred), 2)
0.75
>>> round(AveragePrecision()([], []), 2)
0.0
        :param y_true: The ground truth label of each document.
:param y_pred: The predicted scores of each document.
:return: Average precision.
"""
precision_metrics = [Precision(k + 1) for k in range(len(y_pred))]
out = [metric(y_true, y_pred) for metric in precision_metrics]
if not out:
return 0.
        return np.mean(out).item()
|
import asyncio
from functools import partial
import logging
from typing import Any, Dict, Optional
from async_timeout import timeout
from dsmr_parser import obis_references as obis_ref
from dsmr_parser.clients.protocol import create_dsmr_reader, create_tcp_dsmr_reader
import serial
from homeassistant import config_entries, core, exceptions
from homeassistant.const import CONF_HOST, CONF_PORT
from .const import ( # pylint:disable=unused-import
CONF_DSMR_VERSION,
CONF_SERIAL_ID,
CONF_SERIAL_ID_GAS,
DOMAIN,
)
_LOGGER = logging.getLogger(__name__)
class DSMRConnection:
"""Test the connection to DSMR and receive telegram to read serial ids."""
def __init__(self, host, port, dsmr_version):
"""Initialize."""
self._host = host
self._port = port
self._dsmr_version = dsmr_version
self._telegram = {}
def equipment_identifier(self):
"""Equipment identifier."""
if obis_ref.EQUIPMENT_IDENTIFIER in self._telegram:
dsmr_object = self._telegram[obis_ref.EQUIPMENT_IDENTIFIER]
return getattr(dsmr_object, "value", None)
def equipment_identifier_gas(self):
"""Equipment identifier gas."""
if obis_ref.EQUIPMENT_IDENTIFIER_GAS in self._telegram:
dsmr_object = self._telegram[obis_ref.EQUIPMENT_IDENTIFIER_GAS]
return getattr(dsmr_object, "value", None)
async def validate_connect(self, hass: core.HomeAssistant) -> bool:
"""Test if we can validate connection with the device."""
def update_telegram(telegram):
if obis_ref.EQUIPMENT_IDENTIFIER in telegram:
self._telegram = telegram
transport.close()
if self._host is None:
reader_factory = partial(
create_dsmr_reader,
self._port,
self._dsmr_version,
update_telegram,
loop=hass.loop,
)
else:
reader_factory = partial(
create_tcp_dsmr_reader,
self._host,
self._port,
self._dsmr_version,
update_telegram,
loop=hass.loop,
)
try:
transport, protocol = await asyncio.create_task(reader_factory())
except (serial.serialutil.SerialException, OSError):
_LOGGER.exception("Error connecting to DSMR")
return False
if transport:
try:
async with timeout(30):
await protocol.wait_closed()
except asyncio.TimeoutError:
                # Timeout (no data received): close the transport and return True
                # (an empty telegram will later result in a CannotCommunicate error)
transport.close()
await protocol.wait_closed()
return True
async def _validate_dsmr_connection(hass: core.HomeAssistant, data):
"""Validate the user input allows us to connect."""
conn = DSMRConnection(data.get(CONF_HOST), data[CONF_PORT], data[CONF_DSMR_VERSION])
if not await conn.validate_connect(hass):
raise CannotConnect
equipment_identifier = conn.equipment_identifier()
equipment_identifier_gas = conn.equipment_identifier_gas()
# Check only for equipment identifier in case no gas meter is connected
if equipment_identifier is None:
raise CannotCommunicate
info = {
CONF_SERIAL_ID: equipment_identifier,
CONF_SERIAL_ID_GAS: equipment_identifier_gas,
}
return info
class DSMRFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow for DSMR."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_PUSH
def _abort_if_host_port_configured(
self,
port: str,
host: str = None,
updates: Optional[Dict[Any, Any]] = None,
reload_on_update: bool = True,
):
"""Test if host and port are already configured."""
for entry in self._async_current_entries():
if entry.data.get(CONF_HOST) == host and entry.data[CONF_PORT] == port:
if updates is not None:
changed = self.hass.config_entries.async_update_entry(
entry, data={**entry.data, **updates}
)
if (
changed
and reload_on_update
and entry.state
in (
config_entries.ENTRY_STATE_LOADED,
config_entries.ENTRY_STATE_SETUP_RETRY,
)
):
self.hass.async_create_task(
self.hass.config_entries.async_reload(entry.entry_id)
)
return self.async_abort(reason="already_configured")
return None
async def async_step_import(self, import_config=None):
"""Handle the initial step."""
host = import_config.get(CONF_HOST)
port = import_config[CONF_PORT]
status = self._abort_if_host_port_configured(port, host, import_config)
if status is not None:
return status
try:
info = await _validate_dsmr_connection(self.hass, import_config)
except CannotConnect:
return self.async_abort(reason="cannot_connect")
except CannotCommunicate:
return self.async_abort(reason="cannot_communicate")
if host is not None:
name = f"{host}:{port}"
else:
name = port
data = {**import_config, **info}
await self.async_set_unique_id(info[CONF_SERIAL_ID])
self._abort_if_unique_id_configured(data)
return self.async_create_entry(title=name, data=data)
class CannotConnect(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
class CannotCommunicate(exceptions.HomeAssistantError):
"""Error to indicate we cannot connect."""
|
from functools import partial
from ...utils import verbose
from ..utils import (has_dataset, _data_path, _data_path_doc,
_get_version, _version_doc)
data_name = "fieldtrip_cmc"
conf_name = "MNE_DATASETS_FIELDTRIP_CMC_PATH"
has_mtrf_data = partial(has_dataset, name=data_name)
@verbose
def data_path(path=None, force_update=False, update_path=True, download=True,
verbose=None): # noqa: D103
return _data_path(path=path, force_update=force_update,
update_path=update_path, name=data_name,
download=download)
data_path.__doc__ = _data_path_doc.format(name=data_name,
conf=conf_name)
def get_version(): # noqa: D103
return _get_version(data_name)
get_version.__doc__ = _version_doc.format(name=data_name)
|
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.db.models import Count
from django.http import JsonResponse
from django.shortcuts import render
from django.urls import reverse
from django.utils.decorators import method_decorator
from django.utils.translation import gettext as _
from django.views.generic.base import TemplateView
from weblate.memory.forms import DeleteForm, UploadForm
from weblate.memory.models import Memory, MemoryImportError
from weblate.utils import messages
from weblate.utils.views import ErrorFormView, get_project
from weblate.wladmin.views import MENU
CD_TEMPLATE = 'attachment; filename="weblate-memory.{}"'
def get_objects(request, kwargs):
if "project" in kwargs:
return {"project": get_project(request, kwargs["project"])}
if "manage" in kwargs:
return {"from_file": True}
return {"user": request.user}
def check_perm(user, permission, objects):
if "project" in objects:
return user.has_perm(permission, objects["project"])
if "user" in objects:
# User can edit own translation memory
return True
if "from_file" in objects:
return user.has_perm("memory.edit")
return False
@method_decorator(login_required, name="dispatch")
class MemoryFormView(ErrorFormView):
def get_success_url(self):
if "manage" in self.kwargs:
return reverse("manage-memory")
return reverse("memory", kwargs=self.kwargs)
def dispatch(self, request, *args, **kwargs):
self.objects = get_objects(request, kwargs)
return super().dispatch(request, *args, **kwargs)
class DeleteView(MemoryFormView):
form_class = DeleteForm
def form_valid(self, form):
if not check_perm(self.request.user, "memory.delete", self.objects):
raise PermissionDenied()
entries = Memory.objects.filter_type(**self.objects)
if "origin" in self.request.POST:
entries = entries.filter(origin=self.request.POST["origin"])
entries.delete()
messages.success(self.request, _("Entries deleted."))
return super().form_valid(form)
class UploadView(MemoryFormView):
form_class = UploadForm
def form_valid(self, form):
if not check_perm(self.request.user, "memory.edit", self.objects):
raise PermissionDenied()
try:
Memory.objects.import_file(
self.request, form.cleaned_data["file"], **self.objects
)
messages.success(
self.request, _("File processed, the entries will appear shortly.")
)
except MemoryImportError as error:
messages.error(self.request, str(error)) # noqa: G200
return super().form_valid(form)
@method_decorator(login_required, name="dispatch")
class MemoryView(TemplateView):
template_name = "memory/index.html"
def dispatch(self, request, *args, **kwargs):
self.objects = get_objects(request, kwargs)
return super().dispatch(request, *args, **kwargs)
def get_url(self, name):
if "manage" in self.kwargs:
return reverse(f"manage-{name}")
return reverse(name, kwargs=self.kwargs)
def get_context_data(self, **kwargs):
context = super().get_context_data(**kwargs)
context.update(self.objects)
entries = Memory.objects.filter_type(**self.objects)
context["num_entries"] = entries.count()
context["entries_origin"] = (
entries.values("origin").order_by("origin").annotate(Count("id"))
)
context["total_entries"] = Memory.objects.all().count()
context["upload_url"] = self.get_url("memory-upload")
context["download_url"] = self.get_url("memory-download")
user = self.request.user
if check_perm(user, "memory.delete", self.objects):
context["delete_url"] = self.get_url("memory-delete")
if check_perm(user, "memory.edit", self.objects):
context["upload_form"] = UploadForm()
if "from_file" in self.objects:
context["menu_items"] = MENU
context["menu_page"] = "memory"
if "from_file" in self.objects or (
"project" in self.objects and self.objects["project"].use_shared_tm
):
context["shared_entries"] = Memory.objects.filter(shared=True).count()
return context
class DownloadView(MemoryView):
def get(self, request, *args, **kwargs):
fmt = request.GET.get("format", "json")
data = Memory.objects.filter_type(**self.objects).prefetch_lang()
if "origin" in request.GET:
data = data.filter(origin=request.GET["origin"])
if "from_file" in self.objects and "kind" in request.GET:
if request.GET["kind"] == "shared":
data = Memory.objects.filter_type(use_shared=True).prefetch_lang()
elif request.GET["kind"] == "all":
data = Memory.objects.prefetch_lang()
if fmt == "tmx":
response = render(
request,
"memory/dump.tmx",
{"data": data},
content_type="application/x-tmx",
)
else:
fmt = "json"
response = JsonResponse([item.as_dict() for item in data], safe=False)
response["Content-Disposition"] = CD_TEMPLATE.format(fmt)
return response
|
from scrapy_redis import picklecompat
def test_picklecompat():
obj = {'_encoding': 'utf-8',
'body': '',
'callback': '_response_downloaded',
'cookies': {},
'dont_filter': False,
'errback': None,
'headers': {'Referer': ['http://www.dmoz.org/']},
'meta': {'depth': 1, 'link_text': u'Fran\xe7ais', 'rule': 0},
'method': 'GET',
'priority': 0,
'url': u'http://www.dmoz.org/World/Fran%C3%A7ais/',
}
assert obj == picklecompat.loads(picklecompat.dumps(obj))
|
from lark import Lark, Transformer, v_args
from lark.indenter import Indenter
from bytecode import Instr, Bytecode
class PythonIndenter(Indenter):
NL_type = '_NEWLINE'
OPEN_PAREN_types = ['LPAR', 'LSQB', 'LBRACE']
CLOSE_PAREN_types = ['RPAR', 'RSQB', 'RBRACE']
INDENT_type = '_INDENT'
DEDENT_type = '_DEDENT'
tab_len = 8
@v_args(inline=True)
class Compile(Transformer):
def number(self, n):
return [Instr('LOAD_CONST', int(n))]
def string(self, s):
return [Instr('LOAD_CONST', s[1:-1])]
def var(self, n):
return [Instr('LOAD_NAME', n)]
def arith_expr(self, a, op, b):
        # TODO: support chained arithmetic
assert op == '+'
return a + b + [Instr('BINARY_ADD')]
def arguments(self, args):
return args
def funccall(self, name, args):
return name + args + [Instr('CALL_FUNCTION', 1)]
@v_args(inline=False)
def file_input(self, stmts):
return sum(stmts, []) + [Instr("RETURN_VALUE")]
def expr_stmt(self, lval, rval):
# TODO more complicated than that
        name, = lval
assert name.name == 'LOAD_NAME' # XXX avoid with another layer of abstraction
return rval + [Instr("STORE_NAME", name.arg)]
def __default__(self, *args):
assert False, args
python_parser3 = Lark.open('python3.lark', rel_to=__file__, start='file_input',
parser='lalr', postlex=PythonIndenter(),
transformer=Compile(), propagate_positions=False)
def compile_python(s):
insts = python_parser3.parse(s+"\n")
return Bytecode(insts).to_code()
code = compile_python("""
a = 3
b = 5
print("Hello World!")
print(a+(b+2))
print((a+b)+2)
""")
exec(code)
# -- Output --
# Hello World!
# 10
# 10
|
import functools
import unittest
from absl import flags
import mock
from perfkitbenchmarker import benchmark_spec
from perfkitbenchmarker import configs
from perfkitbenchmarker import context
from perfkitbenchmarker import pkb
from perfkitbenchmarker import providers
from perfkitbenchmarker import timing_util
from perfkitbenchmarker.configs import benchmark_config_spec
from perfkitbenchmarker.linux_benchmarks import ping_benchmark
from tests import pkb_common_test_case
FLAGS = flags.FLAGS
NAME = 'ping'
UID = 'name0'
class TestBackgroundWorkloadFramework(pkb_common_test_case.PkbCommonTestCase):
def setUp(self):
self.last_call = 0
super(TestBackgroundWorkloadFramework, self).setUp()
FLAGS.cloud = providers.GCP
FLAGS.temp_dir = 'tmp'
self.addCleanup(context.SetThreadBenchmarkSpec, None)
def _CheckAndIncrement(self, throwaway=None, expected_last_call=None):
self.assertEqual(self.last_call, expected_last_call)
self.last_call += 1
def testBackgroundWorkloadSpec(self):
"""Check that benchmark spec calls the prepare, stop, and start on vm."""
config = configs.LoadConfig(ping_benchmark.BENCHMARK_CONFIG, {}, NAME)
config_spec = benchmark_config_spec.BenchmarkConfigSpec(
NAME, flag_values=FLAGS, **config)
spec = benchmark_spec.BenchmarkSpec(ping_benchmark, config_spec, UID)
vm0 = mock.MagicMock()
vm1 = mock.MagicMock()
vm0.IsInterruptible = mock.MagicMock(return_value=False)
vm1.IsInterruptible = mock.MagicMock(return_value=False)
spec.ConstructVirtualMachines()
spec.vms = [vm0, vm1]
timer = timing_util.IntervalTimer()
pkb.DoPreparePhase(spec, timer)
for vm in spec.vms:
self.assertEqual(vm.PrepareBackgroundWorkload.call_count, 1)
with mock.patch(ping_benchmark.__name__ + '.Run'):
vm0.StopBackgroundWorkload.side_effect = functools.partial(
self._CheckAndIncrement, expected_last_call=0)
pkb.DoCleanupPhase(spec, timer)
for vm in spec.vms:
self.assertEqual(vm.StartBackgroundWorkload.call_count, 1)
self.assertEqual(vm.StopBackgroundWorkload.call_count, 1)
self.assertEqual(vm.PrepareBackgroundWorkload.call_count, 1)
if __name__ == '__main__':
unittest.main()
|
import unittest
import numpy as np
import nnabla as nn
import nnabla.functions as F
from nnabla.ext_utils import get_extension_context
from common import gpu_test
class TestNNabla(unittest.TestCase):
def test_addition(self):
# entry variables
a = nn.Variable.from_numpy_array(np.random.random())
b = nn.Variable.from_numpy_array(np.random.random())
# add operation
c = a + b
# forward
c.forward()
self.assertAlmostEqual(c.d, a.d + b.d)
@gpu_test
def test_cuda_ext(self):
ctx = get_extension_context('cudnn', device_id='0')
nn.set_default_context(ctx)
|
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import unittest
from compare_gan.metrics import prd_score as prd
import numpy as np
class PRDTest(unittest.TestCase):
def test_compute_prd_no_overlap(self):
eval_dist = [0, 1]
ref_dist = [1, 0]
result = np.ravel(prd.compute_prd(eval_dist, ref_dist))
np.testing.assert_almost_equal(result, 0)
def test_compute_prd_perfect_overlap(self):
eval_dist = [1, 0]
ref_dist = [1, 0]
result = prd.compute_prd(eval_dist, ref_dist, num_angles=11)
np.testing.assert_almost_equal([result[0][5], result[1][5]], [1, 1])
def test_compute_prd_low_precision_high_recall(self):
eval_dist = [0.5, 0.5]
ref_dist = [1, 0]
result = prd.compute_prd(eval_dist, ref_dist, num_angles=11)
np.testing.assert_almost_equal(result[0][5], 0.5)
np.testing.assert_almost_equal(result[1][5], 0.5)
np.testing.assert_almost_equal(result[0][10], 0.5)
np.testing.assert_almost_equal(result[1][1], 1)
def test_compute_prd_high_precision_low_recall(self):
eval_dist = [1, 0]
ref_dist = [0.5, 0.5]
result = prd.compute_prd(eval_dist, ref_dist, num_angles=11)
np.testing.assert_almost_equal([result[0][5], result[1][5]], [0.5, 0.5])
np.testing.assert_almost_equal(result[1][1], 0.5)
np.testing.assert_almost_equal(result[0][10], 1)
def test_compute_prd_bad_epsilon(self):
with self.assertRaises(ValueError):
prd.compute_prd([1], [1], epsilon=0)
with self.assertRaises(ValueError):
prd.compute_prd([1], [1], epsilon=1)
with self.assertRaises(ValueError):
prd.compute_prd([1], [1], epsilon=-1)
def test_compute_prd_bad_num_angles(self):
with self.assertRaises(ValueError):
prd.compute_prd([1], [1], num_angles=0)
with self.assertRaises(ValueError):
prd.compute_prd([1], [1], num_angles=1)
with self.assertRaises(ValueError):
prd.compute_prd([1], [1], num_angles=-1)
with self.assertRaises(ValueError):
prd.compute_prd([1], [1], num_angles=1e6+1)
with self.assertRaises(ValueError):
prd.compute_prd([1], [1], num_angles=2.5)
def test__cluster_into_bins(self):
eval_data = np.zeros([5, 4])
ref_data = np.ones([5, 4])
result = prd._cluster_into_bins(eval_data, ref_data, 3)
self.assertEqual(len(result), 2)
self.assertEqual(len(result[0]), 3)
self.assertEqual(len(result[1]), 3)
np.testing.assert_almost_equal(sum(result[0]), 1)
np.testing.assert_almost_equal(sum(result[1]), 1)
def test_compute_prd_from_embedding_mismatch_num_samples_should_fail(self):
# Mismatch in number of samples with enforce_balance set to True
with self.assertRaises(ValueError):
prd.compute_prd_from_embedding(
np.array([[0], [0], [1]]), np.array([[0], [1]]), num_clusters=2,
enforce_balance=True)
def test_compute_prd_from_embedding_mismatch_num_samples_should_work(self):
# Mismatch in number of samples with enforce_balance set to False
try:
prd.compute_prd_from_embedding(
np.array([[0], [0], [1]]), np.array([[0], [1]]), num_clusters=2,
enforce_balance=False)
except ValueError:
self.fail(
'compute_prd_from_embedding should not raise a ValueError when '
'enforce_balance is set to False.')
def test__prd_to_f_beta_correct_computation(self):
precision = np.array([1, 1, 0, 0, 0.5, 1, 0.5])
recall = np.array([1, 0, 1, 0, 0.5, 0.5, 1])
expected = np.array([1, 0, 0, 0, 0.5, 2/3, 2/3])
with np.errstate(invalid='ignore'):
result = prd._prd_to_f_beta(precision, recall, beta=1)
np.testing.assert_almost_equal(result, expected)
expected = np.array([1, 0, 0, 0, 0.5, 5/9, 5/6])
with np.errstate(invalid='ignore'):
result = prd._prd_to_f_beta(precision, recall, beta=2)
np.testing.assert_almost_equal(result, expected)
expected = np.array([1, 0, 0, 0, 0.5, 5/6, 5/9])
with np.errstate(invalid='ignore'):
result = prd._prd_to_f_beta(precision, recall, beta=1/2)
np.testing.assert_almost_equal(result, expected)
result = prd._prd_to_f_beta(np.array([]), np.array([]), beta=1)
expected = np.array([])
np.testing.assert_almost_equal(result, expected)
def test__prd_to_f_beta_bad_beta(self):
with self.assertRaises(ValueError):
prd._prd_to_f_beta(np.ones(1), np.ones(1), beta=0)
with self.assertRaises(ValueError):
prd._prd_to_f_beta(np.ones(1), np.ones(1), beta=-3)
def test__prd_to_f_beta_bad_precision_or_recall(self):
with self.assertRaises(ValueError):
prd._prd_to_f_beta(-np.ones(1), np.ones(1), beta=1)
with self.assertRaises(ValueError):
prd._prd_to_f_beta(np.ones(1), -np.ones(1), beta=1)
def test_plot_not_enough_labels(self):
with self.assertRaises(ValueError):
prd.plot(np.zeros([3, 2, 5]), labels=['1', '2'])
def test_plot_too_many_labels(self):
with self.assertRaises(ValueError):
prd.plot(np.zeros([1, 2, 5]), labels=['1', '2', '3'])
if __name__ == '__main__':
unittest.main()
|
import calendar
import datetime
import sys
from datetime import timedelta
from ._daterange import DateRange
from ._generalslice import OPEN_OPEN, CLOSED_CLOSED, OPEN_CLOSED, CLOSED_OPEN
from ._mktz import mktz
from ._parse import parse
if sys.version_info > (3,):
long = int
# Support standard brackets syntax for open/closed ranges.
Ranges = {'()': OPEN_OPEN,
'(]': OPEN_CLOSED,
'[)': CLOSED_OPEN,
'[]': CLOSED_CLOSED}
def string_to_daterange(str_range, delimiter='-', as_dates=False, interval=CLOSED_CLOSED):
"""
    Convert a string to a DateRange type. If only one date is given, the range
    covers that date (or datetime) until 24 hours later. You can optionally
    use mixtures of []/() around the DateRange for OPEN/CLOSED interval behaviour.
Parameters
----------
str_range : `String`
The range as a string of dates separated by one delimiter.
delimiter : `String`
The separator between the dates, using '-' as default.
as_dates : `Boolean`
True if you want the date-range to use datetime.date rather than datetime.datetime.
interval : `int`
CLOSED_CLOSED, OPEN_CLOSED, CLOSED_OPEN or OPEN_OPEN.
**Default is CLOSED_CLOSED**.
Returns
-------
`arctic.date.DateRange` : the DateRange parsed from the string.
Examples
--------
>>> from arctic.date import string_to_daterange
>>> string_to_daterange('20111020', as_dates=True)
DateRange(start=datetime.date(2011, 10, 20), end=datetime.date(2011, 10, 21))
>>> string_to_daterange('201110201030')
DateRange(start=datetime.datetime(2011, 10, 20, 10, 30), end=datetime.datetime(2011, 10, 21, 10, 30))
>>> string_to_daterange('20111020-20120120', as_dates=True)
DateRange(start=datetime.date(2011, 10, 20), end=datetime.date(2012, 1, 20))
>>> string_to_daterange('[20111020-20120120)', as_dates=True)
DateRange(start=datetime.date(2011, 10, 20), end=datetime.date(2012, 1, 20))
"""
num_dates = str_range.count(delimiter) + 1
if num_dates > 2:
raise ValueError('Too many dates in input string [%s] with delimiter (%s)' % (str_range, delimiter))
# Allow the user to use the [date-date), etc. range syntax to specify the interval.
range_mode = Ranges.get(str_range[0] + str_range[-1], None)
if range_mode:
return string_to_daterange(str_range[1:-1], delimiter, as_dates, interval=range_mode)
if as_dates:
parse_dt = lambda s: parse(s).date() if s else None
else:
parse_dt = lambda s: parse(s) if s else None
if num_dates == 2:
d = [parse_dt(x) for x in str_range.split(delimiter)]
oc = interval
else:
start = parse_dt(str_range)
d = [start, start + datetime.timedelta(1)]
oc = CLOSED_OPEN # Always use closed-open for a single date/datetime.
return DateRange(d[0], d[1], oc)
def to_dt(date, default_tz=None):
"""
Returns a non-naive datetime.datetime.
Interprets numbers as ms-since-epoch.
Parameters
----------
date : `int` or `datetime.datetime`
The datetime to convert
default_tz : tzinfo
        The TimeZone to use if none is found. If not supplied and the
        datetime is naive, a ValueError is raised.
Returns
-------
Non-naive datetime
"""
if isinstance(date, (int, long)):
return ms_to_datetime(date, default_tz)
elif date.tzinfo is None:
if default_tz is None:
raise ValueError("Must specify a TimeZone on incoming data")
return date.replace(tzinfo=default_tz)
return date
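# A minimal usage sketch (illustrative; assumes mktz('UTC') resolves to a
# UTC tzinfo -- the exact repr of the tzinfo may differ):
#
#   >>> to_dt(0, mktz('UTC'))                    # ms since epoch
#   datetime.datetime(1970, 1, 1, 0, 0, tzinfo=<UTC>)
#   >>> to_dt(datetime.datetime(2020, 1, 1))     # naive and no default_tz
#   Traceback (most recent call last):
#       ...
#   ValueError: Must specify a TimeZone on incoming data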
def to_pandas_closed_closed(date_range, add_tz=True):
"""
    Pandas DateRange slicing is CLOSED-CLOSED, i.e. inclusive at both ends.
Parameters
----------
    date_range : `DateRange` object
        The range to convert to CLOSED_CLOSED form for pandas slicing.
add_tz : `bool`
Adds a TimeZone to the daterange start and end if it doesn't
have one.
Returns
-------
Returns a date_range with start-end suitable for slicing in pandas.
"""
if not date_range:
return None
start = date_range.start
end = date_range.end
if start:
start = to_dt(start, mktz()) if add_tz else start
if date_range.startopen:
start += timedelta(milliseconds=1)
if end:
end = to_dt(end, mktz()) if add_tz else end
if date_range.endopen:
end -= timedelta(milliseconds=1)
return DateRange(start, end)
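# Sketch of the boundary nudging above (dates illustrative): an open start
# is shifted forward by 1ms and an open end backward by 1ms, so pandas'
# inclusive slicing reproduces the open-interval semantics, e.g.
#   (2020-01-01 00:00:00, 2020-01-02 00:00:00] ->
#   [2020-01-01 00:00:00.001, 2020-01-02 00:00:00]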
def ms_to_datetime(ms, tzinfo=None):
"""Convert a millisecond time value to an offset-aware Python datetime object."""
if not isinstance(ms, (int, long)):
raise TypeError('expected integer, not %s' % type(ms))
if tzinfo is None:
tzinfo = mktz()
return datetime.datetime.fromtimestamp(ms * 1e-3, tzinfo)
def _add_tzone(dtm):
if dtm.tzinfo is None:
dtm = dtm.replace(tzinfo=mktz())
return dtm
def datetime_to_ms(d):
"""Convert a Python datetime object to a millisecond epoch (UTC) time value."""
try:
millisecond = d.microsecond // 1000
return calendar.timegm(_add_tzone(d).utctimetuple()) * 1000 + millisecond
except AttributeError:
        raise TypeError('expected a Python datetime object, not %s' % type(d))
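# Round-trip sketch (assumes mktz('UTC') resolves to a UTC tzinfo):
#
#   >>> dt = ms_to_datetime(0, mktz('UTC'))      # 1970-01-01 00:00:00+00:00
#   >>> datetime_to_ms(dt)
#   0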
def utc_dt_to_local_dt(dtm):
"""Convert a UTC datetime to datetime in local timezone"""
utc_zone = mktz("UTC")
if dtm.tzinfo is not None and dtm.tzinfo != utc_zone:
raise ValueError(
"Expected dtm without tzinfo or with UTC, not %r" % (
dtm.tzinfo
)
)
if dtm.tzinfo is None:
dtm = dtm.replace(tzinfo=utc_zone)
return dtm.astimezone(mktz())
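# Illustrative only -- the result depends on the machine's local zone:
#
#   >>> utc_dt_to_local_dt(datetime.datetime(2015, 6, 1, 12, 0))  # naive => UTC
#   datetime.datetime(2015, 6, 1, 13, 0, tzinfo=...)  # e.g. under BST (UTC+1)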
|
import mne
import os.path
import pytest
import copy
import itertools
import numpy as np
from mne.datasets import testing
from mne.io.fieldtrip.utils import NOINFO_WARNING, _create_events
from mne.utils import _check_pandas_installed, requires_h5py
from mne.io.fieldtrip.tests.helpers import (check_info_fields, get_data_paths,
get_raw_data, get_epochs,
get_evoked, _has_h5py,
pandas_not_found_warning_msg,
get_raw_info, check_data,
assert_warning_in_record)
# Systems not covered:
# KIT: the biggest problem is that the channel names do not match.
# EGI: no calibration is done in FieldTrip, so the data differ substantially.
all_systems_raw = ['neuromag306', 'CTF', 'CNT', 'BTI', 'eximia']
all_systems_epochs = ['neuromag306', 'CTF', 'CNT']
all_versions = ['v7', 'v73']
use_info = [True, False]
all_test_params_raw = list(itertools.product(all_systems_raw, all_versions,
use_info))
all_test_params_epochs = list(itertools.product(all_systems_epochs,
all_versions,
use_info))
# For speed, skip some of the slowest combinations -- coverage should still
# be sufficient.
for key in [('CTF', 'v73', True), ('neuromag306', 'v73', False)]:
all_test_params_epochs.pop(all_test_params_epochs.index(key))
all_test_params_raw.pop(all_test_params_raw.index(key))
no_info_warning = {'expected_warning': RuntimeWarning,
'match': NOINFO_WARNING}
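# 'no_info_warning' is splatted into pytest.warns(...) in the tests below, so
#   with pytest.warns(**no_info_warning):
# is equivalent to
#   with pytest.warns(RuntimeWarning, match=NOINFO_WARNING):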
@pytest.mark.slowtest
@testing.requires_testing_data
# Reading the sample CNT data results in a RuntimeWarning because it cannot
# parse the measurement date. We need to ignore that warning.
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning')
@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning')
@pytest.mark.parametrize('cur_system, version, use_info',
all_test_params_epochs)
def test_read_evoked(cur_system, version, use_info):
"""Test comparing reading an Evoked object and the FieldTrip version."""
test_data_folder_ft = get_data_paths(cur_system)
mne_avg = get_evoked(cur_system)
if use_info:
info = get_raw_info(cur_system)
pytestwarning = {'expected_warning': None}
else:
info = None
pytestwarning = no_info_warning
cur_fname = os.path.join(test_data_folder_ft,
'averaged_%s.mat' % (version,))
if version == 'v73' and not _has_h5py():
with pytest.raises(ImportError):
mne.io.read_evoked_fieldtrip(cur_fname, info)
return
with pytest.warns(**pytestwarning):
avg_ft = mne.io.read_evoked_fieldtrip(cur_fname, info)
mne_data = mne_avg.data[:, :-1]
ft_data = avg_ft.data
check_data(mne_data, ft_data, cur_system)
check_info_fields(mne_avg, avg_ft, use_info)
@testing.requires_testing_data
# Reading the sample CNT data results in a RuntimeWarning because it cannot
# parse the measurement date. We need to ignore that warning.
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning')
@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning')
@pytest.mark.parametrize('cur_system, version, use_info',
all_test_params_epochs)
# Strange, non-deterministic Pandas errors:
# "ValueError: cannot expose native-only dtype 'g' in non-native
# byte order '<' via buffer interface"
@pytest.mark.skipif(os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true',
reason='Pandas problem on Azure CI')
def test_read_epochs(cur_system, version, use_info, monkeypatch):
"""Test comparing reading an Epochs object and the FieldTrip version."""
pandas = _check_pandas_installed(strict=False)
has_pandas = pandas is not False
test_data_folder_ft = get_data_paths(cur_system)
mne_epoched = get_epochs(cur_system)
if use_info:
info = get_raw_info(cur_system)
pytestwarning = {'expected_warning': None}
else:
info = None
pytestwarning = no_info_warning
cur_fname = os.path.join(test_data_folder_ft,
'epoched_%s.mat' % (version,))
if has_pandas:
if version == 'v73' and not _has_h5py():
with pytest.raises(ImportError):
mne.io.read_epochs_fieldtrip(cur_fname, info)
return
with pytest.warns(**pytestwarning):
epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info)
assert isinstance(epoched_ft.metadata, pandas.DataFrame)
else:
with pytest.warns(None) as warn_record:
if version == 'v73' and not _has_h5py():
with pytest.raises(ImportError):
mne.io.read_epochs_fieldtrip(cur_fname, info)
return
epoched_ft = mne.io.read_epochs_fieldtrip(cur_fname, info)
assert epoched_ft.metadata is None
assert_warning_in_record(pandas_not_found_warning_msg, warn_record)
if pytestwarning['expected_warning'] is not None:
assert_warning_in_record(pytestwarning['match'], warn_record)
mne_data = mne_epoched.get_data()[:, :, :-1]
ft_data = epoched_ft.get_data()
check_data(mne_data, ft_data, cur_system)
check_info_fields(mne_epoched, epoched_ft, use_info)
    # Simulate a malformed file whose 'fsample' field contains multiple values.
from mne.externals.pymatreader import read_mat
def modify_mat(fname, variable_names=None, ignore_fields=None):
out = read_mat(fname, variable_names, ignore_fields)
if 'fsample' in out['data']:
out['data']['fsample'] = np.repeat(out['data']['fsample'], 2)
return out
monkeypatch.setattr(mne.externals.pymatreader, 'read_mat', modify_mat)
with pytest.warns(RuntimeWarning, match='multiple'):
mne.io.read_epochs_fieldtrip(cur_fname, info)
@testing.requires_testing_data
# Reading the sample CNT data results in a RuntimeWarning because it cannot
# parse the measurement date. We need to ignore that warning.
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning')
@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning')
@pytest.mark.parametrize('cur_system, version, use_info', all_test_params_raw)
def test_raw(cur_system, version, use_info):
"""Test comparing reading a raw fiff file and the FieldTrip version."""
# Load the raw fiff file with mne
test_data_folder_ft = get_data_paths(cur_system)
raw_fiff_mne = get_raw_data(cur_system, drop_extra_chs=True)
if use_info:
info = get_raw_info(cur_system)
pytestwarning = {'expected_warning': None}
else:
info = None
pytestwarning = no_info_warning
cur_fname = os.path.join(test_data_folder_ft,
'raw_%s.mat' % (version,))
if version == 'v73' and not _has_h5py():
with pytest.raises(ImportError):
mne.io.read_raw_fieldtrip(cur_fname, info)
return
with pytest.warns(**pytestwarning):
raw_fiff_ft = mne.io.read_raw_fieldtrip(cur_fname, info)
if cur_system == 'BTI' and not use_info:
raw_fiff_ft.drop_channels(['MzA', 'MxA', 'MyaA',
'MyA', 'MxaA', 'MzaA'])
if cur_system == 'eximia' and not use_info:
raw_fiff_ft.drop_channels(['TRIG2', 'TRIG1', 'GATE'])
# Check that the data was loaded correctly
check_data(raw_fiff_mne.get_data(),
raw_fiff_ft.get_data(),
cur_system)
# Check info field
check_info_fields(raw_fiff_mne, raw_fiff_ft, use_info)
@testing.requires_testing_data
def test_load_epoched_as_raw():
"""Test whether exception is thrown when loading epochs as raw."""
test_data_folder_ft = get_data_paths('neuromag306')
info = get_raw_info('neuromag306')
cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat')
with pytest.raises(RuntimeError):
mne.io.read_raw_fieldtrip(cur_fname, info)
@testing.requires_testing_data
def test_invalid_trialinfocolumn():
"""Test for exceptions when using wrong values for trialinfo parameter."""
test_data_folder_ft = get_data_paths('neuromag306')
info = get_raw_info('neuromag306')
cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat')
with pytest.raises(ValueError):
mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=-1)
with pytest.raises(ValueError):
mne.io.read_epochs_fieldtrip(cur_fname, info, trialinfo_column=3)
@testing.requires_testing_data
def test_create_events():
"""Test 2dim trialinfo fields."""
from mne.externals.pymatreader import read_mat
test_data_folder_ft = get_data_paths('neuromag306')
cur_fname = os.path.join(test_data_folder_ft, 'epoched_v7.mat')
original_data = read_mat(cur_fname, ['data', ])
new_data = copy.deepcopy(original_data)
new_data['trialinfo'] = np.array([[1, 2, 3, 4],
[1, 2, 3, 4],
[1, 2, 3, 4]])
with pytest.raises(ValueError):
_create_events(new_data, -1)
for cur_col in np.arange(4):
evts = _create_events(new_data, cur_col)
assert np.all(evts[:, 2] == cur_col + 1)
with pytest.raises(ValueError):
_create_events(new_data, 4)
@testing.requires_testing_data
@pytest.mark.parametrize('version', all_versions)
@requires_h5py
def test_one_channel_elec_bug(version):
"""Test if loading data having only one elec in the elec field works."""
fname = os.path.join(mne.datasets.testing.data_path(), 'fieldtrip',
'one_channel_elec_bug_data_%s.mat' % (version, ))
with pytest.warns(**no_info_warning):
mne.io.read_raw_fieldtrip(fname, info=None)
@testing.requires_testing_data
# Reading the sample CNT data results in a RuntimeWarning because it cannot
# parse the measurement date. We need to ignore that warning.
@pytest.mark.filterwarnings('ignore:.*parse meas date.*:RuntimeWarning')
@pytest.mark.filterwarnings('ignore:.*number of bytes.*:RuntimeWarning')
@pytest.mark.parametrize('version', all_versions)
@pytest.mark.parametrize('type', ['averaged', 'epoched', 'raw'])
@requires_h5py
def test_throw_exception_on_cellarray(version, type):
"""Test for a meaningful exception when the data is a cell array."""
fname = os.path.join(get_data_paths('cellarray'),
'%s_%s.mat' % (type, version))
info = get_raw_info('CNT')
with pytest.raises(RuntimeError, match='Loading of data in cell arrays '
'is not supported'):
if type == 'averaged':
mne.read_evoked_fieldtrip(fname, info)
elif type == 'epoched':
mne.read_epochs_fieldtrip(fname, info)
elif type == 'raw':
mne.io.read_raw_fieldtrip(fname, info)
@testing.requires_testing_data
def test_with_missing_channels():
"""Test _create_info when channels are missing from info."""
cur_system = 'neuromag306'
test_data_folder_ft = get_data_paths(cur_system)
info = get_raw_info(cur_system)
del info['chs'][1:20]
info._update_redundant()
with pytest.warns(RuntimeWarning):
mne.io.read_raw_fieldtrip(
os.path.join(test_data_folder_ft, 'raw_v7.mat'), info)
mne.read_evoked_fieldtrip(
os.path.join(test_data_folder_ft, 'averaged_v7.mat'), info)
mne.read_epochs_fieldtrip(
os.path.join(test_data_folder_ft, 'epoched_v7.mat'), info)
@testing.requires_testing_data
@pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info')
@pytest.mark.filterwarnings('ignore: Cannot guess the correct type')
def test_throw_error_on_non_uniform_time_field():
"""Test if an error is thrown when time fields are not uniform."""
fname = os.path.join(mne.datasets.testing.data_path(),
'fieldtrip',
'not_uniform_time.mat')
with pytest.raises(RuntimeError, match='Loading data with non-uniform '
'times per epoch is not supported'):
mne.io.read_epochs_fieldtrip(fname, info=None)
@testing.requires_testing_data
@pytest.mark.filterwarnings('ignore: Importing FieldTrip data without an info')
def test_throw_error_when_importing_old_ft_version_data():
"""Test if an error is thrown if the data was saved with an old version."""
fname = os.path.join(mne.datasets.testing.data_path(),
'fieldtrip',
'old_version.mat')
with pytest.raises(RuntimeError, match='This file was created with '
'an old version of FieldTrip. You '
'can convert the data to the new '
'version by loading it into '
'FieldTrip and applying '
'ft_selectdata with an '
'empty cfg structure on it. '
'Otherwise you can supply '
'the Info field.'):
mne.io.read_epochs_fieldtrip(fname, info=None)
|
from datetime import timedelta
import logging
from georss_client import UPDATE_OK, UPDATE_OK_NO_DATA
from georss_generic_client import GenericFeed
import voluptuous as vol
from homeassistant.components.sensor import PLATFORM_SCHEMA
from homeassistant.const import (
CONF_LATITUDE,
CONF_LONGITUDE,
CONF_NAME,
CONF_RADIUS,
CONF_UNIT_OF_MEASUREMENT,
CONF_URL,
LENGTH_KILOMETERS,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.entity import Entity
_LOGGER = logging.getLogger(__name__)
ATTR_CATEGORY = "category"
ATTR_DISTANCE = "distance"
ATTR_TITLE = "title"
CONF_CATEGORIES = "categories"
DEFAULT_ICON = "mdi:alert"
DEFAULT_NAME = "Event Service"
DEFAULT_RADIUS_IN_KM = 20.0
DEFAULT_UNIT_OF_MEASUREMENT = "Events"
DOMAIN = "geo_rss_events"
SCAN_INTERVAL = timedelta(minutes=5)
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Required(CONF_URL): cv.string,
vol.Optional(CONF_LATITUDE): cv.latitude,
vol.Optional(CONF_LONGITUDE): cv.longitude,
vol.Optional(CONF_RADIUS, default=DEFAULT_RADIUS_IN_KM): vol.Coerce(float),
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_CATEGORIES, default=[]): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(
CONF_UNIT_OF_MEASUREMENT, default=DEFAULT_UNIT_OF_MEASUREMENT
): cv.string,
}
)
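# A hypothetical configuration.yaml entry matching the schema above
# (the URL and values are illustrative only):
#
#   sensor:
#     - platform: geo_rss_events
#       url: https://example.com/feed.xml
#       radius: 50
#       categories:
#         - Security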
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the GeoRSS component."""
latitude = config.get(CONF_LATITUDE, hass.config.latitude)
longitude = config.get(CONF_LONGITUDE, hass.config.longitude)
url = config.get(CONF_URL)
radius_in_km = config.get(CONF_RADIUS)
name = config.get(CONF_NAME)
categories = config.get(CONF_CATEGORIES)
unit_of_measurement = config.get(CONF_UNIT_OF_MEASUREMENT)
_LOGGER.debug(
"latitude=%s, longitude=%s, url=%s, radius=%s",
latitude,
longitude,
url,
radius_in_km,
)
# Create all sensors based on categories.
devices = []
if not categories:
device = GeoRssServiceSensor(
(latitude, longitude), url, radius_in_km, None, name, unit_of_measurement
)
devices.append(device)
else:
for category in categories:
device = GeoRssServiceSensor(
(latitude, longitude),
url,
radius_in_km,
category,
name,
unit_of_measurement,
)
devices.append(device)
add_entities(devices, True)
class GeoRssServiceSensor(Entity):
"""Representation of a Sensor."""
def __init__(
self, coordinates, url, radius, category, service_name, unit_of_measurement
):
"""Initialize the sensor."""
self._category = category
self._service_name = service_name
self._state = None
self._state_attributes = None
self._unit_of_measurement = unit_of_measurement
self._feed = GenericFeed(
coordinates,
url,
filter_radius=radius,
filter_categories=None if not category else [category],
)
@property
def name(self):
"""Return the name of the sensor."""
return f"{self._service_name} {'Any' if self._category is None else self._category}"
@property
def state(self):
"""Return the state of the sensor."""
return self._state
@property
def unit_of_measurement(self):
"""Return the unit of measurement."""
return self._unit_of_measurement
@property
def icon(self):
"""Return the default icon to use in the frontend."""
return DEFAULT_ICON
@property
def device_state_attributes(self):
"""Return the state attributes."""
return self._state_attributes
def update(self):
"""Update this sensor from the GeoRSS service."""
status, feed_entries = self._feed.update()
if status == UPDATE_OK:
_LOGGER.debug(
"Adding events to sensor %s: %s", self.entity_id, feed_entries
)
self._state = len(feed_entries)
# And now compute the attributes from the filtered events.
matrix = {}
for entry in feed_entries:
matrix[entry.title] = f"{entry.distance_to_home:.0f}{LENGTH_KILOMETERS}"
self._state_attributes = matrix
elif status == UPDATE_OK_NO_DATA:
_LOGGER.debug("Update successful, but no data received from %s", self._feed)
# Don't change the state or state attributes.
else:
_LOGGER.warning(
"Update not successful, no data received from %s", self._feed
)
# If no events were found due to an error then just set state to
# zero.
self._state = 0
self._state_attributes = {}
|